Mirror of https://github.com/moby/moby.git, synced 2026-01-14 17:36:01 +00:00
Compare commits
173 Commits
Commit SHA1s:

3025fab86d, 2af1a28b19, 6e95db3aa6, 3a8eb7efe5, f5f15cef17, b6c3f5483b, 88c68b1226, faf6a54607, bdcc51c004, 9c6669ca38, 5205ea9f8e, 64a40af0ef, f7c3505c82, 3e0c4ba425, 2a322a86ec, 89b3062abd, e39ef953eb, 70ed458f9b, 3bda2f839e, 81b93d3e05, 28e8a23afd, d8a1415724, 4d9560d6df, 357886087c, 37f2a4135a, 30ff73f25a, 30f2e54a95, f2bc077921, 9b7903b291, 53ecbc1e26, b61235e50a, b3a92aedd5, ed9dba83fd, d8674ab4d9, ef804f5954, 95807d26b8, d960588787, 936bfb792c, 590e249fb9, eaac976b86, 01ba6c607d, 4c146e4945, 5b2bd7a42c, 277b75e5c2, 2e72912cdb, 8b58088b87, e462220e0e, 578c65d309, 328131f6d9, 8d9bd34f6d, 4dc89c2ba5, 792b33ddad, 6ea1289acc, 254ff38eb1, c20fcb4d2c, b097b8c18e, 0f69c67a27, 8d5c53e1d4, 9c662344a1, 3fb5329499, 4c81a932c0, d333ed926e, c4c2efb855, ba6dcc61d2, e5b0f306c9, 9e12578690, 85daff009a, 0f4b73cd74, e581b23691, 20bd79ac97, 1b47203d9a, 237840446b, eac73050d7, 7a9644db86, de54a079af, 834ded3aee, 50ebafca50, c2ae605ee5, 047fc79ee1, 7464a5c496, d72ea00242, 93b315efa5, a9ce67fb88, 16c0d67f4d, 683a13108e, 74666f41d7, c363a59c2d, 6656e12aaa, 58a2b0e872, 3a5a738a55, 86885803ff, 3fe917f56a, 1b71859bb5, fe4cd44867, b5235a6c4a, fbdfaeeedc, b8835f4bba, 486d776e49, 1b0bc01b70, f20597e377, 12cdef7182, 7ca5a37fbe, 2256402711, c7c462a7b6, d4afed04d6, 0ea0289c77, 77dcf9e04b, 5e22bc1b27, 41446f1992, 355f08e80f, 9a16ce3de0, ba83de48c9, 715612aabb, 1be121e241, 06cfa32468, 13693101bf, 0732215719, b1754d1e1c, 411e817ddf, 9cc85eaef1, 820cab90bc, 6bc49067a6, 6fbdce4b94, f5334644ec, c1d4587d76, d6428049a5, daba2462f5, de5c9cf0b9, c62dcf8ab1, 17315a20ee, cbd94183ab, fb9f72aeb6, 3115daaa91, 2861734174, 9c95aea306, 3e09e197a7, 65b679ac9c, 12072173df, 92f00d41ba, a046857fbf, 476d9314d0, 4021fa775e, 0fd5efe64a, 1017246d24, fbfa1bcc96, e12db89eef, 5838467405, 1033f561af, 0fa6816991, eaad6f503d, f679e1d4dc, fda7b48363, 258a372230, dbec963cee, e35c6f5625, 364e2d28ce, 06e19ec444, 8e96db1c32, c21fe3efa3, 86af4eddb3, 73511cdee0, 9326cda7bf, 76fcf9a8e0, ef1912d8b6, 10739af81a, ac2de55998, 9a2b531127, 2f5bbbe16b, 40618081f1, 21da192ae4, 2c91196921, a9a8787c93, c9689eccf5
.github/workflows/.dco.yml (vendored, 9 changed lines)
@@ -7,11 +7,12 @@ on:
workflow_call:

env:
ALPINE_VERSION: 3.16
ALPINE_VERSION: "3.20"

jobs:
run:
runs-on: ubuntu-20.04
timeout-minutes: 10 # guardrails timeout for the whole job
steps:
-
name: Checkout
@@ -39,10 +40,12 @@ jobs:
name: Validate
run: |
docker run --rm \
-v "$(pwd):/workspace" \
--quiet \
-v ./:/workspace \
-w /workspace \
-e VALIDATE_REPO \
-e VALIDATE_BRANCH \
alpine:${{ env.ALPINE_VERSION }} sh -c 'apk add --no-cache -q bash git openssh-client && git config --system --add safe.directory /workspace && cd /workspace && hack/validate/dco'
alpine:${{ env.ALPINE_VERSION }} sh -c 'apk add --no-cache -q bash git openssh-client && git config --system --add safe.directory /workspace && hack/validate/dco'
env:
VALIDATE_REPO: ${{ github.server_url }}/${{ github.repository }}.git
VALIDATE_BRANCH: ${{ steps.base-ref.outputs.result }}
.github/workflows/.test-prepare.yml (vendored, 1 changed line)
@@ -13,6 +13,7 @@ on:
jobs:
run:
runs-on: ubuntu-20.04
timeout-minutes: 120 # guardrails timeout for the whole job
outputs:
matrix: ${{ steps.set.outputs.matrix }}
steps:
.github/workflows/.test.yml (vendored, 41 changed lines)
@@ -12,21 +12,19 @@ on:
default: "graphdriver"

env:
GO_VERSION: "1.21.9"
GO_VERSION: "1.22.10"
GOTESTLIST_VERSION: v0.3.1
TESTSTAT_VERSION: v0.1.25
ITG_CLI_MATRIX_SIZE: 6
DOCKER_EXPERIMENTAL: 1
DOCKER_GRAPHDRIVER: ${{ inputs.storage == 'snapshotter' && 'overlayfs' || 'overlay2' }}
TEST_INTEGRATION_USE_SNAPSHOTTER: ${{ inputs.storage == 'snapshotter' && '1' || '' }}
SETUP_BUILDX_VERSION: latest
SETUP_BUILDKIT_IMAGE: moby/buildkit:latest

jobs:
unit:
runs-on: ubuntu-20.04
timeout-minutes: 120 # guardrails timeout for the whole job
continue-on-error: ${{ github.event_name != 'pull_request' }}
timeout-minutes: 120
steps:
-
name: Checkout
@@ -37,10 +35,6 @@ jobs:
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Build dev image
uses: docker/bake-action@v4
@@ -80,8 +74,8 @@ jobs:

unit-report:
runs-on: ubuntu-20.04
continue-on-error: ${{ github.event_name != 'pull_request' }}
timeout-minutes: 10
continue-on-error: ${{ github.event_name != 'pull_request' }}
if: always()
needs:
- unit
@@ -108,8 +102,8 @@ jobs:

docker-py:
runs-on: ubuntu-20.04
timeout-minutes: 120 # guardrails timeout for the whole job
continue-on-error: ${{ github.event_name != 'pull_request' }}
timeout-minutes: 120
steps:
-
name: Checkout
@@ -123,10 +117,6 @@ jobs:
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Build dev image
uses: docker/bake-action@v4
@@ -165,8 +155,8 @@ jobs:

integration-flaky:
runs-on: ubuntu-20.04
timeout-minutes: 120 # guardrails timeout for the whole job
continue-on-error: ${{ github.event_name != 'pull_request' }}
timeout-minutes: 120
steps:
-
name: Checkout
@@ -177,10 +167,6 @@ jobs:
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Build dev image
uses: docker/bake-action@v4
@@ -197,8 +183,8 @@ jobs:

integration:
runs-on: ${{ matrix.os }}
timeout-minutes: 120 # guardrails timeout for the whole job
continue-on-error: ${{ github.event_name != 'pull_request' }}
timeout-minutes: 120
strategy:
fail-fast: false
matrix:
@@ -235,10 +221,6 @@ jobs:
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Build dev image
uses: docker/bake-action@v4
@@ -295,8 +277,8 @@ jobs:

integration-report:
runs-on: ubuntu-20.04
continue-on-error: ${{ github.event_name != 'pull_request' }}
timeout-minutes: 10
continue-on-error: ${{ github.event_name != 'pull_request' }}
if: always()
needs:
- integration
@@ -324,6 +306,7 @@ jobs:

integration-cli-prepare:
runs-on: ubuntu-20.04
timeout-minutes: 120 # guardrails timeout for the whole job
continue-on-error: ${{ github.event_name != 'pull_request' }}
outputs:
matrix: ${{ steps.tests.outputs.matrix }}
@@ -359,8 +342,8 @@ jobs:

integration-cli:
runs-on: ubuntu-20.04
timeout-minutes: 120 # guardrails timeout for the whole job
continue-on-error: ${{ github.event_name != 'pull_request' }}
timeout-minutes: 120
needs:
- integration-cli-prepare
strategy:
@@ -380,10 +363,6 @@ jobs:
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Build dev image
uses: docker/bake-action@v4
@@ -439,8 +418,8 @@ jobs:

integration-cli-report:
runs-on: ubuntu-20.04
continue-on-error: ${{ github.event_name != 'pull_request' }}
timeout-minutes: 10
continue-on-error: ${{ github.event_name != 'pull_request' }}
if: always()
needs:
- integration-cli
.github/workflows/.windows.yml (vendored, 16 changed lines)
@@ -19,7 +19,7 @@ on:
default: false

env:
GO_VERSION: "1.21.11"
GO_VERSION: "1.22.10"
GOTESTLIST_VERSION: v0.3.1
TESTSTAT_VERSION: v0.1.25
WINDOWS_BASE_IMAGE: mcr.microsoft.com/windows/servercore
@@ -33,6 +33,7 @@ env:
jobs:
build:
runs-on: ${{ inputs.os }}
timeout-minutes: 120 # guardrails timeout for the whole job
env:
GOPATH: ${{ github.workspace }}\go
GOBIN: ${{ github.workspace }}\go\bin
@@ -112,7 +113,7 @@ jobs:

unit-test:
runs-on: ${{ inputs.os }}
timeout-minutes: 120
timeout-minutes: 120 # guardrails timeout for the whole job
env:
GOPATH: ${{ github.workspace }}\go
GOBIN: ${{ github.workspace }}\go\bin
@@ -193,7 +194,8 @@ jobs:
retention-days: 1

unit-test-report:
runs-on: ubuntu-latest
runs-on: ubuntu-24.04
timeout-minutes: 120 # guardrails timeout for the whole job
if: always()
needs:
- unit-test
@@ -219,7 +221,8 @@ jobs:
find /tmp/artifacts -type f -name '*-go-test-report.json' -exec teststat -markdown {} \+ >> $GITHUB_STEP_SUMMARY

integration-test-prepare:
runs-on: ubuntu-latest
runs-on: ubuntu-24.04
timeout-minutes: 120 # guardrails timeout for the whole job
outputs:
matrix: ${{ steps.tests.outputs.matrix }}
steps:
@@ -253,8 +256,8 @@ jobs:

integration-test:
runs-on: ${{ inputs.os }}
timeout-minutes: 120 # guardrails timeout for the whole job
continue-on-error: ${{ inputs.storage == 'snapshotter' && github.event_name != 'pull_request' }}
timeout-minutes: 120
needs:
- build
- integration-test-prepare
@@ -512,7 +515,8 @@ jobs:
retention-days: 1

integration-test-report:
runs-on: ubuntu-latest
runs-on: ubuntu-24.04
timeout-minutes: 120 # guardrails timeout for the whole job
continue-on-error: ${{ inputs.storage == 'snapshotter' && github.event_name != 'pull_request' }}
if: always()
needs:
.github/workflows/arm64.yml (vendored, new file, 275 lines)
@@ -0,0 +1,275 @@
name: arm64

# Default to 'contents: read', which grants actions to read commits.
#
# If any permission is set, any permission not included in the list is
# implicitly set to "none".
#
# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
permissions:
contents: read

concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true

on:
workflow_dispatch:
push:
branches:
- 'master'
- '[0-9]+.[0-9]+'
pull_request:

env:
GO_VERSION: "1.22.10"
TESTSTAT_VERSION: v0.1.25
DESTDIR: ./build
SETUP_BUILDX_VERSION: edge
SETUP_BUILDKIT_IMAGE: moby/buildkit:latest
DOCKER_EXPERIMENTAL: 1

jobs:
validate-dco:
uses: ./.github/workflows/.dco.yml

build:
runs-on: ubuntu-22.04-arm
timeout-minutes: 20 # guardrails timeout for the whole job
needs:
- validate-dco
strategy:
fail-fast: false
matrix:
target:
- binary
- dynbinary
steps:
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Build
uses: docker/bake-action@v6
with:
targets: ${{ matrix.target }}
-
name: List artifacts
run: |
tree -nh ${{ env.DESTDIR }}
-
name: Check artifacts
run: |
find ${{ env.DESTDIR }} -type f -exec file -e ascii -- {} +

build-dev:
runs-on: ubuntu-22.04-arm
timeout-minutes: 120 # guardrails timeout for the whole job
needs:
- validate-dco
steps:
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Build dev image
uses: docker/bake-action@v6
with:
targets: dev
set: |
*.cache-from=type=gha,scope=dev-arm64
*.cache-to=type=gha,scope=dev-arm64,mode=max
*.output=type=cacheonly

test-unit:
runs-on: ubuntu-22.04-arm
timeout-minutes: 120 # guardrails timeout for the whole job
needs:
- build-dev
steps:
-
name: Checkout
uses: actions/checkout@v4
-
name: Set up runner
uses: ./.github/actions/setup-runner
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Build dev image
uses: docker/bake-action@v6
with:
targets: dev
set: |
dev.cache-from=type=gha,scope=dev-arm64
-
name: Test
run: |
make -o build test-unit
-
name: Prepare reports
if: always()
run: |
mkdir -p bundles /tmp/reports
find bundles -path '*/root/*overlay2' -prune -o -type f \( -name '*-report.json' -o -name '*.log' -o -name '*.out' -o -name '*.prof' -o -name '*-report.xml' \) -print | xargs sudo tar -czf /tmp/reports.tar.gz
tar -xzf /tmp/reports.tar.gz -C /tmp/reports
sudo chown -R $(id -u):$(id -g) /tmp/reports
tree -nh /tmp/reports
-
name: Send to Codecov
uses: codecov/codecov-action@v4
with:
directory: ./bundles
env_vars: RUNNER_OS
flags: unit
token: ${{ secrets.CODECOV_TOKEN }} # used to upload coverage reports: https://github.com/moby/buildkit/pull/4660#issue-2142122533
-
name: Upload reports
if: always()
uses: actions/upload-artifact@v4
with:
name: test-reports-unit-arm64-graphdriver
path: /tmp/reports/*
retention-days: 1

test-unit-report:
runs-on: ubuntu-20.04
timeout-minutes: 10
continue-on-error: ${{ github.event_name != 'pull_request' }}
if: always()
needs:
- test-unit
steps:
-
name: Set up Go
uses: actions/setup-go@v5
with:
go-version: ${{ env.GO_VERSION }}
cache-dependency-path: vendor.sum
-
name: Download reports
uses: actions/download-artifact@v4
with:
pattern: test-reports-unit-arm64-*
path: /tmp/reports
-
name: Install teststat
run: |
go install github.com/vearutop/teststat@${{ env.TESTSTAT_VERSION }}
-
name: Create summary
run: |
find /tmp/reports -type f -name '*-go-test-report.json' -exec teststat -markdown {} \+ >> $GITHUB_STEP_SUMMARY

test-integration:
runs-on: ubuntu-22.04-arm
timeout-minutes: 120 # guardrails timeout for the whole job
continue-on-error: ${{ github.event_name != 'pull_request' }}
needs:
- build-dev
steps:
-
name: Checkout
uses: actions/checkout@v4
-
name: Set up runner
uses: ./.github/actions/setup-runner
-
name: Set up tracing
uses: ./.github/actions/setup-tracing
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Build dev image
uses: docker/bake-action@v6
with:
targets: dev
set: |
dev.cache-from=type=gha,scope=dev-arm64
-
name: Test
run: |
make -o build test-integration
env:
TEST_SKIP_INTEGRATION_CLI: 1
TESTCOVERAGE: 1
-
name: Prepare reports
if: always()
run: |
reportsPath="/tmp/reports/arm64-graphdriver"
mkdir -p bundles $reportsPath
find bundles -path '*/root/*overlay2' -prune -o -type f \( -name '*-report.json' -o -name '*.log' -o -name '*.out' -o -name '*.prof' -o -name '*-report.xml' \) -print | xargs sudo tar -czf /tmp/reports.tar.gz
tar -xzf /tmp/reports.tar.gz -C $reportsPath
sudo chown -R $(id -u):$(id -g) $reportsPath
tree -nh $reportsPath
curl -sSLf localhost:16686/api/traces?service=integration-test-client > $reportsPath/jaeger-trace.json
-
name: Send to Codecov
uses: codecov/codecov-action@v4
with:
directory: ./bundles/test-integration
env_vars: RUNNER_OS
flags: integration
token: ${{ secrets.CODECOV_TOKEN }} # used to upload coverage reports: https://github.com/moby/buildkit/pull/4660#issue-2142122533
-
name: Test daemon logs
if: always()
run: |
cat bundles/test-integration/docker.log
-
name: Upload reports
if: always()
uses: actions/upload-artifact@v4
with:
name: test-reports-integration-arm64-graphdriver
path: /tmp/reports/*
retention-days: 1

test-integration-report:
runs-on: ubuntu-20.04
timeout-minutes: 10
continue-on-error: ${{ github.event_name != 'pull_request' }}
if: always()
needs:
- test-integration
steps:
-
name: Set up Go
uses: actions/setup-go@v5
with:
go-version: ${{ env.GO_VERSION }}
cache-dependency-path: vendor.sum
-
name: Download reports
uses: actions/download-artifact@v4
with:
path: /tmp/reports
pattern: test-reports-integration-arm64-*
merge-multiple: true
-
name: Install teststat
run: |
go install github.com/vearutop/teststat@${{ env.TESTSTAT_VERSION }}
-
name: Create summary
run: |
find /tmp/reports -type f -name '*-go-test-report.json' -exec teststat -markdown {} \+ >> $GITHUB_STEP_SUMMARY
.github/workflows/bin-image.yml (vendored, 13 changed lines)
@@ -21,8 +21,6 @@ env:
PLATFORM: Moby Engine - Nightly
PRODUCT: moby-bin
PACKAGER_NAME: The Moby Project
SETUP_BUILDX_VERSION: latest
SETUP_BUILDKIT_IMAGE: moby/buildkit:latest

jobs:
validate-dco:
@@ -31,6 +29,7 @@ jobs:

prepare:
runs-on: ubuntu-20.04
timeout-minutes: 20 # guardrails timeout for the whole job
outputs:
platforms: ${{ steps.platforms.outputs.matrix }}
steps:
@@ -83,6 +82,7 @@ jobs:

build:
runs-on: ubuntu-20.04
timeout-minutes: 120 # guardrails timeout for the whole job
needs:
- validate-dco
- prepare
@@ -114,10 +114,6 @@ jobs:
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Login to Docker Hub
if: github.event_name != 'pull_request' && github.repository == 'moby/moby'
@@ -157,6 +153,7 @@ jobs:

merge:
runs-on: ubuntu-20.04
timeout-minutes: 120 # guardrails timeout for the whole job
needs:
- build
if: always() && !contains(needs.*.result, 'failure') && !contains(needs.*.result, 'cancelled') && github.event_name != 'pull_request' && github.repository == 'moby/moby'
@@ -177,10 +174,6 @@ jobs:
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Login to Docker Hub
uses: docker/login-action@v3
.github/workflows/buildkit.yml (vendored, 20 changed lines)
@@ -13,10 +13,8 @@ on:
pull_request:

env:
GO_VERSION: "1.21.11"
GO_VERSION: "1.22.10"
DESTDIR: ./build
SETUP_BUILDX_VERSION: latest
SETUP_BUILDKIT_IMAGE: moby/buildkit:latest

jobs:
validate-dco:
@@ -24,6 +22,7 @@ jobs:

build:
runs-on: ubuntu-20.04
timeout-minutes: 120 # guardrails timeout for the whole job
needs:
- validate-dco
steps:
@@ -33,10 +32,6 @@ jobs:
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Build
uses: docker/bake-action@v4
@@ -53,7 +48,7 @@ jobs:

test:
runs-on: ubuntu-20.04
timeout-minutes: 120
timeout-minutes: 120 # guardrails timeout for the whole job
needs:
- build
env:
@@ -93,6 +88,11 @@ jobs:
uses: actions/checkout@v4
with:
path: moby
-
name: Set up Go
uses: actions/setup-go@v5
with:
go-version: ${{ env.GO_VERSION }}
-
name: BuildKit ref
run: |
@@ -111,10 +111,6 @@ jobs:
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Download binary artifacts
uses: actions/download-artifact@v4
.github/workflows/ci.yml (vendored, 15 changed lines)
@@ -14,8 +14,6 @@ on:

env:
DESTDIR: ./build
SETUP_BUILDX_VERSION: latest
SETUP_BUILDKIT_IMAGE: moby/buildkit:latest

jobs:
validate-dco:
@@ -23,6 +21,7 @@ jobs:

build:
runs-on: ubuntu-20.04
timeout-minutes: 20 # guardrails timeout for the whole job
needs:
- validate-dco
strategy:
@@ -40,10 +39,6 @@ jobs:
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Build
uses: docker/bake-action@v4
@@ -59,7 +54,8 @@ jobs:
find ${{ env.DESTDIR }} -type f -exec file -e ascii -- {} +

prepare-cross:
runs-on: ubuntu-latest
runs-on: ubuntu-24.04
timeout-minutes: 20 # guardrails timeout for the whole job
needs:
- validate-dco
outputs:
@@ -81,6 +77,7 @@ jobs:

cross:
runs-on: ubuntu-20.04
timeout-minutes: 20 # guardrails timeout for the whole job
needs:
- validate-dco
- prepare-cross
@@ -102,10 +99,6 @@ jobs:
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Build
uses: docker/bake-action@v4
.github/workflows/test.yml (vendored, 22 changed lines)
@@ -13,11 +13,9 @@ on:
pull_request:

env:
GO_VERSION: "1.21.11"
GO_VERSION: "1.22.10"
GIT_PAGER: "cat"
PAGER: "cat"
SETUP_BUILDX_VERSION: latest
SETUP_BUILDKIT_IMAGE: moby/buildkit:latest

jobs:
validate-dco:
@@ -25,6 +23,7 @@ jobs:

build-dev:
runs-on: ubuntu-20.04
timeout-minutes: 120 # guardrails timeout for the whole job
needs:
- validate-dco
strategy:
@@ -46,10 +45,6 @@ jobs:
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Build dev image
uses: docker/bake-action@v4
@@ -77,6 +72,7 @@ jobs:

validate-prepare:
runs-on: ubuntu-20.04
timeout-minutes: 10 # guardrails timeout for the whole job
needs:
- validate-dco
outputs:
@@ -98,7 +94,7 @@ jobs:

validate:
runs-on: ubuntu-20.04
timeout-minutes: 120
timeout-minutes: 30 # guardrails timeout for the whole job
needs:
- validate-prepare
- build-dev
@@ -118,10 +114,6 @@ jobs:
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Build dev image
uses: docker/bake-action@v4
@@ -136,6 +128,7 @@ jobs:

smoke-prepare:
runs-on: ubuntu-20.04
timeout-minutes: 10 # guardrails timeout for the whole job
needs:
- validate-dco
outputs:
@@ -157,6 +150,7 @@ jobs:

smoke:
runs-on: ubuntu-20.04
timeout-minutes: 20 # guardrails timeout for the whole job
needs:
- smoke-prepare
strategy:
@@ -178,10 +172,6 @@ jobs:
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Test
uses: docker/bake-action@v4
.github/workflows/validate-pr.yml (vendored, 3 changed lines)
@@ -7,6 +7,7 @@ on:
jobs:
check-area-label:
runs-on: ubuntu-20.04
timeout-minutes: 120 # guardrails timeout for the whole job
steps:
- name: Missing `area/` label
if: contains(join(github.event.pull_request.labels.*.name, ','), 'impact/') && !contains(join(github.event.pull_request.labels.*.name, ','), 'area/')
@@ -19,6 +20,7 @@ jobs:
check-changelog:
if: contains(join(github.event.pull_request.labels.*.name, ','), 'impact/')
runs-on: ubuntu-20.04
timeout-minutes: 120 # guardrails timeout for the whole job
env:
PR_BODY: |
${{ github.event.pull_request.body }}
@@ -47,6 +49,7 @@ jobs:

check-pr-branch:
runs-on: ubuntu-20.04
timeout-minutes: 120 # guardrails timeout for the whole job
env:
PR_TITLE: ${{ github.event.pull_request.title }}
steps:
@@ -45,6 +45,11 @@ linters-settings:

govet:
check-shadowing: false

gosec:
excludes:
- G115 # FIXME temporarily suppress 'G115: integer overflow conversion': it produces many hits, some of which may be false positives, and need to be looked at; see https://github.com/moby/moby/issues/48358

depguard:
rules:
main:
@@ -134,16 +139,6 @@ issues:
linters:
- staticcheck

- text: "ineffectual assignment to ctx"
source: "ctx[, ].*=.*\\(ctx[,)]"
linters:
- ineffassign

- text: "SA4006: this value of `ctx` is never used"
source: "ctx[, ].*=.*\\(ctx[,)]"
linters:
- staticcheck

# Maximum issues count per one linter. Set to 0 to disable. Default is 50.
max-issues-per-linter: 0
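The gosec exclusion above references rule G115 ("integer overflow conversion"). As a rough illustration only, and not code from this repository, the Go sketch below shows the kind of narrowing conversion that rule flags and a bounds-checked variant that would typically satisfy it; the function name is made up for the example.

```go
package main

import (
	"fmt"
	"math"
)

// toUint32 converts an int to uint32 with an explicit range check.
// A bare uint32(n) on an unchecked value is the pattern gosec G115
// reports as a possible integer overflow conversion.
func toUint32(n int) (uint32, error) {
	if n < 0 || int64(n) > math.MaxUint32 {
		return 0, fmt.Errorf("value %d out of range for uint32", n)
	}
	return uint32(n), nil
}

func main() {
	fmt.Println(toUint32(70000)) // 70000 <nil>
	fmt.Println(toUint32(-1))    // 0 value -1 out of range for uint32
}
```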
.mailmap (21 changed lines)
@@ -7,7 +7,6 @@
|
||||
#
|
||||
# For an explanation of this file format, consult gitmailmap(5).
|
||||
|
||||
Aaron Yoshitake <airandfingers@gmail.com>
|
||||
Aaron L. Xu <liker.xu@foxmail.com>
|
||||
Aaron L. Xu <liker.xu@foxmail.com> <likexu@harmonycloud.cn>
|
||||
Aaron Lehmann <alehmann@netflix.com>
|
||||
@@ -31,11 +30,9 @@ Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
|
||||
Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp> <suda.akihiro@lab.ntt.co.jp>
|
||||
Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp> <suda.kyoto@gmail.com>
|
||||
Akshay Moghe <akshay.moghe@gmail.com>
|
||||
Alano Terblanche <alano.terblanche@docker.com>
|
||||
Alano Terblanche <alano.terblanche@docker.com> <18033717+Benehiko@users.noreply.github.com>
|
||||
Albin Kerouanton <albinker@gmail.com>
|
||||
Albin Kerouanton <albinker@gmail.com> <557933+akerouanton@users.noreply.github.com>
|
||||
Albin Kerouanton <albinker@gmail.com> <albin@akerouanton.name>
|
||||
Albin Kerouanton <albinker@gmail.com> <557933+akerouanton@users.noreply.github.com>
|
||||
Aleksa Sarai <asarai@suse.de>
|
||||
Aleksa Sarai <asarai@suse.de> <asarai@suse.com>
|
||||
Aleksa Sarai <asarai@suse.de> <cyphar@cyphar.com>
|
||||
@@ -62,8 +59,6 @@ Allen Sun <allensun.shl@alibaba-inc.com> <allen.sun@daocloud.io>
|
||||
Allen Sun <allensun.shl@alibaba-inc.com> <shlallen1990@gmail.com>
|
||||
Anca Iordache <anca.iordache@docker.com>
|
||||
Andrea Denisse Gómez <crypto.andrea@protonmail.ch>
|
||||
Andrew Baxter <423qpsxzhh8k3h@s.rendaw.me>
|
||||
Andrew Baxter <423qpsxzhh8k3h@s.rendaw.me> andrew <>
|
||||
Andrew Kim <taeyeonkim90@gmail.com>
|
||||
Andrew Kim <taeyeonkim90@gmail.com> <akim01@fortinet.com>
|
||||
Andrew Weiss <andrew.weiss@docker.com> <andrew.weiss@microsoft.com>
|
||||
@@ -124,7 +119,6 @@ Brian Goff <cpuguy83@gmail.com> <bgoff@cpuguy83-mbp.home>
|
||||
Brian Goff <cpuguy83@gmail.com> <bgoff@cpuguy83-mbp.local>
|
||||
Brian Goff <cpuguy83@gmail.com> <brian.goff@microsoft.com>
|
||||
Brian Goff <cpuguy83@gmail.com> <cpuguy@hey.com>
|
||||
Calvin Liu <flycalvin@qq.com>
|
||||
Cameron Sparr <gh@sparr.email>
|
||||
Carlos de Paula <me@carlosedp.com>
|
||||
Chander Govindarajan <chandergovind@gmail.com>
|
||||
@@ -136,7 +130,6 @@ Chen Mingjie <chenmingjie0828@163.com>
|
||||
Chen Qiu <cheney-90@hotmail.com>
|
||||
Chen Qiu <cheney-90@hotmail.com> <21321229@zju.edu.cn>
|
||||
Chengfei Shang <cfshang@alauda.io>
|
||||
Chentianze <cmoman@126.com>
|
||||
Chris Dias <cdias@microsoft.com>
|
||||
Chris McKinnel <chris.mckinnel@tangentlabs.co.uk>
|
||||
Chris Price <cprice@mirantis.com>
|
||||
@@ -145,8 +138,6 @@ Chris Telfer <ctelfer@docker.com>
|
||||
Chris Telfer <ctelfer@docker.com> <ctelfer@users.noreply.github.com>
|
||||
Christopher Biscardi <biscarch@sketcht.com>
|
||||
Christopher Latham <sudosurootdev@gmail.com>
|
||||
Christopher Petito <chrisjpetito@gmail.com>
|
||||
Christopher Petito <chrisjpetito@gmail.com> <47751006+krissetto@users.noreply.github.com>
|
||||
Christy Norman <christy@linux.vnet.ibm.com>
|
||||
Chun Chen <ramichen@tencent.com> <chenchun.feed@gmail.com>
|
||||
Corbin Coleman <corbin.coleman@docker.com>
|
||||
@@ -350,8 +341,6 @@ John Howard <github@lowenna.com> <john.howard@microsoft.com>
|
||||
John Howard <github@lowenna.com> <john@lowenna.com>
|
||||
John Stephens <johnstep@docker.com> <johnstep@users.noreply.github.com>
|
||||
Jon Surrell <jon.surrell@gmail.com> <jon.surrell@automattic.com>
|
||||
Jonathan A. Sternberg <jonathansternberg@gmail.com>
|
||||
Jonathan A. Sternberg <jonathansternberg@gmail.com> <jonathan.sternberg@docker.com>
|
||||
Jonathan Choy <jonathan.j.choy@gmail.com>
|
||||
Jonathan Choy <jonathan.j.choy@gmail.com> <oni@tetsujinlabs.com>
|
||||
Jordan Arentsen <blissdev@gmail.com>
|
||||
@@ -494,14 +483,14 @@ Mikael Davranche <mikael.davranche@corp.ovh.com> <mikael.davranche@corp.ovh.net>
|
||||
Mike Casas <mkcsas0@gmail.com> <mikecasas@users.noreply.github.com>
|
||||
Mike Goelzer <mike.goelzer@docker.com> <mgoelzer@docker.com>
|
||||
Milas Bowman <devnull@milas.dev>
|
||||
Milas Bowman <devnull@milas.dev> <milas.bowman@docker.com>
|
||||
Milas Bowman <devnull@milas.dev> <milasb@gmail.com>
|
||||
Milas Bowman <devnull@milas.dev> <milas.bowman@docker.com>
|
||||
Milind Chawre <milindchawre@gmail.com>
|
||||
Misty Stanley-Jones <misty@docker.com> <misty@apache.org>
|
||||
Mohammad Banikazemi <MBanikazemi@gmail.com>
|
||||
Mohammad Banikazemi <MBanikazemi@gmail.com> <mb@us.ibm.com>
|
||||
Mohd Sadiq <mohdsadiq058@gmail.com> <42430865+msadiq058@users.noreply.github.com>
|
||||
Mohd Sadiq <mohdsadiq058@gmail.com> <mohdsadiq058@gmail.com>
|
||||
Mohd Sadiq <mohdsadiq058@gmail.com> <42430865+msadiq058@users.noreply.github.com>
|
||||
Mohit Soni <mosoni@ebay.com> <mohitsoni1989@gmail.com>
|
||||
Moorthy RS <rsmoorthy@gmail.com> <rsmoorthy@users.noreply.github.com>
|
||||
Moysés Borges <moysesb@gmail.com>
|
||||
@@ -526,7 +515,6 @@ Olli Janatuinen <olli.janatuinen@gmail.com> <olljanat@users.noreply.github.com>
|
||||
Onur Filiz <onur.filiz@microsoft.com>
|
||||
Onur Filiz <onur.filiz@microsoft.com> <ofiliz@users.noreply.github.com>
|
||||
Ouyang Liduo <oyld0210@163.com>
|
||||
Patrick St. laurent <patrick@saint-laurent.us>
|
||||
Patrick Stapleton <github@gdi2290.com>
|
||||
Paul Liljenberg <liljenberg.paul@gmail.com> <letters@paulnotcom.se>
|
||||
Pavel Tikhomirov <ptikhomirov@virtuozzo.com> <ptikhomirov@parallels.com>
|
||||
@@ -550,8 +538,6 @@ Qin TianHuan <tianhuan@bingotree.cn>
|
||||
Ray Tsang <rayt@google.com> <saturnism@users.noreply.github.com>
|
||||
Renaud Gaubert <rgaubert@nvidia.com> <renaud.gaubert@gmail.com>
|
||||
Richard Scothern <richard.scothern@gmail.com>
|
||||
Rob Murray <rob.murray@docker.com>
|
||||
Rob Murray <rob.murray@docker.com> <148866618+robmry@users.noreply.github.com>
|
||||
Robert Terhaar <rterhaar@atlanticdynamic.com> <robbyt@users.noreply.github.com>
|
||||
Roberto G. Hashioka <roberto.hashioka@docker.com> <roberto_hashioka@hotmail.com>
|
||||
Roberto Muñoz Fernández <robertomf@gmail.com> <roberto.munoz.fernandez.contractor@bbva.com>
|
||||
@@ -562,7 +548,6 @@ Rongxiang Song <tinysong1226@gmail.com>
|
||||
Rony Weng <ronyweng@synology.com>
|
||||
Ross Boucher <rboucher@gmail.com>
|
||||
Rui Cao <ruicao@alauda.io>
|
||||
Rui JingAn <quiterace@gmail.com>
|
||||
Runshen Zhu <runshen.zhu@gmail.com>
|
||||
Ryan Stelly <ryan.stelly@live.com>
|
||||
Ryoga Saito <contact@proelbtn.com>
|
||||
|
||||
AUTHORS (20 changed lines)
@@ -10,7 +10,6 @@ Aaron Huslage <huslage@gmail.com>
|
||||
Aaron L. Xu <liker.xu@foxmail.com>
|
||||
Aaron Lehmann <alehmann@netflix.com>
|
||||
Aaron Welch <welch@packet.net>
|
||||
Aaron Yoshitake <airandfingers@gmail.com>
|
||||
Abel Muiño <amuino@gmail.com>
|
||||
Abhijeet Kasurde <akasurde@redhat.com>
|
||||
Abhinandan Prativadi <aprativadi@gmail.com>
|
||||
@@ -63,7 +62,6 @@ alambike <alambike@gmail.com>
|
||||
Alan Hoyle <alan@alanhoyle.com>
|
||||
Alan Scherger <flyinprogrammer@gmail.com>
|
||||
Alan Thompson <cloojure@gmail.com>
|
||||
Alano Terblanche <alano.terblanche@docker.com>
|
||||
Albert Callarisa <shark234@gmail.com>
|
||||
Albert Zhang <zhgwenming@gmail.com>
|
||||
Albin Kerouanton <albinker@gmail.com>
|
||||
@@ -143,7 +141,6 @@ Andreas Tiefenthaler <at@an-ti.eu>
|
||||
Andrei Gherzan <andrei@resin.io>
|
||||
Andrei Ushakov <aushakov@netflix.com>
|
||||
Andrei Vagin <avagin@gmail.com>
|
||||
Andrew Baxter <423qpsxzhh8k3h@s.rendaw.me>
|
||||
Andrew C. Bodine <acbodine@us.ibm.com>
|
||||
Andrew Clay Shafer <andrewcshafer@gmail.com>
|
||||
Andrew Duckworth <grillopress@gmail.com>
|
||||
@@ -196,7 +193,6 @@ Anton Löfgren <anton.lofgren@gmail.com>
|
||||
Anton Nikitin <anton.k.nikitin@gmail.com>
|
||||
Anton Polonskiy <anton.polonskiy@gmail.com>
|
||||
Anton Tiurin <noxiouz@yandex.ru>
|
||||
Antonio Aguilar <antonio@zoftko.com>
|
||||
Antonio Murdaca <antonio.murdaca@gmail.com>
|
||||
Antonis Kalipetis <akalipetis@gmail.com>
|
||||
Antony Messerli <amesserl@rackspace.com>
|
||||
@@ -225,6 +221,7 @@ Avi Das <andas222@gmail.com>
|
||||
Avi Kivity <avi@scylladb.com>
|
||||
Avi Miller <avi.miller@oracle.com>
|
||||
Avi Vaid <avaid1996@gmail.com>
|
||||
ayoshitake <airandfingers@gmail.com>
|
||||
Azat Khuyiyakhmetov <shadow_uz@mail.ru>
|
||||
Bao Yonglei <baoyonglei@huawei.com>
|
||||
Bardia Keyoumarsi <bkeyouma@ucsc.edu>
|
||||
@@ -319,7 +316,6 @@ Burke Libbey <burke@libbey.me>
|
||||
Byung Kang <byung.kang.ctr@amrdec.army.mil>
|
||||
Caleb Spare <cespare@gmail.com>
|
||||
Calen Pennington <cale@edx.org>
|
||||
Calvin Liu <flycalvin@qq.com>
|
||||
Cameron Boehmer <cameron.boehmer@gmail.com>
|
||||
Cameron Sparr <gh@sparr.email>
|
||||
Cameron Spear <cameronspear@gmail.com>
|
||||
@@ -366,7 +362,6 @@ Chen Qiu <cheney-90@hotmail.com>
|
||||
Cheng-mean Liu <soccerl@microsoft.com>
|
||||
Chengfei Shang <cfshang@alauda.io>
|
||||
Chengguang Xu <cgxu519@gmx.com>
|
||||
Chentianze <cmoman@126.com>
|
||||
Chenyang Yan <memory.yancy@gmail.com>
|
||||
chenyuzhu <chenyuzhi@oschina.cn>
|
||||
Chetan Birajdar <birajdar.chetan@gmail.com>
|
||||
@@ -414,7 +409,6 @@ Christopher Crone <christopher.crone@docker.com>
|
||||
Christopher Currie <codemonkey+github@gmail.com>
|
||||
Christopher Jones <tophj@linux.vnet.ibm.com>
|
||||
Christopher Latham <sudosurootdev@gmail.com>
|
||||
Christopher Petito <chrisjpetito@gmail.com>
|
||||
Christopher Rigor <crigor@gmail.com>
|
||||
Christy Norman <christy@linux.vnet.ibm.com>
|
||||
Chun Chen <ramichen@tencent.com>
|
||||
@@ -783,7 +777,6 @@ Gabriel L. Somlo <gsomlo@gmail.com>
|
||||
Gabriel Linder <linder.gabriel@gmail.com>
|
||||
Gabriel Monroy <gabriel@opdemand.com>
|
||||
Gabriel Nicolas Avellaneda <avellaneda.gabriel@gmail.com>
|
||||
Gabriel Tomitsuka <gabriel@tomitsuka.com>
|
||||
Gaetan de Villele <gdevillele@gmail.com>
|
||||
Galen Sampson <galen.sampson@gmail.com>
|
||||
Gang Qiao <qiaohai8866@gmail.com>
|
||||
@@ -799,7 +792,6 @@ Geoff Levand <geoff@infradead.org>
|
||||
Geoffrey Bachelet <grosfrais@gmail.com>
|
||||
Geon Kim <geon0250@gmail.com>
|
||||
George Kontridze <george@bugsnag.com>
|
||||
George Ma <mayangang@outlook.com>
|
||||
George MacRorie <gmacr31@gmail.com>
|
||||
George Xie <georgexsh@gmail.com>
|
||||
Georgi Hristozov <georgi@forkbomb.nl>
|
||||
@@ -921,7 +913,6 @@ Illo Abdulrahim <abdulrahim.illo@nokia.com>
|
||||
Ilya Dmitrichenko <errordeveloper@gmail.com>
|
||||
Ilya Gusev <mail@igusev.ru>
|
||||
Ilya Khlopotov <ilya.khlopotov@gmail.com>
|
||||
imalasong <2879499479@qq.com>
|
||||
imre Fitos <imre.fitos+github@gmail.com>
|
||||
inglesp <peter.inglesby@gmail.com>
|
||||
Ingo Gottwald <in.gottwald@gmail.com>
|
||||
@@ -939,7 +930,6 @@ J Bruni <joaohbruni@yahoo.com.br>
|
||||
J. Nunn <jbnunn@gmail.com>
|
||||
Jack Danger Canty <jackdanger@squareup.com>
|
||||
Jack Laxson <jackjrabbit@gmail.com>
|
||||
Jack Walker <90711509+j2walker@users.noreply.github.com>
|
||||
Jacob Atzen <jacob@jacobatzen.dk>
|
||||
Jacob Edelman <edelman.jd@gmail.com>
|
||||
Jacob Tomlinson <jacob@tom.linson.uk>
|
||||
@@ -999,7 +989,6 @@ Jason Shepherd <jason@jasonshepherd.net>
|
||||
Jason Smith <jasonrichardsmith@gmail.com>
|
||||
Jason Sommer <jsdirv@gmail.com>
|
||||
Jason Stangroome <jason@codeassassin.com>
|
||||
Jasper Siepkes <siepkes@serviceplanet.nl>
|
||||
Javier Bassi <javierbassi@gmail.com>
|
||||
jaxgeller <jacksongeller@gmail.com>
|
||||
Jay <teguhwpurwanto@gmail.com>
|
||||
@@ -1111,7 +1100,6 @@ Jon Johnson <jonjohnson@google.com>
|
||||
Jon Surrell <jon.surrell@gmail.com>
|
||||
Jon Wedaman <jweede@gmail.com>
|
||||
Jonas Dohse <jonas@dohse.ch>
|
||||
Jonas Geiler <git@jonasgeiler.com>
|
||||
Jonas Heinrich <Jonas@JonasHeinrich.com>
|
||||
Jonas Pfenniger <jonas@pfenniger.name>
|
||||
Jonathan A. Schweder <jonathanschweder@gmail.com>
|
||||
@@ -1279,7 +1267,6 @@ Lakshan Perera <lakshan@laktek.com>
|
||||
Lalatendu Mohanty <lmohanty@redhat.com>
|
||||
Lance Chen <cyen0312@gmail.com>
|
||||
Lance Kinley <lkinley@loyaltymethods.com>
|
||||
Lars Andringa <l.s.andringa@rug.nl>
|
||||
Lars Butler <Lars.Butler@gmail.com>
|
||||
Lars Kellogg-Stedman <lars@redhat.com>
|
||||
Lars R. Damerow <lars@pixar.com>
|
||||
@@ -1686,7 +1673,6 @@ Patrick Böänziger <patrick.baenziger@bsi-software.com>
|
||||
Patrick Devine <patrick.devine@docker.com>
|
||||
Patrick Haas <patrickhaas@google.com>
|
||||
Patrick Hemmer <patrick.hemmer@gmail.com>
|
||||
Patrick St. laurent <patrick@saint-laurent.us>
|
||||
Patrick Stapleton <github@gdi2290.com>
|
||||
Patrik Cyvoct <patrik@ptrk.io>
|
||||
pattichen <craftsbear@gmail.com>
|
||||
@@ -1892,7 +1878,6 @@ Royce Remer <royceremer@gmail.com>
|
||||
Rozhnov Alexandr <nox73@ya.ru>
|
||||
Rudolph Gottesheim <r.gottesheim@loot.at>
|
||||
Rui Cao <ruicao@alauda.io>
|
||||
Rui JingAn <quiterace@gmail.com>
|
||||
Rui Lopes <rgl@ruilopes.com>
|
||||
Ruilin Li <liruilin4@huawei.com>
|
||||
Runshen Zhu <runshen.zhu@gmail.com>
|
||||
@@ -2199,7 +2184,6 @@ Tomek Mańko <tomek.manko@railgun-solutions.com>
|
||||
Tommaso Visconti <tommaso.visconti@gmail.com>
|
||||
Tomoya Tabuchi <t@tomoyat1.com>
|
||||
Tomáš Hrčka <thrcka@redhat.com>
|
||||
Tomáš Virtus <nechtom@gmail.com>
|
||||
tonic <tonicbupt@gmail.com>
|
||||
Tonny Xu <tonny.xu@gmail.com>
|
||||
Tony Abboud <tdabboud@hotmail.com>
|
||||
@@ -2244,7 +2228,6 @@ Victor I. Wood <viw@t2am.com>
|
||||
Victor Lyuboslavsky <victor@victoreda.com>
|
||||
Victor Marmol <vmarmol@google.com>
|
||||
Victor Palma <palma.victor@gmail.com>
|
||||
Victor Toni <victor.toni@gmail.com>
|
||||
Victor Vieux <victor.vieux@docker.com>
|
||||
Victoria Bialas <victoria.bialas@docker.com>
|
||||
Vijaya Kumar K <vijayak@caviumnetworks.com>
|
||||
@@ -2296,7 +2279,6 @@ Wassim Dhif <wassimdhif@gmail.com>
|
||||
Wataru Ishida <ishida.wataru@lab.ntt.co.jp>
|
||||
Wayne Chang <wayne@neverfear.org>
|
||||
Wayne Song <wsong@docker.com>
|
||||
weebney <weebney@gmail.com>
|
||||
Weerasak Chongnguluam <singpor@gmail.com>
|
||||
Wei Fu <fuweid89@gmail.com>
|
||||
Wei Wu <wuwei4455@gmail.com>
|
||||
|
||||
Dockerfile (16 changed lines)
@@ -1,19 +1,19 @@
# syntax=docker/dockerfile:1.7

ARG GO_VERSION=1.21.11
ARG GO_VERSION=1.22.10
ARG BASE_DEBIAN_DISTRO="bookworm"
ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}"
ARG XX_VERSION=1.4.0
ARG XX_VERSION=1.6.1

ARG VPNKIT_VERSION=0.5.0

ARG DOCKERCLI_REPOSITORY="https://github.com/docker/cli.git"
ARG DOCKERCLI_VERSION=v26.1.0
ARG DOCKERCLI_VERSION=v26.0.0
# cli version used for integration-cli tests
ARG DOCKERCLI_INTEGRATION_REPOSITORY="https://github.com/docker/cli.git"
ARG DOCKERCLI_INTEGRATION_VERSION=v17.06.2-ce
ARG BUILDX_VERSION=0.15.1
ARG COMPOSE_VERSION=v2.27.1
ARG BUILDX_VERSION=0.13.1
ARG COMPOSE_VERSION=v2.25.0

ARG SYSTEMD="false"
ARG DOCKER_STATIC=1
@@ -147,7 +147,7 @@ RUN git init . && git remote add origin "https://github.com/go-delve/delve.git"
# from the https://github.com/go-delve/delve repository.
# It can be used to run Docker with a possibility of
# attaching debugger to it.
ARG DELVE_VERSION=v1.21.1
ARG DELVE_VERSION=v1.23.0
RUN git fetch -q --depth 1 origin "${DELVE_VERSION}" +refs/tags/*:refs/tags/* && git checkout -q FETCH_HEAD

FROM base AS delve-supported
@@ -229,7 +229,7 @@ FROM binary-dummy AS containerd-windows
FROM containerd-${TARGETOS} AS containerd

FROM base AS golangci_lint
ARG GOLANGCI_LINT_VERSION=v1.55.2
ARG GOLANGCI_LINT_VERSION=v1.60.2
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
GOBIN=/build/ GO111MODULE=on go install "github.com/golangci/golangci-lint/cmd/golangci-lint@${GOLANGCI_LINT_VERSION}" \
@@ -287,7 +287,7 @@ RUN git init . && git remote add origin "https://github.com/opencontainers/runc.
# that is used. If you need to update runc, open a pull request in the containerd
# project first, and update both after that is merged. When updating RUNC_VERSION,
# consider updating runc in vendor.mod accordingly.
ARG RUNC_VERSION=v1.1.13
ARG RUNC_VERSION=v1.1.14
RUN git fetch -q --depth 1 origin "${RUNC_VERSION}" +refs/tags/*:refs/tags/* && git checkout -q FETCH_HEAD

FROM base AS runc-build
@@ -5,7 +5,7 @@

# This represents the bare minimum required to build and test Docker.

ARG GO_VERSION=1.21.11
ARG GO_VERSION=1.22.10

ARG BASE_DEBIAN_DISTRO="bookworm"
ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}"

@@ -161,7 +161,7 @@ FROM ${WINDOWS_BASE_IMAGE}:${WINDOWS_BASE_IMAGE_TAG}
# Use PowerShell as the default shell
SHELL ["powershell", "-Command", "$ErrorActionPreference = 'Stop'; $ProgressPreference = 'SilentlyContinue';"]

ARG GO_VERSION=1.21.11
ARG GO_VERSION=1.22.10
ARG GOTESTSUM_VERSION=v1.8.2
ARG GOWINRES_VERSION=v0.3.1
ARG CONTAINERD_VERSION=v1.7.18
Jenkinsfile (vendored, 165 changed lines)
@@ -1,165 +0,0 @@
|
||||
#!groovy
|
||||
pipeline {
|
||||
agent none
|
||||
|
||||
options {
|
||||
buildDiscarder(logRotator(daysToKeepStr: '30'))
|
||||
timeout(time: 2, unit: 'HOURS')
|
||||
timestamps()
|
||||
}
|
||||
parameters {
|
||||
booleanParam(name: 'arm64', defaultValue: true, description: 'ARM (arm64) Build/Test')
|
||||
booleanParam(name: 'dco', defaultValue: true, description: 'Run the DCO check')
|
||||
}
|
||||
environment {
|
||||
DOCKER_BUILDKIT = '1'
|
||||
DOCKER_EXPERIMENTAL = '1'
|
||||
DOCKER_GRAPHDRIVER = 'overlay2'
|
||||
CHECK_CONFIG_COMMIT = '33a3680e08d1007e72c3b3f1454f823d8e9948ee'
|
||||
TESTDEBUG = '0'
|
||||
TIMEOUT = '120m'
|
||||
}
|
||||
stages {
|
||||
stage('pr-hack') {
|
||||
when { changeRequest() }
|
||||
steps {
|
||||
script {
|
||||
echo "Workaround for PR auto-cancel feature. Borrowed from https://issues.jenkins-ci.org/browse/JENKINS-43353"
|
||||
def buildNumber = env.BUILD_NUMBER as int
|
||||
if (buildNumber > 1) milestone(buildNumber - 1)
|
||||
milestone(buildNumber)
|
||||
}
|
||||
}
|
||||
}
|
||||
stage('DCO-check') {
|
||||
when {
|
||||
beforeAgent true
|
||||
expression { params.dco }
|
||||
}
|
||||
agent { label 'arm64 && ubuntu-2004' }
|
||||
steps {
|
||||
sh '''
|
||||
docker run --rm \
|
||||
-v "$WORKSPACE:/workspace" \
|
||||
-e VALIDATE_REPO=${GIT_URL} \
|
||||
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
|
||||
alpine sh -c 'apk add --no-cache -q bash git openssh-client && git config --system --add safe.directory /workspace && cd /workspace && hack/validate/dco'
|
||||
'''
|
||||
}
|
||||
}
|
||||
stage('Build') {
|
||||
parallel {
|
||||
stage('arm64') {
|
||||
when {
|
||||
beforeAgent true
|
||||
expression { params.arm64 }
|
||||
}
|
||||
agent { label 'arm64 && ubuntu-2004' }
|
||||
environment {
|
||||
TEST_SKIP_INTEGRATION_CLI = '1'
|
||||
}
|
||||
|
||||
stages {
|
||||
stage("Print info") {
|
||||
steps {
|
||||
sh 'docker version'
|
||||
sh 'docker info'
|
||||
sh '''
|
||||
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
|
||||
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
|
||||
&& bash ${WORKSPACE}/check-config.sh || true
|
||||
'''
|
||||
}
|
||||
}
|
||||
stage("Build dev image") {
|
||||
steps {
|
||||
sh 'docker build --force-rm -t docker:${GIT_COMMIT} .'
|
||||
}
|
||||
}
|
||||
stage("Unit tests") {
|
||||
steps {
|
||||
sh '''
|
||||
sudo modprobe ip6table_filter
|
||||
'''
|
||||
sh '''
|
||||
docker run --rm -t --privileged \
|
||||
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
|
||||
--name docker-pr$BUILD_NUMBER \
|
||||
-e DOCKER_EXPERIMENTAL \
|
||||
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
|
||||
-e DOCKER_GRAPHDRIVER \
|
||||
-e VALIDATE_REPO=${GIT_URL} \
|
||||
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
|
||||
docker:${GIT_COMMIT} \
|
||||
hack/test/unit
|
||||
'''
|
||||
}
|
||||
post {
|
||||
always {
|
||||
junit testResults: 'bundles/junit-report*.xml', allowEmptyResults: true
|
||||
}
|
||||
}
|
||||
}
|
||||
stage("Integration tests") {
|
||||
environment { TEST_SKIP_INTEGRATION_CLI = '1' }
|
||||
steps {
|
||||
sh '''
|
||||
docker run --rm -t --privileged \
|
||||
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
|
||||
--name docker-pr$BUILD_NUMBER \
|
||||
-e DOCKER_EXPERIMENTAL \
|
||||
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
|
||||
-e DOCKER_GRAPHDRIVER \
|
||||
-e TESTDEBUG \
|
||||
-e TEST_INTEGRATION_USE_SNAPSHOTTER \
|
||||
-e TEST_SKIP_INTEGRATION_CLI \
|
||||
-e TIMEOUT \
|
||||
-e VALIDATE_REPO=${GIT_URL} \
|
||||
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
|
||||
docker:${GIT_COMMIT} \
|
||||
hack/make.sh \
|
||||
dynbinary \
|
||||
test-integration
|
||||
'''
|
||||
}
|
||||
post {
|
||||
always {
|
||||
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
post {
|
||||
always {
|
||||
sh '''
|
||||
echo "Ensuring container killed."
|
||||
docker rm -vf docker-pr$BUILD_NUMBER || true
|
||||
'''
|
||||
|
||||
sh '''
|
||||
echo "Chowning /workspace to jenkins user"
|
||||
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
|
||||
'''
|
||||
|
||||
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
|
||||
sh '''
|
||||
bundleName=arm64-integration
|
||||
echo "Creating ${bundleName}-bundles.tar.gz"
|
||||
# exclude overlay2 directories
|
||||
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
|
||||
'''
|
||||
|
||||
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
|
||||
}
|
||||
}
|
||||
cleanup {
|
||||
sh 'make clean'
|
||||
deleteDir()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -38,7 +38,6 @@
"laurazard",
"mhbauer",
"neersighted",
"robmry",
"rumpl",
"runcom",
"samuelkarp",
@@ -76,6 +75,7 @@
"olljanat",
"programmerq",
"ripcurld",
"robmry",
"sam-thibault",
"samwhited",
"thajeztah"
Makefile (20 changed lines)
@@ -1,3 +1,5 @@
.PHONY: all binary dynbinary build cross help install manpages run shell test test-docker-py test-integration test-unit validate validate-% win

DOCKER ?= docker
BUILDX ?= $(DOCKER) buildx

@@ -155,19 +157,15 @@ BAKE_CMD := $(BUILDX) bake

default: binary

.PHONY: all
all: build ## validate all checks, build linux binaries, run all tests,\ncross build non-linux binaries, and generate archives
	$(DOCKER_RUN_DOCKER) bash -c 'hack/validate/default && hack/make.sh'

.PHONY: binary
binary: bundles ## build statically linked linux binaries
	$(BAKE_CMD) binary

.PHONY: dynbinary
dynbinary: bundles ## build dynamically linked linux binaries
	$(BAKE_CMD) dynbinary

.PHONY: cross
cross: bundles ## cross build the binaries
	$(BAKE_CMD) binary-cross

@@ -181,15 +179,12 @@ clean: clean-cache
clean-cache: ## remove the docker volumes that are used for caching in the dev-container
	docker volume rm -f docker-dev-cache docker-mod-cache

.PHONY: help
help: ## this help
	@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z0-9_-]+:.*?## / {gsub("\\\\n",sprintf("\n%22c",""), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)

.PHONY: install
install: ## install the linux binaries
	KEEPBUNDLE=1 hack/make.sh install-binary

.PHONY: run
run: build ## run the docker daemon in a container
	$(DOCKER_RUN_DOCKER) sh -c "KEEPBUNDLE=1 hack/make.sh install-binary run"

@@ -202,22 +197,17 @@ endif
build: bundles
	$(BUILD_CMD) $(BUILD_OPTS) $(shell_target) --load -t "$(DOCKER_IMAGE)" .

.PHONY: shell
shell: build ## start a shell inside the build env
	$(DOCKER_RUN_DOCKER) bash

.PHONY: test
test: build test-unit ## run the unit, integration and docker-py tests
	$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary test-integration test-docker-py

.PHONY: test-docker-py
test-docker-py: build ## run the docker-py tests
	$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary test-docker-py

.PHONY: test-integration-cli
test-integration-cli: test-integration ## (DEPRECATED) use test-integration

.PHONY: test-integration
ifneq ($(and $(TEST_SKIP_INTEGRATION),$(TEST_SKIP_INTEGRATION_CLI)),)
test-integration:
	@echo Both integrations suites skipped per environment variables
@@ -226,29 +216,23 @@ test-integration: build ## run the integration tests
	$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary test-integration
endif

.PHONY: test-integration-flaky
test-integration-flaky: build ## run the stress test for all new integration tests
	$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary test-integration-flaky

.PHONY: test-unit
test-unit: build ## run the unit tests
	$(DOCKER_RUN_DOCKER) hack/test/unit

.PHONY: validate
validate: build ## validate DCO, Seccomp profile generation, gofmt,\n./pkg/ isolation, golint, tests, tomls, go vet and vendor
	$(DOCKER_RUN_DOCKER) hack/validate/all

.PHONY: validate-generate-files
validate-generate-files:
	$(BUILD_CMD) --target "validate" \
		--output "type=cacheonly" \
		--file "./hack/dockerfiles/generate-files.Dockerfile" .

.PHONY: validate-%
validate-%: build ## validate specific check
	$(DOCKER_RUN_DOCKER) hack/validate/$*

.PHONY: win
win: bundles ## cross build the binary for windows
	$(BAKE_CMD) --set *.platform=windows/amd64 binary
@@ -3,7 +3,7 @@ package api // import "github.com/docker/docker/api"
// Common constants for daemon and client.
const (
	// DefaultVersion of the current REST API.
	DefaultVersion = "1.46"
	DefaultVersion = "1.45"

	// MinSupportedAPIVersion is the minimum API version that can be supported
	// by the API server, specified as "major.minor". Note that the daemon
@@ -24,37 +24,42 @@ func FromError(err error) int {
		return http.StatusInternalServerError
	}

	var statusCode int

	// Stop right there
	// Are you sure you should be adding a new error class here? Do one of the existing ones work?

	// Note that the below functions are already checking the error causal chain for matches.
	switch {
	case errdefs.IsNotFound(err):
		return http.StatusNotFound
		statusCode = http.StatusNotFound
	case errdefs.IsInvalidParameter(err):
		return http.StatusBadRequest
		statusCode = http.StatusBadRequest
	case errdefs.IsConflict(err):
		return http.StatusConflict
		statusCode = http.StatusConflict
	case errdefs.IsUnauthorized(err):
		return http.StatusUnauthorized
		statusCode = http.StatusUnauthorized
	case errdefs.IsUnavailable(err):
		return http.StatusServiceUnavailable
		statusCode = http.StatusServiceUnavailable
	case errdefs.IsForbidden(err):
		return http.StatusForbidden
		statusCode = http.StatusForbidden
	case errdefs.IsNotModified(err):
		return http.StatusNotModified
		statusCode = http.StatusNotModified
	case errdefs.IsNotImplemented(err):
		return http.StatusNotImplemented
		statusCode = http.StatusNotImplemented
	case errdefs.IsSystem(err) || errdefs.IsUnknown(err) || errdefs.IsDataLoss(err) || errdefs.IsDeadline(err) || errdefs.IsCancelled(err):
		return http.StatusInternalServerError
		statusCode = http.StatusInternalServerError
	default:
		if statusCode := statusCodeFromGRPCError(err); statusCode != http.StatusInternalServerError {
		statusCode = statusCodeFromGRPCError(err)
		if statusCode != http.StatusInternalServerError {
			return statusCode
		}
		if statusCode := statusCodeFromContainerdError(err); statusCode != http.StatusInternalServerError {
		statusCode = statusCodeFromContainerdError(err)
		if statusCode != http.StatusInternalServerError {
			return statusCode
		}
		if statusCode := statusCodeFromDistributionError(err); statusCode != http.StatusInternalServerError {
		statusCode = statusCodeFromDistributionError(err)
		if statusCode != http.StatusInternalServerError {
			return statusCode
		}
		if e, ok := err.(causer); ok {
@@ -66,9 +71,13 @@ func FromError(err error) int {
			"error": err,
			"error_type": fmt.Sprintf("%T", err),
		}).Debug("FIXME: Got an API for which error does not match any expected type!!!")

		return http.StatusInternalServerError
	}

	if statusCode == 0 {
		statusCode = http.StatusInternalServerError
	}

	return statusCode
}

// statusCodeFromGRPCError returns status code according to gRPC error
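
As an aside, a minimal usage sketch of this mapping; the httpstatus package path is assumed for illustration, and the status codes shown are what the switch above produces:

package main

import (
	"errors"
	"fmt"

	"github.com/docker/docker/api/server/httpstatus" // import path assumed
	"github.com/docker/docker/errdefs"
)

func main() {
	// errdefs classifies the error; FromError maps the class to an HTTP status.
	notFound := errdefs.NotFound(errors.New("no such container: c1"))
	fmt.Println(httpstatus.FromError(notFound)) // 404

	// Unclassified errors fall through the default branch and end up as 500.
	fmt.Println(httpstatus.FromError(errors.New("boom"))) // 500
}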
@@ -1,17 +1,12 @@
package httputils // import "github.com/docker/docker/api/server/httputils"

import (
	"encoding/json"
	"fmt"
	"net/http"
	"strconv"
	"strings"

	"github.com/distribution/reference"
	"github.com/docker/docker/errdefs"
	"github.com/pkg/errors"

	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// BoolValue transforms a form value in different formats into a boolean type.
@@ -114,24 +109,3 @@ func ArchiveFormValues(r *http.Request, vars map[string]string) (ArchiveOptions,
	}
	return ArchiveOptions{name, path}, nil
}

// DecodePlatform decodes the OCI platform JSON string into a Platform struct.
func DecodePlatform(platformJSON string) (*ocispec.Platform, error) {
	var p ocispec.Platform

	if err := json.Unmarshal([]byte(platformJSON), &p); err != nil {
		return nil, errdefs.InvalidParameter(errors.Wrap(err, "failed to parse platform"))
	}

	hasAnyOptional := (p.Variant != "" || p.OSVersion != "" || len(p.OSFeatures) > 0)

	if p.OS == "" && p.Architecture == "" && hasAnyOptional {
		return nil, errdefs.InvalidParameter(errors.New("optional platform fields provided, but OS and Architecture are missing"))
	}

	if p.OS == "" || p.Architecture == "" {
		return nil, errdefs.InvalidParameter(errors.New("both OS and Architecture must be provided"))
	}

	return &p, nil
}

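A short sketch of calling the new helper directly (results in the comments are indicative only):

package main

import (
	"fmt"

	"github.com/docker/docker/api/server/httputils"
)

func main() {
	// OS and Architecture present: accepted.
	p, err := httputils.DecodePlatform(`{"os":"linux","architecture":"arm64","variant":"v8"}`)
	fmt.Println(p.OS, p.Architecture, p.Variant, err) // linux arm64 v8 <nil>

	// Optional fields without OS/Architecture: rejected as an invalid parameter.
	_, err = httputils.DecodePlatform(`{"os.version":"12.0"}`)
	fmt.Println(err) // optional platform fields provided, but OS and Architecture are missing
}
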
@@ -1,16 +1,9 @@
|
||||
package httputils // import "github.com/docker/docker/api/server/httputils"
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"testing"
|
||||
|
||||
"github.com/containerd/containerd/platforms"
|
||||
"github.com/docker/docker/errdefs"
|
||||
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"gotest.tools/v3/assert"
|
||||
)
|
||||
|
||||
func TestBoolValue(t *testing.T) {
|
||||
@@ -110,23 +103,3 @@ func TestInt64ValueOrDefaultWithError(t *testing.T) {
|
||||
t.Fatal("Expected an error.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestParsePlatformInvalid(t *testing.T) {
|
||||
for _, tc := range []ocispec.Platform{
|
||||
{
|
||||
OSVersion: "1.2.3",
|
||||
OSFeatures: []string{"a", "b"},
|
||||
},
|
||||
{OSVersion: "12.0"},
|
||||
{OS: "linux"},
|
||||
{Architecture: "amd64"},
|
||||
} {
|
||||
t.Run(platforms.Format(tc), func(t *testing.T) {
|
||||
js, err := json.Marshal(tc)
|
||||
assert.NilError(t, err)
|
||||
|
||||
_, err = DecodePlatform(string(js))
|
||||
assert.Check(t, errdefs.IsInvalidParameter(err))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -10,15 +10,11 @@ import (

// CORSMiddleware injects CORS headers to each request
// when it's configured.
//
// Deprecated: CORS headers should not be set on the API. This feature will be removed in the next release.
type CORSMiddleware struct {
	defaultHeaders string
}

// NewCORSMiddleware creates a new CORSMiddleware with default headers.
//
// Deprecated: CORS headers should not be set on the API. This feature will be removed in the next release.
func NewCORSMiddleware(d string) CORSMiddleware {
	return CORSMiddleware{defaultHeaders: d}
}

@@ -25,6 +25,7 @@ import (
|
||||
"github.com/docker/docker/pkg/ioutils"
|
||||
"github.com/docker/docker/pkg/progress"
|
||||
"github.com/docker/docker/pkg/streamformatter"
|
||||
units "github.com/docker/go-units"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
@@ -104,7 +105,7 @@ func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBui
|
||||
}
|
||||
|
||||
if ulimitsJSON := r.FormValue("ulimits"); ulimitsJSON != "" {
|
||||
buildUlimits := []*container.Ulimit{}
|
||||
buildUlimits := []*units.Ulimit{}
|
||||
if err := json.Unmarshal([]byte(ulimitsJSON), &buildUlimits); err != nil {
|
||||
return nil, invalidParam{errors.Wrap(err, "error reading ulimit settings")}
|
||||
}
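
For reference, the same decoding step as a standalone sketch; container.Ulimit is assumed here to be an alias for units.Ulimit, so either spelling accepts the same JSON payload:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types/container"
)

func main() {
	ulimitsJSON := `[{"Name":"nofile","Soft":1024,"Hard":2048}]`
	var buildUlimits []*container.Ulimit
	if err := json.Unmarshal([]byte(ulimitsJSON), &buildUlimits); err != nil {
		panic(err)
	}
	fmt.Println(buildUlimits[0].Name, buildUlimits[0].Soft, buildUlimits[0].Hard) // nofile 1024 2048
}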
|
||||
|
||||
@@ -14,19 +14,19 @@ import (
|
||||
|
||||
// execBackend includes functions to implement to provide exec functionality.
|
||||
type execBackend interface {
|
||||
ContainerExecCreate(name string, options *container.ExecOptions) (string, error)
|
||||
ContainerExecCreate(name string, config *types.ExecConfig) (string, error)
|
||||
ContainerExecInspect(id string) (*backend.ExecInspect, error)
|
||||
ContainerExecResize(name string, height, width int) error
|
||||
ContainerExecStart(ctx context.Context, name string, options backend.ExecStartConfig) error
|
||||
ContainerExecStart(ctx context.Context, name string, options container.ExecStartOptions) error
|
||||
ExecExists(name string) (bool, error)
|
||||
}
|
||||
|
||||
// copyBackend includes functions to implement to provide container copy functionality.
|
||||
type copyBackend interface {
|
||||
ContainerArchivePath(name string, path string) (content io.ReadCloser, stat *container.PathStat, err error)
|
||||
ContainerArchivePath(name string, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error)
|
||||
ContainerExport(ctx context.Context, name string, out io.Writer) error
|
||||
ContainerExtractToDir(name, path string, copyUIDGID, noOverwriteDirNonDir bool, content io.Reader) error
|
||||
ContainerStatPath(name string, path string) (stat *container.PathStat, err error)
|
||||
ContainerStatPath(name string, path string) (stat *types.ContainerPathStat, err error)
|
||||
}
|
||||
|
||||
// stateBackend includes functions to implement to provide container state lifecycle functionality.
|
||||
@@ -62,7 +62,7 @@ type attachBackend interface {
|
||||
|
||||
// systemBackend includes functions to implement to provide system wide containers functionality
|
||||
type systemBackend interface {
|
||||
ContainersPrune(ctx context.Context, pruneFilters filters.Args) (*container.PruneReport, error)
|
||||
ContainersPrune(ctx context.Context, pruneFilters filters.Args) (*types.ContainersPruneReport, error)
|
||||
}
|
||||
|
||||
type commitBackend interface {
|
||||
|
||||
@@ -22,14 +22,11 @@ import (
|
||||
"github.com/docker/docker/api/types/network"
|
||||
"github.com/docker/docker/api/types/versions"
|
||||
containerpkg "github.com/docker/docker/container"
|
||||
networkSettings "github.com/docker/docker/daemon/network"
|
||||
"github.com/docker/docker/errdefs"
|
||||
"github.com/docker/docker/libnetwork/netlabel"
|
||||
"github.com/docker/docker/pkg/ioutils"
|
||||
"github.com/docker/docker/runconfig"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
"go.opentelemetry.io/otel"
|
||||
"golang.org/x/net/websocket"
|
||||
)
|
||||
|
||||
@@ -42,13 +39,6 @@ func (s *containerRouter) postCommit(ctx context.Context, w http.ResponseWriter,
|
||||
return err
|
||||
}
|
||||
|
||||
// FIXME(thaJeztah): change this to unmarshal just [container.Config]:
|
||||
// The commit endpoint accepts a [container.Config], but the decoder uses a
|
||||
// [container.CreateRequest], which is a superset, and also contains
|
||||
// [container.HostConfig] and [network.NetworkConfig]. Those structs
|
||||
// are discarded here, but decoder.DecodeConfig also performs validation,
|
||||
// so a request containing those additional fields would result in a
|
||||
// validation error.
|
||||
config, _, _, err := s.decoder.DecodeConfig(r.Body)
|
||||
if err != nil && !errors.Is(err, io.EOF) { // Do not fail if body is empty.
|
||||
return err
|
||||
@@ -104,15 +94,6 @@ func (s *containerRouter) getContainersJSON(ctx context.Context, w http.Response
|
||||
return err
|
||||
}
|
||||
|
||||
version := httputils.VersionFromContext(ctx)
|
||||
|
||||
if versions.LessThan(version, "1.46") {
|
||||
for _, c := range containers {
|
||||
// Ignore HostConfig.Annotations because it was added in API v1.46.
|
||||
c.HostConfig.Annotations = nil
|
||||
}
|
||||
}
|
||||
|
||||
return httputils.WriteJSON(w, http.StatusOK, containers)
|
||||
}
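
The version gate above follows a pattern used throughout these handlers; a standalone sketch (values illustrative), noting that versions.LessThan compares major.minor numerically rather than lexically:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/versions"
)

func main() {
	clientVersion := "1.45"
	if versions.LessThan(clientVersion, "1.46") {
		// e.g. clear HostConfig.Annotations, which only exists from API v1.46.
		fmt.Println("strip v1.46+ fields for this client")
	}
	fmt.Println(versions.LessThan("1.9", "1.10")) // true: numeric, not lexical comparison
}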
|
||||
|
||||
@@ -197,9 +178,6 @@ func (s *containerRouter) getContainersExport(ctx context.Context, w http.Respon
|
||||
}
|
||||
|
||||
func (s *containerRouter) postContainersStart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
ctx, span := otel.Tracer("").Start(ctx, "containerRouter.postContainersStart")
|
||||
defer span.End()
|
||||
|
||||
// If contentLength is -1, we can assume chunked encoding
|
||||
// or more technically that the length is unknown
|
||||
// https://golang.org/src/pkg/net/http/request.go#L139
|
||||
@@ -502,7 +480,7 @@ func (s *containerRouter) postContainersCreate(ctx context.Context, w http.Respo
|
||||
// Note that this is not the only place where this conversion has to be
|
||||
// done (as there are various other places where containers get created).
|
||||
if hostConfig.NetworkMode == "" || hostConfig.NetworkMode.IsDefault() {
|
||||
hostConfig.NetworkMode = networkSettings.DefaultNetwork
|
||||
hostConfig.NetworkMode = runconfig.DefaultDaemonNetworkMode()
|
||||
if nw, ok := networkingConfig.EndpointsConfig[network.NetworkDefault]; ok {
|
||||
networkingConfig.EndpointsConfig[hostConfig.NetworkMode.NetworkName()] = nw
|
||||
delete(networkingConfig.EndpointsConfig, network.NetworkDefault)
|
||||
@@ -650,12 +628,6 @@ func (s *containerRouter) postContainersCreate(ctx context.Context, w http.Respo
|
||||
warnings = append(warnings, warn)
|
||||
}
|
||||
|
||||
if warn, err := handleSysctlBC(hostConfig, networkingConfig, version); err != nil {
|
||||
return err
|
||||
} else if warn != "" {
|
||||
warnings = append(warnings, warn)
|
||||
}
|
||||
|
||||
if hostConfig.PidsLimit != nil && *hostConfig.PidsLimit <= 0 {
|
||||
// Don't set a limit if either no limit was specified, or "unlimited" was
|
||||
// explicitly set.
|
||||
@@ -699,11 +671,23 @@ func handleMACAddressBC(config *container.Config, hostConfig *container.HostConf
|
||||
return "", runconfig.ErrConflictContainerNetworkAndMac
|
||||
}
|
||||
|
||||
epConfig, err := epConfigForNetMode(version, hostConfig.NetworkMode, networkingConfig)
|
||||
if err != nil {
|
||||
return "", err
|
||||
// There cannot be more than one entry in EndpointsConfig with API < 1.44.
|
||||
|
||||
// If there's no EndpointsConfig, create a place to store the configured address. It is
|
||||
// safe to use NetworkMode as the network name, whether it's a name or id/short-id, as
|
||||
// it will be normalised later and there is no other EndpointSettings object that might
|
||||
// refer to this network/endpoint.
|
||||
if len(networkingConfig.EndpointsConfig) == 0 {
|
||||
nwName := hostConfig.NetworkMode.NetworkName()
|
||||
networkingConfig.EndpointsConfig[nwName] = &network.EndpointSettings{}
|
||||
}
|
||||
// There's exactly one network in EndpointsConfig, either from the API or just-created.
|
||||
// Migrate the container-wide setting to it.
|
||||
// No need to check for a match between NetworkMode and the names/ids in EndpointsConfig,
|
||||
// the old version of the API would have applied the address to this network anyway.
|
||||
for _, ep := range networkingConfig.EndpointsConfig {
|
||||
ep.MacAddress = deprecatedMacAddress
|
||||
}
|
||||
epConfig.MacAddress = deprecatedMacAddress
|
||||
return "", nil
|
||||
}
|
||||
|
||||
@@ -713,16 +697,31 @@ func handleMACAddressBC(config *container.Config, hostConfig *container.HostConf
|
||||
}
|
||||
var warning string
|
||||
if hostConfig.NetworkMode.IsBridge() || hostConfig.NetworkMode.IsUserDefined() {
|
||||
ep, err := epConfigForNetMode(version, hostConfig.NetworkMode, networkingConfig)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "unable to migrate container-wide MAC address to a specific network")
|
||||
}
|
||||
// ep is the endpoint that needs the container-wide MAC address; migrate the address
|
||||
// to it, or bail out if there's a mismatch.
|
||||
if ep.MacAddress == "" {
|
||||
ep.MacAddress = deprecatedMacAddress
|
||||
} else if ep.MacAddress != deprecatedMacAddress {
|
||||
return "", errdefs.InvalidParameter(errors.New("the container-wide MAC address must match the endpoint-specific MAC address for the main network, or be left empty"))
|
||||
nwName := hostConfig.NetworkMode.NetworkName()
|
||||
// If there's no endpoint config, create a place to store the configured address.
|
||||
if len(networkingConfig.EndpointsConfig) == 0 {
|
||||
networkingConfig.EndpointsConfig[nwName] = &network.EndpointSettings{
|
||||
MacAddress: deprecatedMacAddress,
|
||||
}
|
||||
} else {
|
||||
// There is existing endpoint config - if it's not indexed by NetworkMode.Name(), we
|
||||
// can't tell which network the container-wide settings was intended for. NetworkMode,
|
||||
// the keys in EndpointsConfig and the NetworkID in EndpointsConfig may mix network
|
||||
// name/id/short-id. It's not safe to create EndpointsConfig under the NetworkMode
|
||||
// name to store the container-wide MAC address, because that may result in two sets
|
||||
// of EndpointsConfig for the same network and one set will be discarded later. So,
|
||||
// reject the request ...
|
||||
ep, ok := networkingConfig.EndpointsConfig[nwName]
|
||||
if !ok {
|
||||
return "", errdefs.InvalidParameter(errors.New("if a container-wide MAC address is supplied, HostConfig.NetworkMode must match the identity of a network in NetworkSettings.Networks"))
|
||||
}
|
||||
// ep is the endpoint that needs the container-wide MAC address; migrate the address
|
||||
// to it, or bail out if there's a mismatch.
|
||||
if ep.MacAddress == "" {
|
||||
ep.MacAddress = deprecatedMacAddress
|
||||
} else if ep.MacAddress != deprecatedMacAddress {
|
||||
return "", errdefs.InvalidParameter(errors.New("the container-wide MAC address must match the endpoint-specific MAC address for the main network, or be left empty"))
|
||||
}
|
||||
}
|
||||
}
|
||||
warning = "The container-wide MacAddress field is now deprecated. It should be specified in EndpointsConfig instead."
|
||||
@@ -731,146 +730,6 @@ func handleMACAddressBC(config *container.Config, hostConfig *container.HostConf
|
||||
return warning, nil
|
||||
}
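
A standalone sketch of the effect for a legacy request (handleMACAddressBC itself is unexported and also validates mismatches; the field values here are made up):

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/network"
)

func main() {
	// Legacy request shape: container-wide MAC, no per-endpoint config yet.
	deprecatedMAC := "02:42:ac:11:00:02"
	netMode := "mynet" // HostConfig.NetworkMode
	epConfig := map[string]*network.EndpointSettings{}

	// What the compatibility shim effectively does when EndpointsConfig is empty:
	if len(epConfig) == 0 {
		epConfig[netMode] = &network.EndpointSettings{MacAddress: deprecatedMAC}
	}
	fmt.Println(epConfig["mynet"].MacAddress) // 02:42:ac:11:00:02
}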
|
||||
|
||||
// handleSysctlBC migrates top level network endpoint-specific '--sysctl'
|
||||
// settings to a DriverOpts entry for an endpoint. This is necessary because sysctls
|
||||
// are applied during container task creation, but sysctls that name an interface
|
||||
// (for example 'net.ipv6.conf.eth0.forwarding') cannot be applied until the
|
||||
// interface has been created. So, these settings are removed from hostConfig.Sysctls
|
||||
// and added to DriverOpts[netlabel.EndpointSysctls].
|
||||
//
|
||||
// Because interface names ('ethN') are allocated sequentially, and the order of
|
||||
// network connections is not deterministic on container restart, only 'eth0'
|
||||
// would work reliably in a top-level '--sysctl' option, and then only when
|
||||
// there's a single initial network connection. So, settings for 'eth0' are
|
||||
// migrated to the primary interface, identified by 'hostConfig.NetworkMode'.
|
||||
// Settings for other interfaces are treated as errors.
|
||||
//
|
||||
// In the DriverOpts, because the interface name cannot be determined in advance, the
|
||||
// interface name is replaced by "IFNAME". For example, 'net.ipv6.conf.eth0.forwarding'
|
||||
// becomes 'net.ipv6.conf.IFNAME.forwarding'. The value in DriverOpts is a
|
||||
// comma-separated list.
|
||||
//
|
||||
// A warning is generated when settings are migrated.
|
||||
func handleSysctlBC(
|
||||
hostConfig *container.HostConfig,
|
||||
netConfig *network.NetworkingConfig,
|
||||
version string,
|
||||
) (string, error) {
|
||||
if !hostConfig.NetworkMode.IsPrivate() {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
var ep *network.EndpointSettings
|
||||
var toDelete []string
|
||||
var netIfSysctls []string
|
||||
for k, v := range hostConfig.Sysctls {
|
||||
// If the sysctl name matches "net.*.*.eth0.*" ...
|
||||
if spl := strings.SplitN(k, ".", 5); len(spl) == 5 && spl[0] == "net" && strings.HasPrefix(spl[3], "eth") {
|
||||
netIfSysctl := fmt.Sprintf("net.%s.%s.IFNAME.%s=%s", spl[1], spl[2], spl[4], v)
|
||||
// Find the EndpointConfig to migrate settings to, if not already found.
|
||||
if ep == nil {
|
||||
// Per-endpoint sysctls were introduced in API version 1.46. Migration is
|
||||
// needed, but refuse to do it automatically for newer versions of the API.
|
||||
if versions.GreaterThan(version, "1.46") {
|
||||
return "", fmt.Errorf("interface specific sysctl setting %q must be supplied using driver option '%s'",
|
||||
k, netlabel.EndpointSysctls)
|
||||
}
|
||||
var err error
|
||||
ep, err = epConfigForNetMode(version, hostConfig.NetworkMode, netConfig)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("unable to find a network for sysctl %s: %w", k, err)
|
||||
}
|
||||
}
|
||||
// Only try to migrate settings for "eth0", anything else would always
|
||||
// have behaved unpredictably.
|
||||
if spl[3] != "eth0" {
|
||||
return "", fmt.Errorf(`unable to determine network endpoint for sysctl %s, use driver option '%s' to set per-interface sysctls`,
|
||||
k, netlabel.EndpointSysctls)
|
||||
}
|
||||
// Prepare the migration.
|
||||
toDelete = append(toDelete, k)
|
||||
netIfSysctls = append(netIfSysctls, netIfSysctl)
|
||||
}
|
||||
}
|
||||
if ep == nil {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
newDriverOpt := strings.Join(netIfSysctls, ",")
|
||||
warning := fmt.Sprintf(`Migrated sysctl %q to DriverOpts{%q:%q}.`,
|
||||
strings.Join(toDelete, ","),
|
||||
netlabel.EndpointSysctls, newDriverOpt)
|
||||
|
||||
// Append existing per-endpoint sysctls to the migrated sysctls (give priority
|
||||
// to per-endpoint settings).
|
||||
if ep.DriverOpts == nil {
|
||||
ep.DriverOpts = map[string]string{}
|
||||
}
|
||||
if oldDriverOpt, ok := ep.DriverOpts[netlabel.EndpointSysctls]; ok {
|
||||
newDriverOpt += "," + oldDriverOpt
|
||||
}
|
||||
ep.DriverOpts[netlabel.EndpointSysctls] = newDriverOpt
|
||||
|
||||
// Delete migrated settings from the top-level sysctls.
|
||||
for _, k := range toDelete {
|
||||
delete(hostConfig.Sysctls, k)
|
||||
}
|
||||
|
||||
return warning, nil
|
||||
}
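
A standalone sketch of the rewrite described in the comment above, for a single setting (the real function also locates the target endpoint and applies the version gate):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// A container-wide sysctl that names an interface...
	key, val := "net.ipv6.conf.eth0.forwarding", "1"

	// ...is rewritten with the interface name replaced by IFNAME...
	spl := strings.SplitN(key, ".", 5) // ["net" "ipv6" "conf" "eth0" "forwarding"]
	driverOpt := fmt.Sprintf("net.%s.%s.IFNAME.%s=%s", spl[1], spl[2], spl[4], val)

	// ...and stored under DriverOpts[netlabel.EndpointSysctls]
	// ("com.docker.network.endpoint.sysctls") as a comma-separated list.
	fmt.Println(driverOpt) // net.ipv6.conf.IFNAME.forwarding=1
}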
|
||||
|
||||
// epConfigForNetMode finds, or creates, an entry in netConfig.EndpointsConfig
|
||||
// corresponding to nwMode.
|
||||
//
|
||||
// nwMode.NetworkName() may be the network's name, its id, or its short-id.
|
||||
//
|
||||
// The corresponding endpoint in netConfig.EndpointsConfig may be keyed on a
|
||||
// different one of name/id/short-id. If there's any ambiguity (there are
|
||||
// endpoints but the names don't match), return an error and do not create a new
|
||||
// endpoint, because it might be a duplicate.
|
||||
func epConfigForNetMode(
|
||||
version string,
|
||||
nwMode container.NetworkMode,
|
||||
netConfig *network.NetworkingConfig,
|
||||
) (*network.EndpointSettings, error) {
|
||||
nwName := nwMode.NetworkName()
|
||||
|
||||
// It's always safe to create an EndpointsConfig entry under nwName if there are
|
||||
// no entries already (because there can't be an entry for this network nwName
|
||||
// refers to under any other name/short-id/id).
|
||||
if len(netConfig.EndpointsConfig) == 0 {
|
||||
es := &network.EndpointSettings{}
|
||||
netConfig.EndpointsConfig = map[string]*network.EndpointSettings{
|
||||
nwName: es,
|
||||
}
|
||||
return es, nil
|
||||
}
|
||||
|
||||
// There cannot be more than one entry in EndpointsConfig with API < 1.44.
|
||||
if versions.LessThan(version, "1.44") {
|
||||
// No need to check for a match between NetworkMode and the names/ids in EndpointsConfig,
|
||||
// the old version of the API would pick this network anyway.
|
||||
for _, ep := range netConfig.EndpointsConfig {
|
||||
return ep, nil
|
||||
}
|
||||
}
|
||||
|
||||
// There is existing endpoint config - if it's not indexed by NetworkMode.Name(), we
|
||||
// can't tell which network the container-wide settings are intended for. NetworkMode,
|
||||
// the keys in EndpointsConfig and the NetworkID in EndpointsConfig may mix network
|
||||
// name/id/short-id. It's not safe to create EndpointsConfig under the NetworkMode
|
||||
// name to store the container-wide setting, because that may result in two sets
|
||||
// of EndpointsConfig for the same network and one set will be discarded later. So,
|
||||
// reject the request ...
|
||||
ep, ok := netConfig.EndpointsConfig[nwName]
|
||||
if !ok {
|
||||
return nil, errdefs.InvalidParameter(
|
||||
errors.New("HostConfig.NetworkMode must match the identity of a network in NetworkSettings.Networks"))
|
||||
}
|
||||
|
||||
return ep, nil
|
||||
}
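
Restating the lookup rule as a runnable sketch (the API < 1.44 branch is ignored; epConfigForNetMode itself is unexported):

package main

import (
	"errors"
	"fmt"

	"github.com/docker/docker/api/types/network"
)

// pick mimics the rule above: an empty config creates an entry under the NetworkMode
// name; otherwise the key must match NetworkMode exactly or the request is rejected.
func pick(nwName string, eps map[string]*network.EndpointSettings) (*network.EndpointSettings, error) {
	if len(eps) == 0 {
		es := &network.EndpointSettings{}
		eps[nwName] = es
		return es, nil
	}
	if ep, ok := eps[nwName]; ok {
		return ep, nil
	}
	return nil, errors.New("HostConfig.NetworkMode must match the identity of a network in NetworkSettings.Networks")
}

func main() {
	ep, _ := pick("mynet", map[string]*network.EndpointSettings{})
	fmt.Println(ep != nil) // true: created under "mynet"

	_, err := pick("mynet", map[string]*network.EndpointSettings{"shortid": {}})
	fmt.Println(err != nil) // true: ambiguous, so the request is rejected
}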
|
||||
|
||||
func (s *containerRouter) deleteContainers(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
if err := httputils.ParseForm(r); err != nil {
|
||||
return err
|
||||
@@ -925,7 +784,7 @@ func (s *containerRouter) postContainersAttach(ctx context.Context, w http.Respo
|
||||
}
|
||||
|
||||
contentType := types.MediaTypeRawStream
|
||||
setupStreams := func(multiplexed bool, cancel func()) (io.ReadCloser, io.Writer, io.Writer, error) {
|
||||
setupStreams := func(multiplexed bool) (io.ReadCloser, io.Writer, io.Writer, error) {
|
||||
conn, _, err := hijacker.Hijack()
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
@@ -938,13 +797,13 @@ func (s *containerRouter) postContainersAttach(ctx context.Context, w http.Respo
|
||||
if multiplexed && versions.GreaterThanOrEqualTo(httputils.VersionFromContext(ctx), "1.42") {
|
||||
contentType = types.MediaTypeMultiplexedStream
|
||||
}
|
||||
fmt.Fprintf(conn, "HTTP/1.1 101 UPGRADED\r\nContent-Type: "+contentType+"\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n")
|
||||
// FIXME(thaJeztah): we should not ignore errors here; see https://github.com/moby/moby/pull/48359#discussion_r1725562802
|
||||
fmt.Fprintf(conn, "HTTP/1.1 101 UPGRADED\r\nContent-Type: %v\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n", contentType)
|
||||
} else {
|
||||
fmt.Fprintf(conn, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n")
|
||||
// FIXME(thaJeztah): we should not ignore errors here; see https://github.com/moby/moby/pull/48359#discussion_r1725562802
|
||||
fmt.Fprint(conn, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n")
|
||||
}
|
||||
|
||||
go notifyClosed(ctx, conn, cancel)
|
||||
|
||||
closer := func() error {
|
||||
httputils.CloseStreams(conn)
|
||||
return nil
|
||||
@@ -993,7 +852,7 @@ func (s *containerRouter) wsContainersAttach(ctx context.Context, w http.Respons
|
||||
|
||||
version := httputils.VersionFromContext(ctx)
|
||||
|
||||
setupStreams := func(multiplexed bool, cancel func()) (io.ReadCloser, io.Writer, io.Writer, error) {
|
||||
setupStreams := func(multiplexed bool) (io.ReadCloser, io.Writer, io.Writer, error) {
|
||||
wsChan := make(chan *websocket.Conn)
|
||||
h := func(conn *websocket.Conn) {
|
||||
wsChan <- conn
|
||||
@@ -1012,8 +871,6 @@ func (s *containerRouter) wsContainersAttach(ctx context.Context, w http.Respons
|
||||
if versions.GreaterThanOrEqualTo(version, "1.28") {
|
||||
conn.PayloadType = websocket.BinaryFrame
|
||||
}
|
||||
|
||||
// TODO: Close notifications
|
||||
return conn, conn, conn, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -1,12 +1,10 @@
|
||||
package container
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/network"
|
||||
"github.com/docker/docker/libnetwork/netlabel"
|
||||
"gotest.tools/v3/assert"
|
||||
is "gotest.tools/v3/assert/cmp"
|
||||
)
|
||||
@@ -104,7 +102,7 @@ func TestHandleMACAddressBC(t *testing.T) {
|
||||
ctrWideMAC: "11:22:33:44:55:66",
|
||||
networkMode: "aNetId",
|
||||
epConfig: map[string]*network.EndpointSettings{"aNetName": {}},
|
||||
expError: "unable to migrate container-wide MAC address to a specific network: HostConfig.NetworkMode must match the identity of a network in NetworkSettings.Networks",
|
||||
expError: "if a container-wide MAC address is supplied, HostConfig.NetworkMode must match the identity of a network in NetworkSettings.Networks",
|
||||
expCtrWideMAC: "11:22:33:44:55:66",
|
||||
},
|
||||
{
|
||||
@@ -128,8 +126,8 @@ func TestHandleMACAddressBC(t *testing.T) {
|
||||
}
|
||||
epConfig := make(map[string]*network.EndpointSettings, len(tc.epConfig))
|
||||
for k, v := range tc.epConfig {
|
||||
v := *v
|
||||
epConfig[k] = &v
|
||||
v := v
|
||||
epConfig[k] = v
|
||||
}
|
||||
netCfg := &network.NetworkingConfig{
|
||||
EndpointsConfig: epConfig,
|
||||
@@ -160,191 +158,3 @@ func TestHandleMACAddressBC(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestEpConfigForNetMode(t *testing.T) {
|
||||
testcases := []struct {
|
||||
name string
|
||||
apiVersion string
|
||||
networkMode string
|
||||
epConfig map[string]*network.EndpointSettings
|
||||
expEpId string
|
||||
expNumEps int
|
||||
expError bool
|
||||
}{
|
||||
{
|
||||
name: "old api no eps",
|
||||
apiVersion: "1.43",
|
||||
networkMode: "mynet",
|
||||
expNumEps: 1,
|
||||
},
|
||||
{
|
||||
name: "new api no eps",
|
||||
apiVersion: "1.44",
|
||||
networkMode: "mynet",
|
||||
expNumEps: 1,
|
||||
},
|
||||
{
|
||||
name: "old api with ep",
|
||||
apiVersion: "1.43",
|
||||
networkMode: "mynet",
|
||||
epConfig: map[string]*network.EndpointSettings{
|
||||
"anything": {EndpointID: "epone"},
|
||||
},
|
||||
expEpId: "epone",
|
||||
expNumEps: 1,
|
||||
},
|
||||
{
|
||||
name: "new api with matching ep",
|
||||
apiVersion: "1.44",
|
||||
networkMode: "mynet",
|
||||
epConfig: map[string]*network.EndpointSettings{
|
||||
"mynet": {EndpointID: "epone"},
|
||||
},
|
||||
expEpId: "epone",
|
||||
expNumEps: 1,
|
||||
},
|
||||
{
|
||||
name: "new api with mismatched ep",
|
||||
apiVersion: "1.44",
|
||||
networkMode: "mynet",
|
||||
epConfig: map[string]*network.EndpointSettings{
|
||||
"shortid": {EndpointID: "epone"},
|
||||
},
|
||||
expError: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testcases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
netConfig := &network.NetworkingConfig{
|
||||
EndpointsConfig: tc.epConfig,
|
||||
}
|
||||
ep, err := epConfigForNetMode(tc.apiVersion, container.NetworkMode(tc.networkMode), netConfig)
|
||||
if tc.expError {
|
||||
assert.Check(t, is.ErrorContains(err, "HostConfig.NetworkMode must match the identity of a network in NetworkSettings.Networks"))
|
||||
} else {
|
||||
assert.Assert(t, err)
|
||||
assert.Check(t, is.Equal(ep.EndpointID, tc.expEpId))
|
||||
assert.Check(t, is.Len(netConfig.EndpointsConfig, tc.expNumEps))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleSysctlBC(t *testing.T) {
|
||||
testcases := []struct {
|
||||
name string
|
||||
apiVersion string
|
||||
networkMode string
|
||||
sysctls map[string]string
|
||||
epConfig map[string]*network.EndpointSettings
|
||||
expEpSysctls []string
|
||||
expSysctls map[string]string
|
||||
expWarningContains []string
|
||||
expError string
|
||||
}{
|
||||
{
|
||||
name: "migrate to new ep",
|
||||
apiVersion: "1.46",
|
||||
networkMode: "mynet",
|
||||
sysctls: map[string]string{
|
||||
"net.ipv6.conf.all.disable_ipv6": "0",
|
||||
"net.ipv6.conf.eth0.accept_ra": "2",
|
||||
"net.ipv6.conf.eth0.forwarding": "1",
|
||||
},
|
||||
expSysctls: map[string]string{
|
||||
"net.ipv6.conf.all.disable_ipv6": "0",
|
||||
},
|
||||
expEpSysctls: []string{"net.ipv6.conf.IFNAME.forwarding=1", "net.ipv6.conf.IFNAME.accept_ra=2"},
|
||||
expWarningContains: []string{
|
||||
"Migrated",
|
||||
"net.ipv6.conf.eth0.accept_ra", "net.ipv6.conf.IFNAME.accept_ra=2",
|
||||
"net.ipv6.conf.eth0.forwarding", "net.ipv6.conf.IFNAME.forwarding=1",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "migrate nothing",
|
||||
apiVersion: "1.46",
|
||||
networkMode: "mynet",
|
||||
sysctls: map[string]string{
|
||||
"net.ipv6.conf.all.disable_ipv6": "0",
|
||||
},
|
||||
expSysctls: map[string]string{
|
||||
"net.ipv6.conf.all.disable_ipv6": "0",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "migration disabled for newer api",
|
||||
apiVersion: "1.47",
|
||||
networkMode: "mynet",
|
||||
sysctls: map[string]string{
|
||||
"net.ipv6.conf.eth0.accept_ra": "2",
|
||||
},
|
||||
expError: "must be supplied using driver option 'com.docker.network.endpoint.sysctls'",
|
||||
},
|
||||
{
|
||||
name: "only migrate eth0",
|
||||
apiVersion: "1.46",
|
||||
networkMode: "mynet",
|
||||
sysctls: map[string]string{
|
||||
"net.ipv6.conf.eth1.accept_ra": "2",
|
||||
},
|
||||
expError: "unable to determine network endpoint",
|
||||
},
|
||||
{
|
||||
name: "net name mismatch",
|
||||
apiVersion: "1.46",
|
||||
networkMode: "mynet",
|
||||
epConfig: map[string]*network.EndpointSettings{
|
||||
"shortid": {EndpointID: "epone"},
|
||||
},
|
||||
sysctls: map[string]string{
|
||||
"net.ipv6.conf.eth1.accept_ra": "2",
|
||||
},
|
||||
expError: "unable to find a network for sysctl",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testcases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
hostCfg := &container.HostConfig{
|
||||
NetworkMode: container.NetworkMode(tc.networkMode),
|
||||
Sysctls: map[string]string{},
|
||||
}
|
||||
for k, v := range tc.sysctls {
|
||||
hostCfg.Sysctls[k] = v
|
||||
}
|
||||
netCfg := &network.NetworkingConfig{
|
||||
EndpointsConfig: tc.epConfig,
|
||||
}
|
||||
|
||||
warnings, err := handleSysctlBC(hostCfg, netCfg, tc.apiVersion)
|
||||
|
||||
for _, s := range tc.expWarningContains {
|
||||
assert.Check(t, is.Contains(warnings, s))
|
||||
}
|
||||
|
||||
if tc.expError != "" {
|
||||
assert.Check(t, is.ErrorContains(err, tc.expError))
|
||||
} else {
|
||||
assert.Check(t, err)
|
||||
|
||||
assert.Check(t, is.DeepEqual(hostCfg.Sysctls, tc.expSysctls))
|
||||
|
||||
ep := netCfg.EndpointsConfig[tc.networkMode]
|
||||
if ep == nil {
|
||||
assert.Check(t, is.Nil(tc.expEpSysctls))
|
||||
} else {
|
||||
got, ok := ep.DriverOpts[netlabel.EndpointSysctls]
|
||||
assert.Check(t, ok)
|
||||
// Check for expected ep-sysctls.
|
||||
for _, want := range tc.expEpSysctls {
|
||||
assert.Check(t, is.Contains(got, want))
|
||||
}
|
||||
// Check for unexpected ep-sysctls.
|
||||
assert.Check(t, is.Len(got, len(strings.Join(tc.expEpSysctls, ","))))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -10,12 +10,12 @@ import (
|
||||
"net/http"
|
||||
|
||||
"github.com/docker/docker/api/server/httputils"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types"
|
||||
gddohttputil "github.com/golang/gddo/httputil"
|
||||
)
|
||||
|
||||
// setContainerPathStatHeader encodes the stat to JSON, base64 encode, and place in a header.
|
||||
func setContainerPathStatHeader(stat *container.PathStat, header http.Header) error {
|
||||
func setContainerPathStatHeader(stat *types.ContainerPathStat, header http.Header) error {
|
||||
statJSON, err := json.Marshal(stat)
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
@@ -10,7 +10,6 @@ import (
|
||||
"github.com/containerd/log"
|
||||
"github.com/docker/docker/api/server/httputils"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/backend"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/versions"
|
||||
"github.com/docker/docker/errdefs"
|
||||
@@ -39,7 +38,7 @@ func (s *containerRouter) postContainerExecCreate(ctx context.Context, w http.Re
|
||||
return err
|
||||
}
|
||||
|
||||
execConfig := &container.ExecOptions{}
|
||||
execConfig := &types.ExecConfig{}
|
||||
if err := httputils.ReadJSON(r, execConfig); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -78,8 +77,8 @@ func (s *containerRouter) postContainerExecStart(ctx context.Context, w http.Res
|
||||
stdout, stderr, outStream io.Writer
|
||||
)
|
||||
|
||||
options := &container.ExecStartOptions{}
|
||||
if err := httputils.ReadJSON(r, options); err != nil {
|
||||
execStartCheck := &types.ExecStartCheck{}
|
||||
if err := httputils.ReadJSON(r, execStartCheck); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -87,21 +86,21 @@ func (s *containerRouter) postContainerExecStart(ctx context.Context, w http.Res
|
||||
return err
|
||||
}
|
||||
|
||||
if options.ConsoleSize != nil {
|
||||
if execStartCheck.ConsoleSize != nil {
|
||||
version := httputils.VersionFromContext(ctx)
|
||||
|
||||
// Not supported before 1.42
|
||||
if versions.LessThan(version, "1.42") {
|
||||
options.ConsoleSize = nil
|
||||
execStartCheck.ConsoleSize = nil
|
||||
}
|
||||
|
||||
// No console without tty
|
||||
if !options.Tty {
|
||||
options.ConsoleSize = nil
|
||||
if !execStartCheck.Tty {
|
||||
execStartCheck.ConsoleSize = nil
|
||||
}
|
||||
}
|
||||
|
||||
if !options.Detach {
|
||||
if !execStartCheck.Detach {
|
||||
var err error
|
||||
// Setting up the streaming http interface.
|
||||
inStream, outStream, err = httputils.HijackConnection(w)
|
||||
@@ -112,43 +111,42 @@ func (s *containerRouter) postContainerExecStart(ctx context.Context, w http.Res
|
||||
|
||||
if _, ok := r.Header["Upgrade"]; ok {
|
||||
contentType := types.MediaTypeRawStream
|
||||
if !options.Tty && versions.GreaterThanOrEqualTo(httputils.VersionFromContext(ctx), "1.42") {
|
||||
if !execStartCheck.Tty && versions.GreaterThanOrEqualTo(httputils.VersionFromContext(ctx), "1.42") {
|
||||
contentType = types.MediaTypeMultiplexedStream
|
||||
}
|
||||
_, _ = fmt.Fprint(outStream, "HTTP/1.1 101 UPGRADED\r\nContent-Type: "+contentType+"\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n")
|
||||
fmt.Fprint(outStream, "HTTP/1.1 101 UPGRADED\r\nContent-Type: "+contentType+"\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n")
|
||||
} else {
|
||||
_, _ = fmt.Fprint(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n")
|
||||
fmt.Fprint(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n")
|
||||
}
|
||||
|
||||
// copy headers that were removed as part of hijack
|
||||
if err := w.Header().WriteSubset(outStream, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
_, _ = fmt.Fprint(outStream, "\r\n")
|
||||
fmt.Fprint(outStream, "\r\n")
|
||||
|
||||
stdin = inStream
|
||||
if options.Tty {
|
||||
stdout = outStream
|
||||
} else {
|
||||
stdout = outStream
|
||||
if !execStartCheck.Tty {
|
||||
stderr = stdcopy.NewStdWriter(outStream, stdcopy.Stderr)
|
||||
stdout = stdcopy.NewStdWriter(outStream, stdcopy.Stdout)
|
||||
}
|
||||
}
|
||||
|
||||
// Now run the user process in container.
|
||||
//
|
||||
// TODO: Maybe we should pass ctx here if we're not detaching?
|
||||
err := s.backend.ContainerExecStart(context.Background(), execName, backend.ExecStartConfig{
|
||||
options := container.ExecStartOptions{
|
||||
Stdin: stdin,
|
||||
Stdout: stdout,
|
||||
Stderr: stderr,
|
||||
ConsoleSize: options.ConsoleSize,
|
||||
})
|
||||
if err != nil {
|
||||
if options.Detach {
|
||||
ConsoleSize: execStartCheck.ConsoleSize,
|
||||
}
|
||||
|
||||
// Now run the user process in container.
|
||||
// Maybe we should pass ctx here if we're not detaching?
|
||||
if err := s.backend.ContainerExecStart(context.Background(), execName, options); err != nil {
|
||||
if execStartCheck.Detach {
|
||||
return err
|
||||
}
|
||||
_, _ = fmt.Fprintf(stdout, "%v\r\n", err)
|
||||
stdout.Write([]byte(err.Error() + "\r\n"))
|
||||
log.G(ctx).Errorf("Error running exec %s in container: %v", execName, err)
|
||||
}
|
||||
return nil
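
On the client side, the multiplexed stream written with stdcopy.NewStdWriter above is normally split back apart with stdcopy.StdCopy; a minimal sketch:

package main

import (
	"bytes"
	"fmt"
	"os"

	"github.com/docker/docker/pkg/stdcopy"
)

func main() {
	// Build a multiplexed stream the same way the non-TTY exec path does.
	var stream bytes.Buffer
	_, _ = stdcopy.NewStdWriter(&stream, stdcopy.Stdout).Write([]byte("hello from stdout\n"))
	_, _ = stdcopy.NewStdWriter(&stream, stdcopy.Stderr).Write([]byte("hello from stderr\n"))

	// A client demultiplexes it back into separate stdout/stderr writers.
	if _, err := stdcopy.StdCopy(os.Stdout, os.Stderr, &stream); err != nil {
		fmt.Println("demux error:", err)
	}
}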
|
||||
|
||||
@@ -1,54 +0,0 @@
|
||||
package container
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"syscall"
|
||||
|
||||
"github.com/containerd/log"
|
||||
"github.com/docker/docker/internal/unix_noeintr"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
func notifyClosed(ctx context.Context, conn net.Conn, notify func()) {
|
||||
sc, ok := conn.(syscall.Conn)
|
||||
if !ok {
|
||||
log.G(ctx).Debug("notifyClosed: conn does not support close notifications")
|
||||
return
|
||||
}
|
||||
|
||||
rc, err := sc.SyscallConn()
|
||||
if err != nil {
|
||||
log.G(ctx).WithError(err).Warn("notifyClosed: failed get raw conn for close notifications")
|
||||
return
|
||||
}
|
||||
|
||||
epFd, err := unix_noeintr.EpollCreate()
|
||||
if err != nil {
|
||||
log.G(ctx).WithError(err).Warn("notifyClosed: failed to create epoll fd")
|
||||
return
|
||||
}
|
||||
defer unix.Close(epFd)
|
||||
|
||||
err = rc.Control(func(fd uintptr) {
|
||||
err := unix_noeintr.EpollCtl(epFd, unix.EPOLL_CTL_ADD, int(fd), &unix.EpollEvent{
|
||||
Events: unix.EPOLLHUP,
|
||||
Fd: int32(fd),
|
||||
})
|
||||
if err != nil {
|
||||
log.G(ctx).WithError(err).Warn("notifyClosed: failed to register fd for close notifications")
|
||||
return
|
||||
}
|
||||
|
||||
events := make([]unix.EpollEvent, 1)
|
||||
if _, err := unix_noeintr.EpollWait(epFd, events, -1); err != nil {
|
||||
log.G(ctx).WithError(err).Warn("notifyClosed: failed to wait for close notifications")
|
||||
return
|
||||
}
|
||||
notify()
|
||||
})
|
||||
if err != nil {
|
||||
log.G(ctx).WithError(err).Warn("notifyClosed: failed to register for close notifications")
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -1,10 +0,0 @@
|
||||
//go:build !linux
|
||||
|
||||
package container
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
)
|
||||
|
||||
func notifyClosed(ctx context.Context, conn net.Conn, notify func()) {}
|
||||
@@ -1,21 +1,13 @@
|
||||
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
|
||||
//go:build go1.21
|
||||
|
||||
package grpc // import "github.com/docker/docker/api/server/router/grpc"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/containerd/log"
|
||||
"github.com/docker/docker/api/server/router"
|
||||
grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
|
||||
"github.com/moby/buildkit/util/grpcerrors"
|
||||
"github.com/moby/buildkit/util/stack"
|
||||
"github.com/moby/buildkit/util/tracing"
|
||||
"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
|
||||
"go.opentelemetry.io/otel"
|
||||
"golang.org/x/net/http2"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
@@ -28,15 +20,12 @@ type grpcRouter struct {
|
||||
|
||||
// NewRouter initializes a new grpc http router
|
||||
func NewRouter(backends ...Backend) router.Router {
|
||||
opts := []grpc.ServerOption{
|
||||
grpc.StatsHandler(tracing.ServerStatsHandler(otelgrpc.WithTracerProvider(otel.GetTracerProvider()))),
|
||||
grpc.ChainUnaryInterceptor(unaryInterceptor, grpcerrors.UnaryServerInterceptor),
|
||||
grpc.StreamInterceptor(grpcerrors.StreamServerInterceptor),
|
||||
}
|
||||
unary := grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(unaryInterceptor(), grpcerrors.UnaryServerInterceptor))
|
||||
stream := grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(otelgrpc.StreamServerInterceptor(), grpcerrors.StreamServerInterceptor)) //nolint:staticcheck // TODO(thaJeztah): ignore SA1019 for deprecated options: see https://github.com/moby/moby/issues/47437
|
||||
|
||||
r := &grpcRouter{
|
||||
h2Server: &http2.Server{},
|
||||
grpcServer: grpc.NewServer(opts...),
|
||||
grpcServer: grpc.NewServer(unary, stream),
|
||||
}
|
||||
for _, b := range backends {
|
||||
b.RegisterGRPC(r.grpcServer)
|
||||
@@ -56,20 +45,16 @@ func (gr *grpcRouter) initRoutes() {
|
||||
}
|
||||
}
|
||||
|
||||
func unaryInterceptor(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) {
|
||||
// This method is used by the clients to send their traces to buildkit so they can be included
|
||||
// in the daemon trace and stored in the build history record. This method can not be traced because
|
||||
// it would cause an infinite loop.
|
||||
if strings.HasSuffix(info.FullMethod, "opentelemetry.proto.collector.trace.v1.TraceService/Export") {
|
||||
return handler(ctx, req)
|
||||
}
|
||||
func unaryInterceptor() grpc.UnaryServerInterceptor {
|
||||
withTrace := otelgrpc.UnaryServerInterceptor() //nolint:staticcheck // TODO(thaJeztah): ignore SA1019 for deprecated options: see https://github.com/moby/moby/issues/47437
|
||||
|
||||
resp, err = handler(ctx, req)
|
||||
if err != nil {
|
||||
log.G(ctx).WithError(err).Error(info.FullMethod)
|
||||
if log.GetLevel() >= log.DebugLevel {
|
||||
fmt.Fprintf(os.Stderr, "%+v", stack.Formatter(grpcerrors.FromGRPC(err)))
|
||||
return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) {
|
||||
// This method is used by the clients to send their traces to buildkit so they can be included
|
||||
// in the daemon trace and stored in the build history record. This method can not be traced because
|
||||
// it would cause an infinite loop.
|
||||
if strings.HasSuffix(info.FullMethod, "opentelemetry.proto.collector.trace.v1.TraceService/Export") {
|
||||
return handler(ctx, req)
|
||||
}
|
||||
return withTrace(ctx, req, info, handler)
|
||||
}
|
||||
return resp, err
|
||||
}
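
A minimal sketch of the chaining behaviour grpc.ChainUnaryInterceptor provides (interceptors run left to right around the handler); the logging interceptor here is made up for illustration:

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc"
)

// logging is a made-up interceptor that wraps whatever comes next in the chain.
func logging(tag string) grpc.UnaryServerInterceptor {
	return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
		fmt.Println("enter", tag)
		resp, err := handler(ctx, req)
		fmt.Println("leave", tag)
		return resp, err
	}
}

func main() {
	// "first" wraps "second", which wraps the registered handler.
	srv := grpc.NewServer(grpc.ChainUnaryInterceptor(logging("first"), logging("second")))
	_ = srv
}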
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"io"
|
||||
|
||||
"github.com/distribution/reference"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/backend"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/docker/docker/api/types/image"
|
||||
@@ -27,7 +28,7 @@ type imageBackend interface {
|
||||
Images(ctx context.Context, opts image.ListOptions) ([]*image.Summary, error)
|
||||
GetImage(ctx context.Context, refOrID string, options backend.GetImageOpts) (*dockerimage.Image, error)
|
||||
TagImage(ctx context.Context, id dockerimage.ID, newRef reference.Named) error
|
||||
ImagesPrune(ctx context.Context, pruneFilters filters.Args) (*image.PruneReport, error)
|
||||
ImagesPrune(ctx context.Context, pruneFilters filters.Args) (*types.ImagesPruneReport, error)
|
||||
}
|
||||
|
||||
type importExportBackend interface {
|
||||
@@ -38,7 +39,7 @@ type importExportBackend interface {
|
||||
|
||||
type registryBackend interface {
|
||||
PullImage(ctx context.Context, ref reference.Named, platform *ocispec.Platform, metaHeaders map[string][]string, authConfig *registry.AuthConfig, outStream io.Writer) error
|
||||
PushImage(ctx context.Context, ref reference.Named, platform *ocispec.Platform, metaHeaders map[string][]string, authConfig *registry.AuthConfig, outStream io.Writer) error
|
||||
PushImage(ctx context.Context, ref reference.Named, metaHeaders map[string][]string, authConfig *registry.AuthConfig, outStream io.Writer) error
|
||||
}
|
||||
|
||||
type Searcher interface {
|
||||
|
||||
@@ -142,7 +142,7 @@ func (ir *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWrit
|
||||
id, progressErr = ir.backend.ImportImage(ctx, tagRef, platform, comment, layerReader, r.Form["changes"])
|
||||
|
||||
if progressErr == nil {
|
||||
output.Write(streamformatter.FormatStatus("", id.String()))
|
||||
_, _ = output.Write(streamformatter.FormatStatus("", "%v", id.String()))
|
||||
}
|
||||
}
|
||||
if progressErr != nil {
|
||||
@@ -205,25 +205,7 @@ func (ir *imageRouter) postImagesPush(ctx context.Context, w http.ResponseWriter
|
||||
ref = r
|
||||
}
|
||||
|
||||
var platform *ocispec.Platform
|
||||
// Platform is optional, and only supported in API version 1.46 and later.
|
||||
// However the PushOptions struct previously was an alias for the PullOptions struct
|
||||
// which also contained a Platform field.
|
||||
// This means that older clients may be sending a platform field, even
|
||||
// though it wasn't really supported by the server.
|
||||
// Don't break these clients and just ignore the platform field on older APIs.
|
||||
if versions.GreaterThanOrEqualTo(httputils.VersionFromContext(ctx), "1.46") {
|
||||
if formPlatform := r.Form.Get("platform"); formPlatform != "" {
|
||||
p, err := httputils.DecodePlatform(formPlatform)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
platform = p
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if err := ir.backend.PushImage(ctx, ref, platform, metaHeaders, authConfig, output); err != nil {
|
||||
if err := ir.backend.PushImage(ctx, ref, metaHeaders, authConfig, output); err != nil {
|
||||
if !output.Flushed() {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@ package network // import "github.com/docker/docker/api/server/router/network"
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/backend"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/docker/docker/api/types/network"
|
||||
@@ -11,20 +12,20 @@ import (
|
||||
// Backend is all the methods that need to be implemented
|
||||
// to provide network specific functionality.
|
||||
type Backend interface {
|
||||
GetNetworks(filters.Args, backend.NetworkListConfig) ([]network.Inspect, error)
|
||||
CreateNetwork(nc network.CreateRequest) (*network.CreateResponse, error)
|
||||
ConnectContainerToNetwork(ctx context.Context, containerName, networkName string, endpointConfig *network.EndpointSettings) error
|
||||
GetNetworks(filters.Args, backend.NetworkListConfig) ([]types.NetworkResource, error)
|
||||
CreateNetwork(nc types.NetworkCreateRequest) (*types.NetworkCreateResponse, error)
|
||||
ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error
|
||||
DisconnectContainerFromNetwork(containerName string, networkName string, force bool) error
|
||||
DeleteNetwork(networkID string) error
|
||||
NetworksPrune(ctx context.Context, pruneFilters filters.Args) (*network.PruneReport, error)
|
||||
NetworksPrune(ctx context.Context, pruneFilters filters.Args) (*types.NetworksPruneReport, error)
|
||||
}
|
||||
|
||||
// ClusterBackend is all the methods that need to be implemented
|
||||
// to provide cluster network specific functionality.
|
||||
type ClusterBackend interface {
|
||||
GetNetworks(filters.Args) ([]network.Inspect, error)
|
||||
GetNetwork(name string) (network.Inspect, error)
|
||||
GetNetworksByName(name string) ([]network.Inspect, error)
|
||||
CreateNetwork(nc network.CreateRequest) (string, error)
|
||||
GetNetworks(filters.Args) ([]types.NetworkResource, error)
|
||||
GetNetwork(name string) (types.NetworkResource, error)
|
||||
GetNetworksByName(name string) ([]types.NetworkResource, error)
|
||||
CreateNetwork(nc types.NetworkCreateRequest) (string, error)
|
||||
RemoveNetwork(name string) error
|
||||
}
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/api/server/httputils"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/backend"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/docker/docker/api/types/network"
|
||||
@@ -31,7 +32,7 @@ func (n *networkRouter) getNetworksList(ctx context.Context, w http.ResponseWrit
|
||||
return err
|
||||
}
|
||||
|
||||
var list []network.Summary
|
||||
var list []types.NetworkResource
|
||||
nr, err := n.cluster.GetNetworks(filter)
|
||||
if err == nil {
|
||||
list = nr
|
||||
@@ -59,7 +60,7 @@ func (n *networkRouter) getNetworksList(ctx context.Context, w http.ResponseWrit
|
||||
}
|
||||
|
||||
if list == nil {
|
||||
list = []network.Summary{}
|
||||
list = []types.NetworkResource{}
|
||||
}
|
||||
|
||||
return httputils.WriteJSON(w, http.StatusOK, list)
|
||||
@@ -108,8 +109,8 @@ func (n *networkRouter) getNetwork(ctx context.Context, w http.ResponseWriter, r
|
||||
|
||||
// For full name and partial ID, save the result first, and process later
|
||||
// in case multiple records was found based on the same term
|
||||
listByFullName := map[string]network.Inspect{}
|
||||
listByPartialID := map[string]network.Inspect{}
|
||||
listByFullName := map[string]types.NetworkResource{}
|
||||
listByPartialID := map[string]types.NetworkResource{}
|
||||
|
||||
// TODO(@cpuguy83): All this logic for figuring out which network to return does not belong here
|
||||
// Instead there should be a backend function to just get one network.
|
||||
@@ -203,7 +204,7 @@ func (n *networkRouter) postNetworkCreate(ctx context.Context, w http.ResponseWr
|
||||
return err
|
||||
}
|
||||
|
||||
var create network.CreateRequest
|
||||
var create types.NetworkCreateRequest
|
||||
if err := httputils.ReadJSON(r, &create); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -225,7 +226,7 @@ func (n *networkRouter) postNetworkCreate(ctx context.Context, w http.ResponseWr
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
nw = &network.CreateResponse{
|
||||
nw = &types.NetworkCreateResponse{
|
||||
ID: id,
|
||||
}
|
||||
}
|
||||
@@ -238,7 +239,7 @@ func (n *networkRouter) postNetworkConnect(ctx context.Context, w http.ResponseW
|
||||
return err
|
||||
}
|
||||
|
||||
var connect network.ConnectOptions
|
||||
var connect types.NetworkConnect
|
||||
if err := httputils.ReadJSON(r, &connect); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -247,7 +248,7 @@ func (n *networkRouter) postNetworkConnect(ctx context.Context, w http.ResponseW
|
||||
// The reason is that, in case of attachable network in swarm scope, the actual local network
|
||||
// may not be available at the time. At the same time, inside daemon `ConnectContainerToNetwork`
|
||||
// does the ambiguity check anyway. Therefore, passing the name to daemon would be enough.
|
||||
return n.backend.ConnectContainerToNetwork(ctx, connect.Container, vars["id"], connect.EndpointConfig)
|
||||
return n.backend.ConnectContainerToNetwork(connect.Container, vars["id"], connect.EndpointConfig)
|
||||
}
|
||||
|
||||
func (n *networkRouter) postNetworkDisconnect(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
@@ -255,7 +256,7 @@ func (n *networkRouter) postNetworkDisconnect(ctx context.Context, w http.Respon
|
||||
return err
|
||||
}
|
||||
|
||||
var disconnect network.DisconnectOptions
|
||||
var disconnect types.NetworkDisconnect
|
||||
if err := httputils.ReadJSON(r, &disconnect); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -310,9 +311,9 @@ func (n *networkRouter) postNetworksPrune(ctx context.Context, w http.ResponseWr
|
||||
// For full name and partial ID, save the result first, and process later
|
||||
// in case multiple records were found based on the same term
|
||||
// TODO (yongtang): should we wrap with version here for backward compatibility?
|
||||
func (n *networkRouter) findUniqueNetwork(term string) (network.Inspect, error) {
|
||||
listByFullName := map[string]network.Inspect{}
|
||||
listByPartialID := map[string]network.Inspect{}
|
||||
func (n *networkRouter) findUniqueNetwork(term string) (types.NetworkResource, error) {
|
||||
listByFullName := map[string]types.NetworkResource{}
|
||||
listByPartialID := map[string]types.NetworkResource{}
|
||||
|
||||
filter := filters.NewArgs(filters.Arg("idOrName", term))
|
||||
networks, _ := n.backend.GetNetworks(filter, backend.NetworkListConfig{Detailed: true})
|
||||
@@ -362,7 +363,7 @@ func (n *networkRouter) findUniqueNetwork(term string) (network.Inspect, error)
|
||||
}
|
||||
}
|
||||
if len(listByFullName) > 1 {
|
||||
return network.Inspect{}, errdefs.InvalidParameter(errors.Errorf("network %s is ambiguous (%d matches found based on name)", term, len(listByFullName)))
|
||||
return types.NetworkResource{}, errdefs.InvalidParameter(errors.Errorf("network %s is ambiguous (%d matches found based on name)", term, len(listByFullName)))
|
||||
}
|
||||
|
||||
// Find based on partial ID, returns true only if no duplicates
|
||||
@@ -372,8 +373,8 @@ func (n *networkRouter) findUniqueNetwork(term string) (network.Inspect, error)
|
||||
}
|
||||
}
|
||||
if len(listByPartialID) > 1 {
|
||||
return network.Inspect{}, errdefs.InvalidParameter(errors.Errorf("network %s is ambiguous (%d matches found based on ID prefix)", term, len(listByPartialID)))
|
||||
return types.NetworkResource{}, errdefs.InvalidParameter(errors.Errorf("network %s is ambiguous (%d matches found based on ID prefix)", term, len(listByPartialID)))
|
||||
}
|
||||
|
||||
return network.Inspect{}, errdefs.NotFound(libnetwork.ErrNoSuchNetwork(term))
|
||||
return types.NetworkResource{}, errdefs.NotFound(libnetwork.ErrNoSuchNetwork(term))
|
||||
}
|
||||
|
||||
@@ -78,16 +78,6 @@ func adjustForAPIVersion(cliVersion string, service *swarm.ServiceSpec) {
|
||||
if cliVersion == "" {
|
||||
return
|
||||
}
|
||||
if versions.LessThan(cliVersion, "1.46") {
|
||||
if service.TaskTemplate.ContainerSpec != nil {
|
||||
for i, mount := range service.TaskTemplate.ContainerSpec.Mounts {
|
||||
if mount.TmpfsOptions != nil {
|
||||
mount.TmpfsOptions.Options = nil
|
||||
service.TaskTemplate.ContainerSpec.Mounts[i] = mount
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if versions.LessThan(cliVersion, "1.40") {
|
||||
if service.TaskTemplate.ContainerSpec != nil {
|
||||
// Sysctls for docker swarm services weren't supported before
|
||||
@@ -144,12 +134,4 @@ func adjustForAPIVersion(cliVersion string, service *swarm.ServiceSpec) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if versions.LessThan(cliVersion, "1.46") {
|
||||
if service.TaskTemplate.ContainerSpec != nil && service.TaskTemplate.ContainerSpec.OomScoreAdj != 0 {
|
||||
// OomScoreAdj was added in API 1.46
|
||||
service.TaskTemplate.ContainerSpec.OomScoreAdj = 0
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
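adjustForAPIVersion, shown above, clears fields that clients older than the API version introducing them cannot know about (TmpfsOptions.Options and OomScoreAdj for pre-1.46 clients). A hedged sketch of the same guard pattern, using the real versions helper but a hypothetical FancyField that exists only for illustration:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/versions"
)

type containerSpec struct {
	// FancyField stands in for a field introduced in API v1.46; it is not a
	// real moby field.
	FancyField string
}

func adjustForClient(cliVersion string, s *containerSpec) {
	if cliVersion == "" {
		return
	}
	if versions.LessThan(cliVersion, "1.46") {
		// The negotiated client version predates the field, so reset it to
		// its zero value, exactly like the OomScoreAdj handling above.
		s.FancyField = ""
	}
}

func main() {
	s := &containerSpec{FancyField: "set by a 1.46 client"}
	adjustForClient("1.45", s)
	fmt.Printf("stripped: %v\n", s.FancyField == "")
}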
@@ -4,9 +4,8 @@ import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/mount"
|
||||
"github.com/docker/docker/api/types/swarm"
|
||||
"github.com/docker/go-units"
|
||||
)
|
||||
|
||||
func TestAdjustForAPIVersion(t *testing.T) {
|
||||
@@ -39,25 +38,13 @@ func TestAdjustForAPIVersion(t *testing.T) {
|
||||
ConfigName: "configRuntime",
|
||||
},
|
||||
},
|
||||
Ulimits: []*container.Ulimit{
|
||||
Ulimits: []*units.Ulimit{
|
||||
{
|
||||
Name: "nofile",
|
||||
Soft: 100,
|
||||
Hard: 200,
|
||||
},
|
||||
},
|
||||
Mounts: []mount.Mount{
|
||||
{
|
||||
Type: mount.TypeTmpfs,
|
||||
Source: "/foo",
|
||||
Target: "/bar",
|
||||
TmpfsOptions: &mount.TmpfsOptions{
|
||||
Options: [][]string{
|
||||
[]string{"exec"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Placement: &swarm.Placement{
|
||||
MaxReplicas: 222,
|
||||
@@ -70,19 +57,6 @@ func TestAdjustForAPIVersion(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
adjustForAPIVersion("1.46", spec)
|
||||
if !reflect.DeepEqual(
|
||||
spec.TaskTemplate.ContainerSpec.Mounts[0].TmpfsOptions.Options,
|
||||
[][]string{[]string{"exec"}},
|
||||
) {
|
||||
t.Error("TmpfsOptions.Options was stripped from spec")
|
||||
}
|
||||
|
||||
adjustForAPIVersion("1.45", spec)
|
||||
if len(spec.TaskTemplate.ContainerSpec.Mounts[0].TmpfsOptions.Options) != 0 {
|
||||
t.Error("TmpfsOptions.Options not stripped from spec")
|
||||
}
|
||||
|
||||
// first, does calling this with a later version correctly NOT strip
|
||||
// fields? do the later version first, so we can reuse this spec in the
|
||||
// next test.
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
|
||||
//go:build go1.21
|
||||
//go:build go1.19
|
||||
|
||||
package system // import "github.com/docker/docker/api/server/router/system"
|
||||
|
||||
|
||||
@@ -97,10 +97,6 @@ func (s *systemRouter) getInfo(ctx context.Context, w http.ResponseWriter, r *ht
|
||||
info.Runtimes[k] = system.RuntimeWithStatus{Runtime: rt.Runtime}
|
||||
}
|
||||
}
|
||||
if versions.LessThan(version, "1.46") {
|
||||
// Containerd field introduced in API v1.46.
|
||||
info.Containerd = nil
|
||||
}
|
||||
if versions.GreaterThanOrEqualTo(version, "1.42") {
|
||||
info.KernelMemory = false
|
||||
}
|
||||
@@ -277,18 +273,7 @@ func (s *systemRouter) getEvents(ctx context.Context, w http.ResponseWriter, r *
|
||||
buffered, l := s.backend.SubscribeToEvents(since, until, ef)
|
||||
defer s.backend.UnsubscribeFromEvents(l)
|
||||
|
||||
shouldSkip := func(ev events.Message) bool { return false }
|
||||
if versions.LessThan(httputils.VersionFromContext(ctx), "1.46") {
|
||||
// Image create events were added in API 1.46
|
||||
shouldSkip = func(ev events.Message) bool {
|
||||
return ev.Type == "image" && ev.Action == "create"
|
||||
}
|
||||
}
|
||||
|
||||
for _, ev := range buffered {
|
||||
if shouldSkip(ev) {
|
||||
continue
|
||||
}
|
||||
if err := enc.Encode(ev); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -306,9 +291,6 @@ func (s *systemRouter) getEvents(ctx context.Context, w http.ResponseWriter, r *
|
||||
log.G(ctx).Warnf("unexpected event message: %q", ev)
|
||||
continue
|
||||
}
|
||||
if shouldSkip(jev) {
|
||||
continue
|
||||
}
|
||||
if err := enc.Encode(jev); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -3,9 +3,11 @@ package volume // import "github.com/docker/docker/api/server/router/volume"
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/docker/docker/volume/service/opts"
|
||||
// TODO return types need to be refactored into pkg
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/docker/docker/api/types/volume"
|
||||
"github.com/docker/docker/volume/service/opts"
|
||||
)
|
||||
|
||||
// Backend is the methods that need to be implemented to provide
|
||||
@@ -15,7 +17,7 @@ type Backend interface {
|
||||
Get(ctx context.Context, name string, opts ...opts.GetOption) (*volume.Volume, error)
|
||||
Create(ctx context.Context, name, driverName string, opts ...opts.CreateOption) (*volume.Volume, error)
|
||||
Remove(ctx context.Context, name string, opts ...opts.RemoveOption) error
|
||||
Prune(ctx context.Context, pruneFilters filters.Args) (*volume.PruneReport, error)
|
||||
Prune(ctx context.Context, pruneFilters filters.Args) (*types.VolumesPruneReport, error)
|
||||
}
|
||||
|
||||
// ClusterBackend is the backend used for Swarm Cluster Volumes. Regular
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
"gotest.tools/v3/assert"
|
||||
|
||||
"github.com/docker/docker/api/server/httputils"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/docker/docker/api/types/volume"
|
||||
"github.com/docker/docker/errdefs"
|
||||
@@ -635,7 +636,7 @@ func (b *fakeVolumeBackend) Remove(_ context.Context, name string, o ...opts.Rem
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *fakeVolumeBackend) Prune(_ context.Context, _ filters.Args) (*volume.PruneReport, error) {
|
||||
func (b *fakeVolumeBackend) Prune(_ context.Context, _ filters.Args) (*types.VolumesPruneReport, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
|
||||
478
api/swagger.yaml
@@ -19,10 +19,10 @@ produces:
|
||||
consumes:
|
||||
- "application/json"
|
||||
- "text/plain"
|
||||
basePath: "/v1.46"
|
||||
basePath: "/v1.45"
|
||||
info:
|
||||
title: "Docker Engine API"
|
||||
version: "1.46"
|
||||
version: "1.45"
|
||||
x-logo:
|
||||
url: "https://docs.docker.com/assets/images/logo-docker-main.png"
|
||||
description: |
|
||||
@@ -55,8 +55,8 @@ info:
|
||||
the URL is not supported by the daemon, a HTTP `400 Bad Request` error message
|
||||
is returned.
|
||||
|
||||
If you omit the version-prefix, the current version of the API (v1.46) is used.
|
||||
For example, calling `/info` is the same as calling `/v1.46/info`. Using the
|
||||
If you omit the version-prefix, the current version of the API (v1.45) is used.
|
||||
For example, calling `/info` is the same as calling `/v1.45/info`. Using the
|
||||
API without a version-prefix is deprecated and will be removed in a future release.
|
||||
|
||||
Engine releases in the near future should support this version of the API,
|
||||
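The paragraph above explains that requests may carry a version prefix and that omitting it falls back to the daemon's current API version. A hedged example of pinning a Go client to one of the versions this file documents (which exact version string applies depends on which side of this diff you are on):

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	// WithVersion pins every request to /v1.45/...; without it the client
	// negotiates the highest version both sides support.
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithVersion("1.45"))
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ping, err := cli.Ping(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println("daemon reports API version:", ping.APIVersion)
}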
@@ -442,21 +442,6 @@ definitions:
|
||||
Mode:
|
||||
description: "The permission mode for the tmpfs mount in an integer."
|
||||
type: "integer"
|
||||
Options:
|
||||
description: |
|
||||
The options to be passed to the tmpfs mount. An array of arrays.
|
||||
Flag options should be provided as 1-length arrays. Other types
|
||||
should be provided as 2-length arrays, where the first item is
|
||||
the key and the second the value.
|
||||
type: "array"
|
||||
items:
|
||||
type: "array"
|
||||
minItems: 1
|
||||
maxItems: 2
|
||||
items:
|
||||
type: "string"
|
||||
example:
|
||||
[["noexec"]]
|
||||
|
||||
RestartPolicy:
|
||||
description: |
|
||||
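The Options schema above is the wire format for the TmpfsOptions.Options field handled elsewhere in this diff: flags are 1-element arrays, key/value pairs are 2-element arrays. A small sketch of how that looks when building a mount with the Go types, assuming the api/types/mount package from this change:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/mount"
)

func main() {
	m := mount.Mount{
		Type:   mount.TypeTmpfs,
		Target: "/bar",
		TmpfsOptions: &mount.TmpfsOptions{
			SizeBytes: 64 * 1024 * 1024,
			Options: [][]string{
				{"noexec"},       // flag option: one element
				{"mode", "0700"}, // key/value option: two elements
			},
		},
	}
	fmt.Printf("%+v\n", m)
}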
@@ -1371,289 +1356,6 @@ definitions:
|
||||
type: "string"
|
||||
example: ["/bin/sh", "-c"]
|
||||
|
||||
ImageConfig:
|
||||
description: |
|
||||
Configuration of the image. These fields are used as defaults
|
||||
when starting a container from the image.
|
||||
type: "object"
|
||||
properties:
|
||||
Hostname:
|
||||
description: |
|
||||
The hostname to use for the container, as a valid RFC 1123 hostname.
|
||||
|
||||
<p><br /></p>
|
||||
|
||||
> **Deprecated**: this field is not part of the image specification and is
|
||||
> always empty. It must not be used, and will be removed in API v1.47.
|
||||
type: "string"
|
||||
example: ""
|
||||
Domainname:
|
||||
description: |
|
||||
The domain name to use for the container.
|
||||
|
||||
<p><br /></p>
|
||||
|
||||
> **Deprecated**: this field is not part of the image specification and is
|
||||
> always empty. It must not be used, and will be removed in API v1.47.
|
||||
type: "string"
|
||||
example: ""
|
||||
User:
|
||||
description: "The user that commands are run as inside the container."
|
||||
type: "string"
|
||||
example: "web:web"
|
||||
AttachStdin:
|
||||
description: |
|
||||
Whether to attach to `stdin`.
|
||||
|
||||
<p><br /></p>
|
||||
|
||||
> **Deprecated**: this field is not part of the image specification and is
|
||||
> always false. It must not be used, and will be removed in API v1.47.
|
||||
type: "boolean"
|
||||
default: false
|
||||
example: false
|
||||
AttachStdout:
|
||||
description: |
|
||||
Whether to attach to `stdout`.
|
||||
|
||||
<p><br /></p>
|
||||
|
||||
> **Deprecated**: this field is not part of the image specification and is
|
||||
> always false. It must not be used, and will be removed in API v1.47.
|
||||
type: "boolean"
|
||||
default: false
|
||||
example: false
|
||||
AttachStderr:
|
||||
description: |
|
||||
Whether to attach to `stderr`.
|
||||
|
||||
<p><br /></p>
|
||||
|
||||
> **Deprecated**: this field is not part of the image specification and is
|
||||
> always false. It must not be used, and will be removed in API v1.47.
|
||||
type: "boolean"
|
||||
default: false
|
||||
example: false
|
||||
ExposedPorts:
|
||||
description: |
|
||||
An object mapping ports to an empty object in the form:
|
||||
|
||||
`{"<port>/<tcp|udp|sctp>": {}}`
|
||||
type: "object"
|
||||
x-nullable: true
|
||||
additionalProperties:
|
||||
type: "object"
|
||||
enum:
|
||||
- {}
|
||||
default: {}
|
||||
example: {
|
||||
"80/tcp": {},
|
||||
"443/tcp": {}
|
||||
}
|
||||
Tty:
|
||||
description: |
|
||||
Attach standard streams to a TTY, including `stdin` if it is not closed.
|
||||
|
||||
<p><br /></p>
|
||||
|
||||
> **Deprecated**: this field is not part of the image specification and is
|
||||
> always false. It must not be used, and will be removed in API v1.47.
|
||||
type: "boolean"
|
||||
default: false
|
||||
example: false
|
||||
OpenStdin:
|
||||
description: |
|
||||
Open `stdin`
|
||||
|
||||
<p><br /></p>
|
||||
|
||||
> **Deprecated**: this field is not part of the image specification and is
|
||||
> always false. It must not be used, and will be removed in API v1.47.
|
||||
type: "boolean"
|
||||
default: false
|
||||
example: false
|
||||
StdinOnce:
|
||||
description: |
|
||||
Close `stdin` after one attached client disconnects.
|
||||
|
||||
<p><br /></p>
|
||||
|
||||
> **Deprecated**: this field is not part of the image specification and is
|
||||
> always false. It must not be used, and will be removed in API v1.47.
|
||||
type: "boolean"
|
||||
default: false
|
||||
example: false
|
||||
Env:
|
||||
description: |
|
||||
A list of environment variables to set inside the container in the
|
||||
form `["VAR=value", ...]`. A variable without `=` is removed from the
|
||||
environment, rather than having an empty value.
|
||||
type: "array"
|
||||
items:
|
||||
type: "string"
|
||||
example:
|
||||
- "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
|
||||
Cmd:
|
||||
description: |
|
||||
Command to run specified as a string or an array of strings.
|
||||
type: "array"
|
||||
items:
|
||||
type: "string"
|
||||
example: ["/bin/sh"]
|
||||
Healthcheck:
|
||||
$ref: "#/definitions/HealthConfig"
|
||||
ArgsEscaped:
|
||||
description: "Command is already escaped (Windows only)"
|
||||
type: "boolean"
|
||||
default: false
|
||||
example: false
|
||||
x-nullable: true
|
||||
Image:
|
||||
description: |
|
||||
The name (or reference) of the image to use when creating the container,
|
||||
or which was used when the container was created.
|
||||
|
||||
<p><br /></p>
|
||||
|
||||
> **Deprecated**: this field is not part of the image specification and is
|
||||
> always empty. It must not be used, and will be removed in API v1.47.
|
||||
type: "string"
|
||||
default: ""
|
||||
example: ""
|
||||
Volumes:
|
||||
description: |
|
||||
An object mapping mount point paths inside the container to empty
|
||||
objects.
|
||||
type: "object"
|
||||
additionalProperties:
|
||||
type: "object"
|
||||
enum:
|
||||
- {}
|
||||
default: {}
|
||||
example:
|
||||
"/app/data": {}
|
||||
"/app/config": {}
|
||||
WorkingDir:
|
||||
description: "The working directory for commands to run in."
|
||||
type: "string"
|
||||
example: "/public/"
|
||||
Entrypoint:
|
||||
description: |
|
||||
The entry point for the container as a string or an array of strings.
|
||||
|
||||
If the array consists of exactly one empty string (`[""]`) then the
|
||||
entry point is reset to system default (i.e., the entry point used by
|
||||
docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`).
|
||||
type: "array"
|
||||
items:
|
||||
type: "string"
|
||||
example: []
|
||||
NetworkDisabled:
|
||||
description: |
|
||||
Disable networking for the container.
|
||||
|
||||
<p><br /></p>
|
||||
|
||||
> **Deprecated**: this field is not part of the image specification and is
|
||||
> always omitted. It must not be used, and will be removed in API v1.47.
|
||||
type: "boolean"
|
||||
default: false
|
||||
example: false
|
||||
x-nullable: true
|
||||
MacAddress:
|
||||
description: |
|
||||
MAC address of the container.
|
||||
|
||||
<p><br /></p>
|
||||
|
||||
> **Deprecated**: this field is not part of the image specification and is
|
||||
> always omitted. It must not be used, and will be removed in API v1.47.
|
||||
type: "string"
|
||||
default: ""
|
||||
example: ""
|
||||
x-nullable: true
|
||||
OnBuild:
|
||||
description: |
|
||||
`ONBUILD` metadata that were defined in the image's `Dockerfile`.
|
||||
type: "array"
|
||||
x-nullable: true
|
||||
items:
|
||||
type: "string"
|
||||
example: []
|
||||
Labels:
|
||||
description: "User-defined key/value metadata."
|
||||
type: "object"
|
||||
additionalProperties:
|
||||
type: "string"
|
||||
example:
|
||||
com.example.some-label: "some-value"
|
||||
com.example.some-other-label: "some-other-value"
|
||||
StopSignal:
|
||||
description: |
|
||||
Signal to stop a container as a string or unsigned integer.
|
||||
type: "string"
|
||||
example: "SIGTERM"
|
||||
x-nullable: true
|
||||
StopTimeout:
|
||||
description: |
|
||||
Timeout to stop a container in seconds.
|
||||
|
||||
<p><br /></p>
|
||||
|
||||
> **Deprecated**: this field is not part of the image specification and is
|
||||
> always omitted. It must not be used, and will be removed in API v1.47.
|
||||
type: "integer"
|
||||
default: 10
|
||||
x-nullable: true
|
||||
Shell:
|
||||
description: |
|
||||
Shell for when `RUN`, `CMD`, and `ENTRYPOINT` use a shell.
|
||||
type: "array"
|
||||
x-nullable: true
|
||||
items:
|
||||
type: "string"
|
||||
example: ["/bin/sh", "-c"]
|
||||
# FIXME(thaJeztah): temporarily using a full example to remove some "omitempty" fields. Remove once the fields are removed.
|
||||
example:
|
||||
"Hostname": ""
|
||||
"Domainname": ""
|
||||
"User": "web:web"
|
||||
"AttachStdin": false
|
||||
"AttachStdout": false
|
||||
"AttachStderr": false
|
||||
"ExposedPorts": {
|
||||
"80/tcp": {},
|
||||
"443/tcp": {}
|
||||
}
|
||||
"Tty": false
|
||||
"OpenStdin": false
|
||||
"StdinOnce": false
|
||||
"Env": ["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"]
|
||||
"Cmd": ["/bin/sh"]
|
||||
"Healthcheck": {
|
||||
"Test": ["string"],
|
||||
"Interval": 0,
|
||||
"Timeout": 0,
|
||||
"Retries": 0,
|
||||
"StartPeriod": 0,
|
||||
"StartInterval": 0
|
||||
}
|
||||
"ArgsEscaped": true
|
||||
"Image": ""
|
||||
"Volumes": {
|
||||
"/app/data": {},
|
||||
"/app/config": {}
|
||||
}
|
||||
"WorkingDir": "/public/"
|
||||
"Entrypoint": []
|
||||
"OnBuild": []
|
||||
"Labels": {
|
||||
"com.example.some-label": "some-value",
|
||||
"com.example.some-other-label": "some-other-value"
|
||||
}
|
||||
"StopSignal": "SIGTERM"
|
||||
"Shell": ["/bin/sh", "-c"]
|
||||
|
||||
NetworkingConfig:
|
||||
description: |
|
||||
NetworkingConfig represents the container's networking configuration for
|
||||
@@ -2056,7 +1758,7 @@ definitions:
|
||||
Depending on how the image was created, this field may be empty.
|
||||
type: "string"
|
||||
x-nullable: false
|
||||
example: "27.0.1"
|
||||
example: "20.10.7"
|
||||
Author:
|
||||
description: |
|
||||
Name of the author that was specified when committing the image, or as
|
||||
@@ -2065,7 +1767,7 @@ definitions:
|
||||
x-nullable: false
|
||||
example: ""
|
||||
Config:
|
||||
$ref: "#/definitions/ImageConfig"
|
||||
$ref: "#/definitions/ContainerConfig"
|
||||
Architecture:
|
||||
description: |
|
||||
Hardware CPU architecture that the image runs on.
|
||||
@@ -2142,7 +1844,6 @@ definitions:
|
||||
format: "dateTime"
|
||||
example: "2022-02-28T14:40:02.623929178Z"
|
||||
x-nullable: true
|
||||
|
||||
ImageSummary:
|
||||
type: "object"
|
||||
x-go-name: "Summary"
|
||||
@@ -2657,24 +2358,6 @@ definitions:
|
||||
type: "string"
|
||||
example: "10.133.77.91"
|
||||
|
||||
NetworkCreateResponse:
|
||||
description: "OK response to NetworkCreate operation"
|
||||
type: "object"
|
||||
title: "NetworkCreateResponse"
|
||||
x-go-name: "CreateResponse"
|
||||
required: [Id, Warning]
|
||||
properties:
|
||||
Id:
|
||||
description: "The ID of the created network."
|
||||
type: "string"
|
||||
x-nullable: false
|
||||
example: "b5c4fc71e8022147cd25de22b22173de4e3b170134117172eb595cb91b4e7e5d"
|
||||
Warning:
|
||||
description: "Warnings encountered when creating the container"
|
||||
type: "string"
|
||||
x-nullable: false
|
||||
example: ""
|
||||
|
||||
BuildInfo:
|
||||
type: "object"
|
||||
properties:
|
||||
@@ -2874,17 +2557,6 @@ definitions:
|
||||
example:
|
||||
- "server_x"
|
||||
- "server_y"
|
||||
DriverOpts:
|
||||
description: |
|
||||
DriverOpts is a mapping of driver options and values. These options
|
||||
are passed directly to the driver and are driver specific.
|
||||
type: "object"
|
||||
x-nullable: true
|
||||
additionalProperties:
|
||||
type: "string"
|
||||
example:
|
||||
com.example.some-label: "some-value"
|
||||
com.example.some-other-label: "some-other-value"
|
||||
|
||||
# Operational data
|
||||
NetworkID:
|
||||
@@ -2928,6 +2600,17 @@ definitions:
|
||||
type: "integer"
|
||||
format: "int64"
|
||||
example: 64
|
||||
DriverOpts:
|
||||
description: |
|
||||
DriverOpts is a mapping of driver options and values. These options
|
||||
are passed directly to the driver and are driver specific.
|
||||
type: "object"
|
||||
x-nullable: true
|
||||
additionalProperties:
|
||||
type: "string"
|
||||
example:
|
||||
com.example.some-label: "some-value"
|
||||
com.example.some-other-label: "some-other-value"
|
||||
DNSNames:
|
||||
description: |
|
||||
List of all DNS names an endpoint has on a specific network. This
|
||||
@@ -4099,13 +3782,6 @@ definitions:
|
||||
but this is just provided for lookup/display purposes. The
|
||||
secret in the reference will be identified by its ID.
|
||||
type: "string"
|
||||
OomScoreAdj:
|
||||
type: "integer"
|
||||
format: "int64"
|
||||
description: |
|
||||
An integer value containing the score given to the container in
|
||||
order to tune OOM killer preferences.
|
||||
example: 0
|
||||
Configs:
|
||||
description: |
|
||||
Configs contains references to zero or more configs that will be
|
||||
@@ -4302,7 +3978,7 @@ definitions:
|
||||
`node.platform.os` | Node operating system | `node.platform.os==windows`
|
||||
`node.platform.arch` | Node architecture | `node.platform.arch==x86_64`
|
||||
`node.labels` | User-defined node labels | `node.labels.security==high`
|
||||
`engine.labels` | Docker Engine's labels | `engine.labels.operatingsystem==ubuntu-24.04`
|
||||
`engine.labels` | Docker Engine's labels | `engine.labels.operatingsystem==ubuntu-14.04`
|
||||
|
||||
`engine.labels` apply to Docker Engine labels like operating system,
|
||||
drivers, etc. Swarm administrators add `node.labels` for operational
|
||||
@@ -5025,12 +4701,6 @@ definitions:
|
||||
properties:
|
||||
NetworkMode:
|
||||
type: "string"
|
||||
Annotations:
|
||||
description: "Arbitrary key-value metadata attached to container"
|
||||
type: "object"
|
||||
x-nullable: true
|
||||
additionalProperties:
|
||||
type: "string"
|
||||
NetworkSettings:
|
||||
description: "A summary of the container's network settings"
|
||||
type: "object"
|
||||
@@ -5299,7 +4969,7 @@ definitions:
|
||||
Version of the component
|
||||
type: "string"
|
||||
x-nullable: false
|
||||
example: "27.0.1"
|
||||
example: "19.03.12"
|
||||
Details:
|
||||
description: |
|
||||
Key/value pairs of strings with additional information about the
|
||||
@@ -5313,17 +4983,17 @@ definitions:
|
||||
Version:
|
||||
description: "The version of the daemon"
|
||||
type: "string"
|
||||
example: "27.0.1"
|
||||
example: "19.03.12"
|
||||
ApiVersion:
|
||||
description: |
|
||||
The default (and highest) API version that is supported by the daemon
|
||||
type: "string"
|
||||
example: "1.46"
|
||||
example: "1.40"
|
||||
MinAPIVersion:
|
||||
description: |
|
||||
The minimum API version that is supported by the daemon
|
||||
type: "string"
|
||||
example: "1.24"
|
||||
example: "1.12"
|
||||
GitCommit:
|
||||
description: |
|
||||
The Git commit of the source code that was used to build the daemon
|
||||
@@ -5334,7 +5004,7 @@ definitions:
|
||||
The version Go used to compile the daemon, and the version of the Go
|
||||
runtime in use.
|
||||
type: "string"
|
||||
example: "go1.21.11"
|
||||
example: "go1.22.7"
|
||||
Os:
|
||||
description: |
|
||||
The operating system that the daemon is running on ("linux" or "windows")
|
||||
@@ -5351,7 +5021,7 @@ definitions:
|
||||
|
||||
This field is omitted when empty.
|
||||
type: "string"
|
||||
example: "6.8.0-31-generic"
|
||||
example: "4.19.76-linuxkit"
|
||||
Experimental:
|
||||
description: |
|
||||
Indicates if the daemon is started with experimental features enabled.
|
||||
@@ -5557,13 +5227,13 @@ definitions:
|
||||
information is queried from the <kbd>HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\</kbd>
|
||||
registry value, for example _"10.0 14393 (14393.1198.amd64fre.rs1_release_sec.170427-1353)"_.
|
||||
type: "string"
|
||||
example: "6.8.0-31-generic"
|
||||
example: "4.9.38-moby"
|
||||
OperatingSystem:
|
||||
description: |
|
||||
Name of the host's operating system, for example: "Ubuntu 24.04 LTS"
|
||||
Name of the host's operating system, for example: "Ubuntu 16.04.2 LTS"
|
||||
or "Windows Server 2016 Datacenter"
|
||||
type: "string"
|
||||
example: "Ubuntu 24.04 LTS"
|
||||
example: "Alpine Linux v3.5"
|
||||
OSVersion:
|
||||
description: |
|
||||
Version of the host's operating system
|
||||
@@ -5574,7 +5244,7 @@ definitions:
|
||||
> very existence, and the formatting of values, should not be considered
|
||||
> stable, and may change without notice.
|
||||
type: "string"
|
||||
example: "24.04"
|
||||
example: "16.04"
|
||||
OSType:
|
||||
description: |
|
||||
Generic type of the operating system of the host, as returned by the
|
||||
@@ -5676,7 +5346,7 @@ definitions:
|
||||
description: |
|
||||
Version string of the daemon.
|
||||
type: "string"
|
||||
example: "27.0.1"
|
||||
example: "24.0.2"
|
||||
Runtimes:
|
||||
description: |
|
||||
List of [OCI compliant](https://github.com/opencontainers/runtime-spec)
|
||||
@@ -5828,58 +5498,6 @@ definitions:
|
||||
example:
|
||||
- "/etc/cdi"
|
||||
- "/var/run/cdi"
|
||||
Containerd:
|
||||
$ref: "#/definitions/ContainerdInfo"
|
||||
x-nullable: true
|
||||
|
||||
ContainerdInfo:
|
||||
description: |
|
||||
Information for connecting to the containerd instance that is used by the daemon.
|
||||
This is included for debugging purposes only.
|
||||
type: "object"
|
||||
properties:
|
||||
Address:
|
||||
description: "The address of the containerd socket."
|
||||
type: "string"
|
||||
example: "/run/containerd/containerd.sock"
|
||||
Namespaces:
|
||||
description: |
|
||||
The namespaces that the daemon uses for running containers and
|
||||
plugins in containerd. These namespaces can be configured in the
|
||||
daemon configuration, and are considered to be used exclusively
|
||||
by the daemon. Tampering with the containerd instance may cause
|
||||
unexpected behavior.
|
||||
|
||||
As these namespaces are considered to be exclusively accessed
|
||||
by the daemon, it is not recommended to change these values,
|
||||
or to change them to a value that is used by other systems,
|
||||
such as cri-containerd.
|
||||
type: "object"
|
||||
properties:
|
||||
Containers:
|
||||
description: |
|
||||
The default containerd namespace used for containers managed
|
||||
by the daemon.
|
||||
|
||||
The default namespace for containers is "moby", but will be
|
||||
suffixed with the `<uid>.<gid>` of the remapped `root` if
|
||||
user-namespaces are enabled and the containerd image-store
|
||||
is used.
|
||||
type: "string"
|
||||
default: "moby"
|
||||
example: "moby"
|
||||
Plugins:
|
||||
description: |
|
||||
The default containerd namespace used for plugins managed by
|
||||
the daemon.
|
||||
|
||||
The default namespace for plugins is "plugins.moby", but will be
|
||||
suffixed with the `<uid>.<gid>` of the remapped `root` if
|
||||
user-namespaces are enabled and the containerd image-store
|
||||
is used.
|
||||
type: "string"
|
||||
default: "plugins.moby"
|
||||
example: "plugins.moby"
|
||||
|
||||
# PluginsInfo is a temp struct holding Plugins name
|
||||
# registered with docker daemon. It is used by Info struct
|
||||
@@ -6732,8 +6350,6 @@ paths:
|
||||
SizeRootFs: 0
|
||||
HostConfig:
|
||||
NetworkMode: "default"
|
||||
Annotations:
|
||||
io.kubernetes.docker.type: "container"
|
||||
NetworkSettings:
|
||||
Networks:
|
||||
bridge:
|
||||
@@ -6769,9 +6385,6 @@ paths:
|
||||
SizeRootFs: 0
|
||||
HostConfig:
|
||||
NetworkMode: "default"
|
||||
Annotations:
|
||||
io.kubernetes.docker.type: "container"
|
||||
io.kubernetes.sandbox.id: "3befe639bed0fd6afdd65fd1fa84506756f59360ec4adc270b0fdac9be22b4d3"
|
||||
NetworkSettings:
|
||||
Networks:
|
||||
bridge:
|
||||
@@ -6800,9 +6413,6 @@ paths:
|
||||
SizeRootFs: 0
|
||||
HostConfig:
|
||||
NetworkMode: "default"
|
||||
Annotations:
|
||||
io.kubernetes.image.id: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82"
|
||||
io.kubernetes.image.name: "ubuntu:latest"
|
||||
NetworkSettings:
|
||||
Networks:
|
||||
bridge:
|
||||
@@ -6831,8 +6441,6 @@ paths:
|
||||
SizeRootFs: 0
|
||||
HostConfig:
|
||||
NetworkMode: "default"
|
||||
Annotations:
|
||||
io.kubernetes.config.source: "api"
|
||||
NetworkSettings:
|
||||
Networks:
|
||||
bridge:
|
||||
@@ -9110,11 +8718,6 @@ paths:
|
||||
details.
|
||||
type: "string"
|
||||
required: true
|
||||
- name: "platform"
|
||||
in: "query"
|
||||
description: "Select a platform-specific manifest to be pushed. OCI platform (JSON encoded)"
|
||||
type: "string"
|
||||
x-nullable: true
|
||||
tags: ["Image"]
|
||||
/images/{name}/tag:
|
||||
post:
|
||||
@@ -9563,7 +9166,7 @@ paths:
|
||||
|
||||
Containers report these events: `attach`, `commit`, `copy`, `create`, `destroy`, `detach`, `die`, `exec_create`, `exec_detach`, `exec_start`, `exec_die`, `export`, `health_status`, `kill`, `oom`, `pause`, `rename`, `resize`, `restart`, `start`, `stop`, `top`, `unpause`, `update`, and `prune`
|
||||
|
||||
Images report these events: `create`, `delete`, `import`, `load`, `pull`, `push`, `save`, `tag`, `untag`, and `prune`
|
||||
Images report these events: `delete`, `import`, `load`, `pull`, `push`, `save`, `tag`, `untag`, and `prune`
|
||||
|
||||
Volumes report these events: `create`, `mount`, `unmount`, `destroy`, and `prune`
|
||||
|
||||
@@ -10519,9 +10122,19 @@ paths:
|
||||
- "application/json"
|
||||
responses:
|
||||
201:
|
||||
description: "Network created successfully"
|
||||
description: "No error"
|
||||
schema:
|
||||
$ref: "#/definitions/NetworkCreateResponse"
|
||||
type: "object"
|
||||
title: "NetworkCreateResponse"
|
||||
properties:
|
||||
Id:
|
||||
description: "The ID of the created network."
|
||||
type: "string"
|
||||
Warning:
|
||||
type: "string"
|
||||
example:
|
||||
Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30"
|
||||
Warning: ""
|
||||
400:
|
||||
description: "bad parameter"
|
||||
schema:
|
||||
@@ -10554,6 +10167,11 @@ paths:
|
||||
description: "The network's name."
|
||||
type: "string"
|
||||
example: "my_network"
|
||||
CheckDuplicate:
|
||||
description: |
|
||||
Deprecated: CheckDuplicate is now always enabled.
|
||||
type: "boolean"
|
||||
example: true
|
||||
Driver:
|
||||
description: "Name of the network driver plugin to use."
|
||||
type: "string"
|
||||
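Putting the request and response schemas above together, a hedged sketch of creating a network through the Go client; the option and response types follow the newer network package that this diff introduces (older clients pass types.NetworkCreate and get types.NetworkCreateResponse instead):

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/network"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	resp, err := cli.NetworkCreate(context.Background(), "my_network", network.CreateOptions{
		Driver: "bridge",
		Labels: map[string]string{"com.example.some-label": "some-value"},
	})
	if err != nil {
		panic(err)
	}
	// Matches the NetworkCreateResponse schema above: the new network's ID
	// plus any warnings emitted while creating it.
	fmt.Println(resp.ID, resp.Warning)
}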
@@ -11726,7 +11344,6 @@ paths:
|
||||
Mode: 384
|
||||
SecretID: "fpjqlhnwb19zds35k8wn80lq9"
|
||||
SecretName: "example_org_domain_key"
|
||||
OomScoreAdj: 0
|
||||
LogDriver:
|
||||
Name: "json-file"
|
||||
Options:
|
||||
@@ -11879,7 +11496,6 @@ paths:
|
||||
Image: "busybox"
|
||||
Args:
|
||||
- "top"
|
||||
OomScoreAdj: 0
|
||||
Resources:
|
||||
Limits: {}
|
||||
Reservations: {}
|
||||
|
||||
@@ -1,26 +0,0 @@
|
||||
package auxprogress
|
||||
|
||||
import (
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
// ManifestPushedInsteadOfIndex is a note that is sent when a manifest is pushed
|
||||
// instead of an index. It is sent when the pushed image is a multi-platform
|
||||
// index, but the whole index couldn't be pushed.
|
||||
type ManifestPushedInsteadOfIndex struct {
|
||||
ManifestPushedInsteadOfIndex bool `json:"manifestPushedInsteadOfIndex"` // Always true
|
||||
|
||||
// OriginalIndex is the descriptor of the original image index.
|
||||
OriginalIndex ocispec.Descriptor `json:"originalIndex"`
|
||||
|
||||
// SelectedManifest is the descriptor of the manifest that was pushed instead.
|
||||
SelectedManifest ocispec.Descriptor `json:"selectedManifest"`
|
||||
}
|
||||
|
||||
// ContentMissing is a note that is sent when push fails because the content is missing.
|
||||
type ContentMissing struct {
|
||||
ContentMissing bool `json:"contentMissing"` // Always true
|
||||
|
||||
// Desc is the descriptor of the root object that was attempted to be pushed.
|
||||
Desc ocispec.Descriptor `json:"desc"`
|
||||
}
|
||||
@@ -30,7 +30,7 @@ type ContainerRmConfig struct {
|
||||
|
||||
// ContainerAttachConfig holds the streams to use when connecting to a container to view logs.
|
||||
type ContainerAttachConfig struct {
|
||||
GetStreams func(multiplexed bool, cancel func()) (io.ReadCloser, io.Writer, io.Writer, error)
|
||||
GetStreams func(multiplexed bool) (io.ReadCloser, io.Writer, io.Writer, error)
|
||||
UseStdin bool
|
||||
UseStdout bool
|
||||
UseStderr bool
|
||||
@@ -92,14 +92,6 @@ type ContainerStatsConfig struct {
|
||||
OutStream func() io.Writer
|
||||
}
|
||||
|
||||
// ExecStartConfig holds the options to start container's exec.
|
||||
type ExecStartConfig struct {
|
||||
Stdin io.Reader
|
||||
Stdout io.Writer
|
||||
Stderr io.Writer
|
||||
ConsoleSize *[2]uint `json:",omitempty"`
|
||||
}
|
||||
|
||||
// ExecInspect holds information about a running process started
|
||||
// with docker exec.
|
||||
type ExecInspect struct {
|
||||
|
||||
@@ -2,15 +2,43 @@ package types // import "github.com/docker/docker/api/types"
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"io"
|
||||
"net"
|
||||
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/docker/docker/api/types/registry"
|
||||
units "github.com/docker/go-units"
|
||||
)
|
||||
|
||||
// ContainerExecInspect holds information returned by exec inspect.
|
||||
type ContainerExecInspect struct {
|
||||
ExecID string `json:"ID"`
|
||||
ContainerID string
|
||||
Running bool
|
||||
ExitCode int
|
||||
Pid int
|
||||
}
|
||||
|
||||
// CopyToContainerOptions holds information
|
||||
// about files to copy into a container
|
||||
type CopyToContainerOptions struct {
|
||||
AllowOverwriteDirWithFile bool
|
||||
CopyUIDGID bool
|
||||
}
|
||||
|
||||
// EventsOptions holds parameters to filter events with.
|
||||
type EventsOptions struct {
|
||||
Since string
|
||||
Until string
|
||||
Filters filters.Args
|
||||
}
|
||||
|
||||
// NetworkListOptions holds parameters to filter the list of networks with.
|
||||
type NetworkListOptions struct {
|
||||
Filters filters.Args
|
||||
}
|
||||
|
||||
// NewHijackedResponse initializes a HijackedResponse type
|
||||
func NewHijackedResponse(conn net.Conn, mediaType string) HijackedResponse {
|
||||
return HijackedResponse{Conn: conn, Reader: bufio.NewReader(conn), mediaType: mediaType}
|
||||
@@ -73,7 +101,7 @@ type ImageBuildOptions struct {
|
||||
NetworkMode string
|
||||
ShmSize int64
|
||||
Dockerfile string
|
||||
Ulimits []*container.Ulimit
|
||||
Ulimits []*units.Ulimit
|
||||
// BuildArgs needs to be a *string instead of just a string so that
|
||||
// we can tell the difference between "" (empty string) and no value
|
||||
// at all (nil). See the parsing of buildArgs in
|
||||
@@ -94,7 +122,7 @@ type ImageBuildOptions struct {
|
||||
Target string
|
||||
SessionID string
|
||||
Platform string
|
||||
// Version specifies the version of the underlying builder to use
|
||||
// Version specifies the version of the unerlying builder to use
|
||||
Version BuilderVersion
|
||||
// BuildID is an optional identifier that can be passed together with the
|
||||
// build request. The same identifier can be used to gracefully cancel the
|
||||
@@ -129,13 +157,34 @@ type ImageBuildResponse struct {
|
||||
OSType string
|
||||
}
|
||||
|
||||
// ImageImportSource holds source information for ImageImport
|
||||
type ImageImportSource struct {
|
||||
Source io.Reader // Source is the data to send to the server to create this image from. You must set SourceName to "-" to leverage this.
|
||||
SourceName string // SourceName is the name of the image to pull. Set to "-" to leverage the Source attribute.
|
||||
}
|
||||
|
||||
// ImageLoadResponse returns information to the client about a load process.
|
||||
type ImageLoadResponse struct {
|
||||
// Body must be closed to avoid a resource leak
|
||||
Body io.ReadCloser
|
||||
JSON bool
|
||||
}
|
||||
|
||||
// RequestPrivilegeFunc is a function interface that
|
||||
// clients can supply to retry operations after
|
||||
// getting an authorization error.
|
||||
// This function returns the registry authentication
|
||||
// header value in base 64 format, or an error
|
||||
// if the privilege request fails.
|
||||
type RequestPrivilegeFunc func(context.Context) (string, error)
|
||||
type RequestPrivilegeFunc func() (string, error)
|
||||
|
||||
// ImageSearchOptions holds parameters to search images with.
|
||||
type ImageSearchOptions struct {
|
||||
RegistryAuth string
|
||||
PrivilegeFunc RequestPrivilegeFunc
|
||||
Filters filters.Args
|
||||
Limit int
|
||||
}
|
||||
|
||||
// NodeListOptions holds parameters to list nodes with.
|
||||
type NodeListOptions struct {
|
||||
@@ -240,7 +289,7 @@ type PluginInstallOptions struct {
|
||||
RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
|
||||
RemoteRef string // RemoteRef is the plugin name on the registry
|
||||
PrivilegeFunc RequestPrivilegeFunc
|
||||
AcceptPermissionsFunc func(context.Context, PluginPrivileges) (bool, error)
|
||||
AcceptPermissionsFunc func(PluginPrivileges) (bool, error)
|
||||
Args []string
|
||||
}
|
||||
|
||||
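Both RequestPrivilegeFunc and PluginInstallOptions.AcceptPermissionsFunc above gain a context parameter on one side of this diff. A hedged sketch of supplying such a callback for an image pull; the credentials and refresh logic are placeholders, only the shape of the function matters:

package main

import (
	"context"
	"encoding/base64"
	"encoding/json"

	"github.com/docker/docker/api/types/image"
	"github.com/docker/docker/api/types/registry"
)

// refreshAuth is called when a pull hits an authorization error; it returns a
// fresh base64-encoded registry auth header (static credentials here, purely
// for illustration).
func refreshAuth(ctx context.Context) (string, error) {
	authCfg := registry.AuthConfig{Username: "user", Password: "secret"}
	buf, err := json.Marshal(authCfg)
	if err != nil {
		return "", err
	}
	return base64.URLEncoding.EncodeToString(buf), nil
}

func main() {
	// PullOptions.PrivilegeFunc has the context-aware signature on the newer
	// side of this diff; the older side drops the context parameter.
	opts := image.PullOptions{PrivilegeFunc: refreshAuth}
	_ = opts
}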
|
||||
18
api/types/configs.go
Normal file
@@ -0,0 +1,18 @@
|
||||
package types // import "github.com/docker/docker/api/types"
|
||||
|
||||
// ExecConfig is a small subset of the Config struct that holds the configuration
|
||||
// for the exec feature of docker.
|
||||
type ExecConfig struct {
|
||||
User string // User that will run the command
|
||||
Privileged bool // Is the container in privileged mode
|
||||
Tty bool // Attach standard streams to a tty.
|
||||
ConsoleSize *[2]uint `json:",omitempty"` // Initial console size [height, width]
|
||||
AttachStdin bool // Attach the standard input, makes possible user interaction
|
||||
AttachStderr bool // Attach the standard error
|
||||
AttachStdout bool // Attach the standard output
|
||||
Detach bool // Execute in detach mode
|
||||
DetachKeys string // Escape keys for detach
|
||||
Env []string // Environment variables
|
||||
WorkingDir string // Working directory
|
||||
Cmd []string // Execution commands and args
|
||||
}
|
||||
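api/types/configs.go above (re)introduces the legacy ExecConfig struct. A short sketch of filling it in for a typical non-interactive exec that captures output; the values are arbitrary examples:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types"
)

func main() {
	execCfg := types.ExecConfig{
		User:         "root",
		AttachStdout: true,
		AttachStderr: true,
		Env:          []string{"DEBUG=1"},
		WorkingDir:   "/",
		Cmd:          []string{"sh", "-c", "echo hello"},
	}
	fmt.Printf("%+v\n", execCfg)
}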
@@ -1,6 +1,7 @@
|
||||
package container // import "github.com/docker/docker/api/types/container"
|
||||
|
||||
import (
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types/strslice"
|
||||
@@ -35,6 +36,14 @@ type StopOptions struct {
|
||||
// HealthConfig holds configuration settings for the HEALTHCHECK feature.
|
||||
type HealthConfig = dockerspec.HealthcheckConfig
|
||||
|
||||
// ExecStartOptions holds the options to start container's exec.
|
||||
type ExecStartOptions struct {
|
||||
Stdin io.Reader
|
||||
Stdout io.Writer
|
||||
Stderr io.Writer
|
||||
ConsoleSize *[2]uint `json:",omitempty"`
|
||||
}
|
||||
|
||||
// Config contains the configuration data about a container.
|
||||
// It should hold only portable information about the container.
|
||||
// Here, "portable" means "independent from the host we are running on".
|
||||
|
||||
@@ -1,44 +0,0 @@
|
||||
package container
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
// PruneReport contains the response for Engine API:
|
||||
// POST "/containers/prune"
|
||||
type PruneReport struct {
|
||||
ContainersDeleted []string
|
||||
SpaceReclaimed uint64
|
||||
}
|
||||
|
||||
// PathStat is used to encode the header from
|
||||
// GET "/containers/{name:.*}/archive"
|
||||
// "Name" is the file or directory name.
|
||||
type PathStat struct {
|
||||
Name string `json:"name"`
|
||||
Size int64 `json:"size"`
|
||||
Mode os.FileMode `json:"mode"`
|
||||
Mtime time.Time `json:"mtime"`
|
||||
LinkTarget string `json:"linkTarget"`
|
||||
}
|
||||
|
||||
// CopyToContainerOptions holds information
|
||||
// about files to copy into a container
|
||||
type CopyToContainerOptions struct {
|
||||
AllowOverwriteDirWithFile bool
|
||||
CopyUIDGID bool
|
||||
}
|
||||
|
||||
// StatsResponseReader wraps an io.ReadCloser to read (a stream of) stats
|
||||
// for a container, as produced by the GET "/stats" endpoint.
|
||||
//
|
||||
// The OSType field is set to the server's platform to allow
|
||||
// platform-specific handling of the response.
|
||||
//
|
||||
// TODO(thaJeztah): remove this wrapper, and make OSType part of [StatsResponse].
|
||||
type StatsResponseReader struct {
|
||||
Body io.ReadCloser `json:"body"`
|
||||
OSType string `json:"ostype"`
|
||||
}
|
||||
@@ -1,13 +0,0 @@
|
||||
package container
|
||||
|
||||
import "github.com/docker/docker/api/types/network"
|
||||
|
||||
// CreateRequest is the request message sent to the server for container
|
||||
// create calls. It is a config wrapper that holds the container [Config]
|
||||
// (portable) and the corresponding [HostConfig] (non-portable) and
|
||||
// [network.NetworkingConfig].
|
||||
type CreateRequest struct {
|
||||
*Config
|
||||
HostConfig *HostConfig `json:"HostConfig,omitempty"`
|
||||
NetworkingConfig *network.NetworkingConfig `json:"NetworkingConfig,omitempty"`
|
||||
}
|
||||
@@ -1,43 +0,0 @@
|
||||
package container
|
||||
|
||||
// ExecOptions is a small subset of the Config struct that holds the configuration
|
||||
// for the exec feature of docker.
|
||||
type ExecOptions struct {
|
||||
User string // User that will run the command
|
||||
Privileged bool // Is the container in privileged mode
|
||||
Tty bool // Attach standard streams to a tty.
|
||||
ConsoleSize *[2]uint `json:",omitempty"` // Initial console size [height, width]
|
||||
AttachStdin bool // Attach the standard input, makes possible user interaction
|
||||
AttachStderr bool // Attach the standard error
|
||||
AttachStdout bool // Attach the standard output
|
||||
Detach bool // Execute in detach mode
|
||||
DetachKeys string // Escape keys for detach
|
||||
Env []string // Environment variables
|
||||
WorkingDir string // Working directory
|
||||
Cmd []string // Execution commands and args
|
||||
}
|
||||
|
||||
// ExecStartOptions is a temp struct used by execStart
|
||||
// Config fields are part of ExecConfig in the runconfig package
|
||||
type ExecStartOptions struct {
|
||||
// ExecStart will first check if it's detached
|
||||
Detach bool
|
||||
// Check if there's a tty
|
||||
Tty bool
|
||||
// Terminal size [height, width], unused if Tty == false
|
||||
ConsoleSize *[2]uint `json:",omitempty"`
|
||||
}
|
||||
|
||||
// ExecAttachOptions is a temp struct used by execAttach.
|
||||
//
|
||||
// TODO(thaJeztah): make this a separate type; ContainerExecAttach does not use the Detach option, and cannot run detached.
|
||||
type ExecAttachOptions = ExecStartOptions
|
||||
|
||||
// ExecInspect holds information returned by exec inspect.
|
||||
type ExecInspect struct {
|
||||
ExecID string `json:"ID"`
|
||||
ContainerID string
|
||||
Running bool
|
||||
ExitCode int
|
||||
Pid int
|
||||
}
|
||||
@@ -1,6 +1,7 @@
|
||||
package container // import "github.com/docker/docker/api/types/container"
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
@@ -325,12 +326,12 @@ func ValidateRestartPolicy(policy RestartPolicy) error {
|
||||
if policy.MaximumRetryCount < 0 {
|
||||
msg += " and cannot be negative"
|
||||
}
|
||||
return &errInvalidParameter{fmt.Errorf(msg)}
|
||||
return &errInvalidParameter{errors.New(msg)}
|
||||
}
|
||||
return nil
|
||||
case RestartPolicyOnFailure:
|
||||
if policy.MaximumRetryCount < 0 {
|
||||
return &errInvalidParameter{fmt.Errorf("invalid restart policy: maximum retry count cannot be negative")}
|
||||
return &errInvalidParameter{errors.New("invalid restart policy: maximum retry count cannot be negative")}
|
||||
}
|
||||
return nil
|
||||
case "":
|
||||
@@ -360,12 +361,6 @@ type LogConfig struct {
|
||||
Config map[string]string
|
||||
}
|
||||
|
||||
// Ulimit is an alias for [units.Ulimit], which may be moving to a different
|
||||
// location or become a local type. This alias is to help transitioning.
|
||||
//
|
||||
// Users are recommended to use this alias instead of using [units.Ulimit] directly.
|
||||
type Ulimit = units.Ulimit
|
||||
|
||||
// Resources contains container's resources (cgroups config, ulimits...)
|
||||
type Resources struct {
|
||||
// Applicable to all platforms
|
||||
@@ -393,14 +388,14 @@ type Resources struct {
|
||||
|
||||
// KernelMemory specifies the kernel memory limit (in bytes) for the container.
|
||||
// Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes.
|
||||
KernelMemory int64 `json:",omitempty"`
|
||||
KernelMemoryTCP int64 `json:",omitempty"` // Hard limit for kernel TCP buffer memory (in bytes)
|
||||
MemoryReservation int64 // Memory soft limit (in bytes)
|
||||
MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap
|
||||
MemorySwappiness *int64 // Tuning container memory swappiness behaviour
|
||||
OomKillDisable *bool // Whether to disable OOM Killer or not
|
||||
PidsLimit *int64 // Setting PIDs limit for a container; Set `0` or `-1` for unlimited, or `null` to not change.
|
||||
Ulimits []*Ulimit // List of ulimits to be set in the container
|
||||
KernelMemory int64 `json:",omitempty"`
|
||||
KernelMemoryTCP int64 `json:",omitempty"` // Hard limit for kernel TCP buffer memory (in bytes)
|
||||
MemoryReservation int64 // Memory soft limit (in bytes)
|
||||
MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap
|
||||
MemorySwappiness *int64 // Tuning container memory swappiness behaviour
|
||||
OomKillDisable *bool // Whether to disable OOM Killer or not
|
||||
PidsLimit *int64 // Setting PIDs limit for a container; Set `0` or `-1` for unlimited, or `null` to not change.
|
||||
Ulimits []*units.Ulimit // List of ulimits to be set in the container
|
||||
|
||||
// Applicable to Windows
|
||||
CPUCount int64 `json:"CpuCount"` // CPU count
|
||||
|
||||
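One side of this diff aliases container.Ulimit to units.Ulimit so callers can stop importing go-units directly. A hedged sketch of building Resources with the alias (on the side of the diff that defines it); the two spellings marshal identically:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
)

func main() {
	res := container.Resources{
		Memory: 256 * 1024 * 1024, // bytes
		Ulimits: []*container.Ulimit{
			{Name: "nofile", Soft: 100, Hard: 200},
		},
	}
	fmt.Printf("%+v\n", res.Ulimits[0])
}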
@@ -9,6 +9,24 @@ func (i Isolation) IsValid() bool {
|
||||
return i.IsDefault()
|
||||
}
|
||||
|
||||
// NetworkName returns the name of the network stack.
|
||||
func (n NetworkMode) NetworkName() string {
|
||||
if n.IsBridge() {
|
||||
return network.NetworkBridge
|
||||
} else if n.IsHost() {
|
||||
return network.NetworkHost
|
||||
} else if n.IsContainer() {
|
||||
return "container"
|
||||
} else if n.IsNone() {
|
||||
return network.NetworkNone
|
||||
} else if n.IsDefault() {
|
||||
return network.NetworkDefault
|
||||
} else if n.IsUserDefined() {
|
||||
return n.UserDefined()
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// IsBridge indicates whether container uses the bridge network stack
|
||||
func (n NetworkMode) IsBridge() bool {
|
||||
return n == network.NetworkBridge
|
||||
@@ -23,23 +41,3 @@ func (n NetworkMode) IsHost() bool {
|
||||
func (n NetworkMode) IsUserDefined() bool {
|
||||
return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer()
|
||||
}
|
||||
|
||||
// NetworkName returns the name of the network stack.
|
||||
func (n NetworkMode) NetworkName() string {
|
||||
switch {
|
||||
case n.IsDefault():
|
||||
return network.NetworkDefault
|
||||
case n.IsBridge():
|
||||
return network.NetworkBridge
|
||||
case n.IsHost():
|
||||
return network.NetworkHost
|
||||
case n.IsNone():
|
||||
return network.NetworkNone
|
||||
case n.IsContainer():
|
||||
return "container"
|
||||
case n.IsUserDefined():
|
||||
return n.UserDefined()
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,11 +2,6 @@ package container // import "github.com/docker/docker/api/types/container"
|
||||
|
||||
import "github.com/docker/docker/api/types/network"
|
||||
|
||||
// IsValid indicates if an isolation technology is valid
|
||||
func (i Isolation) IsValid() bool {
|
||||
return i.IsDefault() || i.IsHyperV() || i.IsProcess()
|
||||
}
|
||||
|
||||
// IsBridge indicates whether container uses the bridge network stack
|
||||
// in windows it is given the name NAT
|
||||
func (n NetworkMode) IsBridge() bool {
|
||||
@@ -24,24 +19,24 @@ func (n NetworkMode) IsUserDefined() bool {
|
||||
return !n.IsDefault() && !n.IsNone() && !n.IsBridge() && !n.IsContainer()
|
||||
}
|
||||
|
||||
// IsValid indicates if an isolation technology is valid
|
||||
func (i Isolation) IsValid() bool {
|
||||
return i.IsDefault() || i.IsHyperV() || i.IsProcess()
|
||||
}
|
||||
|
||||
// NetworkName returns the name of the network stack.
|
||||
func (n NetworkMode) NetworkName() string {
|
||||
switch {
|
||||
case n.IsDefault():
|
||||
if n.IsDefault() {
|
||||
return network.NetworkDefault
|
||||
case n.IsBridge():
|
||||
} else if n.IsBridge() {
|
||||
return network.NetworkNat
|
||||
case n.IsHost():
|
||||
// Windows currently doesn't support host network-mode, so
|
||||
// this would currently never happen..
|
||||
return network.NetworkHost
|
||||
case n.IsNone():
|
||||
} else if n.IsNone() {
|
||||
return network.NetworkNone
|
||||
case n.IsContainer():
|
||||
} else if n.IsContainer() {
|
||||
return "container"
|
||||
case n.IsUserDefined():
|
||||
} else if n.IsUserDefined() {
|
||||
return n.UserDefined()
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
package events // import "github.com/docker/docker/api/types/events"
|
||||
import "github.com/docker/docker/api/types/filters"
|
||||
|
||||
// Type is used for event-types.
|
||||
type Type string
|
||||
@@ -126,10 +125,3 @@ type Message struct {
|
||||
Time int64 `json:"time,omitempty"`
|
||||
TimeNano int64 `json:"timeNano,omitempty"`
|
||||
}
|
||||
|
||||
// ListOptions holds parameters to filter events with.
|
||||
type ListOptions struct {
|
||||
Since string
|
||||
Until string
|
||||
Filters filters.Args
|
||||
}
|
||||
|
||||
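The ListOptions type above is what the newer client passes to the events endpoint (the older side keeps the same fields on types.EventsOptions). A hedged sketch of streaming container events with it:

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/events"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	msgs, errs := cli.Events(context.Background(), events.ListOptions{
		Filters: filters.NewArgs(filters.Arg("type", "container")),
	})
	for {
		select {
		case m := <-msgs:
			fmt.Println(m.Type, m.Action, m.Actor.ID)
		case err := <-errs:
			fmt.Println("event stream ended:", err)
			return
		}
	}
}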
@@ -1,47 +1,9 @@
|
||||
package image
|
||||
|
||||
import (
|
||||
"io"
|
||||
"time"
|
||||
)
|
||||
import "time"
|
||||
|
||||
// Metadata contains engine-local data about the image.
|
||||
type Metadata struct {
|
||||
// LastTagTime is the date and time at which the image was last tagged.
|
||||
LastTagTime time.Time `json:",omitempty"`
|
||||
}
|
||||
|
||||
// PruneReport contains the response for Engine API:
|
||||
// POST "/images/prune"
|
||||
type PruneReport struct {
|
||||
ImagesDeleted []DeleteResponse
|
||||
SpaceReclaimed uint64
|
||||
}
|
||||
|
||||
// LoadResponse returns information to the client about a load process.
|
||||
//
|
||||
// TODO(thaJeztah): remove this type, and just use an io.ReadCloser
|
||||
//
|
||||
// This type was added in https://github.com/moby/moby/pull/18878, related
|
||||
// to https://github.com/moby/moby/issues/19177;
|
||||
//
|
||||
// Make docker load to output json when the response content type is json
|
||||
// Swarm hijacks the response from docker load and returns JSON rather
|
||||
// than plain text like the Engine does. This makes the API library return
|
||||
// information to figure that out.
|
||||
//
|
||||
// However the "load" endpoint unconditionally returns JSON;
|
||||
// https://github.com/moby/moby/blob/7b9d2ef6e5518a3d3f3cc418459f8df786cfbbd1/api/server/router/image/image_routes.go#L248-L255
|
||||
//
|
||||
// PR https://github.com/moby/moby/pull/21959 made the response-type depend
|
||||
// on whether "quiet" was set, but this logic got changed in a follow-up
|
||||
// https://github.com/moby/moby/pull/25557, which made the JSON response-type
|
||||
// unconditional, but the output produced depends on whether "quiet" was set.
//
// We should deprecate the "quiet" option, as it's really a client
|
||||
// responsibility.
|
||||
type LoadResponse struct {
|
||||
// Body must be closed to avoid a resource leak
|
||||
Body io.ReadCloser
|
||||
JSON bool
|
||||
}
|
||||
|
||||
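PruneReport above is the body returned by POST /images/prune. A hedged sketch of requesting a prune of dangling images and reading that report through the Go client (on the older side of this diff the same call returns types.ImagesPruneReport with identical fields):

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	report, err := cli.ImagesPrune(context.Background(), filters.NewArgs(filters.Arg("dangling", "true")))
	if err != nil {
		panic(err)
	}
	fmt.Printf("deleted %d images, reclaimed %d bytes\n", len(report.ImagesDeleted), report.SpaceReclaimed)
}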
@@ -1,18 +1,6 @@
|
||||
package image
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
// ImportSource holds source information for ImageImport
|
||||
type ImportSource struct {
|
||||
Source io.Reader // Source is the data to send to the server to create this image from. You must set SourceName to "-" to leverage this.
|
||||
SourceName string // SourceName is the name of the image to pull. Set to "-" to leverage the Source attribute.
|
||||
}
|
||||
import "github.com/docker/docker/api/types/filters"
|
||||
|
||||
// ImportOptions holds information to import images from the client host.
|
||||
type ImportOptions struct {
|
||||
@@ -39,28 +27,12 @@ type PullOptions struct {
|
||||
// privilege request fails.
|
||||
//
|
||||
// Also see [github.com/docker/docker/api/types.RequestPrivilegeFunc].
|
||||
PrivilegeFunc func(context.Context) (string, error)
|
||||
PrivilegeFunc func() (string, error)
|
||||
Platform string
|
||||
}
|
||||
|
||||
// PushOptions holds information to push images.
|
||||
type PushOptions struct {
|
||||
All bool
|
||||
RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
|
||||
|
||||
// PrivilegeFunc is a function that clients can supply to retry operations
|
||||
// after getting an authorization error. This function returns the registry
|
||||
// authentication header value in base64 encoded format, or an error if the
|
||||
// privilege request fails.
|
||||
//
|
||||
// Also see [github.com/docker/docker/api/types.RequestPrivilegeFunc].
|
||||
PrivilegeFunc func(context.Context) (string, error)
|
||||
|
||||
// Platform is an optional field that selects a specific platform to push
|
||||
// when the image is a multi-platform image.
|
||||
// Using this will only push a single platform-specific manifest.
|
||||
Platform *ocispec.Platform `json:",omitempty"`
|
||||
}
|
||||
type PushOptions PullOptions
|
||||
|
||||
// ListOptions holds parameters to list images with.
|
||||
type ListOptions struct {
|
||||
|
||||
@@ -119,11 +119,7 @@ type TmpfsOptions struct {
|
||||
SizeBytes int64 `json:",omitempty"`
|
||||
// Mode of the tmpfs upon creation
|
||||
Mode os.FileMode `json:",omitempty"`
|
||||
// Options to be passed to the tmpfs mount. An array of arrays. Flag
|
||||
// options should be provided as 1-length arrays. Other types should be
|
||||
// provided as 2-length arrays, where the first item is the key and the
|
||||
// second the value.
|
||||
Options [][]string `json:",omitempty"`
|
||||
|
||||
// TODO(stevvooe): There are several more tmpfs flags, specified in the
|
||||
// daemon, that are accepted. Only the most basic are added for now.
|
||||
//
|
||||
|
||||
@@ -1,19 +0,0 @@
|
||||
package network
|
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
// CreateResponse NetworkCreateResponse
|
||||
//
|
||||
// OK response to NetworkCreate operation
|
||||
// swagger:model CreateResponse
|
||||
type CreateResponse struct {
|
||||
|
||||
// The ID of the created network.
|
||||
// Required: true
|
||||
ID string `json:"Id"`
|
||||
|
||||
// Warnings encountered when creating the container
|
||||
// Required: true
|
||||
Warning string `json:"Warning"`
|
||||
}
|
||||
@@ -18,7 +18,6 @@ type EndpointSettings struct {
|
||||
// Once the container is running, it becomes operational data (it may contain a
|
||||
// generated address).
|
||||
MacAddress string
|
||||
DriverOpts map[string]string
|
||||
// Operational data
|
||||
NetworkID string
|
||||
EndpointID string
|
||||
@@ -28,6 +27,7 @@ type EndpointSettings struct {
|
||||
IPv6Gateway string
|
||||
GlobalIPv6Address string
|
||||
GlobalIPv6PrefixLen int
|
||||
DriverOpts map[string]string
|
||||
// DNSNames holds all the (non fully qualified) DNS names associated to this endpoint. First entry is used to
|
||||
// generate PTR records.
|
||||
DNSNames []string
|
||||
|
||||
@@ -1,8 +1,6 @@
|
||||
package network // import "github.com/docker/docker/api/types/network"
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
)
|
||||
|
||||
@@ -19,82 +17,6 @@ const (
|
||||
NetworkNat = "nat"
|
||||
)
|
||||
|
||||
// CreateRequest is the request message sent to the server for network create call.
|
||||
type CreateRequest struct {
|
||||
CreateOptions
|
||||
Name string // Name is the requested name of the network.
|
||||
|
||||
// Deprecated: CheckDuplicate is deprecated since API v1.44, but it defaults to true when sent by the client
|
||||
// package to older daemons.
|
||||
CheckDuplicate *bool `json:",omitempty"`
|
||||
}
|
||||
|
||||
// CreateOptions holds options to create a network.
|
||||
type CreateOptions struct {
|
||||
Driver string // Driver is the driver-name used to create the network (e.g. `bridge`, `overlay`)
|
||||
Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level).
|
||||
EnableIPv6 *bool `json:",omitempty"` // EnableIPv6 represents whether to enable IPv6.
|
||||
IPAM *IPAM // IPAM is the network's IP Address Management.
|
||||
Internal bool // Internal represents if the network is used internal only.
|
||||
Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode.
|
||||
Ingress bool // Ingress indicates the network is providing the routing-mesh for the swarm cluster.
|
||||
ConfigOnly bool // ConfigOnly creates a config-only network. Config-only networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services.
|
||||
ConfigFrom *ConfigReference // ConfigFrom specifies the source which will provide the configuration for this network. The specified network must be a config-only network; see [CreateOptions.ConfigOnly].
|
||||
Options map[string]string // Options specifies the network-specific options to use for when creating the network.
|
||||
Labels map[string]string // Labels holds metadata specific to the network being created.
|
||||
}
|
||||
|
||||
// ListOptions holds parameters to filter the list of networks with.
|
||||
type ListOptions struct {
|
||||
Filters filters.Args
|
||||
}
|
||||
|
||||
// InspectOptions holds parameters to inspect network.
|
||||
type InspectOptions struct {
|
||||
Scope string
|
||||
Verbose bool
|
||||
}
|
||||
|
||||
// ConnectOptions represents the data to be used to connect a container to the
// network.
type ConnectOptions struct {
    Container      string
    EndpointConfig *EndpointSettings `json:",omitempty"`
}

// DisconnectOptions represents the data to be used to disconnect a container
// from the network.
type DisconnectOptions struct {
    Container string
    Force     bool
}

// Inspect is the body of the "get network" http response message.
type Inspect struct {
    Name       string                      // Name is the name of the network
    ID         string                      `json:"Id"` // ID uniquely identifies a network on a single machine
    Created    time.Time                   // Created is the time the network created
    Scope      string                      // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level)
    Driver     string                      // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`)
    EnableIPv6 bool                        // EnableIPv6 represents whether to enable IPv6
    IPAM       IPAM                        // IPAM is the network's IP Address Management
    Internal   bool                        // Internal represents if the network is used internal only
    Attachable bool                        // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode.
    Ingress    bool                        // Ingress indicates the network is providing the routing-mesh for the swarm cluster.
    ConfigFrom ConfigReference             // ConfigFrom specifies the source which will provide the configuration for this network.
    ConfigOnly bool                        // ConfigOnly networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services.
    Containers map[string]EndpointResource // Containers contains endpoints belonging to the network
    Options    map[string]string           // Options holds the network specific options to use for when creating the network
    Labels     map[string]string           // Labels holds metadata specific to the network being created
    Peers      []PeerInfo                  `json:",omitempty"` // List of peer nodes for an overlay network
    Services   map[string]ServiceInfo      `json:",omitempty"`
}

// Summary is used as response when listing networks. It currently is an alias
// for [Inspect], but may diverge in the future, as not all information may
// be included when listing networks.
type Summary = Inspect
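These option structs describe request bodies rather than client arguments; a small sketch of the JSON they produce, assuming ConnectOptions is what POST /networks/{id}/connect expects and DisconnectOptions its /disconnect counterpart (container name and values are made up).

package main

import (
    "encoding/json"
    "fmt"

    "github.com/docker/docker/api/types/network"
)

func main() {
    // Illustrative bodies only; a real caller would send these to the daemon.
    connect := network.ConnectOptions{
        Container: "my-container",
        EndpointConfig: &network.EndpointSettings{
            DNSNames: []string{"my-container", "web"},
        },
    }
    disconnect := network.DisconnectOptions{Container: "my-container", Force: true}

    for name, v := range map[string]any{"connect": connect, "disconnect": disconnect} {
        body, _ := json.Marshal(v)
        fmt.Printf("%s body: %s\n", name, body)
    }
}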
// Address represents an IP address
type Address struct {
    Addr string
@@ -123,16 +45,6 @@ type ServiceInfo struct {
    Tasks []Task
}

// EndpointResource contains network resources allocated and used for a
// container in a network.
type EndpointResource struct {
    Name        string
    EndpointID  string
    MacAddress  string
    IPv4Address string
    IPv6Address string
}

// NetworkingConfig represents the container's networking configuration for each of its interfaces
// Carries the networking configs specified in the `docker run` and `docker network connect` commands
type NetworkingConfig struct {
@@ -158,9 +70,3 @@ var acceptedFilters = map[string]bool{
func ValidateFilters(filter filters.Args) error {
    return filter.Validate(acceptedFilters)
}

// PruneReport contains the response for Engine API:
// POST "/networks/prune"
type PruneReport struct {
    NetworksDeleted []string
}
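A quick sketch of the filter validation shown above, assuming the acceptedFilters set in this file includes "driver"; the "bogus" key is only there to show the failure path.

package main

import (
    "fmt"

    "github.com/docker/docker/api/types/filters"
    "github.com/docker/docker/api/types/network"
)

func main() {
    // "driver" is assumed to be in acceptedFilters, as in this file.
    ok := filters.NewArgs(filters.Arg("driver", "bridge"))
    fmt.Println(network.ValidateFilters(ok)) // nil: accepted key

    bad := filters.NewArgs(filters.Arg("bogus", "x"))
    fmt.Println(network.ValidateFilters(bad)) // non-nil error for the unsupported key
}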
@@ -84,6 +84,32 @@ type IndexInfo struct {
    Official bool
}

// SearchResult describes a search result returned from a registry
type SearchResult struct {
    // StarCount indicates the number of stars this repository has
    StarCount int `json:"star_count"`
    // IsOfficial is true if the result is from an official repository.
    IsOfficial bool `json:"is_official"`
    // Name is the name of the repository
    Name string `json:"name"`
    // IsAutomated indicates whether the result is automated.
    //
    // Deprecated: the "is_automated" field is deprecated and will always be "false".
    IsAutomated bool `json:"is_automated"`
    // Description is a textual description of the repository
    Description string `json:"description"`
}

// SearchResults lists a collection search results returned from a registry
type SearchResults struct {
    // Query contains the query string that generated the search results
    Query string `json:"query"`
    // NumResults indicates the number of results the query returned
    NumResults int `json:"num_results"`
    // Results is a slice containing the actual results for the search
    Results []SearchResult `json:"results"`
}

// DistributionInspect describes the result obtained from contacting the
// registry to retrieve image metadata
type DistributionInspect struct {
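A short sketch of decoding the wire format SearchResults describes; the JSON payload here is hand-written for illustration only.

package main

import (
    "encoding/json"
    "fmt"

    "github.com/docker/docker/api/types/registry"
)

func main() {
    // Illustrative payload matching the json tags above.
    payload := []byte(`{
        "query": "redis",
        "num_results": 1,
        "results": [
            {"name": "redis", "star_count": 12000, "is_official": true, "description": "An in-memory data store"}
        ]
    }`)

    var res registry.SearchResults
    if err := json.Unmarshal(payload, &res); err != nil {
        panic(err)
    }
    for _, r := range res.Results {
        fmt.Printf("%s (official=%v, stars=%d)\n", r.Name, r.IsOfficial, r.StarCount)
    }
}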
@@ -1,47 +0,0 @@
package registry

import (
    "context"

    "github.com/docker/docker/api/types/filters"
)

// SearchOptions holds parameters to search images with.
type SearchOptions struct {
    RegistryAuth string

    // PrivilegeFunc is a [types.RequestPrivilegeFunc] the client can
    // supply to retry operations after getting an authorization error.
    //
    // It must return the registry authentication header value in base64
    // format, or an error if the privilege request fails.
    PrivilegeFunc func(context.Context) (string, error)
    Filters       filters.Args
    Limit         int
}

// SearchResult describes a search result returned from a registry
type SearchResult struct {
    // StarCount indicates the number of stars this repository has
    StarCount int `json:"star_count"`
    // IsOfficial is true if the result is from an official repository.
    IsOfficial bool `json:"is_official"`
    // Name is the name of the repository
    Name string `json:"name"`
    // IsAutomated indicates whether the result is automated.
    //
    // Deprecated: the "is_automated" field is deprecated and will always be "false".
    IsAutomated bool `json:"is_automated"`
    // Description is a textual description of the repository
    Description string `json:"description"`
}

// SearchResults lists a collection search results returned from a registry
type SearchResults struct {
    // Query contains the query string that generated the search results
    Query string `json:"query"`
    // NumResults indicates the number of results the query returned
    NumResults int `json:"num_results"`
    // Results is a slice containing the actual results for the search
    Results []SearchResult `json:"results"`
}
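A hedged sketch of using this SearchOptions type with the client's ImageSearch call, assuming the variant of the client whose ImageSearch accepts registry.SearchOptions; the PrivilegeFunc below is a stand-in that simply fails, where a real one would return a base64-encoded auth header.

package main

import (
    "context"
    "errors"
    "fmt"

    "github.com/docker/docker/api/types/registry"
    "github.com/docker/docker/client"
)

func main() {
    cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
    if err != nil {
        panic(err)
    }
    defer cli.Close()

    // Assumption: ImageSearch takes registry.SearchOptions in this variant of the client.
    results, err := cli.ImageSearch(context.Background(), "alpine", registry.SearchOptions{
        Limit: 5,
        PrivilegeFunc: func(ctx context.Context) (string, error) {
            return "", errors.New("no credentials available") // stand-in privilege callback
        },
    })
    if err != nil {
        panic(err)
    }
    for _, r := range results {
        fmt.Println(r.Name, r.StarCount)
    }
}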
@@ -1,4 +1,6 @@
package container
// Package types is used for API stability in the types and response to the
// consumers of the API stats endpoint.
package types // import "github.com/docker/docker/api/types"

import "time"

@@ -167,10 +169,8 @@ type Stats struct {
    MemoryStats MemoryStats `json:"memory_stats,omitempty"`
}

// StatsResponse is newly used Networks.
//
// TODO(thaJeztah): unify with [Stats]. This wrapper was to account for pre-api v1.21 changes, see https://github.com/moby/moby/commit/d3379946ec96fb6163cb8c4517d7d5a067045801
type StatsResponse struct {
// StatsJSON is newly used Networks
type StatsJSON struct {
    Stats

    Name string `json:"name,omitempty"`
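A sketch of consuming the stats stream with the renamed wrapper type, assuming the newer StatsResponse name from this hunk; the stream is faked with a single hand-written document, since obtaining the real body from GET /containers/{id}/stats is out of scope here.

package main

import (
    "encoding/json"
    "fmt"
    "io"
    "strings"

    "github.com/docker/docker/api/types/container"
)

// printCPUTicks decodes a JSON stats stream, one StatsResponse document per tick.
func printCPUTicks(body io.Reader) error {
    dec := json.NewDecoder(body)
    for {
        var s container.StatsResponse // assumes the renamed type from this hunk
        if err := dec.Decode(&s); err == io.EOF {
            return nil
        } else if err != nil {
            return err
        }
        fmt.Println(s.Name, s.CPUStats.CPUUsage.TotalUsage)
    }
}

func main() {
    // Stand-in for a real stats stream; a daemon would emit many such documents.
    sample := `{"name":"/demo","cpu_stats":{"cpu_usage":{"total_usage":123456}}}`
    if err := printCPUTicks(strings.NewReader(sample)); err != nil {
        panic(err)
    }
}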
@@ -5,6 +5,7 @@ import (

    "github.com/docker/docker/api/types/container"
    "github.com/docker/docker/api/types/mount"
    "github.com/docker/go-units"
)

// DNSConfig specifies DNS related configurations in resolver configuration file (resolv.conf)
@@ -114,6 +115,5 @@ type ContainerSpec struct {
    Sysctls        map[string]string   `json:",omitempty"`
    CapabilityAdd  []string            `json:",omitempty"`
    CapabilityDrop []string            `json:",omitempty"`
    Ulimits        []*container.Ulimit `json:",omitempty"`
    OomScoreAdj    int64               `json:",omitempty"`
    Ulimits        []*units.Ulimit     `json:",omitempty"`
}
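A minimal sketch of setting the Ulimits field whose element type changes in this hunk; in the newer tree container.Ulimit aliases go-units' Ulimit, so the same literal compiles on either side of the change (the limits themselves are illustrative).

package main

import (
    "fmt"

    "github.com/docker/docker/api/types/container"
    "github.com/docker/docker/api/types/swarm"
)

func main() {
    spec := swarm.ContainerSpec{
        Image: "nginx:alpine",
        // container.Ulimit is assumed to alias units.Ulimit, as in the newer side of this hunk.
        Ulimits: []*container.Ulimit{
            {Name: "nofile", Soft: 1024, Hard: 4096},
        },
    }
    fmt.Printf("%s gets %d ulimit(s), first: %+v\n", spec.Image, len(spec.Ulimits), *spec.Ulimits[0])
}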
@@ -75,8 +75,6 @@ type Info struct {
    DefaultAddressPools []NetworkAddressPool `json:",omitempty"`
    CDISpecDirs         []string

    Containerd *ContainerdInfo `json:",omitempty"`

    // Legacy API fields for older API versions.
    legacyFields

@@ -87,43 +85,6 @@ type Info struct {
    Warnings []string
}

// ContainerdInfo holds information about the containerd instance used by the daemon.
type ContainerdInfo struct {
    // Address is the path to the containerd socket.
    Address string `json:",omitempty"`
    // Namespaces is the containerd namespaces used by the daemon.
    Namespaces ContainerdNamespaces
}

// ContainerdNamespaces reflects the containerd namespaces used by the daemon.
//
// These namespaces can be configured in the daemon configuration, and are
// considered to be used exclusively by the daemon,
//
// As these namespaces are considered to be exclusively accessed
// by the daemon, it is not recommended to change these values,
// or to change them to a value that is used by other systems,
// such as cri-containerd.
type ContainerdNamespaces struct {
    // Containers holds the default containerd namespace used for
    // containers managed by the daemon.
    //
    // The default namespace for containers is "moby", but will be
    // suffixed with the `<uid>.<gid>` of the remapped `root` if
    // user-namespaces are enabled and the containerd image-store
    // is used.
    Containers string

    // Plugins holds the default containerd namespace used for
    // plugins managed by the daemon.
    //
    // The default namespace for plugins is "moby", but will be
    // suffixed with the `<uid>.<gid>` of the remapped `root` if
    // user-namespaces are enabled and the containerd image-store
    // is used.
    Plugins string
}

type legacyFields struct {
    ExecutionDriver string `json:",omitempty"` // Deprecated: deprecated since API v1.25, but returned for older versions.
}
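An illustrative sketch of the naming rule those comments describe: the default namespace is "moby", suffixed with the remapped root's uid.gid when user namespaces are in use. The helper and the exact join character are hypothetical; the daemon computes this internally.

package main

import "fmt"

// namespaceName is a hypothetical helper mirroring the rule in the comments above.
func namespaceName(base string, userNSEnabled bool, uid, gid int) string {
    if !userNSEnabled {
        return base
    }
    return fmt.Sprintf("%s-%d.%d", base, uid, gid) // join character is an assumption
}

func main() {
    fmt.Println(namespaceName("moby", false, 0, 0))          // moby
    fmt.Println(namespaceName("moby", true, 100000, 100000)) // moby-100000.100000
}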
@@ -1,6 +1,8 @@
package types // import "github.com/docker/docker/api/types"

import (
    "io"
    "os"
    "time"

    "github.com/docker/docker/api/types/container"
@@ -153,13 +155,36 @@ type Container struct {
    State      string
    Status     string
    HostConfig struct {
        NetworkMode string            `json:",omitempty"`
        Annotations map[string]string `json:",omitempty"`
        NetworkMode string            `json:",omitempty"`
    }
    NetworkSettings *SummaryNetworkSettings
    Mounts          []MountPoint
}
// CopyConfig contains request body of Engine API:
// POST "/containers/"+containerID+"/copy"
type CopyConfig struct {
    Resource string
}

// ContainerPathStat is used to encode the header from
// GET "/containers/{name:.*}/archive"
// "Name" is the file or directory name.
type ContainerPathStat struct {
    Name       string      `json:"name"`
    Size       int64       `json:"size"`
    Mode       os.FileMode `json:"mode"`
    Mtime      time.Time   `json:"mtime"`
    LinkTarget string      `json:"linkTarget"`
}

// ContainerStats contains response of Engine API:
// GET "/stats"
type ContainerStats struct {
    Body   io.ReadCloser `json:"body"`
    OSType string        `json:"ostype"`
}
// Ping contains response of Engine API:
// GET "/_ping"
type Ping struct {
@@ -205,6 +230,17 @@ type Version struct {
    BuildTime string `json:",omitempty"`
}

// ExecStartCheck is a temp struct used by execStart
// Config fields is part of ExecConfig in runconfig package
type ExecStartCheck struct {
    // ExecStart will first check if it's detached
    Detach bool
    // Check if there's a tty
    Tty bool
    // Terminal size [height, width], unused if Tty == false
    ConsoleSize *[2]uint `json:",omitempty"`
}

// HealthcheckResult stores information about a single run of a healthcheck probe
type HealthcheckResult struct {
    Start time.Time // Start is the time this check started
@@ -387,6 +423,84 @@ type MountPoint struct {
    Propagation mount.Propagation
}
// NetworkResource is the body of the "get network" http response message
type NetworkResource struct {
    Name       string                         // Name is the requested name of the network
    ID         string                         `json:"Id"` // ID uniquely identifies a network on a single machine
    Created    time.Time                      // Created is the time the network created
    Scope      string                         // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level)
    Driver     string                         // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`)
    EnableIPv6 bool                           // EnableIPv6 represents whether to enable IPv6
    IPAM       network.IPAM                   // IPAM is the network's IP Address Management
    Internal   bool                           // Internal represents if the network is used internal only
    Attachable bool                           // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode.
    Ingress    bool                           // Ingress indicates the network is providing the routing-mesh for the swarm cluster.
    ConfigFrom network.ConfigReference        // ConfigFrom specifies the source which will provide the configuration for this network.
    ConfigOnly bool                           // ConfigOnly networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services.
    Containers map[string]EndpointResource    // Containers contains endpoints belonging to the network
    Options    map[string]string              // Options holds the network specific options to use for when creating the network
    Labels     map[string]string              // Labels holds metadata specific to the network being created
    Peers      []network.PeerInfo             `json:",omitempty"` // List of peer nodes for an overlay network
    Services   map[string]network.ServiceInfo `json:",omitempty"`
}

// EndpointResource contains network resources allocated and used for a container in a network
type EndpointResource struct {
    Name        string
    EndpointID  string
    MacAddress  string
    IPv4Address string
    IPv6Address string
}
// NetworkCreate is the expected body of the "create network" http request message
type NetworkCreate struct {
    // Deprecated: CheckDuplicate is deprecated since API v1.44, but it defaults to true when sent by the client
    // package to older daemons.
    CheckDuplicate bool                     `json:",omitempty"`
    Driver         string                   // Driver is the driver-name used to create the network (e.g. `bridge`, `overlay`)
    Scope          string                   // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level).
    EnableIPv6     bool                     // EnableIPv6 represents whether to enable IPv6.
    IPAM           *network.IPAM            // IPAM is the network's IP Address Management.
    Internal       bool                     // Internal represents if the network is used internal only.
    Attachable     bool                     // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode.
    Ingress        bool                     // Ingress indicates the network is providing the routing-mesh for the swarm cluster.
    ConfigOnly     bool                     // ConfigOnly creates a config-only network. Config-only networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services.
    ConfigFrom     *network.ConfigReference // ConfigFrom specifies the source which will provide the configuration for this network. The specified network must be a config-only network; see [NetworkCreate.ConfigOnly].
    Options        map[string]string        // Options specifies the network-specific options to use for when creating the network.
    Labels         map[string]string        // Labels holds metadata specific to the network being created.
}

// NetworkCreateRequest is the request message sent to the server for network create call.
type NetworkCreateRequest struct {
    NetworkCreate
    Name string // Name is the requested name of the network.
}

// NetworkCreateResponse is the response message sent by the server for network create call
type NetworkCreateResponse struct {
    ID      string `json:"Id"`
    Warning string
}
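A short sketch of the legacy request body shape, using the non-aliased structs exactly as defined in this hunk: the embedded NetworkCreate keeps the JSON flat, and CheckDuplicate is still emitted for pre-v1.44 daemons. Values are illustrative.

package main

import (
    "encoding/json"
    "fmt"

    "github.com/docker/docker/api/types"
)

func main() {
    // Uses the older, non-aliased structs shown above (CheckDuplicate is a plain bool here).
    req := types.NetworkCreateRequest{
        Name: "app-net",
        NetworkCreate: types.NetworkCreate{
            CheckDuplicate: true, // ignored by API >= v1.44, kept for older daemons
            Driver:         "bridge",
            Labels:         map[string]string{"com.example.stack": "demo"},
        },
    }
    body, _ := json.MarshalIndent(req, "", "  ")
    fmt.Println(string(body))
}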
// NetworkConnect represents the data to be used to connect a container to the network
type NetworkConnect struct {
    Container      string
    EndpointConfig *network.EndpointSettings `json:",omitempty"`
}

// NetworkDisconnect represents the data to be used to disconnect a container from the network
type NetworkDisconnect struct {
    Container string
    Force     bool
}

// NetworkInspectOptions holds parameters to inspect network
type NetworkInspectOptions struct {
    Scope   string
    Verbose bool
}

// DiskUsageObject represents an object type used for disk usage query filtering.
type DiskUsageObject string
@@ -419,6 +533,27 @@ type DiskUsage struct {
    BuilderSize int64 `json:",omitempty"` // Deprecated: deprecated in API 1.38, and no longer used since API 1.40.
}

// ContainersPruneReport contains the response for Engine API:
// POST "/containers/prune"
type ContainersPruneReport struct {
    ContainersDeleted []string
    SpaceReclaimed    uint64
}

// VolumesPruneReport contains the response for Engine API:
// POST "/volumes/prune"
type VolumesPruneReport struct {
    VolumesDeleted []string
    SpaceReclaimed uint64
}

// ImagesPruneReport contains the response for Engine API:
// POST "/images/prune"
type ImagesPruneReport struct {
    ImagesDeleted  []image.DeleteResponse
    SpaceReclaimed uint64
}

// BuildCachePruneReport contains the response for Engine API:
// POST "/build/prune"
type BuildCachePruneReport struct {
@@ -426,6 +561,12 @@ type BuildCachePruneReport struct {
    SpaceReclaimed uint64
}

// NetworksPruneReport contains the response for Engine API:
// POST "/networks/prune"
type NetworksPruneReport struct {
    NetworksDeleted []string
}

// SecretCreateResponse contains the information returned to a client
// on the creation of a new secret.
type SecretCreateResponse struct {
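A hedged sketch of consuming one of the prune reports listed above through the client; the field names match this hunk, and the empty filter set means everything eligible is considered.

package main

import (
    "context"
    "fmt"

    "github.com/docker/docker/api/types/filters"
    "github.com/docker/docker/client"
)

func main() {
    cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
    if err != nil {
        panic(err)
    }
    defer cli.Close()

    // The returned report exposes ContainersDeleted and SpaceReclaimed, as in the struct above.
    report, err := cli.ContainersPrune(context.Background(), filters.NewArgs())
    if err != nil {
        panic(err)
    }
    fmt.Printf("deleted %d container(s), reclaimed %d bytes\n",
        len(report.ContainersDeleted), report.SpaceReclaimed)
}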
@@ -1,196 +1,35 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/events"
|
||||
"github.com/docker/docker/api/types/image"
|
||||
"github.com/docker/docker/api/types/network"
|
||||
"github.com/docker/docker/api/types/registry"
|
||||
"github.com/docker/docker/api/types/volume"
|
||||
)
|
||||
|
||||
// ImagesPruneReport contains the response for Engine API:
|
||||
// POST "/images/prune"
|
||||
// ImageImportOptions holds information to import images from the client host.
|
||||
//
|
||||
// Deprecated: use [image.PruneReport].
|
||||
type ImagesPruneReport = image.PruneReport
|
||||
// Deprecated: use [image.ImportOptions].
|
||||
type ImageImportOptions = image.ImportOptions
|
||||
|
||||
// VolumesPruneReport contains the response for Engine API:
|
||||
// POST "/volumes/prune".
|
||||
// ImageCreateOptions holds information to create images.
|
||||
//
|
||||
// Deprecated: use [volume.PruneReport].
|
||||
type VolumesPruneReport = volume.PruneReport
|
||||
// Deprecated: use [image.CreateOptions].
|
||||
type ImageCreateOptions = image.CreateOptions
|
||||
|
||||
// NetworkCreateRequest is the request message sent to the server for network create call.
|
||||
// ImagePullOptions holds information to pull images.
|
||||
//
|
||||
// Deprecated: use [network.CreateRequest].
|
||||
type NetworkCreateRequest = network.CreateRequest
|
||||
// Deprecated: use [image.PullOptions].
|
||||
type ImagePullOptions = image.PullOptions
|
||||
|
||||
// NetworkCreate is the expected body of the "create network" http request message
|
||||
// ImagePushOptions holds information to push images.
|
||||
//
|
||||
// Deprecated: use [network.CreateOptions].
|
||||
type NetworkCreate = network.CreateOptions
|
||||
// Deprecated: use [image.PushOptions].
|
||||
type ImagePushOptions = image.PushOptions
|
||||
|
||||
// NetworkListOptions holds parameters to filter the list of networks with.
|
||||
// ImageListOptions holds parameters to list images with.
|
||||
//
|
||||
// Deprecated: use [network.ListOptions].
|
||||
type NetworkListOptions = network.ListOptions
|
||||
// Deprecated: use [image.ListOptions].
|
||||
type ImageListOptions = image.ListOptions
|
||||
|
||||
// NetworkCreateResponse is the response message sent by the server for network create call.
|
||||
// ImageRemoveOptions holds parameters to remove images.
|
||||
//
|
||||
// Deprecated: use [network.CreateResponse].
|
||||
type NetworkCreateResponse = network.CreateResponse
|
||||
|
||||
// NetworkInspectOptions holds parameters to inspect network.
|
||||
//
|
||||
// Deprecated: use [network.InspectOptions].
|
||||
type NetworkInspectOptions = network.InspectOptions
|
||||
|
||||
// NetworkConnect represents the data to be used to connect a container to the network
|
||||
//
|
||||
// Deprecated: use [network.ConnectOptions].
|
||||
type NetworkConnect = network.ConnectOptions
|
||||
|
||||
// NetworkDisconnect represents the data to be used to disconnect a container from the network
|
||||
//
|
||||
// Deprecated: use [network.DisconnectOptions].
|
||||
type NetworkDisconnect = network.DisconnectOptions
|
||||
|
||||
// EndpointResource contains network resources allocated and used for a container in a network.
|
||||
//
|
||||
// Deprecated: use [network.EndpointResource].
|
||||
type EndpointResource = network.EndpointResource
|
||||
|
||||
// NetworkResource is the body of the "get network" http response message/
|
||||
//
|
||||
// Deprecated: use [network.Inspect] or [network.Summary] (for list operations).
|
||||
type NetworkResource = network.Inspect
|
||||
|
||||
// NetworksPruneReport contains the response for Engine API:
|
||||
// POST "/networks/prune"
|
||||
//
|
||||
// Deprecated: use [network.PruneReport].
|
||||
type NetworksPruneReport = network.PruneReport
|
||||
|
||||
// ExecConfig is a small subset of the Config struct that holds the configuration
|
||||
// for the exec feature of docker.
|
||||
//
|
||||
// Deprecated: use [container.ExecOptions].
|
||||
type ExecConfig = container.ExecOptions
|
||||
|
||||
// ExecStartCheck is a temp struct used by execStart
|
||||
// Config fields is part of ExecConfig in runconfig package
|
||||
//
|
||||
// Deprecated: use [container.ExecStartOptions] or [container.ExecAttachOptions].
|
||||
type ExecStartCheck = container.ExecStartOptions
|
||||
|
||||
// ContainerExecInspect holds information returned by exec inspect.
|
||||
//
|
||||
// Deprecated: use [container.ExecInspect].
|
||||
type ContainerExecInspect = container.ExecInspect
|
||||
|
||||
// ContainersPruneReport contains the response for Engine API:
|
||||
// POST "/containers/prune"
|
||||
//
|
||||
// Deprecated: use [container.PruneReport].
|
||||
type ContainersPruneReport = container.PruneReport
|
||||
|
||||
// ContainerPathStat is used to encode the header from
|
||||
// GET "/containers/{name:.*}/archive"
|
||||
// "Name" is the file or directory name.
|
||||
//
|
||||
// Deprecated: use [container.PathStat].
|
||||
type ContainerPathStat = container.PathStat
|
||||
|
||||
// CopyToContainerOptions holds information
|
||||
// about files to copy into a container.
|
||||
//
|
||||
// Deprecated: use [container.CopyToContainerOptions],
|
||||
type CopyToContainerOptions = container.CopyToContainerOptions
|
||||
|
||||
// ContainerStats contains response of Engine API:
|
||||
// GET "/stats"
|
||||
//
|
||||
// Deprecated: use [container.StatsResponseReader].
|
||||
type ContainerStats = container.StatsResponseReader
|
||||
|
||||
// ThrottlingData stores CPU throttling stats of one running container.
|
||||
// Not used on Windows.
|
||||
//
|
||||
// Deprecated: use [container.ThrottlingData].
|
||||
type ThrottlingData = container.ThrottlingData
|
||||
|
||||
// CPUUsage stores All CPU stats aggregated since container inception.
|
||||
//
|
||||
// Deprecated: use [container.CPUUsage].
|
||||
type CPUUsage = container.CPUUsage
|
||||
|
||||
// CPUStats aggregates and wraps all CPU related info of container
|
||||
//
|
||||
// Deprecated: use [container.CPUStats].
|
||||
type CPUStats = container.CPUStats
|
||||
|
||||
// MemoryStats aggregates all memory stats since container inception on Linux.
|
||||
// Windows returns stats for commit and private working set only.
|
||||
//
|
||||
// Deprecated: use [container.MemoryStats].
|
||||
type MemoryStats = container.MemoryStats
|
||||
|
||||
// BlkioStatEntry is one small entity to store a piece of Blkio stats
|
||||
// Not used on Windows.
|
||||
//
|
||||
// Deprecated: use [container.BlkioStatEntry].
|
||||
type BlkioStatEntry = container.BlkioStatEntry
|
||||
|
||||
// BlkioStats stores All IO service stats for data read and write.
|
||||
// This is a Linux specific structure as the differences between expressing
|
||||
// block I/O on Windows and Linux are sufficiently significant to make
|
||||
// little sense attempting to morph into a combined structure.
|
||||
//
|
||||
// Deprecated: use [container.BlkioStats].
|
||||
type BlkioStats = container.BlkioStats
|
||||
|
||||
// StorageStats is the disk I/O stats for read/write on Windows.
|
||||
//
|
||||
// Deprecated: use [container.StorageStats].
|
||||
type StorageStats = container.StorageStats
|
||||
|
||||
// NetworkStats aggregates the network stats of one container
|
||||
//
|
||||
// Deprecated: use [container.NetworkStats].
|
||||
type NetworkStats = container.NetworkStats
|
||||
|
||||
// PidsStats contains the stats of a container's pids
|
||||
//
|
||||
// Deprecated: use [container.PidsStats].
|
||||
type PidsStats = container.PidsStats
|
||||
|
||||
// Stats is Ultimate struct aggregating all types of stats of one container
|
||||
//
|
||||
// Deprecated: use [container.Stats].
|
||||
type Stats = container.Stats
|
||||
|
||||
// StatsJSON is newly used Networks
|
||||
//
|
||||
// Deprecated: use [container.StatsResponse].
|
||||
type StatsJSON = container.StatsResponse
|
||||
|
||||
// EventsOptions holds parameters to filter events with.
|
||||
//
|
||||
// Deprecated: use [events.ListOptions].
|
||||
type EventsOptions = events.ListOptions
|
||||
|
||||
// ImageSearchOptions holds parameters to search images with.
|
||||
//
|
||||
// Deprecated: use [registry.SearchOptions].
|
||||
type ImageSearchOptions = registry.SearchOptions
|
||||
|
||||
// ImageImportSource holds source information for ImageImport
|
||||
//
|
||||
// Deprecated: use [image.ImportSource].
|
||||
type ImageImportSource image.ImportSource
|
||||
|
||||
// ImageLoadResponse returns information to the client about a load process.
|
||||
//
|
||||
// Deprecated: use [image.LoadResponse].
|
||||
type ImageLoadResponse = image.LoadResponse
|
||||
// Deprecated: use [image.RemoveOptions].
|
||||
type ImageRemoveOptions = image.RemoveOptions
|
||||
|
||||
@@ -6,10 +6,3 @@ import "github.com/docker/docker/api/types/filters"
type ListOptions struct {
    Filters filters.Args
}

// PruneReport contains the response for Engine API:
// POST "/volumes/prune"
type PruneReport struct {
    VolumesDeleted []string
    SpaceReclaimed uint64
}
@@ -1,5 +1,5 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
//go:build go1.21
//go:build go1.19

package containerimage
@@ -14,7 +14,6 @@ import (
|
||||
"github.com/containerd/containerd/remotes/docker"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/backend"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
timetypes "github.com/docker/docker/api/types/time"
|
||||
"github.com/docker/docker/builder"
|
||||
"github.com/docker/docker/builder/builder-next/exporter"
|
||||
@@ -27,6 +26,7 @@ import (
|
||||
"github.com/docker/docker/opts"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/docker/docker/pkg/streamformatter"
|
||||
"github.com/docker/go-units"
|
||||
controlapi "github.com/moby/buildkit/api/services/control"
|
||||
"github.com/moby/buildkit/client"
|
||||
"github.com/moby/buildkit/control"
|
||||
@@ -77,24 +77,23 @@ var cacheFields = map[string]bool{
|
||||
|
||||
// Opt is option struct required for creating the builder
|
||||
type Opt struct {
|
||||
SessionManager *session.Manager
|
||||
Root string
|
||||
EngineID string
|
||||
Dist images.DistributionServices
|
||||
ImageTagger mobyexporter.ImageTagger
|
||||
NetworkController *libnetwork.Controller
|
||||
DefaultCgroupParent string
|
||||
RegistryHosts docker.RegistryHosts
|
||||
BuilderConfig config.BuilderConfig
|
||||
Rootless bool
|
||||
IdentityMapping idtools.IdentityMapping
|
||||
DNSConfig config.DNSConfig
|
||||
ApparmorProfile string
|
||||
UseSnapshotter bool
|
||||
Snapshotter string
|
||||
ContainerdAddress string
|
||||
ContainerdNamespace string
|
||||
ImageExportedCallback exporter.ImageExportedByBuildkit
|
||||
SessionManager *session.Manager
|
||||
Root string
|
||||
EngineID string
|
||||
Dist images.DistributionServices
|
||||
ImageTagger mobyexporter.ImageTagger
|
||||
NetworkController *libnetwork.Controller
|
||||
DefaultCgroupParent string
|
||||
RegistryHosts docker.RegistryHosts
|
||||
BuilderConfig config.BuilderConfig
|
||||
Rootless bool
|
||||
IdentityMapping idtools.IdentityMapping
|
||||
DNSConfig config.DNSConfig
|
||||
ApparmorProfile string
|
||||
UseSnapshotter bool
|
||||
Snapshotter string
|
||||
ContainerdAddress string
|
||||
ContainerdNamespace string
|
||||
}
|
||||
|
||||
// Builder can build using BuildKit backend
|
||||
@@ -393,7 +392,7 @@ func (b *Builder) Build(ctx context.Context, opt backend.BuildConfig) (*builder.
|
||||
req := &controlapi.SolveRequest{
|
||||
Ref: id,
|
||||
Exporters: []*controlapi.Exporter{
|
||||
{Type: exporterName, Attrs: exporterAttrs},
|
||||
&controlapi.Exporter{Type: exporterName, Attrs: exporterAttrs},
|
||||
},
|
||||
Frontend: "dockerfile.v0",
|
||||
FrontendAttrs: frontendAttrs,
|
||||
@@ -613,7 +612,7 @@ func toBuildkitExtraHosts(inp []string, hostGatewayIP net.IP) (string, error) {
|
||||
}
|
||||
|
||||
// toBuildkitUlimits converts ulimits from docker type=soft:hard format to buildkit's csv format
|
||||
func toBuildkitUlimits(inp []*container.Ulimit) (string, error) {
|
||||
func toBuildkitUlimits(inp []*units.Ulimit) (string, error) {
|
||||
if len(inp) == 0 {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
@@ -46,7 +46,6 @@ import (
|
||||
"github.com/moby/buildkit/util/archutil"
|
||||
"github.com/moby/buildkit/util/entitlements"
|
||||
"github.com/moby/buildkit/util/network/netproviders"
|
||||
"github.com/moby/buildkit/util/tracing"
|
||||
"github.com/moby/buildkit/util/tracing/detect"
|
||||
"github.com/moby/buildkit/worker"
|
||||
"github.com/moby/buildkit/worker/containerd"
|
||||
@@ -68,17 +67,11 @@ func newController(ctx context.Context, rt http.RoundTripper, opt Opt) (*control
|
||||
}
|
||||
|
||||
func getTraceExporter(ctx context.Context) trace.SpanExporter {
|
||||
tc := make(tracing.MultiSpanExporter, 0, 2)
|
||||
if detect.Recorder != nil {
|
||||
tc = append(tc, detect.Recorder)
|
||||
}
|
||||
|
||||
if exp, err := detect.NewSpanExporter(ctx); err != nil {
|
||||
span, _, err := detect.Exporter()
|
||||
if err != nil {
|
||||
log.G(ctx).WithError(err).Error("Failed to detect trace exporter for buildkit controller")
|
||||
} else if !detect.IsNoneSpanExporter(exp) {
|
||||
tc = append(tc, exp)
|
||||
}
|
||||
return tc
|
||||
return span
|
||||
}
|
||||
|
||||
func newSnapshotterController(ctx context.Context, rt http.RoundTripper, opt Opt) (*control.Controller, error) {
|
||||
@@ -138,7 +131,7 @@ func newSnapshotterController(ctx context.Context, rt http.RoundTripper, opt Opt
|
||||
}
|
||||
wo.Executor = exec
|
||||
|
||||
w, err := mobyworker.NewContainerdWorker(ctx, wo, opt.ImageExportedCallback)
|
||||
w, err := mobyworker.NewContainerdWorker(ctx, wo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -149,15 +142,9 @@ func newSnapshotterController(ctx context.Context, rt http.RoundTripper, opt Opt
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
gwf, err := gateway.NewGatewayFrontend(wc.Infos(), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
frontends := map[string]frontend.Frontend{
|
||||
"dockerfile.v0": forwarder.NewGatewayForwarder(wc.Infos(), dockerfile.Build),
|
||||
"gateway.v0": gwf,
|
||||
"gateway.v0": gateway.NewGatewayFrontend(wc.Infos()),
|
||||
}
|
||||
|
||||
return control.NewController(control.Opt{
|
||||
@@ -316,12 +303,11 @@ func newGraphDriverController(ctx context.Context, rt http.RoundTripper, opt Opt
|
||||
}
|
||||
|
||||
exp, err := mobyexporter.New(mobyexporter.Opt{
|
||||
ImageStore: dist.ImageStore,
|
||||
ContentStore: store,
|
||||
Differ: differ,
|
||||
ImageTagger: opt.ImageTagger,
|
||||
LeaseManager: lm,
|
||||
ImageExportedCallback: opt.ImageExportedCallback,
|
||||
ImageStore: dist.ImageStore,
|
||||
ContentStore: store,
|
||||
Differ: differ,
|
||||
ImageTagger: opt.ImageTagger,
|
||||
LeaseManager: lm,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -380,14 +366,9 @@ func newGraphDriverController(ctx context.Context, rt http.RoundTripper, opt Opt
|
||||
}
|
||||
wc.Add(w)
|
||||
|
||||
gwf, err := gateway.NewGatewayFrontend(wc.Infos(), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
frontends := map[string]frontend.Frontend{
|
||||
"dockerfile.v0": forwarder.NewGatewayForwarder(wc.Infos(), dockerfile.Build),
|
||||
"gateway.v0": gwf,
|
||||
"gateway.v0": gateway.NewGatewayFrontend(wc.Infos()),
|
||||
}
|
||||
|
||||
return control.NewController(control.Opt{
|
||||
|
||||
@@ -113,20 +113,20 @@ func (iface *lnInterface) init(c *libnetwork.Controller, n *libnetwork.Network)
|
||||
defer close(iface.ready)
|
||||
id := identity.NewID()
|
||||
|
||||
ep, err := n.CreateEndpoint(context.TODO(), id, libnetwork.CreateOptionDisableResolution())
|
||||
ep, err := n.CreateEndpoint(id, libnetwork.CreateOptionDisableResolution())
|
||||
if err != nil {
|
||||
iface.err = err
|
||||
return
|
||||
}
|
||||
|
||||
sbx, err := c.NewSandbox(context.TODO(), id, libnetwork.OptionUseExternalKey(), libnetwork.OptionHostsPath(filepath.Join(iface.provider.Root, id, "hosts")),
|
||||
sbx, err := c.NewSandbox(id, libnetwork.OptionUseExternalKey(), libnetwork.OptionHostsPath(filepath.Join(iface.provider.Root, id, "hosts")),
|
||||
libnetwork.OptionResolvConfPath(filepath.Join(iface.provider.Root, id, "resolv.conf")))
|
||||
if err != nil {
|
||||
iface.err = err
|
||||
return
|
||||
}
|
||||
|
||||
if err := ep.Join(context.TODO(), sbx); err != nil {
|
||||
if err := ep.Join(sbx); err != nil {
|
||||
iface.err = err
|
||||
return
|
||||
}
|
||||
@@ -161,7 +161,7 @@ func (iface *lnInterface) Close() error {
|
||||
<-iface.ready
|
||||
if iface.sbx != nil {
|
||||
go func() {
|
||||
if err := iface.sbx.Delete(context.TODO()); err != nil {
|
||||
if err := iface.sbx.Delete(); err != nil {
|
||||
log.G(context.TODO()).WithError(err).Errorf("failed to delete builder network sandbox")
|
||||
}
|
||||
if err := os.RemoveAll(filepath.Join(iface.provider.Root, iface.sbx.ContainerID())); err != nil {
|
||||
|
||||
@@ -22,11 +22,11 @@ func newExecutor(_, _ string, _ *libnetwork.Controller, _ *oci.DNSConfig, _ bool
type stubExecutor struct{}

func (w *stubExecutor) Run(ctx context.Context, id string, root executor.Mount, mounts []executor.Mount, process executor.ProcessInfo, started chan<- struct{}) (resourcetypes.Recorder, error) {
    return nil, errors.New("buildkit executor not implemented for " + runtime.GOOS)
    return nil, errors.New("buildkit executor not implemented for "+runtime.GOOS)
}

func (w *stubExecutor) Exec(ctx context.Context, id string, process executor.ProcessInfo) error {
    return errors.New("buildkit executor not implemented for " + runtime.GOOS)
    return errors.New("buildkit executor not implemented for "+runtime.GOOS)
}

func getDNSConfig(config.DNSConfig) *oci.DNSConfig {
@@ -10,8 +10,8 @@ import (
|
||||
"github.com/containerd/containerd/leases"
|
||||
"github.com/containerd/log"
|
||||
distref "github.com/distribution/reference"
|
||||
builderexporter "github.com/docker/docker/builder/builder-next/exporter"
|
||||
"github.com/docker/docker/image"
|
||||
"github.com/docker/docker/internal/compatcontext"
|
||||
"github.com/docker/docker/layer"
|
||||
"github.com/moby/buildkit/exporter"
|
||||
"github.com/moby/buildkit/exporter/containerimage"
|
||||
@@ -33,12 +33,11 @@ type ImageTagger interface {
|
||||
|
||||
// Opt defines a struct for creating new exporter
|
||||
type Opt struct {
|
||||
ImageStore image.Store
|
||||
Differ Differ
|
||||
ImageTagger ImageTagger
|
||||
ContentStore content.Store
|
||||
LeaseManager leases.Manager
|
||||
ImageExportedCallback builderexporter.ImageExportedByBuildkit
|
||||
ImageStore image.Store
|
||||
Differ Differ
|
||||
ImageTagger ImageTagger
|
||||
ContentStore content.Store
|
||||
LeaseManager leases.Manager
|
||||
}
|
||||
|
||||
type imageExporter struct {
|
||||
@@ -51,13 +50,12 @@ func New(opt Opt) (exporter.Exporter, error) {
|
||||
return im, nil
|
||||
}
|
||||
|
||||
func (e *imageExporter) Resolve(ctx context.Context, id int, attrs map[string]string) (exporter.ExporterInstance, error) {
|
||||
func (e *imageExporter) Resolve(ctx context.Context, id int, opt map[string]string) (exporter.ExporterInstance, error) {
|
||||
i := &imageExporterInstance{
|
||||
imageExporter: e,
|
||||
id: id,
|
||||
attrs: attrs,
|
||||
}
|
||||
for k, v := range attrs {
|
||||
for k, v := range opt {
|
||||
switch exptypes.ImageExporterOptKey(k) {
|
||||
case exptypes.OptKeyName:
|
||||
for _, v := range strings.Split(v, ",") {
|
||||
@@ -82,17 +80,12 @@ type imageExporterInstance struct {
|
||||
id int
|
||||
targetNames []distref.Named
|
||||
meta map[string][]byte
|
||||
attrs map[string]string
|
||||
}
|
||||
|
||||
func (e *imageExporterInstance) ID() int {
|
||||
return e.id
|
||||
}
|
||||
|
||||
func (e *imageExporterInstance) Type() string {
|
||||
return "image"
|
||||
}
|
||||
|
||||
func (e *imageExporterInstance) Name() string {
|
||||
return "exporting to image"
|
||||
}
|
||||
@@ -101,10 +94,6 @@ func (e *imageExporterInstance) Config() *exporter.Config {
|
||||
return exporter.NewConfig()
|
||||
}
|
||||
|
||||
func (e *imageExporterInstance) Attrs() map[string]string {
|
||||
return e.attrs
|
||||
}
|
||||
|
||||
func (e *imageExporterInstance) Export(ctx context.Context, inp *exporter.Source, inlineCache exptypes.InlineCache, sessionID string) (map[string]string, exporter.DescriptorReference, error) {
|
||||
if len(inp.Refs) > 1 {
|
||||
return nil, nil, fmt.Errorf("exporting multiple references to image store is currently unsupported")
|
||||
@@ -228,10 +217,6 @@ func (e *imageExporterInstance) Export(ctx context.Context, inp *exporter.Source
|
||||
return nil, nil, fmt.Errorf("failed to create a temporary descriptor reference: %w", err)
|
||||
}
|
||||
|
||||
if e.opt.ImageExportedCallback != nil {
|
||||
e.opt.ImageExportedCallback(ctx, id.String(), descRef.Descriptor())
|
||||
}
|
||||
|
||||
return resp, descRef, nil
|
||||
}
|
||||
|
||||
@@ -245,7 +230,7 @@ func (e *imageExporterInstance) newTempReference(ctx context.Context, config []b
|
||||
}
|
||||
|
||||
unlease := func(ctx context.Context) error {
|
||||
err := done(context.WithoutCancel(ctx))
|
||||
err := done(compatcontext.WithoutCancel(ctx))
|
||||
if err != nil {
|
||||
log.G(ctx).WithError(err).Error("failed to delete descriptor reference lease")
|
||||
}
|
||||
|
||||
builder/builder-next/exporter/overrides/wrapper.go (new file, 37 lines)
@@ -0,0 +1,37 @@
|
||||
package overrides
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
|
||||
"github.com/moby/buildkit/exporter"
|
||||
"github.com/moby/buildkit/exporter/containerimage/exptypes"
|
||||
)
|
||||
|
||||
// Wraps the containerimage exporter's Resolve method to apply moby-specific
|
||||
// overrides to the exporter attributes.
|
||||
type imageExporterMobyWrapper struct {
|
||||
exp exporter.Exporter
|
||||
}
|
||||
|
||||
func NewExporterWrapper(exp exporter.Exporter) (exporter.Exporter, error) {
|
||||
return &imageExporterMobyWrapper{exp: exp}, nil
|
||||
}
|
||||
|
||||
// Resolve applies moby specific attributes to the request.
|
||||
func (e *imageExporterMobyWrapper) Resolve(ctx context.Context, id int, exporterAttrs map[string]string) (exporter.ExporterInstance, error) {
|
||||
if exporterAttrs == nil {
|
||||
exporterAttrs = make(map[string]string)
|
||||
}
|
||||
reposAndTags, err := SanitizeRepoAndTags(strings.Split(exporterAttrs[string(exptypes.OptKeyName)], ","))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
exporterAttrs[string(exptypes.OptKeyName)] = strings.Join(reposAndTags, ",")
|
||||
exporterAttrs[string(exptypes.OptKeyUnpack)] = "true"
|
||||
if _, has := exporterAttrs[string(exptypes.OptKeyDanglingPrefix)]; !has {
|
||||
exporterAttrs[string(exptypes.OptKeyDanglingPrefix)] = "moby-dangling"
|
||||
}
|
||||
|
||||
return e.exp.Resolve(ctx, id, exporterAttrs)
|
||||
}
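For context, a small sketch of the attribute rewriting this wrapper performs, reusing the same overrides helper and exptypes keys that appear in the file above; the input attrs map is illustrative, and SanitizeRepoAndTags is assumed to return the cleaned list of names as shown in the diff.

package main

import (
    "fmt"
    "strings"

    "github.com/docker/docker/builder/builder-next/exporter/overrides"
    "github.com/moby/buildkit/exporter/containerimage/exptypes"
)

func main() {
    // attrs mirrors what BuildKit would pass to Resolve; the image name is made up.
    attrs := map[string]string{string(exptypes.OptKeyName): "example.com/app:latest"}

    reposAndTags, err := overrides.SanitizeRepoAndTags(strings.Split(attrs[string(exptypes.OptKeyName)], ","))
    if err != nil {
        panic(err)
    }
    attrs[string(exptypes.OptKeyName)] = strings.Join(reposAndTags, ",")
    attrs[string(exptypes.OptKeyUnpack)] = "true" // images are always unpacked into the image store
    if _, ok := attrs[string(exptypes.OptKeyDanglingPrefix)]; !ok {
        attrs[string(exptypes.OptKeyDanglingPrefix)] = "moby-dangling" // default prefix for untagged builds
    }
    fmt.Println(attrs)
}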
|
||||
@@ -1,69 +0,0 @@
|
||||
package exporter
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/builder/builder-next/exporter/overrides"
|
||||
"github.com/moby/buildkit/exporter"
|
||||
"github.com/moby/buildkit/exporter/containerimage/exptypes"
|
||||
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
type ImageExportedByBuildkit = func(ctx context.Context, id string, desc ocispec.Descriptor) error
|
||||
|
||||
// Wraps the containerimage exporter's Resolve method to apply moby-specific
|
||||
// overrides to the exporter attributes.
|
||||
type imageExporterMobyWrapper struct {
|
||||
exp exporter.Exporter
|
||||
callback ImageExportedByBuildkit
|
||||
}
|
||||
|
||||
// NewWrapper returns an exporter wrapper that applies moby specific attributes
|
||||
// and hooks the export process.
|
||||
func NewWrapper(exp exporter.Exporter, callback ImageExportedByBuildkit) (exporter.Exporter, error) {
|
||||
return &imageExporterMobyWrapper{exp: exp, callback: callback}, nil
|
||||
}
|
||||
|
||||
// Resolve applies moby specific attributes to the request.
|
||||
func (e *imageExporterMobyWrapper) Resolve(ctx context.Context, id int, exporterAttrs map[string]string) (exporter.ExporterInstance, error) {
|
||||
if exporterAttrs == nil {
|
||||
exporterAttrs = make(map[string]string)
|
||||
}
|
||||
reposAndTags, err := overrides.SanitizeRepoAndTags(strings.Split(exporterAttrs[string(exptypes.OptKeyName)], ","))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
exporterAttrs[string(exptypes.OptKeyName)] = strings.Join(reposAndTags, ",")
|
||||
exporterAttrs[string(exptypes.OptKeyUnpack)] = "true"
|
||||
if _, has := exporterAttrs[string(exptypes.OptKeyDanglingPrefix)]; !has {
|
||||
exporterAttrs[string(exptypes.OptKeyDanglingPrefix)] = "moby-dangling"
|
||||
}
|
||||
|
||||
inst, err := e.exp.Resolve(ctx, id, exporterAttrs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &imageExporterInstanceWrapper{ExporterInstance: inst, callback: e.callback}, nil
|
||||
}
|
||||
|
||||
type imageExporterInstanceWrapper struct {
|
||||
exporter.ExporterInstance
|
||||
callback ImageExportedByBuildkit
|
||||
}
|
||||
|
||||
func (i *imageExporterInstanceWrapper) Export(ctx context.Context, src *exporter.Source, inlineCache exptypes.InlineCache, sessionID string) (map[string]string, exporter.DescriptorReference, error) {
|
||||
out, ref, err := i.ExporterInstance.Export(ctx, src, inlineCache, sessionID)
|
||||
if err != nil {
|
||||
return out, ref, err
|
||||
}
|
||||
|
||||
desc := ref.Descriptor()
|
||||
imageID := out[exptypes.ExporterImageDigestKey]
|
||||
if i.callback != nil {
|
||||
i.callback(ctx, imageID, desc)
|
||||
}
|
||||
return out, ref, nil
|
||||
}
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
|
||||
mobyexporter "github.com/docker/docker/builder/builder-next/exporter"
|
||||
"github.com/docker/docker/builder/builder-next/exporter/overrides"
|
||||
"github.com/moby/buildkit/client"
|
||||
"github.com/moby/buildkit/exporter"
|
||||
"github.com/moby/buildkit/session"
|
||||
@@ -13,16 +14,15 @@ import (
|
||||
// ContainerdWorker is a local worker instance with dedicated snapshotter, cache, and so on.
|
||||
type ContainerdWorker struct {
|
||||
*base.Worker
|
||||
callback mobyexporter.ImageExportedByBuildkit
|
||||
}
|
||||
|
||||
// NewContainerdWorker instantiates a local worker.
|
||||
func NewContainerdWorker(ctx context.Context, wo base.WorkerOpt, callback mobyexporter.ImageExportedByBuildkit) (*ContainerdWorker, error) {
|
||||
func NewContainerdWorker(ctx context.Context, wo base.WorkerOpt) (*ContainerdWorker, error) {
|
||||
bw, err := base.NewWorker(ctx, wo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &ContainerdWorker{Worker: bw, callback: callback}, nil
|
||||
return &ContainerdWorker{Worker: bw}, nil
|
||||
}
|
||||
|
||||
// Exporter returns exporter by name
|
||||
@@ -33,7 +33,7 @@ func (w *ContainerdWorker) Exporter(name string, sm *session.Manager) (exporter.
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return mobyexporter.NewWrapper(exp, w.callback)
|
||||
return overrides.NewExporterWrapper(exp)
|
||||
default:
|
||||
return w.Worker.Exporter(name, sm)
|
||||
}
|
||||
|
||||
@@ -4,6 +4,8 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"sort"
|
||||
|
||||
"github.com/docker/docker/runconfig/opts"
|
||||
)
|
||||
|
||||
// builtinAllowedBuildArgs is list of built-in allowed build args
|
||||
@@ -136,7 +138,7 @@ func (b *BuildArgs) getAllFromMapping(source map[string]*string) map[string]stri
|
||||
// FilterAllowed returns all allowed args without the filtered args
|
||||
func (b *BuildArgs) FilterAllowed(filter []string) []string {
|
||||
envs := []string{}
|
||||
configEnv := convertKVStringsToMap(filter)
|
||||
configEnv := opts.ConvertKVStringsToMap(filter)
|
||||
|
||||
for key, val := range b.GetAllAllowed() {
|
||||
if _, ok := configEnv[key]; !ok {
|
||||
|
||||
@@ -187,7 +187,7 @@ func buildLabelOptions(labels map[string]string, stages []instructions.Stage) {
|
||||
func (b *Builder) build(ctx context.Context, source builder.Source, dockerfile *parser.Result) (*builder.Result, error) {
|
||||
defer b.imageSources.Unmount()
|
||||
|
||||
stages, metaArgs, err := instructions.Parse(dockerfile.AST, nil)
|
||||
stages, metaArgs, err := instructions.Parse(dockerfile.AST)
|
||||
if err != nil {
|
||||
var uiErr *instructions.UnknownInstructionError
|
||||
if errors.As(err, &uiErr) {
|
||||
@@ -230,8 +230,7 @@ func processMetaArg(meta instructions.ArgCommand, shlex *shell.Lex, args *BuildA
|
||||
// shell.Lex currently only support the concatenated string format
|
||||
envs := convertMapToEnvList(args.GetAllAllowed())
|
||||
if err := meta.Expand(func(word string) (string, error) {
|
||||
newword, _, err := shlex.ProcessWord(word, envs)
|
||||
return newword, err
|
||||
return shlex.ProcessWord(word, envs)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -381,14 +380,3 @@ func convertMapToEnvList(m map[string]string) []string {
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// convertKVStringsToMap converts ["key=value"] to {"key":"value"}
|
||||
func convertKVStringsToMap(values []string) map[string]string {
|
||||
result := make(map[string]string, len(values))
|
||||
for _, value := range values {
|
||||
k, v, _ := strings.Cut(value, "=")
|
||||
result[k] = v
|
||||
}
|
||||
|
||||
return result
|
||||
}
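A tiny illustration of the helper being swapped here: both the local convertKVStringsToMap and opts.ConvertKVStringsToMap turn "key=value" strings into a map, which is how the PATH check in setDefaultPath (shown later) uses it. The function body is copied from the diff above so the sketch is self-contained; the environment values are made up.

package main

import (
    "fmt"
    "strings"
)

// convertKVStringsToMap converts ["key=value"] to {"key":"value"} (copied from the hunk above).
func convertKVStringsToMap(values []string) map[string]string {
    result := make(map[string]string, len(values))
    for _, value := range values {
        k, v, _ := strings.Cut(value, "=")
        result[k] = v
    }
    return result
}

func main() {
    env := convertKVStringsToMap([]string{"PATH=/usr/local/bin:/usr/bin", "LANG=C.UTF-8"})
    if _, ok := env["PATH"]; ok {
        fmt.Println("PATH already set to", env["PATH"]) // so no default PATH gets appended
    }
}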
|
||||
|
||||
@@ -477,7 +477,7 @@ func performCopyForInfo(dest copyInfo, source copyInfo, options copyFileOptions)
|
||||
}
|
||||
// dest.path must be used because destPath has already been cleaned of any
|
||||
// trailing slash
|
||||
if destExistsAsDir || strings.HasSuffix(dest.path, string(os.PathSeparator)) {
|
||||
if endsInSlash(dest.path) || destExistsAsDir {
|
||||
// source.path must be used to get the correct filename when the source
|
||||
// is a symlink
|
||||
destPath = filepath.Join(destPath, filepath.Base(source.path))
|
||||
@@ -524,6 +524,10 @@ func copyFile(archiver *archive.Archiver, source, dest string, identity *idtools
|
||||
return nil
|
||||
}
|
||||
|
||||
func endsInSlash(path string) bool {
|
||||
return strings.HasSuffix(path, string(filepath.Separator))
|
||||
}
|
||||
|
||||
// isExistingDirectory returns true if the path exists and is a directory
|
||||
func isExistingDirectory(path string) (bool, error) {
|
||||
destStat, err := os.Stat(path)
|
||||
|
||||
@@ -224,7 +224,7 @@ func (d *dispatchRequest) getExpandedString(shlex *shell.Lex, str string) (strin
    substitutionArgs = append(substitutionArgs, key+"="+value)
}

name, _, err := shlex.ProcessWord(str, substitutionArgs)
name, err := shlex.ProcessWord(str, substitutionArgs)
if err != nil {
    return "", err
}
@@ -30,6 +30,7 @@ import (
|
||||
"github.com/docker/docker/errdefs"
|
||||
"github.com/docker/docker/image"
|
||||
"github.com/docker/docker/oci"
|
||||
"github.com/docker/docker/runconfig/opts"
|
||||
"github.com/moby/buildkit/frontend/dockerfile/instructions"
|
||||
"github.com/moby/buildkit/frontend/dockerfile/shell"
|
||||
"github.com/pkg/errors"
|
||||
@@ -47,8 +48,7 @@ func dispatch(ctx context.Context, d dispatchRequest, cmd instructions.Command)
|
||||
|
||||
if ex, ok := cmd.(instructions.SupportsSingleWordExpansion); ok {
|
||||
err := ex.Expand(func(word string) (string, error) {
|
||||
newword, _, err := d.shlex.ProcessWord(word, envs)
|
||||
return newword, err
|
||||
return d.shlex.ProcessWord(word, envs)
|
||||
})
|
||||
if err != nil {
|
||||
return errdefs.InvalidParameter(err)
|
||||
@@ -242,7 +242,7 @@ func (s *dispatchState) setDefaultPath() {
|
||||
if defaultPath == "" {
|
||||
return
|
||||
}
|
||||
envMap := convertKVStringsToMap(s.runConfig.Env)
|
||||
envMap := opts.ConvertKVStringsToMap(s.runConfig.Env)
|
||||
if _, ok := envMap["PATH"]; !ok {
|
||||
s.runConfig.Env = append(s.runConfig.Env, "PATH="+defaultPath)
|
||||
}
|
||||
|
||||
@@ -17,11 +17,11 @@ import (
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/network"
|
||||
"github.com/docker/docker/builder"
|
||||
networkSettings "github.com/docker/docker/daemon/network"
|
||||
"github.com/docker/docker/image"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/pkg/chrootarchive"
|
||||
"github.com/docker/docker/pkg/stringid"
|
||||
"github.com/docker/docker/runconfig"
|
||||
"github.com/docker/go-connections/nat"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
@@ -130,24 +130,26 @@ func (b *Builder) performCopy(ctx context.Context, req dispatchRequest, inst cop
|
||||
commentStr := fmt.Sprintf("%s %s%s in %s ", inst.cmdName, chownComment, srcHash, inst.dest)
|
||||
|
||||
// TODO: should this have been using origPaths instead of srcHash in the comment?
|
||||
runConfigWithCommentCmd := copyRunConfig(state.runConfig, withCmdCommentString(commentStr, state.operatingSystem))
|
||||
runConfigWithCommentCmd := copyRunConfig(
|
||||
state.runConfig,
|
||||
withCmdCommentString(commentStr, state.operatingSystem))
|
||||
hit, err := b.probeCache(state, runConfigWithCommentCmd)
|
||||
if err != nil || hit {
|
||||
return err
|
||||
}
|
||||
|
||||
imgMount, err := b.imageSources.Get(ctx, state.imageID, true, req.builder.platform)
|
||||
imageMount, err := b.imageSources.Get(ctx, state.imageID, true, req.builder.platform)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to get destination image %q", state.imageID)
|
||||
}
|
||||
|
||||
rwLayer, err := imgMount.NewRWLayer()
|
||||
rwLayer, err := imageMount.NewRWLayer()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer rwLayer.Release()
|
||||
|
||||
destInfo, err := createDestInfo(state.runConfig.WorkingDir, inst, rwLayer)
|
||||
destInfo, err := createDestInfo(state.runConfig.WorkingDir, inst, rwLayer, state.operatingSystem)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -179,10 +181,10 @@ func (b *Builder) performCopy(ctx context.Context, req dispatchRequest, inst cop
|
||||
return errors.Wrapf(err, "failed to copy files")
|
||||
}
|
||||
}
|
||||
return b.exportImage(ctx, state, rwLayer, imgMount.Image(), runConfigWithCommentCmd)
|
||||
return b.exportImage(ctx, state, rwLayer, imageMount.Image(), runConfigWithCommentCmd)
|
||||
}
|
||||
|
||||
func createDestInfo(workingDir string, inst copyInstruction, rwLayer builder.RWLayer) (copyInfo, error) {
|
||||
func createDestInfo(workingDir string, inst copyInstruction, rwLayer builder.RWLayer, platform string) (copyInfo, error) {
|
||||
// Twiddle the destination when it's a relative path - meaning, make it
|
||||
// relative to the WORKINGDIR
|
||||
dest, err := normalizeDest(workingDir, inst.dest)
|
||||
@@ -278,38 +280,38 @@ func withoutHealthcheck() runConfigModifier {
|
||||
}
|
||||
|
||||
func copyRunConfig(runConfig *container.Config, modifiers ...runConfigModifier) *container.Config {
|
||||
cfgCopy := *runConfig
|
||||
cfgCopy.Cmd = copyStringSlice(runConfig.Cmd)
|
||||
cfgCopy.Env = copyStringSlice(runConfig.Env)
|
||||
cfgCopy.Entrypoint = copyStringSlice(runConfig.Entrypoint)
|
||||
cfgCopy.OnBuild = copyStringSlice(runConfig.OnBuild)
|
||||
cfgCopy.Shell = copyStringSlice(runConfig.Shell)
|
||||
copy := *runConfig
|
||||
copy.Cmd = copyStringSlice(runConfig.Cmd)
|
||||
copy.Env = copyStringSlice(runConfig.Env)
|
||||
copy.Entrypoint = copyStringSlice(runConfig.Entrypoint)
|
||||
copy.OnBuild = copyStringSlice(runConfig.OnBuild)
|
||||
copy.Shell = copyStringSlice(runConfig.Shell)
|
||||
|
||||
if cfgCopy.Volumes != nil {
|
||||
cfgCopy.Volumes = make(map[string]struct{}, len(runConfig.Volumes))
|
||||
if copy.Volumes != nil {
|
||||
copy.Volumes = make(map[string]struct{}, len(runConfig.Volumes))
|
||||
for k, v := range runConfig.Volumes {
|
||||
cfgCopy.Volumes[k] = v
|
||||
copy.Volumes[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
if cfgCopy.ExposedPorts != nil {
|
||||
cfgCopy.ExposedPorts = make(nat.PortSet, len(runConfig.ExposedPorts))
|
||||
if copy.ExposedPorts != nil {
|
||||
copy.ExposedPorts = make(nat.PortSet, len(runConfig.ExposedPorts))
|
||||
for k, v := range runConfig.ExposedPorts {
|
||||
cfgCopy.ExposedPorts[k] = v
|
||||
copy.ExposedPorts[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
if cfgCopy.Labels != nil {
|
||||
cfgCopy.Labels = make(map[string]string, len(runConfig.Labels))
|
||||
if copy.Labels != nil {
|
||||
copy.Labels = make(map[string]string, len(runConfig.Labels))
|
||||
for k, v := range runConfig.Labels {
|
||||
cfgCopy.Labels[k] = v
|
||||
copy.Labels[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
for _, modifier := range modifiers {
    modifier(&cfgCopy)
    modifier(&copy)
}
return &cfgCopy
return &copy
}
|
||||
|
||||
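copyRunConfig above deep-copies the slice and map fields before applying functional modifiers such as withoutHealthcheck(), so no modifier can mutate the original config. A stripped-down sketch of that copy-then-modify pattern (the config type and withEnv modifier below are made up for illustration, not the builder's actual types):

```go
package main

import "fmt"

// config is a stand-in for container.Config in this sketch.
type config struct {
	Cmd []string
	Env []string
}

type modifier func(*config)

// withEnv is a hypothetical modifier in the style of withoutHealthcheck().
func withEnv(env ...string) modifier {
	return func(c *config) { c.Env = append(c.Env, env...) }
}

// copyConfig copies the struct and its slices so modifiers never touch the original.
func copyConfig(orig *config, mods ...modifier) *config {
	cp := *orig
	cp.Cmd = append([]string(nil), orig.Cmd...)
	cp.Env = append([]string(nil), orig.Env...)
	for _, m := range mods {
		m(&cp)
	}
	return &cp
}

func main() {
	base := &config{Cmd: []string{"sh"}, Env: []string{"A=1"}}
	derived := copyConfig(base, withEnv("B=2"))
	fmt.Println(base.Env, derived.Env) // [A=1] [A=1 B=2]
}
```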
func copyStringSlice(orig []string) []string {
@@ -333,7 +335,7 @@ func (b *Builder) probeCache(dispatchState *dispatchState, runConfig *container.
if cachedID == "" || err != nil {
return false, err
}
_, _ = fmt.Fprintln(b.Stdout, " ---> Using cache")
fmt.Fprint(b.Stdout, " ---> Using cache\n")

dispatchState.imageID = cachedID
return true, nil
@@ -352,15 +354,16 @@ func (b *Builder) create(ctx context.Context, runConfig *container.Config) (stri
log.G(ctx).Debugf("[BUILDER] Command to be executed: %v", runConfig.Cmd)

hostConfig := hostConfigFromOptions(b.options)
ctr, err := b.containerManager.Create(ctx, runConfig, hostConfig)
container, err := b.containerManager.Create(ctx, runConfig, hostConfig)
if err != nil {
return "", err
}
for _, warning := range ctr.Warnings {
_, _ = fmt.Fprintf(b.Stdout, " ---> [Warning] %s\n", warning)
// TODO: could this be moved into containerManager.Create() ?
for _, warning := range container.Warnings {
fmt.Fprintf(b.Stdout, " ---> [Warning] %s\n", warning)
}
_, _ = fmt.Fprintf(b.Stdout, " ---> Running in %s\n", stringid.TruncateID(ctr.ID))
return ctr.ID, nil
fmt.Fprintf(b.Stdout, " ---> Running in %s\n", stringid.TruncateID(container.ID))
return container.ID, nil
}

func hostConfigFromOptions(options *types.ImageBuildOptions) *container.HostConfig {
@@ -382,7 +385,7 @@ func hostConfigFromOptions(options *types.ImageBuildOptions) *container.HostConf
// This is in line with what the ContainerCreate API endpoint does.
networkMode := options.NetworkMode
if networkMode == "" || networkMode == network.NetworkDefault {
networkMode = networkSettings.DefaultNetwork
networkMode = runconfig.DefaultDaemonNetworkMode().NetworkName()
}

hc := &container.HostConfig{

@@ -27,25 +27,25 @@ func parseChownFlag(ctx context.Context, builder *Builder, state *dispatchState,

passwdPath, err := symlink.FollowSymlinkInScope(filepath.Join(ctrRootPath, "etc", "passwd"), ctrRootPath)
if err != nil {
return idtools.Identity{}, errors.Wrapf(err, "can't resolve /etc/passwd path in container rootfs")
return idtools.Identity{}, errors.Wrap(err, "can't resolve /etc/passwd path in container rootfs")
}
groupPath, err := symlink.FollowSymlinkInScope(filepath.Join(ctrRootPath, "etc", "group"), ctrRootPath)
if err != nil {
return idtools.Identity{}, errors.Wrapf(err, "can't resolve /etc/group path in container rootfs")
return idtools.Identity{}, errors.Wrap(err, "can't resolve /etc/group path in container rootfs")
}
uid, err := lookupUser(userStr, passwdPath)
if err != nil {
return idtools.Identity{}, errors.Wrapf(err, "can't find uid for user "+userStr)
return idtools.Identity{}, errors.Wrap(err, "can't find uid for user "+userStr)
}
gid, err := lookupGroup(grpStr, groupPath)
if err != nil {
return idtools.Identity{}, errors.Wrapf(err, "can't find gid for group "+grpStr)
return idtools.Identity{}, errors.Wrap(err, "can't find gid for group "+grpStr)
}

// convert as necessary because of user namespaces
chownPair, err := identityMapping.ToHost(idtools.Identity{UID: uid, GID: gid})
if err != nil {
return idtools.Identity{}, errors.Wrapf(err, "unable to convert uid/gid to host mapping")
return idtools.Identity{}, errors.Wrap(err, "unable to convert uid/gid to host mapping")
}
return chownPair, nil
}
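lookupUser and lookupGroup are not shown in this hunk; they resolve names to numeric ids against the passwd/group files found inside the container rootfs before the result is mapped to host ids for user namespaces. A rough, self-contained sketch of that style of lookup against passwd-formatted content (hypothetical helper, not the builder's implementation):

```go
package main

import (
	"bufio"
	"fmt"
	"strconv"
	"strings"
)

// lookupUID scans passwd-formatted content ("name:passwd:uid:gid:...") and
// returns the numeric uid for the given user name.
func lookupUID(passwdContent, user string) (int, error) {
	scanner := bufio.NewScanner(strings.NewReader(passwdContent))
	for scanner.Scan() {
		fields := strings.Split(scanner.Text(), ":")
		if len(fields) >= 3 && fields[0] == user {
			return strconv.Atoi(fields[2])
		}
	}
	return 0, fmt.Errorf("can't find uid for user %s", user)
}

func main() {
	passwd := "root:x:0:0:root:/root:/bin/sh\nnobody:x:65534:65534::/:/sbin/nologin\n"
	uid, err := lookupUID(passwd, "nobody")
	fmt.Println(uid, err) // 65534 <nil>
}
```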
@@ -44,8 +44,8 @@ func downloadRemote(remoteURL string) (string, io.ReadCloser, error) {
// GetWithStatusError does an http.Get() and returns an error if the
// status code is 4xx or 5xx.
func GetWithStatusError(address string) (resp *http.Response, err error) {
// #nosec G107
if resp, err = http.Get(address); err != nil {
resp, err = http.Get(address) // #nosec G107 -- ignore G107: Potential HTTP request made with variable url
if err != nil {
if uerr, ok := err.(*url.Error); ok {
if derr, ok := uerr.Err.(*net.DNSError); ok && !derr.IsTimeout {
return nil, errdefs.NotFound(err)
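As the doc comment says, GetWithStatusError performs a plain http.Get() and converts a 4xx/5xx status into an error. A minimal stand-alone sketch of that behaviour using only the standard library (the real function additionally maps DNS failures to errdefs.NotFound and folds the response body into the error message):

```go
package main

import (
	"fmt"
	"net/http"
)

// getWithStatusError mirrors the behaviour described above: perform a GET and
// turn a 4xx/5xx status into an error instead of returning it silently.
func getWithStatusError(address string) (*http.Response, error) {
	resp, err := http.Get(address) // #nosec G107 -- the URL is caller-provided by design
	if err != nil {
		return nil, err
	}
	if resp.StatusCode >= 400 {
		resp.Body.Close()
		return nil, fmt.Errorf("GET %s returned status %d", address, resp.StatusCode)
	}
	return resp, nil
}

func main() {
	resp, err := getWithStatusError("https://example.com/")
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```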
@@ -1,4 +1,4 @@
package debug // import "github.com/docker/docker/cmd/dockerd/debug"
package debug // import "github.com/docker/docker/cli/debug"

import (
"os"
@@ -1,4 +1,4 @@
package debug // import "github.com/docker/docker/cmd/dockerd/debug"
package debug // import "github.com/docker/docker/cli/debug"

import (
"os"
@@ -49,8 +49,6 @@ import (
"net/url"
"path"
"strings"
"sync"
"sync/atomic"
"time"

"github.com/docker/docker/api"
@@ -133,10 +131,7 @@ type Client struct {
negotiateVersion bool

// negotiated indicates that API version negotiation took place
negotiated atomic.Bool

// negotiateLock is used to single-flight the version negotiation process
negotiateLock sync.Mutex
negotiated bool

tp trace.TracerProvider

@@ -271,16 +266,7 @@ func (cli *Client) Close() error {
// be negotiated when making the actual requests, and for which cases
// we cannot do the negotiation lazily.
func (cli *Client) checkVersion(ctx context.Context) error {
if !cli.manualOverride && cli.negotiateVersion && !cli.negotiated.Load() {
// Ensure exclusive write access to version and negotiated fields
cli.negotiateLock.Lock()
defer cli.negotiateLock.Unlock()

// May have been set during last execution of critical zone
if cli.negotiated.Load() {
return nil
}

if !cli.manualOverride && cli.negotiateVersion && !cli.negotiated {
ping, err := cli.Ping(ctx)
if err != nil {
return err
@@ -326,10 +312,6 @@ func (cli *Client) ClientVersion() string {
// added (1.24).
func (cli *Client) NegotiateAPIVersion(ctx context.Context) {
if !cli.manualOverride {
// Avoid concurrent modification of version-related fields
cli.negotiateLock.Lock()
defer cli.negotiateLock.Unlock()

ping, err := cli.Ping(ctx)
if err != nil {
// FIXME(thaJeztah): Ping returns an error when failing to connect to the API; we should not swallow the error here, and instead returning it.
@@ -354,10 +336,6 @@ func (cli *Client) NegotiateAPIVersionPing(pingResponse types.Ping) {
// added (1.24).
func (cli *Client) NegotiateAPIVersionPing(pingResponse types.Ping) {
if !cli.manualOverride {
// Avoid concurrent modification of version-related fields
cli.negotiateLock.Lock()
defer cli.negotiateLock.Unlock()

cli.negotiateAPIVersionPing(pingResponse)
}
}
@@ -383,7 +361,7 @@ func (cli *Client) negotiateAPIVersionPing(pingResponse types.Ping) {
// Store the results, so that automatic API version negotiation (if enabled)
// won't be performed on the next request.
if cli.negotiateVersion {
cli.negotiated.Store(true)
cli.negotiated = true
}
}

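One side of this hunk protects version negotiation with a sync.Mutex plus an atomic.Bool so that concurrent callers negotiate at most once (double-checked locking). A generic sketch of that single-flight pattern, independent of the actual Client type:

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type negotiator struct {
	mu         sync.Mutex  // guards the negotiation itself
	negotiated atomic.Bool // fast-path flag checked without the lock
}

// ensureNegotiated runs fn at most once even when called from many goroutines.
func (n *negotiator) ensureNegotiated(fn func()) {
	if n.negotiated.Load() {
		return
	}
	n.mu.Lock()
	defer n.mu.Unlock()
	// Re-check: another goroutine may have negotiated while we waited.
	if n.negotiated.Load() {
		return
	}
	fn()
	n.negotiated.Store(true)
}

func main() {
	var n negotiator
	var calls int32
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			n.ensureNegotiated(func() { atomic.AddInt32(&calls, 1) })
		}()
	}
	wg.Wait()
	fmt.Println("negotiations:", calls) // negotiations: 1
}
```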
@@ -342,7 +342,7 @@ func TestNegotiateAPIVersion(t *testing.T) {

// TestNegotiateAPIVersionOverride asserts that we honor the DOCKER_API_VERSION
// environment variable when negotiating versions.
func TestNegotiateAPIVersionOverride(t *testing.T) {
func TestNegotiateAPVersionOverride(t *testing.T) {
const expected = "9.99"
t.Setenv("DOCKER_API_VERSION", expected)

@@ -354,9 +354,9 @@ func TestNegotiateAPIVersionOverride(t *testing.T) {
assert.Equal(t, client.ClientVersion(), expected)
}

// TestNegotiateAPIVersionConnectionFailure asserts that we do not modify the
// TestNegotiateAPVersionConnectionFailure asserts that we do not modify the
// API version when failing to connect.
func TestNegotiateAPIVersionConnectionFailure(t *testing.T) {
func TestNegotiateAPVersionConnectionFailure(t *testing.T) {
const expected = "9.99"

client, err := NewClientWithOpts(WithHost("tcp://no-such-host.invalid"))

@@ -11,11 +11,11 @@ import (
"path/filepath"
"strings"

"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types"
)

// ContainerStatPath returns stat information about a path inside the container filesystem.
func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path string) (container.PathStat, error) {
func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path string) (types.ContainerPathStat, error) {
query := url.Values{}
query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API.

@@ -23,14 +23,14 @@ func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path stri
response, err := cli.head(ctx, urlStr, query, nil)
defer ensureReaderClosed(response)
if err != nil {
return container.PathStat{}, err
return types.ContainerPathStat{}, err
}
return getContainerPathStatFromHeader(response.header)
}

// CopyToContainer copies content into the container filesystem.
// Note that `content` must be a Reader for a TAR archive
func (cli *Client) CopyToContainer(ctx context.Context, containerID, dstPath string, content io.Reader, options container.CopyToContainerOptions) error {
func (cli *Client) CopyToContainer(ctx context.Context, containerID, dstPath string, content io.Reader, options types.CopyToContainerOptions) error {
query := url.Values{}
query.Set("path", filepath.ToSlash(dstPath)) // Normalize the paths used in the API.
// Do not allow for an existing directory to be overwritten by a non-directory and vice versa.
@@ -55,14 +55,14 @@ func (cli *Client) CopyToContainer(ctx context.Context, containerID, dstPath str

// CopyFromContainer gets the content from the container and returns it as a Reader
// for a TAR archive to manipulate it in the host. It's up to the caller to close the reader.
func (cli *Client) CopyFromContainer(ctx context.Context, containerID, srcPath string) (io.ReadCloser, container.PathStat, error) {
func (cli *Client) CopyFromContainer(ctx context.Context, containerID, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) {
query := make(url.Values, 1)
query.Set("path", filepath.ToSlash(srcPath)) // Normalize the paths used in the API.

apiPath := "/containers/" + containerID + "/archive"
response, err := cli.get(ctx, apiPath, query, nil)
if err != nil {
return nil, container.PathStat{}, err
return nil, types.ContainerPathStat{}, err
}

// In order to get the copy behavior right, we need to know information
@@ -78,8 +78,8 @@ func (cli *Client) CopyFromContainer(ctx context.Context, containerID, srcPath s
return response.body, stat, err
}

func getContainerPathStatFromHeader(header http.Header) (container.PathStat, error) {
var stat container.PathStat
func getContainerPathStatFromHeader(header http.Header) (types.ContainerPathStat, error) {
var stat types.ContainerPathStat

encodedStat := header.Get("X-Docker-Container-Path-Stat")
statDecoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(encodedStat))
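getContainerPathStatFromHeader above reads the X-Docker-Container-Path-Stat header, which carries the stat information as base64-encoded JSON. A small stand-alone sketch of that decoding step (the pathStat struct below is a trimmed-down stand-in for the real type):

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"strings"
)

// pathStat is a minimal stand-in for the stat structure carried in the
// X-Docker-Container-Path-Stat header.
type pathStat struct {
	Name string `json:"name"`
	Size int64  `json:"size"`
}

// decodePathStatHeader chains a base64 decoder into a JSON decoder, exactly
// the shape of decoding shown in the hunk above.
func decodePathStatHeader(encoded string) (pathStat, error) {
	var stat pathStat
	dec := base64.NewDecoder(base64.StdEncoding, strings.NewReader(encoded))
	err := json.NewDecoder(dec).Decode(&stat)
	return stat, err
}

func main() {
	raw := base64.StdEncoding.EncodeToString([]byte(`{"name":"etc","size":4096}`))
	stat, err := decodePathStatHeader(raw)
	fmt.Println(stat, err) // {etc 4096} <nil>
}
```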
@@ -11,7 +11,7 @@ import (
"strings"
"testing"

"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types"
"github.com/docker/docker/errdefs"
"gotest.tools/v3/assert"
is "gotest.tools/v3/assert/cmp"
@@ -64,7 +64,7 @@ func TestContainerStatPath(t *testing.T) {
if path != expectedPath {
return nil, fmt.Errorf("path not set in URL query properly")
}
content, err := json.Marshal(container.PathStat{
content, err := json.Marshal(types.ContainerPathStat{
Name: "name",
Mode: 0o700,
})
@@ -97,7 +97,7 @@ func TestCopyToContainerError(t *testing.T) {
client := &Client{
client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
}
err := client.CopyToContainer(context.Background(), "container_id", "path/to/file", bytes.NewReader([]byte("")), container.CopyToContainerOptions{})
err := client.CopyToContainer(context.Background(), "container_id", "path/to/file", bytes.NewReader([]byte("")), types.CopyToContainerOptions{})
assert.Check(t, is.ErrorType(err, errdefs.IsSystem))
}

@@ -105,7 +105,7 @@ func TestCopyToContainerNotFoundError(t *testing.T) {
client := &Client{
client: newMockClient(errorMock(http.StatusNotFound, "Not found")),
}
err := client.CopyToContainer(context.Background(), "container_id", "path/to/file", bytes.NewReader([]byte("")), container.CopyToContainerOptions{})
err := client.CopyToContainer(context.Background(), "container_id", "path/to/file", bytes.NewReader([]byte("")), types.CopyToContainerOptions{})
assert.Check(t, is.ErrorType(err, errdefs.IsNotFound))
}

@@ -115,7 +115,7 @@ func TestCopyToContainerEmptyResponse(t *testing.T) {
client := &Client{
client: newMockClient(errorMock(http.StatusNoContent, "No content")),
}
err := client.CopyToContainer(context.Background(), "container_id", "path/to/file", bytes.NewReader([]byte("")), container.CopyToContainerOptions{})
err := client.CopyToContainer(context.Background(), "container_id", "path/to/file", bytes.NewReader([]byte("")), types.CopyToContainerOptions{})
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
@@ -159,7 +159,7 @@ func TestCopyToContainer(t *testing.T) {
}, nil
}),
}
err := client.CopyToContainer(context.Background(), "container_id", expectedPath, bytes.NewReader([]byte("content")), container.CopyToContainerOptions{
err := client.CopyToContainer(context.Background(), "container_id", expectedPath, bytes.NewReader([]byte("content")), types.CopyToContainerOptions{
AllowOverwriteDirWithFile: false,
})
if err != nil {
@@ -188,7 +188,7 @@ func TestCopyFromContainerNotFoundError(t *testing.T) {
func TestCopyFromContainerEmptyResponse(t *testing.T) {
client := &Client{
client: newMockClient(func(req *http.Request) (*http.Response, error) {
content, err := json.Marshal(container.PathStat{
content, err := json.Marshal(types.ContainerPathStat{
Name: "path/to/file",
Mode: 0o700,
})
@@ -242,7 +242,7 @@ func TestCopyFromContainer(t *testing.T) {
return nil, fmt.Errorf("path not set in URL query properly, expected '%s', got %s", expectedPath, path)
}

headercontent, err := json.Marshal(container.PathStat{
headercontent, err := json.Marshal(types.ContainerPathStat{
Name: "name",
Mode: 0o700,
})
@@ -6,12 +6,11 @@ import (
"net/http"

"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/versions"
)

// ContainerExecCreate creates a new exec configuration to run an exec process.
func (cli *Client) ContainerExecCreate(ctx context.Context, container string, options container.ExecOptions) (types.IDResponse, error) {
func (cli *Client) ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) {
var response types.IDResponse

// Make sure we negotiated (if the client is configured to do so),
@@ -23,14 +22,14 @@ func (cli *Client) ContainerExecCreate(ctx context.Context, container string, op
return response, err
}

if err := cli.NewVersionError(ctx, "1.25", "env"); len(options.Env) != 0 && err != nil {
if err := cli.NewVersionError(ctx, "1.25", "env"); len(config.Env) != 0 && err != nil {
return response, err
}
if versions.LessThan(cli.ClientVersion(), "1.42") {
options.ConsoleSize = nil
config.ConsoleSize = nil
}

resp, err := cli.post(ctx, "/containers/"+container+"/exec", nil, options, nil)
resp, err := cli.post(ctx, "/containers/"+container+"/exec", nil, config, nil)
defer ensureReaderClosed(resp)
if err != nil {
return response, err
@@ -40,7 +39,7 @@ func (cli *Client) ContainerExecCreate(ctx context.Context, container string, op
}

// ContainerExecStart starts an exec process already created in the docker host.
func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config container.ExecStartOptions) error {
func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error {
if versions.LessThan(cli.ClientVersion(), "1.42") {
config.ConsoleSize = nil
}
@@ -53,7 +52,7 @@ func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config
// It returns a types.HijackedConnection with the hijacked connection
// and the a reader to get output. It's up to the called to close
// the hijacked connection by calling types.HijackedResponse.Close.
func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config container.ExecAttachOptions) (types.HijackedResponse, error) {
func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error) {
if versions.LessThan(cli.ClientVersion(), "1.42") {
config.ConsoleSize = nil
}
@@ -63,8 +62,8 @@ func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, confi
}

// ContainerExecInspect returns information about a specific exec process on the docker host.
func (cli *Client) ContainerExecInspect(ctx context.Context, execID string) (container.ExecInspect, error) {
var response container.ExecInspect
func (cli *Client) ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) {
var response types.ContainerExecInspect
resp, err := cli.get(ctx, "/exec/"+execID+"/json", nil, nil)
if err != nil {
return response, err
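For context, this is roughly how the exec API is driven from the types.ExecConfig/types.ExecStartCheck side of this diff. The sketch below is illustrative only: the container name is a placeholder, and it compiles only against a client version that still exposes those types:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	ctx := context.Background()
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// Create the exec process inside an existing container ("my-container" is a placeholder).
	created, err := cli.ContainerExecCreate(ctx, "my-container", types.ExecConfig{
		Cmd:          []string{"echo", "hello"},
		AttachStdout: true,
	})
	if err != nil {
		panic(err)
	}

	// Start it detached; use ContainerExecAttach instead to stream its output.
	if err := cli.ContainerExecStart(ctx, created.ID, types.ExecStartCheck{Detach: true}); err != nil {
		panic(err)
	}
	fmt.Println("started exec", created.ID)
}
```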
@@ -11,7 +11,6 @@ import (
"testing"

"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/errdefs"
"gotest.tools/v3/assert"
is "gotest.tools/v3/assert/cmp"
@@ -21,7 +20,7 @@ func TestContainerExecCreateError(t *testing.T) {
client := &Client{
client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
}
_, err := client.ContainerExecCreate(context.Background(), "container_id", container.ExecOptions{})
_, err := client.ContainerExecCreate(context.Background(), "container_id", types.ExecConfig{})
assert.Check(t, is.ErrorType(err, errdefs.IsSystem))
}

@@ -33,7 +32,7 @@ func TestContainerExecCreateConnectionError(t *testing.T) {
client, err := NewClientWithOpts(WithAPIVersionNegotiation(), WithHost("tcp://no-such-host.invalid"))
assert.NilError(t, err)

_, err = client.ContainerExecCreate(context.Background(), "", container.ExecOptions{})
_, err = client.ContainerExecCreate(context.Background(), "", types.ExecConfig{})
assert.Check(t, is.ErrorType(err, IsErrConnectionFailed))
}

@@ -51,7 +50,7 @@ func TestContainerExecCreate(t *testing.T) {
if err := req.ParseForm(); err != nil {
return nil, err
}
execConfig := &container.ExecOptions{}
execConfig := &types.ExecConfig{}
if err := json.NewDecoder(req.Body).Decode(execConfig); err != nil {
return nil, err
}
@@ -71,7 +70,7 @@ func TestContainerExecCreate(t *testing.T) {
}),
}

r, err := client.ContainerExecCreate(context.Background(), "container_id", container.ExecOptions{
r, err := client.ContainerExecCreate(context.Background(), "container_id", types.ExecConfig{
User: "user",
})
if err != nil {
@@ -86,7 +85,7 @@ func TestContainerExecStartError(t *testing.T) {
client := &Client{
client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
}
err := client.ContainerExecStart(context.Background(), "nothing", container.ExecStartOptions{})
err := client.ContainerExecStart(context.Background(), "nothing", types.ExecStartCheck{})
assert.Check(t, is.ErrorType(err, errdefs.IsSystem))
}

@@ -100,12 +99,12 @@ func TestContainerExecStart(t *testing.T) {
if err := req.ParseForm(); err != nil {
return nil, err
}
options := &container.ExecStartOptions{}
if err := json.NewDecoder(req.Body).Decode(options); err != nil {
execStartCheck := &types.ExecStartCheck{}
if err := json.NewDecoder(req.Body).Decode(execStartCheck); err != nil {
return nil, err
}
if options.Tty || !options.Detach {
return nil, fmt.Errorf("expected ExecStartOptions{Detach:true,Tty:false}, got %v", options)
if execStartCheck.Tty || !execStartCheck.Detach {
return nil, fmt.Errorf("expected execStartCheck{Detach:true,Tty:false}, got %v", execStartCheck)
}

return &http.Response{
@@ -115,7 +114,7 @@ func TestContainerExecStart(t *testing.T) {
}),
}

err := client.ContainerExecStart(context.Background(), "exec_id", container.ExecStartOptions{
err := client.ContainerExecStart(context.Background(), "exec_id", types.ExecStartCheck{
Detach: true,
Tty: false,
})
@@ -139,7 +138,7 @@ func TestContainerExecInspect(t *testing.T) {
if !strings.HasPrefix(req.URL.Path, expectedURL) {
return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL)
}
b, err := json.Marshal(container.ExecInspect{
b, err := json.Marshal(types.ContainerExecInspect{
ExecID: "exec_id",
ContainerID: "container_id",
})

@@ -5,13 +5,13 @@ import (
"encoding/json"
"fmt"

"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
)

// ContainersPrune requests the daemon to delete unused data
func (cli *Client) ContainersPrune(ctx context.Context, pruneFilters filters.Args) (container.PruneReport, error) {
var report container.PruneReport
func (cli *Client) ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) {
var report types.ContainersPruneReport

if err := cli.NewVersionError(ctx, "1.25", "container prune"); err != nil {
return report, err

@@ -10,7 +10,7 @@ import (
"strings"
"testing"

"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/errdefs"
"gotest.tools/v3/assert"
@@ -93,7 +93,7 @@ func TestContainersPrune(t *testing.T) {
actual := query.Get(key)
assert.Check(t, is.Equal(expected, actual))
}
content, err := json.Marshal(container.PruneReport{
content, err := json.Marshal(types.ContainersPruneReport{
ContainersDeleted: []string{"container_id1", "container_id2"},
SpaceReclaimed: 9999,
})
@@ -4,12 +4,12 @@ import (
"context"
"net/url"

"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types"
)

// ContainerStats returns near realtime stats for a given container.
// It's up to the caller to close the io.ReadCloser returned.
func (cli *Client) ContainerStats(ctx context.Context, containerID string, stream bool) (container.StatsResponseReader, error) {
func (cli *Client) ContainerStats(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) {
query := url.Values{}
query.Set("stream", "0")
if stream {
@@ -18,10 +18,10 @@ func (cli *Client) ContainerStats(ctx context.Context, containerID string, strea

resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil)
if err != nil {
return container.StatsResponseReader{}, err
return types.ContainerStats{}, err
}

return container.StatsResponseReader{
return types.ContainerStats{
Body: resp.body,
OSType: getDockerOS(resp.header.Get("Server")),
}, nil
@@ -29,17 +29,17 @@ func (cli *Client) ContainerStats(ctx context.Context, containerID string, strea

// ContainerStatsOneShot gets a single stat entry from a container.
// It differs from `ContainerStats` in that the API should not wait to prime the stats
func (cli *Client) ContainerStatsOneShot(ctx context.Context, containerID string) (container.StatsResponseReader, error) {
func (cli *Client) ContainerStatsOneShot(ctx context.Context, containerID string) (types.ContainerStats, error) {
query := url.Values{}
query.Set("stream", "0")
query.Set("one-shot", "1")

resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil)
if err != nil {
return container.StatsResponseReader{}, err
return types.ContainerStats{}, err
}

return container.StatsResponseReader{
return types.ContainerStats{
Body: resp.body,
OSType: getDockerOS(resp.header.Get("Server")),
}, nil
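Both sides of this hunk return a struct whose Body is a raw JSON stream that the caller must close. A hedged sketch of consuming a one-shot stats response while staying independent of the concrete stats type (the container name is a placeholder):

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	ctx := context.Background()
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// "my-container" is a placeholder; the caller must close the returned body.
	stats, err := cli.ContainerStatsOneShot(ctx, "my-container")
	if err != nil {
		panic(err)
	}
	defer stats.Body.Close()

	// Decode into a generic map so the sketch works with either return type in this diff.
	var raw map[string]interface{}
	if err := json.NewDecoder(stats.Body).Decode(&raw); err != nil {
		panic(err)
	}
	fmt.Println("os:", stats.OSType, "top-level keys:", len(raw))
}
```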
@@ -6,6 +6,7 @@ import (
"net/url"
"time"

"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/events"
"github.com/docker/docker/api/types/filters"
timetypes "github.com/docker/docker/api/types/time"
@@ -15,7 +16,7 @@ import (
// by cancelling the context. Once the stream has been completely read an io.EOF error will
// be sent over the error channel. If an error is sent all processing will be stopped. It's up
// to the caller to reopen the stream in the event of an error by reinvoking this method.
func (cli *Client) Events(ctx context.Context, options events.ListOptions) (<-chan events.Message, <-chan error) {
func (cli *Client) Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) {
messages := make(chan events.Message)
errs := make(chan error, 1)

@@ -67,7 +68,7 @@ func (cli *Client) Events(ctx context.Context, options events.ListOptions) (<-ch
return messages, errs
}

func buildEventsQueryParams(cliVersion string, options events.ListOptions) (url.Values, error) {
func buildEventsQueryParams(cliVersion string, options types.EventsOptions) (url.Values, error) {
query := url.Values{}
ref := time.Now()

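Per the doc comment above, Events returns a message channel and an error channel; io.EOF on the error channel means the stream was fully read, and the caller re-invokes Events to reopen it. A minimal consumer sketch, written against the events.ListOptions side of this diff:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/docker/docker/api/types/events"
	"github.com/docker/docker/client"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// On the other side of this diff the empty options would be types.EventsOptions{}.
	messages, errs := cli.Events(ctx, events.ListOptions{})
	for {
		select {
		case msg := <-messages:
			fmt.Println(msg.Type, msg.Action)
		case err := <-errs:
			// io.EOF means the stream ended; any other error means the caller
			// should re-invoke Events to reopen the stream.
			fmt.Println("events stream stopped:", err)
			return
		}
	}
}
```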
@@ -10,6 +10,7 @@ import (
"strings"
"testing"

"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/events"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/errdefs"
@@ -19,17 +20,17 @@ import (

func TestEventsErrorInOptions(t *testing.T) {
errorCases := []struct {
options events.ListOptions
options types.EventsOptions
expectedError string
}{
{
options: events.ListOptions{
options: types.EventsOptions{
Since: "2006-01-02TZ",
},
expectedError: `parsing time "2006-01-02TZ"`,
},
{
options: events.ListOptions{
options: types.EventsOptions{
Until: "2006-01-02TZ",
},
expectedError: `parsing time "2006-01-02TZ"`,
@@ -51,7 +52,7 @@ func TestEventsErrorFromServer(t *testing.T) {
client := &Client{
client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
}
_, errs := client.Events(context.Background(), events.ListOptions{})
_, errs := client.Events(context.Background(), types.EventsOptions{})
err := <-errs
assert.Check(t, is.ErrorType(err, errdefs.IsSystem))
}
@@ -63,13 +64,13 @@ func TestEvents(t *testing.T) {
expectedFiltersJSON := fmt.Sprintf(`{"type":{"%s":true}}`, events.ContainerEventType)

eventsCases := []struct {
options events.ListOptions
options types.EventsOptions
events []events.Message
expectedEvents map[string]bool
expectedQueryParams map[string]string
}{
{
options: events.ListOptions{
options: types.EventsOptions{
Filters: fltrs,
},
expectedQueryParams: map[string]string{
@@ -79,7 +80,7 @@ func TestEvents(t *testing.T) {
expectedEvents: make(map[string]bool),
},
{
options: events.ListOptions{
options: types.EventsOptions{
Filters: fltrs,
},
expectedQueryParams: map[string]string{
Some files were not shown because too many files have changed in this diff.