Mirror of https://github.com/moby/moby.git (synced 2026-01-16 18:31:41 +00:00)

Compare commits (78 commits)
| SHA1 |
|---|
| f417435e5f |
| acd023d42b |
| 7a075cacf9 |
| aff7177ee7 |
| ed7c26339e |
| 74e3b4fb2e |
| 4cc0416534 |
| f9f9e7ff9a |
| 5fb4eb941d |
| 67e9aa6d4d |
| 61b82be580 |
| 0227d95f99 |
| fa9c5c55e1 |
| df96d8d0bd |
| 1652559be4 |
| ab29279200 |
| 147b5388dd |
| 60103717bc |
| 45dede440e |
| ba4a2dab16 |
| 51133117fb |
| 341a7978a5 |
| 10e3bfd0ac |
| 269a0d8feb |
| 876b1d1dcd |
| 0bcd64689b |
| 8d454710cd |
| 6cf694fe70 |
| c12bbf549b |
| 1ae115175c |
| a7f9907f5f |
| 9150d0115e |
| 9af7c8ec0a |
| 3344c502da |
| 6c9fafdda7 |
| f8a8cdaf9e |
| 7a659049b8 |
| 0ccf1c2a93 |
| 28c1a8bc2b |
| 5b5a58b2cd |
| 282891f70c |
| bbe6f09afc |
| 5b13a38144 |
| 990e95dcf0 |
| a140d0d95f |
| 91a8312fb7 |
| cf03e96354 |
| c48b67160d |
| 225e043196 |
| 78174d2e74 |
| 622e66684a |
| 85f4e6151a |
| 3e358447f5 |
| dd4de8f388 |
| f5ef4e76b3 |
| 6c5e5271c1 |
| 693fca6199 |
| 49487e996a |
| 0358f31dc2 |
| 081cffb3fa |
| 9de19554c7 |
| 2a80b8a7b2 |
| 61ffecfa3b |
| 02cd8dec03 |
| 1d7df5ecc0 |
| 4e68a265ed |
| e437f890ba |
| 5a0015f72c |
| 5babfee371 |
| f2d0d87c46 |
| 6ac38cdbeb |
| d7bf237e29 |
| f41b342cbe |
| f413ba6fdb |
| c2ef38f790 |
| c01bbbddeb |
| 32635850ed |
| 2cf1c762f8 |
.github/workflows/.dco.yml (vendored, 6 changes)

@@ -15,19 +15,19 @@ jobs:
     steps:
       -
         name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
         with:
           fetch-depth: 0
       -
         name: Dump context
-        uses: actions/github-script@v6
+        uses: actions/github-script@v7
         with:
           script: |
             console.log(JSON.stringify(context, null, 2));
       -
         name: Get base ref
         id: base-ref
-        uses: actions/github-script@v6
+        uses: actions/github-script@v7
         with:
           result-encoding: string
           script: |
.github/workflows/.test-prepare.yml (vendored, 4 changes)

@@ -18,11 +18,11 @@ jobs:
     steps:
       -
         name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
       -
         name: Create matrix
         id: set
-        uses: actions/github-script@v6
+        uses: actions/github-script@v7
        with:
           script: |
             let matrix = ['graphdriver'];
.github/workflows/.test.yml (vendored, 88 changes)

@@ -28,16 +28,16 @@ jobs:
     steps:
       -
         name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
       -
         name: Set up runner
         uses: ./.github/actions/setup-runner
       -
         name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
+        uses: docker/setup-buildx-action@v3
       -
         name: Build dev image
-        uses: docker/bake-action@v2
+        uses: docker/bake-action@v4
         with:
           targets: dev
           set: |
@@ -57,7 +57,7 @@ jobs:
           tree -nh /tmp/reports
       -
         name: Send to Codecov
-        uses: codecov/codecov-action@v3
+        uses: codecov/codecov-action@v4
         with:
           directory: ./bundles
           env_vars: RUNNER_OS
@@ -65,9 +65,9 @@ jobs:
       -
         name: Upload reports
         if: always()
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
-          name: ${{ inputs.storage }}-unit-reports
+          name: test-reports-unit-${{ inputs.storage }}
           path: /tmp/reports/*

   unit-report:
@@ -80,14 +80,14 @@ jobs:
     steps:
       -
         name: Set up Go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v5
         with:
           go-version: ${{ env.GO_VERSION }}
       -
         name: Download reports
-        uses: actions/download-artifact@v3
+        uses: actions/download-artifact@v4
         with:
-          name: ${{ inputs.storage }}-unit-reports
+          name: test-reports-unit-${{ inputs.storage }}
           path: /tmp/reports
       -
         name: Install teststat
@@ -105,7 +105,7 @@ jobs:
     steps:
       -
         name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
       -
         name: Set up runner
         uses: ./.github/actions/setup-runner
@@ -114,10 +114,10 @@ jobs:
         uses: ./.github/actions/setup-tracing
       -
         name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
+        uses: docker/setup-buildx-action@v3
       -
         name: Build dev image
-        uses: docker/bake-action@v2
+        uses: docker/bake-action@v4
         with:
           targets: dev
           set: |
@@ -145,9 +145,9 @@ jobs:
       -
         name: Upload reports
         if: always()
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
-          name: ${{ inputs.storage }}-docker-py-reports
+          name: test-reports-docker-py-${{ inputs.storage }}
           path: /tmp/reports/*

   integration-flaky:
@@ -157,16 +157,16 @@ jobs:
     steps:
       -
         name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
       -
         name: Set up runner
         uses: ./.github/actions/setup-runner
       -
         name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
+        uses: docker/setup-buildx-action@v3
       -
         name: Build dev image
-        uses: docker/bake-action@v2
+        uses: docker/bake-action@v4
         with:
           targets: dev
           set: |
@@ -196,7 +196,7 @@ jobs:
     steps:
       -
         name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
       -
         name: Set up runner
         uses: ./.github/actions/setup-runner
@@ -217,10 +217,10 @@ jobs:
           echo "CACHE_DEV_SCOPE=${CACHE_DEV_SCOPE}" >> $GITHUB_ENV
       -
         name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
+        uses: docker/setup-buildx-action@v3
       -
         name: Build dev image
-        uses: docker/bake-action@v2
+        uses: docker/bake-action@v4
         with:
           targets: dev
           set: |
@@ -236,10 +236,13 @@ jobs:
         name: Prepare reports
         if: always()
         run: |
-          reportsPath="/tmp/reports/${{ matrix.os }}"
+          reportsName=${{ matrix.os }}
           if [ -n "${{ matrix.mode }}" ]; then
-            reportsPath="$reportsPath-${{ matrix.mode }}"
+            reportsName="$reportsName-${{ matrix.mode }}"
           fi
+          reportsPath="/tmp/reports/$reportsName"
+          echo "TESTREPORTS_NAME=$reportsName" >> $GITHUB_ENV

           mkdir -p bundles $reportsPath
           find bundles -path '*/root/*overlay2' -prune -o -type f \( -name '*-report.json' -o -name '*.log' -o -name '*.out' -o -name '*.prof' -o -name '*-report.xml' \) -print | xargs sudo tar -czf /tmp/reports.tar.gz
           tar -xzf /tmp/reports.tar.gz -C $reportsPath
@@ -249,7 +252,7 @@ jobs:
           curl -sSLf localhost:16686/api/traces?service=integration-test-client > $reportsPath/jaeger-trace.json
       -
         name: Send to Codecov
-        uses: codecov/codecov-action@v3
+        uses: codecov/codecov-action@v4
         with:
           directory: ./bundles/test-integration
           env_vars: RUNNER_OS
@@ -262,9 +265,9 @@ jobs:
       -
         name: Upload reports
         if: always()
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
-          name: ${{ inputs.storage }}-integration-reports
+          name: test-reports-integration-${{ inputs.storage }}-${{ env.TESTREPORTS_NAME }}
           path: /tmp/reports/*

   integration-report:
@@ -277,15 +280,16 @@ jobs:
     steps:
       -
         name: Set up Go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v5
         with:
           go-version: ${{ env.GO_VERSION }}
       -
         name: Download reports
-        uses: actions/download-artifact@v3
+        uses: actions/download-artifact@v4
         with:
-          name: ${{ inputs.storage }}-integration-reports
           path: /tmp/reports
+          pattern: test-reports-integration-${{ inputs.storage }}-*
+          merge-multiple: true
       -
         name: Install teststat
         run: |
@@ -303,10 +307,10 @@ jobs:
     steps:
       -
         name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
       -
         name: Set up Go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v5
         with:
           go-version: ${{ env.GO_VERSION }}
       -
@@ -343,7 +347,7 @@ jobs:
     steps:
       -
         name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
       -
         name: Set up runner
         uses: ./.github/actions/setup-runner
@@ -352,10 +356,10 @@ jobs:
         uses: ./.github/actions/setup-tracing
       -
         name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
+        uses: docker/setup-buildx-action@v3
       -
         name: Build dev image
-        uses: docker/bake-action@v2
+        uses: docker/bake-action@v4
         with:
           targets: dev
           set: |
@@ -372,7 +376,10 @@ jobs:
         name: Prepare reports
         if: always()
         run: |
-          reportsPath=/tmp/reports/$(echo -n "${{ matrix.test }}" | sha256sum | cut -d " " -f 1)
+          reportsName=$(echo -n "${{ matrix.test }}" | sha256sum | cut -d " " -f 1)
+          reportsPath=/tmp/reports/$reportsName
+          echo "TESTREPORTS_NAME=$reportsName" >> $GITHUB_ENV

           mkdir -p bundles $reportsPath
           echo "${{ matrix.test }}" | tr -s '|' '\n' | tee -a "$reportsPath/tests.txt"
           find bundles -path '*/root/*overlay2' -prune -o -type f \( -name '*-report.json' -o -name '*.log' -o -name '*.out' -o -name '*.prof' -o -name '*-report.xml' \) -print | xargs sudo tar -czf /tmp/reports.tar.gz
@@ -383,7 +390,7 @@ jobs:
           curl -sSLf localhost:16686/api/traces?service=integration-test-client > $reportsPath/jaeger-trace.json
       -
         name: Send to Codecov
-        uses: codecov/codecov-action@v3
+        uses: codecov/codecov-action@v4
         with:
           directory: ./bundles/test-integration
           env_vars: RUNNER_OS
@@ -396,9 +403,9 @@ jobs:
       -
         name: Upload reports
         if: always()
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
-          name: ${{ inputs.storage }}-integration-cli-reports
+          name: test-reports-integration-cli-${{ inputs.storage }}-${{ env.TESTREPORTS_NAME }}
           path: /tmp/reports/*

   integration-cli-report:
@@ -411,15 +418,16 @@ jobs:
     steps:
       -
         name: Set up Go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v5
         with:
           go-version: ${{ env.GO_VERSION }}
       -
         name: Download reports
-        uses: actions/download-artifact@v3
+        uses: actions/download-artifact@v4
         with:
-          name: ${{ inputs.storage }}-integration-cli-reports
           path: /tmp/reports
+          pattern: test-reports-integration-cli-${{ inputs.storage }}-*
+          merge-multiple: true
       -
         name: Install teststat
         run: |
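Several hunks above derive a per-shard report name from a digest of the matrix value: bash pipes the test list through `sha256sum | cut -d " " -f 1`, and the Windows workflow below does the equivalent with `SHA256Managed`. A minimal standalone Go sketch of the same derivation (the function name and the example input are illustrative, not taken from the workflows):

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// reportName mirrors `echo -n "$TEST" | sha256sum | cut -d " " -f 1`:
// the lowercase hex SHA-256 of the raw matrix value.
func reportName(test string) string {
	sum := sha256.Sum256([]byte(test))
	return hex.EncodeToString(sum[:])
}

func main() {
	// Hypothetical matrix.test value; the '|' separators are part of the hashed input.
	fmt.Println(reportName("./integration/container|./integration/image"))
}
```

Because the whole raw string is hashed, any change to the shard's test list (including separator placement) yields a new artifact name, which is what lets upload-artifact v4 treat each shard as a distinct, immutable artifact.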
.github/workflows/.windows.yml (vendored, 51 changes)

@@ -43,7 +43,7 @@ jobs:
     steps:
       -
         name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
         with:
           path: ${{ env.GOPATH }}/src/github.com/docker/docker
       -
@@ -62,7 +62,7 @@ jobs:
           }
       -
         name: Cache
-        uses: actions/cache@v3
+        uses: actions/cache@v4
         with:
           path: |
             ~\AppData\Local\go-build
@@ -103,7 +103,7 @@ jobs:
          docker cp "${{ env.TEST_CTN_NAME }}`:c`:\containerd\bin\containerd-shim-runhcs-v1.exe" ${{ env.BIN_OUT }}\
       -
         name: Upload artifacts
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: build-${{ inputs.storage }}-${{ inputs.os }}
           path: ${{ env.BIN_OUT }}/*
@@ -122,7 +122,7 @@ jobs:
     steps:
       -
         name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
         with:
           path: ${{ env.GOPATH }}/src/github.com/docker/docker
       -
@@ -142,7 +142,7 @@ jobs:
           }
       -
         name: Cache
-        uses: actions/cache@v3
+        uses: actions/cache@v4
         with:
           path: |
             ~\AppData\Local\go-build
@@ -176,7 +176,7 @@ jobs:
       -
         name: Send to Codecov
         if: inputs.send_coverage
-        uses: codecov/codecov-action@v3
+        uses: codecov/codecov-action@v4
         with:
           working-directory: ${{ env.GOPATH }}\src\github.com\docker\docker
           directory: bundles
@@ -185,7 +185,7 @@ jobs:
       -
         name: Upload reports
         if: always()
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: ${{ inputs.os }}-${{ inputs.storage }}-unit-reports
           path: ${{ env.GOPATH }}\src\github.com\docker\docker\bundles\*
@@ -198,12 +198,12 @@ jobs:
     steps:
       -
         name: Set up Go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v5
         with:
           go-version: ${{ env.GO_VERSION }}
       -
         name: Download artifacts
-        uses: actions/download-artifact@v3
+        uses: actions/download-artifact@v4
         with:
           name: ${{ inputs.os }}-${{ inputs.storage }}-unit-reports
           path: /tmp/artifacts
@@ -223,10 +223,10 @@ jobs:
     steps:
       -
         name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
       -
         name: Set up Go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v5
         with:
           go-version: ${{ env.GO_VERSION }}
       -
@@ -278,10 +278,11 @@ jobs:
     steps:
       -
         name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
         with:
           path: ${{ env.GOPATH }}/src/github.com/docker/docker
       -
         name: Set up Jaeger
         run: |
           # Jaeger is set up on Linux through the setup-tracing action. If you update Jaeger here, don't forget to
           # update the version set in .github/actions/setup-tracing/action.yml.
@@ -296,7 +297,7 @@ jobs:
           Get-ChildItem Env: | Out-String
       -
         name: Download artifacts
-        uses: actions/download-artifact@v3
+        uses: actions/download-artifact@v4
         with:
           name: build-${{ inputs.storage }}-${{ inputs.os }}
           path: ${{ env.BIN_OUT }}
@@ -310,6 +311,9 @@ jobs:
             echo "WINDOWS_BASE_IMAGE_TAG=${{ env.WINDOWS_BASE_TAG_2022 }}" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append
           }
           Write-Output "${{ env.BIN_OUT }}" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
+
+          $testName = ([System.BitConverter]::ToString((New-Object System.Security.Cryptography.SHA256Managed).ComputeHash([System.Text.Encoding]::UTF8.GetBytes("${{ matrix.test }}"))) -replace '-').ToLower()
+          echo "TESTREPORTS_NAME=$testName" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append
       -
         # removes docker service that is currently installed on the runner. we
         # could use Uninstall-Package but not yet available on Windows runners.
@@ -420,7 +424,7 @@ jobs:
           DOCKER_HOST: npipe:////./pipe/docker_engine
       -
         name: Set up Go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v5
         with:
           go-version: ${{ env.GO_VERSION }}
       -
@@ -445,7 +449,7 @@ jobs:
       -
         name: Send to Codecov
         if: inputs.send_coverage
-        uses: codecov/codecov-action@v3
+        uses: codecov/codecov-action@v4
         with:
           working-directory: ${{ env.GOPATH }}\src\github.com\docker\docker
           directory: bundles
@@ -498,9 +502,9 @@ jobs:
       -
         name: Upload reports
         if: always()
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
-          name: ${{ inputs.os }}-${{ inputs.storage }}-integration-reports-${{ matrix.runtime }}
+          name: ${{ inputs.os }}-${{ inputs.storage }}-integration-reports-${{ matrix.runtime }}-${{ env.TESTREPORTS_NAME }}
           path: ${{ env.GOPATH }}\src\github.com\docker\docker\bundles\*

   integration-test-report:
@@ -523,15 +527,16 @@ jobs:
     steps:
       -
         name: Set up Go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v5
         with:
           go-version: ${{ env.GO_VERSION }}
       -
-        name: Download artifacts
-        uses: actions/download-artifact@v3
+        name: Download reports
+        uses: actions/download-artifact@v4
         with:
-          name: ${{ inputs.os }}-${{ inputs.storage }}-integration-reports-${{ matrix.runtime }}
-          path: /tmp/artifacts
+          path: /tmp/reports
+          pattern: ${{ inputs.os }}-${{ inputs.storage }}-integration-reports-${{ matrix.runtime }}-*
+          merge-multiple: true
       -
         name: Install teststat
         run: |
@@ -539,4 +544,4 @@ jobs:
       -
         name: Create summary
         run: |
-          teststat -markdown $(find /tmp/artifacts -type f -name '*.json' -print0 | xargs -0) >> $GITHUB_STEP_SUMMARY
+          teststat -markdown $(find /tmp/reports -type f -name '*.json' -print0 | xargs -0) >> $GITHUB_STEP_SUMMARY
.github/workflows/bin-image.yml (vendored, 42 changes)

@@ -34,11 +34,11 @@ jobs:
     steps:
       -
         name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
       -
         name: Docker meta
         id: meta
-        uses: docker/metadata-action@v4
+        uses: docker/metadata-action@v5
         with:
           images: |
             ${{ env.MOBYBIN_REPO_SLUG }}
@@ -61,11 +61,13 @@ jobs:
             type=sha
       -
         name: Rename meta bake definition file
+        # see https://github.com/docker/metadata-action/issues/381#issuecomment-1918607161
         run: |
-          mv "${{ steps.meta.outputs.bake-file }}" "/tmp/bake-meta.json"
+          bakeFile="${{ steps.meta.outputs.bake-file }}"
+          mv "${bakeFile#cwd://}" "/tmp/bake-meta.json"
       -
         name: Upload meta bake definition
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: bake-meta
           path: /tmp/bake-meta.json
@@ -88,34 +90,39 @@ jobs:
       matrix:
         platform: ${{ fromJson(needs.prepare.outputs.platforms) }}
     steps:
+      -
+        name: Prepare
+        run: |
+          platform=${{ matrix.platform }}
+          echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
       -
         name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
         with:
           fetch-depth: 0
       -
         name: Download meta bake definition
-        uses: actions/download-artifact@v3
+        uses: actions/download-artifact@v4
         with:
           name: bake-meta
           path: /tmp
       -
         name: Set up QEMU
-        uses: docker/setup-qemu-action@v2
+        uses: docker/setup-qemu-action@v3
       -
         name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
+        uses: docker/setup-buildx-action@v3
       -
         name: Login to Docker Hub
         if: github.event_name != 'pull_request' && github.repository == 'moby/moby'
-        uses: docker/login-action@v2
+        uses: docker/login-action@v3
         with:
           username: ${{ secrets.DOCKERHUB_MOBYBIN_USERNAME }}
           password: ${{ secrets.DOCKERHUB_MOBYBIN_TOKEN }}
       -
         name: Build
         id: bake
-        uses: docker/bake-action@v3
+        uses: docker/bake-action@v4
         with:
           files: |
             ./docker-bake.hcl
@@ -135,9 +142,9 @@ jobs:
       -
         name: Upload digest
         if: github.event_name != 'pull_request' && github.repository == 'moby/moby'
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
-          name: digests
+          name: digests-${{ env.PLATFORM_PAIR }}
           path: /tmp/digests/*
           if-no-files-found: error
           retention-days: 1
@@ -150,22 +157,23 @@ jobs:
     steps:
       -
         name: Download meta bake definition
-        uses: actions/download-artifact@v3
+        uses: actions/download-artifact@v4
         with:
           name: bake-meta
           path: /tmp
       -
         name: Download digests
-        uses: actions/download-artifact@v3
+        uses: actions/download-artifact@v4
         with:
-          name: digests
           path: /tmp/digests
+          pattern: digests-*
+          merge-multiple: true
       -
         name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
+        uses: docker/setup-buildx-action@v3
       -
         name: Login to Docker Hub
-        uses: docker/login-action@v2
+        uses: docker/login-action@v3
         with:
           username: ${{ secrets.DOCKERHUB_MOBYBIN_USERNAME }}
           password: ${{ secrets.DOCKERHUB_MOBYBIN_TOKEN }}
.github/workflows/buildkit.yml (vendored, 20 changes)

@@ -27,18 +27,18 @@ jobs:
     steps:
       -
         name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
       -
         name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
+        uses: docker/setup-buildx-action@v3
       -
         name: Build
-        uses: docker/bake-action@v2
+        uses: docker/bake-action@v4
         with:
           targets: binary
       -
         name: Upload artifacts
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: binary
           path: ${{ env.DESTDIR }}
@@ -78,10 +78,10 @@ jobs:
         # https://github.com/moby/buildkit/blob/567a99433ca23402d5e9b9f9124005d2e59b8861/client/client_test.go#L5407-L5411
       -
         name: Expose GitHub Runtime
-        uses: crazy-max/ghaction-github-runtime@v2
+        uses: crazy-max/ghaction-github-runtime@v3
       -
         name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
         with:
           path: moby
       -
@@ -91,20 +91,20 @@ jobs:
         working-directory: moby
       -
         name: Checkout BuildKit ${{ env.BUILDKIT_REF }}
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
         with:
           repository: ${{ env.BUILDKIT_REPO }}
           ref: ${{ env.BUILDKIT_REF }}
           path: buildkit
       -
         name: Set up QEMU
-        uses: docker/setup-qemu-action@v2
+        uses: docker/setup-qemu-action@v3
       -
         name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
+        uses: docker/setup-buildx-action@v3
       -
         name: Download binary artifacts
-        uses: actions/download-artifact@v3
+        uses: actions/download-artifact@v4
         with:
           name: binary
           path: ./buildkit/build/moby/
.github/workflows/ci.yml (vendored, 18 changes)

@@ -32,15 +32,15 @@ jobs:
     steps:
       -
         name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
         with:
           fetch-depth: 0
       -
         name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
+        uses: docker/setup-buildx-action@v3
       -
         name: Build
-        uses: docker/bake-action@v2
+        uses: docker/bake-action@v4
         with:
           targets: ${{ matrix.target }}
       -
@@ -53,7 +53,7 @@ jobs:
           find ${{ env.DESTDIR }} -type f -exec file -e ascii -- {} +
       -
         name: Upload artifacts
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: ${{ matrix.target }}
           path: ${{ env.DESTDIR }}
@@ -69,7 +69,7 @@ jobs:
     steps:
       -
         name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
       -
         name: Create matrix
         id: platforms
@@ -93,7 +93,7 @@ jobs:
     steps:
       -
         name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
         with:
           fetch-depth: 0
       -
@@ -103,10 +103,10 @@ jobs:
           echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
       -
         name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
+        uses: docker/setup-buildx-action@v3
       -
         name: Build
-        uses: docker/bake-action@v2
+        uses: docker/bake-action@v4
         with:
           targets: all
           set: |
@@ -121,7 +121,7 @@ jobs:
           find ${{ env.DESTDIR }} -type f -exec file -e ascii -- {} +
       -
         name: Upload artifacts
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: cross-${{ env.PLATFORM_PAIR }}
           path: ${{ env.DESTDIR }}
.github/workflows/test.yml (vendored, 24 changes)

@@ -38,13 +38,13 @@ jobs:
           fi
       -
         name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
       -
         name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
+        uses: docker/setup-buildx-action@v3
       -
         name: Build dev image
-        uses: docker/bake-action@v2
+        uses: docker/bake-action@v4
         with:
           targets: dev
           set: |
@@ -75,7 +75,7 @@ jobs:
     steps:
      -
         name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
       -
         name: Create matrix
         id: scripts
@@ -100,7 +100,7 @@ jobs:
     steps:
       -
         name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
         with:
           fetch-depth: 0
       -
@@ -108,10 +108,10 @@ jobs:
         uses: ./.github/actions/setup-runner
       -
         name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
+        uses: docker/setup-buildx-action@v3
       -
         name: Build dev image
-        uses: docker/bake-action@v2
+        uses: docker/bake-action@v4
         with:
           targets: dev
           set: |
@@ -130,7 +130,7 @@ jobs:
     steps:
       -
         name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
       -
         name: Create matrix
         id: platforms
@@ -153,7 +153,7 @@ jobs:
     steps:
       -
         name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
       -
         name: Prepare
         run: |
@@ -161,13 +161,13 @@ jobs:
           echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
       -
         name: Set up QEMU
-        uses: docker/setup-qemu-action@v2
+        uses: docker/setup-qemu-action@v3
       -
         name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
+        uses: docker/setup-buildx-action@v3
       -
         name: Test
-        uses: docker/bake-action@v2
+        uses: docker/bake-action@v4
         with:
           targets: binary-smoketest
           set: |
Dockerfile

@@ -8,12 +8,12 @@ ARG XX_VERSION=1.2.1
 ARG VPNKIT_VERSION=0.5.0

 ARG DOCKERCLI_REPOSITORY="https://github.com/docker/cli.git"
-ARG DOCKERCLI_VERSION=v25.0.0
+ARG DOCKERCLI_VERSION=v25.0.2
 # cli version used for integration-cli tests
 ARG DOCKERCLI_INTEGRATION_REPOSITORY="https://github.com/docker/cli.git"
 ARG DOCKERCLI_INTEGRATION_VERSION=v17.06.2-ce
 ARG BUILDX_VERSION=0.12.1
-ARG COMPOSE_VERSION=v2.24.2
+ARG COMPOSE_VERSION=v2.24.5

 ARG SYSTEMD="false"
 ARG DOCKER_STATIC=1
@@ -352,7 +352,7 @@ FROM base AS rootlesskit-src
 WORKDIR /usr/src/rootlesskit
 RUN git init . && git remote add origin "https://github.com/rootless-containers/rootlesskit.git"
 # When updating, also update vendor.mod and hack/dockerfile/install/rootlesskit.installer accordingly.
-ARG ROOTLESSKIT_VERSION=v2.0.0
+ARG ROOTLESSKIT_VERSION=v2.0.1
 RUN git fetch -q --depth 1 origin "${ROOTLESSKIT_VERSION}" +refs/tags/*:refs/tags/* && git checkout -q FETCH_HEAD

 FROM base AS rootlesskit-build
Makefile (3 changes)

@@ -250,6 +250,9 @@ swagger-docs: ## preview the API documentation
 .PHONY: generate-files
 generate-files:
 	$(eval $@_TMP_OUT := $(shell mktemp -d -t moby-output.XXXXXXXXXX))
+ifeq ($($@_TMP_OUT),)
+	$(error Could not create temp directory.)
+endif
 	$(BUILD_CMD) --target "update" \
 		--output "type=local,dest=$($@_TMP_OUT)" \
 		--file "./hack/dockerfiles/generate-files.Dockerfile" .
api/swagger.yaml

@@ -8327,6 +8327,16 @@ paths:
           description: "BuildKit output configuration"
           type: "string"
           default: ""
+        - name: "version"
+          in: "query"
+          type: "string"
+          default: "1"
+          enum: ["1", "2"]
+          description: |
+            Version of the builder backend to use.
+
+            - `1` is the first generation classic (deprecated) builder in the Docker daemon (default)
+            - `2` is [BuildKit](https://github.com/moby/buildkit)
       responses:
         200:
           description: "no error"
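The new `version` query parameter documented above selects the builder backend on the build endpoint. A small Go sketch of how a client might encode it; only the parameter name and its enum values come from the swagger hunk, the rest is illustrative:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// "1" selects the classic (deprecated) builder, "2" selects BuildKit,
	// per the enum added in the swagger definition above.
	q := url.Values{}
	q.Set("version", "2")
	fmt.Println("/build?" + q.Encode()) // /build?version=2
}
```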
api/types/network/endpoint.go

@@ -14,6 +14,9 @@ type EndpointSettings struct {
 	IPAMConfig *EndpointIPAMConfig
 	Links      []string
 	Aliases    []string // Aliases holds the list of extra, user-specified DNS names for this endpoint.
+	// MacAddress may be used to specify a MAC address when the container is created.
+	// Once the container is running, it becomes operational data (it may contain a
+	// generated address).
 	MacAddress string
 	// Operational data
 	NetworkID string
api/types/network/ipam.go

@@ -30,30 +30,9 @@ const (
 	ip6 ipFamily = "IPv6"
 )

-// HasIPv6Subnets checks whether there's any IPv6 subnets in the ipam parameter. It ignores any invalid Subnet and nil
-// ipam.
-func HasIPv6Subnets(ipam *IPAM) bool {
-	if ipam == nil {
-		return false
-	}
-
-	for _, cfg := range ipam.Config {
-		subnet, err := netip.ParsePrefix(cfg.Subnet)
-		if err != nil {
-			continue
-		}
-
-		if subnet.Addr().Is6() {
-			return true
-		}
-	}
-
-	return false
-}
-
 // ValidateIPAM checks whether the network's IPAM passed as argument is valid. It returns a joinError of the list of
 // errors found.
-func ValidateIPAM(ipam *IPAM) error {
+func ValidateIPAM(ipam *IPAM, enableIPv6 bool) error {
 	if ipam == nil {
 		return nil
 	}
@@ -70,6 +49,10 @@ func ValidateIPAM(ipam *IPAM) error {
 			subnetFamily = ip6
 		}

+		if !enableIPv6 && subnetFamily == ip6 {
+			continue
+		}
+
 		if subnet != subnet.Masked() {
 			errs = append(errs, fmt.Errorf("invalid subnet %s: it should be %s", subnet, subnet.Masked()))
 		}
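The change hinges on classifying each configured subnet by address family and silently skipping IPv6 subnets when IPv6 is disabled on the network. A standalone sketch of that family check using the standard library's netip package, the same package the removed HasIPv6Subnets helper used; the function name and inputs here are illustrative:

```go
package main

import (
	"fmt"
	"net/netip"
)

// subnetIsIPv6 mirrors the family check the validator relies on: parse the
// prefix and ask whether its address is IPv6. (The real ValidateIPAM also
// checks masks, ranges, gateways and auxiliary addresses.)
func subnetIsIPv6(subnet string) (bool, error) {
	p, err := netip.ParsePrefix(subnet)
	if err != nil {
		return false, err
	}
	return p.Addr().Is6(), nil
}

func main() {
	enableIPv6 := false
	for _, s := range []string{"172.16.0.0/16", "2001:db8::/32"} {
		is6, err := subnetIsIPv6(s)
		if err != nil {
			continue // invalid subnets are reported by other checks
		}
		if !enableIPv6 && is6 {
			// No error, per the regression fix for moby/moby#47202.
			fmt.Println("skipping IPv6 subnet:", s)
			continue
		}
		fmt.Println("validating subnet:", s)
	}
}
```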
api/types/network/ipam_test.go

@@ -30,6 +30,12 @@ func TestNetworkWithInvalidIPAM(t *testing.T) {
 				"invalid auxiliary address DefaultGatewayIPv4: parent subnet is an IPv4 block",
 			},
 		},
+		{
+			// Regression test for https://github.com/moby/moby/issues/47202
+			name: "IPv6 subnet is discarded with no error when IPv6 is disabled",
+			ipam: IPAM{Config: []IPAMConfig{{Subnet: "2001:db8::/32"}}},
+			ipv6: false,
+		},
 		{
 			name: "Invalid data - Subnet",
 			ipam: IPAM{Config: []IPAMConfig{{Subnet: "foobar"}}},
@@ -122,7 +128,7 @@ func TestNetworkWithInvalidIPAM(t *testing.T) {
 		t.Run(tc.name, func(t *testing.T) {
 			t.Parallel()

-			errs := ValidateIPAM(&tc.ipam)
+			errs := ValidateIPAM(&tc.ipam, tc.ipv6)
 			if tc.expectedErrors == nil {
 				assert.NilError(t, errs)
 				return
daemon/container_operations.go

@@ -1,3 +1,6 @@
+// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
+//go:build go1.19
+
 package daemon // import "github.com/docker/docker/daemon"

 import (
@@ -442,6 +445,11 @@ func (daemon *Daemon) updateContainerNetworkSettings(container *container.Contai
 	for name, epConfig := range endpointsConfig {
 		container.NetworkSettings.Networks[name] = &network.EndpointSettings{
 			EndpointSettings: epConfig,
+			// At this point, during container creation, epConfig.MacAddress is the
+			// configured value from the API. If there is no configured value, the
+			// same field will later be used to store a generated MAC address. So,
+			// remember the requested address now.
+			DesiredMacAddress: epConfig.MacAddress,
 		}
 	}
 }
@@ -508,7 +516,7 @@ func (daemon *Daemon) allocateNetwork(cfg *config.Config, container *container.C
 	defaultNetName := runconfig.DefaultDaemonNetworkMode().NetworkName()
 	if nConf, ok := container.NetworkSettings.Networks[defaultNetName]; ok {
 		cleanOperationalData(nConf)
-		if err := daemon.connectToNetwork(cfg, container, defaultNetName, nConf.EndpointSettings, updateSettings); err != nil {
+		if err := daemon.connectToNetwork(cfg, container, defaultNetName, nConf, updateSettings); err != nil {
 			return err
 		}
 	}
@@ -525,7 +533,7 @@ func (daemon *Daemon) allocateNetwork(cfg *config.Config, container *container.C

 	for netName, epConf := range networks {
 		cleanOperationalData(epConf)
-		if err := daemon.connectToNetwork(cfg, container, netName, epConf.EndpointSettings, updateSettings); err != nil {
+		if err := daemon.connectToNetwork(cfg, container, netName, epConf, updateSettings); err != nil {
 			return err
 		}
 	}
@@ -634,12 +642,10 @@ func cleanOperationalData(es *network.EndpointSettings) {
 	es.IPv6Gateway = ""
 	es.GlobalIPv6Address = ""
 	es.GlobalIPv6PrefixLen = 0
+	es.MacAddress = ""
 	if es.IPAMOperational {
 		es.IPAMConfig = nil
 	}
-	if es.MACOperational {
-		es.MacAddress = ""
-	}
 }

 func (daemon *Daemon) updateNetworkConfig(container *container.Container, n *libnetwork.Network, endpointConfig *networktypes.EndpointSettings, updateSettings bool) error {
@@ -682,7 +688,7 @@ func buildEndpointDNSNames(ctr *container.Container, aliases []string) []string
 	return sliceutil.Dedup(dnsNames)
 }

-func (daemon *Daemon) connectToNetwork(cfg *config.Config, container *container.Container, idOrName string, endpointConfig *networktypes.EndpointSettings, updateSettings bool) (retErr error) {
+func (daemon *Daemon) connectToNetwork(cfg *config.Config, container *container.Container, idOrName string, endpointConfig *network.EndpointSettings, updateSettings bool) (retErr error) {
 	start := time.Now()
 	if container.HostConfig.NetworkMode.IsContainer() {
 		return runconfig.ErrConflictSharedNetwork
@@ -692,10 +698,12 @@ func (daemon *Daemon) connectToNetwork(cfg *config.Config, container *container.
 		return nil
 	}
 	if endpointConfig == nil {
-		endpointConfig = &networktypes.EndpointSettings{}
+		endpointConfig = &network.EndpointSettings{
+			EndpointSettings: &networktypes.EndpointSettings{},
+		}
 	}

-	n, nwCfg, err := daemon.findAndAttachNetwork(container, idOrName, endpointConfig)
+	n, nwCfg, err := daemon.findAndAttachNetwork(container, idOrName, endpointConfig.EndpointSettings)
 	if err != nil {
 		return err
 	}
@@ -710,26 +718,20 @@ func (daemon *Daemon) connectToNetwork(cfg *config.Config, container *container.
 		}
 	}

-	var operIPAM bool
-	operMAC := true
+	endpointConfig.IPAMOperational = false
 	if nwCfg != nil {
 		if epConfig, ok := nwCfg.EndpointsConfig[nwName]; ok {
 			if endpointConfig.IPAMConfig == nil || (endpointConfig.IPAMConfig.IPv4Address == "" && endpointConfig.IPAMConfig.IPv6Address == "" && len(endpointConfig.IPAMConfig.LinkLocalIPs) == 0) {
-				operIPAM = true
+				endpointConfig.IPAMOperational = true
 			}

 			// copy IPAMConfig and NetworkID from epConfig via AttachNetwork
 			endpointConfig.IPAMConfig = epConfig.IPAMConfig
 			endpointConfig.NetworkID = epConfig.NetworkID
-
-			// Work out whether the MAC address is user-configured.
-			operMAC = endpointConfig.MacAddress == ""
-			// Copy the configured MAC address (which may be empty).
-			endpointConfig.MacAddress = epConfig.MacAddress
 		}
 	}

-	if err := daemon.updateNetworkConfig(container, n, endpointConfig, updateSettings); err != nil {
+	if err := daemon.updateNetworkConfig(container, n, endpointConfig.EndpointSettings, updateSettings); err != nil {
 		return err
 	}

@@ -752,11 +754,7 @@ func (daemon *Daemon) connectToNetwork(cfg *config.Config, container *container.
 			}
 		}
 	}()
-	container.NetworkSettings.Networks[nwName] = &network.EndpointSettings{
-		EndpointSettings: endpointConfig,
-		IPAMOperational:  operIPAM,
-		MACOperational:   operMAC,
-	}
+	container.NetworkSettings.Networks[nwName] = endpointConfig

 	delete(container.NetworkSettings.Networks, n.ID())

@@ -1060,7 +1058,10 @@ func (daemon *Daemon) ConnectToNetwork(container *container.Container, idOrName
 			}
 		}
 	} else {
-		if err := daemon.connectToNetwork(&daemon.config().Config, container, idOrName, endpointConfig, true); err != nil {
+		epc := &network.EndpointSettings{
+			EndpointSettings: endpointConfig,
+		}
+		if err := daemon.connectToNetwork(&daemon.config().Config, container, idOrName, epc, true); err != nil {
 			return err
 		}
 	}
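The pattern being applied here is a daemon-side wrapper: the API-level EndpointSettings struct is embedded in a daemon-internal type that carries bookkeeping the API never sees, so the user-requested MAC address survives even after the embedded MacAddress field is overwritten with operational data. A simplified, standalone sketch of that idea with stand-in types (not the daemon's actual definitions):

```go
package main

import "fmt"

// Stand-in for the API type (api/types/network.EndpointSettings in the diff).
type APIEndpointSettings struct {
	MacAddress string // becomes operational data once the container runs
	IPAMConfig *struct{ IPv4Address string }
}

// Daemon-side wrapper, as in the diff: embed the API type and carry
// daemon-only bookkeeping alongside it.
type EndpointSettings struct {
	*APIEndpointSettings
	// DesiredMacAddress remembers the user-requested address before the
	// embedded MacAddress field is reused for a generated one.
	DesiredMacAddress string
	// IPAMOperational records whether IPAMConfig holds daemon-generated data.
	IPAMOperational bool
}

// cleanOperationalData resets fields that only describe the last running
// state, keeping user configuration (DesiredMacAddress) intact.
func cleanOperationalData(es *EndpointSettings) {
	es.MacAddress = ""
	if es.IPAMOperational {
		es.IPAMConfig = nil
	}
}

func main() {
	es := &EndpointSettings{
		APIEndpointSettings: &APIEndpointSettings{MacAddress: "02:42:ac:11:00:02"},
		DesiredMacAddress:   "02:42:ac:11:00:02",
	}
	cleanOperationalData(es)
	fmt.Printf("operational=%q desired=%q\n", es.MacAddress, es.DesiredMacAddress)
}
```

Storing the wrapper itself in NetworkSettings.Networks (rather than rebuilding it on every connect, as the removed code did) is what lets inspect read DesiredMacAddress back out later.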
daemon/containerd/image.go

@@ -42,44 +42,10 @@ func (i *ImageService) GetImage(ctx context.Context, refOrID string, options ima
 		platform = platforms.OnlyStrict(*options.Platform)
 	}

-	var presentImages []imagespec.DockerOCIImage
-	err = i.walkImageManifests(ctx, desc, func(img *ImageManifest) error {
-		conf, err := img.Config(ctx)
-		if err != nil {
-			if cerrdefs.IsNotFound(err) {
-				log.G(ctx).WithFields(log.Fields{
-					"manifestDescriptor": img.Target(),
-				}).Debug("manifest was present, but accessing its config failed, ignoring")
-				return nil
-			}
-			return errdefs.System(fmt.Errorf("failed to get config descriptor: %w", err))
-		}
-
-		var ociimage imagespec.DockerOCIImage
-		if err := readConfig(ctx, i.content, conf, &ociimage); err != nil {
-			if cerrdefs.IsNotFound(err) {
-				log.G(ctx).WithFields(log.Fields{
-					"manifestDescriptor": img.Target(),
-					"configDescriptor":   conf,
-				}).Debug("manifest present, but its config is missing, ignoring")
-				return nil
-			}
-			return errdefs.System(fmt.Errorf("failed to read config of the manifest %v: %w", img.Target().Digest, err))
-		}
-		presentImages = append(presentImages, ociimage)
-		return nil
-	})
+	presentImages, err := i.presentImages(ctx, desc, refOrID, platform)
 	if err != nil {
 		return nil, err
 	}
-	if len(presentImages) == 0 {
-		ref, _ := reference.ParseAnyReference(refOrID)
-		return nil, images.ErrImageDoesNotExist{Ref: ref}
-	}
-
-	sort.SliceStable(presentImages, func(i, j int) bool {
-		return platform.Less(presentImages[i].Platform, presentImages[j].Platform)
-	})
 	ociimage := presentImages[0]

 	img := dockerOciImageToDockerImagePartial(image.ID(desc.Target.Digest), ociimage)
@@ -156,6 +122,56 @@ func (i *ImageService) GetImage(ctx context.Context, refOrID string, options ima
 	return img, nil
 }

+// presentImages returns the images that are present in the content store,
+// manifests without a config are ignored.
+// The images are filtered and sorted by platform preference.
+func (i *ImageService) presentImages(ctx context.Context, desc containerdimages.Image, refOrID string, platform platforms.MatchComparer) ([]imagespec.DockerOCIImage, error) {
+	var presentImages []imagespec.DockerOCIImage
+	err := i.walkImageManifests(ctx, desc, func(img *ImageManifest) error {
+		conf, err := img.Config(ctx)
+		if err != nil {
+			if cerrdefs.IsNotFound(err) {
+				log.G(ctx).WithFields(log.Fields{
+					"manifestDescriptor": img.Target(),
+				}).Debug("manifest was present, but accessing its config failed, ignoring")
+				return nil
+			}
+			return errdefs.System(fmt.Errorf("failed to get config descriptor: %w", err))
+		}
+
+		var ociimage imagespec.DockerOCIImage
+		if err := readConfig(ctx, i.content, conf, &ociimage); err != nil {
+			if errdefs.IsNotFound(err) {
+				log.G(ctx).WithFields(log.Fields{
+					"manifestDescriptor": img.Target(),
+					"configDescriptor":   conf,
+				}).Debug("manifest present, but its config is missing, ignoring")
+				return nil
+			}
+			return errdefs.System(fmt.Errorf("failed to read config of the manifest %v: %w", img.Target().Digest, err))
+		}
+
+		if platform.Match(ociimage.Platform) {
+			presentImages = append(presentImages, ociimage)
+		}
+
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+	if len(presentImages) == 0 {
+		ref, _ := reference.ParseAnyReference(refOrID)
+		return nil, images.ErrImageDoesNotExist{Ref: ref}
+	}
+
+	sort.SliceStable(presentImages, func(i, j int) bool {
+		return platform.Less(presentImages[i].Platform, presentImages[j].Platform)
+	})
+
+	return presentImages, nil
+}
+
 func (i *ImageService) GetImageManifest(ctx context.Context, refOrID string, options imagetype.GetImageOpts) (*ocispec.Descriptor, error) {
 	platform := matchAllWithPreference(platforms.Default())
 	if options.Platform != nil {
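The extracted presentImages helper does two things the old inline code spread across callers: it filters candidate manifests with a platform matcher, then orders the survivors by platform preference so callers can simply take the first element. A standalone sketch of that filter-then-sort shape using the same containerd platforms package the diff imports; the candidate list and preferred platform are illustrative:

```go
package main

import (
	"fmt"
	"sort"

	"github.com/containerd/containerd/platforms"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	// Candidate platforms, e.g. gathered while walking an image index.
	imgs := []ocispec.Platform{
		{OS: "linux", Architecture: "arm64"},
		{OS: "linux", Architecture: "amd64"},
		{OS: "windows", Architecture: "amd64"},
	}

	// Match filters and Less orders by preference, mirroring how the helper
	// filters inside walkImageManifests and then sorts with sort.SliceStable
	// before callers take presentImages[0].
	comparer := platforms.Only(ocispec.Platform{OS: "linux", Architecture: "amd64"})

	var present []ocispec.Platform
	for _, p := range imgs {
		if comparer.Match(p) {
			present = append(present, p)
		}
	}
	sort.SliceStable(present, func(i, j int) bool {
		return comparer.Less(present[i], present[j])
	})
	fmt.Println(present)
}
```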
daemon/containerd/image_exporter.go

@@ -168,6 +168,12 @@ func (i *ImageService) ExportImage(ctx context.Context, names []string, outStrea

 		ref, refErr := reference.ParseNormalizedNamed(name)

+		if refErr == nil {
+			if _, ok := ref.(reference.Digested); ok {
+				specificDigestResolved = true
+			}
+		}
+
 		if resolveErr != nil || !specificDigestResolved {
 			// Name didn't resolve to anything, or name wasn't explicitly referencing a digest
 			if refErr == nil && reference.IsNameOnly(ref) {
daemon/containerd/image_history.go

@@ -2,18 +2,14 @@ package containerd

 import (
 	"context"
-	"sort"

-	"github.com/containerd/containerd/images"
+	containerdimages "github.com/containerd/containerd/images"
 	"github.com/containerd/containerd/platforms"
 	"github.com/containerd/log"
 	"github.com/distribution/reference"
 	imagetype "github.com/docker/docker/api/types/image"
 	"github.com/docker/docker/errdefs"
-	"github.com/opencontainers/go-digest"
-	"github.com/opencontainers/image-spec/identity"
-	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
 )
@@ -25,33 +21,13 @@ func (i *ImageService) ImageHistory(ctx context.Context, name string) ([]*imaget
 		return nil, err
 	}

-	cs := i.client.ContentStore()
 	// TODO: pass platform in from the CLI
 	platform := matchAllWithPreference(platforms.Default())

-	var presentImages []ocispec.Image
-	err = i.walkImageManifests(ctx, img, func(img *ImageManifest) error {
-		conf, err := img.Config(ctx)
-		if err != nil {
-			return err
-		}
-		var ociimage ocispec.Image
-		if err := readConfig(ctx, cs, conf, &ociimage); err != nil {
-			return err
-		}
-		presentImages = append(presentImages, ociimage)
-		return nil
-	})
+	presentImages, err := i.presentImages(ctx, img, name, platform)
 	if err != nil {
 		return nil, err
 	}
-	if len(presentImages) == 0 {
-		return nil, errdefs.NotFound(errors.New("failed to find image manifest"))
-	}
-
-	sort.SliceStable(presentImages, func(i, j int) bool {
-		return platform.Less(presentImages[i].Platform, presentImages[j].Platform)
-	})
 	ociimage := presentImages[0]

 	var (
@@ -96,7 +72,7 @@ func (i *ImageService) ImageHistory(ctx context.Context, name string) ([]*imaget
 		}}, history...)
 	}

-	findParents := func(img images.Image) []images.Image {
+	findParents := func(img containerdimages.Image) []containerdimages.Image {
 		imgs, err := i.getParentsByBuilderLabel(ctx, img)
 		if err != nil {
 			log.G(ctx).WithFields(log.Fields{
@@ -141,7 +117,7 @@ func (i *ImageService) ImageHistory(ctx context.Context, name string) ([]*imaget
 	return history, nil
 }

-func getImageTags(ctx context.Context, imgs []images.Image) []string {
+func getImageTags(ctx context.Context, imgs []containerdimages.Image) []string {
 	var tags []string
 	for _, img := range imgs {
 		if isDanglingImage(img) {
daemon/inspect.go

@@ -1,3 +1,6 @@
+// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
+//go:build go1.19
+
 package daemon // import "github.com/docker/docker/daemon"

 import (
@@ -162,8 +165,12 @@ func (daemon *Daemon) getInspectData(daemonCfg *config.Config, container *contai
 	// unversioned API endpoints.
 	if container.Config != nil && container.Config.MacAddress == "" { //nolint:staticcheck // ignore SA1019: field is deprecated, but still used on API < v1.44.
 		if nwm := hostConfig.NetworkMode; nwm.IsDefault() || nwm.IsBridge() || nwm.IsUserDefined() {
-			if epConf, ok := container.NetworkSettings.Networks[nwm.NetworkName()]; ok {
-				container.Config.MacAddress = epConf.MacAddress //nolint:staticcheck // ignore SA1019: field is deprecated, but still used on API < v1.44.
+			name := nwm.NetworkName()
+			if nwm.IsDefault() {
+				name = daemon.netController.Config().DefaultNetwork
+			}
+			if epConf, ok := container.NetworkSettings.Networks[name]; ok {
+				container.Config.MacAddress = epConf.DesiredMacAddress //nolint:staticcheck // ignore SA1019: field is deprecated, but still used on API < v1.44.
 			}
 		}
 	}
daemon/logger/journald/internal/fake/sender.go

@@ -22,6 +22,7 @@ import (

 	"code.cloudfoundry.org/clock"
 	"github.com/coreos/go-systemd/v22/journal"
+	"github.com/google/uuid"
 	"gotest.tools/v3/assert"

 	"github.com/docker/docker/daemon/logger/journald/internal/export"
@@ -67,6 +68,14 @@ type Sender struct {
 	// timestamp in zero time after the SYSLOG_TIMESTAMP value was set,
 	// which is higly unrealistic in practice.
 	AssignEventTimestampFromSyslogTimestamp bool
+	// Boot ID for journal entries. Required by systemd-journal-remote as of
+	// https://github.com/systemd/systemd/commit/1eede158519e4e5ed22738c90cb57a91dbecb7f2
+	// (systemd 255).
+	BootID uuid.UUID
+
+	// When set, Send will act as a test helper and redirect
+	// systemd-journal-remote command output to the test log.
+	TB testing.TB
 }

 // New constructs a new Sender which will write journal entries to outpath. The
@@ -82,6 +91,7 @@ func New(outpath string) (*Sender, error) {
 		CmdName:    p,
 		OutputPath: outpath,
 		Clock:      clock.NewClock(),
+		BootID:     uuid.New(), // UUIDv4, like systemd itself generates for sd_id128 values.
 	}
 	return sender, nil
 }
@@ -95,6 +105,7 @@ func NewT(t *testing.T, outpath string) *Sender {
 		t.Skip(err)
 	}
 	assert.NilError(t, err)
+	s.TB = t
 	return s
 }
@@ -103,6 +114,9 @@ var validVarName = regexp.MustCompile("^[A-Z0-9][A-Z0-9_]*$")
 // Send is a drop-in replacement for
 // github.com/coreos/go-systemd/v22/journal.Send.
 func (s *Sender) Send(message string, priority journal.Priority, vars map[string]string) error {
+	if s.TB != nil {
+		s.TB.Helper()
+	}
 	var buf bytes.Buffer
 	// https://systemd.io/JOURNAL_EXPORT_FORMATS/ says "if you are
 	// generating this format you shouldn’t care about these special
@@ -121,6 +135,9 @@ func (s *Sender) Send(message string, priority journal.Priority, vars map[string
 	if err := export.WriteField(&buf, "__REALTIME_TIMESTAMP", strconv.FormatInt(ts.UnixMicro(), 10)); err != nil {
 		return fmt.Errorf("fake: error writing entry to systemd-journal-remote: %w", err)
 	}
+	if err := export.WriteField(&buf, "_BOOT_ID", fmt.Sprintf("%x", [16]byte(s.BootID))); err != nil {
+		return fmt.Errorf("fake: error writing entry to systemd-journal-remote: %w", err)
+	}
 	if err := export.WriteField(&buf, "MESSAGE", message); err != nil {
 		return fmt.Errorf("fake: error writing entry to systemd-journal-remote: %w", err)
 	}
@@ -143,6 +160,16 @@ func (s *Sender) Send(message string, priority journal.Priority, vars map[string
 	// has been flushed to disk when Send returns.
 	cmd := exec.Command(s.CmdName, "--output", s.OutputPath, "-")
 	cmd.Stdin = &buf
+
+	if s.TB != nil {
+		out, err := cmd.CombinedOutput()
+		s.TB.Logf("[systemd-journal-remote] %s", out)
+		var exitErr *exec.ExitError
+		if errors.As(err, &exitErr) {
+			s.TB.Logf("systemd-journal-remote exit status: %d", exitErr.ExitCode())
+		}
+		return err
+	}
 	cmd.Stdout = os.Stdout
 	cmd.Stderr = os.Stderr
 	return cmd.Run()
||||
@@ -4,7 +4,6 @@ package journald // import "github.com/docker/docker/daemon/logger/journald"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"sync/atomic"
|
||||
@@ -17,7 +16,10 @@ import (
|
||||
"github.com/docker/docker/daemon/logger/journald/internal/sdjournal"
|
||||
)
|
||||
|
||||
const closedDrainTimeout = 5 * time.Second
|
||||
const (
|
||||
closedDrainTimeout = 5 * time.Second
|
||||
waitInterval = 250 * time.Millisecond
|
||||
)
|
||||
|
||||
// Fields which we know are not user-provided attribute fields.
|
||||
var wellKnownFields = map[string]bool{
|
||||
@@ -46,13 +48,13 @@ var wellKnownFields = map[string]bool{
|
||||
}
|
||||
|
||||
type reader struct {
|
||||
s *journald
|
||||
j *sdjournal.Journal
|
||||
logWatcher *logger.LogWatcher
|
||||
config logger.ReadConfig
|
||||
maxOrdinal uint64
|
||||
initialized bool
|
||||
ready chan struct{}
|
||||
s *journald
|
||||
j *sdjournal.Journal
|
||||
logWatcher *logger.LogWatcher
|
||||
config logger.ReadConfig
|
||||
maxOrdinal uint64
|
||||
ready chan struct{}
|
||||
drainDeadline time.Time
|
||||
}
|
||||
|
||||
func getMessage(d map[string]string) (line []byte, ok bool) {
|
||||
@@ -99,101 +101,168 @@ func getAttrs(d map[string]string) []backend.LogAttr {
|
||||
return attrs
|
||||
}
|
||||
|
||||
// errDrainDone is the error returned by drainJournal to signal that there are
|
||||
// no more log entries to send to the log watcher.
|
||||
var errDrainDone = errors.New("journald drain done")
|
||||
// The SeekXYZ() methods all move the journal read pointer to a "conceptual"
|
||||
// position which does not correspond to any journal entry. A subsequent call to
|
||||
// Next(), Previous() or similar is necessary to resolve the read pointer to a
|
||||
// discrete entry.
|
||||
// https://github.com/systemd/systemd/pull/5930#issuecomment-300878104
|
||||
// But that's not all! If there is no discrete entry to resolve the position to,
|
||||
// the call to Next() or Previous() will just leave the read pointer in a
|
||||
// conceptual position, or do something even more bizarre.
|
||||
// https://github.com/systemd/systemd/issues/9934
|
||||
|
||||
// drainJournal reads and sends log messages from the journal.
|
||||
//
|
||||
// drainJournal returns errDrainDone when a terminal stopping condition has been
|
||||
// reached: the watch consumer is gone, a log entry is read which has a
|
||||
// timestamp after until (if until is nonzero), or the log driver is closed and
|
||||
// the last message logged has been sent from the journal. If the end of the
|
||||
// journal is reached without encountering a terminal stopping condition, a nil
|
||||
// error is returned.
|
||||
func (r *reader) drainJournal() error {
|
||||
if !r.initialized {
|
||||
defer func() {
|
||||
r.signalReady()
|
||||
r.initialized = true
|
||||
}()
|
||||
// initialSeekHead positions the journal read pointer at the earliest journal
|
||||
// entry with a timestamp of at least r.config.Since. It returns true if there
|
||||
// is an entry to read at the read pointer.
|
||||
func (r *reader) initialSeekHead() (bool, error) {
|
||||
var err error
|
||||
if r.config.Since.IsZero() {
|
||||
err = r.j.SeekHead()
|
||||
} else {
|
||||
err = r.j.SeekRealtime(r.config.Since)
|
||||
}
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return r.j.Next()
|
||||
}
|
||||
|
||||
var (
|
||||
err error
|
||||
seekedToTail bool
|
||||
)
|
||||
if r.config.Tail >= 0 {
|
||||
if r.config.Until.IsZero() {
|
||||
err = r.j.SeekTail()
|
||||
seekedToTail = true
|
||||
} else {
|
||||
err = r.j.SeekRealtime(r.config.Until)
|
||||
}
|
||||
} else {
|
||||
if r.config.Since.IsZero() {
|
||||
err = r.j.SeekHead()
|
||||
} else {
|
||||
err = r.j.SeekRealtime(r.config.Since)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// SeekTail() followed by Next() behaves incorrectly, so we need
|
||||
// to work around the bug by ensuring the first discrete
|
||||
// movement of the read pointer is Previous() or PreviousSkip().
|
||||
// PreviousSkip() is called inside the loop when config.Tail > 0
|
||||
// so the only special case requiring special handling is
|
||||
// config.Tail == 0.
|
||||
// https://github.com/systemd/systemd/issues/9934
|
||||
if seekedToTail && r.config.Tail == 0 {
|
||||
// Resolve the read pointer to the last entry in the
|
||||
// journal so that the call to Next() inside the loop
|
||||
// advances past it.
|
||||
if ok, err := r.j.Previous(); err != nil || !ok {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// initialSeekTail positions the journal read pointer at a journal entry
// relative to the tail of the journal at the time of the call based on the
// specification in r.config. It returns true if there is an entry to read at
// the read pointer. Otherwise the read pointer is set to a conceptual position
// which will be resolved to the desired entry (once written) by advancing
// forward with r.j.Next() or similar.
func (r *reader) initialSeekTail() (bool, error) {
	var err error
	if r.config.Until.IsZero() {
		err = r.j.SeekTail()
	} else {
		err = r.j.SeekRealtime(r.config.Until)
	}
	if err != nil {
		return false, err
	}

	var ok bool
	if r.config.Tail == 0 {
		ok, err = r.j.Previous()
	} else {
		var n int
		n, err = r.j.PreviousSkip(uint(r.config.Tail))
		ok = n > 0
	}
	if err != nil {
		return ok, err
	}
	if !ok {
		// The (filtered) journal has no entries. The tail is the head: all new
		// entries which get written into the journal from this point forward
		// should be read from the journal. However the read pointer is
		// positioned at a conceptual position which is not conducive to reading
		// those entries. The tail of the journal is resolved to the last entry
		// in the journal _at the time of the first successful Previous() call_,
		// which means that an arbitrary number of journal entries added in the
		// interim may be skipped: race condition. While the realtime conceptual
		// position is not so racy, it is also unhelpful: it is the timestamp
		// past which reading should stop, so all logs that should be followed
		// would be skipped over.
		// Reset the read pointer position to avoid these problems.
		return r.initialSeekHead()
	} else if r.config.Tail == 0 {
		// The journal read pointer is positioned at the discrete position of
		// the journal entry _before_ the entry to send.
		return r.j.Next()
	}

	// Check if the PreviousSkip went too far back.
	timestamp, err := r.j.Realtime()
	if err != nil {
		return false, err
	}
	if timestamp.Before(r.config.Since) {
		if err := r.j.SeekRealtime(r.config.Since); err != nil {
			return false, err
		}
		return r.j.Next()
	}
	return true, nil
}

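The Previous/PreviousSkip-before-Next sequence above works around a known systemd bug (https://github.com/systemd/systemd/issues/9934) where SeekTail followed directly by Next lands in the wrong place: the first discrete movement of the read pointer after SeekTail must be backwards. For illustration, a minimal standalone sketch of the same pattern, written against the public github.com/coreos/go-systemd/v22/sdjournal binding rather than the internal binding this driver uses (Linux with libsystemd required; treat the exact signatures as an assumption):

```go
package main

import (
	"fmt"
	"log"

	"github.com/coreos/go-systemd/v22/sdjournal"
)

func main() {
	j, err := sdjournal.NewJournal()
	if err != nil {
		log.Fatal(err)
	}
	defer j.Close()

	if err := j.SeekTail(); err != nil {
		log.Fatal(err)
	}
	// Workaround for systemd issue 9934: after SeekTail, move the read
	// pointer backwards once so that it rests on the last existing entry.
	if _, err := j.Previous(); err != nil {
		log.Fatal(err)
	}
	// Next now advances past the last existing entry, so only entries
	// written after this point are returned.
	n, err := j.Next()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("advanced", n, "entries")
}
```
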
// wait blocks until the journal has new data to read, the reader's drain
// deadline is exceeded, or the log reading consumer is gone.
func (r *reader) wait() (bool, error) {
	for {
		dur := waitInterval
		if !r.drainDeadline.IsZero() {
			dur = time.Until(r.drainDeadline)
			if dur < 0 {
				// Container is gone but we haven't found the end of the
				// logs before the deadline. Maybe it was dropped by
				// journald, e.g. due to rate-limiting.
				return false, nil
			} else if dur > waitInterval {
				dur = waitInterval
			}
		}
		status, err := r.j.Wait(dur)
		if err != nil {
			return false, err
		} else if status != sdjournal.StatusNOP {
			return true, nil
		}
		select {
		case <-r.logWatcher.WatchConsumerGone():
			return false, nil
		case <-r.s.closed:
			// Container is gone; don't wait indefinitely for journal entries that will never arrive.
			if r.maxOrdinal >= atomic.LoadUint64(&r.s.ordinal) {
				return false, nil
			}
			if r.drainDeadline.IsZero() {
				r.drainDeadline = time.Now().Add(closedDrainTimeout)
			}
		default:
		}
	}
}

// nextWait blocks until there is a new journal entry to read, and advances the
// journal read pointer to it.
func (r *reader) nextWait() (bool, error) {
	for {
		if ok, err := r.j.Next(); err != nil || ok {
			return ok, err
		}
		if ok, err := r.wait(); err != nil || !ok {
			return false, err
		}
	}
}

// drainJournal reads and sends log messages from the journal, starting from the
// current read pointer, until the end of the journal or a terminal stopping
// condition is reached.
//
// It returns false when a terminal stopping condition has been reached:
//   - the watch consumer is gone, or
//   - (if until is nonzero) a log entry is read which has a timestamp after
//     until
func (r *reader) drainJournal() (bool, error) {
	for i := 0; ; i++ {
		// Read the entry's timestamp.
		timestamp, err := r.j.Realtime()
		if err != nil {
			return true, err
		}
		// Check if the PreviousSkip went too far back. Check only the
		// initial position as we are comparing wall-clock timestamps,
		// which may not be monotonic. We don't want to skip over
		// messages sent later in time just because the clock moved
		// backwards.
		if !r.initialized && i == 0 && r.config.Tail > 0 && timestamp.Before(r.config.Since) {
			r.j.SeekRealtime(r.config.Since)
			continue
		}
		if !r.config.Until.IsZero() && r.config.Until.Before(timestamp) {
			return false, nil
		}

		// Read and send the logged message, if there is one to read.
		data, err := r.j.Data()
		if err != nil {
			return true, err
		}

		if data[fieldLogEpoch] == r.s.epoch {
@@ -228,7 +297,7 @@ func (r *reader) drainJournal() error {
			*/
			select {
			case <-r.logWatcher.WatchConsumerGone():
				return false, nil
			case r.logWatcher.Msg <- msg:
			}
		}
@@ -243,41 +312,28 @@ func (r *reader) drainJournal() error {
				Warn("journald: error processing journal")
			}
		}

		if ok, err := r.j.Next(); err != nil || !ok {
			return true, err
		}
	}
}

func (r *reader) readJournal() error {
	caughtUp := atomic.LoadUint64(&r.s.ordinal)
	if more, err := r.drainJournal(); err != nil || !more {
		return err
	}

	if !r.config.Follow {
		if r.s.readSyncTimeout == 0 {
			return nil
		}
		r.drainDeadline = time.Now().Add(r.s.readSyncTimeout)
	}

	for {
		if more, err := r.nextWait(); err != nil || !more {
			return err
		}
		if more, err := r.drainJournal(); err != nil || !more {
			return err
		}
		if !r.config.Follow && r.s.readSyncTimeout > 0 && r.maxOrdinal >= caughtUp {
			return nil
@@ -361,6 +407,33 @@ func (r *reader) readLogs() {
		return
	}

	var ok bool
	if r.config.Tail >= 0 {
		ok, err = r.initialSeekTail()
	} else {
		ok, err = r.initialSeekHead()
	}
	if err != nil {
		r.logWatcher.Err <- err
		return
	}
	r.signalReady()
	if !ok {
		if !r.config.Follow {
			return
		}
		// Either the read pointer is positioned at a discrete journal entry, in
		// which case the position will be unaffected by subsequent logging, or
		// the read pointer is in the conceptual position corresponding to the
		// first journal entry to send once it is logged in the future.
		if more, err := r.nextWait(); err != nil || !more {
			if err != nil {
				r.logWatcher.Err <- err
			}
			return
		}
	}

	if err := r.readJournal(); err != nil {
		r.logWatcher.Err <- err
		return

@@ -3,6 +3,7 @@
package journald // import "github.com/docker/docker/daemon/logger/journald"

import (
	"sync"
	"testing"
	"time"

@@ -46,32 +47,37 @@ func TestLogRead(t *testing.T) {
		assert.NilError(t, rotatedJournal.Send("a log message from a totally different process in the active journal", journal.PriInfo, nil))

		return func(t *testing.T) logger.Logger {
			l, err := new(info)
			assert.NilError(t, err)
			l.journalReadDir = journalDir
			sl := &syncLogger{journald: l, waiters: map[uint64]chan<- struct{}{}}

			s := make(chan sendit, 100)
			t.Cleanup(func() { close(s) })
			go func() {
				for m := range s {
					<-m.after
					activeJournal.Send(m.message, m.priority, m.vars)
					sl.mu.Lock()
					sl.sent++
					if notify, ok := sl.waiters[sl.sent]; ok {
						delete(sl.waiters, sl.sent)
						close(notify)
					}
					sl.mu.Unlock()
				}
			}()
			l.sendToJournal = func(message string, priority journal.Priority, vars map[string]string) error {
				sl.mu.Lock()
				sl.queued++
				sl.mu.Unlock()
				s <- sendit{
					message:  message,
					priority: priority,
					vars:     vars,
					after:    time.After(150 * time.Millisecond),
				}
				return nil
			}
			l.readSyncTimeout = 3 * time.Second
@@ -88,17 +94,31 @@ type sendit struct {
	priority journal.Priority
	vars     map[string]string
	after    <-chan time.Time
}

type syncLogger struct {
	*journald

	mu           sync.Mutex
	queued, sent uint64
	waiters      map[uint64]chan<- struct{}
}

func (l *syncLogger) Sync() error {
	l.mu.Lock()
	waitFor := l.queued
	if l.sent >= l.queued {
		l.mu.Unlock()
		return nil
	}
	notify := make(chan struct{})
	l.waiters[waitFor] = notify
	l.mu.Unlock()
	<-notify
	return nil
}

func (l *syncLogger) Close() error {
	_ = l.Sync()
	return l.journald.Close()
}

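The syncLogger above replaces a single wait channel with a waiter map keyed by sequence number, so Sync can block until everything queued at the time of the call has actually been sent. A minimal standalone sketch of that idiom (all names here are mine, not from the diff; like syncLogger it supports one waiter per threshold):

```go
package main

import (
	"fmt"
	"sync"
)

// waitCounter lets a caller block until at least n events have been
// recorded, without polling.
type waitCounter struct {
	mu      sync.Mutex
	count   uint64
	waiters map[uint64]chan struct{}
}

func newWaitCounter() *waitCounter {
	return &waitCounter{waiters: map[uint64]chan struct{}{}}
}

// Add records one event and wakes any waiter registered for this count.
func (w *waitCounter) Add() {
	w.mu.Lock()
	w.count++
	if ch, ok := w.waiters[w.count]; ok {
		delete(w.waiters, w.count)
		close(ch)
	}
	w.mu.Unlock()
}

// WaitFor blocks until the counter has reached n.
func (w *waitCounter) WaitFor(n uint64) {
	w.mu.Lock()
	if w.count >= n {
		w.mu.Unlock()
		return
	}
	ch := make(chan struct{})
	w.waiters[n] = ch
	w.mu.Unlock()
	<-ch
}

func main() {
	w := newWaitCounter()
	done := make(chan struct{})
	go func() {
		w.WaitFor(3)
		close(done)
	}()
	for i := 0; i < 3; i++ {
		w.Add()
	}
	<-done
	fmt.Println("three events observed")
}
```
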
@@ -1,6 +1,7 @@
package loggertest // import "github.com/docker/docker/daemon/logger/loggertest"

import (
	"fmt"
	"runtime"
	"strings"
	"sync"
@@ -10,6 +11,7 @@ import (
	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
	"gotest.tools/v3/assert"
	is "gotest.tools/v3/assert/cmp"
	"gotest.tools/v3/assert/opt"

	"github.com/docker/docker/api/types/backend"
@@ -194,28 +196,31 @@ func (tr Reader) testTailEmptyLogs(t *testing.T, live bool) {
func (tr Reader) TestFollow(t *testing.T) {
	// Reader sends all logs and closes after logger is closed
	// - Starting from empty log (like run)
	for i, tail := range []int{-1, 0, 1, 42} {
		i, tail := i, tail
		t.Run(fmt.Sprintf("FromEmptyLog/Tail=%d", tail), func(t *testing.T) {
			t.Parallel()
			l := tr.Factory(t, logger.Info{
				ContainerID:   fmt.Sprintf("followstart%d", i),
				ContainerName: fmt.Sprintf("logloglog%d", i),
			})(t)
			lw := l.(logger.LogReader).ReadLogs(logger.ReadConfig{Tail: tail, Follow: true})
			defer lw.ConsumerGone()

			doneReading := make(chan struct{})
			var logs []*logger.Message
			go func() {
				defer close(doneReading)
				logs = readAll(t, lw)
			}()

			mm := makeTestMessages()
			expected := logMessages(t, l, mm)
			assert.NilError(t, l.Close())
			<-doneReading
			assert.DeepEqual(t, logs, expected, compareLog)
		})
	}

	t.Run("AttachMidStream", func(t *testing.T) {
		t.Parallel()
@@ -433,7 +438,7 @@ func (tr Reader) TestConcurrent(t *testing.T) {
	logAll := func(msgs []*logger.Message) {
		defer wg.Done()
		for _, m := range msgs {
			assert.Check(t, l.Log(copyLogMessage(m)), "failed to log message %+v", m)
		}
	}

@@ -446,6 +451,15 @@ func (tr Reader) TestConcurrent(t *testing.T) {
		defer l.Close()
		wg.Wait()
	}()
	defer func() {
		// Make sure log gets closed before we return
		// so the temporary dir can be deleted
		select {
		case <-time.After(10 * time.Second):
			t.Fatal("timed out waiting for logger to close")
		case <-closed:
		}
	}()

	// Check if the message count, order and content is equal to what was logged
	for {
@@ -469,12 +483,8 @@ func (tr Reader) TestConcurrent(t *testing.T) {
		*messages = (*messages)[1:]
	}

	assert.Check(t, is.Len(stdoutMessages, 0), "expected stdout messages were not read")
	assert.Check(t, is.Len(stderrMessages, 0), "expected stderr messages were not read")
}

// logMessages logs messages to l and returns a slice of messages as would be

@@ -305,10 +305,6 @@ func (daemon *Daemon) createNetwork(cfg *config.Config, create types.NetworkCrea
		return nil, errdefs.Forbidden(errors.New(`This node is not a swarm manager. Use "docker swarm init" or "docker swarm join" to connect this node to swarm and try again.`))
	}

	networkOptions := make(map[string]string)
	for k, v := range create.Options {
		networkOptions[k] = v
@@ -335,7 +331,7 @@ func (daemon *Daemon) createNetwork(cfg *config.Config, create types.NetworkCrea
		nwOptions = append(nwOptions, libnetwork.NetworkOptionConfigOnly())
	}

	if err := network.ValidateIPAM(create.IPAM, create.EnableIPv6); err != nil {
		return nil, errdefs.InvalidParameter(err)
	}

@@ -788,7 +784,7 @@ func (daemon *Daemon) clearAttachableNetworks() {
}

// buildCreateEndpointOptions builds endpoint options from a given network.
func buildCreateEndpointOptions(c *container.Container, n *libnetwork.Network, epConfig *internalnetwork.EndpointSettings, sb *libnetwork.Sandbox, daemonDNS []string) ([]libnetwork.EndpointOption, error) {
	var createOptions []libnetwork.EndpointOption
	var genericOptions = make(options.Generic)

@@ -824,8 +820,8 @@ func buildCreateEndpointOptions(c *container.Container, n *libnetwork.Network, e
		createOptions = append(createOptions, libnetwork.EndpointOptionGeneric(options.Generic{k: v}))
	}

	if epConfig.DesiredMacAddress != "" {
		mac, err := net.ParseMAC(epConfig.DesiredMacAddress)
		if err != nil {
			return nil, err
		}

@@ -33,8 +33,9 @@ type Settings struct {
type EndpointSettings struct {
	*networktypes.EndpointSettings
	IPAMOperational bool
	// DesiredMacAddress is the configured value, it's copied from MacAddress (the
	// API param field) when the container is created.
	DesiredMacAddress string
}

// AttachmentStore stores the load balancer IP address for a network id.

@@ -6356,6 +6356,16 @@ paths:
            description: "Target build stage"
            type: "string"
            default: ""
          - name: "version"
            in: "query"
            type: "string"
            default: "1"
            enum: ["1", "2"]
            description: |
              Version of the builder backend to use.

              - `1` is the first generation classic (deprecated) builder in the Docker daemon (default)
              - `2` is [BuildKit](https://github.com/moby/buildkit)
      responses:
        200:
          description: "no error"

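The same `version` query parameter is added to every documented API version below. Since the parameter is also plumbed through the Go client, here is a minimal sketch of selecting the builder backend programmatically (the image tag and context file name are illustrative, and the daemon may impose further requirements, such as a BuildKit session, when `2` is selected):

```go
package main

import (
	"context"
	"io"
	"log"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// The build context must be a tar stream.
	buildCtx, err := os.Open("context.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer buildCtx.Close()

	// Version maps onto the `version` query parameter documented above:
	// types.BuilderV1 sends "1" (classic builder), types.BuilderBuildKit sends "2".
	resp, err := cli.ImageBuild(context.Background(), buildCtx, types.ImageBuildOptions{
		Tags:    []string{"example:latest"},
		Version: types.BuilderBuildKit,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	_, _ = io.Copy(os.Stdout, resp.Body) // stream the build progress messages
}
```
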
@@ -7390,6 +7390,16 @@ paths:
            description: "Target build stage"
            type: "string"
            default: ""
          - name: "version"
            in: "query"
            type: "string"
            default: "1"
            enum: ["1", "2"]
            description: |
              Version of the builder backend to use.

              - `1` is the first generation classic (deprecated) builder in the Docker daemon (default)
              - `2` is [BuildKit](https://github.com/moby/buildkit)
      responses:
        200:
          description: "no error"

@@ -7701,6 +7701,16 @@ paths:
            description: "BuildKit output configuration"
            type: "string"
            default: ""
          - name: "version"
            in: "query"
            type: "string"
            default: "1"
            enum: ["1", "2"]
            description: |
              Version of the builder backend to use.

              - `1` is the first generation classic (deprecated) builder in the Docker daemon (default)
              - `2` is [BuildKit](https://github.com/moby/buildkit)
      responses:
        200:
          description: "no error"

@@ -7906,6 +7906,16 @@ paths:
            description: "BuildKit output configuration"
            type: "string"
            default: ""
          - name: "version"
            in: "query"
            type: "string"
            default: "1"
            enum: ["1", "2"]
            description: |
              Version of the builder backend to use.

              - `1` is the first generation classic (deprecated) builder in the Docker daemon (default)
              - `2` is [BuildKit](https://github.com/moby/buildkit)
      responses:
        200:
          description: "no error"

@@ -8156,6 +8156,16 @@ paths:
            description: "BuildKit output configuration"
            type: "string"
            default: ""
          - name: "version"
            in: "query"
            type: "string"
            default: "1"
            enum: ["1", "2"]
            description: |
              Version of the builder backend to use.

              - `1` is the first generation classic (deprecated) builder in the Docker daemon (default)
              - `2` is [BuildKit](https://github.com/moby/buildkit)
      responses:
        200:
          description: "no error"

@@ -8174,6 +8174,16 @@ paths:
            description: "BuildKit output configuration"
            type: "string"
            default: ""
          - name: "version"
            in: "query"
            type: "string"
            default: "1"
            enum: ["1", "2"]
            description: |
              Version of the builder backend to use.

              - `1` is the first generation classic (deprecated) builder in the Docker daemon (default)
              - `2` is [BuildKit](https://github.com/moby/buildkit)
      responses:
        200:
          description: "no error"

@@ -8327,6 +8327,16 @@ paths:
            description: "BuildKit output configuration"
            type: "string"
            default: ""
          - name: "version"
            in: "query"
            type: "string"
            default: "1"
            enum: ["1", "2"]
            description: |
              Version of the builder backend to use.

              - `1` is the first generation classic (deprecated) builder in the Docker daemon (default)
              - `2` is [BuildKit](https://github.com/moby/buildkit)
      responses:
        200:
          description: "no error"

@@ -667,8 +667,6 @@ keywords: "API, Docker, rcli, REST, documentation"

## v1.23 API changes

[Docker Engine API v1.23](v1.23.md) documentation

* `GET /containers/json` returns the state of the container, one of `created`, `restarting`, `running`, `paused`, `exited` or `dead`.
* `GET /containers/json` returns the mount points for the container.
* `GET /networks/(name)` now returns an `Internal` field showing whether the network is internal or not.
@@ -689,8 +687,6 @@ keywords: "API, Docker, rcli, REST, documentation"

## v1.22 API changes

[Docker Engine API v1.22](v1.22.md) documentation

* The `HostConfig.LxcConf` field has been removed, and is no longer available on
  `POST /containers/create` and `GET /containers/(id)/json`.
* `POST /container/(name)/update` updates the resources of a container.
@@ -725,8 +721,6 @@ keywords: "API, Docker, rcli, REST, documentation"

## v1.21 API changes

[Docker Engine API v1.21](v1.21.md) documentation

* `GET /volumes` lists volumes from all volume drivers.
* `POST /volumes/create` to create a volume.
* `GET /volumes/(name)` get low-level information about a volume.
@@ -760,8 +754,6 @@ keywords: "API, Docker, rcli, REST, documentation"

## v1.20 API changes

[Docker Engine API v1.20](v1.20.md) documentation

* `GET /containers/(id)/archive` get an archive of filesystem content from a container.
* `PUT /containers/(id)/archive` upload an archive of content to be extracted to
  an existing directory inside a container's filesystem.
@@ -772,8 +764,6 @@ list of additional groups that the container process will run as.

## v1.19 API changes

[Docker Engine API v1.19](v1.19.md) documentation

* When the daemon detects a version mismatch with the client, usually when
  the client is newer than the daemon, an HTTP 400 is now returned instead
  of a 404.
@@ -788,8 +778,6 @@ end point now returns the new boolean fields `CpuCfsPeriod`, `CpuCfsQuota`, and

## v1.18 API changes

[Docker Engine API v1.18](v1.18.md) documentation

* `GET /version` now returns `Os`, `Arch` and `KernelVersion`.
* `POST /containers/create` and `POST /containers/(id)/start` allow you to set ulimit settings for use in the container.
* `GET /info` now returns `SystemTime`, `HttpProxy`, `HttpsProxy` and `NoProxy`.

@@ -1,7 +1,7 @@
#!/bin/sh

# When updating, also update vendor.mod and Dockerfile accordingly.
: "${ROOTLESSKIT_VERSION:=v2.0.1}"

install_rootlesskit() {
	case "$1" in

image/cache/cache.go
@@ -7,7 +7,6 @@ import (
	"reflect"
	"strings"

	"github.com/containerd/log"
	containertypes "github.com/docker/docker/api/types/container"
	"github.com/docker/docker/dockerversion"
@@ -250,11 +249,12 @@ func getLocalCachedImage(imageStore image.Store, imgID image.ID, config *contain
		}

		imgPlatform := img.Platform()

		// Discard old linux/amd64 images with empty platform.
		if imgPlatform.OS == "" && imgPlatform.Architecture == "" {
			continue
		}
		if !comparePlatform(platform, imgPlatform) {
			continue
		}

image/cache/compare.go
@@ -1,7 +1,11 @@
package cache // import "github.com/docker/docker/image/cache"

import (
	"strings"

	"github.com/containerd/containerd/platforms"
	"github.com/docker/docker/api/types/container"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// TODO: Remove once containerd image service directly uses the ImageCache and
@@ -10,6 +14,29 @@ func CompareConfig(a, b *container.Config) bool {
	return compare(a, b)
}

func comparePlatform(builderPlatform, imagePlatform ocispec.Platform) bool {
	// On Windows, only check the Major and Minor versions.
	// The Build and Revision compatibility depends on whether `process` or
	// `hyperv` isolation is used.
	//
	// Fixes https://github.com/moby/moby/issues/47307
	if builderPlatform.OS == "windows" && imagePlatform.OS == builderPlatform.OS {
		// OSVersion format is:
		// Major.Minor.Build.Revision
		builderParts := strings.Split(builderPlatform.OSVersion, ".")
		imageParts := strings.Split(imagePlatform.OSVersion, ".")

		if len(builderParts) >= 3 && len(imageParts) >= 3 {
			// Keep only Major & Minor.
			builderParts[0] = imageParts[0]
			builderParts[1] = imageParts[1]
			imagePlatform.OSVersion = strings.Join(builderParts, ".")
		}
	}

	return platforms.Only(builderPlatform).Match(imagePlatform)
}

// compare two Config structs. Do not compare container-specific fields:
// - Image
// - Hostname

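A worked example of the normalization above, written as it could appear in a test within the same package (the concrete version numbers are illustrative):

```go
// On a Windows build, a builder on OS version 10.0.22621 can reuse a cached
// image recorded as 10.0.17763.5329: builderParts is ["10","0","22621"] and
// imageParts is ["10","0","17763","5329"], so the image's OSVersion is
// rewritten to "10.0.22621". The Build/Revision difference is neutralized
// before platforms.Only(...).Match(...) runs, and "10.0" == "10.0" matches.
builder := ocispec.Platform{OS: "windows", Architecture: "amd64", OSVersion: "10.0.22621"}
image := ocispec.Platform{OS: "windows", Architecture: "amd64", OSVersion: "10.0.17763.5329"}

match := comparePlatform(builder, image) // true on a Windows build
_ = match
```

Had the builder been on 11.0.x instead, the rewritten versions would still disagree on Major, and the match would fail, which is exactly what the "different major" test case below asserts.
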
image/cache/compare_test.go
@@ -1,11 +1,15 @@
package cache // import "github.com/docker/docker/image/cache"

import (
	"runtime"
	"testing"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/strslice"
	"github.com/docker/go-connections/nat"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"gotest.tools/v3/assert"
	is "gotest.tools/v3/assert/cmp"
)

// Just to make life easier
@@ -124,3 +128,79 @@ func TestCompare(t *testing.T) {
		}
	}
}

func TestPlatformCompare(t *testing.T) {
	for _, tc := range []struct {
		name     string
		builder  ocispec.Platform
		image    ocispec.Platform
		expected bool
	}{
		{
			name:     "same os and arch",
			builder:  ocispec.Platform{Architecture: "amd64", OS: runtime.GOOS},
			image:    ocispec.Platform{Architecture: "amd64", OS: runtime.GOOS},
			expected: true,
		},
		{
			name:     "same os different arch",
			builder:  ocispec.Platform{Architecture: "amd64", OS: runtime.GOOS},
			image:    ocispec.Platform{Architecture: "arm64", OS: runtime.GOOS},
			expected: false,
		},
		{
			name:     "same os smaller host variant",
			builder:  ocispec.Platform{Variant: "v7", Architecture: "arm", OS: runtime.GOOS},
			image:    ocispec.Platform{Variant: "v8", Architecture: "arm", OS: runtime.GOOS},
			expected: false,
		},
		{
			name:     "same os higher host variant",
			builder:  ocispec.Platform{Variant: "v8", Architecture: "arm", OS: runtime.GOOS},
			image:    ocispec.Platform{Variant: "v7", Architecture: "arm", OS: runtime.GOOS},
			expected: true,
		},
		{
			// Test for https://github.com/moby/moby/issues/47307
			name:     "different build and revision",
			builder:  ocispec.Platform{Architecture: "amd64", OS: "windows", OSVersion: "10.0.22621"},
			image:    ocispec.Platform{Architecture: "amd64", OS: "windows", OSVersion: "10.0.17763.5329"},
			expected: true,
		},
		{
			name:     "different revision",
			builder:  ocispec.Platform{Architecture: "amd64", OS: "windows", OSVersion: "10.0.17763.1234"},
			image:    ocispec.Platform{Architecture: "amd64", OS: "windows", OSVersion: "10.0.17763.5329"},
			expected: true,
		},
		{
			name:     "different major",
			builder:  ocispec.Platform{Architecture: "amd64", OS: "windows", OSVersion: "11.0.17763.5329"},
			image:    ocispec.Platform{Architecture: "amd64", OS: "windows", OSVersion: "10.0.17763.5329"},
			expected: false,
		},
		{
			name:     "different minor same osver",
			builder:  ocispec.Platform{Architecture: "amd64", OS: "windows", OSVersion: "10.0.17763.5329"},
			image:    ocispec.Platform{Architecture: "amd64", OS: "windows", OSVersion: "10.1.17763.5329"},
			expected: false,
		},
		{
			name:     "different arch same osver",
			builder:  ocispec.Platform{Architecture: "arm64", OS: "windows", OSVersion: "10.0.17763.5329"},
			image:    ocispec.Platform{Architecture: "amd64", OS: "windows", OSVersion: "10.0.17763.5329"},
			expected: false,
		},
	} {
		tc := tc
		// OSVersion comparison is only performed by containerd platform
		// matcher if built on Windows.
		if (tc.image.OSVersion != "" || tc.builder.OSVersion != "") && runtime.GOOS != "windows" {
			continue
		}

		t.Run(tc.name, func(t *testing.T) {
			assert.Check(t, is.Equal(comparePlatform(tc.builder, tc.image), tc.expected))
		})
	}
}

@@ -260,6 +260,12 @@ func (s *saveSession) save(outStream io.Writer) error {
		}
		size := int64(len(data))

		untaggedMfstDesc := ocispec.Descriptor{
			MediaType: ocispec.MediaTypeImageManifest,
			Digest:    dgst,
			Size:      size,
			Platform:  m.Config.Platform,
		}
		for _, ref := range imageDescr.refs {
			familiarName := reference.FamiliarName(ref)
			if _, ok := reposLegacy[familiarName]; !ok {
@@ -268,16 +274,17 @@ func (s *saveSession) save(outStream io.Writer) error {
			reposLegacy[familiarName][ref.Tag()] = digest.Digest(imageDescr.layers[len(imageDescr.layers)-1]).Encoded()
			repoTags = append(repoTags, reference.FamiliarString(ref))

			taggedManifest := untaggedMfstDesc
			taggedManifest.Annotations = map[string]string{
				images.AnnotationImageName: ref.String(),
				ocispec.AnnotationRefName:  ref.Tag(),
			}
			manifestDescriptors = append(manifestDescriptors, taggedManifest)
		}

		// If no ref was assigned, make sure the image is still included in index.json.
		if len(manifestDescriptors) == 0 {
			manifestDescriptors = append(manifestDescriptors, untaggedMfstDesc)
		}

		for _, l := range imageDescr.layers {

@@ -657,6 +657,9 @@ func (s *DockerSwarmSuite) TearDownTest(ctx context.Context, c *testing.T) {
	s.daemonsLock.Lock()
	for _, d := range s.daemons {
		if d != nil {
			if c.Failed() {
				d.TailLogsT(c, 100)
			}
			d.Stop(c)
			d.Cleanup(c)
		}

@@ -27,6 +27,7 @@ import (
	remoteipam "github.com/docker/docker/libnetwork/ipams/remote/api"
	"github.com/docker/docker/pkg/plugins"
	"github.com/docker/docker/testutil"
	testdaemon "github.com/docker/docker/testutil/daemon"
	"github.com/moby/swarmkit/v2/ca/keyutils"
	"github.com/vishvananda/netlink"
	"gotest.tools/v3/assert"
@@ -1260,6 +1261,8 @@ func (s *DockerSwarmSuite) TestSwarmJoinPromoteLocked(c *testing.T) {
	poll.WaitOn(c, pollCheck(c, d3.CheckLocalNodeState(ctx), checker.Equals(swarm.LocalNodeStateActive)), poll.WithTimeout(time.Second))
}

const swarmIsEncryptedMsg = "Swarm is encrypted and needs to be unlocked"

func (s *DockerSwarmSuite) TestSwarmRotateUnlockKey(c *testing.T) {
	ctx := testutil.GetContext(c)
	d := s.AddDaemon(ctx, c, true, true)
@@ -1280,12 +1283,16 @@ func (s *DockerSwarmSuite) TestSwarmRotateUnlockKey(c *testing.T) {
		d.RestartNode(c)
		assert.Equal(c, getNodeStatus(c, d), swarm.LocalNodeStateLocked)

		unlock := func(d *daemon.Daemon, key string) *icmd.Result {
			cmd := d.Command("swarm", "unlock")
			cmd.Stdin = strings.NewReader(key)
			return icmd.RunCmd(cmd)
		}

		outs, _ = d.Cmd("node", "ls")
		assert.Assert(c, strings.Contains(outs, swarmIsEncryptedMsg), outs)

		result := unlock(d, unlockKey)
		if result.Error == nil {
			// On occasion, the daemon may not have finished
			// rotating the KEK before restarting. The test is
@@ -1295,13 +1302,16 @@ func (s *DockerSwarmSuite) TestSwarmRotateUnlockKey(c *testing.T) {
			// restart again, the new key should be required this
			// time.

			// Wait for the rotation to happen.
			// Since there are multiple rotations, we need to wait for the number of rotations we are currently on to be reflected in the logs.
			// This is a little janky... it depends on specific log messages AND these are debug logs... but it is the best we can do for now.
			matcher := testdaemon.ScanLogsMatchCount(testdaemon.ScanLogsMatchString("successfully rotated KEK"), i+1)
			poll.WaitOn(c, d.PollCheckLogs(ctx, matcher), poll.WithDelay(3*time.Second), poll.WithTimeout(time.Minute))

			d.RestartNode(c)

			result = unlock(d, unlockKey)
		}
		result.Assert(c, icmd.Expected{
			ExitCode: 1,
@@ -1309,28 +1319,20 @@ func (s *DockerSwarmSuite) TestSwarmRotateUnlockKey(c *testing.T) {
		})

		outs, _ = d.Cmd("node", "ls")
		assert.Assert(c, strings.Contains(outs, swarmIsEncryptedMsg), outs)
		unlock(d, newUnlockKey).Assert(c, icmd.Success)

		assert.Equal(c, getNodeStatus(c, d), swarm.LocalNodeStateActive)

		checkNodeLs := func(t poll.LogT) poll.Result {
			// an issue sometimes prevents the leader from being available right away
			out, err := d.Cmd("node", "ls")
			if err != nil {
				return poll.Continue("error running node ls: %v: %s", err, out)
			}
			return poll.Success()
		}
		poll.WaitOn(c, checkNodeLs, poll.WithDelay(3*time.Second), poll.WithTimeout(time.Minute))

		unlockKey = newUnlockKey
	}
@@ -1368,15 +1370,23 @@ func (s *DockerSwarmSuite) TestSwarmClusterRotateUnlockKey(c *testing.T) {
	d2.RestartNode(c)
	d3.RestartNode(c)

	unlock := func(d *daemon.Daemon, key string) *icmd.Result {
		cmd := d.Command("swarm", "unlock")
		cmd.Stdin = strings.NewReader(key)
		return icmd.RunCmd(cmd)
	}

	const swarmIsEncryptedMsg = "Swarm is encrypted and needs to be unlocked"

	for _, d := range []*daemon.Daemon{d2, d3} {
		assert.Equal(c, getNodeStatus(c, d), swarm.LocalNodeStateLocked)

		outs, _ := d.Cmd("node", "ls")
		assert.Assert(c, strings.Contains(outs, swarmIsEncryptedMsg), outs)

		// unlock with the original key should fail
		// Use poll here because the daemon may not have finished
		result := unlock(d, unlockKey)
		if result.Error == nil {
			// On occasion, the daemon may not have finished
			// rotating the KEK before restarting. The test is
@@ -1386,13 +1396,14 @@ func (s *DockerSwarmSuite) TestSwarmClusterRotateUnlockKey(c *testing.T) {
			// restart again, the new key should be required this
			// time.

			// Wait for the rotation to happen.
			// Since there are multiple rotations, we need to wait for the number of rotations we are currently on to be reflected in the logs.
			// This is a little janky... it depends on specific log messages AND these are debug logs... but it is the best we can do for now.
			matcher := testdaemon.ScanLogsMatchCount(testdaemon.ScanLogsMatchString("successfully rotated KEK"), i+1)
			poll.WaitOn(c, d.PollCheckLogs(ctx, matcher), poll.WithDelay(3*time.Second), poll.WithTimeout(time.Minute))

			d.RestartNode(c)

			result = unlock(d, unlockKey)
		}
		result.Assert(c, icmd.Expected{
			ExitCode: 1,
@@ -1400,31 +1411,21 @@ func (s *DockerSwarmSuite) TestSwarmClusterRotateUnlockKey(c *testing.T) {
		})

		outs, _ = d.Cmd("node", "ls")
		assert.Assert(c, strings.Contains(outs, swarmIsEncryptedMsg), outs)

		// now unlock with the rotated key, this should succeed
		unlock(d, newUnlockKey).Assert(c, icmd.Success)
		assert.Equal(c, getNodeStatus(c, d), swarm.LocalNodeStateActive)

		checkNodeLs := func(t poll.LogT) poll.Result {
			// an issue sometimes prevents the leader from being available right away
			out, err := d.Cmd("node", "ls")
			if err != nil {
				return poll.Continue("error running node ls: %v: %s", err, out)
			}
			return poll.Success()
		}
		poll.WaitOn(c, checkNodeLs, poll.WithDelay(3*time.Second), poll.WithTimeout(time.Minute))
	}

	unlockKey = newUnlockKey

@@ -18,6 +18,8 @@ import (
	"github.com/docker/docker/api/types/versions"
	"github.com/docker/docker/integration/internal/build"
	"github.com/docker/docker/integration/internal/container"
	"github.com/docker/docker/internal/testutils"
	"github.com/docker/docker/internal/testutils/specialimage"
	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/testutil/fakecontext"
	"github.com/opencontainers/go-digest"
@@ -88,45 +90,126 @@ func TestSaveCheckTimes(t *testing.T) {
}

func TestSaveOCI(t *testing.T) {
	skip.If(t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.44"), "OCI layout support was introduced in v25")

	ctx := setupTest(t)
	client := testEnv.APIClient()

	t.Parallel()

	const busybox = "busybox:latest"
	inspectBusybox, _, err := client.ImageInspectWithRaw(ctx, busybox)
	assert.NilError(t, err)

	type testCase struct {
		image                 string
		expectedOCIRef        string
		expectedContainerdRef string
	}

	testCases := []testCase{
		// Busybox by tagged name
		testCase{image: busybox, expectedContainerdRef: "docker.io/library/busybox:latest", expectedOCIRef: "latest"},

		// Busybox by ID
		testCase{image: inspectBusybox.ID},
	}

	if testEnv.DaemonInfo.OSType != "windows" {
		multiLayerImage := specialimage.Load(ctx, t, client, specialimage.MultiLayer)
		// Multi-layer image
		testCases = append(testCases, testCase{image: multiLayerImage, expectedContainerdRef: "docker.io/library/multilayer:latest", expectedOCIRef: "latest"})
	}

	// Busybox frozen image will have empty RepoDigests when loaded into the
	// graphdriver image store so we can't use it.
	// This will work with the containerd image store though.
	if len(inspectBusybox.RepoDigests) > 0 {
		// Digested reference
		testCases = append(testCases, testCase{
			image: inspectBusybox.RepoDigests[0],
		})
	}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.image, func(t *testing.T) {
			// Get information about the original image.
			inspect, _, err := client.ImageInspectWithRaw(ctx, tc.image)
			assert.NilError(t, err)

			rdr, err := client.ImageSave(ctx, []string{tc.image})
			assert.NilError(t, err)
			defer rdr.Close()

			tarfs := tarIndexFS(t, rdr)

			indexData, err := fs.ReadFile(tarfs, "index.json")
			assert.NilError(t, err, "failed to read index.json")

			var index ocispec.Index
			assert.NilError(t, json.Unmarshal(indexData, &index), "failed to unmarshal index.json")

			// All test images are single-platform, so they should have only one manifest.
			assert.Assert(t, is.Len(index.Manifests, 1))

			manifestData, err := fs.ReadFile(tarfs, "blobs/sha256/"+index.Manifests[0].Digest.Encoded())
			assert.NilError(t, err)

			var manifest ocispec.Manifest
			assert.NilError(t, json.Unmarshal(manifestData, &manifest))

			t.Run("Manifest", func(t *testing.T) {
				assert.Check(t, is.Len(manifest.Layers, len(inspect.RootFS.Layers)))

				var digests []string
				// Check if layers referenced by the manifest exist in the archive
				// and match the layers from the original image.
				for _, l := range manifest.Layers {
					layerPath := "blobs/sha256/" + l.Digest.Encoded()
					stat, err := fs.Stat(tarfs, layerPath)
					assert.NilError(t, err)

					assert.Check(t, is.Equal(l.Size, stat.Size()))

					f, err := tarfs.Open(layerPath)
					assert.NilError(t, err)

					layerDigest, err := testutils.UncompressedTarDigest(f)
					f.Close()

					assert.NilError(t, err)

					digests = append(digests, layerDigest.String())
				}

				assert.Check(t, is.DeepEqual(digests, inspect.RootFS.Layers))
			})

			t.Run("Config", func(t *testing.T) {
				configData, err := fs.ReadFile(tarfs, "blobs/sha256/"+manifest.Config.Digest.Encoded())
				assert.NilError(t, err)

				var config ocispec.Image
				assert.NilError(t, json.Unmarshal(configData, &config))

				var diffIDs []string
				for _, l := range config.RootFS.DiffIDs {
					diffIDs = append(diffIDs, l.String())
				}

				assert.Check(t, is.DeepEqual(diffIDs, inspect.RootFS.Layers))
			})

			t.Run("Containerd image name", func(t *testing.T) {
				assert.Check(t, is.Equal(index.Manifests[0].Annotations["io.containerd.image.name"], tc.expectedContainerdRef))
			})

			t.Run("OCI reference tag", func(t *testing.T) {
				assert.Check(t, is.Equal(index.Manifests[0].Annotations["org.opencontainers.image.ref.name"], tc.expectedOCIRef))
			})
		})
	}
}

@@ -4,8 +4,11 @@ import (
	"testing"

	containertypes "github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
	"github.com/docker/docker/integration/internal/container"
	"github.com/docker/docker/integration/internal/network"
	"github.com/docker/docker/libnetwork/drivers/bridge"
	"github.com/docker/docker/testutil"
	"github.com/docker/docker/testutil/daemon"
	"gotest.tools/v3/assert"
	is "gotest.tools/v3/assert/cmp"
@@ -35,7 +38,7 @@ func TestMACAddrOnRestart(t *testing.T) {
	const netName = "testmacaddrs"
	network.CreateNoError(ctx, t, c, netName,
		network.WithDriver("bridge"),
		network.WithOption(bridge.BridgeName, netName))
	defer network.RemoveNoError(ctx, t, c, netName)

	const ctr1Name = "ctr1"
@@ -77,3 +80,154 @@ func TestMACAddrOnRestart(t *testing.T) {
	assert.Check(t, ctr1MAC != ctr2MAC,
		"expected containers to have different MAC addresses; got %q for both", ctr1MAC)
}

// Check that a configured MAC address is restored after a container restart,
// and after a daemon restart.
func TestCfgdMACAddrOnRestart(t *testing.T) {
	skip.If(t, testEnv.DaemonInfo.OSType == "windows")

	ctx := setupTest(t)

	d := daemon.New(t)
	d.StartWithBusybox(ctx, t)
	defer d.Stop(t)

	c := d.NewClientT(t)
	defer c.Close()

	const netName = "testcfgmacaddr"
	network.CreateNoError(ctx, t, c, netName,
		network.WithDriver("bridge"),
		network.WithOption(bridge.BridgeName, netName))
	defer network.RemoveNoError(ctx, t, c, netName)

	const wantMAC = "02:42:ac:11:00:42"
	const ctr1Name = "ctr1"
	id1 := container.Run(ctx, t, c,
		container.WithName(ctr1Name),
		container.WithImage("busybox:latest"),
		container.WithCmd("top"),
		container.WithNetworkMode(netName),
		container.WithMacAddress(netName, wantMAC))
	defer c.ContainerRemove(ctx, id1, containertypes.RemoveOptions{
		Force: true,
	})

	inspect := container.Inspect(ctx, t, c, ctr1Name)
	gotMAC := inspect.NetworkSettings.Networks[netName].MacAddress
	assert.Check(t, is.Equal(wantMAC, gotMAC))

	startAndCheck := func() {
		t.Helper()
		err := c.ContainerStart(ctx, ctr1Name, containertypes.StartOptions{})
		assert.Assert(t, is.Nil(err))
		inspect = container.Inspect(ctx, t, c, ctr1Name)
		gotMAC = inspect.NetworkSettings.Networks[netName].MacAddress
		assert.Check(t, is.Equal(wantMAC, gotMAC))
	}

	// Restart the container, check that the MAC address is restored.
	err := c.ContainerStop(ctx, ctr1Name, containertypes.StopOptions{})
	assert.Assert(t, is.Nil(err))
	startAndCheck()

	// Restart the daemon, check that the MAC address is restored.
	err = c.ContainerStop(ctx, ctr1Name, containertypes.StopOptions{})
	assert.Assert(t, is.Nil(err))
	d.Restart(t)
	startAndCheck()
}

// Regression test for https://github.com/moby/moby/issues/47228 - check that a
// generated MAC address is not included in the Config section of 'inspect'
// output, but a configured address is.
func TestInspectCfgdMAC(t *testing.T) {
	skip.If(t, testEnv.DaemonInfo.OSType == "windows")

	ctx := setupTest(t)

	d := daemon.New(t)
	d.StartWithBusybox(ctx, t)
	defer d.Stop(t)

	testcases := []struct {
		name       string
		desiredMAC string
		netName    string
		ctrWide    bool
	}{
		{
			name:    "generated address default bridge",
			netName: "bridge",
		},
		{
			name:       "configured address default bridge",
			desiredMAC: "02:42:ac:11:00:42",
			netName:    "bridge",
		},
		{
			name:    "generated address custom bridge",
			netName: "testnet",
		},
		{
			name:       "configured address custom bridge",
			desiredMAC: "02:42:ac:11:00:42",
			netName:    "testnet",
		},
		{
			name:       "ctr-wide address default bridge",
			desiredMAC: "02:42:ac:11:00:42",
			netName:    "bridge",
			ctrWide:    true,
		},
	}

	for _, tc := range testcases {
		t.Run(tc.name, func(t *testing.T) {
			ctx := testutil.StartSpan(ctx, t)

			var copts []client.Opt
			if tc.ctrWide {
				copts = append(copts, client.WithVersion("1.43"))
			}
			c := d.NewClientT(t, copts...)
			defer c.Close()

			if tc.netName != "bridge" {
				const netName = "inspectcfgmac"
				network.CreateNoError(ctx, t, c, netName,
					network.WithDriver("bridge"),
					network.WithOption(bridge.BridgeName, netName))
				defer network.RemoveNoError(ctx, t, c, netName)
			}

			const ctrName = "ctr"
			opts := []func(*container.TestContainerConfig){
				container.WithName(ctrName),
				container.WithCmd("top"),
				container.WithImage("busybox:latest"),
			}
			// Don't specify the network name for the bridge network, because that
			// exercises a different code path (the network name isn't set until the
			// container starts, until then it's "default").
			if tc.netName != "bridge" {
				opts = append(opts, container.WithNetworkMode(tc.netName))
			}
			if tc.desiredMAC != "" {
				if tc.ctrWide {
					opts = append(opts, container.WithContainerWideMacAddress(tc.desiredMAC))
				} else {
					opts = append(opts, container.WithMacAddress(tc.netName, tc.desiredMAC))
				}
			}
			id := container.Create(ctx, t, c, opts...)
			defer c.ContainerRemove(ctx, id, containertypes.RemoveOptions{
				Force: true,
			})

			inspect := container.Inspect(ctx, t, c, ctrName)
			configMAC := inspect.Config.MacAddress //nolint:staticcheck // ignore SA1019: field is deprecated, but still used on API < v1.44.
			assert.Check(t, is.DeepEqual(configMAC, tc.desiredMAC))
		})
	}
}

@@ -124,6 +124,49 @@ func TestPluginInstall(t *testing.T) {
		assert.NilError(t, err)
	})

	t.Run("with digest", func(t *testing.T) {
		ctx := setupTest(t)

		reg := registry.NewV2(t)
		defer reg.Close()

		name := "test-" + strings.ToLower(t.Name())
		repo := path.Join(registry.DefaultURL, name+":latest")
		err := plugin.Create(ctx, client, repo)
		assert.NilError(t, err)

		rdr, err := client.PluginPush(ctx, repo, "")
		assert.NilError(t, err)
		defer rdr.Close()

		buf := &strings.Builder{}
		assert.NilError(t, err)
		var digest string
		assert.NilError(t, jsonmessage.DisplayJSONMessagesStream(rdr, buf, 0, false, func(j jsonmessage.JSONMessage) {
			if j.Aux != nil {
				var r types.PushResult
				assert.NilError(t, json.Unmarshal(*j.Aux, &r))
				digest = r.Digest
			}
		}), buf)

		err = client.PluginRemove(ctx, repo, types.PluginRemoveOptions{Force: true})
		assert.NilError(t, err)

		rdr, err = client.PluginInstall(ctx, repo, types.PluginInstallOptions{
			Disabled:  true,
			RemoteRef: repo + "@" + digest,
		})
		assert.NilError(t, err)
		defer rdr.Close()

		_, err = io.Copy(io.Discard, rdr)
		assert.NilError(t, err)

		_, _, err = client.PluginInspectWithRaw(ctx, repo)
		assert.NilError(t, err)
	})

	t.Run("with htpasswd", func(t *testing.T) {
		ctx := setupTest(t)

@@ -1,3 +1,6 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
//go:build go1.19

package sliceutil

func Dedup[T comparable](slice []T) []T {

internal/testutils/archive.go (new file)
@@ -0,0 +1,24 @@
package testutils

import (
	"io"

	"github.com/docker/docker/pkg/archive"
	"github.com/opencontainers/go-digest"
)

// UncompressedTarDigest returns the canonical digest of the uncompressed tar stream.
func UncompressedTarDigest(compressedTar io.Reader) (digest.Digest, error) {
	rd, err := archive.DecompressStream(compressedTar)
	if err != nil {
		return "", err
	}

	defer rd.Close()

	digester := digest.Canonical.Digester()
	if _, err := io.Copy(digester.Hash(), rd); err != nil {
		return "", err
	}
	return digester.Digest(), nil
}

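A usage sketch for the helper above. It lives in an internal package, so it is only importable from within the moby tree, and the blob path below is hypothetical:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/docker/docker/internal/testutils"
)

func main() {
	// A layer blob as found in a `docker save` archive; it may be
	// gzip-compressed or a plain tar, DecompressStream handles both.
	f, err := os.Open("blobs/sha256/0123abcd")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	dgst, err := testutils.UncompressedTarDigest(f)
	if err != nil {
		log.Fatal(err)
	}
	// The result is the digest of the uncompressed stream, so it can be
	// compared directly against the DiffIDs in the image config, as
	// TestSaveOCI does above.
	fmt.Println(dgst)
}
```
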
@@ -1,6 +1,7 @@
package specialimage

import (
	"bytes"
	"context"
	"encoding/json"
	"errors"
@@ -41,7 +42,10 @@ func Load(ctx context.Context, t *testing.T, apiClient client.APIClient, imageFu
		t.Fatalf("Failed load: %s", string(respBody))
	}

	all, err := io.ReadAll(resp.Body)
	assert.NilError(t, err)

	decoder := json.NewDecoder(bytes.NewReader(all))
	for {
		var msg jsonmessage.JSONMessage
		err := decoder.Decode(&msg)
@@ -61,6 +65,6 @@ func Load(ctx context.Context, t *testing.T, apiClient client.APIClient, imageFu
		}
	}

	t.Fatalf("failed to read image ID\n%s", string(all))
	return ""
}

internal/testutils/specialimage/multilayer.go (new file)
@@ -0,0 +1,201 @@
package specialimage

import (
	"bytes"
	"encoding/json"
	"io"
	"os"
	"path/filepath"

	"github.com/containerd/containerd/platforms"
	"github.com/distribution/reference"
	"github.com/docker/docker/pkg/archive"
	"github.com/google/uuid"
	"github.com/opencontainers/go-digest"
	"github.com/opencontainers/image-spec/specs-go"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

func MultiLayer(dir string) error {
	const imageRef = "multilayer:latest"

	layer1Desc, err := writeLayerWithOneFile(dir, "foo", []byte("1"))
	if err != nil {
		return err
	}
	layer2Desc, err := writeLayerWithOneFile(dir, "bar", []byte("2"))
	if err != nil {
		return err
	}
	layer3Desc, err := writeLayerWithOneFile(dir, "hello", []byte("world"))
	if err != nil {
		return err
	}

	configDesc, err := writeJsonBlob(dir, ocispec.MediaTypeImageConfig, ocispec.Image{
		Platform: platforms.DefaultSpec(),
		Config: ocispec.ImageConfig{
			Env: []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"},
		},
		RootFS: ocispec.RootFS{
			Type:    "layers",
			DiffIDs: []digest.Digest{layer1Desc.Digest, layer2Desc.Digest, layer3Desc.Digest},
		},
	})
	if err != nil {
		return err
	}

	manifest := ocispec.Manifest{
		MediaType: ocispec.MediaTypeImageManifest,
		Config:    configDesc,
		Layers:    []ocispec.Descriptor{layer1Desc, layer2Desc, layer3Desc},
	}

	legacyManifests := []manifestItem{
		{
			Config:   blobPath(configDesc),
			RepoTags: []string{imageRef},
			Layers:   []string{blobPath(layer1Desc), blobPath(layer2Desc), blobPath(layer3Desc)},
		},
	}

	ref, err := reference.ParseNormalizedNamed(imageRef)
	if err != nil {
		return err
	}
	return singlePlatformImage(dir, ref, manifest, legacyManifests)
}

// Legacy manifest item (manifest.json)
type manifestItem struct {
	Config   string
	RepoTags []string
	Layers   []string
}

func singlePlatformImage(dir string, ref reference.Named, manifest ocispec.Manifest, legacyManifests []manifestItem) error {
	manifestDesc, err := writeJsonBlob(dir, ocispec.MediaTypeImageManifest, manifest)
	if err != nil {
		return err
	}

	if ref != nil {
		manifestDesc.Annotations = map[string]string{
			"io.containerd.image.name": ref.String(),
		}

		if tagged, ok := ref.(reference.Tagged); ok {
			manifestDesc.Annotations[ocispec.AnnotationRefName] = tagged.Tag()
		}
	}

	if err := writeJson(ocispec.Index{
		Versioned: specs.Versioned{SchemaVersion: 2},
		MediaType: ocispec.MediaTypeImageIndex,
		Manifests: []ocispec.Descriptor{manifestDesc},
	}, filepath.Join(dir, "index.json")); err != nil {
		return err
	}

	if err := writeJson(legacyManifests, filepath.Join(dir, "manifest.json")); err != nil {
		return err
	}

	return os.WriteFile(filepath.Join(dir, "oci-layout"), []byte(`{"imageLayoutVersion": "1.0.0"}`), 0o644)
}

func fileArchive(dir string, name string, content []byte) (io.ReadCloser, error) {
	tmp, err := os.MkdirTemp("", "")
	if err != nil {
		return nil, err
	}

	if err := os.WriteFile(filepath.Join(tmp, name), content, 0o644); err != nil {
		return nil, err
	}

	return archive.Tar(tmp, archive.Uncompressed)
}

func writeLayerWithOneFile(dir string, filename string, content []byte) (ocispec.Descriptor, error) {
	rd, err := fileArchive(dir, filename, content)
	if err != nil {
		return ocispec.Descriptor{}, err
	}

	return writeBlob(dir, ocispec.MediaTypeImageLayer, rd)
}

func writeJsonBlob(dir string, mt string, obj any) (ocispec.Descriptor, error) {
	b, err := json.Marshal(obj)
	if err != nil {
		return ocispec.Descriptor{}, err
	}

	return writeBlob(dir, mt, bytes.NewReader(b))
}

func writeJson(obj any, path string) error {
	b, err := json.Marshal(obj)
	if err != nil {
		return err
	}

	return os.WriteFile(path, b, 0o644)
}

func writeBlob(dir string, mt string, rd io.Reader) (_ ocispec.Descriptor, outErr error) {
	digester := digest.Canonical.Digester()
	hashTee := io.TeeReader(rd, digester.Hash())

	blobsPath := filepath.Join(dir, "blobs", "sha256")
	if err := os.MkdirAll(blobsPath, 0o755); err != nil {
		return ocispec.Descriptor{}, err
	}

	tmpPath := filepath.Join(blobsPath, uuid.New().String())
	file, err := os.Create(tmpPath)
	if err != nil {
		return ocispec.Descriptor{}, err
	}

	defer func() {
		if outErr != nil {
			file.Close()
			os.Remove(tmpPath)
		}
	}()

	if _, err := io.Copy(file, hashTee); err != nil {
		return ocispec.Descriptor{}, err
	}

	dgst := digester.Digest()

	stat, err := os.Stat(tmpPath)
	if err != nil {
		return ocispec.Descriptor{}, err
	}

	file.Close()
	if err := os.Rename(tmpPath, filepath.Join(blobsPath, dgst.Encoded())); err != nil {
		return ocispec.Descriptor{}, err
	}

	return ocispec.Descriptor{
		MediaType: mt,
		Digest:    dgst,
		Size:      stat.Size(),
	}, nil
}

func blobPath(desc ocispec.Descriptor) string {
	return "blobs/sha256/" + desc.Digest.Encoded()
}
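A sketch of how a test might consume MultiLayer: write the layout into a temp directory, then tar it for streaming to the image load endpoint. The wiring below is an assumption based on the signatures in this diff, not code from the commit:

	dir := t.TempDir()
	assert.NilError(t, specialimage.MultiLayer(dir))

	// dir now holds an OCI layout: oci-layout, index.json, manifest.json,
	// and blobs/sha256/<encoded digest> for each config, manifest and layer.
	rdr, err := archive.Tar(dir, archive.Uncompressed)
	assert.NilError(t, err)
	defer rdr.Close()
	// rdr can be streamed to the /images/load endpoint; assuming imageFunc has
	// the shape func(dir string) error, the Load helper above would be invoked
	// as specialimage.Load(ctx, t, apiClient, specialimage.MultiLayer).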
@@ -190,12 +190,13 @@ func (r *remote) startContainerd() error {
		runtime.LockOSThread()
		defer runtime.UnlockOSThread()
		err := cmd.Start()
		startedCh <- err
		if err != nil {
			startedCh <- err
			return
		}

		r.daemonWaitCh = make(chan struct{})
		startedCh <- nil

		// Reap our child when needed
		if err := cmd.Wait(); err != nil {
			r.logger.WithError(err).Errorf("containerd did not exit successfully")
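The fix above changes when the start result is sent: previously startedCh received the result of cmd.Start() before r.daemonWaitCh was initialized, so a caller unblocked by that send could observe a half-initialized remote. Now the error path sends immediately, while the success send happens only after daemonWaitCh exists. The pattern in isolation, as a sketch with invented names:

	func startProcess(cmd *exec.Cmd) (<-chan struct{}, error) {
		startedCh := make(chan error)
		waitCh := make(chan struct{})
		go func() {
			if err := cmd.Start(); err != nil {
				startedCh <- err // exactly one send: the failure path
				return
			}
			startedCh <- nil // exactly one send: success, after setup is complete
			_ = cmd.Wait()   // reap the child
			close(waitCh)
		}()
		if err := <-startedCh; err != nil {
			return nil, err
		}
		return waitCh, nil
	}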
@@ -2,9 +2,11 @@ package bridge

import (
	"context"
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"syscall"

	"github.com/containerd/log"
	"github.com/docker/docker/libnetwork/netutils"
@@ -47,6 +49,14 @@ func setupDevice(config *networkConfiguration, i *bridgeInterface) error {

func setupMTU(config *networkConfiguration, i *bridgeInterface) error {
	if err := i.nlh.LinkSetMTU(i.Link, config.Mtu); err != nil {
		// Before Linux v4.17, bridges couldn't be configured "manually" with an MTU greater than 1500, although it
		// could be autoconfigured with such a value when interfaces were added to the bridge. In that case, the
		// bridge MTU would be set automatically by the kernel to the lowest MTU of all interfaces attached. To keep
		// compatibility with older kernels, we need to discard -EINVAL.
		// TODO(aker): remove this once we drop support for CentOS/RHEL 7.
		if config.Mtu > 1500 && config.Mtu <= 0xFFFF && errors.Is(err, syscall.EINVAL) {
			return nil
		}
		log.G(context.TODO()).WithError(err).Errorf("Failed to set MTU on bridge %s via netlink", config.BridgeName)
		return err
	}
@@ -3,11 +3,13 @@ package bridge

import (
	"bytes"
	"net"
	"syscall"
	"testing"

	"github.com/docker/docker/internal/testutils/netnsutils"
	"github.com/docker/docker/libnetwork/netutils"
	"github.com/vishvananda/netlink"
	"gotest.tools/v3/assert"
)

func TestSetupNewBridge(t *testing.T) {
@@ -92,3 +94,35 @@ func TestGenerateRandomMAC(t *testing.T) {
		t.Fatalf("Generated twice the same MAC address %v", mac1)
	}
}

func TestMTUBiggerThan1500(t *testing.T) {
	defer netnsutils.SetupTestOSContext(t)()

	nh, err := netlink.NewHandle()
	if err != nil {
		t.Fatal(err)
	}
	defer nh.Close()

	config := &networkConfiguration{BridgeName: DefaultBridgeName, Mtu: 9000}
	br := &bridgeInterface{nlh: nh}

	assert.NilError(t, setupDevice(config, br))
	assert.NilError(t, setupMTU(config, br))
}

func TestMTUBiggerThan64K(t *testing.T) {
	defer netnsutils.SetupTestOSContext(t)()

	nh, err := netlink.NewHandle()
	if err != nil {
		t.Fatal(err)
	}
	defer nh.Close()

	config := &networkConfiguration{BridgeName: DefaultBridgeName, Mtu: 65536}
	br := &bridgeInterface{nlh: nh}

	assert.NilError(t, setupDevice(config, br))
	assert.ErrorIs(t, setupMTU(config, br), syscall.EINVAL)
}
@@ -8,6 +8,7 @@ import (
	"strings"

	"github.com/containerd/log"
	"github.com/docker/docker/errdefs"
	"github.com/docker/docker/libnetwork/iptables"
	"github.com/docker/docker/libnetwork/types"
	"github.com/vishvananda/netlink"
@@ -408,6 +409,17 @@ func setupInternalNetworkRules(bridgeIface string, addr *net.IPNet, icc, insert
	var version iptables.IPVersion
	var inDropRule, outDropRule iptRule

	// Either add or remove the interface from the firewalld zone, if firewalld is running.
	if insert {
		if err := iptables.AddInterfaceFirewalld(bridgeIface); err != nil {
			return err
		}
	} else {
		if err := iptables.DelInterfaceFirewalld(bridgeIface); err != nil && !errdefs.IsNotFound(err) {
			return err
		}
	}

	if addr.IP.To4() != nil {
		version = iptables.IPv4
		inDropRule = iptRule{
@@ -1,3 +1,6 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
//go:build go1.19

package libnetwork

import (
@@ -3,11 +3,15 @@ package ioutils // import "github.com/docker/docker/pkg/ioutils"
import (
	"context"
	"io"
	"runtime/debug"
	"sync/atomic"

	// make sure crypto.SHA256, crypto.SHA512 and crypto.SHA384 are registered
	// TODO remove once https://github.com/opencontainers/go-digest/pull/64 is merged.
	_ "crypto/sha256"
	_ "crypto/sha512"

	"github.com/containerd/log"
)

// ReadCloserWrapper wraps an io.Reader, and implements an io.ReadCloser
@@ -16,10 +20,15 @@ import (
type ReadCloserWrapper struct {
	io.Reader
	closer func() error
	closed atomic.Bool
}

// Close calls back the passed closer function
func (r *ReadCloserWrapper) Close() error {
	if !r.closed.CompareAndSwap(false, true) {
		subsequentCloseWarn("ReadCloserWrapper")
		return nil
	}
	return r.closer()
}

@@ -87,6 +96,7 @@ type cancelReadCloser struct {
	cancel func()
	pR     *io.PipeReader // Stream to read from
	pW     *io.PipeWriter
	closed atomic.Bool
}

// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the
@@ -146,6 +156,17 @@ func (p *cancelReadCloser) closeWithError(err error) {
// Close closes the wrapper and its underlying reader. It will cause
// future calls to Read to return io.EOF.
func (p *cancelReadCloser) Close() error {
	if !p.closed.CompareAndSwap(false, true) {
		subsequentCloseWarn("cancelReadCloser")
		return nil
	}
	p.closeWithError(io.EOF)
	return nil
}

func subsequentCloseWarn(name string) {
	log.G(context.TODO()).Error("subsequent attempt to close " + name)
	if log.GetLevel() >= log.DebugLevel {
		log.G(context.TODO()).Errorf("stack trace: %s", string(debug.Stack()))
	}
}
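A self-contained sketch of the double-close guard added above, using the package's exported constructor (the log output described in the comments follows from the subsequentCloseWarn calls in this diff):

	r := ioutils.NewReadCloserWrapper(strings.NewReader("hello"), func() error {
		fmt.Println("closer invoked")
		return nil
	})
	_ = r.Close() // runs the closer; closed flips false -> true
	_ = r.Close() // guarded: CompareAndSwap fails, the subsequent close is
	              // logged, and nil is returned without re-running the closer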
@@ -1,6 +1,9 @@
package ioutils // import "github.com/docker/docker/pkg/ioutils"

import "io"
import (
	"io"
	"sync/atomic"
)

// NopWriter represents a type whose write operation is a no-op.
type NopWriter struct{}
@@ -29,9 +32,14 @@ func (f *NopFlusher) Flush() {}
type writeCloserWrapper struct {
	io.Writer
	closer func() error
	closed atomic.Bool
}

func (r *writeCloserWrapper) Close() error {
	if !r.closed.CompareAndSwap(false, true) {
		subsequentCloseWarn("WriteCloserWrapper")
		return nil
	}
	return r.closer()
}
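The same guard protects writers whose closer has side effects; a sketch using the package's NewWriteCloserWrapper constructor, where the closer is a buffer flush that must run exactly once:

	var buf bytes.Buffer
	bw := bufio.NewWriter(&buf)
	wc := ioutils.NewWriteCloserWrapper(bw, bw.Flush) // flush once on close
	fmt.Fprint(wc, "hello")
	_ = wc.Close() // flushes; closed flips false -> true
	_ = wc.Close() // guarded: logs the subsequent close, does not flush again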
@@ -200,8 +200,13 @@ func withFetchProgress(cs content.Store, out progress.Output, ref reference.Name
	switch desc.MediaType {
	case ocispec.MediaTypeImageManifest, images.MediaTypeDockerSchema2Manifest:
		tn := reference.TagNameOnly(ref)
		tagged := tn.(reference.Tagged)
		progress.Messagef(out, tagged.Tag(), "Pulling from %s", reference.FamiliarName(ref))
		var tagOrDigest string
		if tagged, ok := tn.(reference.Tagged); ok {
			tagOrDigest = tagged.Tag()
		} else {
			tagOrDigest = tn.String()
		}
		progress.Messagef(out, tagOrDigest, "Pulling from %s", reference.FamiliarName(ref))
		progress.Messagef(out, "", "Digest: %s", desc.Digest.String())
		return nil, nil
	case
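The old code assumed reference.TagNameOnly always yields a tagged reference, but for a digested reference (e.g. alpine@sha256:…) no :latest tag is added and the unchecked type assertion tn.(reference.Tagged) would panic. A sketch of the distinction (the digest value is a placeholder):

	ref, _ := reference.ParseNormalizedNamed(
		"alpine@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
	tn := reference.TagNameOnly(ref) // digested refs are returned unchanged
	if tagged, ok := tn.(reference.Tagged); ok {
		fmt.Println("tag:", tagged.Tag())
	} else {
		fmt.Println("no tag, falling back to:", tn.String()) // this branch runs
	}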
@@ -64,6 +64,7 @@
	"alarm",
	"bind",
	"brk",
	"cachestat",
	"capget",
	"capset",
	"chdir",
@@ -109,6 +110,7 @@
	"fchdir",
	"fchmod",
	"fchmodat",
	"fchmodat2",
	"fchown",
	"fchown32",
	"fchownat",
@@ -130,8 +132,11 @@
	"ftruncate",
	"ftruncate64",
	"futex",
	"futex_requeue",
	"futex_time64",
	"futex_wait",
	"futex_waitv",
	"futex_wake",
	"futimesat",
	"getcpu",
	"getcwd",
@@ -203,6 +208,7 @@
	"lstat",
	"lstat64",
	"madvise",
	"map_shadow_stack",
	"membarrier",
	"memfd_create",
	"memfd_secret",
@@ -780,7 +786,8 @@
	"names": [
		"get_mempolicy",
		"mbind",
		"set_mempolicy"
		"set_mempolicy",
		"set_mempolicy_home_node"
	],
	"action": "SCMP_ACT_ALLOW",
	"includes": {
@@ -56,6 +56,7 @@ func DefaultProfile() *Seccomp {
	"alarm",
	"bind",
	"brk",
	"cachestat", // kernel v6.5, libseccomp v2.5.5
	"capget",
	"capset",
	"chdir",
@@ -101,6 +102,7 @@ func DefaultProfile() *Seccomp {
	"fchdir",
	"fchmod",
	"fchmodat",
	"fchmodat2", // kernel v6.6, libseccomp v2.5.5
	"fchown",
	"fchown32",
	"fchownat",
@@ -122,8 +124,11 @@ func DefaultProfile() *Seccomp {
	"ftruncate",
	"ftruncate64",
	"futex",
	"futex_requeue", // kernel v6.7, libseccomp v2.5.5
	"futex_time64",
	"futex_wait", // kernel v6.7, libseccomp v2.5.5
	"futex_waitv",
	"futex_wake", // kernel v6.7, libseccomp v2.5.5
	"futimesat",
	"getcpu",
	"getcwd",
@@ -195,6 +200,7 @@ func DefaultProfile() *Seccomp {
	"lstat",
	"lstat64",
	"madvise",
	"map_shadow_stack", // kernel v6.6, libseccomp v2.5.5
	"membarrier",
	"memfd_create",
	"memfd_secret",
@@ -768,6 +774,7 @@ func DefaultProfile() *Seccomp {
	"get_mempolicy",
	"mbind",
	"set_mempolicy",
	"set_mempolicy_home_node", // kernel v5.17, libseccomp v2.5.4
},
Action: specs.ActAllow,
},
@@ -338,6 +338,17 @@ func ScanLogsMatchString(contains string) func(string) bool {
	}
}

// ScanLogsMatchCount returns a function that can be used to scan the daemon logs until the passed-in matcher function matches `count` times
func ScanLogsMatchCount(f func(string) bool, count int) func(string) bool {
	matched := 0
	return func(line string) bool {
		if f(line) {
			matched++
		}
		return matched == count
	}
}

// ScanLogsMatchAll returns a function that can be used to scan the daemon logs until *all* the passed-in strings are matched
func ScanLogsMatchAll(contains ...string) func(string) bool {
	matched := make(map[string]bool)
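A usage sketch composing the new helper with ScanLogsMatchString from the same file (the log lines fed in are made up):

	isRestart := daemon.ScanLogsMatchString("restarting container")
	done := daemon.ScanLogsMatchCount(isRestart, 3)

	for _, line := range []string{
		"restarting container abc",
		"pulling image",
		"restarting container abc",
		"restarting container abc",
	} {
		if done(line) {
			fmt.Println("matched 3 times, stop scanning") // fires on the last line
		}
	}

Note that the matcher returns matched == count rather than >=, so it reports true exactly once, on the line that completes the count.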
@@ -9,7 +9,7 @@ go 1.20
require (
	cloud.google.com/go/compute/metadata v0.2.3
	cloud.google.com/go/logging v1.7.0
	code.cloudfoundry.org/clock v1.0.0
	code.cloudfoundry.org/clock v1.1.0
	dario.cat/mergo v1.0.0
	github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24
	github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1
@@ -65,7 +65,7 @@ require (
	github.com/moby/locker v1.0.1
	github.com/moby/patternmatcher v0.6.0
	github.com/moby/pubsub v1.0.0
	github.com/moby/swarmkit/v2 v2.0.0-20230911190601-f082dd7a0cee
	github.com/moby/swarmkit/v2 v2.0.0-20240125134710-dcda100a8261
	github.com/moby/sys/mount v0.3.3
	github.com/moby/sys/mountinfo v0.7.1
	github.com/moby/sys/sequential v0.5.0
16
vendor.sum
@@ -46,8 +46,8 @@ cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiy
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
code.cloudfoundry.org/clock v1.0.0 h1:kFXWQM4bxYvdBw2X8BbBeXwQNgfoWv1vqAk2ZZyBN2o=
code.cloudfoundry.org/clock v1.0.0/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8=
code.cloudfoundry.org/clock v1.1.0 h1:XLzC6W3Ah/Y7ht1rmZ6+QfPdt1iGWEAAtIZXgiaj57c=
code.cloudfoundry.org/clock v1.1.0/go.mod h1:yA3fxddT9RINQL2XHS7PS+OXxKCGhfrZmlNUCIM6AKo=
code.gitea.io/sdk/gitea v0.12.0/go.mod h1:z3uwDV/b9Ls47NGukYM9XhnHtqPh/J+t40lsUrR6JDY=
contrib.go.opencensus.io/exporter/aws v0.0.0-20181029163544-2befc13012d0/go.mod h1:uu1P0UCM/6RbsMrgPa98ll8ZcHM858i/AD06a9aLRCA=
contrib.go.opencensus.io/exporter/ocagent v0.5.0/go.mod h1:ImxhfLRpxoYiSq891pBrLVhN+qmP8BTVvdH2YLs7Gl0=
@@ -505,6 +505,7 @@ github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4=
github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ=
github.com/go-toolsmith/astequal v0.0.0-20180903214952-dcb477bfacd6/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY=
@@ -644,6 +645,7 @@ github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hf
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 h1:yAJXTCF9TqKcTiHJAE8dj7HMvPfh66eeA2JYW7eFpSE=
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/rpmpack v0.0.0-20191226140753-aa36bfddb3a0/go.mod h1:RaTPr0KUf2K7fnZYLNDrr8rxAamWs3iNywJLtQ2AzBg=
@@ -910,8 +912,8 @@ github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkV
github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
github.com/moby/pubsub v1.0.0 h1:jkp/imWsmJz2f6LyFsk7EkVeN2HxR/HTTOY8kHrsxfA=
github.com/moby/pubsub v1.0.0/go.mod h1:bXSO+3h5MNXXCaEG+6/NlAIk7MMZbySZlnB+cUQhKKc=
github.com/moby/swarmkit/v2 v2.0.0-20230911190601-f082dd7a0cee h1:T7Mz81wbNduphN0195OfYadKSs/uQKr4zrKtsCSfQTc=
github.com/moby/swarmkit/v2 v2.0.0-20230911190601-f082dd7a0cee/go.mod h1:z90qBxgz/fNu4YjmMHi0hyPDKDitpOWzOJnvv9+KB2U=
github.com/moby/swarmkit/v2 v2.0.0-20240125134710-dcda100a8261 h1:mjLf2jYrqtIS4LvLzg0gNyJR4rMXS4X5Bg1A4hOhVMs=
github.com/moby/swarmkit/v2 v2.0.0-20240125134710-dcda100a8261/go.mod h1:oRJU1d0hrkkwCtouwfQGcIAKcVEkclMYoLWocqrg6gI=
github.com/moby/sys/mount v0.1.0/go.mod h1:FVQFLDRWwyBjDTBNQXDlWnSFREqOo3OKX9aqhmeoo74=
github.com/moby/sys/mount v0.1.1/go.mod h1:FVQFLDRWwyBjDTBNQXDlWnSFREqOo3OKX9aqhmeoo74=
github.com/moby/sys/mount v0.3.3 h1:fX1SVkXFJ47XWDoeFW4Sq7PdQJnV2QIDZAqjNqgEjUs=
@@ -968,10 +970,10 @@ github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
github.com/onsi/ginkgo/v2 v2.1.3 h1:e/3Cwtogj0HA+25nMP1jCMDIf8RtRYbGwGGuBIFztkc=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
github.com/onsi/ginkgo/v2 v2.9.2 h1:BA2GMJOtfGAfagzYtrAlufIP0lq6QERkFmHLMLPwFSU=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
@@ -980,8 +982,8 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J
github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.17.0 h1:9Luw4uT5HTjHTN8+aNcSThgH1vdXnmdJ8xIfZ4wyTRE=
github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE=
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
8
vendor/code.cloudfoundry.org/clock/README.md
generated
vendored
@@ -3,3 +3,11 @@
**Note**: This repository should be imported as `code.cloudfoundry.org/clock`.

Provides a `Clock` interface, useful for injecting time dependencies in tests.

## Reporting issues and requesting features

Please report all issues and feature requests in [cloudfoundry/diego-release](https://github.com/cloudfoundry/diego-release/issues).

## Contributing

For tagging please use the semver compatible version format e.g. `v1.0.0`.
2
vendor/github.com/moby/swarmkit/v2/api/genericresource/validate.go
generated
vendored
@@ -63,7 +63,7 @@ func HasResource(res *api.GenericResource, resources []*api.GenericResource) boo
		return false
	}

	if res.GetDiscreteResourceSpec().Value < rtype.DiscreteResourceSpec.Value {
	if res.GetDiscreteResourceSpec().Value > rtype.DiscreteResourceSpec.Value {
		return false
	}
6
vendor/modules.txt
vendored
@@ -18,8 +18,8 @@ cloud.google.com/go/logging/internal
cloud.google.com/go/longrunning
cloud.google.com/go/longrunning/autogen
cloud.google.com/go/longrunning/autogen/longrunningpb
# code.cloudfoundry.org/clock v1.0.0
## explicit
# code.cloudfoundry.org/clock v1.1.0
## explicit; go 1.20
code.cloudfoundry.org/clock
# dario.cat/mergo v1.0.0
## explicit; go 1.13
@@ -846,7 +846,7 @@ github.com/moby/patternmatcher/ignorefile
# github.com/moby/pubsub v1.0.0
## explicit; go 1.19
github.com/moby/pubsub
# github.com/moby/swarmkit/v2 v2.0.0-20230911190601-f082dd7a0cee
# github.com/moby/swarmkit/v2 v2.0.0-20240125134710-dcda100a8261
## explicit; go 1.18
github.com/moby/swarmkit/v2/agent
github.com/moby/swarmkit/v2/agent/configs