mirror of
https://github.com/moby/moby.git
synced 2026-01-12 19:21:41 +00:00
Compare commits
156 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
f415784c1a | ||
|
|
4ef26e4c35 | ||
|
|
2b409606ac | ||
|
|
00fbff3423 | ||
|
|
92df858a5b | ||
|
|
00f9f839c6 | ||
|
|
acd2546285 | ||
|
|
d334795adb | ||
|
|
71967c3a82 | ||
|
|
f06fd6d3c9 | ||
|
|
ce61e5777b | ||
|
|
26d6c35b1b | ||
|
|
a14b16e1f3 | ||
|
|
3ea40f50ef | ||
|
|
7c47f6d831 | ||
|
|
0847330073 | ||
|
|
b4c0ebf6d4 | ||
|
|
00f6814357 | ||
|
|
165516eb47 | ||
|
|
f099e911bd | ||
|
|
bace1b8a3b | ||
|
|
f9e54290b5 | ||
|
|
fc3df55230 | ||
|
|
b22872af60 | ||
|
|
c7e17ae65d | ||
|
|
d60c71a9d7 | ||
|
|
ad54b8f9ce | ||
|
|
8075689abd | ||
|
|
480dfaef06 | ||
|
|
e604d70e22 | ||
|
|
b6b13b20af | ||
|
|
b539aea3cd | ||
|
|
e43e322a3b | ||
|
|
89ea2469df | ||
|
|
f69e64ab12 | ||
|
|
67fbdf3c28 | ||
|
|
33a7e83e6d | ||
|
|
684b2688d2 | ||
|
|
b61930cc82 | ||
|
|
1db0510301 | ||
|
|
9ff06c515c | ||
|
|
8f0a803fc6 | ||
|
|
7d8c7c21f2 | ||
|
|
9cd4021dae | ||
|
|
4d6c4e44d7 | ||
|
|
e5b652add3 | ||
|
|
ca41647695 | ||
|
|
199b2496e7 | ||
|
|
65ec8c89a6 | ||
|
|
c447682dee | ||
|
|
a749f055d9 | ||
|
|
5a12eaf718 | ||
|
|
59f062b233 | ||
|
|
842a9c522a | ||
|
|
651b2feb27 | ||
|
|
a43c1eef18 | ||
|
|
728de37428 | ||
|
|
5bf90ded7a | ||
|
|
51d13163c5 | ||
|
|
9ca52f5fb9 | ||
|
|
ec820662de | ||
|
|
f3f1e091a8 | ||
|
|
16dc168388 | ||
|
|
12aaf29287 | ||
|
|
ca5250dc9f | ||
|
|
c912e5278b | ||
|
|
e9ed499888 | ||
|
|
6856a17655 | ||
|
|
31f4c5914e | ||
|
|
35f7b1d7c9 | ||
|
|
743a0df9ec | ||
|
|
bacba3726f | ||
|
|
f93d90cee3 | ||
|
|
00232ac981 | ||
|
|
88d0ed889d | ||
|
|
32c814a85f | ||
|
|
fb8e5d85f6 | ||
|
|
fb6695de75 | ||
|
|
089d70f3c8 | ||
|
|
2710c239df | ||
|
|
7982904677 | ||
|
|
fbffa88b76 | ||
|
|
41f080df25 | ||
|
|
c64e8a8117 | ||
|
|
0316eaaa23 | ||
|
|
270166cbe5 | ||
|
|
a012739c2c | ||
|
|
e53cf6bc02 | ||
|
|
4c5a99d08c | ||
|
|
f2126bfc7f | ||
|
|
95aff4f75c | ||
|
|
4d168615cc | ||
|
|
614ecc8201 | ||
|
|
26a318189b | ||
|
|
4e2e8fe181 | ||
|
|
4f2d0e656b | ||
|
|
8b44d5e80a | ||
|
|
4749b46391 | ||
|
|
3acb76ef2f | ||
|
|
d4f1fb1db2 | ||
|
|
b7346c5fb5 | ||
|
|
545e84c7ff | ||
|
|
bc97de45b4 | ||
|
|
e78b2bdb84 | ||
|
|
5d123a0ef8 | ||
|
|
9c1b0fb58f | ||
|
|
5c192650eb | ||
|
|
00d8bed6cf | ||
|
|
c31faaed8c | ||
|
|
e5e7d89092 | ||
|
|
f19a4f7d9e | ||
|
|
74d2c7fd53 | ||
|
|
cad60979a1 | ||
|
|
7656928264 | ||
|
|
c96356750f | ||
|
|
c231772a5c | ||
|
|
6ac44a4973 | ||
|
|
da1d6a4cac | ||
|
|
f91f92463d | ||
|
|
b1bcc6be66 | ||
|
|
341bb5120f | ||
|
|
87b232ecbc | ||
|
|
b88ec510b5 | ||
|
|
c1992d0de0 | ||
|
|
06ef6f04fd | ||
|
|
9dfebd37c2 | ||
|
|
14697bedd9 | ||
|
|
881e809b0a | ||
|
|
897614add1 | ||
|
|
af633391ca | ||
|
|
6b3afa4c2d | ||
|
|
17c33f93ce | ||
|
|
8de9fb4d4b | ||
|
|
b67e668c0f | ||
|
|
56885c08ba | ||
|
|
3ea3d5c759 | ||
|
|
5e024a24a0 | ||
|
|
fa10b16add | ||
|
|
b3b6bc9770 | ||
|
|
b8551186f7 | ||
|
|
3bd7323b96 | ||
|
|
ddc689206b | ||
|
|
ebbb4cad63 | ||
|
|
a926bec8fc | ||
|
|
89a48b65fc | ||
|
|
74360f99d7 | ||
|
|
aae4029600 | ||
|
|
822b2b6a1d | ||
|
|
a2802d0746 | ||
|
|
9281aea6ce | ||
|
|
0e655eaff2 | ||
|
|
b1d6fd957d | ||
|
|
7540f88434 | ||
|
|
19dd685407 | ||
|
|
f8d9617c43 | ||
|
|
bec5e8eed1 |
2
.github/workflows/.dco.yml
vendored
2
.github/workflows/.dco.yml
vendored
@@ -20,7 +20,7 @@ env:
|
||||
|
||||
jobs:
|
||||
run:
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-24.04
|
||||
timeout-minutes: 10 # guardrails timeout for the whole job
|
||||
steps:
|
||||
-
|
||||
|
||||
2
.github/workflows/.test-prepare.yml
vendored
2
.github/workflows/.test-prepare.yml
vendored
@@ -21,7 +21,7 @@ on:
|
||||
|
||||
jobs:
|
||||
run:
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-24.04
|
||||
timeout-minutes: 120 # guardrails timeout for the whole job
|
||||
outputs:
|
||||
matrix: ${{ steps.set.outputs.matrix }}
|
||||
|
||||
68
.github/workflows/.test.yml
vendored
68
.github/workflows/.test.yml
vendored
@@ -21,7 +21,7 @@ on:
|
||||
default: "graphdriver"
|
||||
|
||||
env:
|
||||
GO_VERSION: "1.22.10"
|
||||
GO_VERSION: "1.23.9"
|
||||
GOTESTLIST_VERSION: v0.3.1
|
||||
TESTSTAT_VERSION: v0.1.25
|
||||
ITG_CLI_MATRIX_SIZE: 6
|
||||
@@ -31,7 +31,7 @@ env:
|
||||
|
||||
jobs:
|
||||
unit:
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-24.04
|
||||
timeout-minutes: 120 # guardrails timeout for the whole job
|
||||
continue-on-error: ${{ github.event_name != 'pull_request' }}
|
||||
steps:
|
||||
@@ -41,12 +41,31 @@ jobs:
|
||||
-
|
||||
name: Set up runner
|
||||
uses: ./.github/actions/setup-runner
|
||||
-
|
||||
name: Prepare
|
||||
run: |
|
||||
CACHE_DEV_SCOPE=dev
|
||||
if [[ "${{ matrix.mode }}" == *"rootless"* ]]; then
|
||||
# In rootless mode, tests will run in the host's namspace not the rootlesskit
|
||||
# namespace. So, probably no different to non-rootless unit tests and can be
|
||||
# removed from the test matrix.
|
||||
echo "DOCKER_ROOTLESS=1" >> $GITHUB_ENV
|
||||
fi
|
||||
if [[ "${{ matrix.mode }}" == *"firewalld"* ]]; then
|
||||
echo "FIREWALLD=true" >> $GITHUB_ENV
|
||||
CACHE_DEV_SCOPE="${CACHE_DEV_SCOPE}firewalld"
|
||||
fi
|
||||
if [[ "${{ matrix.mode }}" == *"systemd"* ]]; then
|
||||
echo "SYSTEMD=true" >> $GITHUB_ENV
|
||||
CACHE_DEV_SCOPE="${CACHE_DEV_SCOPE}systemd"
|
||||
fi
|
||||
echo "CACHE_DEV_SCOPE=${CACHE_DEV_SCOPE}" >> $GITHUB_ENV
|
||||
-
|
||||
name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
-
|
||||
name: Build dev image
|
||||
uses: docker/bake-action@v4
|
||||
uses: docker/bake-action@v6
|
||||
with:
|
||||
targets: dev
|
||||
set: |
|
||||
@@ -82,7 +101,7 @@ jobs:
|
||||
retention-days: 1
|
||||
|
||||
unit-report:
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-24.04
|
||||
timeout-minutes: 10
|
||||
continue-on-error: ${{ github.event_name != 'pull_request' }}
|
||||
if: always()
|
||||
@@ -110,7 +129,7 @@ jobs:
|
||||
find /tmp/reports -type f -name '*-go-test-report.json' -exec teststat -markdown {} \+ >> $GITHUB_STEP_SUMMARY
|
||||
|
||||
docker-py:
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-24.04
|
||||
timeout-minutes: 120 # guardrails timeout for the whole job
|
||||
continue-on-error: ${{ github.event_name != 'pull_request' }}
|
||||
steps:
|
||||
@@ -128,7 +147,7 @@ jobs:
|
||||
uses: docker/setup-buildx-action@v3
|
||||
-
|
||||
name: Build dev image
|
||||
uses: docker/bake-action@v4
|
||||
uses: docker/bake-action@v6
|
||||
with:
|
||||
targets: dev
|
||||
set: |
|
||||
@@ -136,7 +155,7 @@ jobs:
|
||||
-
|
||||
name: Test
|
||||
run: |
|
||||
make -o build test-docker-py
|
||||
make TEST_SKIP_INTEGRATION_CLI=1 -o build test-docker-py
|
||||
-
|
||||
name: Prepare reports
|
||||
if: always()
|
||||
@@ -163,7 +182,7 @@ jobs:
|
||||
retention-days: 1
|
||||
|
||||
integration-flaky:
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-24.04
|
||||
timeout-minutes: 120 # guardrails timeout for the whole job
|
||||
continue-on-error: ${{ github.event_name != 'pull_request' }}
|
||||
steps:
|
||||
@@ -178,7 +197,7 @@ jobs:
|
||||
uses: docker/setup-buildx-action@v3
|
||||
-
|
||||
name: Build dev image
|
||||
uses: docker/bake-action@v4
|
||||
uses: docker/bake-action@v6
|
||||
with:
|
||||
targets: dev
|
||||
set: |
|
||||
@@ -198,13 +217,17 @@ jobs:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
os:
|
||||
- ubuntu-20.04
|
||||
- ubuntu-22.04
|
||||
- ubuntu-24.04
|
||||
mode:
|
||||
- ""
|
||||
- rootless
|
||||
- systemd
|
||||
- firewalld
|
||||
#- rootless-systemd FIXME: https://github.com/moby/moby/issues/44084
|
||||
exclude:
|
||||
- os: ubuntu-24.04 # FIXME: https://github.com/moby/moby/pull/49579#issuecomment-2698622223
|
||||
mode: rootless
|
||||
steps:
|
||||
-
|
||||
name: Checkout
|
||||
@@ -226,13 +249,17 @@ jobs:
|
||||
echo "SYSTEMD=true" >> $GITHUB_ENV
|
||||
CACHE_DEV_SCOPE="${CACHE_DEV_SCOPE}systemd"
|
||||
fi
|
||||
if [[ "${{ matrix.mode }}" == *"firewalld"* ]]; then
|
||||
echo "FIREWALLD=true" >> $GITHUB_ENV
|
||||
CACHE_DEV_SCOPE="${CACHE_DEV_SCOPE}firewalld"
|
||||
fi
|
||||
echo "CACHE_DEV_SCOPE=${CACHE_DEV_SCOPE}" >> $GITHUB_ENV
|
||||
-
|
||||
name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
-
|
||||
name: Build dev image
|
||||
uses: docker/bake-action@v4
|
||||
uses: docker/bake-action@v6
|
||||
with:
|
||||
targets: dev
|
||||
set: |
|
||||
@@ -285,7 +312,7 @@ jobs:
|
||||
retention-days: 1
|
||||
|
||||
integration-report:
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-24.04
|
||||
timeout-minutes: 10
|
||||
continue-on-error: ${{ github.event_name != 'pull_request' }}
|
||||
if: always()
|
||||
@@ -314,7 +341,7 @@ jobs:
|
||||
find /tmp/reports -type f -name '*-go-test-report.json' -exec teststat -markdown {} \+ >> $GITHUB_STEP_SUMMARY
|
||||
|
||||
integration-cli-prepare:
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-24.04
|
||||
timeout-minutes: 120 # guardrails timeout for the whole job
|
||||
continue-on-error: ${{ github.event_name != 'pull_request' }}
|
||||
outputs:
|
||||
@@ -350,7 +377,7 @@ jobs:
|
||||
echo ${{ steps.tests.outputs.matrix }}
|
||||
|
||||
integration-cli:
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-24.04
|
||||
timeout-minutes: 120 # guardrails timeout for the whole job
|
||||
continue-on-error: ${{ github.event_name != 'pull_request' }}
|
||||
needs:
|
||||
@@ -369,12 +396,21 @@ jobs:
|
||||
-
|
||||
name: Set up tracing
|
||||
uses: ./.github/actions/setup-tracing
|
||||
-
|
||||
name: Prepare
|
||||
run: |
|
||||
CACHE_DEV_SCOPE=dev
|
||||
if [[ "${{ matrix.mode }}" == *"firewalld"* ]]; then
|
||||
echo "FIREWALLD=true" >> $GITHUB_ENV
|
||||
CACHE_DEV_SCOPE="${CACHE_DEV_SCOPE}firewalld"
|
||||
fi
|
||||
echo "CACHE_DEV_SCOPE=${CACHE_DEV_SCOPE}" >> $GITHUB_ENV
|
||||
-
|
||||
name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
-
|
||||
name: Build dev image
|
||||
uses: docker/bake-action@v4
|
||||
uses: docker/bake-action@v6
|
||||
with:
|
||||
targets: dev
|
||||
set: |
|
||||
@@ -426,7 +462,7 @@ jobs:
|
||||
retention-days: 1
|
||||
|
||||
integration-cli-report:
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-24.04
|
||||
timeout-minutes: 10
|
||||
continue-on-error: ${{ github.event_name != 'pull_request' }}
|
||||
if: always()
|
||||
|
||||
16
.github/workflows/.windows.yml
vendored
16
.github/workflows/.windows.yml
vendored
@@ -28,12 +28,12 @@ on:
|
||||
default: false
|
||||
|
||||
env:
|
||||
GO_VERSION: "1.22.10"
|
||||
GO_VERSION: "1.23.9"
|
||||
GOTESTLIST_VERSION: v0.3.1
|
||||
TESTSTAT_VERSION: v0.1.25
|
||||
WINDOWS_BASE_IMAGE: mcr.microsoft.com/windows/servercore
|
||||
WINDOWS_BASE_TAG_2019: ltsc2019
|
||||
WINDOWS_BASE_TAG_2022: ltsc2022
|
||||
WINDOWS_BASE_TAG_2025: ltsc2025
|
||||
TEST_IMAGE_NAME: moby:test
|
||||
TEST_CTN_NAME: moby
|
||||
DOCKER_BUILDKIT: 0
|
||||
@@ -65,8 +65,8 @@ jobs:
|
||||
run: |
|
||||
New-Item -ItemType "directory" -Path "${{ github.workspace }}\go-build"
|
||||
New-Item -ItemType "directory" -Path "${{ github.workspace }}\go\pkg\mod"
|
||||
If ("${{ inputs.os }}" -eq "windows-2019") {
|
||||
echo "WINDOWS_BASE_IMAGE_TAG=${{ env.WINDOWS_BASE_TAG_2019 }}" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append
|
||||
If ("${{ inputs.os }}" -eq "windows-2025") {
|
||||
echo "WINDOWS_BASE_IMAGE_TAG=${{ env.WINDOWS_BASE_TAG_2025 }}" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append
|
||||
} ElseIf ("${{ inputs.os }}" -eq "windows-2022") {
|
||||
echo "WINDOWS_BASE_IMAGE_TAG=${{ env.WINDOWS_BASE_TAG_2022 }}" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append
|
||||
}
|
||||
@@ -145,8 +145,8 @@ jobs:
|
||||
New-Item -ItemType "directory" -Path "${{ github.workspace }}\go-build"
|
||||
New-Item -ItemType "directory" -Path "${{ github.workspace }}\go\pkg\mod"
|
||||
New-Item -ItemType "directory" -Path "bundles"
|
||||
If ("${{ inputs.os }}" -eq "windows-2019") {
|
||||
echo "WINDOWS_BASE_IMAGE_TAG=${{ env.WINDOWS_BASE_TAG_2019 }}" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append
|
||||
If ("${{ inputs.os }}" -eq "windows-2025") {
|
||||
echo "WINDOWS_BASE_IMAGE_TAG=${{ env.WINDOWS_BASE_TAG_2025 }}" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append
|
||||
} ElseIf ("${{ inputs.os }}" -eq "windows-2022") {
|
||||
echo "WINDOWS_BASE_IMAGE_TAG=${{ env.WINDOWS_BASE_TAG_2022 }}" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append
|
||||
}
|
||||
@@ -319,8 +319,8 @@ jobs:
|
||||
name: Init
|
||||
run: |
|
||||
New-Item -ItemType "directory" -Path "bundles"
|
||||
If ("${{ inputs.os }}" -eq "windows-2019") {
|
||||
echo "WINDOWS_BASE_IMAGE_TAG=${{ env.WINDOWS_BASE_TAG_2019 }}" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append
|
||||
If ("${{ inputs.os }}" -eq "windows-2025") {
|
||||
echo "WINDOWS_BASE_IMAGE_TAG=${{ env.WINDOWS_BASE_TAG_2025 }}" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append
|
||||
} ElseIf ("${{ inputs.os }}" -eq "windows-2022") {
|
||||
echo "WINDOWS_BASE_IMAGE_TAG=${{ env.WINDOWS_BASE_TAG_2022 }}" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append
|
||||
}
|
||||
|
||||
275
.github/workflows/arm64.yml
vendored
Normal file
275
.github/workflows/arm64.yml
vendored
Normal file
@@ -0,0 +1,275 @@
|
||||
name: arm64
|
||||
|
||||
# Default to 'contents: read', which grants actions to read commits.
|
||||
#
|
||||
# If any permission is set, any permission not included in the list is
|
||||
# implicitly set to "none".
|
||||
#
|
||||
# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
push:
|
||||
branches:
|
||||
- 'master'
|
||||
- '[0-9]+.[0-9]+'
|
||||
pull_request:
|
||||
|
||||
env:
|
||||
GO_VERSION: "1.23.9"
|
||||
TESTSTAT_VERSION: v0.1.25
|
||||
DESTDIR: ./build
|
||||
SETUP_BUILDX_VERSION: edge
|
||||
SETUP_BUILDKIT_IMAGE: moby/buildkit:latest
|
||||
DOCKER_EXPERIMENTAL: 1
|
||||
|
||||
jobs:
|
||||
validate-dco:
|
||||
uses: ./.github/workflows/.dco.yml
|
||||
|
||||
build:
|
||||
runs-on: ubuntu-24.04-arm
|
||||
timeout-minutes: 20 # guardrails timeout for the whole job
|
||||
needs:
|
||||
- validate-dco
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
target:
|
||||
- binary
|
||||
- dynbinary
|
||||
steps:
|
||||
-
|
||||
name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
with:
|
||||
version: ${{ env.SETUP_BUILDX_VERSION }}
|
||||
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
|
||||
buildkitd-flags: --debug
|
||||
-
|
||||
name: Build
|
||||
uses: docker/bake-action@v6
|
||||
with:
|
||||
targets: ${{ matrix.target }}
|
||||
-
|
||||
name: List artifacts
|
||||
run: |
|
||||
tree -nh ${{ env.DESTDIR }}
|
||||
-
|
||||
name: Check artifacts
|
||||
run: |
|
||||
find ${{ env.DESTDIR }} -type f -exec file -e ascii -- {} +
|
||||
|
||||
build-dev:
|
||||
runs-on: ubuntu-24.04-arm
|
||||
timeout-minutes: 120 # guardrails timeout for the whole job
|
||||
needs:
|
||||
- validate-dco
|
||||
steps:
|
||||
-
|
||||
name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
with:
|
||||
version: ${{ env.SETUP_BUILDX_VERSION }}
|
||||
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
|
||||
buildkitd-flags: --debug
|
||||
-
|
||||
name: Build dev image
|
||||
uses: docker/bake-action@v6
|
||||
with:
|
||||
targets: dev
|
||||
set: |
|
||||
*.cache-from=type=gha,scope=dev-arm64
|
||||
*.cache-to=type=gha,scope=dev-arm64
|
||||
*.output=type=cacheonly
|
||||
|
||||
test-unit:
|
||||
runs-on: ubuntu-24.04-arm
|
||||
timeout-minutes: 120 # guardrails timeout for the whole job
|
||||
needs:
|
||||
- build-dev
|
||||
steps:
|
||||
-
|
||||
name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
-
|
||||
name: Set up runner
|
||||
uses: ./.github/actions/setup-runner
|
||||
-
|
||||
name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
with:
|
||||
version: ${{ env.SETUP_BUILDX_VERSION }}
|
||||
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
|
||||
buildkitd-flags: --debug
|
||||
-
|
||||
name: Build dev image
|
||||
uses: docker/bake-action@v6
|
||||
with:
|
||||
targets: dev
|
||||
set: |
|
||||
dev.cache-from=type=gha,scope=dev-arm64
|
||||
-
|
||||
name: Test
|
||||
run: |
|
||||
make -o build test-unit
|
||||
-
|
||||
name: Prepare reports
|
||||
if: always()
|
||||
run: |
|
||||
mkdir -p bundles /tmp/reports
|
||||
find bundles -path '*/root/*overlay2' -prune -o -type f \( -name '*-report.json' -o -name '*.log' -o -name '*.out' -o -name '*.prof' -o -name '*-report.xml' \) -print | xargs sudo tar -czf /tmp/reports.tar.gz
|
||||
tar -xzf /tmp/reports.tar.gz -C /tmp/reports
|
||||
sudo chown -R $(id -u):$(id -g) /tmp/reports
|
||||
tree -nh /tmp/reports
|
||||
-
|
||||
name: Send to Codecov
|
||||
uses: codecov/codecov-action@v4
|
||||
with:
|
||||
directory: ./bundles
|
||||
env_vars: RUNNER_OS
|
||||
flags: unit
|
||||
token: ${{ secrets.CODECOV_TOKEN }} # used to upload coverage reports: https://github.com/moby/buildkit/pull/4660#issue-2142122533
|
||||
-
|
||||
name: Upload reports
|
||||
if: always()
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: test-reports-unit-arm64-graphdriver
|
||||
path: /tmp/reports/*
|
||||
retention-days: 1
|
||||
|
||||
test-unit-report:
|
||||
runs-on: ubuntu-24.04
|
||||
timeout-minutes: 10
|
||||
continue-on-error: ${{ github.event_name != 'pull_request' }}
|
||||
if: always()
|
||||
needs:
|
||||
- test-unit
|
||||
steps:
|
||||
-
|
||||
name: Set up Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
cache-dependency-path: vendor.sum
|
||||
-
|
||||
name: Download reports
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
pattern: test-reports-unit-arm64-*
|
||||
path: /tmp/reports
|
||||
-
|
||||
name: Install teststat
|
||||
run: |
|
||||
go install github.com/vearutop/teststat@${{ env.TESTSTAT_VERSION }}
|
||||
-
|
||||
name: Create summary
|
||||
run: |
|
||||
find /tmp/reports -type f -name '*-go-test-report.json' -exec teststat -markdown {} \+ >> $GITHUB_STEP_SUMMARY
|
||||
|
||||
test-integration:
|
||||
runs-on: ubuntu-24.04-arm
|
||||
timeout-minutes: 120 # guardrails timeout for the whole job
|
||||
continue-on-error: ${{ github.event_name != 'pull_request' }}
|
||||
needs:
|
||||
- build-dev
|
||||
steps:
|
||||
-
|
||||
name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
-
|
||||
name: Set up runner
|
||||
uses: ./.github/actions/setup-runner
|
||||
-
|
||||
name: Set up tracing
|
||||
uses: ./.github/actions/setup-tracing
|
||||
-
|
||||
name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
with:
|
||||
version: ${{ env.SETUP_BUILDX_VERSION }}
|
||||
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
|
||||
buildkitd-flags: --debug
|
||||
-
|
||||
name: Build dev image
|
||||
uses: docker/bake-action@v6
|
||||
with:
|
||||
targets: dev
|
||||
set: |
|
||||
dev.cache-from=type=gha,scope=dev-arm64
|
||||
-
|
||||
name: Test
|
||||
run: |
|
||||
make -o build test-integration
|
||||
env:
|
||||
TEST_SKIP_INTEGRATION_CLI: 1
|
||||
TESTCOVERAGE: 1
|
||||
-
|
||||
name: Prepare reports
|
||||
if: always()
|
||||
run: |
|
||||
reportsPath="/tmp/reports/arm64-graphdriver"
|
||||
mkdir -p bundles $reportsPath
|
||||
find bundles -path '*/root/*overlay2' -prune -o -type f \( -name '*-report.json' -o -name '*.log' -o -name '*.out' -o -name '*.prof' -o -name '*-report.xml' \) -print | xargs sudo tar -czf /tmp/reports.tar.gz
|
||||
tar -xzf /tmp/reports.tar.gz -C $reportsPath
|
||||
sudo chown -R $(id -u):$(id -g) $reportsPath
|
||||
tree -nh $reportsPath
|
||||
curl -sSLf localhost:16686/api/traces?service=integration-test-client > $reportsPath/jaeger-trace.json
|
||||
-
|
||||
name: Send to Codecov
|
||||
uses: codecov/codecov-action@v4
|
||||
with:
|
||||
directory: ./bundles/test-integration
|
||||
env_vars: RUNNER_OS
|
||||
flags: integration
|
||||
token: ${{ secrets.CODECOV_TOKEN }} # used to upload coverage reports: https://github.com/moby/buildkit/pull/4660#issue-2142122533
|
||||
-
|
||||
name: Test daemon logs
|
||||
if: always()
|
||||
run: |
|
||||
cat bundles/test-integration/docker.log
|
||||
-
|
||||
name: Upload reports
|
||||
if: always()
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: test-reports-integration-arm64-graphdriver
|
||||
path: /tmp/reports/*
|
||||
retention-days: 1
|
||||
|
||||
test-integration-report:
|
||||
runs-on: ubuntu-24.04
|
||||
timeout-minutes: 10
|
||||
continue-on-error: ${{ github.event_name != 'pull_request' }}
|
||||
if: always()
|
||||
needs:
|
||||
- test-integration
|
||||
steps:
|
||||
-
|
||||
name: Set up Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
cache-dependency-path: vendor.sum
|
||||
-
|
||||
name: Download reports
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
path: /tmp/reports
|
||||
pattern: test-reports-integration-arm64-*
|
||||
merge-multiple: true
|
||||
-
|
||||
name: Install teststat
|
||||
run: |
|
||||
go install github.com/vearutop/teststat@${{ env.TESTSTAT_VERSION }}
|
||||
-
|
||||
name: Create summary
|
||||
run: |
|
||||
find /tmp/reports -type f -name '*-go-test-report.json' -exec teststat -markdown {} \+ >> $GITHUB_STEP_SUMMARY
|
||||
19
.github/workflows/bin-image.yml
vendored
19
.github/workflows/bin-image.yml
vendored
@@ -37,7 +37,7 @@ jobs:
|
||||
uses: ./.github/workflows/.dco.yml
|
||||
|
||||
prepare:
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-24.04
|
||||
timeout-minutes: 20 # guardrails timeout for the whole job
|
||||
outputs:
|
||||
platforms: ${{ steps.platforms.outputs.matrix }}
|
||||
@@ -90,7 +90,7 @@ jobs:
|
||||
echo "matrix=$(docker buildx bake bin-image-cross --print | jq -cr '.target."bin-image-cross".platforms')" >>${GITHUB_OUTPUT}
|
||||
|
||||
build:
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-24.04
|
||||
timeout-minutes: 120 # guardrails timeout for the whole job
|
||||
needs:
|
||||
- validate-dco
|
||||
@@ -101,16 +101,16 @@ jobs:
|
||||
matrix:
|
||||
platform: ${{ fromJson(needs.prepare.outputs.platforms) }}
|
||||
steps:
|
||||
-
|
||||
name: Prepare
|
||||
run: |
|
||||
platform=${{ matrix.platform }}
|
||||
echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
|
||||
-
|
||||
name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
-
|
||||
name: Prepare
|
||||
run: |
|
||||
platform=${{ matrix.platform }}
|
||||
echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
|
||||
-
|
||||
name: Download meta bake definition
|
||||
uses: actions/download-artifact@v4
|
||||
@@ -133,8 +133,9 @@ jobs:
|
||||
-
|
||||
name: Build
|
||||
id: bake
|
||||
uses: docker/bake-action@v4
|
||||
uses: docker/bake-action@v6
|
||||
with:
|
||||
source: .
|
||||
files: |
|
||||
./docker-bake.hcl
|
||||
/tmp/bake-meta.json
|
||||
@@ -161,7 +162,7 @@ jobs:
|
||||
retention-days: 1
|
||||
|
||||
merge:
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-24.04
|
||||
timeout-minutes: 120 # guardrails timeout for the whole job
|
||||
needs:
|
||||
- build
|
||||
|
||||
18
.github/workflows/buildkit.yml
vendored
18
.github/workflows/buildkit.yml
vendored
@@ -22,8 +22,8 @@ on:
|
||||
pull_request:
|
||||
|
||||
env:
|
||||
GO_VERSION: "1.22.10"
|
||||
ALPINE_VERSION: "3.19"
|
||||
GO_VERSION: "1.23.9"
|
||||
ALPINE_VERSION: "3.20"
|
||||
DESTDIR: ./build
|
||||
|
||||
jobs:
|
||||
@@ -31,20 +31,17 @@ jobs:
|
||||
uses: ./.github/workflows/.dco.yml
|
||||
|
||||
build:
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-24.04
|
||||
timeout-minutes: 120 # guardrails timeout for the whole job
|
||||
needs:
|
||||
- validate-dco
|
||||
steps:
|
||||
-
|
||||
name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
-
|
||||
name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
-
|
||||
name: Build
|
||||
uses: docker/bake-action@v4
|
||||
uses: docker/bake-action@v6
|
||||
with:
|
||||
targets: binary
|
||||
-
|
||||
@@ -57,7 +54,7 @@ jobs:
|
||||
retention-days: 1
|
||||
|
||||
test:
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-24.04
|
||||
timeout-minutes: 120 # guardrails timeout for the whole job
|
||||
needs:
|
||||
- build
|
||||
@@ -103,7 +100,10 @@ jobs:
|
||||
-
|
||||
name: BuildKit ref
|
||||
run: |
|
||||
echo "$(./hack/buildkit-ref)" >> $GITHUB_ENV
|
||||
# FIXME(aepifanov) temporarily overriding version to use for tests; remove with the next release of buildkit
|
||||
# echo "BUILDKIT_REF=$(./hack/buildkit-ref)" >> $GITHUB_ENV
|
||||
echo "BUILDKIT_REPO=moby/buildkit" >> $GITHUB_ENV
|
||||
echo "BUILDKIT_REF=b10aeed77fd8a370f6aec7ae4b212ab291914e08" >> $GITHUB_ENV
|
||||
working-directory: moby
|
||||
-
|
||||
name: Checkout BuildKit ${{ env.BUILDKIT_REF }}
|
||||
|
||||
18
.github/workflows/ci.yml
vendored
18
.github/workflows/ci.yml
vendored
@@ -29,7 +29,7 @@ jobs:
|
||||
uses: ./.github/workflows/.dco.yml
|
||||
|
||||
build:
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-24.04
|
||||
timeout-minutes: 20 # guardrails timeout for the whole job
|
||||
needs:
|
||||
- validate-dco
|
||||
@@ -40,17 +40,12 @@ jobs:
|
||||
- binary
|
||||
- dynbinary
|
||||
steps:
|
||||
-
|
||||
name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
-
|
||||
name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
-
|
||||
name: Build
|
||||
uses: docker/bake-action@v4
|
||||
uses: docker/bake-action@v6
|
||||
with:
|
||||
targets: ${{ matrix.target }}
|
||||
-
|
||||
@@ -85,7 +80,7 @@ jobs:
|
||||
echo ${{ steps.platforms.outputs.matrix }}
|
||||
|
||||
cross:
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-24.04
|
||||
timeout-minutes: 20 # guardrails timeout for the whole job
|
||||
needs:
|
||||
- validate-dco
|
||||
@@ -95,11 +90,6 @@ jobs:
|
||||
matrix:
|
||||
platform: ${{ fromJson(needs.prepare-cross.outputs.matrix) }}
|
||||
steps:
|
||||
-
|
||||
name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
-
|
||||
name: Prepare
|
||||
run: |
|
||||
@@ -110,7 +100,7 @@ jobs:
|
||||
uses: docker/setup-buildx-action@v3
|
||||
-
|
||||
name: Build
|
||||
uses: docker/bake-action@v4
|
||||
uses: docker/bake-action@v6
|
||||
with:
|
||||
targets: all
|
||||
set: |
|
||||
|
||||
27
.github/workflows/test.yml
vendored
27
.github/workflows/test.yml
vendored
@@ -22,7 +22,7 @@ on:
|
||||
pull_request:
|
||||
|
||||
env:
|
||||
GO_VERSION: "1.22.10"
|
||||
GO_VERSION: "1.23.9"
|
||||
GIT_PAGER: "cat"
|
||||
PAGER: "cat"
|
||||
|
||||
@@ -31,7 +31,7 @@ jobs:
|
||||
uses: ./.github/workflows/.dco.yml
|
||||
|
||||
build-dev:
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-24.04
|
||||
timeout-minutes: 120 # guardrails timeout for the whole job
|
||||
needs:
|
||||
- validate-dco
|
||||
@@ -41,6 +41,7 @@ jobs:
|
||||
mode:
|
||||
- ""
|
||||
- systemd
|
||||
- firewalld
|
||||
steps:
|
||||
-
|
||||
name: Prepare
|
||||
@@ -48,20 +49,17 @@ jobs:
|
||||
if [ "${{ matrix.mode }}" = "systemd" ]; then
|
||||
echo "SYSTEMD=true" >> $GITHUB_ENV
|
||||
fi
|
||||
-
|
||||
name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
-
|
||||
name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
-
|
||||
name: Build dev image
|
||||
uses: docker/bake-action@v4
|
||||
uses: docker/bake-action@v6
|
||||
with:
|
||||
targets: dev
|
||||
set: |
|
||||
*.cache-from=type=gha,scope=dev${{ matrix.mode }}
|
||||
*.cache-to=type=gha,scope=dev${{ matrix.mode }},mode=max
|
||||
*.cache-to=type=gha,scope=dev${{ matrix.mode }}
|
||||
*.output=type=cacheonly
|
||||
|
||||
test:
|
||||
@@ -80,7 +78,7 @@ jobs:
|
||||
storage: ${{ matrix.storage }}
|
||||
|
||||
validate-prepare:
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-24.04
|
||||
timeout-minutes: 10 # guardrails timeout for the whole job
|
||||
needs:
|
||||
- validate-dco
|
||||
@@ -102,7 +100,7 @@ jobs:
|
||||
echo ${{ steps.scripts.outputs.matrix }}
|
||||
|
||||
validate:
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-24.04
|
||||
timeout-minutes: 30 # guardrails timeout for the whole job
|
||||
needs:
|
||||
- validate-prepare
|
||||
@@ -125,7 +123,7 @@ jobs:
|
||||
uses: docker/setup-buildx-action@v3
|
||||
-
|
||||
name: Build dev image
|
||||
uses: docker/bake-action@v4
|
||||
uses: docker/bake-action@v6
|
||||
with:
|
||||
targets: dev
|
||||
set: |
|
||||
@@ -136,7 +134,7 @@ jobs:
|
||||
make -o build validate-${{ matrix.script }}
|
||||
|
||||
smoke-prepare:
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-24.04
|
||||
timeout-minutes: 10 # guardrails timeout for the whole job
|
||||
needs:
|
||||
- validate-dco
|
||||
@@ -158,7 +156,7 @@ jobs:
|
||||
echo ${{ steps.platforms.outputs.matrix }}
|
||||
|
||||
smoke:
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-24.04
|
||||
timeout-minutes: 20 # guardrails timeout for the whole job
|
||||
needs:
|
||||
- smoke-prepare
|
||||
@@ -167,9 +165,6 @@ jobs:
|
||||
matrix:
|
||||
platform: ${{ fromJson(needs.smoke-prepare.outputs.matrix) }}
|
||||
steps:
|
||||
-
|
||||
name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
-
|
||||
name: Prepare
|
||||
run: |
|
||||
@@ -183,7 +178,7 @@ jobs:
|
||||
uses: docker/setup-buildx-action@v3
|
||||
-
|
||||
name: Test
|
||||
uses: docker/bake-action@v4
|
||||
uses: docker/bake-action@v6
|
||||
with:
|
||||
targets: binary-smoketest
|
||||
set: |
|
||||
|
||||
6
.github/workflows/validate-pr.yml
vendored
6
.github/workflows/validate-pr.yml
vendored
@@ -15,7 +15,7 @@ on:
|
||||
|
||||
jobs:
|
||||
check-area-label:
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-24.04
|
||||
timeout-minutes: 120 # guardrails timeout for the whole job
|
||||
steps:
|
||||
- name: Missing `area/` label
|
||||
@@ -28,7 +28,7 @@ jobs:
|
||||
|
||||
check-changelog:
|
||||
if: contains(join(github.event.pull_request.labels.*.name, ','), 'impact/')
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-24.04
|
||||
timeout-minutes: 120 # guardrails timeout for the whole job
|
||||
env:
|
||||
PR_BODY: |
|
||||
@@ -57,7 +57,7 @@ jobs:
|
||||
echo "$desc"
|
||||
|
||||
check-pr-branch:
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-24.04
|
||||
timeout-minutes: 120 # guardrails timeout for the whole job
|
||||
env:
|
||||
PR_TITLE: ${{ github.event.pull_request.title }}
|
||||
|
||||
7
.github/workflows/windows-2022.yml
vendored
7
.github/workflows/windows-2022.yml
vendored
@@ -14,12 +14,9 @@ concurrency:
|
||||
cancel-in-progress: true
|
||||
|
||||
on:
|
||||
schedule:
|
||||
- cron: '0 10 * * *'
|
||||
workflow_dispatch:
|
||||
push:
|
||||
branches:
|
||||
- 'master'
|
||||
- '[0-9]+.[0-9]+'
|
||||
pull_request:
|
||||
|
||||
jobs:
|
||||
validate-dco:
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
name: windows-2019
|
||||
name: windows-2025
|
||||
|
||||
# Default to 'contents: read', which grants actions to read commits.
|
||||
#
|
||||
@@ -14,9 +14,13 @@ concurrency:
|
||||
cancel-in-progress: true
|
||||
|
||||
on:
|
||||
schedule:
|
||||
- cron: '0 10 * * *'
|
||||
workflow_dispatch:
|
||||
push:
|
||||
branches:
|
||||
- 'master'
|
||||
- '[0-9]+.[0-9]+'
|
||||
- '[0-9]+.x'
|
||||
pull_request:
|
||||
|
||||
jobs:
|
||||
validate-dco:
|
||||
@@ -37,6 +41,6 @@ jobs:
|
||||
matrix:
|
||||
storage: ${{ fromJson(needs.test-prepare.outputs.matrix) }}
|
||||
with:
|
||||
os: windows-2019
|
||||
os: windows-2025
|
||||
storage: ${{ matrix.storage }}
|
||||
send_coverage: false
|
||||
@@ -50,11 +50,18 @@ linters-settings:
|
||||
deny:
|
||||
- pkg: io/ioutil
|
||||
desc: The io/ioutil package has been deprecated, see https://go.dev/doc/go1.16#ioutil
|
||||
- pkg: "github.com/stretchr/testify/assert"
|
||||
desc: Use "gotest.tools/v3/assert" instead
|
||||
- pkg: "github.com/stretchr/testify/require"
|
||||
desc: Use "gotest.tools/v3/assert" instead
|
||||
- pkg: "github.com/stretchr/testify/suite"
|
||||
desc: Do not use
|
||||
revive:
|
||||
rules:
|
||||
# FIXME make sure all packages have a description. Currently, there's many packages without.
|
||||
- name: package-comments
|
||||
disabled: true
|
||||
- name: redefines-builtin-id
|
||||
issues:
|
||||
# The default exclusion rules are a bit too permissive, so copying the relevant ones below
|
||||
exclude-use-default: false
|
||||
|
||||
24
Dockerfile
24
Dockerfile
@@ -1,6 +1,6 @@
|
||||
# syntax=docker/dockerfile:1
|
||||
|
||||
ARG GO_VERSION=1.22.10
|
||||
ARG GO_VERSION=1.23.9
|
||||
ARG BASE_DEBIAN_DISTRO="bookworm"
|
||||
ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}"
|
||||
ARG XX_VERSION=1.6.1
|
||||
@@ -16,6 +16,7 @@ ARG BUILDX_VERSION=0.12.1
|
||||
ARG COMPOSE_VERSION=v2.24.5
|
||||
|
||||
ARG SYSTEMD="false"
|
||||
ARG FIREWALLD="false"
|
||||
ARG DOCKER_STATIC=1
|
||||
|
||||
# REGISTRY_VERSION specifies the version of the registry to download from
|
||||
@@ -198,7 +199,7 @@ RUN git init . && git remote add origin "https://github.com/containerd/container
|
||||
# When updating the binary version you may also need to update the vendor
|
||||
# version to pick up bug fixes or new APIs, however, usually the Go packages
|
||||
# are built from a commit from the master branch.
|
||||
ARG CONTAINERD_VERSION=v1.7.25
|
||||
ARG CONTAINERD_VERSION=v1.7.27
|
||||
RUN git fetch -q --depth 1 origin "${CONTAINERD_VERSION}" +refs/tags/*:refs/tags/* && git checkout -q FETCH_HEAD
|
||||
|
||||
FROM base AS containerd-build
|
||||
@@ -283,7 +284,7 @@ RUN git init . && git remote add origin "https://github.com/opencontainers/runc.
|
||||
# that is used. If you need to update runc, open a pull request in the containerd
|
||||
# project first, and update both after that is merged. When updating RUNC_VERSION,
|
||||
# consider updating runc in vendor.mod accordingly.
|
||||
ARG RUNC_VERSION=v1.2.4
|
||||
ARG RUNC_VERSION=v1.2.5
|
||||
RUN git fetch -q --depth 1 origin "${RUNC_VERSION}" +refs/tags/*:refs/tags/* && git checkout -q FETCH_HEAD
|
||||
|
||||
FROM base AS runc-build
|
||||
@@ -449,8 +450,8 @@ FROM binary-dummy AS containerutil-linux
|
||||
FROM containerutil-build AS containerutil-windows-amd64
|
||||
FROM containerutil-windows-${TARGETARCH} AS containerutil-windows
|
||||
FROM containerutil-${TARGETOS} AS containerutil
|
||||
FROM docker/buildx-bin:${BUILDX_VERSION} as buildx
|
||||
FROM docker/compose-bin:${COMPOSE_VERSION} as compose
|
||||
FROM docker/buildx-bin:${BUILDX_VERSION} AS buildx
|
||||
FROM docker/compose-bin:${COMPOSE_VERSION} AS compose
|
||||
|
||||
FROM base AS dev-systemd-false
|
||||
COPY --link --from=frozen-images /build/ /docker-frozen-images
|
||||
@@ -500,7 +501,16 @@ RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \
|
||||
systemd-sysv
|
||||
ENTRYPOINT ["hack/dind-systemd"]
|
||||
|
||||
FROM dev-systemd-${SYSTEMD} AS dev-base
|
||||
FROM dev-systemd-${SYSTEMD} AS dev-firewalld-false
|
||||
|
||||
FROM dev-systemd-true AS dev-firewalld-true
|
||||
RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \
|
||||
--mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \
|
||||
apt-get update && apt-get install -y --no-install-recommends \
|
||||
firewalld
|
||||
RUN sed -i 's/FirewallBackend=nftables/FirewallBackend=iptables/' /etc/firewalld/firewalld.conf
|
||||
|
||||
FROM dev-firewalld-${FIREWALLD} AS dev-base
|
||||
RUN groupadd -r docker
|
||||
RUN useradd --create-home --gid docker unprivilegeduser \
|
||||
&& mkdir -p /home/unprivilegeduser/.local/share/docker \
|
||||
@@ -644,7 +654,7 @@ COPY --link --from=build /build /
|
||||
# smoke tests
|
||||
# usage:
|
||||
# > docker buildx bake binary-smoketest
|
||||
FROM --platform=$TARGETPLATFORM base AS smoketest
|
||||
FROM base AS smoketest
|
||||
WORKDIR /usr/local/bin
|
||||
COPY --from=build /build .
|
||||
RUN <<EOT
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
|
||||
# This represents the bare minimum required to build and test Docker.
|
||||
|
||||
ARG GO_VERSION=1.22.10
|
||||
ARG GO_VERSION=1.23.9
|
||||
|
||||
ARG BASE_DEBIAN_DISTRO="bookworm"
|
||||
ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}"
|
||||
|
||||
@@ -161,10 +161,10 @@ FROM ${WINDOWS_BASE_IMAGE}:${WINDOWS_BASE_IMAGE_TAG}
|
||||
# Use PowerShell as the default shell
|
||||
SHELL ["powershell", "-Command", "$ErrorActionPreference = 'Stop'; $ProgressPreference = 'SilentlyContinue';"]
|
||||
|
||||
ARG GO_VERSION=1.22.10
|
||||
ARG GOTESTSUM_VERSION=v1.8.2
|
||||
ARG GO_VERSION=1.23.9
|
||||
ARG GOWINRES_VERSION=v0.3.1
|
||||
ARG CONTAINERD_VERSION=v1.7.25
|
||||
ARG CONTAINERD_VERSION=v1.7.27
|
||||
|
||||
# Environment variable notes:
|
||||
# - GO_VERSION must be consistent with 'Dockerfile' used by Linux.
|
||||
@@ -255,14 +255,11 @@ RUN `
|
||||
Remove-Item C:\gitsetup.zip; `
|
||||
`
|
||||
Write-Host INFO: Downloading containerd; `
|
||||
Install-Package -Force 7Zip4PowerShell; `
|
||||
$location='https://github.com/containerd/containerd/releases/download/'+$Env:CONTAINERD_VERSION+'/containerd-'+$Env:CONTAINERD_VERSION.TrimStart('v')+'-windows-amd64.tar.gz'; `
|
||||
Download-File $location C:\containerd.tar.gz; `
|
||||
New-Item -Path C:\containerd -ItemType Directory; `
|
||||
Expand-7Zip C:\containerd.tar.gz C:\; `
|
||||
Expand-7Zip C:\containerd.tar C:\containerd; `
|
||||
tar -xzf C:\containerd.tar.gz -C C:\containerd; `
|
||||
Remove-Item C:\containerd.tar.gz; `
|
||||
Remove-Item C:\containerd.tar; `
|
||||
`
|
||||
# Ensure all directories exist that we will require below....
|
||||
$srcDir = """$Env:GOPATH`\src\github.com\docker\docker\bundles"""; `
|
||||
|
||||
171
Jenkinsfile
vendored
171
Jenkinsfile
vendored
@@ -1,171 +0,0 @@
|
||||
#!groovy
|
||||
pipeline {
|
||||
agent none
|
||||
|
||||
options {
|
||||
buildDiscarder(logRotator(daysToKeepStr: '30'))
|
||||
timeout(time: 2, unit: 'HOURS')
|
||||
timestamps()
|
||||
}
|
||||
parameters {
|
||||
booleanParam(name: 'arm64', defaultValue: true, description: 'ARM (arm64) Build/Test')
|
||||
booleanParam(name: 'dco', defaultValue: true, description: 'Run the DCO check')
|
||||
}
|
||||
environment {
|
||||
DOCKER_BUILDKIT = '1'
|
||||
DOCKER_EXPERIMENTAL = '1'
|
||||
DOCKER_GRAPHDRIVER = 'overlay2'
|
||||
CHECK_CONFIG_COMMIT = '33a3680e08d1007e72c3b3f1454f823d8e9948ee'
|
||||
TESTDEBUG = '0'
|
||||
TIMEOUT = '120m'
|
||||
}
|
||||
stages {
|
||||
stage('pr-hack') {
|
||||
when { changeRequest() }
|
||||
steps {
|
||||
script {
|
||||
echo "Workaround for PR auto-cancel feature. Borrowed from https://issues.jenkins-ci.org/browse/JENKINS-43353"
|
||||
def buildNumber = env.BUILD_NUMBER as int
|
||||
if (buildNumber > 1) milestone(buildNumber - 1)
|
||||
milestone(buildNumber)
|
||||
}
|
||||
}
|
||||
}
|
||||
stage('DCO-check') {
|
||||
when {
|
||||
beforeAgent true
|
||||
expression { params.dco }
|
||||
}
|
||||
agent { label 'arm64 && ubuntu-2004' }
|
||||
steps {
|
||||
sh '''
|
||||
docker run --rm \
|
||||
-v "$WORKSPACE:/workspace" \
|
||||
-e VALIDATE_REPO=${GIT_URL} \
|
||||
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
|
||||
alpine sh -c 'apk add --no-cache -q bash git openssh-client && git config --system --add safe.directory /workspace && cd /workspace && hack/validate/dco'
|
||||
'''
|
||||
}
|
||||
}
|
||||
stage('Build') {
|
||||
parallel {
|
||||
stage('arm64') {
|
||||
when {
|
||||
beforeAgent true
|
||||
expression { params.arm64 }
|
||||
}
|
||||
agent { label 'arm64 && ubuntu-2004' }
|
||||
environment {
|
||||
TEST_SKIP_INTEGRATION_CLI = '1'
|
||||
}
|
||||
|
||||
stages {
|
||||
stage("Load kernel modules") {
|
||||
steps {
|
||||
sh '''
|
||||
sudo modprobe ip6table_filter
|
||||
sudo modprobe -va br_netfilter
|
||||
sudo systemctl restart docker.service
|
||||
'''
|
||||
}
|
||||
}
|
||||
stage("Print info") {
|
||||
steps {
|
||||
sh 'docker version'
|
||||
sh 'docker info'
|
||||
sh '''
|
||||
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
|
||||
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
|
||||
&& bash ${WORKSPACE}/check-config.sh || true
|
||||
'''
|
||||
}
|
||||
}
|
||||
stage("Build dev image") {
|
||||
steps {
|
||||
sh 'docker build --force-rm -t docker:${GIT_COMMIT} .'
|
||||
}
|
||||
}
|
||||
stage("Unit tests") {
|
||||
steps {
|
||||
sh '''
|
||||
docker run --rm -t --privileged \
|
||||
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
|
||||
--name docker-pr$BUILD_NUMBER \
|
||||
-e DOCKER_EXPERIMENTAL \
|
||||
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
|
||||
-e DOCKER_GRAPHDRIVER \
|
||||
-e VALIDATE_REPO=${GIT_URL} \
|
||||
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
|
||||
docker:${GIT_COMMIT} \
|
||||
hack/test/unit
|
||||
'''
|
||||
}
|
||||
post {
|
||||
always {
|
||||
junit testResults: 'bundles/junit-report*.xml', allowEmptyResults: true
|
||||
}
|
||||
}
|
||||
}
|
||||
stage("Integration tests") {
|
||||
environment { TEST_SKIP_INTEGRATION_CLI = '1' }
|
||||
steps {
|
||||
sh '''
|
||||
docker run --rm -t --privileged \
|
||||
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
|
||||
--name docker-pr$BUILD_NUMBER \
|
||||
-e DOCKER_EXPERIMENTAL \
|
||||
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
|
||||
-e DOCKER_GRAPHDRIVER \
|
||||
-e TESTDEBUG \
|
||||
-e TEST_INTEGRATION_USE_SNAPSHOTTER \
|
||||
-e TEST_SKIP_INTEGRATION_CLI \
|
||||
-e TIMEOUT \
|
||||
-e VALIDATE_REPO=${GIT_URL} \
|
||||
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
|
||||
docker:${GIT_COMMIT} \
|
||||
hack/make.sh \
|
||||
dynbinary \
|
||||
test-integration
|
||||
'''
|
||||
}
|
||||
post {
|
||||
always {
|
||||
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
post {
|
||||
always {
|
||||
sh '''
|
||||
echo "Ensuring container killed."
|
||||
docker rm -vf docker-pr$BUILD_NUMBER || true
|
||||
'''
|
||||
|
||||
sh '''
|
||||
echo "Chowning /workspace to jenkins user"
|
||||
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
|
||||
'''
|
||||
|
||||
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
|
||||
sh '''
|
||||
bundleName=arm64-integration
|
||||
echo "Creating ${bundleName}-bundles.tar.gz"
|
||||
# exclude overlay2 directories
|
||||
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
|
||||
'''
|
||||
|
||||
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
|
||||
}
|
||||
}
|
||||
cleanup {
|
||||
sh 'make clean'
|
||||
deleteDir()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
5
Makefile
5
Makefile
@@ -56,6 +56,7 @@ DOCKER_ENVS := \
|
||||
-e DOCKER_USERLANDPROXY \
|
||||
-e DOCKERD_ARGS \
|
||||
-e DELVE_PORT \
|
||||
-e FIREWALLD \
|
||||
-e GITHUB_ACTIONS \
|
||||
-e TEST_FORCE_VALIDATE \
|
||||
-e TEST_INTEGRATION_DIR \
|
||||
@@ -63,7 +64,6 @@ DOCKER_ENVS := \
|
||||
-e TEST_INTEGRATION_FAIL_FAST \
|
||||
-e TEST_SKIP_INTEGRATION \
|
||||
-e TEST_SKIP_INTEGRATION_CLI \
|
||||
-e TEST_IGNORE_CGROUP_CHECK \
|
||||
-e TESTCOVERAGE \
|
||||
-e TESTDEBUG \
|
||||
-e TESTDIRS \
|
||||
@@ -150,6 +150,9 @@ DOCKER_BUILD_ARGS += --build-arg=DOCKERCLI_INTEGRATION_REPOSITORY
|
||||
ifdef DOCKER_SYSTEMD
|
||||
DOCKER_BUILD_ARGS += --build-arg=SYSTEMD=true
|
||||
endif
|
||||
ifdef FIREWALLD
|
||||
DOCKER_BUILD_ARGS += --build-arg=FIREWALLD=true
|
||||
endif
|
||||
|
||||
BUILD_OPTS := ${DOCKER_BUILD_ARGS} ${DOCKER_BUILD_OPTS}
|
||||
BUILD_CMD := $(BUILDX) build
|
||||
|
||||
@@ -25,15 +25,15 @@ func NewRouter(b Backend, d experimentalProvider) router.Router {
|
||||
}
|
||||
|
||||
// Routes returns the available routers to the build controller
|
||||
func (r *buildRouter) Routes() []router.Route {
|
||||
return r.routes
|
||||
func (br *buildRouter) Routes() []router.Route {
|
||||
return br.routes
|
||||
}
|
||||
|
||||
func (r *buildRouter) initRoutes() {
|
||||
r.routes = []router.Route{
|
||||
router.NewPostRoute("/build", r.postBuild),
|
||||
router.NewPostRoute("/build/prune", r.postPrune),
|
||||
router.NewPostRoute("/build/cancel", r.postCancel),
|
||||
func (br *buildRouter) initRoutes() {
|
||||
br.routes = []router.Route{
|
||||
router.NewPostRoute("/build", br.postBuild),
|
||||
router.NewPostRoute("/build/prune", br.postPrune),
|
||||
router.NewPostRoute("/build/cancel", br.postCancel),
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -23,14 +23,14 @@ func NewRouter(b Backend, decoder httputils.ContainerDecoder) router.Router {
|
||||
}
|
||||
|
||||
// Routes returns the available routers to the checkpoint controller
|
||||
func (r *checkpointRouter) Routes() []router.Route {
|
||||
return r.routes
|
||||
func (cr *checkpointRouter) Routes() []router.Route {
|
||||
return cr.routes
|
||||
}
|
||||
|
||||
func (r *checkpointRouter) initRoutes() {
|
||||
r.routes = []router.Route{
|
||||
router.NewGetRoute("/containers/{name:.*}/checkpoints", r.getContainerCheckpoints, router.Experimental),
|
||||
router.NewPostRoute("/containers/{name:.*}/checkpoints", r.postContainerCheckpoint, router.Experimental),
|
||||
router.NewDeleteRoute("/containers/{name}/checkpoints/{checkpoint}", r.deleteContainerCheckpoint, router.Experimental),
|
||||
func (cr *checkpointRouter) initRoutes() {
|
||||
cr.routes = []router.Route{
|
||||
router.NewGetRoute("/containers/{name:.*}/checkpoints", cr.getContainerCheckpoints, router.Experimental),
|
||||
router.NewPostRoute("/containers/{name:.*}/checkpoints", cr.postContainerCheckpoint, router.Experimental),
|
||||
router.NewDeleteRoute("/containers/{name}/checkpoints/{checkpoint}", cr.deleteContainerCheckpoint, router.Experimental),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,7 +8,7 @@ import (
|
||||
"github.com/docker/docker/api/types/checkpoint"
|
||||
)
|
||||
|
||||
func (s *checkpointRouter) postContainerCheckpoint(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
func (cr *checkpointRouter) postContainerCheckpoint(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
if err := httputils.ParseForm(r); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -18,7 +18,7 @@ func (s *checkpointRouter) postContainerCheckpoint(ctx context.Context, w http.R
|
||||
return err
|
||||
}
|
||||
|
||||
err := s.backend.CheckpointCreate(vars["name"], options)
|
||||
err := cr.backend.CheckpointCreate(vars["name"], options)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -27,12 +27,12 @@ func (s *checkpointRouter) postContainerCheckpoint(ctx context.Context, w http.R
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *checkpointRouter) getContainerCheckpoints(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
func (cr *checkpointRouter) getContainerCheckpoints(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
if err := httputils.ParseForm(r); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
checkpoints, err := s.backend.CheckpointList(vars["name"], checkpoint.ListOptions{
|
||||
checkpoints, err := cr.backend.CheckpointList(vars["name"], checkpoint.ListOptions{
|
||||
CheckpointDir: r.Form.Get("dir"),
|
||||
})
|
||||
if err != nil {
|
||||
@@ -42,12 +42,12 @@ func (s *checkpointRouter) getContainerCheckpoints(ctx context.Context, w http.R
|
||||
return httputils.WriteJSON(w, http.StatusOK, checkpoints)
|
||||
}
|
||||
|
||||
func (s *checkpointRouter) deleteContainerCheckpoint(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
func (cr *checkpointRouter) deleteContainerCheckpoint(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
if err := httputils.ParseForm(r); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err := s.backend.CheckpointDelete(vars["name"], checkpoint.DeleteOptions{
|
||||
err := cr.backend.CheckpointDelete(vars["name"], checkpoint.DeleteOptions{
|
||||
CheckpointDir: r.Form.Get("dir"),
|
||||
CheckpointID: vars["checkpoint"],
|
||||
})
|
||||
|
||||
@@ -18,14 +18,14 @@ func NewRouter(backend Backend) router.Router {
|
||||
}
|
||||
|
||||
// Routes returns the available routes
|
||||
func (r *distributionRouter) Routes() []router.Route {
|
||||
return r.routes
|
||||
func (dr *distributionRouter) Routes() []router.Route {
|
||||
return dr.routes
|
||||
}
|
||||
|
||||
// initRoutes initializes the routes in the distribution router
|
||||
func (r *distributionRouter) initRoutes() {
|
||||
r.routes = []router.Route{
|
||||
func (dr *distributionRouter) initRoutes() {
|
||||
dr.routes = []router.Route{
|
||||
// GET
|
||||
router.NewGetRoute("/distribution/{name:.*}/json", r.getDistributionInfo),
|
||||
router.NewGetRoute("/distribution/{name:.*}/json", dr.getDistributionInfo),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -17,7 +17,7 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func (s *distributionRouter) getDistributionInfo(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
func (dr *distributionRouter) getDistributionInfo(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
if err := httputils.ParseForm(r); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -43,7 +43,7 @@ func (s *distributionRouter) getDistributionInfo(ctx context.Context, w http.Res
|
||||
// For a search it is not an error if no auth was given. Ignore invalid
|
||||
// AuthConfig to increase compatibility with the existing API.
|
||||
authConfig, _ := registry.DecodeAuthConfig(r.Header.Get(registry.AuthHeader))
|
||||
repos, err := s.backend.GetRepositories(ctx, namedRef, authConfig)
|
||||
repos, err := dr.backend.GetRepositories(ctx, namedRef, authConfig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -64,7 +64,7 @@ func (s *distributionRouter) getDistributionInfo(ctx context.Context, w http.Res
|
||||
// - https://github.com/moby/moby/blob/12c7411b6b7314bef130cd59f1c7384a7db06d0b/distribution/pull.go#L76-L152
|
||||
var lastErr error
|
||||
for _, repo := range repos {
|
||||
distributionInspect, err := s.fetchManifest(ctx, repo, namedRef)
|
||||
distributionInspect, err := dr.fetchManifest(ctx, repo, namedRef)
|
||||
if err != nil {
|
||||
lastErr = err
|
||||
continue
|
||||
@@ -74,7 +74,7 @@ func (s *distributionRouter) getDistributionInfo(ctx context.Context, w http.Res
|
||||
return lastErr
|
||||
}
|
||||
|
||||
func (s *distributionRouter) fetchManifest(ctx context.Context, distrepo distribution.Repository, namedRef reference.Named) (registry.DistributionInspect, error) {
|
||||
func (dr *distributionRouter) fetchManifest(ctx context.Context, distrepo distribution.Repository, namedRef reference.Named) (registry.DistributionInspect, error) {
|
||||
var distributionInspect registry.DistributionInspect
|
||||
if canonicalRef, ok := namedRef.(reference.Canonical); !ok {
|
||||
namedRef = reference.TagNameOnly(namedRef)
|
||||
|
||||
@@ -22,22 +22,22 @@ func NewRouter(b Backend, c ClusterBackend) router.Router {
|
||||
}
|
||||
|
||||
// Routes returns the available routes to the network controller
|
||||
func (r *networkRouter) Routes() []router.Route {
|
||||
return r.routes
|
||||
func (n *networkRouter) Routes() []router.Route {
|
||||
return n.routes
|
||||
}
|
||||
|
||||
func (r *networkRouter) initRoutes() {
|
||||
r.routes = []router.Route{
|
||||
func (n *networkRouter) initRoutes() {
|
||||
n.routes = []router.Route{
|
||||
// GET
|
||||
router.NewGetRoute("/networks", r.getNetworksList),
|
||||
router.NewGetRoute("/networks/", r.getNetworksList),
|
||||
router.NewGetRoute("/networks/{id:.+}", r.getNetwork),
|
||||
router.NewGetRoute("/networks", n.getNetworksList),
|
||||
router.NewGetRoute("/networks/", n.getNetworksList),
|
||||
router.NewGetRoute("/networks/{id:.+}", n.getNetwork),
|
||||
// POST
|
||||
router.NewPostRoute("/networks/create", r.postNetworkCreate),
|
||||
router.NewPostRoute("/networks/{id:.*}/connect", r.postNetworkConnect),
|
||||
router.NewPostRoute("/networks/{id:.*}/disconnect", r.postNetworkDisconnect),
|
||||
router.NewPostRoute("/networks/prune", r.postNetworksPrune),
|
||||
router.NewPostRoute("/networks/create", n.postNetworkCreate),
|
||||
router.NewPostRoute("/networks/{id:.*}/connect", n.postNetworkConnect),
|
||||
router.NewPostRoute("/networks/{id:.*}/disconnect", n.postNetworkDisconnect),
|
||||
router.NewPostRoute("/networks/prune", n.postNetworksPrune),
|
||||
// DELETE
|
||||
router.NewDeleteRoute("/networks/{id:.*}", r.deleteNetwork),
|
||||
router.NewDeleteRoute("/networks/{id:.*}", n.deleteNetwork),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -18,22 +18,22 @@ func NewRouter(b Backend) router.Router {
|
||||
}
|
||||
|
||||
// Routes returns the available routers to the plugin controller
|
||||
func (r *pluginRouter) Routes() []router.Route {
|
||||
return r.routes
|
||||
func (pr *pluginRouter) Routes() []router.Route {
|
||||
return pr.routes
|
||||
}
|
||||
|
||||
func (r *pluginRouter) initRoutes() {
|
||||
r.routes = []router.Route{
|
||||
router.NewGetRoute("/plugins", r.listPlugins),
|
||||
router.NewGetRoute("/plugins/{name:.*}/json", r.inspectPlugin),
|
||||
router.NewGetRoute("/plugins/privileges", r.getPrivileges),
|
||||
router.NewDeleteRoute("/plugins/{name:.*}", r.removePlugin),
|
||||
router.NewPostRoute("/plugins/{name:.*}/enable", r.enablePlugin),
|
||||
router.NewPostRoute("/plugins/{name:.*}/disable", r.disablePlugin),
|
||||
router.NewPostRoute("/plugins/pull", r.pullPlugin),
|
||||
router.NewPostRoute("/plugins/{name:.*}/push", r.pushPlugin),
|
||||
router.NewPostRoute("/plugins/{name:.*}/upgrade", r.upgradePlugin),
|
||||
router.NewPostRoute("/plugins/{name:.*}/set", r.setPlugin),
|
||||
router.NewPostRoute("/plugins/create", r.createPlugin),
|
||||
func (pr *pluginRouter) initRoutes() {
|
||||
pr.routes = []router.Route{
|
||||
router.NewGetRoute("/plugins", pr.listPlugins),
|
||||
router.NewGetRoute("/plugins/{name:.*}/json", pr.inspectPlugin),
|
||||
router.NewGetRoute("/plugins/privileges", pr.getPrivileges),
|
||||
router.NewDeleteRoute("/plugins/{name:.*}", pr.removePlugin),
|
||||
router.NewPostRoute("/plugins/{name:.*}/enable", pr.enablePlugin),
|
||||
router.NewPostRoute("/plugins/{name:.*}/disable", pr.disablePlugin),
|
||||
router.NewPostRoute("/plugins/pull", pr.pullPlugin),
|
||||
router.NewPostRoute("/plugins/{name:.*}/push", pr.pushPlugin),
|
||||
router.NewPostRoute("/plugins/{name:.*}/upgrade", pr.upgradePlugin),
|
||||
router.NewPostRoute("/plugins/{name:.*}/set", pr.setPlugin),
|
||||
router.NewPostRoute("/plugins/create", pr.createPlugin),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -18,12 +18,12 @@ func NewRouter(b Backend) router.Router {
|
||||
}
|
||||
|
||||
// Routes returns the available routers to the session controller
|
||||
func (r *sessionRouter) Routes() []router.Route {
|
||||
return r.routes
|
||||
func (sr *sessionRouter) Routes() []router.Route {
|
||||
return sr.routes
|
||||
}
|
||||
|
||||
func (r *sessionRouter) initRoutes() {
|
||||
r.routes = []router.Route{
|
||||
router.NewPostRoute("/session", r.startSession),
|
||||
func (sr *sessionRouter) initRoutes() {
|
||||
sr.routes = []router.Route{
|
||||
router.NewPostRoute("/session", sr.startSession),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
|
||||
//go:build go1.19
|
||||
//go:build go1.23
|
||||
|
||||
package system // import "github.com/docker/docker/api/server/router/system"
|
||||
|
||||
|
||||
@@ -20,21 +20,21 @@ func NewRouter(b Backend, cb ClusterBackend) router.Router {
|
||||
}
|
||||
|
||||
// Routes returns the available routes to the volumes controller
|
||||
func (r *volumeRouter) Routes() []router.Route {
|
||||
return r.routes
|
||||
func (v *volumeRouter) Routes() []router.Route {
|
||||
return v.routes
|
||||
}
|
||||
|
||||
func (r *volumeRouter) initRoutes() {
|
||||
r.routes = []router.Route{
|
||||
func (v *volumeRouter) initRoutes() {
|
||||
v.routes = []router.Route{
|
||||
// GET
|
||||
router.NewGetRoute("/volumes", r.getVolumesList),
|
||||
router.NewGetRoute("/volumes/{name:.*}", r.getVolumeByName),
|
||||
router.NewGetRoute("/volumes", v.getVolumesList),
|
||||
router.NewGetRoute("/volumes/{name:.*}", v.getVolumeByName),
|
||||
// POST
|
||||
router.NewPostRoute("/volumes/create", r.postVolumesCreate),
|
||||
router.NewPostRoute("/volumes/prune", r.postVolumesPrune),
|
||||
router.NewPostRoute("/volumes/create", v.postVolumesCreate),
|
||||
router.NewPostRoute("/volumes/prune", v.postVolumesPrune),
|
||||
// PUT
|
||||
router.NewPutRoute("/volumes/{name:.*}", r.putVolumesUpdate),
|
||||
router.NewPutRoute("/volumes/{name:.*}", v.putVolumesUpdate),
|
||||
// DELETE
|
||||
router.NewDeleteRoute("/volumes/{name:.*}", r.deleteVolumes),
|
||||
router.NewDeleteRoute("/volumes/{name:.*}", v.deleteVolumes),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
|
||||
//go:build go1.19
|
||||
//go:build go1.23
|
||||
|
||||
package containerimage
|
||||
|
||||
|
||||
@@ -278,38 +278,38 @@ func withoutHealthcheck() runConfigModifier {
|
||||
}
|
||||
|
||||
func copyRunConfig(runConfig *container.Config, modifiers ...runConfigModifier) *container.Config {
|
||||
copy := *runConfig
|
||||
copy.Cmd = copyStringSlice(runConfig.Cmd)
|
||||
copy.Env = copyStringSlice(runConfig.Env)
|
||||
copy.Entrypoint = copyStringSlice(runConfig.Entrypoint)
|
||||
copy.OnBuild = copyStringSlice(runConfig.OnBuild)
|
||||
copy.Shell = copyStringSlice(runConfig.Shell)
|
||||
c := *runConfig
|
||||
c.Cmd = copyStringSlice(runConfig.Cmd)
|
||||
c.Env = copyStringSlice(runConfig.Env)
|
||||
c.Entrypoint = copyStringSlice(runConfig.Entrypoint)
|
||||
c.OnBuild = copyStringSlice(runConfig.OnBuild)
|
||||
c.Shell = copyStringSlice(runConfig.Shell)
|
||||
|
||||
if copy.Volumes != nil {
|
||||
copy.Volumes = make(map[string]struct{}, len(runConfig.Volumes))
|
||||
if c.Volumes != nil {
|
||||
c.Volumes = make(map[string]struct{}, len(runConfig.Volumes))
|
||||
for k, v := range runConfig.Volumes {
|
||||
copy.Volumes[k] = v
|
||||
c.Volumes[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
if copy.ExposedPorts != nil {
|
||||
copy.ExposedPorts = make(nat.PortSet, len(runConfig.ExposedPorts))
|
||||
if c.ExposedPorts != nil {
|
||||
c.ExposedPorts = make(nat.PortSet, len(runConfig.ExposedPorts))
|
||||
for k, v := range runConfig.ExposedPorts {
|
||||
copy.ExposedPorts[k] = v
|
||||
c.ExposedPorts[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
if copy.Labels != nil {
|
||||
copy.Labels = make(map[string]string, len(runConfig.Labels))
|
||||
if c.Labels != nil {
|
||||
c.Labels = make(map[string]string, len(runConfig.Labels))
|
||||
for k, v := range runConfig.Labels {
|
||||
copy.Labels[k] = v
|
||||
c.Labels[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
for _, modifier := range modifiers {
|
||||
modifier(©)
|
||||
modifier(&c)
|
||||
}
|
||||
return ©
|
||||
return &c
|
||||
}
|
||||
|
||||
func copyStringSlice(orig []string) []string {
|
||||
|
||||
@@ -166,17 +166,17 @@ func fullMutableRunConfig() *container.Config {
|
||||
|
||||
func TestDeepCopyRunConfig(t *testing.T) {
|
||||
runConfig := fullMutableRunConfig()
|
||||
copy := copyRunConfig(runConfig)
|
||||
assert.Check(t, is.DeepEqual(fullMutableRunConfig(), copy))
|
||||
deepCopy := copyRunConfig(runConfig)
|
||||
assert.Check(t, is.DeepEqual(fullMutableRunConfig(), deepCopy))
|
||||
|
||||
copy.Cmd[1] = "arg2"
|
||||
copy.Env[1] = "env2=new"
|
||||
copy.ExposedPorts["10002"] = struct{}{}
|
||||
copy.Volumes["three"] = struct{}{}
|
||||
copy.Entrypoint[1] = "arg2"
|
||||
copy.OnBuild[0] = "start"
|
||||
copy.Labels["label3"] = "value3"
|
||||
copy.Shell[0] = "sh"
|
||||
deepCopy.Cmd[1] = "arg2"
|
||||
deepCopy.Env[1] = "env2=new"
|
||||
deepCopy.ExposedPorts["10002"] = struct{}{}
|
||||
deepCopy.Volumes["three"] = struct{}{}
|
||||
deepCopy.Entrypoint[1] = "arg2"
|
||||
deepCopy.OnBuild[0] = "start"
|
||||
deepCopy.Labels["label3"] = "value3"
|
||||
deepCopy.Shell[0] = "sh"
|
||||
assert.Check(t, is.DeepEqual(fullMutableRunConfig(), runConfig))
|
||||
}
|
||||
|
||||
|
||||
@@ -9,7 +9,9 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"slices"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
@@ -67,6 +69,14 @@ import (
|
||||
"tags.cncf.io/container-device-interface/pkg/cdi"
|
||||
)
|
||||
|
||||
// strongTLSCiphers defines a secure, modern set of TLS cipher suites for use by the daemon.
|
||||
var strongTLSCiphers = []uint16{
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
|
||||
}
|
||||
|
||||
// DaemonCli represents the daemon CLI.
|
||||
type DaemonCli struct {
|
||||
*config.Config
|
||||
@@ -779,6 +789,18 @@ func newAPIServerTLSConfig(config *config.Config) (*tls.Config, error) {
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "invalid TLS configuration")
|
||||
}
|
||||
// Optionally enforce strong TLS ciphers via the environment variable DOCKER_DISABLE_WEAK_CIPHERS.
|
||||
// When set to true, weak TLS ciphers are disabled, restricting the daemon to a modern, secure
|
||||
// subset of cipher suites.
|
||||
if disableWeakCiphers := os.Getenv("DOCKER_DISABLE_WEAK_CIPHERS"); disableWeakCiphers != "" {
|
||||
disable, err := strconv.ParseBool(disableWeakCiphers)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "invalid value for DOCKER_DISABLE_WEAK_CIPHERS")
|
||||
}
|
||||
if disable {
|
||||
tlsConfig.CipherSuites = slices.Clone(strongTLSCiphers)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return tlsConfig, nil
|
||||
|
||||
@@ -46,11 +46,11 @@ func (s *Health) Status() string {
|
||||
// obeying the locking semantics.
|
||||
//
|
||||
// Status may be set directly if another lock is used.
|
||||
func (s *Health) SetStatus(new string) {
|
||||
func (s *Health) SetStatus(healthStatus string) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
s.Health.Status = new
|
||||
s.Health.Status = healthStatus
|
||||
}
|
||||
|
||||
// OpenMonitorChannel creates and returns a new monitor channel. If there
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
|
||||
//go:build go1.23
|
||||
|
||||
package container // import "github.com/docker/docker/container"
|
||||
|
||||
import (
|
||||
|
||||
@@ -72,30 +72,11 @@ fetch_blob() {
|
||||
shift
|
||||
local curlArgs=("$@")
|
||||
|
||||
local curlHeaders
|
||||
curlHeaders="$(
|
||||
curl -S "${curlArgs[@]}" \
|
||||
-H "Authorization: Bearer $token" \
|
||||
"$registryBase/v2/$image/blobs/$digest" \
|
||||
-o "$targetFile" \
|
||||
-D-
|
||||
)"
|
||||
curlHeaders="$(echo "$curlHeaders" | tr -d '\r')"
|
||||
if grep -qE "^HTTP/[0-9].[0-9] 3" <<< "$curlHeaders"; then
|
||||
rm -f "$targetFile"
|
||||
|
||||
local blobRedirect
|
||||
blobRedirect="$(echo "$curlHeaders" | awk -F ': ' 'tolower($1) == "location" { print $2; exit }')"
|
||||
if [ -z "$blobRedirect" ]; then
|
||||
echo >&2 "error: failed fetching '$image' blob '$digest'"
|
||||
echo "$curlHeaders" | head -1 >&2
|
||||
return 1
|
||||
fi
|
||||
|
||||
curl -fSL "${curlArgs[@]}" \
|
||||
"$blobRedirect" \
|
||||
-o "$targetFile"
|
||||
fi
|
||||
curl -L -S "${curlArgs[@]}" \
|
||||
-H "Authorization: Bearer $token" \
|
||||
"$registryBase/v2/$image/blobs/$digest" \
|
||||
-o "$targetFile" \
|
||||
-D-
|
||||
}
|
||||
|
||||
# handle 'application/vnd.docker.distribution.manifest.v2+json' manifest
|
||||
|
||||
60
daemon/cluster/convert/pluginadapter.go
Normal file
60
daemon/cluster/convert/pluginadapter.go
Normal file
@@ -0,0 +1,60 @@
|
||||
package convert
|
||||
|
||||
import (
|
||||
"github.com/docker/docker/pkg/plugingetter"
|
||||
"github.com/moby/swarmkit/v2/node/plugin"
|
||||
)
|
||||
|
||||
// SwarmPluginGetter adapts a plugingetter.PluginGetter to a Swarmkit plugin.Getter.
|
||||
func SwarmPluginGetter(pg plugingetter.PluginGetter) plugin.Getter {
|
||||
return pluginGetter{pg}
|
||||
}
|
||||
|
||||
type pluginGetter struct {
|
||||
pg plugingetter.PluginGetter
|
||||
}
|
||||
|
||||
var _ plugin.Getter = (*pluginGetter)(nil)
|
||||
|
||||
type swarmPlugin struct {
|
||||
plugingetter.CompatPlugin
|
||||
}
|
||||
|
||||
func (p swarmPlugin) Client() plugin.Client {
|
||||
return p.CompatPlugin.Client()
|
||||
}
|
||||
|
||||
type addrPlugin struct {
|
||||
plugingetter.CompatPlugin
|
||||
plugingetter.PluginAddr
|
||||
}
|
||||
|
||||
var _ plugin.AddrPlugin = (*addrPlugin)(nil)
|
||||
|
||||
func (p addrPlugin) Client() plugin.Client {
|
||||
return p.CompatPlugin.Client()
|
||||
}
|
||||
|
||||
func adaptPluginForSwarm(p plugingetter.CompatPlugin) plugin.Plugin {
|
||||
if pa, ok := p.(plugingetter.PluginAddr); ok {
|
||||
return addrPlugin{p, pa}
|
||||
}
|
||||
return swarmPlugin{p}
|
||||
}
|
||||
|
||||
func (g pluginGetter) Get(name string, capability string) (plugin.Plugin, error) {
|
||||
p, err := g.pg.Get(name, capability, plugingetter.Lookup)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return adaptPluginForSwarm(p), nil
|
||||
}
|
||||
|
||||
func (g pluginGetter) GetAllManagedPluginsByCap(capability string) []plugin.Plugin {
|
||||
pp := g.pg.GetAllManagedPluginsByCap(capability)
|
||||
ret := make([]plugin.Plugin, len(pp))
|
||||
for i, p := range pp {
|
||||
ret[i] = adaptPluginForSwarm(p)
|
||||
}
|
||||
return ret
|
||||
}
|
||||
@@ -52,7 +52,7 @@ func NewExecutor(b executorpkg.Backend, p plugin.Backend, i executorpkg.ImageBac
|
||||
pluginBackend: p,
|
||||
imageBackend: i,
|
||||
volumeBackend: v,
|
||||
dependencies: agent.NewDependencyManager(b.PluginGetter()),
|
||||
dependencies: agent.NewDependencyManager(convert.SwarmPluginGetter(b.PluginGetter())),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -214,36 +214,35 @@ func (e *executor) Configure(ctx context.Context, node *api.Node) error {
|
||||
|
||||
if ingressNA == nil {
|
||||
e.backend.ReleaseIngress()
|
||||
return e.backend.GetAttachmentStore().ResetAttachments(attachments)
|
||||
}
|
||||
|
||||
options := types.NetworkCreate{
|
||||
Driver: ingressNA.Network.DriverState.Name,
|
||||
IPAM: &network.IPAM{
|
||||
Driver: ingressNA.Network.IPAM.Driver.Name,
|
||||
},
|
||||
Options: ingressNA.Network.DriverState.Options,
|
||||
Ingress: true,
|
||||
}
|
||||
|
||||
for _, ic := range ingressNA.Network.IPAM.Configs {
|
||||
c := network.IPAMConfig{
|
||||
Subnet: ic.Subnet,
|
||||
IPRange: ic.Range,
|
||||
Gateway: ic.Gateway,
|
||||
} else {
|
||||
options := types.NetworkCreate{
|
||||
Driver: ingressNA.Network.DriverState.Name,
|
||||
IPAM: &network.IPAM{
|
||||
Driver: ingressNA.Network.IPAM.Driver.Name,
|
||||
},
|
||||
Options: ingressNA.Network.DriverState.Options,
|
||||
Ingress: true,
|
||||
}
|
||||
options.IPAM.Config = append(options.IPAM.Config, c)
|
||||
}
|
||||
|
||||
_, err := e.backend.SetupIngress(clustertypes.NetworkCreateRequest{
|
||||
ID: ingressNA.Network.ID,
|
||||
NetworkCreateRequest: types.NetworkCreateRequest{
|
||||
Name: ingressNA.Network.Spec.Annotations.Name,
|
||||
NetworkCreate: options,
|
||||
},
|
||||
}, ingressNA.Addresses[0])
|
||||
if err != nil {
|
||||
return err
|
||||
for _, ic := range ingressNA.Network.IPAM.Configs {
|
||||
c := network.IPAMConfig{
|
||||
Subnet: ic.Subnet,
|
||||
IPRange: ic.Range,
|
||||
Gateway: ic.Gateway,
|
||||
}
|
||||
options.IPAM.Config = append(options.IPAM.Config, c)
|
||||
}
|
||||
|
||||
_, err := e.backend.SetupIngress(clustertypes.NetworkCreateRequest{
|
||||
ID: ingressNA.Network.ID,
|
||||
NetworkCreateRequest: types.NetworkCreateRequest{
|
||||
Name: ingressNA.Network.Spec.Annotations.Name,
|
||||
NetworkCreate: options,
|
||||
},
|
||||
}, ingressNA.Addresses[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
|
||||
@@ -10,10 +10,12 @@ import (
|
||||
|
||||
"github.com/containerd/log"
|
||||
types "github.com/docker/docker/api/types/swarm"
|
||||
"github.com/docker/docker/daemon/cluster/convert"
|
||||
"github.com/docker/docker/daemon/cluster/executor/container"
|
||||
lncluster "github.com/docker/docker/libnetwork/cluster"
|
||||
"github.com/docker/docker/libnetwork/cnmallocator"
|
||||
swarmapi "github.com/moby/swarmkit/v2/api"
|
||||
swarmallocator "github.com/moby/swarmkit/v2/manager/allocator/cnmallocator"
|
||||
"github.com/moby/swarmkit/v2/manager/allocator/networkallocator"
|
||||
swarmnode "github.com/moby/swarmkit/v2/node"
|
||||
"github.com/pkg/errors"
|
||||
"google.golang.org/grpc"
|
||||
@@ -123,7 +125,7 @@ func (n *nodeRunner) start(conf nodeStartConfig) error {
|
||||
ListenControlAPI: control,
|
||||
ListenRemoteAPI: conf.ListenAddr,
|
||||
AdvertiseRemoteAPI: conf.AdvertiseAddr,
|
||||
NetworkConfig: &swarmallocator.NetworkConfig{
|
||||
NetworkConfig: &networkallocator.Config{
|
||||
DefaultAddrPool: conf.DefaultAddressPool,
|
||||
SubnetSize: conf.SubnetSize,
|
||||
VXLANUDPPort: conf.DataPathPort,
|
||||
@@ -144,7 +146,8 @@ func (n *nodeRunner) start(conf nodeStartConfig) error {
|
||||
ElectionTick: n.cluster.config.RaftElectionTick,
|
||||
UnlockKey: conf.lockKey,
|
||||
AutoLockManagers: conf.autolock,
|
||||
PluginGetter: n.cluster.config.Backend.PluginGetter(),
|
||||
PluginGetter: convert.SwarmPluginGetter(n.cluster.config.Backend.PluginGetter()),
|
||||
NetworkProvider: cnmallocator.NewProvider(n.cluster.config.Backend.PluginGetter()),
|
||||
}
|
||||
if conf.availability != "" {
|
||||
avail, ok := swarmapi.NodeSpec_Availability_value[strings.ToUpper(string(conf.availability))]
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
|
||||
//go:build go1.19
|
||||
//go:build go1.23
|
||||
|
||||
package daemon // import "github.com/docker/docker/daemon"
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
|
||||
//go:build go1.19
|
||||
//go:build go1.23
|
||||
|
||||
// Package daemon exposes the functions that occur on the host server
|
||||
// that the Docker daemon is running.
|
||||
@@ -373,6 +373,16 @@ func (daemon *Daemon) restore(cfg *configStore) error {
|
||||
Type: local.Name,
|
||||
}
|
||||
}
|
||||
|
||||
// The logger option 'fluentd-async-connect' has been
|
||||
// deprecated in v20.10 in favor of 'fluentd-async', and
|
||||
// removed in v28.0.
|
||||
if v, ok := c.HostConfig.LogConfig.Config["fluentd-async-connect"]; ok {
|
||||
if _, ok := c.HostConfig.LogConfig.Config["fluentd-async"]; !ok {
|
||||
c.HostConfig.LogConfig.Config["fluentd-async"] = v
|
||||
}
|
||||
delete(c.HostConfig.LogConfig.Config, "fluentd-async-connect")
|
||||
}
|
||||
}
|
||||
|
||||
if err := daemon.checkpointAndSave(c); err != nil {
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
|
||||
//go:build go1.19
|
||||
//go:build go1.23
|
||||
|
||||
package daemon // import "github.com/docker/docker/daemon"
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
|
||||
//go:build go1.19
|
||||
//go:build go1.23
|
||||
|
||||
package daemon // import "github.com/docker/docker/daemon"
|
||||
|
||||
|
||||
@@ -224,17 +224,17 @@ func (j *Journal) Data() (map[string]string, error) {
|
||||
j.restartData()
|
||||
for {
|
||||
var (
|
||||
data unsafe.Pointer
|
||||
len C.size_t
|
||||
data unsafe.Pointer
|
||||
length C.size_t
|
||||
)
|
||||
rc := C.sd_journal_enumerate_data(j.j, &data, &len)
|
||||
rc := C.sd_journal_enumerate_data(j.j, &data, &length)
|
||||
if rc == 0 {
|
||||
return m, nil
|
||||
} else if rc < 0 {
|
||||
return m, fmt.Errorf("journald: error enumerating entry data: %w", syscall.Errno(-rc))
|
||||
}
|
||||
|
||||
k, v, _ := strings.Cut(C.GoStringN((*C.char)(data), C.int(len)), "=")
|
||||
k, v, _ := strings.Cut(C.GoStringN((*C.char)(data), C.int(length)), "=")
|
||||
m[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
@@ -102,10 +102,10 @@ func New(info logger.Info) (logger.Logger, error) {
|
||||
return nil, fmt.Errorf("journald is not enabled on this host")
|
||||
}
|
||||
|
||||
return new(info)
|
||||
return newJournald(info)
|
||||
}
|
||||
|
||||
func new(info logger.Info) (*journald, error) {
|
||||
func newJournald(info logger.Info) (*journald, error) {
|
||||
// parse log tag
|
||||
tag, err := loggerutils.ParseLogTag(info, loggerutils.DefaultTemplate)
|
||||
if err != nil {
|
||||
|
||||
@@ -24,7 +24,7 @@ func TestLogRead(t *testing.T) {
|
||||
// LogReader needs to filter out.
|
||||
rotatedJournal := fake.NewT(t, journalDir+"/rotated.journal")
|
||||
rotatedJournal.AssignEventTimestampFromSyslogTimestamp = true
|
||||
l, err := new(logger.Info{
|
||||
l, err := newJournald(logger.Info{
|
||||
ContainerID: "wrongone0001",
|
||||
ContainerName: "fake",
|
||||
})
|
||||
@@ -36,7 +36,7 @@ func TestLogRead(t *testing.T) {
|
||||
|
||||
activeJournal := fake.NewT(t, journalDir+"/fake.journal")
|
||||
activeJournal.AssignEventTimestampFromSyslogTimestamp = true
|
||||
l, err = new(logger.Info{
|
||||
l, err = newJournald(logger.Info{
|
||||
ContainerID: "wrongone0002",
|
||||
ContainerName: "fake",
|
||||
})
|
||||
@@ -47,7 +47,7 @@ func TestLogRead(t *testing.T) {
|
||||
assert.NilError(t, rotatedJournal.Send("a log message from a totally different process in the active journal", journal.PriInfo, nil))
|
||||
|
||||
return func(t *testing.T) logger.Logger {
|
||||
l, err := new(info)
|
||||
l, err := newJournald(info)
|
||||
assert.NilError(t, err)
|
||||
l.journalReadDir = journalDir
|
||||
sl := &syncLogger{journald: l, waiters: map[uint64]chan<- struct{}{}}
|
||||
|
||||
@@ -510,12 +510,12 @@ func logMessages(t *testing.T, l logger.Logger, messages []*logger.Message) []*l
|
||||
// existing behavior of the json-file log driver.
|
||||
func transformToExpected(m *logger.Message) *logger.Message {
|
||||
// Copy the log message again so as not to mutate the input.
|
||||
copy := copyLogMessage(m)
|
||||
logMessageCopy := copyLogMessage(m)
|
||||
if m.PLogMetaData == nil || m.PLogMetaData.Last {
|
||||
copy.Line = append(copy.Line, '\n')
|
||||
logMessageCopy.Line = append(logMessageCopy.Line, '\n')
|
||||
}
|
||||
|
||||
return copy
|
||||
return logMessageCopy
|
||||
}
|
||||
|
||||
func copyLogMessage(src *logger.Message) *logger.Message {
|
||||
|
||||
@@ -22,7 +22,7 @@ const extName = "LogDriver"
|
||||
type logPlugin interface {
|
||||
StartLogging(streamPath string, info Info) (err error)
|
||||
StopLogging(streamPath string) (err error)
|
||||
Capabilities() (cap Capability, err error)
|
||||
Capabilities() (capability Capability, err error)
|
||||
ReadLogs(info Info, config ReadConfig) (stream io.ReadCloser, err error)
|
||||
}
|
||||
|
||||
@@ -90,9 +90,9 @@ func makePluginCreator(name string, l logPlugin, scopePath func(s string) string
|
||||
logInfo: logCtx,
|
||||
}
|
||||
|
||||
cap, err := a.plugin.Capabilities()
|
||||
caps, err := a.plugin.Capabilities()
|
||||
if err == nil {
|
||||
a.capabilities = cap
|
||||
a.capabilities = caps
|
||||
}
|
||||
|
||||
stream, err := openPluginStream(a)
|
||||
@@ -107,7 +107,7 @@ func makePluginCreator(name string, l logPlugin, scopePath func(s string) string
|
||||
return nil, errors.Wrapf(err, "error creating logger")
|
||||
}
|
||||
|
||||
if cap.ReadLogs {
|
||||
if caps.ReadLogs {
|
||||
return &pluginAdapterWithRead{a}, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -80,13 +80,11 @@ func (pp *logPluginProxy) Capabilities() (cap Capability, err error) {
|
||||
return
|
||||
}
|
||||
|
||||
cap = ret.Cap
|
||||
|
||||
if ret.Err != "" {
|
||||
err = errors.New(ret.Err)
|
||||
}
|
||||
|
||||
return
|
||||
return ret.Cap, err
|
||||
}
|
||||
|
||||
type logPluginProxyReadLogsRequest struct {
|
||||
|
||||
@@ -641,7 +641,7 @@ func getMaxMountAndExistenceCheckAttempts(layer PushLayer) (maxMountAttempts, ma
|
||||
func getRepositoryMountCandidates(
|
||||
repoInfo reference.Named,
|
||||
hmacKey []byte,
|
||||
max int,
|
||||
maxCandidates int,
|
||||
v2Metadata []metadata.V2Metadata,
|
||||
) []metadata.V2Metadata {
|
||||
candidates := []metadata.V2Metadata{}
|
||||
@@ -658,9 +658,9 @@ func getRepositoryMountCandidates(
|
||||
}
|
||||
|
||||
sortV2MetadataByLikenessAndAge(repoInfo, hmacKey, candidates)
|
||||
if max >= 0 && len(candidates) > max {
|
||||
if maxCandidates >= 0 && len(candidates) > maxCandidates {
|
||||
// select the youngest metadata
|
||||
candidates = candidates[:max]
|
||||
candidates = candidates[:maxCandidates]
|
||||
}
|
||||
|
||||
return candidates
|
||||
|
||||
@@ -52,9 +52,9 @@ type DownloadOption func(*LayerDownloadManager)
|
||||
|
||||
// WithMaxDownloadAttempts configures the maximum number of download
|
||||
// attempts for a download manager.
|
||||
func WithMaxDownloadAttempts(max int) DownloadOption {
|
||||
func WithMaxDownloadAttempts(maxDownloadAttempts int) DownloadOption {
|
||||
return func(dlm *LayerDownloadManager) {
|
||||
dlm.maxDownloadAttempts = max
|
||||
dlm.maxDownloadAttempts = maxDownloadAttempts
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -172,11 +172,16 @@ variable "SYSTEMD" {
|
||||
default = "false"
|
||||
}
|
||||
|
||||
variable "FIREWALLD" {
|
||||
default = "false"
|
||||
}
|
||||
|
||||
target "dev" {
|
||||
inherits = ["_common"]
|
||||
target = "dev"
|
||||
args = {
|
||||
SYSTEMD = SYSTEMD
|
||||
FIREWALLD = FIREWALLD
|
||||
}
|
||||
tags = ["docker-dev"]
|
||||
output = ["type=docker"]
|
||||
|
||||
@@ -56,12 +56,27 @@ if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security; then
|
||||
}
|
||||
fi
|
||||
|
||||
# Allow connections coming from the host (through eth0). This is needed to
|
||||
# access the daemon port (independently of which port is used), or run a
|
||||
# 'remote' Delve session, etc...
|
||||
if [ "${FIREWALLD:-}" = "true" ]; then
|
||||
cat > /etc/firewalld/zones/trusted.xml << EOF
|
||||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<zone target="ACCEPT">
|
||||
<short>Trusted</short>
|
||||
<description>All network connections are accepted.</description>
|
||||
<interface name="eth0"/>
|
||||
<forward/>
|
||||
</zone>
|
||||
EOF
|
||||
fi
|
||||
|
||||
env > /etc/docker-entrypoint-env
|
||||
|
||||
cat > /etc/systemd/system/docker-entrypoint.target << EOF
|
||||
[Unit]
|
||||
Description=the target for docker-entrypoint.service
|
||||
Requires=docker-entrypoint.service systemd-logind.service systemd-user-sessions.service
|
||||
Requires=docker-entrypoint.service systemd-logind.service systemd-user-sessions.service $([ "${FIREWALLD:-}" = "true" ] && echo firewalld.service)
|
||||
EOF
|
||||
|
||||
quoted_args="$(printf " %q" "${@}")"
|
||||
|
||||
@@ -15,7 +15,7 @@ set -e
|
||||
# the binary version you may also need to update the vendor version to pick up
|
||||
# bug fixes or new APIs, however, usually the Go packages are built from a
|
||||
# commit from the master branch.
|
||||
: "${CONTAINERD_VERSION:=v1.7.25}"
|
||||
: "${CONTAINERD_VERSION:=v1.7.27}"
|
||||
|
||||
install_containerd() (
|
||||
echo "Install containerd version $CONTAINERD_VERSION"
|
||||
|
||||
@@ -9,7 +9,7 @@ set -e
|
||||
# the containerd project first, and update both after that is merged.
|
||||
#
|
||||
# When updating RUNC_VERSION, consider updating runc in vendor.mod accordingly
|
||||
: "${RUNC_VERSION:=v1.2.4}"
|
||||
: "${RUNC_VERSION:=v1.2.5}"
|
||||
|
||||
install_runc() {
|
||||
RUNC_BUILDTAGS="${RUNC_BUILDTAGS:-"seccomp"}"
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# syntax=docker/dockerfile:1
|
||||
|
||||
ARG GO_VERSION=1.22.10
|
||||
ARG GO_VERSION=1.23.9
|
||||
ARG BASE_DEBIAN_DISTRO="bookworm"
|
||||
ARG PROTOC_VERSION=3.11.4
|
||||
|
||||
|
||||
@@ -327,10 +327,26 @@ Function Run-UnitTests() {
|
||||
$pkgList = $pkgList | Select-String -NotMatch "github.com/docker/docker/integration"
|
||||
$pkgList = $pkgList -replace "`r`n", " "
|
||||
|
||||
$jsonFilePath = $bundlesDir + "\go-test-report-unit-flaky-tests.json"
|
||||
$xmlFilePath = $bundlesDir + "\junit-report-unit-flaky-tests.xml"
|
||||
$coverageFilePath = $bundlesDir + "\coverage-report-unit-flaky-tests.txt"
|
||||
$goTestArg = "--rerun-fails=4 --format=standard-verbose --jsonfile=$jsonFilePath --junitfile=$xmlFilePath """ + "--packages=$pkgList" + """ -- " + $raceParm + " -coverprofile=$coverageFilePath -covermode=atomic -ldflags -w -a -test.timeout=10m -test.run=TestFlaky.*"
|
||||
Write-Host "INFO: Invoking unit tests run with $GOTESTSUM_LOCATION\gotestsum.exe $goTestArg"
|
||||
$pinfo = New-Object System.Diagnostics.ProcessStartInfo
|
||||
$pinfo.FileName = "$GOTESTSUM_LOCATION\gotestsum.exe"
|
||||
$pinfo.WorkingDirectory = "$($PWD.Path)"
|
||||
$pinfo.UseShellExecute = $false
|
||||
$pinfo.Arguments = $goTestArg
|
||||
$p = New-Object System.Diagnostics.Process
|
||||
$p.StartInfo = $pinfo
|
||||
$p.Start() | Out-Null
|
||||
$p.WaitForExit()
|
||||
if ($p.ExitCode -ne 0) { Throw "Unit tests (flaky) failed" }
|
||||
|
||||
$jsonFilePath = $bundlesDir + "\go-test-report-unit-tests.json"
|
||||
$xmlFilePath = $bundlesDir + "\junit-report-unit-tests.xml"
|
||||
$coverageFilePath = $bundlesDir + "\coverage-report-unit-tests.txt"
|
||||
$goTestArg = "--format=standard-verbose --jsonfile=$jsonFilePath --junitfile=$xmlFilePath -- " + $raceParm + " -coverprofile=$coverageFilePath -covermode=atomic -ldflags -w -a """ + "-test.timeout=10m" + """ $pkgList"
|
||||
$goTestArg = "--format=standard-verbose --jsonfile=$jsonFilePath --junitfile=$xmlFilePath -- " + $raceParm + " -coverprofile=$coverageFilePath -covermode=atomic -ldflags -w -a -test.timeout=10m -test.skip=TestFlaky.*" + " $pkgList"
|
||||
Write-Host "INFO: Invoking unit tests run with $GOTESTSUM_LOCATION\gotestsum.exe $goTestArg"
|
||||
$pinfo = New-Object System.Diagnostics.ProcessStartInfo
|
||||
$pinfo.FileName = "$GOTESTSUM_LOCATION\gotestsum.exe"
|
||||
|
||||
@@ -72,12 +72,6 @@ if [ "$DOCKER_EXPERIMENTAL" ]; then
|
||||
fi
|
||||
|
||||
dockerd="dockerd"
|
||||
if [ -f "/sys/fs/cgroup/cgroup.controllers" ]; then
|
||||
if [ -z "$TEST_IGNORE_CGROUP_CHECK" ] && [ -z "$TEST_SKIP_INTEGRATION_CLI" ]; then
|
||||
echo >&2 '# cgroup v2 requires TEST_SKIP_INTEGRATION_CLI to be set'
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ -n "$DOCKER_ROOTLESS" ]; then
|
||||
if [ -z "$TEST_SKIP_INTEGRATION_CLI" ]; then
|
||||
|
||||
@@ -38,15 +38,38 @@ if [ -n "${base_pkg_list}" ]; then
|
||||
${base_pkg_list}
|
||||
fi
|
||||
if [ -n "${libnetwork_pkg_list}" ]; then
|
||||
rerun_flaky=1
|
||||
|
||||
gotest_extra_flags="-skip=TestFlaky.*"
|
||||
# Custom -run passed, don't run flaky tests separately.
|
||||
if echo "$TESTFLAGS" | grep -Eq '(-run|-test.run)[= ]'; then
|
||||
rerun_flaky=0
|
||||
gotest_extra_flags=""
|
||||
fi
|
||||
|
||||
# libnetwork tests invoke iptables, and cannot be run in parallel. Execute
|
||||
# tests within /libnetwork with '-p=1' to run them sequentially. See
|
||||
# https://github.com/moby/moby/issues/42458#issuecomment-873216754 for details.
|
||||
gotestsum --format=standard-quiet --jsonfile=bundles/go-test-report-libnetwork.json --junitfile=bundles/junit-report-libnetwork.xml -- \
|
||||
"${BUILDFLAGS[@]}" \
|
||||
gotestsum --format=standard-quiet --jsonfile=bundles/go-test-report-libnetwork.json --junitfile=bundles/junit-report-libnetwork.xml \
|
||||
-- "${BUILDFLAGS[@]}" \
|
||||
-cover \
|
||||
-coverprofile=bundles/coverage-libnetwork.out \
|
||||
-covermode=atomic \
|
||||
-p=1 \
|
||||
${gotest_extra_flags} \
|
||||
${TESTFLAGS} \
|
||||
${libnetwork_pkg_list}
|
||||
|
||||
if [ $rerun_flaky -eq 1 ]; then
|
||||
gotestsum --format=standard-quiet --jsonfile=bundles/go-test-report-libnetwork-flaky.json --junitfile=bundles/junit-report-libnetwork-flaky.xml \
|
||||
--packages "${libnetwork_pkg_list}" \
|
||||
--rerun-fails=4 \
|
||||
-- "${BUILDFLAGS[@]}" \
|
||||
-cover \
|
||||
-coverprofile=bundles/coverage-libnetwork-flaky.out \
|
||||
-covermode=atomic \
|
||||
-p=1 \
|
||||
-test.run 'TestFlaky.*' \
|
||||
${TESTFLAGS}
|
||||
fi
|
||||
fi
|
||||
|
||||
@@ -5,27 +5,30 @@ set -e
|
||||
SCRIPTDIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
source "${SCRIPTDIR}/.validate"
|
||||
|
||||
tidy_files=('vendor.mod' 'vendor.sum')
|
||||
modules_files=('man/go.mod' 'vendor.mod')
|
||||
tidy_files=("${modules_files[@]}" 'man/go.sum' 'vendor.sum')
|
||||
vendor_files=("${tidy_files[@]}" 'vendor/')
|
||||
|
||||
validate_vendor_tidy() {
|
||||
validate_tidy_modules() {
|
||||
# check that all go.mod files exist in HEAD; go.sum files are generated by 'go mod tidy'
|
||||
# so we don't need to check for their existence beforehand
|
||||
for f in "${modules_files[@]}"; do
|
||||
if [ ! -f "$f" ]; then
|
||||
echo >&2 "ERROR: missing $f"
|
||||
return 1
|
||||
fi
|
||||
done
|
||||
# run mod tidy
|
||||
./hack/vendor.sh tidy
|
||||
# check if any files have changed
|
||||
git diff --quiet HEAD -- "${tidy_files[@]}"
|
||||
git diff --quiet HEAD -- "${tidy_files[@]}" && [ -z "$(git ls-files --others --exclude-standard)" ]
|
||||
}
|
||||
|
||||
validate_vendor_diff() {
|
||||
mapfile -t changed_files < <(validate_diff --diff-filter=ACMR --name-only -- "${vendor_files[@]}")
|
||||
|
||||
if [ -n "${TEST_FORCE_VALIDATE:-}" ] || [ "${#changed_files[@]}" -gt 0 ]; then
|
||||
# recreate vendor/
|
||||
./hack/vendor.sh vendor
|
||||
# check if any files have changed
|
||||
git diff --quiet HEAD -- "${vendor_files[@]}"
|
||||
else
|
||||
echo >&2 'INFO: no vendor changes in diff; skipping vendor check.'
|
||||
fi
|
||||
# recreate vendor/
|
||||
./hack/vendor.sh vendor
|
||||
# check if any files have changed
|
||||
git diff --quiet HEAD -- "${vendor_files[@]}" && [ -z "$(git ls-files --others --exclude-standard)" ]
|
||||
}
|
||||
|
||||
validate_vendor_license() {
|
||||
@@ -37,16 +40,22 @@ validate_vendor_license() {
|
||||
done < <(awk '/^# /{ print $2 }' vendor/modules.txt)
|
||||
}
|
||||
|
||||
if validate_vendor_tidy && validate_vendor_diff && validate_vendor_license; then
|
||||
if validate_tidy_modules && validate_vendor_diff && validate_vendor_license; then
|
||||
echo >&2 'PASS: Vendoring has been performed correctly!'
|
||||
else
|
||||
{
|
||||
echo 'FAIL: Vendoring was not performed correctly!'
|
||||
echo
|
||||
echo 'The following files changed during re-vendor:'
|
||||
echo
|
||||
git diff --name-status HEAD -- "${vendor_files[@]}"
|
||||
echo
|
||||
if [ -n "$(git ls-files --others --exclude-standard)" ]; then
|
||||
echo 'The following files are missing:'
|
||||
git ls-files --others --exclude-standard
|
||||
echo
|
||||
fi
|
||||
if [ -n "$(git diff --name-status HEAD -- "${vendor_files[@]}")" ]; then
|
||||
echo 'The following files changed during re-vendor:'
|
||||
git diff --name-status HEAD -- "${vendor_files[@]}"
|
||||
echo
|
||||
fi
|
||||
echo 'Please revendor with hack/vendor.sh'
|
||||
echo
|
||||
git diff --diff-filter=M -- "${vendor_files[@]}"
|
||||
|
||||
@@ -7,15 +7,32 @@
|
||||
set -e
|
||||
|
||||
SCRIPTDIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
PROJECT_DIR="$(cd "$SCRIPTDIR/.." && pwd)"
|
||||
|
||||
tidy() (
|
||||
(
|
||||
set -x
|
||||
"${SCRIPTDIR}"/with-go-mod.sh go mod tidy -modfile vendor.mod -compat 1.18
|
||||
)
|
||||
|
||||
(
|
||||
set -x
|
||||
cd man
|
||||
go mod tidy
|
||||
)
|
||||
)
|
||||
|
||||
vendor() (
|
||||
(
|
||||
set -x
|
||||
"${SCRIPTDIR}"/with-go-mod.sh go mod vendor -modfile vendor.mod
|
||||
)
|
||||
|
||||
(
|
||||
set -x
|
||||
cd man
|
||||
go mod vendor
|
||||
)
|
||||
)
|
||||
|
||||
help() {
|
||||
|
||||
@@ -25,7 +25,7 @@ else
|
||||
tee "${ROOTDIR}/go.mod" >&2 <<- EOF
|
||||
module github.com/docker/docker
|
||||
|
||||
go 1.20
|
||||
go 1.23.0
|
||||
EOF
|
||||
trap 'rm -f "${ROOTDIR}/go.mod"' EXIT
|
||||
fi
|
||||
|
||||
@@ -27,6 +27,7 @@ var expectedNetworkInterfaceStats = strings.Split("rx_bytes rx_dropped rx_errors
|
||||
|
||||
func (s *DockerAPISuite) TestAPIStatsNoStreamGetCpu(c *testing.T) {
|
||||
skip.If(c, RuntimeIsWindowsContainerd(), "FIXME: Broken on Windows + containerd combination")
|
||||
skip.If(c, onlyCgroupsv2(), "FIXME: cgroupsV2 not supported yet")
|
||||
out := cli.DockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "while true;usleep 100; do echo 'Hello'; done").Stdout()
|
||||
id := strings.TrimSpace(out)
|
||||
cli.WaitRun(c, id)
|
||||
|
||||
@@ -3959,6 +3959,7 @@ func (s *DockerCLIBuildSuite) TestBuildEmptyStringVolume(c *testing.T) {
|
||||
|
||||
func (s *DockerCLIBuildSuite) TestBuildContainerWithCgroupParent(c *testing.T) {
|
||||
testRequires(c, testEnv.IsLocalDaemon, DaemonIsLinux)
|
||||
skip.If(c, onlyCgroupsv2(), "FIXME: cgroupsV2 not supported yet")
|
||||
|
||||
cgroupParent := "test"
|
||||
data, err := os.ReadFile("/proc/self/cgroup")
|
||||
|
||||
@@ -1739,6 +1739,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartContainerLinksRestart(c *testing.T)
|
||||
|
||||
func (s *DockerDaemonSuite) TestDaemonCgroupParent(c *testing.T) {
|
||||
testRequires(c, DaemonIsLinux)
|
||||
skip.If(c, onlyCgroupsv2(), "FIXME: cgroupsV2 not supported yet")
|
||||
|
||||
cgroupParent := "test"
|
||||
name := "cgroup-test"
|
||||
|
||||
@@ -3204,6 +3204,7 @@ func (s *DockerCLIRunSuite) TestRunWithUlimits(c *testing.T) {
|
||||
func (s *DockerCLIRunSuite) TestRunContainerWithCgroupParent(c *testing.T) {
|
||||
// Not applicable on Windows as uses Unix specific functionality
|
||||
testRequires(c, DaemonIsLinux)
|
||||
skip.If(c, onlyCgroupsv2(), "FIXME: cgroupsV2 not supported yet")
|
||||
|
||||
// cgroup-parent relative path
|
||||
testRunContainerWithCgroupParent(c, "test", "cgroup-test")
|
||||
@@ -3239,6 +3240,7 @@ func testRunContainerWithCgroupParent(c *testing.T, cgroupParent, name string) {
|
||||
func (s *DockerCLIRunSuite) TestRunInvalidCgroupParent(c *testing.T) {
|
||||
// Not applicable on Windows as uses Unix specific functionality
|
||||
testRequires(c, DaemonIsLinux)
|
||||
skip.If(c, onlyCgroupsv2(), "FIXME: cgroupsV2 not supported yet")
|
||||
|
||||
testRunInvalidCgroupParent(c, "../../../../../../../../SHOULD_NOT_EXIST", "SHOULD_NOT_EXIST", "cgroup-invalid-test")
|
||||
|
||||
@@ -3279,6 +3281,7 @@ func (s *DockerCLIRunSuite) TestRunContainerWithCgroupMountRO(c *testing.T) {
|
||||
// Not applicable on Windows as uses Unix specific functionality
|
||||
// --read-only + userns has remount issues
|
||||
testRequires(c, DaemonIsLinux, NotUserNamespace)
|
||||
skip.If(c, onlyCgroupsv2(), "FIXME: cgroupsV2 not supported yet")
|
||||
|
||||
filename := "/sys/fs/cgroup/devices/test123"
|
||||
out, _, err := dockerCmdWithError("run", "busybox", "touch", filename)
|
||||
@@ -4428,6 +4431,7 @@ func (s *DockerCLIRunSuite) TestRunHostnameInHostMode(c *testing.T) {
|
||||
|
||||
func (s *DockerCLIRunSuite) TestRunAddDeviceCgroupRule(c *testing.T) {
|
||||
testRequires(c, DaemonIsLinux)
|
||||
skip.If(c, onlyCgroupsv2(), "FIXME: cgroupsV2 not supported yet")
|
||||
|
||||
const deviceRule = "c 7:128 rwm"
|
||||
|
||||
|
||||
@@ -26,6 +26,7 @@ import (
|
||||
"github.com/moby/sys/mount"
|
||||
"gotest.tools/v3/assert"
|
||||
"gotest.tools/v3/icmd"
|
||||
"gotest.tools/v3/skip"
|
||||
)
|
||||
|
||||
// #6509
|
||||
@@ -450,6 +451,7 @@ func (s *DockerCLIRunSuite) TestRunAttachInvalidDetachKeySequencePreserved(c *te
|
||||
// "test" should be printed
|
||||
func (s *DockerCLIRunSuite) TestRunWithCPUQuota(c *testing.T) {
|
||||
testRequires(c, cpuCfsQuota)
|
||||
skip.If(c, onlyCgroupsv2(), "FIXME: cgroupsV2 not supported yet")
|
||||
|
||||
const file = "/sys/fs/cgroup/cpu/cpu.cfs_quota_us"
|
||||
out := cli.DockerCmd(c, "run", "--cpu-quota", "8000", "--name", "test", "busybox", "cat", file).Combined()
|
||||
@@ -461,6 +463,7 @@ func (s *DockerCLIRunSuite) TestRunWithCPUQuota(c *testing.T) {
|
||||
|
||||
func (s *DockerCLIRunSuite) TestRunWithCpuPeriod(c *testing.T) {
|
||||
testRequires(c, cpuCfsPeriod)
|
||||
skip.If(c, onlyCgroupsv2(), "FIXME: cgroupsV2 not supported yet")
|
||||
|
||||
const file = "/sys/fs/cgroup/cpu/cpu.cfs_period_us"
|
||||
out := cli.DockerCmd(c, "run", "--cpu-period", "50000", "--name", "test", "busybox", "cat", file).Combined()
|
||||
@@ -491,6 +494,7 @@ func (s *DockerCLIRunSuite) TestRunWithInvalidCpuPeriod(c *testing.T) {
|
||||
|
||||
func (s *DockerCLIRunSuite) TestRunWithCPUShares(c *testing.T) {
|
||||
testRequires(c, cpuShare)
|
||||
skip.If(c, onlyCgroupsv2(), "FIXME: cgroupsV2 not supported yet")
|
||||
|
||||
const file = "/sys/fs/cgroup/cpu/cpu.shares"
|
||||
out := cli.DockerCmd(c, "run", "--cpu-shares", "1000", "--name", "test", "busybox", "cat", file).Combined()
|
||||
@@ -511,6 +515,7 @@ func (s *DockerCLIRunSuite) TestRunEchoStdoutWithCPUSharesAndMemoryLimit(c *test
|
||||
|
||||
func (s *DockerCLIRunSuite) TestRunWithCpusetCpus(c *testing.T) {
|
||||
testRequires(c, cgroupCpuset)
|
||||
skip.If(c, onlyCgroupsv2(), "FIXME: cgroupsV2 not supported yet")
|
||||
|
||||
const file = "/sys/fs/cgroup/cpuset/cpuset.cpus"
|
||||
out := cli.DockerCmd(c, "run", "--cpuset-cpus", "0", "--name", "test", "busybox", "cat", file).Combined()
|
||||
@@ -522,6 +527,7 @@ func (s *DockerCLIRunSuite) TestRunWithCpusetCpus(c *testing.T) {
|
||||
|
||||
func (s *DockerCLIRunSuite) TestRunWithCpusetMems(c *testing.T) {
|
||||
testRequires(c, cgroupCpuset)
|
||||
skip.If(c, onlyCgroupsv2(), "FIXME: cgroupsV2 not supported yet")
|
||||
|
||||
const file = "/sys/fs/cgroup/cpuset/cpuset.mems"
|
||||
out := cli.DockerCmd(c, "run", "--cpuset-mems", "0", "--name", "test", "busybox", "cat", file).Combined()
|
||||
@@ -533,6 +539,7 @@ func (s *DockerCLIRunSuite) TestRunWithCpusetMems(c *testing.T) {
|
||||
|
||||
func (s *DockerCLIRunSuite) TestRunWithBlkioWeight(c *testing.T) {
|
||||
testRequires(c, blkioWeight)
|
||||
skip.If(c, onlyCgroupsv2(), "FIXME: cgroupsV2 not supported yet")
|
||||
|
||||
const file = "/sys/fs/cgroup/blkio/blkio.weight"
|
||||
out := cli.DockerCmd(c, "run", "--blkio-weight", "300", "--name", "test", "busybox", "cat", file).Combined()
|
||||
@@ -544,6 +551,7 @@ func (s *DockerCLIRunSuite) TestRunWithBlkioWeight(c *testing.T) {
|
||||
|
||||
func (s *DockerCLIRunSuite) TestRunWithInvalidBlkioWeight(c *testing.T) {
|
||||
testRequires(c, blkioWeight)
|
||||
skip.If(c, onlyCgroupsv2(), "FIXME: cgroupsV2 not supported yet")
|
||||
out, _, err := dockerCmdWithError("run", "--blkio-weight", "5", "busybox", "true")
|
||||
assert.ErrorContains(c, err, "", out)
|
||||
expected := "Range of blkio weight is from 10 to 1000"
|
||||
@@ -602,6 +610,7 @@ func (s *DockerCLIRunSuite) TestRunOOMExitCode(c *testing.T) {
|
||||
|
||||
func (s *DockerCLIRunSuite) TestRunWithMemoryLimit(c *testing.T) {
|
||||
testRequires(c, memoryLimitSupport)
|
||||
skip.If(c, onlyCgroupsv2(), "FIXME: cgroupsV2 not supported yet")
|
||||
|
||||
const file = "/sys/fs/cgroup/memory/memory.limit_in_bytes"
|
||||
cli.DockerCmd(c, "run", "-m", "32M", "--name", "test", "busybox", "cat", file).Assert(c, icmd.Expected{
|
||||
@@ -646,6 +655,7 @@ func (s *DockerCLIRunSuite) TestRunWithSwappinessInvalid(c *testing.T) {
|
||||
|
||||
func (s *DockerCLIRunSuite) TestRunWithMemoryReservation(c *testing.T) {
|
||||
testRequires(c, testEnv.IsLocalDaemon, memoryReservationSupport)
|
||||
skip.If(c, onlyCgroupsv2(), "FIXME: cgroupsV2 not supported yet")
|
||||
|
||||
const file = "/sys/fs/cgroup/memory/memory.soft_limit_in_bytes"
|
||||
out := cli.DockerCmd(c, "run", "--memory-reservation", "200M", "--name", "test", "busybox", "cat", file).Combined()
|
||||
@@ -729,6 +739,7 @@ func (s *DockerCLIRunSuite) TestRunInvalidCpusetMemsFlagValue(c *testing.T) {
|
||||
|
||||
func (s *DockerCLIRunSuite) TestRunInvalidCPUShares(c *testing.T) {
|
||||
testRequires(c, cpuShare, DaemonIsLinux)
|
||||
skip.If(c, onlyCgroupsv2(), "FIXME: cgroupsV2 not supported yet")
|
||||
out, _, err := dockerCmdWithError("run", "--cpu-shares", "1", "busybox", "echo", "test")
|
||||
assert.ErrorContains(c, err, "", out)
|
||||
expected := "minimum allowed cpu-shares is 2"
|
||||
@@ -1383,6 +1394,7 @@ func (s *DockerCLIRunSuite) TestRunDeviceSymlink(c *testing.T) {
|
||||
// TestRunPIDsLimit makes sure the pids cgroup is set with --pids-limit
|
||||
func (s *DockerCLIRunSuite) TestRunPIDsLimit(c *testing.T) {
|
||||
testRequires(c, testEnv.IsLocalDaemon, pidsLimit)
|
||||
skip.If(c, onlyCgroupsv2(), "FIXME: cgroupsV2 not supported yet")
|
||||
|
||||
const file = "/sys/fs/cgroup/pids/pids.max"
|
||||
out := cli.DockerCmd(c, "run", "--name", "skittles", "--pids-limit", "4", "busybox", "cat", file).Combined()
|
||||
@@ -1394,6 +1406,7 @@ func (s *DockerCLIRunSuite) TestRunPIDsLimit(c *testing.T) {
|
||||
|
||||
func (s *DockerCLIRunSuite) TestRunPrivilegedAllowedDevices(c *testing.T) {
|
||||
testRequires(c, DaemonIsLinux, NotUserNamespace)
|
||||
skip.If(c, onlyCgroupsv2(), "FIXME: cgroupsV2 not supported yet")
|
||||
|
||||
const file = "/sys/fs/cgroup/devices/devices.list"
|
||||
out := cli.DockerCmd(c, "run", "--privileged", "busybox", "cat", file).Combined()
|
||||
@@ -1548,6 +1561,7 @@ func (s *DockerDaemonSuite) TestRunWithDaemonDefaultSeccompProfile(c *testing.T)
|
||||
|
||||
func (s *DockerCLIRunSuite) TestRunWithNanoCPUs(c *testing.T) {
|
||||
testRequires(c, cpuCfsQuota, cpuCfsPeriod)
|
||||
skip.If(c, onlyCgroupsv2(), "FIXME: cgroupsV2 not supported yet")
|
||||
|
||||
const file1 = "/sys/fs/cgroup/cpu/cpu.cfs_quota_us"
|
||||
const file2 = "/sys/fs/cgroup/cpu/cpu.cfs_period_us"
|
||||
|
||||
@@ -18,6 +18,7 @@ import (
|
||||
"github.com/docker/docker/testutil"
|
||||
"github.com/docker/docker/testutil/request"
|
||||
"gotest.tools/v3/assert"
|
||||
"gotest.tools/v3/skip"
|
||||
)
|
||||
|
||||
func (s *DockerCLIUpdateSuite) TearDownTest(ctx context.Context, c *testing.T) {
|
||||
@@ -31,6 +32,7 @@ func (s *DockerCLIUpdateSuite) OnTimeout(c *testing.T) {
|
||||
func (s *DockerCLIUpdateSuite) TestUpdateRunningContainer(c *testing.T) {
|
||||
testRequires(c, DaemonIsLinux)
|
||||
testRequires(c, memoryLimitSupport)
|
||||
skip.If(c, onlyCgroupsv2(), "FIXME: cgroupsV2 not supported yet")
|
||||
|
||||
const name = "test-update-container"
|
||||
cli.DockerCmd(c, "run", "-d", "--name", name, "-m", "300M", "busybox", "top")
|
||||
@@ -46,6 +48,7 @@ func (s *DockerCLIUpdateSuite) TestUpdateRunningContainer(c *testing.T) {
|
||||
func (s *DockerCLIUpdateSuite) TestUpdateRunningContainerWithRestart(c *testing.T) {
|
||||
testRequires(c, DaemonIsLinux)
|
||||
testRequires(c, memoryLimitSupport)
|
||||
skip.If(c, onlyCgroupsv2(), "FIXME: cgroupsV2 not supported yet")
|
||||
|
||||
const name = "test-update-container"
|
||||
cli.DockerCmd(c, "run", "-d", "--name", name, "-m", "300M", "busybox", "top")
|
||||
@@ -62,6 +65,7 @@ func (s *DockerCLIUpdateSuite) TestUpdateRunningContainerWithRestart(c *testing.
|
||||
func (s *DockerCLIUpdateSuite) TestUpdateStoppedContainer(c *testing.T) {
|
||||
testRequires(c, DaemonIsLinux)
|
||||
testRequires(c, memoryLimitSupport)
|
||||
skip.If(c, onlyCgroupsv2(), "FIXME: cgroupsV2 not supported yet")
|
||||
|
||||
const name = "test-update-container"
|
||||
const file = "/sys/fs/cgroup/memory/memory.limit_in_bytes"
|
||||
@@ -77,6 +81,7 @@ func (s *DockerCLIUpdateSuite) TestUpdateStoppedContainer(c *testing.T) {
|
||||
func (s *DockerCLIUpdateSuite) TestUpdatePausedContainer(c *testing.T) {
|
||||
testRequires(c, DaemonIsLinux)
|
||||
testRequires(c, cpuShare)
|
||||
skip.If(c, onlyCgroupsv2(), "FIXME: cgroupsV2 not supported yet")
|
||||
|
||||
const name = "test-update-container"
|
||||
cli.DockerCmd(c, "run", "-d", "--name", name, "--cpu-shares", "1000", "busybox", "top")
|
||||
@@ -95,6 +100,7 @@ func (s *DockerCLIUpdateSuite) TestUpdateWithUntouchedFields(c *testing.T) {
|
||||
testRequires(c, DaemonIsLinux)
|
||||
testRequires(c, memoryLimitSupport)
|
||||
testRequires(c, cpuShare)
|
||||
skip.If(c, onlyCgroupsv2(), "FIXME: cgroupsV2 not supported yet")
|
||||
|
||||
const name = "test-update-container"
|
||||
cli.DockerCmd(c, "run", "-d", "--name", name, "-m", "300M", "--cpu-shares", "800", "busybox", "top")
|
||||
@@ -135,6 +141,7 @@ func (s *DockerCLIUpdateSuite) TestUpdateSwapMemoryOnly(c *testing.T) {
|
||||
testRequires(c, DaemonIsLinux)
|
||||
testRequires(c, memoryLimitSupport)
|
||||
testRequires(c, swapMemorySupport)
|
||||
skip.If(c, onlyCgroupsv2(), "FIXME: cgroupsV2 not supported yet")
|
||||
|
||||
const name = "test-update-container"
|
||||
cli.DockerCmd(c, "run", "-d", "--name", name, "--memory", "300M", "--memory-swap", "500M", "busybox", "top")
|
||||
@@ -151,6 +158,7 @@ func (s *DockerCLIUpdateSuite) TestUpdateInvalidSwapMemory(c *testing.T) {
|
||||
testRequires(c, DaemonIsLinux)
|
||||
testRequires(c, memoryLimitSupport)
|
||||
testRequires(c, swapMemorySupport)
|
||||
skip.If(c, onlyCgroupsv2(), "FIXME: cgroupsV2 not supported yet")
|
||||
|
||||
const name = "test-update-container"
|
||||
cli.DockerCmd(c, "run", "-d", "--name", name, "--memory", "300M", "--memory-swap", "500M", "busybox", "top")
|
||||
@@ -244,6 +252,7 @@ func (s *DockerCLIUpdateSuite) TestUpdateNotAffectMonitorRestartPolicy(c *testin
|
||||
|
||||
func (s *DockerCLIUpdateSuite) TestUpdateWithNanoCPUs(c *testing.T) {
|
||||
testRequires(c, cpuCfsQuota, cpuCfsPeriod)
|
||||
skip.If(c, onlyCgroupsv2(), "FIXME: cgroupsV2 not supported yet")
|
||||
|
||||
const file1 = "/sys/fs/cgroup/cpu/cpu.cfs_quota_us"
|
||||
const file2 = "/sys/fs/cgroup/cpu/cpu.cfs_period_us"
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/containerd/cgroups/v3"
|
||||
"github.com/docker/docker/pkg/sysinfo"
|
||||
)
|
||||
|
||||
@@ -67,6 +68,11 @@ func bridgeNfIptables() bool {
|
||||
return !sysInfo.BridgeNFCallIPTablesDisabled
|
||||
}
|
||||
|
||||
func onlyCgroupsv2() bool {
|
||||
// Only check for unified, cgroup v1 tests can run under other modes
|
||||
return cgroups.Mode() == cgroups.Unified
|
||||
}
|
||||
|
||||
func unprivilegedUsernsClone() bool {
|
||||
content, err := os.ReadFile("/proc/sys/kernel/unprivileged_userns_clone")
|
||||
return err != nil || !strings.Contains(string(content), "0")
|
||||
|
||||
@@ -2,3 +2,7 @@ package main
|
||||
|
||||
func setupLocalInfo() {
|
||||
}
|
||||
|
||||
func onlyCgroupsv2() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
@@ -31,6 +32,8 @@ func TestCopyFromContainerPathDoesNotExist(t *testing.T) {
|
||||
assert.Check(t, is.ErrorContains(err, "Could not find the file /dne in container "+cid))
|
||||
}
|
||||
|
||||
// TestCopyFromContainerPathIsNotDir tests that an error is returned when
|
||||
// trying to create a directory on a path that's a file.
|
||||
func TestCopyFromContainerPathIsNotDir(t *testing.T) {
|
||||
skip.If(t, testEnv.UsingSnapshotter(), "FIXME: https://github.com/moby/moby/issues/47107")
|
||||
ctx := setupTest(t)
|
||||
@@ -38,14 +41,29 @@ func TestCopyFromContainerPathIsNotDir(t *testing.T) {
|
||||
apiClient := testEnv.APIClient()
|
||||
cid := container.Create(ctx, t, apiClient)
|
||||
|
||||
path := "/etc/passwd/"
|
||||
expected := "not a directory"
|
||||
// Pick a path that already exists as a file; on Linux "/etc/passwd"
|
||||
// is expected to be there, so we pick that for convenience.
|
||||
existingFile := "/etc/passwd/"
|
||||
expected := []string{"not a directory"}
|
||||
if testEnv.DaemonInfo.OSType == "windows" {
|
||||
path = "c:/windows/system32/drivers/etc/hosts/"
|
||||
expected = "The filename, directory name, or volume label syntax is incorrect."
|
||||
existingFile = "c:/windows/system32/drivers/etc/hosts/"
|
||||
|
||||
// Depending on the version of Windows, this produces a "ERROR_INVALID_NAME" (Windows < 2025),
|
||||
// or a "ERROR_DIRECTORY" (Windows 2025); https://learn.microsoft.com/en-us/windows/win32/debug/system-error-codes--0-499-
|
||||
expected = []string{
|
||||
"The directory name is invalid.", // ERROR_DIRECTORY
|
||||
"The filename, directory name, or volume label syntax is incorrect.", // ERROR_INVALID_NAME
|
||||
}
|
||||
}
|
||||
_, _, err := apiClient.CopyFromContainer(ctx, cid, path)
|
||||
assert.Assert(t, is.ErrorContains(err, expected))
|
||||
_, _, err := apiClient.CopyFromContainer(ctx, cid, existingFile)
|
||||
var found bool
|
||||
for _, expErr := range expected {
|
||||
if err != nil && strings.Contains(err.Error(), expErr) {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
assert.Check(t, found, "Expected error to be one of %v, but got %v", expected, err)
|
||||
}
|
||||
|
||||
func TestCopyToContainerPathDoesNotExist(t *testing.T) {
|
||||
|
||||
412
integration/container/isolation_windows_test.go
Normal file
412
integration/container/isolation_windows_test.go
Normal file
@@ -0,0 +1,412 @@
|
||||
package container // import "github.com/docker/docker/integration/container"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
containertypes "github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/mount"
|
||||
"github.com/docker/docker/api/types/volume"
|
||||
"github.com/docker/docker/integration/internal/container"
|
||||
"github.com/docker/docker/testutil"
|
||||
"gotest.tools/v3/assert"
|
||||
is "gotest.tools/v3/assert/cmp"
|
||||
)
|
||||
|
||||
// TestWindowsProcessIsolation validates process isolation on Windows.
|
||||
func TestWindowsProcessIsolation(t *testing.T) {
|
||||
ctx := setupTest(t)
|
||||
apiClient := testEnv.APIClient()
|
||||
|
||||
testcases := []struct {
|
||||
name string
|
||||
description string
|
||||
validate func(t *testing.T, ctx context.Context, id string)
|
||||
}{
|
||||
{
|
||||
name: "Process isolation basic container lifecycle",
|
||||
description: "Validate container can start, run, and stop with process isolation",
|
||||
validate: func(t *testing.T, ctx context.Context, id string) {
|
||||
// Verify container is running
|
||||
ctrInfo := container.Inspect(ctx, t, apiClient, id)
|
||||
assert.Check(t, is.Equal(ctrInfo.State.Running, true))
|
||||
assert.Check(t, is.Equal(ctrInfo.HostConfig.Isolation, containertypes.IsolationProcess))
|
||||
|
||||
execCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
|
||||
defer cancel()
|
||||
res := container.ExecT(execCtx, t, apiClient, id, []string{"cmd", "/c", "echo", "test"})
|
||||
assert.Check(t, is.Equal(res.ExitCode, 0))
|
||||
assert.Check(t, strings.Contains(res.Stdout(), "test"))
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Process isolation filesystem access",
|
||||
description: "Validate filesystem operations work correctly with process isolation",
|
||||
validate: func(t *testing.T, ctx context.Context, id string) {
|
||||
execCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// Create a test file
|
||||
res := container.ExecT(execCtx, t, apiClient, id,
|
||||
[]string{"cmd", "/c", "echo test123 > C:\\testfile.txt"})
|
||||
assert.Check(t, is.Equal(res.ExitCode, 0))
|
||||
|
||||
// Read the test file
|
||||
execCtx2, cancel2 := context.WithTimeout(ctx, 10*time.Second)
|
||||
defer cancel2()
|
||||
res2 := container.ExecT(execCtx2, t, apiClient, id,
|
||||
[]string{"cmd", "/c", "type", "C:\\testfile.txt"})
|
||||
assert.Check(t, is.Equal(res2.ExitCode, 0))
|
||||
assert.Check(t, strings.Contains(res2.Stdout(), "test123"))
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Process isolation network connectivity",
|
||||
description: "Validate network connectivity works with process isolation",
|
||||
validate: func(t *testing.T, ctx context.Context, id string) {
|
||||
execCtx, cancel := context.WithTimeout(ctx, 15*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// Test localhost connectivity
|
||||
res := container.ExecT(execCtx, t, apiClient, id,
|
||||
[]string{"ping", "-n", "1", "-w", "3000", "localhost"})
|
||||
assert.Check(t, is.Equal(res.ExitCode, 0))
|
||||
assert.Check(t, strings.Contains(res.Stdout(), "Reply from") ||
|
||||
strings.Contains(res.Stdout(), "Received = 1"))
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Process isolation environment variables",
|
||||
description: "Validate environment variables are properly isolated",
|
||||
validate: func(t *testing.T, ctx context.Context, id string) {
|
||||
execCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// Check that container has expected environment variables
|
||||
res := container.ExecT(execCtx, t, apiClient, id,
|
||||
[]string{"cmd", "/c", "set"})
|
||||
assert.Check(t, is.Equal(res.ExitCode, 0))
|
||||
|
||||
// Should have Windows-specific environment variables
|
||||
stdout := res.Stdout()
|
||||
assert.Check(t, strings.Contains(stdout, "COMPUTERNAME") ||
|
||||
strings.Contains(stdout, "OS=Windows"))
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Process isolation CPU access",
|
||||
description: "Validate container can access CPU information",
|
||||
validate: func(t *testing.T, ctx context.Context, id string) {
|
||||
execCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// Check NUMBER_OF_PROCESSORS environment variable
|
||||
res := container.ExecT(execCtx, t, apiClient, id,
|
||||
[]string{"cmd", "/c", "echo", "%NUMBER_OF_PROCESSORS%"})
|
||||
assert.Check(t, is.Equal(res.ExitCode, 0))
|
||||
|
||||
// Should return a number
|
||||
output := strings.TrimSpace(res.Stdout())
|
||||
assert.Check(t, output != "" && output != "%NUMBER_OF_PROCESSORS%",
|
||||
"NUMBER_OF_PROCESSORS not set")
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testcases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
ctx := testutil.StartSpan(ctx, t)
|
||||
|
||||
// Create and start container with process isolation
|
||||
id := container.Run(ctx, t, apiClient,
|
||||
container.WithIsolation(containertypes.IsolationProcess),
|
||||
container.WithCmd("ping", "-t", "localhost"),
|
||||
)
|
||||
defer apiClient.ContainerRemove(ctx, id, containertypes.RemoveOptions{Force: true})
|
||||
|
||||
tc.validate(t, ctx, id)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestWindowsHyperVIsolation validates Hyper-V isolation on Windows.
|
||||
func TestWindowsHyperVIsolation(t *testing.T) {
|
||||
ctx := setupTest(t)
|
||||
apiClient := testEnv.APIClient()
|
||||
|
||||
testcases := []struct {
|
||||
name string
|
||||
description string
|
||||
validate func(t *testing.T, ctx context.Context, id string)
|
||||
}{
|
||||
{
|
||||
name: "Hyper-V isolation basic container lifecycle",
|
||||
description: "Validate container can start, run, and stop with Hyper-V isolation",
|
||||
validate: func(t *testing.T, ctx context.Context, id string) {
|
||||
// Verify container is running
|
||||
ctrInfo := container.Inspect(ctx, t, apiClient, id)
|
||||
assert.Check(t, is.Equal(ctrInfo.State.Running, true))
|
||||
assert.Check(t, is.Equal(ctrInfo.HostConfig.Isolation, containertypes.IsolationHyperV))
|
||||
|
||||
// Execute a simple command
|
||||
execCtx, cancel := context.WithTimeout(ctx, 15*time.Second)
|
||||
defer cancel()
|
||||
res := container.ExecT(execCtx, t, apiClient, id, []string{"cmd", "/c", "echo", "hyperv-test"})
|
||||
assert.Check(t, is.Equal(res.ExitCode, 0))
|
||||
assert.Check(t, strings.Contains(res.Stdout(), "hyperv-test"))
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Hyper-V isolation filesystem operations",
|
||||
description: "Validate filesystem isolation with Hyper-V",
|
||||
validate: func(t *testing.T, ctx context.Context, id string) {
|
||||
execCtx, cancel := context.WithTimeout(ctx, 15*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// Test file creation
|
||||
res := container.ExecT(execCtx, t, apiClient, id,
|
||||
[]string{"cmd", "/c", "echo hyperv-file > C:\\hvtest.txt"})
|
||||
assert.Check(t, is.Equal(res.ExitCode, 0))
|
||||
|
||||
// Test file read
|
||||
execCtx2, cancel2 := context.WithTimeout(ctx, 15*time.Second)
|
||||
defer cancel2()
|
||||
res2 := container.ExecT(execCtx2, t, apiClient, id,
|
||||
[]string{"cmd", "/c", "type", "C:\\hvtest.txt"})
|
||||
assert.Check(t, is.Equal(res2.ExitCode, 0))
|
||||
assert.Check(t, strings.Contains(res2.Stdout(), "hyperv-file"))
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Hyper-V isolation network connectivity",
|
||||
description: "Validate network works with Hyper-V isolation",
|
||||
validate: func(t *testing.T, ctx context.Context, id string) {
|
||||
execCtx, cancel := context.WithTimeout(ctx, 15*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// Test localhost connectivity
|
||||
res := container.ExecT(execCtx, t, apiClient, id,
|
||||
[]string{"ping", "-n", "1", "-w", "5000", "localhost"})
|
||||
assert.Check(t, is.Equal(res.ExitCode, 0))
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testcases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
ctx := testutil.StartSpan(ctx, t)
|
||||
|
||||
// Create and start container with Hyper-V isolation
|
||||
id := container.Run(ctx, t, apiClient,
|
||||
container.WithIsolation(containertypes.IsolationHyperV),
|
||||
container.WithCmd("ping", "-t", "localhost"),
|
||||
)
|
||||
defer apiClient.ContainerRemove(ctx, id, containertypes.RemoveOptions{Force: true})
|
||||
|
||||
tc.validate(t, ctx, id)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestWindowsIsolationComparison validates that both isolation modes can coexist
|
||||
// and that containers can be created with different isolation modes on Windows.
|
||||
func TestWindowsIsolationComparison(t *testing.T) {
|
||||
ctx := setupTest(t)
|
||||
apiClient := testEnv.APIClient()
|
||||
|
||||
// Create container with process isolation
|
||||
processID := container.Run(ctx, t, apiClient,
|
||||
container.WithIsolation(containertypes.IsolationProcess),
|
||||
container.WithCmd("ping", "-t", "localhost"),
|
||||
)
|
||||
defer apiClient.ContainerRemove(ctx, processID, containertypes.RemoveOptions{Force: true})
|
||||
|
||||
processInfo := container.Inspect(ctx, t, apiClient, processID)
|
||||
assert.Check(t, is.Equal(processInfo.HostConfig.Isolation, containertypes.IsolationProcess))
|
||||
assert.Check(t, is.Equal(processInfo.State.Running, true))
|
||||
|
||||
// Create container with Hyper-V isolation
|
||||
hypervID := container.Run(ctx, t, apiClient,
|
||||
container.WithIsolation(containertypes.IsolationHyperV),
|
||||
container.WithCmd("ping", "-t", "localhost"),
|
||||
)
|
||||
defer apiClient.ContainerRemove(ctx, hypervID, containertypes.RemoveOptions{Force: true})
|
||||
|
||||
hypervInfo := container.Inspect(ctx, t, apiClient, hypervID)
|
||||
assert.Check(t, is.Equal(hypervInfo.HostConfig.Isolation, containertypes.IsolationHyperV))
|
||||
assert.Check(t, is.Equal(hypervInfo.State.Running, true))
|
||||
|
||||
// Verify both containers can run simultaneously
|
||||
processInfo2 := container.Inspect(ctx, t, apiClient, processID)
|
||||
hypervInfo2 := container.Inspect(ctx, t, apiClient, hypervID)
|
||||
assert.Check(t, is.Equal(processInfo2.State.Running, true))
|
||||
assert.Check(t, is.Equal(hypervInfo2.State.Running, true))
|
||||
}
|
||||
|
||||
// TestWindowsProcessIsolationResourceConstraints validates resource constraints
|
||||
// work correctly with process isolation on Windows.
|
||||
func TestWindowsProcessIsolationResourceConstraints(t *testing.T) {
|
||||
ctx := setupTest(t)
|
||||
apiClient := testEnv.APIClient()
|
||||
|
||||
testcases := []struct {
|
||||
name string
|
||||
cpuShares int64
|
||||
nanoCPUs int64
|
||||
memoryLimit int64
|
||||
cpuCount int64
|
||||
validateConfig func(t *testing.T, ctrInfo types.ContainerJSON)
|
||||
}{
|
||||
{
|
||||
name: "CPU shares constraint - config only",
|
||||
cpuShares: 512,
|
||||
// Note: CPU shares are accepted by the API but NOT enforced on Windows.
|
||||
// This test only verifies the configuration is stored correctly.
|
||||
// Actual enforcement does not work - containers get equal CPU regardless of shares.
|
||||
// Use NanoCPUs (--cpus flag) for actual CPU limiting on Windows.
|
||||
validateConfig: func(t *testing.T, ctrInfo types.ContainerJSON) {
|
||||
assert.Check(t, is.Equal(ctrInfo.HostConfig.CPUShares, int64(512)))
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "CPU limit (NanoCPUs) constraint",
|
||||
nanoCPUs: 2000000000, // 2.0 CPUs
|
||||
// NanoCPUs enforce hard CPU limits on Windows (unlike CPUShares which don't work)
|
||||
validateConfig: func(t *testing.T, ctrInfo types.ContainerJSON) {
|
||||
assert.Check(t, is.Equal(ctrInfo.HostConfig.NanoCPUs, int64(2000000000)))
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Memory limit constraint",
|
||||
memoryLimit: 512 * 1024 * 1024, // 512MB
|
||||
// Memory limits enforce hard limits on container memory usage
|
||||
validateConfig: func(t *testing.T, ctrInfo types.ContainerJSON) {
|
||||
assert.Check(t, is.Equal(ctrInfo.HostConfig.Memory, int64(512*1024*1024)))
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "CPU count constraint",
|
||||
cpuCount: 2,
|
||||
// CPU count limits the number of CPUs available to the container
|
||||
validateConfig: func(t *testing.T, ctrInfo types.ContainerJSON) {
|
||||
assert.Check(t, is.Equal(ctrInfo.HostConfig.CPUCount, int64(2)))
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testcases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
ctx := testutil.StartSpan(ctx, t)
|
||||
|
||||
opts := []func(*container.TestContainerConfig){
|
||||
container.WithIsolation(containertypes.IsolationProcess),
|
||||
container.WithCmd("ping", "-t", "localhost"),
|
||||
}
|
||||
|
||||
if tc.cpuShares > 0 {
|
||||
opts = append(opts, func(config *container.TestContainerConfig) {
|
||||
config.HostConfig.CPUShares = tc.cpuShares
|
||||
})
|
||||
}
|
||||
|
||||
if tc.nanoCPUs > 0 {
|
||||
opts = append(opts, func(config *container.TestContainerConfig) {
|
||||
config.HostConfig.NanoCPUs = tc.nanoCPUs
|
||||
})
|
||||
}
|
||||
|
||||
if tc.memoryLimit > 0 {
|
||||
opts = append(opts, func(config *container.TestContainerConfig) {
|
||||
config.HostConfig.Memory = tc.memoryLimit
|
||||
})
|
||||
}
|
||||
|
||||
if tc.cpuCount > 0 {
|
||||
opts = append(opts, func(config *container.TestContainerConfig) {
|
||||
config.HostConfig.CPUCount = tc.cpuCount
|
||||
})
|
||||
}
|
||||
|
||||
id := container.Run(ctx, t, apiClient, opts...)
|
||||
defer apiClient.ContainerRemove(ctx, id, containertypes.RemoveOptions{Force: true})
|
||||
|
||||
ctrInfo := container.Inspect(ctx, t, apiClient, id)
|
||||
tc.validateConfig(t, ctrInfo)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestWindowsProcessIsolationVolumeMount validates volume mounting with process isolation on Windows.
|
||||
func TestWindowsProcessIsolationVolumeMount(t *testing.T) {
|
||||
ctx := setupTest(t)
|
||||
apiClient := testEnv.APIClient()
|
||||
|
||||
volumeName := "process-iso-test-volume"
|
||||
volRes, err := apiClient.VolumeCreate(ctx, volume.CreateOptions{
|
||||
Name: volumeName,
|
||||
})
|
||||
assert.NilError(t, err)
|
||||
defer func() {
|
||||
// Force volume removal in case container cleanup fails
|
||||
apiClient.VolumeRemove(ctx, volRes.Name, true)
|
||||
}()
|
||||
|
||||
// Create container with volume mount
|
||||
id := container.Run(ctx, t, apiClient,
|
||||
container.WithIsolation(containertypes.IsolationProcess),
|
||||
container.WithCmd("ping", "-t", "localhost"),
|
||||
container.WithMount(mount.Mount{
|
||||
Type: mount.TypeVolume,
|
||||
Source: volumeName,
|
||||
Target: "C:\\data",
|
||||
}),
|
||||
)
|
||||
defer apiClient.ContainerRemove(ctx, id, containertypes.RemoveOptions{Force: true})
|
||||
|
||||
// Write data to mounted volume
|
||||
execCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
|
||||
defer cancel()
|
||||
res := container.ExecT(execCtx, t, apiClient, id,
|
||||
[]string{"cmd", "/c", "echo volume-test > C:\\data\\test.txt"})
|
||||
assert.Check(t, is.Equal(res.ExitCode, 0))
|
||||
|
||||
// Read data from mounted volume
|
||||
execCtx2, cancel2 := context.WithTimeout(ctx, 10*time.Second)
|
||||
defer cancel2()
|
||||
res2 := container.ExecT(execCtx2, t, apiClient, id,
|
||||
[]string{"cmd", "/c", "type", "C:\\data\\test.txt"})
|
||||
assert.Check(t, is.Equal(res2.ExitCode, 0))
|
||||
assert.Check(t, strings.Contains(res2.Stdout(), "volume-test"))
|
||||
|
||||
// Verify container has volume mount
|
||||
ctrInfo := container.Inspect(ctx, t, apiClient, id)
|
||||
assert.Check(t, len(ctrInfo.Mounts) == 1)
|
||||
assert.Check(t, is.Equal(ctrInfo.Mounts[0].Type, mount.TypeVolume))
|
||||
assert.Check(t, is.Equal(ctrInfo.Mounts[0].Name, volumeName))
|
||||
}
|
||||
|
||||
// TestWindowsHyperVIsolationResourceLimits validates resource limits work with Hyper-V isolation.
|
||||
// This ensures Windows can properly enforce resource constraints on Hyper-V containers.
|
||||
func TestWindowsHyperVIsolationResourceLimits(t *testing.T) {
|
||||
ctx := setupTest(t)
|
||||
apiClient := testEnv.APIClient()
|
||||
|
||||
// Create container with memory limit
|
||||
memoryLimit := int64(512 * 1024 * 1024) // 512MB
|
||||
id := container.Run(ctx, t, apiClient,
|
||||
container.WithIsolation(containertypes.IsolationHyperV),
|
||||
container.WithCmd("ping", "-t", "localhost"),
|
||||
func(config *container.TestContainerConfig) {
|
||||
config.HostConfig.Memory = memoryLimit
|
||||
},
|
||||
)
|
||||
defer apiClient.ContainerRemove(ctx, id, containertypes.RemoveOptions{Force: true})
|
||||
|
||||
// Verify resource limit is set
|
||||
ctrInfo := container.Inspect(ctx, t, apiClient, id)
|
||||
assert.Check(t, is.Equal(ctrInfo.HostConfig.Memory, memoryLimit))
|
||||
assert.Check(t, is.Equal(ctrInfo.HostConfig.Isolation, containertypes.IsolationHyperV))
|
||||
}
|
||||
@@ -1,6 +1,7 @@
|
||||
package container
|
||||
|
||||
import (
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/api/types/container"
|
||||
@@ -56,6 +57,16 @@ func WithExposedPorts(ports ...string) func(*TestContainerConfig) {
|
||||
}
|
||||
}
|
||||
|
||||
// WithPortMap sets/replaces port mappings.
|
||||
func WithPortMap(pm nat.PortMap) func(*TestContainerConfig) {
|
||||
return func(c *TestContainerConfig) {
|
||||
c.HostConfig.PortBindings = nat.PortMap{}
|
||||
for p, b := range pm {
|
||||
c.HostConfig.PortBindings[p] = slices.Clone(b)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// WithTty sets the TTY mode of the container
|
||||
func WithTty(tty bool) func(*TestContainerConfig) {
|
||||
return func(c *TestContainerConfig) {
|
||||
|
||||
@@ -6,11 +6,17 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
containertypes "github.com/docker/docker/api/types/container"
|
||||
networktypes "github.com/docker/docker/api/types/network"
|
||||
"github.com/docker/docker/api/types/versions"
|
||||
ctr "github.com/docker/docker/integration/internal/container"
|
||||
"github.com/docker/docker/integration/internal/network"
|
||||
"github.com/docker/docker/internal/testutils/networking"
|
||||
"github.com/docker/docker/libnetwork/drivers/bridge"
|
||||
"github.com/docker/docker/testutil/daemon"
|
||||
"github.com/docker/go-connections/nat"
|
||||
"gotest.tools/v3/assert"
|
||||
"gotest.tools/v3/icmd"
|
||||
"gotest.tools/v3/skip"
|
||||
)
|
||||
|
||||
@@ -43,3 +49,62 @@ func TestCreateWithMultiNetworks(t *testing.T) {
|
||||
ifacesWithAddress := strings.Count(res.Stdout.String(), "\n")
|
||||
assert.Equal(t, ifacesWithAddress, 3)
|
||||
}
|
||||
|
||||
// TestFirewalldReloadNoZombies checks that when firewalld is reloaded, rules
|
||||
// belonging to deleted networks/containers do not reappear.
|
||||
func TestFirewalldReloadNoZombies(t *testing.T) {
|
||||
skip.If(t, testEnv.DaemonInfo.OSType == "windows")
|
||||
skip.If(t, !networking.FirewalldRunning(), "firewalld is not running")
|
||||
skip.If(t, testEnv.IsRootless, "no firewalld in rootless netns")
|
||||
|
||||
ctx := setupTest(t)
|
||||
d := daemon.New(t)
|
||||
d.StartWithBusybox(ctx, t)
|
||||
defer d.Stop(t)
|
||||
c := d.NewClientT(t)
|
||||
|
||||
const bridgeName = "br-fwdreload"
|
||||
removed := false
|
||||
nw := network.CreateNoError(ctx, t, c, "testnet",
|
||||
network.WithOption(bridge.BridgeName, bridgeName))
|
||||
defer func() {
|
||||
if !removed {
|
||||
network.RemoveNoError(ctx, t, c, nw)
|
||||
}
|
||||
}()
|
||||
|
||||
cid := ctr.Run(ctx, t, c,
|
||||
ctr.WithExposedPorts("80/tcp", "81/tcp"),
|
||||
ctr.WithPortMap(nat.PortMap{"80/tcp": {{HostPort: "8000"}}}))
|
||||
defer func() {
|
||||
if !removed {
|
||||
ctr.Remove(ctx, t, c, cid, containertypes.RemoveOptions{Force: true})
|
||||
}
|
||||
}()
|
||||
|
||||
iptablesSave := icmd.Command("iptables-save")
|
||||
resBeforeDel := icmd.RunCmd(iptablesSave)
|
||||
assert.NilError(t, resBeforeDel.Error)
|
||||
assert.Check(t, strings.Contains(resBeforeDel.Combined(), bridgeName),
|
||||
"With container: expected rules for %s in: %s", bridgeName, resBeforeDel.Combined())
|
||||
|
||||
// Delete the container and its network.
|
||||
ctr.Remove(ctx, t, c, cid, containertypes.RemoveOptions{Force: true})
|
||||
network.RemoveNoError(ctx, t, c, nw)
|
||||
removed = true
|
||||
|
||||
// Check the network does not appear in iptables rules.
|
||||
resAfterDel := icmd.RunCmd(iptablesSave)
|
||||
assert.NilError(t, resAfterDel.Error)
|
||||
assert.Check(t, !strings.Contains(resAfterDel.Combined(), bridgeName),
|
||||
"After deletes: did not expect rules for %s in: %s", bridgeName, resAfterDel.Combined())
|
||||
|
||||
// firewall-cmd --reload, and wait for the daemon to restore rules.
|
||||
networking.FirewalldReload(t, d)
|
||||
|
||||
// Check that rules for the deleted container/network have not reappeared.
|
||||
resAfterReload := icmd.RunCmd(iptablesSave)
|
||||
assert.NilError(t, resAfterReload.Error)
|
||||
assert.Check(t, !strings.Contains(resAfterReload.Combined(), bridgeName),
|
||||
"After deletes: did not expect rules for %s in: %s", bridgeName, resAfterReload.Combined())
|
||||
}
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
containertypes "github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/integration/internal/container"
|
||||
"github.com/docker/docker/integration/internal/network"
|
||||
"github.com/docker/docker/internal/testutils/networking"
|
||||
"github.com/docker/docker/testutil"
|
||||
"github.com/docker/docker/testutil/daemon"
|
||||
"gotest.tools/v3/assert"
|
||||
@@ -160,6 +161,8 @@ func TestBridgeICC(t *testing.T) {
|
||||
Force: true,
|
||||
})
|
||||
|
||||
networking.FirewalldReload(t, d)
|
||||
|
||||
pingHost := tc.pingHost
|
||||
if pingHost == "" {
|
||||
if tc.linkLocal {
|
||||
@@ -235,7 +238,7 @@ func TestBridgeICCWindows(t *testing.T) {
|
||||
pingCmd := []string{"ping", "-n", "1", "-w", "3000", ctr1Name}
|
||||
|
||||
const ctr2Name = "ctr2"
|
||||
attachCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
|
||||
attachCtx, cancel := context.WithTimeout(ctx, 15*time.Second)
|
||||
defer cancel()
|
||||
res := container.RunAttach(attachCtx, t, c,
|
||||
container.WithName(ctr2Name),
|
||||
@@ -351,6 +354,7 @@ func TestBridgeINC(t *testing.T) {
|
||||
defer c.ContainerRemove(ctx, id1, containertypes.RemoveOptions{
|
||||
Force: true,
|
||||
})
|
||||
networking.FirewalldReload(t, d)
|
||||
|
||||
ctr1Info := container.Inspect(ctx, t, c, id1)
|
||||
targetAddr := ctr1Info.NetworkSettings.Networks[bridge1].IPAddress
|
||||
@@ -575,6 +579,7 @@ func TestInternalNwConnectivity(t *testing.T) {
|
||||
container.WithNetworkMode(bridgeName),
|
||||
)
|
||||
defer c.ContainerRemove(ctx, id, containertypes.RemoveOptions{Force: true})
|
||||
networking.FirewalldReload(t, d)
|
||||
|
||||
execCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
|
||||
defer cancel()
|
||||
|
||||
378
integration/networking/drivers_windows_test.go
Normal file
378
integration/networking/drivers_windows_test.go
Normal file
@@ -0,0 +1,378 @@
|
||||
package networking
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
containertypes "github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/integration/internal/container"
|
||||
"github.com/docker/docker/integration/internal/network"
|
||||
"github.com/docker/docker/testutil"
|
||||
"github.com/docker/go-connections/nat"
|
||||
"gotest.tools/v3/assert"
|
||||
is "gotest.tools/v3/assert/cmp"
|
||||
"gotest.tools/v3/poll"
|
||||
"gotest.tools/v3/skip"
|
||||
)
|
||||
|
||||
// TestWindowsNetworkDrivers validates Windows-specific network drivers for Windows.
|
||||
// Tests: NAT, Transparent, and L2Bridge network drivers.
|
||||
func TestWindowsNetworkDrivers(t *testing.T) {
|
||||
ctx := setupTest(t)
|
||||
c := testEnv.APIClient()
|
||||
|
||||
testcases := []struct {
|
||||
name string
|
||||
driver string
|
||||
}{
|
||||
{
|
||||
// NAT connectivity is already tested in TestBridgeICCWindows (bridge_test.go),
|
||||
// so we only validate network creation here.
|
||||
name: "NAT driver network creation",
|
||||
driver: "nat",
|
||||
},
|
||||
{
|
||||
// Only test creation of a Transparent driver network, connectivity depends on external
|
||||
// network infrastructure.
|
||||
name: "Transparent driver network creation",
|
||||
driver: "transparent",
|
||||
},
|
||||
{
|
||||
// L2Bridge driver requires specific host network adapter configuration, test will skip
|
||||
// if host configuration is missing.
|
||||
name: "L2Bridge driver network creation",
|
||||
driver: "l2bridge",
|
||||
},
|
||||
}
|
||||
|
||||
for tcID, tc := range testcases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
ctx := testutil.StartSpan(ctx, t)
|
||||
|
||||
netName := fmt.Sprintf("test-%s-%d", tc.driver, tcID)
|
||||
|
||||
// Create network with specified driver
|
||||
netResp, err := c.NetworkCreate(ctx, netName, types.NetworkCreate{
|
||||
Driver: tc.driver,
|
||||
})
|
||||
if err != nil {
|
||||
// L2Bridge may fail if host network configuration is not available
|
||||
if tc.driver == "l2bridge" {
|
||||
errStr := strings.ToLower(err.Error())
|
||||
if strings.Contains(errStr, "the network does not have a subnet for this endpoint") {
|
||||
t.Skipf("Driver %s requires host network configuration: %v", tc.driver, err)
|
||||
}
|
||||
}
|
||||
t.Fatalf("Failed to create network with %s driver: %v", tc.driver, err)
|
||||
}
|
||||
defer network.RemoveNoError(ctx, t, c, netName)
|
||||
|
||||
// Inspect network to validate driver is correctly set
|
||||
netInfo, err := c.NetworkInspect(ctx, netResp.ID, types.NetworkInspectOptions{})
|
||||
assert.NilError(t, err)
|
||||
assert.Check(t, is.Equal(netInfo.Driver, tc.driver), "Network driver mismatch")
|
||||
assert.Check(t, is.Equal(netInfo.Name, netName), "Network name mismatch")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestWindowsNATDriverPortMapping validates NAT port mapping by testing host connectivity.
|
||||
func TestWindowsNATDriverPortMapping(t *testing.T) {
|
||||
ctx := setupTest(t)
|
||||
c := testEnv.APIClient()
|
||||
|
||||
// Use default NAT network which supports port mapping
|
||||
netName := "nat"
|
||||
|
||||
// PowerShell HTTP listener on port 80
|
||||
psScript := `
|
||||
$listener = New-Object System.Net.HttpListener
|
||||
$listener.Prefixes.Add('http://+:80/')
|
||||
$listener.Start()
|
||||
while ($listener.IsListening) {
|
||||
$context = $listener.GetContext()
|
||||
$response = $context.Response
|
||||
$content = [System.Text.Encoding]::UTF8.GetBytes('OK')
|
||||
$response.ContentLength64 = $content.Length
|
||||
$response.OutputStream.Write($content, 0, $content.Length)
|
||||
$response.OutputStream.Close()
|
||||
}
|
||||
`
|
||||
|
||||
// Create container with port mapping 80->8080
|
||||
ctrName := "port-mapping-test"
|
||||
id := container.Run(ctx, t, c,
|
||||
container.WithName(ctrName),
|
||||
container.WithCmd("powershell", "-Command", psScript),
|
||||
container.WithNetworkMode(netName),
|
||||
container.WithExposedPorts("80/tcp"),
|
||||
container.WithPortMap(nat.PortMap{
|
||||
"80/tcp": []nat.PortBinding{{HostPort: "8080"}},
|
||||
}),
|
||||
)
|
||||
defer c.ContainerRemove(ctx, id, containertypes.RemoveOptions{Force: true})
|
||||
|
||||
// Verify port mapping metadata
|
||||
ctrInfo := container.Inspect(ctx, t, c, id)
|
||||
portKey := nat.Port("80/tcp")
|
||||
assert.Check(t, ctrInfo.NetworkSettings.Ports[portKey] != nil, "Port mapping not found")
|
||||
assert.Check(t, len(ctrInfo.NetworkSettings.Ports[portKey]) > 0, "No host port binding")
|
||||
assert.Check(t, is.Equal(ctrInfo.NetworkSettings.Ports[portKey][0].HostPort, "8080"))
|
||||
|
||||
// Test actual connectivity from host to container via mapped port
|
||||
httpClient := &http.Client{Timeout: 2 * time.Second}
|
||||
checkHTTP := func(t poll.LogT) poll.Result {
|
||||
resp, err := httpClient.Get("http://localhost:8080")
|
||||
if err != nil {
|
||||
return poll.Continue("connection failed: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return poll.Continue("failed to read body: %v", err)
|
||||
}
|
||||
|
||||
if !strings.Contains(string(body), "OK") {
|
||||
return poll.Continue("unexpected response body: %s", string(body))
|
||||
}
|
||||
return poll.Success()
|
||||
}
|
||||
|
||||
poll.WaitOn(t, checkHTTP, poll.WithTimeout(10*time.Second))
|
||||
}
|
||||
|
||||
// TestWindowsNetworkDNSResolution validates DNS resolution on Windows networks.
|
||||
func TestWindowsNetworkDNSResolution(t *testing.T) {
|
||||
ctx := setupTest(t)
|
||||
c := testEnv.APIClient()
|
||||
|
||||
testcases := []struct {
|
||||
name string
|
||||
driver string
|
||||
customDNS bool
|
||||
dnsServers []string
|
||||
}{
|
||||
{
|
||||
name: "Default NAT network DNS resolution",
|
||||
driver: "nat",
|
||||
},
|
||||
{
|
||||
name: "Custom DNS servers on NAT network",
|
||||
driver: "nat",
|
||||
customDNS: true,
|
||||
dnsServers: []string{"8.8.8.8", "8.8.4.4"},
|
||||
},
|
||||
}
|
||||
|
||||
for tcID, tc := range testcases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
ctx := testutil.StartSpan(ctx, t)
|
||||
|
||||
netName := fmt.Sprintf("test-dns-%s-%d", tc.driver, tcID)
|
||||
|
||||
// Create network with optional custom DNS
|
||||
netOpts := []func(*types.NetworkCreate){
|
||||
network.WithDriver(tc.driver),
|
||||
}
|
||||
if tc.customDNS {
|
||||
// Note: DNS options may need to be set via network options on Windows
|
||||
for _, dns := range tc.dnsServers {
|
||||
netOpts = append(netOpts, network.WithOption("com.docker.network.windowsshim.dnsservers", dns))
|
||||
}
|
||||
}
|
||||
|
||||
network.CreateNoError(ctx, t, c, netName, netOpts...)
|
||||
defer network.RemoveNoError(ctx, t, c, netName)
|
||||
|
||||
// Create container and verify DNS resolution
|
||||
ctrName := fmt.Sprintf("dns-test-%d", tcID)
|
||||
id := container.Run(ctx, t, c,
|
||||
container.WithName(ctrName),
|
||||
container.WithNetworkMode(netName),
|
||||
)
|
||||
defer c.ContainerRemove(ctx, id, containertypes.RemoveOptions{Force: true})
|
||||
|
||||
// Test DNS resolution by pinging container by name from another container
|
||||
pingCmd := []string{"ping", "-n", "1", "-w", "3000", ctrName}
|
||||
|
||||
attachCtx, cancel := context.WithTimeout(ctx, 15*time.Second)
|
||||
defer cancel()
|
||||
res := container.RunAttach(attachCtx, t, c,
|
||||
container.WithCmd(pingCmd...),
|
||||
container.WithNetworkMode(netName),
|
||||
)
|
||||
defer c.ContainerRemove(ctx, res.ContainerID, containertypes.RemoveOptions{Force: true})
|
||||
|
||||
assert.Check(t, is.Equal(res.ExitCode, 0), "DNS resolution failed")
|
||||
assert.Check(t, is.Contains(res.Stdout.String(), "Sent = 1, Received = 1, Lost = 0"))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestWindowsNetworkLifecycle validates network lifecycle operations on Windows.
|
||||
// Tests network creation, container attachment, detachment, and deletion.
|
||||
func TestWindowsNetworkLifecycle(t *testing.T) {
|
||||
// Skip this test on Windows Containerd because NetworkConnect operations fail with an
|
||||
// unsupported platform request error:
|
||||
// https://github.com/moby/moby/issues/51589
|
||||
skip.If(t, testEnv.RuntimeIsWindowsContainerd(),
|
||||
"Skipping test: fails on Containerd due to unsupported platform request error during NetworkConnect operations")
|
||||
|
||||
ctx := setupTest(t)
|
||||
c := testEnv.APIClient()
|
||||
|
||||
netName := "lifecycle-test-nat"
|
||||
|
||||
netID := network.CreateNoError(ctx, t, c, netName,
|
||||
network.WithDriver("nat"),
|
||||
)
|
||||
|
||||
netInfo, err := c.NetworkInspect(ctx, netID, types.NetworkInspectOptions{})
|
||||
assert.NilError(t, err)
|
||||
assert.Check(t, is.Equal(netInfo.Name, netName))
|
||||
|
||||
// Create container on network
|
||||
ctrName := "lifecycle-ctr"
|
||||
id := container.Run(ctx, t, c,
|
||||
container.WithName(ctrName),
|
||||
container.WithNetworkMode(netName),
|
||||
)
|
||||
|
||||
ctrInfo := container.Inspect(ctx, t, c, id)
|
||||
assert.Check(t, ctrInfo.NetworkSettings.Networks[netName] != nil)
|
||||
|
||||
// Disconnect container from network
|
||||
err = c.NetworkDisconnect(ctx, netID, id, false)
|
||||
assert.NilError(t, err)
|
||||
|
||||
ctrInfo = container.Inspect(ctx, t, c, id)
|
||||
assert.Check(t, ctrInfo.NetworkSettings.Networks[netName] == nil, "Container still connected after disconnect")
|
||||
|
||||
// Reconnect container to network
|
||||
err = c.NetworkConnect(ctx, netID, id, nil)
|
||||
assert.NilError(t, err)
|
||||
|
||||
ctrInfo = container.Inspect(ctx, t, c, id)
|
||||
assert.Check(t, ctrInfo.NetworkSettings.Networks[netName] != nil, "Container not reconnected")
|
||||
|
||||
c.ContainerRemove(ctx, id, containertypes.RemoveOptions{Force: true})
|
||||
|
||||
network.RemoveNoError(ctx, t, c, netName)
|
||||
|
||||
_, err = c.NetworkInspect(ctx, netID, types.NetworkInspectOptions{})
|
||||
assert.Check(t, err != nil, "Network still exists after deletion")
|
||||
}
|
||||
|
||||
// TestWindowsNetworkIsolation validates network isolation between containers on different networks.
|
||||
// Ensures containers on different networks cannot communicate, validating Windows network driver isolation.
|
||||
func TestWindowsNetworkIsolation(t *testing.T) {
|
||||
ctx := setupTest(t)
|
||||
c := testEnv.APIClient()
|
||||
|
||||
// Create two separate NAT networks
|
||||
net1Name := "isolation-net1"
|
||||
net2Name := "isolation-net2"
|
||||
|
||||
network.CreateNoError(ctx, t, c, net1Name, network.WithDriver("nat"))
|
||||
defer network.RemoveNoError(ctx, t, c, net1Name)
|
||||
|
||||
network.CreateNoError(ctx, t, c, net2Name, network.WithDriver("nat"))
|
||||
defer network.RemoveNoError(ctx, t, c, net2Name)
|
||||
|
||||
// Create container on first network
|
||||
ctr1Name := "isolated-ctr1"
|
||||
id1 := container.Run(ctx, t, c,
|
||||
container.WithName(ctr1Name),
|
||||
container.WithNetworkMode(net1Name),
|
||||
)
|
||||
defer c.ContainerRemove(ctx, id1, containertypes.RemoveOptions{Force: true})
|
||||
|
||||
ctr1Info := container.Inspect(ctx, t, c, id1)
|
||||
ctr1IP := ctr1Info.NetworkSettings.Networks[net1Name].IPAddress
|
||||
assert.Check(t, ctr1IP != "", "Container IP not assigned")
|
||||
|
||||
// Create container on second network and try to ping first container
|
||||
pingCmd := []string{"ping", "-n", "1", "-w", "2000", ctr1IP}
|
||||
|
||||
attachCtx, cancel := context.WithTimeout(ctx, 15*time.Second)
|
||||
defer cancel()
|
||||
res := container.RunAttach(attachCtx, t, c,
|
||||
container.WithCmd(pingCmd...),
|
||||
container.WithNetworkMode(net2Name),
|
||||
)
|
||||
defer c.ContainerRemove(ctx, res.ContainerID, containertypes.RemoveOptions{Force: true})
|
||||
|
||||
// Ping should fail, demonstrating network isolation
|
||||
assert.Check(t, res.ExitCode != 0, "Ping succeeded unexpectedly - networks are not isolated")
|
||||
// Windows ping failure can have various error messages, but we should see some indication of failure
|
||||
stdout := res.Stdout.String()
|
||||
stderr := res.Stderr.String()
|
||||
|
||||
// Check for common Windows ping failure indicators
|
||||
hasFailureIndicator := strings.Contains(stdout, "Destination host unreachable") ||
|
||||
strings.Contains(stdout, "Request timed out") ||
|
||||
strings.Contains(stdout, "100% loss") ||
|
||||
strings.Contains(stdout, "Lost = 1") ||
|
||||
strings.Contains(stderr, "unreachable") ||
|
||||
strings.Contains(stderr, "timeout")
|
||||
|
||||
assert.Check(t, hasFailureIndicator,
|
||||
"Expected ping failure indicators not found. Exit code: %d, stdout: %q, stderr: %q",
|
||||
res.ExitCode, stdout, stderr)
|
||||
}
|
||||
|
||||
// TestWindowsNetworkEndpointManagement validates endpoint creation and management on Windows networks.
|
||||
// Tests that multiple containers can be created and managed on the same network.
|
||||
func TestWindowsNetworkEndpointManagement(t *testing.T) {
|
||||
ctx := setupTest(t)
|
||||
c := testEnv.APIClient()
|
||||
|
||||
netName := "endpoint-test-nat"
|
||||
network.CreateNoError(ctx, t, c, netName, network.WithDriver("nat"))
|
||||
defer network.RemoveNoError(ctx, t, c, netName)
|
||||
|
||||
// Create multiple containers on the same network
|
||||
const numContainers = 3
|
||||
containerIDs := make([]string, numContainers)
|
||||
|
||||
for i := 0; i < numContainers; i++ {
|
||||
ctrName := fmt.Sprintf("endpoint-ctr-%d", i)
|
||||
id := container.Run(ctx, t, c,
|
||||
container.WithName(ctrName),
|
||||
container.WithNetworkMode(netName),
|
||||
)
|
||||
containerIDs[i] = id
|
||||
defer c.ContainerRemove(ctx, id, containertypes.RemoveOptions{Force: true})
|
||||
}
|
||||
|
||||
netInfo, err := c.NetworkInspect(ctx, netName, types.NetworkInspectOptions{})
|
||||
assert.NilError(t, err)
|
||||
assert.Check(t, is.Equal(len(netInfo.Containers), numContainers),
|
||||
"Expected %d containers, got %d", numContainers, len(netInfo.Containers))
|
||||
|
||||
// Verify each container has network connectivity to others
|
||||
for i := 0; i < numContainers-1; i++ {
|
||||
targetName := fmt.Sprintf("endpoint-ctr-%d", i)
|
||||
pingCmd := []string{"ping", "-n", "1", "-w", "3000", targetName}
|
||||
|
||||
sourceName := fmt.Sprintf("endpoint-ctr-%d", i+1)
|
||||
attachCtx, cancel := context.WithTimeout(ctx, 15*time.Second)
|
||||
defer cancel()
|
||||
res := container.RunAttach(attachCtx, t, c,
|
||||
container.WithName(fmt.Sprintf("%s-pinger", sourceName)),
|
||||
container.WithCmd(pingCmd...),
|
||||
container.WithNetworkMode(netName),
|
||||
)
|
||||
defer c.ContainerRemove(ctx, res.ContainerID, containertypes.RemoveOptions{Force: true})
|
||||
|
||||
assert.Check(t, is.Equal(res.ExitCode, 0),
|
||||
"Container %s failed to ping %s", sourceName, targetName)
|
||||
}
|
||||
}
|
||||
71
integration/service/network_linux_test.go
Normal file
71
integration/service/network_linux_test.go
Normal file
@@ -0,0 +1,71 @@
|
||||
package service
|
||||
|
||||
import (
|
||||
stdnet "net"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
swarmtypes "github.com/docker/docker/api/types/swarm"
|
||||
"github.com/docker/docker/integration/internal/swarm"
|
||||
"github.com/docker/docker/internal/testutils/networking"
|
||||
"gotest.tools/v3/assert"
|
||||
"gotest.tools/v3/icmd"
|
||||
"gotest.tools/v3/poll"
|
||||
"gotest.tools/v3/skip"
|
||||
)
|
||||
|
||||
func TestRestoreIngressRulesOnFirewalldReload(t *testing.T) {
|
||||
skip.If(t, testEnv.IsRemoteDaemon)
|
||||
skip.If(t, testEnv.IsRootless, "rootless mode doesn't support Swarm-mode")
|
||||
//skip.If(t, testEnv.FirewallBackendDriver() == "iptables")
|
||||
skip.If(t, !networking.FirewalldRunning(), "Need firewalld to test restoration ingress rules")
|
||||
ctx := setupTest(t)
|
||||
|
||||
// Check the published port is accessible.
|
||||
checkHTTP := func(_ poll.LogT) poll.Result {
|
||||
res := icmd.RunCommand("curl", "-v", "-o", "/dev/null", "-w", "%{http_code}\n",
|
||||
"http://"+stdnet.JoinHostPort("localhost", "8080"))
|
||||
// A "404 Not Found" means the server responded, but it's got nothing to serve.
|
||||
if !strings.Contains(res.Stdout(), "404") {
|
||||
return poll.Continue("404 - not found in: %s, %+v", res.Stdout(), res)
|
||||
}
|
||||
return poll.Success()
|
||||
}
|
||||
|
||||
d := swarm.NewSwarm(ctx, t, testEnv)
|
||||
defer d.Stop(t)
|
||||
c := d.NewClientT(t)
|
||||
defer c.Close()
|
||||
|
||||
serviceID := swarm.CreateService(ctx, t, d,
|
||||
swarm.ServiceWithName("test-ingress-on-firewalld-reload"),
|
||||
swarm.ServiceWithCommand([]string{"httpd", "-f"}),
|
||||
swarm.ServiceWithEndpoint(&swarmtypes.EndpointSpec{
|
||||
Ports: []swarmtypes.PortConfig{
|
||||
{
|
||||
Protocol: "tcp",
|
||||
TargetPort: 80,
|
||||
PublishedPort: 8080,
|
||||
PublishMode: swarmtypes.PortConfigPublishModeIngress,
|
||||
},
|
||||
},
|
||||
}),
|
||||
)
|
||||
defer func() {
|
||||
err := c.ServiceRemove(ctx, serviceID)
|
||||
assert.NilError(t, err)
|
||||
}()
|
||||
|
||||
t.Log("Waiting for the service to start")
|
||||
poll.WaitOn(t, swarm.RunningTasksCount(ctx, c, serviceID, 1), swarm.ServicePoll)
|
||||
t.Log("Checking http access to the service")
|
||||
poll.WaitOn(t, checkHTTP, poll.WithTimeout(30*time.Second))
|
||||
|
||||
t.Log("Firewalld reload")
|
||||
networking.FirewalldReload(t, d)
|
||||
|
||||
t.Log("Checking http access to the service")
|
||||
// It takes a while before this works ...
|
||||
poll.WaitOn(t, checkHTTP, poll.WithTimeout(30*time.Second))
|
||||
}
|
||||
32
internal/iterutil/iterutil.go
Normal file
32
internal/iterutil/iterutil.go
Normal file
@@ -0,0 +1,32 @@
|
||||
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
|
||||
//go:build go1.23
|
||||
|
||||
package iterutil
|
||||
|
||||
import (
|
||||
"iter"
|
||||
"maps"
|
||||
)
|
||||
|
||||
// SameValues checks if a and b yield the same values, independent of order.
|
||||
func SameValues[T comparable](a, b iter.Seq[T]) bool {
|
||||
m, n := make(map[T]int), make(map[T]int)
|
||||
for v := range a {
|
||||
m[v]++
|
||||
}
|
||||
for v := range b {
|
||||
n[v]++
|
||||
}
|
||||
return maps.Equal(m, n)
|
||||
}
|
||||
|
||||
// Deref adapts an iterator of pointers to an iterator of values.
|
||||
func Deref[T any, P *T](s iter.Seq[P]) iter.Seq[T] {
|
||||
return func(yield func(T) bool) {
|
||||
for p := range s {
|
||||
if !yield(*p) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
31
internal/iterutil/iterutil_test.go
Normal file
31
internal/iterutil/iterutil_test.go
Normal file
@@ -0,0 +1,31 @@
|
||||
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
|
||||
//go:build go1.23
|
||||
|
||||
package iterutil
|
||||
|
||||
import (
|
||||
"slices"
|
||||
"testing"
|
||||
|
||||
"gotest.tools/v3/assert"
|
||||
)
|
||||
|
||||
func TestSameValues(t *testing.T) {
|
||||
a := []int{1, 2, 3, 4, 3}
|
||||
b := []int{3, 4, 3, 2, 1}
|
||||
c := []int{1, 2, 3, 4}
|
||||
|
||||
assert.Check(t, SameValues(slices.Values(a), slices.Values(a)))
|
||||
assert.Check(t, SameValues(slices.Values(c), slices.Values(c)))
|
||||
assert.Check(t, SameValues(slices.Values(a), slices.Values(b)))
|
||||
assert.Check(t, !SameValues(slices.Values(a), slices.Values(c)))
|
||||
}
|
||||
|
||||
func TestDeref(t *testing.T) {
|
||||
a := make([]*int, 3)
|
||||
for i := range a {
|
||||
a[i] = &i
|
||||
}
|
||||
b := slices.Collect(Deref(slices.Values(a)))
|
||||
assert.DeepEqual(t, b, []int{0, 1, 2})
|
||||
}
|
||||
@@ -1,5 +1,5 @@
|
||||
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
|
||||
//go:build go1.19
|
||||
//go:build go1.23
|
||||
|
||||
package sliceutil
|
||||
|
||||
|
||||
60
internal/testutils/networking/iptables.go
Normal file
60
internal/testutils/networking/iptables.go
Normal file
@@ -0,0 +1,60 @@
|
||||
package networking
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"regexp"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/testutil/daemon"
|
||||
"golang.org/x/net/context"
|
||||
"gotest.tools/v3/assert"
|
||||
"gotest.tools/v3/icmd"
|
||||
"gotest.tools/v3/poll"
|
||||
)
|
||||
|
||||
func FirewalldRunning() bool {
|
||||
state, err := exec.Command("firewall-cmd", "--state").CombinedOutput()
|
||||
return err == nil && strings.TrimSpace(string(state)) == "running"
|
||||
}
|
||||
|
||||
func extractLogTime(s string) (time.Time, error) {
|
||||
// time="2025-07-15T13:46:13.414214418Z" level=info msg=""
|
||||
re := regexp.MustCompile(`time="([^"]+)"`)
|
||||
matches := re.FindStringSubmatch(s)
|
||||
if len(matches) < 2 {
|
||||
return time.Time{}, fmt.Errorf("timestamp not found in log line: %s, matches: %+v", s, matches)
|
||||
}
|
||||
|
||||
return time.Parse(time.RFC3339Nano, matches[1])
|
||||
}
|
||||
|
||||
// FirewalldReload reloads firewalld and waits for the daemon to re-create its rules.
|
||||
// It's a no-op if firewalld is not running, and the test fails if the reload does
|
||||
// not complete.
|
||||
func FirewalldReload(t *testing.T, d *daemon.Daemon) {
|
||||
t.Helper()
|
||||
if !FirewalldRunning() {
|
||||
return
|
||||
}
|
||||
timeBeforeReload := time.Now()
|
||||
res := icmd.RunCommand("firewall-cmd", "--reload")
|
||||
assert.NilError(t, res.Error)
|
||||
|
||||
ctx := context.Background()
|
||||
poll.WaitOn(t, d.PollCheckLogs(ctx, func(s string) bool {
|
||||
if !strings.Contains(s, "Firewalld reload completed") {
|
||||
return false
|
||||
}
|
||||
lastReload, err := extractLogTime(s)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
if lastReload.After(timeBeforeReload) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}))
|
||||
}
|
||||
47
internal/testutils/networking/iptables_test.go
Normal file
47
internal/testutils/networking/iptables_test.go
Normal file
@@ -0,0 +1,47 @@
|
||||
package networking
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func Test_getTimeFromLogMsg(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
s string
|
||||
want time.Time
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "valid time",
|
||||
s: `time="2025-07-15T13:46:13.414214418Z" level=info msg=""`,
|
||||
want: time.Date(2025, 7, 15, 13, 46, 13, 414214418, time.UTC),
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "invalid format",
|
||||
s: `time="invalid-time-format" level=info msg=""`,
|
||||
want: time.Time{},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "missing time",
|
||||
s: `level=info msg=""`,
|
||||
want: time.Time{},
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got, err := extractLogTime(tt.s)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("getTimeFromLogMsg() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("getTimeFromLogMsg() got = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,3 +1,6 @@
|
||||
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
|
||||
//go:build go1.23
|
||||
|
||||
package libnetwork
|
||||
|
||||
//go:generate protoc -I=. -I=../vendor/ --gogofaster_out=import_path=github.com/docker/docker/libnetwork:. agent.proto
|
||||
@@ -7,10 +10,13 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/netip"
|
||||
"slices"
|
||||
"sort"
|
||||
"sync"
|
||||
|
||||
"github.com/containerd/log"
|
||||
"github.com/docker/docker/internal/iterutil"
|
||||
"github.com/docker/docker/libnetwork/cluster"
|
||||
"github.com/docker/docker/libnetwork/discoverapi"
|
||||
"github.com/docker/docker/libnetwork/driverapi"
|
||||
@@ -490,17 +496,19 @@ func (n *Network) Services() map[string]ServiceInfo {
|
||||
// Walk through the driver's tables, have the driver decode the entries
|
||||
// and return the tuple {ep ID, value}. value is a string that coveys
|
||||
// relevant info about the endpoint.
|
||||
for _, table := range n.driverTables {
|
||||
if table.objType != driverapi.EndpointObject {
|
||||
continue
|
||||
}
|
||||
for key, value := range agent.networkDB.GetTableByNetwork(table.name, nwID) {
|
||||
epID, info := d.DecodeTableEntry(table.name, key, value.Value)
|
||||
if ep, ok := eps[epID]; !ok {
|
||||
log.G(context.TODO()).Errorf("Inconsistent driver and libnetwork state for endpoint %s", epID)
|
||||
} else {
|
||||
ep.info = info
|
||||
eps[epID] = ep
|
||||
if d, ok := d.(driverapi.TableWatcher); ok {
|
||||
for _, table := range n.driverTables {
|
||||
if table.objType != driverapi.EndpointObject {
|
||||
continue
|
||||
}
|
||||
for key, value := range agent.networkDB.GetTableByNetwork(table.name, nwID) {
|
||||
epID, info := d.DecodeTableEntry(table.name, key, value.Value)
|
||||
if ep, ok := eps[epID]; !ok {
|
||||
log.G(context.TODO()).Errorf("Inconsistent driver and libnetwork state for endpoint %s", epID)
|
||||
} else {
|
||||
ep.info = info
|
||||
eps[epID] = ep
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -777,23 +785,6 @@ func (n *Network) addDriverWatches() {
|
||||
agent.driverCancelFuncs[n.ID()] = append(agent.driverCancelFuncs[n.ID()], cancel)
|
||||
agent.mu.Unlock()
|
||||
go c.handleTableEvents(ch, n.handleDriverTableEvent)
|
||||
d, err := n.driver(false)
|
||||
if err != nil {
|
||||
log.G(context.TODO()).Errorf("Could not resolve driver %s while walking driver tabl: %v", n.networkType, err)
|
||||
return
|
||||
}
|
||||
|
||||
err = agent.networkDB.WalkTable(table.name, func(nid, key string, value []byte, deleted bool) bool {
|
||||
// skip the entries that are mark for deletion, this is safe because this function is
|
||||
// called at initialization time so there is no state to delete
|
||||
if nid == n.ID() && !deleted {
|
||||
d.EventNotify(driverapi.Create, nid, table.name, key, value)
|
||||
}
|
||||
return false
|
||||
})
|
||||
if err != nil {
|
||||
log.G(context.TODO()).WithError(err).Warn("Error while walking networkdb")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -830,33 +821,14 @@ func (n *Network) handleDriverTableEvent(ev events.Event) {
|
||||
log.G(context.TODO()).Errorf("Could not resolve driver %s while handling driver table event: %v", n.networkType, err)
|
||||
return
|
||||
}
|
||||
|
||||
var (
|
||||
etype driverapi.EventType
|
||||
tname string
|
||||
key string
|
||||
value []byte
|
||||
)
|
||||
|
||||
switch event := ev.(type) {
|
||||
case networkdb.CreateEvent:
|
||||
tname = event.Table
|
||||
key = event.Key
|
||||
value = event.Value
|
||||
etype = driverapi.Create
|
||||
case networkdb.DeleteEvent:
|
||||
tname = event.Table
|
||||
key = event.Key
|
||||
value = event.Value
|
||||
etype = driverapi.Delete
|
||||
case networkdb.UpdateEvent:
|
||||
tname = event.Table
|
||||
key = event.Key
|
||||
value = event.Value
|
||||
etype = driverapi.Delete
|
||||
ed, ok := d.(driverapi.TableWatcher)
|
||||
if !ok {
|
||||
log.G(context.TODO()).Errorf("Could not notify driver %s about table event: driver does not implement TableWatcher interface", n.networkType)
|
||||
return
|
||||
}
|
||||
|
||||
d.EventNotify(etype, n.ID(), tname, key, value)
|
||||
event := ev.(networkdb.WatchEvent)
|
||||
ed.EventNotify(n.ID(), event.Table, event.Key, event.Prev, event.Value)
|
||||
}
|
||||
|
||||
func (c *Controller) handleNodeTableEvent(ev events.Event) {
|
||||
@@ -865,13 +837,14 @@ func (c *Controller) handleNodeTableEvent(ev events.Event) {
|
||||
isAdd bool
|
||||
nodeAddr networkdb.NodeAddr
|
||||
)
|
||||
switch event := ev.(type) {
|
||||
case networkdb.CreateEvent:
|
||||
event := ev.(networkdb.WatchEvent)
|
||||
switch {
|
||||
case event.IsCreate():
|
||||
value = event.Value
|
||||
isAdd = true
|
||||
case networkdb.DeleteEvent:
|
||||
value = event.Value
|
||||
case networkdb.UpdateEvent:
|
||||
case event.IsDelete():
|
||||
value = event.Prev
|
||||
case event.IsUpdate():
|
||||
log.G(context.TODO()).Errorf("Unexpected update node table event = %#v", event)
|
||||
}
|
||||
|
||||
@@ -883,94 +856,139 @@ func (c *Controller) handleNodeTableEvent(ev events.Event) {
|
||||
c.processNodeDiscovery([]net.IP{nodeAddr.Addr}, isAdd)
|
||||
}
|
||||
|
||||
type endpointEvent struct {
|
||||
EndpointRecord
|
||||
// Virtual IP of the service to which this endpoint belongs.
|
||||
VirtualIP netip.Addr
|
||||
// IP assigned to this endpoint.
|
||||
EndpointIP netip.Addr
|
||||
}
|
||||
|
||||
func unmarshalEndpointRecord(data []byte) (*endpointEvent, error) {
|
||||
var epRec EndpointRecord
|
||||
if err := proto.Unmarshal(data, &epRec); err != nil {
|
||||
return nil, fmt.Errorf("failed to unmarshal endpoint record: %w", err)
|
||||
}
|
||||
|
||||
vip, _ := netip.ParseAddr(epRec.VirtualIP)
|
||||
eip, _ := netip.ParseAddr(epRec.EndpointIP)
|
||||
|
||||
if epRec.Name == "" || !eip.IsValid() {
|
||||
return nil, fmt.Errorf("invalid endpoint name/ip in service table event %s", data)
|
||||
}
|
||||
|
||||
return &endpointEvent{
|
||||
EndpointRecord: epRec,
|
||||
VirtualIP: vip,
|
||||
EndpointIP: eip,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// EquivalentTo returns true if ev is semantically equivalent to other.
|
||||
func (ev *endpointEvent) EquivalentTo(other *endpointEvent) bool {
|
||||
return ev.Name == other.Name &&
|
||||
ev.ServiceName == other.ServiceName &&
|
||||
ev.ServiceID == other.ServiceID &&
|
||||
ev.VirtualIP == other.VirtualIP &&
|
||||
ev.EndpointIP == other.EndpointIP &&
|
||||
ev.ServiceDisabled == other.ServiceDisabled &&
|
||||
iterutil.SameValues(
|
||||
iterutil.Deref(slices.Values(ev.IngressPorts)),
|
||||
iterutil.Deref(slices.Values(other.IngressPorts))) &&
|
||||
iterutil.SameValues(slices.Values(ev.Aliases), slices.Values(other.Aliases)) &&
|
||||
iterutil.SameValues(slices.Values(ev.TaskAliases), slices.Values(other.TaskAliases))
|
||||
}
|
||||
|
||||
func (c *Controller) handleEpTableEvent(ev events.Event) {
|
||||
var (
|
||||
nid string
|
||||
eid string
|
||||
value []byte
|
||||
epRec EndpointRecord
|
||||
)
|
||||
event := ev.(networkdb.WatchEvent)
|
||||
nid := event.NetworkID
|
||||
eid := event.Key
|
||||
|
||||
switch event := ev.(type) {
|
||||
case networkdb.CreateEvent:
|
||||
nid = event.NetworkID
|
||||
eid = event.Key
|
||||
value = event.Value
|
||||
case networkdb.DeleteEvent:
|
||||
nid = event.NetworkID
|
||||
eid = event.Key
|
||||
value = event.Value
|
||||
case networkdb.UpdateEvent:
|
||||
nid = event.NetworkID
|
||||
eid = event.Key
|
||||
value = event.Value
|
||||
default:
|
||||
log.G(context.TODO()).Errorf("Unexpected update service table event = %#v", event)
|
||||
return
|
||||
var prev, epRec *endpointEvent
|
||||
if event.Prev != nil {
|
||||
var err error
|
||||
prev, err = unmarshalEndpointRecord(event.Prev)
|
||||
if err != nil {
|
||||
log.G(context.TODO()).WithError(err).Error("error unmarshaling previous value from service table event")
|
||||
return
|
||||
}
|
||||
}
|
||||
if event.Value != nil {
|
||||
var err error
|
||||
epRec, err = unmarshalEndpointRecord(event.Value)
|
||||
if err != nil {
|
||||
log.G(context.TODO()).WithError(err).Error("error unmarshaling service table event")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
err := proto.Unmarshal(value, &epRec)
|
||||
if err != nil {
|
||||
log.G(context.TODO()).Errorf("Failed to unmarshal service table value: %v", err)
|
||||
return
|
||||
}
|
||||
logger := log.G(context.TODO()).WithFields(log.Fields{
|
||||
"evt": event,
|
||||
"R": epRec,
|
||||
"prev": prev,
|
||||
})
|
||||
logger.Debug("handleEpTableEvent")
|
||||
|
||||
containerName := epRec.Name
|
||||
svcName := epRec.ServiceName
|
||||
svcID := epRec.ServiceID
|
||||
vip := net.ParseIP(epRec.VirtualIP)
|
||||
ip := net.ParseIP(epRec.EndpointIP)
|
||||
ingressPorts := epRec.IngressPorts
|
||||
serviceAliases := epRec.Aliases
|
||||
taskAliases := epRec.TaskAliases
|
||||
if prev != nil {
|
||||
if epRec != nil && prev.EquivalentTo(epRec) {
|
||||
// Avoid flapping if we would otherwise remove a service
|
||||
// binding then immediately replace it with an equivalent one.
|
||||
return
|
||||
}
|
||||
|
||||
if containerName == "" || ip == nil {
|
||||
log.G(context.TODO()).Errorf("Invalid endpoint name/ip received while handling service table event %s", value)
|
||||
return
|
||||
}
|
||||
|
||||
switch ev.(type) {
|
||||
case networkdb.CreateEvent:
|
||||
log.G(context.TODO()).Debugf("handleEpTableEvent ADD %s R:%v", eid, epRec)
|
||||
if svcID != "" {
|
||||
if prev.ServiceID != "" {
|
||||
// This is a remote task part of a service
|
||||
if err := c.addServiceBinding(svcName, svcID, nid, eid, containerName, vip, ingressPorts, serviceAliases, taskAliases, ip, "handleEpTableEvent"); err != nil {
|
||||
log.G(context.TODO()).Errorf("failed adding service binding for %s epRec:%v err:%v", eid, epRec, err)
|
||||
return
|
||||
if !prev.ServiceDisabled {
|
||||
err := c.rmServiceBinding(prev.ServiceName, prev.ServiceID, nid, eid,
|
||||
prev.Name, prev.VirtualIP.AsSlice(), prev.IngressPorts,
|
||||
prev.Aliases, prev.TaskAliases, prev.EndpointIP.AsSlice(),
|
||||
"handleEpTableEvent", true, true)
|
||||
if err != nil {
|
||||
logger.WithError(err).Error("failed removing service binding")
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// This is a remote container simply attached to an attachable network
|
||||
if err := c.addContainerNameResolution(nid, eid, containerName, taskAliases, ip, "handleEpTableEvent"); err != nil {
|
||||
log.G(context.TODO()).Errorf("failed adding container name resolution for %s epRec:%v err:%v", eid, epRec, err)
|
||||
err := c.delContainerNameResolution(nid, eid, prev.Name, prev.TaskAliases,
|
||||
prev.EndpointIP.AsSlice(), "handleEpTableEvent")
|
||||
if err != nil {
|
||||
logger.WithError(err).Errorf("failed removing container name resolution")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
case networkdb.DeleteEvent:
|
||||
log.G(context.TODO()).Debugf("handleEpTableEvent DEL %s R:%v", eid, epRec)
|
||||
if svcID != "" {
|
||||
if epRec != nil {
|
||||
if epRec.ServiceID != "" {
|
||||
// This is a remote task part of a service
|
||||
if err := c.rmServiceBinding(svcName, svcID, nid, eid, containerName, vip, ingressPorts, serviceAliases, taskAliases, ip, "handleEpTableEvent", true, true); err != nil {
|
||||
log.G(context.TODO()).Errorf("failed removing service binding for %s epRec:%v err:%v", eid, epRec, err)
|
||||
return
|
||||
if epRec.ServiceDisabled {
|
||||
// Don't double-remove a service binding
|
||||
if prev == nil || prev.ServiceID != epRec.ServiceID || !prev.ServiceDisabled {
|
||||
err := c.rmServiceBinding(epRec.ServiceName, epRec.ServiceID,
|
||||
nid, eid, epRec.Name, epRec.VirtualIP.AsSlice(),
|
||||
epRec.IngressPorts, epRec.Aliases, epRec.TaskAliases,
|
||||
epRec.EndpointIP.AsSlice(), "handleEpTableEvent", true, false)
|
||||
if err != nil {
|
||||
logger.WithError(err).Error("failed disabling service binding")
|
||||
return
|
||||
}
|
||||
}
|
||||
} else {
|
||||
err := c.addServiceBinding(epRec.ServiceName, epRec.ServiceID, nid, eid,
|
||||
epRec.Name, epRec.VirtualIP.AsSlice(), epRec.IngressPorts,
|
||||
epRec.Aliases, epRec.TaskAliases, epRec.EndpointIP.AsSlice(),
|
||||
"handleEpTableEvent")
|
||||
if err != nil {
|
||||
logger.WithError(err).Error("failed adding service binding")
|
||||
return
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// This is a remote container simply attached to an attachable network
|
||||
if err := c.delContainerNameResolution(nid, eid, containerName, taskAliases, ip, "handleEpTableEvent"); err != nil {
|
||||
log.G(context.TODO()).Errorf("failed removing container name resolution for %s epRec:%v err:%v", eid, epRec, err)
|
||||
err := c.addContainerNameResolution(nid, eid, epRec.Name, epRec.TaskAliases,
|
||||
epRec.EndpointIP.AsSlice(), "handleEpTableEvent")
|
||||
if err != nil {
|
||||
logger.WithError(err).Errorf("failed adding container name resolution")
|
||||
}
|
||||
}
|
||||
case networkdb.UpdateEvent:
|
||||
log.G(context.TODO()).Debugf("handleEpTableEvent UPD %s R:%v", eid, epRec)
|
||||
// We currently should only get these to inform us that an endpoint
|
||||
// is disabled. Report if otherwise.
|
||||
if svcID == "" || !epRec.ServiceDisabled {
|
||||
log.G(context.TODO()).Errorf("Unexpected update table event for %s epRec:%v", eid, epRec)
|
||||
return
|
||||
}
|
||||
// This is a remote task that is part of a service that is now disabled
|
||||
if err := c.rmServiceBinding(svcName, svcID, nid, eid, containerName, vip, ingressPorts, serviceAliases, taskAliases, ip, "handleEpTableEvent", true, false); err != nil {
|
||||
log.G(context.TODO()).Errorf("failed disabling service binding for %s epRec:%v err:%v", eid, epRec, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
93
libnetwork/agent_test.go
Normal file
93
libnetwork/agent_test.go
Normal file
@@ -0,0 +1,93 @@
|
||||
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
|
||||
//go:build go1.23
|
||||
|
||||
package libnetwork
|
||||
|
||||
import (
|
||||
"net/netip"
|
||||
"slices"
|
||||
"testing"
|
||||
|
||||
"gotest.tools/v3/assert"
|
||||
)
|
||||
|
||||
func TestEndpointEvent_EquivalentTo(t *testing.T) {
|
||||
assert.Check(t, (&endpointEvent{}).EquivalentTo(&endpointEvent{}))
|
||||
|
||||
a := endpointEvent{
|
||||
EndpointRecord: EndpointRecord{
|
||||
Name: "foo",
|
||||
ServiceName: "bar",
|
||||
ServiceID: "baz",
|
||||
IngressPorts: []*PortConfig{
|
||||
{
|
||||
Protocol: ProtocolTCP,
|
||||
TargetPort: 80,
|
||||
},
|
||||
{
|
||||
Name: "dns",
|
||||
Protocol: ProtocolUDP,
|
||||
TargetPort: 5353,
|
||||
PublishedPort: 53,
|
||||
},
|
||||
},
|
||||
},
|
||||
VirtualIP: netip.MustParseAddr("10.0.0.42"),
|
||||
EndpointIP: netip.MustParseAddr("192.168.69.42"),
|
||||
}
|
||||
assert.Check(t, a.EquivalentTo(&a))
|
||||
|
||||
reflexiveEquiv := func(a, b *endpointEvent) bool {
|
||||
t.Helper()
|
||||
assert.Check(t, a.EquivalentTo(b) == b.EquivalentTo(a), "reflexive equivalence")
|
||||
return a.EquivalentTo(b)
|
||||
}
|
||||
|
||||
b := a
|
||||
b.ServiceDisabled = true
|
||||
assert.Check(t, !reflexiveEquiv(&a, &b), "differing by ServiceDisabled")
|
||||
|
||||
c := a
|
||||
c.IngressPorts = slices.Clone(a.IngressPorts)
|
||||
slices.Reverse(c.IngressPorts)
|
||||
assert.Check(t, reflexiveEquiv(&a, &c), "IngressPorts order should not matter")
|
||||
|
||||
d := a
|
||||
d.IngressPorts = append(d.IngressPorts, a.IngressPorts[0])
|
||||
assert.Check(t, !reflexiveEquiv(&a, &d), "Differing number of copies of IngressPort entries should not be equivalent")
|
||||
d.IngressPorts = a.IngressPorts[:1]
|
||||
assert.Check(t, !reflexiveEquiv(&a, &d), "Removing an IngressPort entry should not be equivalent")
|
||||
|
||||
e := a
|
||||
e.Aliases = []string{"alias1", "alias2"}
|
||||
assert.Check(t, !reflexiveEquiv(&a, &e), "Differing Aliases should not be equivalent")
|
||||
|
||||
f := a
|
||||
f.TaskAliases = []string{"taskalias1", "taskalias2"}
|
||||
assert.Check(t, !reflexiveEquiv(&a, &f), "Adding TaskAliases should not be equivalent")
|
||||
g := a
|
||||
g.TaskAliases = []string{"taskalias2", "taskalias1"}
|
||||
assert.Check(t, reflexiveEquiv(&f, &g), "TaskAliases order should not matter")
|
||||
g.TaskAliases = g.TaskAliases[:1]
|
||||
assert.Check(t, !reflexiveEquiv(&f, &g), "Differing number of TaskAliases should not be equivalent")
|
||||
|
||||
h := a
|
||||
h.EndpointIP = netip.MustParseAddr("192.168.69.43")
|
||||
assert.Check(t, !reflexiveEquiv(&a, &h), "Differing EndpointIP should not be equivalent")
|
||||
|
||||
i := a
|
||||
i.VirtualIP = netip.MustParseAddr("10.0.0.69")
|
||||
assert.Check(t, !reflexiveEquiv(&a, &i), "Differing VirtualIP should not be equivalent")
|
||||
|
||||
j := a
|
||||
j.ServiceID = "qux"
|
||||
assert.Check(t, !reflexiveEquiv(&a, &j), "Differing ServiceID should not be equivalent")
|
||||
|
||||
k := a
|
||||
k.ServiceName = "quux"
|
||||
assert.Check(t, !reflexiveEquiv(&a, &k), "Differing ServiceName should not be equivalent")
|
||||
|
||||
l := a
|
||||
l.Name = "aaaaa"
|
||||
assert.Check(t, !reflexiveEquiv(&a, &l), "Differing Name should not be equivalent")
|
||||
}
|
||||
@@ -232,7 +232,7 @@ func (h *Bitmap) IsSet(ordinal uint64) bool {
|
||||
}
|
||||
|
||||
// set/reset the bit
|
||||
func (h *Bitmap) set(ordinal, start, end uint64, any bool, release bool, serial bool) (uint64, error) {
|
||||
func (h *Bitmap) set(ordinal, start, end uint64, isAvailable bool, release bool, serial bool) (uint64, error) {
|
||||
var (
|
||||
bitPos uint64
|
||||
bytePos uint64
|
||||
@@ -248,7 +248,7 @@ func (h *Bitmap) set(ordinal, start, end uint64, any bool, release bool, serial
|
||||
if release {
|
||||
bytePos, bitPos = ordinalToPos(ordinal)
|
||||
} else {
|
||||
if any {
|
||||
if isAvailable {
|
||||
bytePos, bitPos, err = getAvailableFromCurrent(h.head, start, curr, end)
|
||||
ret = posToOrdinal(bytePos, bitPos)
|
||||
if err == nil {
|
||||
|
||||
@@ -80,13 +80,6 @@ func watchTableEntries(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
func handleTableEvents(tableName string, ch *events.Channel) {
|
||||
var (
|
||||
// nid string
|
||||
eid string
|
||||
value []byte
|
||||
isAdd bool
|
||||
)
|
||||
|
||||
log.G(context.TODO()).Infof("Started watching table:%s", tableName)
|
||||
for {
|
||||
select {
|
||||
@@ -95,27 +88,17 @@ func handleTableEvents(tableName string, ch *events.Channel) {
|
||||
return
|
||||
|
||||
case evt := <-ch.C:
|
||||
log.G(context.TODO()).Infof("Recevied new event on:%s", tableName)
|
||||
switch event := evt.(type) {
|
||||
case networkdb.CreateEvent:
|
||||
// nid = event.NetworkID
|
||||
eid = event.Key
|
||||
value = event.Value
|
||||
isAdd = true
|
||||
case networkdb.DeleteEvent:
|
||||
// nid = event.NetworkID
|
||||
eid = event.Key
|
||||
value = event.Value
|
||||
isAdd = false
|
||||
default:
|
||||
log.G(context.TODO()).Infof("Received new event on:%s", tableName)
|
||||
event, ok := evt.(networkdb.WatchEvent)
|
||||
if !ok {
|
||||
log.G(context.TODO()).Fatalf("Unexpected table event = %#v", event)
|
||||
}
|
||||
if isAdd {
|
||||
// log.G(ctx).Infof("Add %s %s", tableName, eid)
|
||||
clientWatchTable[tableName].entries[eid] = string(value)
|
||||
if event.Value != nil {
|
||||
// log.G(ctx).Infof("Add %s %s", tableName, event.Key)
|
||||
clientWatchTable[tableName].entries[event.Key] = string(event.Value)
|
||||
} else {
|
||||
// log.G(ctx).Infof("Del %s %s", tableName, eid)
|
||||
delete(clientWatchTable[tableName].entries, eid)
|
||||
// log.G(ctx).Infof("Del %s %s", tableName, event.Key)
|
||||
delete(clientWatchTable[tableName].entries, event.Key)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
14
libnetwork/cnmallocator/allocator_test.go
Normal file
14
libnetwork/cnmallocator/allocator_test.go
Normal file
@@ -0,0 +1,14 @@
|
||||
package cnmallocator
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"github.com/moby/swarmkit/v2/manager/allocator"
|
||||
"gotest.tools/v3/skip"
|
||||
)
|
||||
|
||||
func TestAllocator(t *testing.T) {
|
||||
skip.If(t, runtime.GOOS == "windows", "Allocator tests are hardcoded to use Linux network driver names")
|
||||
allocator.RunAllocatorTests(t, NewProvider(nil))
|
||||
}
|
||||
@@ -11,6 +11,6 @@ var initializers = map[string]func(driverapi.Registerer) error{
|
||||
}
|
||||
|
||||
// PredefinedNetworks returns the list of predefined network structures
|
||||
func PredefinedNetworks() []networkallocator.PredefinedNetworkData {
|
||||
func (*Provider) PredefinedNetworks() []networkallocator.PredefinedNetworkData {
|
||||
return nil
|
||||
}
|
||||
@@ -5,14 +5,15 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/containerd/log"
|
||||
"github.com/docker/docker/libnetwork/ipamapi"
|
||||
builtinIpam "github.com/docker/docker/libnetwork/ipams/builtin"
|
||||
nullIpam "github.com/docker/docker/libnetwork/ipams/null"
|
||||
"github.com/docker/docker/libnetwork/ipamutils"
|
||||
"github.com/moby/swarmkit/v2/log"
|
||||
"github.com/moby/swarmkit/v2/manager/allocator/networkallocator"
|
||||
)
|
||||
|
||||
func initIPAMDrivers(r ipamapi.Registerer, netConfig *NetworkConfig) error {
|
||||
func initIPAMDrivers(r ipamapi.Registerer, netConfig *networkallocator.Config) error {
|
||||
var addressPool []*ipamutils.NetworkToSplit
|
||||
var str strings.Builder
|
||||
str.WriteString("Subnetlist - ")
|
||||
@@ -36,7 +37,7 @@ func initIPAMDrivers(r ipamapi.Registerer, netConfig *NetworkConfig) error {
|
||||
return err
|
||||
}
|
||||
if addressPool != nil {
|
||||
log.G(context.TODO()).Infof("Swarm initialized global default address pool to: " + str.String())
|
||||
log.G(context.TODO()).Info("Swarm initialized global default address pool to: " + str.String())
|
||||
}
|
||||
|
||||
for _, fn := range [](func(ipamapi.Registerer) error){
|
||||
@@ -19,7 +19,7 @@ var initializers = map[string]func(driverapi.Registerer) error{
|
||||
}
|
||||
|
||||
// PredefinedNetworks returns the list of predefined network structures
|
||||
func PredefinedNetworks() []networkallocator.PredefinedNetworkData {
|
||||
func (*Provider) PredefinedNetworks() []networkallocator.PredefinedNetworkData {
|
||||
return []networkallocator.PredefinedNetworkData{
|
||||
{Name: "bridge", Driver: "bridge"},
|
||||
{Name: "host", Driver: "host"},
|
||||
@@ -14,7 +14,7 @@ var initializers = map[string]func(driverapi.Registerer) error{
|
||||
}
|
||||
|
||||
// PredefinedNetworks returns the list of predefined network structures
|
||||
func PredefinedNetworks() []networkallocator.PredefinedNetworkData {
|
||||
func (*Provider) PredefinedNetworks() []networkallocator.PredefinedNetworkData {
|
||||
return []networkallocator.PredefinedNetworkData{
|
||||
{Name: "nat", Driver: "nat"},
|
||||
}
|
||||
@@ -10,6 +10,6 @@ import (
|
||||
const initializers = nil
|
||||
|
||||
// PredefinedNetworks returns the list of predefined network structures
|
||||
func PredefinedNetworks() []networkallocator.PredefinedNetworkData {
|
||||
func (*Provider) PredefinedNetworks() []networkallocator.PredefinedNetworkData {
|
||||
return nil
|
||||
}
|
||||
@@ -30,13 +30,6 @@ func (d *manager) CreateNetwork(id string, option map[string]interface{}, nInfo
|
||||
return types.NotImplementedErrorf("not implemented")
|
||||
}
|
||||
|
||||
func (d *manager) EventNotify(etype driverapi.EventType, nid, tableName, key string, value []byte) {
|
||||
}
|
||||
|
||||
func (d *manager) DecodeTableEntry(tablename string, key string, value []byte) (string, map[string]string) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (d *manager) DeleteNetwork(nid string) error {
|
||||
return types.NotImplementedErrorf("not implemented")
|
||||
}
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"net"
|
||||
"strings"
|
||||
|
||||
"github.com/containerd/log"
|
||||
"github.com/docker/docker/libnetwork/driverapi"
|
||||
"github.com/docker/docker/libnetwork/drivers/remote"
|
||||
"github.com/docker/docker/libnetwork/drvregistry"
|
||||
@@ -15,7 +16,6 @@ import (
|
||||
"github.com/docker/docker/libnetwork/scope"
|
||||
"github.com/docker/docker/pkg/plugingetter"
|
||||
"github.com/moby/swarmkit/v2/api"
|
||||
"github.com/moby/swarmkit/v2/log"
|
||||
"github.com/moby/swarmkit/v2/manager/allocator/networkallocator"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
@@ -40,9 +40,6 @@ type cnmNetworkAllocator struct {
|
||||
// The driver registry for all internal and external network drivers.
|
||||
networkRegistry drvregistry.Networks
|
||||
|
||||
// The port allocator instance for allocating node ports
|
||||
portAllocator *portAllocator
|
||||
|
||||
// Local network state used by cnmNetworkAllocator to do network management.
|
||||
networks map[string]*network
|
||||
|
||||
@@ -87,27 +84,14 @@ type networkDriver struct {
|
||||
capability *driverapi.Capability
|
||||
}
|
||||
|
||||
// NetworkConfig is used to store network related cluster config in the Manager.
|
||||
type NetworkConfig struct {
|
||||
// DefaultAddrPool specifies default subnet pool for global scope networks
|
||||
DefaultAddrPool []string
|
||||
|
||||
// SubnetSize specifies the subnet size of the networks created from
|
||||
// the default subnet pool
|
||||
SubnetSize uint32
|
||||
|
||||
// VXLANUDPPort specifies the UDP port number for VXLAN traffic
|
||||
VXLANUDPPort uint32
|
||||
}
|
||||
|
||||
// New returns a new NetworkAllocator handle
|
||||
func New(pg plugingetter.PluginGetter, netConfig *NetworkConfig) (networkallocator.NetworkAllocator, error) {
|
||||
// NewAllocator returns a new NetworkAllocator handle
|
||||
func (p *Provider) NewAllocator(netConfig *networkallocator.Config) (networkallocator.NetworkAllocator, error) {
|
||||
na := &cnmNetworkAllocator{
|
||||
networks: make(map[string]*network),
|
||||
services: make(map[string]struct{}),
|
||||
tasks: make(map[string]struct{}),
|
||||
nodes: make(map[string]map[string]struct{}),
|
||||
pg: pg,
|
||||
pg: p.pg,
|
||||
}
|
||||
|
||||
for ntype, i := range initializers {
|
||||
@@ -115,23 +99,17 @@ func New(pg plugingetter.PluginGetter, netConfig *NetworkConfig) (networkallocat
|
||||
return nil, fmt.Errorf("failed to register %q network driver: %w", ntype, err)
|
||||
}
|
||||
}
|
||||
if err := remote.Register(&na.networkRegistry, pg); err != nil {
|
||||
if err := remote.Register(&na.networkRegistry, p.pg); err != nil {
|
||||
return nil, fmt.Errorf("failed to initialize network driver plugins: %w", err)
|
||||
}
|
||||
|
||||
if err := initIPAMDrivers(&na.ipamRegistry, netConfig); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := remoteipam.Register(&na.ipamRegistry, pg); err != nil {
|
||||
if err := remoteipam.Register(&na.ipamRegistry, p.pg); err != nil {
|
||||
return nil, fmt.Errorf("failed to initialize IPAM driver plugins: %w", err)
|
||||
}
|
||||
|
||||
pa, err := newPortAllocator()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
na.portAllocator = pa
|
||||
return na, nil
|
||||
}
|
||||
|
||||
@@ -209,11 +187,8 @@ func (na *cnmNetworkAllocator) Deallocate(n *api.Network) error {
|
||||
}
|
||||
|
||||
// AllocateService allocates all the network resources such as virtual
|
||||
// IP and ports needed by the service.
|
||||
// IP needed by the service.
|
||||
func (na *cnmNetworkAllocator) AllocateService(s *api.Service) (err error) {
|
||||
if err = na.portAllocator.serviceAllocatePorts(s); err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
na.DeallocateService(s)
|
||||
@@ -300,7 +275,7 @@ networkLoop:
|
||||
}
|
||||
|
||||
// DeallocateService de-allocates all the network resources such as
|
||||
// virtual IP and ports associated with the service.
|
||||
// virtual IP associated with the service.
|
||||
func (na *cnmNetworkAllocator) DeallocateService(s *api.Service) error {
|
||||
if s.Endpoint == nil {
|
||||
return nil
|
||||
@@ -316,7 +291,6 @@ func (na *cnmNetworkAllocator) DeallocateService(s *api.Service) error {
|
||||
}
|
||||
s.Endpoint.VirtualIPs = nil
|
||||
|
||||
na.portAllocator.serviceDeallocatePorts(s)
|
||||
delete(na.services, s.ID)
|
||||
|
||||
return nil
|
||||
@@ -373,19 +347,8 @@ func (na *cnmNetworkAllocator) IsTaskAllocated(t *api.Task) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// HostPublishPortsNeedUpdate returns true if the passed service needs
|
||||
// allocations for its published ports in host (non ingress) mode
|
||||
func (na *cnmNetworkAllocator) HostPublishPortsNeedUpdate(s *api.Service) bool {
|
||||
return na.portAllocator.hostPublishPortsNeedUpdate(s)
|
||||
}
|
||||
|
||||
// IsServiceAllocated returns false if the passed service needs to have network resources allocated/updated.
|
||||
func (na *cnmNetworkAllocator) IsServiceAllocated(s *api.Service, flags ...func(*networkallocator.ServiceAllocationOpts)) bool {
|
||||
var options networkallocator.ServiceAllocationOpts
|
||||
for _, flag := range flags {
|
||||
flag(&options)
|
||||
}
|
||||
|
||||
specNetworks := serviceNetworks(s)
|
||||
|
||||
// If endpoint mode is VIP and allocator does not have the
|
||||
@@ -447,10 +410,6 @@ func (na *cnmNetworkAllocator) IsServiceAllocated(s *api.Service, flags ...func(
|
||||
}
|
||||
}
|
||||
|
||||
if (s.Spec.Endpoint != nil && len(s.Spec.Endpoint.Ports) != 0) ||
|
||||
(s.Endpoint != nil && len(s.Endpoint.Ports) != 0) {
|
||||
return na.portAllocator.isPortsAllocatedOnInit(s, options.OnInit)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
789
libnetwork/cnmallocator/networkallocator_test.go
Normal file
789
libnetwork/cnmallocator/networkallocator_test.go
Normal file
@@ -0,0 +1,789 @@
|
||||
package cnmallocator
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/libnetwork/types"
|
||||
"github.com/moby/swarmkit/v2/api"
|
||||
"github.com/moby/swarmkit/v2/manager/allocator/networkallocator"
|
||||
"gotest.tools/v3/assert"
|
||||
is "gotest.tools/v3/assert/cmp"
|
||||
)
|
||||
|
||||
func newNetworkAllocator(t *testing.T) networkallocator.NetworkAllocator {
|
||||
na, err := (&Provider{}).NewAllocator(nil)
|
||||
assert.Check(t, err)
|
||||
assert.Check(t, na != nil)
|
||||
return na
|
||||
}
|
||||
|
||||
func TestNew(t *testing.T) {
|
||||
newNetworkAllocator(t)
|
||||
}
|
||||
|
||||
func TestAllocateInvalidIPAM(t *testing.T) {
|
||||
na := newNetworkAllocator(t)
|
||||
n := &api.Network{
|
||||
ID: "testID",
|
||||
Spec: api.NetworkSpec{
|
||||
Annotations: api.Annotations{
|
||||
Name: "test",
|
||||
},
|
||||
DriverConfig: &api.Driver{},
|
||||
IPAM: &api.IPAMOptions{
|
||||
Driver: &api.Driver{
|
||||
Name: "invalidipam,",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
err := na.Allocate(n)
|
||||
assert.Check(t, is.ErrorContains(err, ""))
|
||||
}
|
||||
|
||||
func TestAllocateInvalidDriver(t *testing.T) {
|
||||
na := newNetworkAllocator(t)
|
||||
n := &api.Network{
|
||||
ID: "testID",
|
||||
Spec: api.NetworkSpec{
|
||||
Annotations: api.Annotations{
|
||||
Name: "test",
|
||||
},
|
||||
DriverConfig: &api.Driver{
|
||||
Name: "invaliddriver",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := na.Allocate(n)
|
||||
assert.Check(t, is.ErrorContains(err, ""))
|
||||
}
|
||||
|
||||
func TestNetworkDoubleAllocate(t *testing.T) {
|
||||
na := newNetworkAllocator(t)
|
||||
n := &api.Network{
|
||||
ID: "testID",
|
||||
Spec: api.NetworkSpec{
|
||||
Annotations: api.Annotations{
|
||||
Name: "test",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := na.Allocate(n)
|
||||
assert.Check(t, err)
|
||||
|
||||
err = na.Allocate(n)
|
||||
assert.Check(t, is.ErrorContains(err, ""))
|
||||
}
|
||||
|
||||
func TestAllocateEmptyConfig(t *testing.T) {
|
||||
na1 := newNetworkAllocator(t)
|
||||
na2 := newNetworkAllocator(t)
|
||||
n1 := &api.Network{
|
||||
ID: "testID1",
|
||||
Spec: api.NetworkSpec{
|
||||
Annotations: api.Annotations{
|
||||
Name: "test1",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
n2 := &api.Network{
|
||||
ID: "testID2",
|
||||
Spec: api.NetworkSpec{
|
||||
Annotations: api.Annotations{
|
||||
Name: "test2",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := na1.Allocate(n1)
|
||||
assert.Check(t, err)
|
||||
assert.Check(t, n1.IPAM.Configs != nil)
|
||||
assert.Check(t, is.Equal(len(n1.IPAM.Configs), 1))
|
||||
assert.Check(t, is.Equal(n1.IPAM.Configs[0].Range, ""))
|
||||
assert.Check(t, is.Equal(len(n1.IPAM.Configs[0].Reserved), 0))
|
||||
|
||||
_, subnet11, err := net.ParseCIDR(n1.IPAM.Configs[0].Subnet)
|
||||
assert.Check(t, err)
|
||||
|
||||
gwip11 := net.ParseIP(n1.IPAM.Configs[0].Gateway)
|
||||
assert.Check(t, gwip11 != nil)
|
||||
|
||||
err = na1.Allocate(n2)
|
||||
assert.Check(t, err)
|
||||
assert.Check(t, n2.IPAM.Configs != nil)
|
||||
assert.Check(t, is.Equal(len(n2.IPAM.Configs), 1))
|
||||
assert.Check(t, is.Equal(n2.IPAM.Configs[0].Range, ""))
|
||||
assert.Check(t, is.Equal(len(n2.IPAM.Configs[0].Reserved), 0))
|
||||
|
||||
_, subnet21, err := net.ParseCIDR(n2.IPAM.Configs[0].Subnet)
|
||||
assert.Check(t, err)
|
||||
|
||||
gwip21 := net.ParseIP(n2.IPAM.Configs[0].Gateway)
|
||||
assert.Check(t, gwip21 != nil)
|
||||
|
||||
// Allocate n1 ans n2 with another allocator instance but in
|
||||
// intentionally reverse order.
|
||||
err = na2.Allocate(n2)
|
||||
assert.Check(t, err)
|
||||
assert.Check(t, n2.IPAM.Configs != nil)
|
||||
assert.Check(t, is.Equal(len(n2.IPAM.Configs), 1))
|
||||
assert.Check(t, is.Equal(n2.IPAM.Configs[0].Range, ""))
|
||||
assert.Check(t, is.Equal(len(n2.IPAM.Configs[0].Reserved), 0))
|
||||
|
||||
_, subnet22, err := net.ParseCIDR(n2.IPAM.Configs[0].Subnet)
|
||||
assert.Check(t, err)
|
||||
assert.Check(t, is.DeepEqual(subnet21, subnet22))
|
||||
|
||||
gwip22 := net.ParseIP(n2.IPAM.Configs[0].Gateway)
|
||||
assert.Check(t, is.DeepEqual(gwip21, gwip22))
|
||||
|
||||
err = na2.Allocate(n1)
|
||||
assert.Check(t, err)
|
||||
assert.Check(t, n1.IPAM.Configs != nil)
|
||||
assert.Check(t, is.Equal(len(n1.IPAM.Configs), 1))
|
||||
assert.Check(t, is.Equal(n1.IPAM.Configs[0].Range, ""))
|
||||
assert.Check(t, is.Equal(len(n1.IPAM.Configs[0].Reserved), 0))
|
||||
|
||||
_, subnet12, err := net.ParseCIDR(n1.IPAM.Configs[0].Subnet)
|
||||
assert.Check(t, err)
|
||||
assert.Check(t, is.DeepEqual(subnet11, subnet12))
|
||||
|
||||
gwip12 := net.ParseIP(n1.IPAM.Configs[0].Gateway)
|
||||
assert.Check(t, is.DeepEqual(gwip11, gwip12))
|
||||
}
|
||||
|
||||
func TestAllocateWithOneSubnet(t *testing.T) {
|
||||
na := newNetworkAllocator(t)
|
||||
n := &api.Network{
|
||||
ID: "testID",
|
||||
Spec: api.NetworkSpec{
|
||||
Annotations: api.Annotations{
|
||||
Name: "test",
|
||||
},
|
||||
DriverConfig: &api.Driver{},
|
||||
IPAM: &api.IPAMOptions{
|
||||
Driver: &api.Driver{},
|
||||
Configs: []*api.IPAMConfig{
|
||||
{
|
||||
Subnet: "192.168.1.0/24",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := na.Allocate(n)
|
||||
assert.Check(t, err)
|
||||
assert.Check(t, is.Equal(len(n.IPAM.Configs), 1))
|
||||
assert.Check(t, is.Equal(n.IPAM.Configs[0].Range, ""))
|
||||
assert.Check(t, is.Equal(len(n.IPAM.Configs[0].Reserved), 0))
|
||||
assert.Check(t, is.Equal(n.IPAM.Configs[0].Subnet, "192.168.1.0/24"))
|
||||
|
||||
ip := net.ParseIP(n.IPAM.Configs[0].Gateway)
|
||||
assert.Check(t, ip != nil)
|
||||
}
|
||||
|
||||
func TestAllocateWithOneSubnetGateway(t *testing.T) {
|
||||
na := newNetworkAllocator(t)
|
||||
n := &api.Network{
|
||||
ID: "testID",
|
||||
Spec: api.NetworkSpec{
|
||||
Annotations: api.Annotations{
|
||||
Name: "test",
|
||||
},
|
||||
DriverConfig: &api.Driver{},
|
||||
IPAM: &api.IPAMOptions{
|
||||
Driver: &api.Driver{},
|
||||
Configs: []*api.IPAMConfig{
|
||||
{
|
||||
Subnet: "192.168.1.0/24",
|
||||
Gateway: "192.168.1.1",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := na.Allocate(n)
|
||||
assert.Check(t, err)
|
||||
assert.Check(t, is.Equal(len(n.IPAM.Configs), 1))
|
||||
assert.Check(t, is.Equal(n.IPAM.Configs[0].Range, ""))
|
||||
assert.Check(t, is.Equal(len(n.IPAM.Configs[0].Reserved), 0))
|
||||
assert.Check(t, is.Equal(n.IPAM.Configs[0].Subnet, "192.168.1.0/24"))
|
||||
assert.Check(t, is.Equal(n.IPAM.Configs[0].Gateway, "192.168.1.1"))
|
||||
}
|
||||
|
||||
func TestAllocateWithOneSubnetInvalidGateway(t *testing.T) {
|
||||
na := newNetworkAllocator(t)
|
||||
n := &api.Network{
|
||||
ID: "testID",
|
||||
Spec: api.NetworkSpec{
|
||||
Annotations: api.Annotations{
|
||||
Name: "test",
|
||||
},
|
||||
DriverConfig: &api.Driver{},
|
||||
IPAM: &api.IPAMOptions{
|
||||
Driver: &api.Driver{},
|
||||
Configs: []*api.IPAMConfig{
|
||||
{
|
||||
Subnet: "192.168.1.0/24",
|
||||
Gateway: "192.168.2.1",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := na.Allocate(n)
|
||||
assert.Check(t, is.ErrorContains(err, ""))
|
||||
}
|
||||
|
||||
// TestAllocateWithSmallSubnet validates that /32 subnets don't produce an error,
|
||||
// as /31 and /32 subnets are supported by docker daemon, starting with
|
||||
// https://github.com/moby/moby/commit/3a938df4b570aad3bfb4d5342379582e872fc1a3,
|
||||
func TestAllocateWithSmallSubnet(t *testing.T) {
|
||||
na := newNetworkAllocator(t)
|
||||
n := &api.Network{
|
||||
ID: "testID",
|
||||
Spec: api.NetworkSpec{
|
||||
Annotations: api.Annotations{
|
||||
Name: "test",
|
||||
},
|
||||
DriverConfig: &api.Driver{},
|
||||
IPAM: &api.IPAMOptions{
|
||||
Driver: &api.Driver{},
|
||||
Configs: []*api.IPAMConfig{
|
||||
{
|
||||
Subnet: "1.1.1.1/32",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := na.Allocate(n)
|
||||
assert.Check(t, err)
|
||||
}
|
||||
|
||||
func TestAllocateWithTwoSubnetsNoGateway(t *testing.T) {
|
||||
na := newNetworkAllocator(t)
|
||||
n := &api.Network{
|
||||
ID: "testID",
|
||||
Spec: api.NetworkSpec{
|
||||
Annotations: api.Annotations{
|
||||
Name: "test",
|
||||
},
|
||||
DriverConfig: &api.Driver{},
|
||||
IPAM: &api.IPAMOptions{
|
||||
Driver: &api.Driver{},
|
||||
Configs: []*api.IPAMConfig{
|
||||
{
|
||||
Subnet: "192.168.1.0/24",
|
||||
},
|
||||
{
|
||||
Subnet: "192.168.2.0/24",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := na.Allocate(n)
|
||||
assert.Check(t, err)
|
||||
assert.Check(t, is.Equal(len(n.IPAM.Configs), 2))
|
||||
assert.Check(t, is.Equal(n.IPAM.Configs[0].Range, ""))
|
||||
assert.Check(t, is.Equal(len(n.IPAM.Configs[0].Reserved), 0))
|
||||
assert.Check(t, is.Equal(n.IPAM.Configs[0].Subnet, "192.168.1.0/24"))
|
||||
assert.Check(t, is.Equal(n.IPAM.Configs[1].Range, ""))
|
||||
assert.Check(t, is.Equal(len(n.IPAM.Configs[1].Reserved), 0))
|
||||
assert.Check(t, is.Equal(n.IPAM.Configs[1].Subnet, "192.168.2.0/24"))
|
||||
|
||||
ip := net.ParseIP(n.IPAM.Configs[0].Gateway)
|
||||
assert.Check(t, ip != nil)
|
||||
ip = net.ParseIP(n.IPAM.Configs[1].Gateway)
|
||||
assert.Check(t, ip != nil)
|
||||
}
|
||||
|
||||
func TestFree(t *testing.T) {
|
||||
na := newNetworkAllocator(t)
|
||||
n := &api.Network{
|
||||
ID: "testID",
|
||||
Spec: api.NetworkSpec{
|
||||
Annotations: api.Annotations{
|
||||
Name: "test",
|
||||
},
|
||||
DriverConfig: &api.Driver{},
|
||||
IPAM: &api.IPAMOptions{
|
||||
Driver: &api.Driver{},
|
||||
Configs: []*api.IPAMConfig{
|
||||
{
|
||||
Subnet: "192.168.1.0/24",
|
||||
Gateway: "192.168.1.1",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := na.Allocate(n)
|
||||
assert.Check(t, err)
|
||||
|
||||
err = na.Deallocate(n)
|
||||
assert.Check(t, err)
|
||||
|
||||
// Reallocate again to make sure it succeeds.
|
||||
err = na.Allocate(n)
|
||||
assert.Check(t, err)
|
||||
}
|
||||
|
||||
// TestAllocateTaskFree exercises task address allocation across two networks
// and two independent allocators: addresses must fall inside the correct
// subnets, a second allocator must reproduce the exact same assignments even
// when tasks are allocated in a different order, and freed addresses must be
// reusable.
func TestAllocateTaskFree(t *testing.T) {
	na1 := newNetworkAllocator(t)
	na2 := newNetworkAllocator(t)
	n1 := &api.Network{
		ID: "testID1",
		Spec: api.NetworkSpec{
			Annotations: api.Annotations{
				Name: "test1",
			},
			DriverConfig: &api.Driver{},
			IPAM: &api.IPAMOptions{
				Driver: &api.Driver{},
				Configs: []*api.IPAMConfig{
					{
						Subnet:  "192.168.1.0/24",
						Gateway: "192.168.1.1",
					},
				},
			},
		},
	}

	n2 := &api.Network{
		ID: "testID2",
		Spec: api.NetworkSpec{
			Annotations: api.Annotations{
				Name: "test2",
			},
			DriverConfig: &api.Driver{},
			IPAM: &api.IPAMOptions{
				Driver: &api.Driver{},
				Configs: []*api.IPAMConfig{
					{
						Subnet:  "192.168.2.0/24",
						Gateway: "192.168.2.1",
					},
				},
			},
		},
	}

	// Both tasks attach to both networks.
	task1 := &api.Task{
		Networks: []*api.NetworkAttachment{
			{
				Network: n1,
			},
			{
				Network: n2,
			},
		},
	}

	task2 := &api.Task{
		Networks: []*api.NetworkAttachment{
			{
				Network: n1,
			},
			{
				Network: n2,
			},
		},
	}

	err := na1.Allocate(n1)
	assert.Check(t, err)

	err = na1.Allocate(n2)
	assert.Check(t, err)

	// Task allocation must yield exactly one address per attached network.
	err = na1.AllocateTask(task1)
	assert.Check(t, err)
	assert.Check(t, is.Equal(len(task1.Networks[0].Addresses), 1))
	assert.Check(t, is.Equal(len(task1.Networks[1].Addresses), 1))

	_, subnet1, _ := net.ParseCIDR("192.168.1.0/24")
	_, subnet2, _ := net.ParseCIDR("192.168.2.0/24")

	// variable coding: network/task/allocator
	// (e.g. ip121 = network 1, task 2, allocator 1)
	ip111, _, err := net.ParseCIDR(task1.Networks[0].Addresses[0])
	assert.Check(t, err)

	ip211, _, err := net.ParseCIDR(task1.Networks[1].Addresses[0])
	assert.Check(t, err)

	assert.Check(t, is.Equal(subnet1.Contains(ip111), true))
	assert.Check(t, is.Equal(subnet2.Contains(ip211), true))

	err = na1.AllocateTask(task2)
	assert.Check(t, err)
	assert.Check(t, is.Equal(len(task2.Networks[0].Addresses), 1))
	assert.Check(t, is.Equal(len(task2.Networks[1].Addresses), 1))

	ip121, _, err := net.ParseCIDR(task2.Networks[0].Addresses[0])
	assert.Check(t, err)

	ip221, _, err := net.ParseCIDR(task2.Networks[1].Addresses[0])
	assert.Check(t, err)

	assert.Check(t, is.Equal(subnet1.Contains(ip121), true))
	assert.Check(t, is.Equal(subnet2.Contains(ip221), true))

	// Now allocate the same tasks in a second allocator
	// but intentionally in reverse order.
	err = na2.Allocate(n1)
	assert.Check(t, err)

	err = na2.Allocate(n2)
	assert.Check(t, err)

	err = na2.AllocateTask(task2)
	assert.Check(t, err)
	assert.Check(t, is.Equal(len(task2.Networks[0].Addresses), 1))
	assert.Check(t, is.Equal(len(task2.Networks[1].Addresses), 1))

	ip122, _, err := net.ParseCIDR(task2.Networks[0].Addresses[0])
	assert.Check(t, err)

	ip222, _, err := net.ParseCIDR(task2.Networks[1].Addresses[0])
	assert.Check(t, err)

	// The second allocator must hand out the exact same addresses.
	assert.Check(t, is.Equal(subnet1.Contains(ip122), true))
	assert.Check(t, is.Equal(subnet2.Contains(ip222), true))
	assert.Check(t, is.DeepEqual(ip121, ip122))
	assert.Check(t, is.DeepEqual(ip221, ip222))

	err = na2.AllocateTask(task1)
	assert.Check(t, err)
	assert.Check(t, is.Equal(len(task1.Networks[0].Addresses), 1))
	assert.Check(t, is.Equal(len(task1.Networks[1].Addresses), 1))

	ip112, _, err := net.ParseCIDR(task1.Networks[0].Addresses[0])
	assert.Check(t, err)

	ip212, _, err := net.ParseCIDR(task1.Networks[1].Addresses[0])
	assert.Check(t, err)

	assert.Check(t, is.Equal(subnet1.Contains(ip112), true))
	assert.Check(t, is.Equal(subnet2.Contains(ip212), true))
	assert.Check(t, is.DeepEqual(ip111, ip112))
	assert.Check(t, is.DeepEqual(ip211, ip212))

	// Deallocate task
	err = na1.DeallocateTask(task1)
	assert.Check(t, err)
	assert.Check(t, is.Equal(len(task1.Networks[0].Addresses), 0))
	assert.Check(t, is.Equal(len(task1.Networks[1].Addresses), 0))

	// Try allocation after free
	err = na1.AllocateTask(task1)
	assert.Check(t, err)
	assert.Check(t, is.Equal(len(task1.Networks[0].Addresses), 1))
	assert.Check(t, is.Equal(len(task1.Networks[1].Addresses), 1))

	ip111, _, err = net.ParseCIDR(task1.Networks[0].Addresses[0])
	assert.Check(t, err)

	ip211, _, err = net.ParseCIDR(task1.Networks[1].Addresses[0])
	assert.Check(t, err)

	assert.Check(t, is.Equal(subnet1.Contains(ip111), true))
	assert.Check(t, is.Equal(subnet2.Contains(ip211), true))

	err = na1.DeallocateTask(task1)
	assert.Check(t, err)
	assert.Check(t, is.Equal(len(task1.Networks[0].Addresses), 0))
	assert.Check(t, is.Equal(len(task1.Networks[1].Addresses), 0))

	// Try to free endpoints on an already freed task; this must not error.
	err = na1.DeallocateTask(task1)
	assert.Check(t, err)
}
|
||||
|
||||
// TestAllocateService checks service allocation: the attached network gets a
// default subnet and gateway, the service gets one virtual IP inside that
// subnet, and the endpoint spec is copied from the service spec. Ports stay
// untouched because port allocation is not this component's responsibility.
func TestAllocateService(t *testing.T) {
	na := newNetworkAllocator(t)
	n := &api.Network{
		ID: "testID",
		Spec: api.NetworkSpec{
			Annotations: api.Annotations{
				Name: "test",
			},
		},
	}

	s := &api.Service{
		ID: "testID1",
		Spec: api.ServiceSpec{
			Task: api.TaskSpec{
				Networks: []*api.NetworkAttachmentConfig{
					{
						Target: "testID",
					},
				},
			},
			Endpoint: &api.EndpointSpec{
				Ports: []*api.PortConfig{
					{
						Name:       "http",
						TargetPort: 80,
					},
					{
						Name:       "https",
						TargetPort: 443,
					},
				},
			},
		},
	}

	err := na.Allocate(n)
	assert.Check(t, err)
	// With no IPAM config in the spec, allocation picks a default subnet
	// with no range restriction and no reserved addresses.
	assert.Check(t, n.IPAM.Configs != nil)
	assert.Check(t, is.Equal(len(n.IPAM.Configs), 1))
	assert.Check(t, is.Equal(n.IPAM.Configs[0].Range, ""))
	assert.Check(t, is.Equal(len(n.IPAM.Configs[0].Reserved), 0))

	_, subnet, err := net.ParseCIDR(n.IPAM.Configs[0].Subnet)
	assert.Check(t, err)

	gwip := net.ParseIP(n.IPAM.Configs[0].Gateway)
	assert.Check(t, gwip != nil)

	err = na.AllocateService(s)
	assert.Check(t, err)
	assert.Check(t, is.Len(s.Endpoint.Ports, 0)) // Network allocator is not responsible for allocating ports.

	assert.Check(t, is.Equal(1, len(s.Endpoint.VirtualIPs)))

	assert.Check(t, is.DeepEqual(s.Endpoint.Spec, s.Spec.Endpoint))

	// The service VIP must come out of the network's subnet.
	ip, _, err := net.ParseCIDR(s.Endpoint.VirtualIPs[0].Addr)
	assert.Check(t, err)

	assert.Check(t, is.Equal(true, subnet.Contains(ip)))
}
|
||||
|
||||
// TestDeallocateServiceAllocateIngressMode verifies that a service with an
// ingress-mode published port can be allocated on an ingress network,
// deallocated (which clears both ports and virtual IPs), and then allocated
// again.
func TestDeallocateServiceAllocateIngressMode(t *testing.T) {
	na := newNetworkAllocator(t)

	n := &api.Network{
		ID: "testNetID1",
		Spec: api.NetworkSpec{
			Annotations: api.Annotations{
				Name: "test",
			},
			Ingress: true,
		},
	}

	err := na.Allocate(n)
	assert.Check(t, err)

	s := &api.Service{
		ID: "testID1",
		Spec: api.ServiceSpec{
			Endpoint: &api.EndpointSpec{
				Ports: []*api.PortConfig{
					{
						Name:          "some_tcp",
						TargetPort:    1234,
						PublishedPort: 1234,
						PublishMode:   api.PublishModeIngress,
					},
				},
			},
		},
		Endpoint: &api.Endpoint{},
	}

	// Pre-populate a VIP entry for the ingress network so AllocateService
	// fills in its address.
	s.Endpoint.VirtualIPs = append(s.Endpoint.VirtualIPs,
		&api.Endpoint_VirtualIP{NetworkID: n.ID})

	err = na.AllocateService(s)
	assert.Check(t, err)
	assert.Check(t, is.Len(s.Endpoint.VirtualIPs, 1))

	// Deallocation must clear both ports and virtual IPs.
	err = na.DeallocateService(s)
	assert.Check(t, err)
	assert.Check(t, is.Len(s.Endpoint.Ports, 0))
	assert.Check(t, is.Len(s.Endpoint.VirtualIPs, 0))
	// Allocate again.
	s.Endpoint.VirtualIPs = append(s.Endpoint.VirtualIPs,
		&api.Endpoint_VirtualIP{NetworkID: n.ID})

	err = na.AllocateService(s)
	assert.Check(t, err)
	assert.Check(t, is.Len(s.Endpoint.VirtualIPs, 1))
}
|
||||
|
||||
// TestServiceNetworkUpdate verifies that IsServiceAllocated tracks changes to
// a service's attached networks, and that re-running AllocateService adds or
// removes virtual IPs so they match the updated network list.
func TestServiceNetworkUpdate(t *testing.T) {
	na := newNetworkAllocator(t)

	n1 := &api.Network{
		ID: "testID1",
		Spec: api.NetworkSpec{
			Annotations: api.Annotations{
				Name: "test",
			},
		},
	}

	n2 := &api.Network{
		ID: "testID2",
		Spec: api.NetworkSpec{
			Annotations: api.Annotations{
				Name: "test2",
			},
		},
	}

	// Allocate both networks
	err := na.Allocate(n1)
	assert.Check(t, err)

	err = na.Allocate(n2)
	assert.Check(t, err)

	// Attach a network to a service spec and allocate a service
	s := &api.Service{
		ID: "testID1",
		Spec: api.ServiceSpec{
			Task: api.TaskSpec{
				Networks: []*api.NetworkAttachmentConfig{
					{
						Target: "testID1",
					},
				},
			},
			Endpoint: &api.EndpointSpec{
				Mode: api.ResolutionModeVirtualIP,
			},
		},
	}

	err = na.AllocateService(s)
	assert.Check(t, err)
	assert.Check(t, na.IsServiceAllocated(s))
	assert.Check(t, is.Len(s.Endpoint.VirtualIPs, 1))

	// Now update the same service with another network
	s.Spec.Task.Networks = append(s.Spec.Task.Networks, &api.NetworkAttachmentConfig{Target: "testID2"})

	assert.Check(t, !na.IsServiceAllocated(s))
	err = na.AllocateService(s)
	assert.Check(t, err)

	assert.Check(t, na.IsServiceAllocated(s))
	assert.Check(t, is.Len(s.Endpoint.VirtualIPs, 2))

	// Drop the second network from the spec again.
	s.Spec.Task.Networks = s.Spec.Task.Networks[:1]

	// Check if service needs update and allocate with updated service spec
	assert.Check(t, !na.IsServiceAllocated(s))

	err = na.AllocateService(s)
	assert.Check(t, err)
	assert.Check(t, na.IsServiceAllocated(s))
	assert.Check(t, is.Len(s.Endpoint.VirtualIPs, 1))

	s.Spec.Task.Networks = s.Spec.Task.Networks[:0]
	// Check if service needs update with all the networks removed and allocate with updated service spec
	assert.Check(t, !na.IsServiceAllocated(s))

	err = na.AllocateService(s)
	assert.Check(t, err)
	assert.Check(t, na.IsServiceAllocated(s))
	assert.Check(t, is.Len(s.Endpoint.VirtualIPs, 0))

	// Attach a network and allocate service
	s.Spec.Task.Networks = append(s.Spec.Task.Networks, &api.NetworkAttachmentConfig{Target: "testID2"})
	assert.Check(t, !na.IsServiceAllocated(s))

	err = na.AllocateService(s)
	assert.Check(t, err)

	assert.Check(t, na.IsServiceAllocated(s))
	assert.Check(t, is.Len(s.Endpoint.VirtualIPs, 1))
}
|
||||
|
||||
// mockIpam is a minimal, no-op IPAM driver implementation that records the
// options passed to RequestPool so tests can assert they were forwarded
// correctly by the allocator.
type mockIpam struct {
	// actualIpamOptions holds the options received by the last RequestPool call.
	actualIpamOptions map[string]string
}
|
||||
|
||||
// GetDefaultAddressSpaces returns the same fixed address space name for both
// the local and global scope.
func (a *mockIpam) GetDefaultAddressSpaces() (string, string, error) {
	return "defaultAS", "defaultAS", nil
}
|
||||
|
||||
// RequestPool records the supplied options (for later inspection by tests)
// and echoes the requested pool back as both the pool ID
// ("defaultAS/<pool>") and the allocated CIDR.
func (a *mockIpam) RequestPool(addressSpace, pool, subPool string, options map[string]string, v6 bool) (string, *net.IPNet, map[string]string, error) {
	a.actualIpamOptions = options

	// Parse error deliberately ignored: tests only pass valid CIDRs.
	poolCidr, _ := types.ParseCIDR(pool)
	return fmt.Sprintf("%s/%s", "defaultAS", pool), poolCidr, nil, nil
}
|
||||
|
||||
// ReleasePool is a no-op; the mock tracks no pool state.
func (a *mockIpam) ReleasePool(poolID string) error {
	return nil
}
|
||||
|
||||
// RequestAddress is a no-op; no address is ever handed out by the mock.
func (a *mockIpam) RequestAddress(poolID string, ip net.IP, opts map[string]string) (*net.IPNet, map[string]string, error) {
	return nil, nil, nil
}
|
||||
|
||||
// ReleaseAddress is a no-op; the mock tracks no address state.
func (a *mockIpam) ReleaseAddress(poolID string, ip net.IP) error {
	return nil
}
|
||||
|
||||
// IsBuiltIn reports the mock as a built-in driver.
func (a *mockIpam) IsBuiltIn() bool {
	return true
}
|
||||
|
||||
func TestCorrectlyPassIPAMOptions(t *testing.T) {
|
||||
var err error
|
||||
expectedIpamOptions := map[string]string{"network-name": "freddie"}
|
||||
|
||||
na := newNetworkAllocator(t)
|
||||
ipamDriver := &mockIpam{}
|
||||
|
||||
err = na.(*cnmNetworkAllocator).ipamRegistry.RegisterIpamDriver("mockipam", ipamDriver)
|
||||
assert.Check(t, err)
|
||||
|
||||
n := &api.Network{
|
||||
ID: "testID",
|
||||
Spec: api.NetworkSpec{
|
||||
Annotations: api.Annotations{
|
||||
Name: "test",
|
||||
},
|
||||
DriverConfig: &api.Driver{},
|
||||
IPAM: &api.IPAMOptions{
|
||||
Driver: &api.Driver{
|
||||
Name: "mockipam",
|
||||
Options: expectedIpamOptions,
|
||||
},
|
||||
Configs: []*api.IPAMConfig{
|
||||
{
|
||||
Subnet: "192.168.1.0/24",
|
||||
Gateway: "192.168.1.1",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
err = na.Allocate(n)
|
||||
|
||||
assert.Check(t, is.DeepEqual(expectedIpamOptions, ipamDriver.actualIpamOptions))
|
||||
assert.Check(t, err)
|
||||
}
|
||||
91
libnetwork/cnmallocator/provider.go
Normal file
91
libnetwork/cnmallocator/provider.go
Normal file
@@ -0,0 +1,91 @@
|
||||
package cnmallocator
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/libnetwork/driverapi"
|
||||
"github.com/docker/docker/libnetwork/drivers/overlay/overlayutils"
|
||||
"github.com/docker/docker/libnetwork/ipamapi"
|
||||
"github.com/docker/docker/pkg/plugingetter"
|
||||
"github.com/moby/swarmkit/v2/api"
|
||||
"github.com/moby/swarmkit/v2/manager/allocator/networkallocator"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// Provider validates network and IPAM drivers for swarm, backed by
// libnetwork's CNM (Container Network Model) built-in drivers and, when a
// plugin getter is configured, v2 plugins.
type Provider struct {
	// pg resolves driver plugins by name; it may be nil, in which case
	// only built-in drivers are accepted.
	pg plugingetter.PluginGetter
}

// Compile-time check that Provider satisfies networkallocator.Provider.
var _ networkallocator.Provider = &Provider{}
|
||||
|
||||
// NewProvider returns a new cnmallocator provider. pg is used to look up
// network and IPAM driver plugins; it may be nil, in which case plugin
// drivers are rejected during validation.
func NewProvider(pg plugingetter.PluginGetter) *Provider {
	return &Provider{pg: pg}
}
|
||||
|
||||
// ValidateIPAMDriver implements networkallocator.Provider. It accepts a nil
// driver (a default will be chosen), the built-in default IPAM driver, or a
// registered v2 IPAM plugin; anything else is rejected.
func (p *Provider) ValidateIPAMDriver(driver *api.Driver) error {
	if driver == nil {
		// It is ok to not specify the driver. We will choose
		// a default driver.
		return nil
	}

	if driver.Name == "" {
		return status.Errorf(codes.InvalidArgument, "driver name: if driver is specified name is required")
	}
	// The built-in default IPAM driver needs no plugin lookup.
	if strings.ToLower(driver.Name) == ipamapi.DefaultIPAM {
		return nil
	}
	return p.validatePluginDriver(driver, ipamapi.PluginEndpointType)
}
|
||||
|
||||
// ValidateIngressNetworkDriver implements networkallocator.Provider. Only
// the overlay driver is accepted for ingress networks; it is then subject to
// the same validation as any other network driver.
func (p *Provider) ValidateIngressNetworkDriver(driver *api.Driver) error {
	if driver != nil && driver.Name != "overlay" {
		return status.Errorf(codes.Unimplemented, "only overlay driver is currently supported for ingress network")
	}
	return p.ValidateNetworkDriver(driver)
}
|
||||
|
||||
// ValidateNetworkDriver implements networkallocator.Provider. A nil driver
// is accepted (a default will be chosen); otherwise the name must be
// non-empty and refer either to a built-in driver or to a registered v2
// network plugin.
func (p *Provider) ValidateNetworkDriver(driver *api.Driver) error {
	if driver == nil {
		// It is ok to not specify the driver. We will choose
		// a default driver.
		return nil
	}

	if driver.Name == "" {
		return status.Errorf(codes.InvalidArgument, "driver name: if driver is specified name is required")
	}

	// First check against the known drivers
	if IsBuiltInDriver(driver.Name) {
		return nil
	}

	return p.validatePluginDriver(driver, driverapi.NetworkPluginEndpointType)
}
|
||||
|
||||
// validatePluginDriver checks that driver refers to a usable plugin of the
// given endpoint type: the plugin getter must be configured, the plugin must
// resolve by name, and legacy (v1) plugins are rejected in swarm mode.
func (p *Provider) validatePluginDriver(driver *api.Driver, pluginType string) error {
	if p.pg == nil {
		// Without a plugin getter there is no way to resolve plugins.
		return status.Errorf(codes.InvalidArgument, "plugin %s not supported", driver.Name)
	}

	plug, err := p.pg.Get(driver.Name, pluginType, plugingetter.Lookup)
	if err != nil {
		return status.Errorf(codes.InvalidArgument, "error during lookup of plugin %s", driver.Name)
	}

	if plug.IsV1() {
		return status.Errorf(codes.InvalidArgument, "legacy plugin %s of type %s is not supported in swarm mode", driver.Name, pluginType)
	}

	return nil
}
|
||||
|
||||
// SetDefaultVXLANUDPPort configures the default UDP port used by the overlay
// driver for VXLAN tunnels.
func (p *Provider) SetDefaultVXLANUDPPort(port uint32) error {
	return overlayutils.ConfigVXLANUDPPort(port)
}
|
||||
31
libnetwork/cnmallocator/provider_test.go
Normal file
31
libnetwork/cnmallocator/provider_test.go
Normal file
@@ -0,0 +1,31 @@
|
||||
package cnmallocator
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/moby/swarmkit/v2/api"
|
||||
"github.com/moby/swarmkit/v2/testutils"
|
||||
"google.golang.org/grpc/codes"
|
||||
"gotest.tools/v3/assert"
|
||||
is "gotest.tools/v3/assert/cmp"
|
||||
)
|
||||
|
||||
// TestValidateDriver verifies validation behavior shared by the IPAM and
// network driver validators: a nil driver is accepted, while a driver with
// an empty name is rejected with codes.InvalidArgument.
func TestValidateDriver(t *testing.T) {
	p := NewProvider(nil)

	for _, tt := range []struct {
		name      string
		validator func(*api.Driver) error
	}{
		{"IPAM", p.ValidateIPAMDriver},
		{"Network", p.ValidateNetworkDriver},
	} {
		t.Run(tt.name, func(t *testing.T) {
			// Nil driver: a default will be chosen, so validation passes.
			assert.Check(t, tt.validator(nil))

			// A specified driver with an empty name must be rejected.
			err := tt.validator(&api.Driver{Name: ""})
			assert.Check(t, is.ErrorContains(err, ""))
			assert.Check(t, is.Equal(codes.InvalidArgument, testutils.ErrorCode(err)))
		})
	}
}
|
||||
@@ -1,5 +1,5 @@
|
||||
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
|
||||
//go:build go1.19
|
||||
//go:build go1.23
|
||||
|
||||
package config
|
||||
|
||||
|
||||
@@ -163,7 +163,7 @@ func New(cfgOptions ...config.Option) (*Controller, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
setupArrangeUserFilterRule(c)
|
||||
c.setupPlatformFirewall()
|
||||
return c, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -59,11 +59,20 @@ type Driver interface {
|
||||
// programming that was done so far
|
||||
RevokeExternalConnectivity(nid, eid string) error
|
||||
|
||||
// Type returns the type of this driver, the network type this driver manages
|
||||
Type() string
|
||||
|
||||
// IsBuiltIn returns true if it is a built-in driver
|
||||
IsBuiltIn() bool
|
||||
}
|
||||
|
||||
// TableWatcher is an optional interface for a network driver.
|
||||
type TableWatcher interface {
|
||||
// EventNotify notifies the driver when a CRUD operation has
|
||||
// happened on a table of its interest as soon as this node
|
||||
// receives such an event in the gossip layer. This method is
|
||||
// only invoked for the global scope driver.
|
||||
EventNotify(event EventType, nid string, tableName string, key string, value []byte)
|
||||
EventNotify(nid string, tableName string, key string, prev, value []byte)
|
||||
|
||||
// DecodeTableEntry passes the driver a key, value pair from table it registered
|
||||
// with libnetwork. Driver should return {object ID, map[string]string} tuple.
|
||||
@@ -74,12 +83,6 @@ type Driver interface {
|
||||
// For example: overlay driver returns the VTEP IP of the host that has the endpoint
|
||||
// which is shown in 'network inspect --verbose'
|
||||
DecodeTableEntry(tablename string, key string, value []byte) (string, map[string]string)
|
||||
|
||||
// Type returns the type of this driver, the network type this driver manages
|
||||
Type() string
|
||||
|
||||
// IsBuiltIn returns true if it is a built-in driver
|
||||
IsBuiltIn() bool
|
||||
}
|
||||
|
||||
// NetworkInfo provides a go interface for drivers to provide network
|
||||
@@ -170,18 +173,6 @@ type IPAMData struct {
|
||||
AuxAddresses map[string]*net.IPNet
|
||||
}
|
||||
|
||||
// EventType defines a type for the CRUD event
|
||||
type EventType uint8
|
||||
|
||||
const (
|
||||
// Create event is generated when a table entry is created,
|
||||
Create EventType = 1 + iota
|
||||
// Update event is generated when a table entry is updated.
|
||||
Update
|
||||
// Delete event is generated when a table entry is deleted.
|
||||
Delete
|
||||
)
|
||||
|
||||
// ObjectType represents the type of object driver wants to store in libnetwork's networkDB
|
||||
type ObjectType int
|
||||
|
||||
|
||||
@@ -213,32 +213,32 @@ func ValidateFixedCIDRV6(val string) error {
|
||||
|
||||
// Validate performs a static validation on the network configuration parameters.
|
||||
// Whatever can be assessed a priori before attempting any programming.
|
||||
func (c *networkConfiguration) Validate() error {
|
||||
if c.Mtu < 0 {
|
||||
return ErrInvalidMtu(c.Mtu)
|
||||
func (ncfg *networkConfiguration) Validate() error {
|
||||
if ncfg.Mtu < 0 {
|
||||
return ErrInvalidMtu(ncfg.Mtu)
|
||||
}
|
||||
|
||||
// If bridge v4 subnet is specified
|
||||
if c.AddressIPv4 != nil {
|
||||
if ncfg.AddressIPv4 != nil {
|
||||
// If default gw is specified, it must be part of bridge subnet
|
||||
if c.DefaultGatewayIPv4 != nil {
|
||||
if !c.AddressIPv4.Contains(c.DefaultGatewayIPv4) {
|
||||
if ncfg.DefaultGatewayIPv4 != nil {
|
||||
if !ncfg.AddressIPv4.Contains(ncfg.DefaultGatewayIPv4) {
|
||||
return &ErrInvalidGateway{}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if c.EnableIPv6 {
|
||||
if ncfg.EnableIPv6 {
|
||||
// If IPv6 is enabled, AddressIPv6 must have been configured.
|
||||
if c.AddressIPv6 == nil {
|
||||
if ncfg.AddressIPv6 == nil {
|
||||
return errdefs.System(errors.New("no IPv6 address was allocated for the bridge"))
|
||||
}
|
||||
// AddressIPv6 must be IPv6, and not overlap with the LL subnet prefix.
|
||||
if err := validateIPv6Subnet(c.AddressIPv6); err != nil {
|
||||
if err := validateIPv6Subnet(ncfg.AddressIPv6); err != nil {
|
||||
return err
|
||||
}
|
||||
// If a default gw is specified, it must belong to AddressIPv6's subnet
|
||||
if c.DefaultGatewayIPv6 != nil && !c.AddressIPv6.Contains(c.DefaultGatewayIPv6) {
|
||||
if ncfg.DefaultGatewayIPv6 != nil && !ncfg.AddressIPv6.Contains(ncfg.DefaultGatewayIPv6) {
|
||||
return &ErrInvalidGateway{}
|
||||
}
|
||||
}
|
||||
@@ -247,73 +247,73 @@ func (c *networkConfiguration) Validate() error {
|
||||
}
|
||||
|
||||
// Conflicts check if two NetworkConfiguration objects overlap
|
||||
func (c *networkConfiguration) Conflicts(o *networkConfiguration) error {
|
||||
func (ncfg *networkConfiguration) Conflicts(o *networkConfiguration) error {
|
||||
if o == nil {
|
||||
return errors.New("same configuration")
|
||||
}
|
||||
|
||||
// Also empty, because only one network with empty name is allowed
|
||||
if c.BridgeName == o.BridgeName {
|
||||
if ncfg.BridgeName == o.BridgeName {
|
||||
return errors.New("networks have same bridge name")
|
||||
}
|
||||
|
||||
// They must be in different subnets
|
||||
if (c.AddressIPv4 != nil && o.AddressIPv4 != nil) &&
|
||||
(c.AddressIPv4.Contains(o.AddressIPv4.IP) || o.AddressIPv4.Contains(c.AddressIPv4.IP)) {
|
||||
if (ncfg.AddressIPv4 != nil && o.AddressIPv4 != nil) &&
|
||||
(ncfg.AddressIPv4.Contains(o.AddressIPv4.IP) || o.AddressIPv4.Contains(ncfg.AddressIPv4.IP)) {
|
||||
return errors.New("networks have overlapping IPv4")
|
||||
}
|
||||
|
||||
// They must be in different v6 subnets
|
||||
if (c.AddressIPv6 != nil && o.AddressIPv6 != nil) &&
|
||||
(c.AddressIPv6.Contains(o.AddressIPv6.IP) || o.AddressIPv6.Contains(c.AddressIPv6.IP)) {
|
||||
if (ncfg.AddressIPv6 != nil && o.AddressIPv6 != nil) &&
|
||||
(ncfg.AddressIPv6.Contains(o.AddressIPv6.IP) || o.AddressIPv6.Contains(ncfg.AddressIPv6.IP)) {
|
||||
return errors.New("networks have overlapping IPv6")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *networkConfiguration) fromLabels(labels map[string]string) error {
|
||||
func (ncfg *networkConfiguration) fromLabels(labels map[string]string) error {
|
||||
var err error
|
||||
for label, value := range labels {
|
||||
switch label {
|
||||
case BridgeName:
|
||||
c.BridgeName = value
|
||||
ncfg.BridgeName = value
|
||||
case netlabel.DriverMTU:
|
||||
if c.Mtu, err = strconv.Atoi(value); err != nil {
|
||||
if ncfg.Mtu, err = strconv.Atoi(value); err != nil {
|
||||
return parseErr(label, value, err.Error())
|
||||
}
|
||||
case netlabel.EnableIPv6:
|
||||
if c.EnableIPv6, err = strconv.ParseBool(value); err != nil {
|
||||
if ncfg.EnableIPv6, err = strconv.ParseBool(value); err != nil {
|
||||
return parseErr(label, value, err.Error())
|
||||
}
|
||||
case EnableIPMasquerade:
|
||||
if c.EnableIPMasquerade, err = strconv.ParseBool(value); err != nil {
|
||||
if ncfg.EnableIPMasquerade, err = strconv.ParseBool(value); err != nil {
|
||||
return parseErr(label, value, err.Error())
|
||||
}
|
||||
case EnableICC:
|
||||
if c.EnableICC, err = strconv.ParseBool(value); err != nil {
|
||||
if ncfg.EnableICC, err = strconv.ParseBool(value); err != nil {
|
||||
return parseErr(label, value, err.Error())
|
||||
}
|
||||
case InhibitIPv4:
|
||||
if c.InhibitIPv4, err = strconv.ParseBool(value); err != nil {
|
||||
if ncfg.InhibitIPv4, err = strconv.ParseBool(value); err != nil {
|
||||
return parseErr(label, value, err.Error())
|
||||
}
|
||||
case DefaultBridge:
|
||||
if c.DefaultBridge, err = strconv.ParseBool(value); err != nil {
|
||||
if ncfg.DefaultBridge, err = strconv.ParseBool(value); err != nil {
|
||||
return parseErr(label, value, err.Error())
|
||||
}
|
||||
case DefaultBindingIP:
|
||||
if c.DefaultBindingIP = net.ParseIP(value); c.DefaultBindingIP == nil {
|
||||
if ncfg.DefaultBindingIP = net.ParseIP(value); ncfg.DefaultBindingIP == nil {
|
||||
return parseErr(label, value, "nil ip")
|
||||
}
|
||||
case netlabel.ContainerIfacePrefix:
|
||||
c.ContainerIfacePrefix = value
|
||||
ncfg.ContainerIfacePrefix = value
|
||||
case netlabel.HostIPv4:
|
||||
if c.HostIPv4 = net.ParseIP(value); c.HostIPv4 == nil {
|
||||
if ncfg.HostIPv4 = net.ParseIP(value); ncfg.HostIPv4 == nil {
|
||||
return parseErr(label, value, "nil ip")
|
||||
}
|
||||
case netlabel.HostIPv6:
|
||||
if c.HostIPv6 = net.ParseIP(value); c.HostIPv6 == nil {
|
||||
if ncfg.HostIPv6 = net.ParseIP(value); ncfg.HostIPv6 == nil {
|
||||
return parseErr(label, value, "nil ip")
|
||||
}
|
||||
}
|
||||
@@ -483,6 +483,8 @@ func (d *driver) configure(option map[string]interface{}) error {
|
||||
d.config = config
|
||||
d.Unlock()
|
||||
|
||||
iptables.OnReloaded(d.handleFirewalldReload)
|
||||
|
||||
return d.initStore(option)
|
||||
}
|
||||
|
||||
@@ -528,7 +530,7 @@ func parseNetworkGenericOptions(data interface{}) (*networkConfiguration, error)
|
||||
return config, err
|
||||
}
|
||||
|
||||
func (c *networkConfiguration) processIPAM(id string, ipamV4Data, ipamV6Data []driverapi.IPAMData) error {
|
||||
func (ncfg *networkConfiguration) processIPAM(id string, ipamV4Data, ipamV6Data []driverapi.IPAMData) error {
|
||||
if len(ipamV4Data) > 1 || len(ipamV6Data) > 1 {
|
||||
return types.ForbiddenErrorf("bridge driver doesn't support multiple subnets")
|
||||
}
|
||||
@@ -538,22 +540,22 @@ func (c *networkConfiguration) processIPAM(id string, ipamV4Data, ipamV6Data []d
|
||||
}
|
||||
|
||||
if ipamV4Data[0].Gateway != nil {
|
||||
c.AddressIPv4 = types.GetIPNetCopy(ipamV4Data[0].Gateway)
|
||||
ncfg.AddressIPv4 = types.GetIPNetCopy(ipamV4Data[0].Gateway)
|
||||
}
|
||||
|
||||
if gw, ok := ipamV4Data[0].AuxAddresses[DefaultGatewayV4AuxKey]; ok {
|
||||
c.DefaultGatewayIPv4 = gw.IP
|
||||
ncfg.DefaultGatewayIPv4 = gw.IP
|
||||
}
|
||||
|
||||
if len(ipamV6Data) > 0 {
|
||||
c.AddressIPv6 = ipamV6Data[0].Pool
|
||||
ncfg.AddressIPv6 = ipamV6Data[0].Pool
|
||||
|
||||
if ipamV6Data[0].Gateway != nil {
|
||||
c.AddressIPv6 = types.GetIPNetCopy(ipamV6Data[0].Gateway)
|
||||
ncfg.AddressIPv6 = types.GetIPNetCopy(ipamV6Data[0].Gateway)
|
||||
}
|
||||
|
||||
if gw, ok := ipamV6Data[0].AuxAddresses[DefaultGatewayV6AuxKey]; ok {
|
||||
c.DefaultGatewayIPv6 = gw.IP
|
||||
ncfg.DefaultGatewayIPv6 = gw.IP
|
||||
}
|
||||
}
|
||||
|
||||
@@ -623,13 +625,6 @@ func (d *driver) NetworkFree(id string) error {
|
||||
return types.NotImplementedErrorf("not implemented")
|
||||
}
|
||||
|
||||
func (d *driver) EventNotify(etype driverapi.EventType, nid, tableName, key string, value []byte) {
|
||||
}
|
||||
|
||||
func (d *driver) DecodeTableEntry(tablename string, key string, value []byte) (string, map[string]string) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// Create a new network using bridge plugin
|
||||
func (d *driver) CreateNetwork(id string, option map[string]interface{}, nInfo driverapi.NetworkInfo, ipV4Data, ipV6Data []driverapi.IPAMData) error {
|
||||
if len(ipV4Data) == 0 || ipV4Data[0].Pool.String() == "0.0.0.0/0" {
|
||||
@@ -800,12 +795,6 @@ func (d *driver) createNetwork(config *networkConfiguration) (err error) {
|
||||
// Setup IP6Tables.
|
||||
{config.EnableIPv6 && d.config.EnableIP6Tables, network.setupIP6Tables},
|
||||
|
||||
// We want to track firewalld configuration so that
|
||||
// if it is started/reloaded, the rules can be applied correctly
|
||||
{d.config.EnableIPTables, network.setupFirewalld},
|
||||
// same for IPv6
|
||||
{config.EnableIPv6 && d.config.EnableIP6Tables, network.setupFirewalld6},
|
||||
|
||||
// Setup DefaultGatewayIPv4
|
||||
{config.DefaultGatewayIPv4 != nil, setupGatewayIPv4},
|
||||
|
||||
@@ -1287,16 +1276,15 @@ func (d *driver) Leave(nid, eid string) error {
|
||||
return EndpointNotFoundError(eid)
|
||||
}
|
||||
|
||||
if !network.config.EnableICC {
|
||||
if err = d.link(network, endpoint, false); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *driver) ProgramExternalConnectivity(nid, eid string, options map[string]interface{}) error {
|
||||
// Make sure the network isn't deleted, or the in middle of a firewalld reload, while
|
||||
// updating its iptables rules.
|
||||
d.configNetwork.Lock()
|
||||
defer d.configNetwork.Unlock()
|
||||
|
||||
network, err := d.getNetwork(nid)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -1348,6 +1336,11 @@ func (d *driver) ProgramExternalConnectivity(nid, eid string, options map[string
|
||||
}
|
||||
|
||||
func (d *driver) RevokeExternalConnectivity(nid, eid string) error {
|
||||
// Make sure this function isn't deleting iptables rules while handleFirewalldReloadNw
|
||||
// is restoring those same rules.
|
||||
d.configNetwork.Lock()
|
||||
defer d.configNetwork.Unlock()
|
||||
|
||||
network, err := d.getNetwork(nid)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -1378,9 +1371,88 @@ func (d *driver) RevokeExternalConnectivity(nid, eid string) error {
|
||||
return fmt.Errorf("failed to update bridge endpoint %.7s to store: %v", endpoint.id, err)
|
||||
}
|
||||
|
||||
if !network.config.EnableICC {
|
||||
if err = d.link(network, endpoint, false); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *driver) handleFirewalldReload() {
|
||||
if !d.config.EnableIPTables && !d.config.EnableIP6Tables {
|
||||
return
|
||||
}
|
||||
|
||||
d.Lock()
|
||||
nids := make([]string, 0, len(d.networks))
|
||||
for _, nw := range d.networks {
|
||||
nids = append(nids, nw.id)
|
||||
}
|
||||
d.Unlock()
|
||||
|
||||
for _, nid := range nids {
|
||||
d.handleFirewalldReloadNw(nid)
|
||||
}
|
||||
}
|
||||
|
||||
func (d *driver) handleFirewalldReloadNw(nid string) {
|
||||
// Make sure the network isn't being deleted, and ProgramExternalConnectivity/RevokeExternalConnectivity
|
||||
// aren't modifying iptables rules, while restoring the rules.
|
||||
d.configNetwork.Lock()
|
||||
defer d.configNetwork.Unlock()
|
||||
|
||||
nw, err := d.getNetwork(nid)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if d.config.EnableIPTables {
|
||||
if err := nw.setupIP4Tables(nw.config, nw.bridge); err != nil {
|
||||
log.G(context.TODO()).WithFields(log.Fields{
|
||||
"network": nw.id,
|
||||
"error": err,
|
||||
}).Warn("Failed to restore IPv4 per-port iptables rules on firewalld reload")
|
||||
}
|
||||
}
|
||||
if d.config.EnableIP6Tables {
|
||||
if err := nw.setupIP6Tables(nw.config, nw.bridge); err != nil {
|
||||
log.G(context.TODO()).WithFields(log.Fields{
|
||||
"network": nw.id,
|
||||
"error": err,
|
||||
}).Warn("Failed to restore IPv6 per-port iptables rules on firewalld reload")
|
||||
}
|
||||
}
|
||||
nw.portMapper.ReMapAll()
|
||||
|
||||
// Restore the inter-network connectivity (INC) rules.
|
||||
if err := nw.isolateNetwork(true); err != nil {
|
||||
log.G(context.TODO()).WithFields(log.Fields{
|
||||
"network": nw.id,
|
||||
"error": err,
|
||||
}).Warn("Failed to restore inter-network iptables rules on firewalld reload")
|
||||
}
|
||||
|
||||
// Re-add legacy links - only added during ProgramExternalConnectivity, but legacy
|
||||
// links are default-bridge-only, and it's not possible to connect a container to
|
||||
// the default bridge and a user-defined network. So, the default bridge is always
|
||||
// the gateway and, if there are legacy links configured they need to be set up.
|
||||
if !nw.config.EnableICC {
|
||||
nw.Lock()
|
||||
defer nw.Unlock()
|
||||
for _, ep := range nw.endpoints {
|
||||
if err := d.link(nw, ep, true); err != nil {
|
||||
log.G(context.Background()).WithFields(log.Fields{
|
||||
"nid": nw.id,
|
||||
"eid": ep.id,
|
||||
"error": err,
|
||||
}).Error("Failed to re-create link on firewalld reload")
|
||||
}
|
||||
}
|
||||
}
|
||||
log.G(context.TODO()).Info("Restored iptables rules on firewalld reload")
|
||||
}
|
||||
|
||||
func (d *driver) link(network *bridgeNetwork, endpoint *bridgeEndpoint, enable bool) (retErr error) {
|
||||
cc := endpoint.containerConfig
|
||||
ec := endpoint.extConnConfig
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user