Mirror of https://github.com/moby/moby.git
Synced 2026-01-13 03:31:39 +00:00

Compare commits: docker-v29 ... docker-29.
87 commits
| SHA1 |
|---|
| 08440b6ee8 |
| b0e62060b0 |
| 515dbc8c71 |
| adf3073cb6 |
| 8b2c317218 |
| 3eca177282 |
| c4f4c6765e |
| f942bce11a |
| a1f7fff7a9 |
| 0e600c7fc4 |
| 734bb626e4 |
| 5eaae6db52 |
| 8ebb104e36 |
| fbf3ed25f8 |
| 518779c90b |
| ecf8643fe1 |
| 5a99e1d1a4 |
| bae170eeb7 |
| 8f33623c5d |
| bdc1e7b0fe |
| 298e2f7d52 |
| 3376758770 |
| bb2e099c3a |
| 5898ee60f4 |
| 05a5be917d |
| ab55325b58 |
| 5c69198edd |
| cbaccdaf6d |
| 09d5128bff |
| b54adb2d03 |
| de45c2ae4f |
| 4212eb0abf |
| 6f9d1ec3fb |
| f132381992 |
| 81d930f527 |
| 7000f92763 |
| 69963d84f8 |
| 43ed81ed85 |
| bced6f6100 |
| 4b8f9dd251 |
| e4f1408738 |
| 5ecc72679d |
| 7687298e0a |
| 45be1a39b3 |
| a828af4d8d |
| 616e53c12b |
| 587d38292b |
| f97f234729 |
| a1836eb283 |
| 2e3a23c8ec |
| 4ff8942d0d |
| 69c4ea7aad |
| 3964729182 |
| 8c0751aa4d |
| 7517464283 |
| 2faf258d4d |
| 310aa9241a |
| 52fae09ec0 |
| 955650b33f |
| 9a84135d52 |
| 56e8e43339 |
| 83f00e9f2b |
| 14a955db2f |
| 4612690e23 |
| 6280a80f32 |
| 9cbafeac46 |
| 1fa8a31556 |
| 3c6e5f0f5a |
| e9ff10bf36 |
| 7faaa44e18 |
| e9f9d7a81e |
| 28665176e5 |
| 43f91f775a |
| bb0d79cb1a |
| 198b5e3ed5 |
| 2ad480ccf5 |
| cb6c1c3aca |
| 2a18530fb2 |
| 14c4e0d73a |
| d23fd38f8b |
| 3076530aa6 |
| 7a3cdd2c86 |
| d7b6f3a7d3 |
| 7f5694cda1 |
| 0e2d804e48 |
| 7242ccd7a0 |
| b6705d5e1a |
.github/workflows/.dco.yml (vendored) — 2 lines changed

@@ -25,7 +25,7 @@ jobs:
  steps:
  -
  name: Checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@v6
  with:
  fetch-depth: 0
  -
.github/workflows/.test-unit.yml (vendored) — 10 lines changed

@@ -16,7 +16,7 @@ on:
  workflow_call:

  env:
- GO_VERSION: "1.25.4"
+ GO_VERSION: "1.25.5"
  GOTESTLIST_VERSION: v0.3.1
  TESTSTAT_VERSION: v0.1.25
  SETUP_BUILDX_VERSION: edge

@@ -36,7 +36,7 @@ jobs:
  steps:
  -
  name: Checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@v6
  -
  name: Set up runner
  uses: ./.github/actions/setup-runner

@@ -87,7 +87,7 @@ jobs:
  -
  name: Upload reports
  if: always()
- uses: actions/upload-artifact@v4
+ uses: actions/upload-artifact@v5
  with:
  name: test-reports-unit--${{ matrix.mode }}
  path: /tmp/reports/*

@@ -103,13 +103,13 @@ jobs:
  steps:
  -
  name: Set up Go
- uses: actions/setup-go@v5
+ uses: actions/setup-go@v6
  with:
  go-version: ${{ env.GO_VERSION }}
  cache: false
  -
  name: Download reports
- uses: actions/download-artifact@v4
+ uses: actions/download-artifact@v6
  with:
  pattern: test-reports-unit-*
  path: /tmp/reports
.github/workflows/.test.yml (vendored) — 28 lines changed

@@ -21,7 +21,7 @@ on:
  default: "graphdriver"

  env:
- GO_VERSION: "1.25.4"
+ GO_VERSION: "1.25.5"
  GOTESTLIST_VERSION: v0.3.1
  TESTSTAT_VERSION: v0.1.25
  ITG_CLI_MATRIX_SIZE: 6

@@ -39,7 +39,7 @@ jobs:
  steps:
  -
  name: Checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@v6
  -
  name: Set up runner
  uses: ./.github/actions/setup-runner

@@ -82,7 +82,7 @@ jobs:
  -
  name: Upload reports
  if: always()
- uses: actions/upload-artifact@v4
+ uses: actions/upload-artifact@v5
  with:
  name: test-reports-docker-py-${{ inputs.storage }}
  path: /tmp/reports/*

@@ -95,7 +95,7 @@ jobs:
  steps:
  -
  name: Checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@v6
  -
  name: Set up runner
  uses: ./.github/actions/setup-runner

@@ -169,7 +169,7 @@ jobs:
  steps:
  -
  name: Checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@v6
  -
  name: Set up runner
  uses: ./.github/actions/setup-runner

@@ -250,7 +250,7 @@ jobs:
  -
  name: Upload reports
  if: always()
- uses: actions/upload-artifact@v4
+ uses: actions/upload-artifact@v5
  with:
  name: test-reports-integration-${{ inputs.storage }}-${{ env.TESTREPORTS_NAME }}
  path: /tmp/reports/*

@@ -266,13 +266,13 @@ jobs:
  steps:
  -
  name: Set up Go
- uses: actions/setup-go@v5
+ uses: actions/setup-go@v6
  with:
  go-version: ${{ env.GO_VERSION }}
  cache: false
  -
  name: Download reports
- uses: actions/download-artifact@v4
+ uses: actions/download-artifact@v6
  with:
  path: /tmp/reports
  pattern: test-reports-integration-${{ inputs.storage }}-*

@@ -295,10 +295,10 @@ jobs:
  steps:
  -
  name: Checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@v6
  -
  name: Set up Go
- uses: actions/setup-go@v5
+ uses: actions/setup-go@v6
  with:
  go-version: ${{ env.GO_VERSION }}
  cache: false

@@ -393,7 +393,7 @@ jobs:
  steps:
  -
  name: Checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@v6
  -
  name: Set up runner
  uses: ./.github/actions/setup-runner

@@ -466,7 +466,7 @@ jobs:
  -
  name: Upload reports
  if: always()
- uses: actions/upload-artifact@v4
+ uses: actions/upload-artifact@v5
  with:
  name: test-reports-integration-cli-${{ inputs.storage }}-${{ matrix.mode }}-${{ env.TESTREPORTS_NAME }}
  path: /tmp/reports/*

@@ -482,13 +482,13 @@ jobs:
  steps:
  -
  name: Set up Go
- uses: actions/setup-go@v5
+ uses: actions/setup-go@v6
  with:
  go-version: ${{ env.GO_VERSION }}
  cache: false
  -
  name: Download reports
- uses: actions/download-artifact@v4
+ uses: actions/download-artifact@v6
  with:
  path: /tmp/reports
  pattern: test-reports-integration-cli-${{ inputs.storage }}-${{ matrix.mode }}-*
.github/workflows/.vm.yml (vendored) — 10 lines changed

@@ -20,7 +20,7 @@ on:
  type: string

  env:
- GO_VERSION: "1.25.4"
+ GO_VERSION: "1.25.5"
  TESTSTAT_VERSION: v0.1.25

  jobs:

@@ -39,7 +39,7 @@ jobs:
  steps:
  -
  name: Checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@v6
  -
  name: Set up Lima
  uses: lima-vm/lima-actions/setup@03b96d61959e83b2c737e44162c3088e81de0886 # v1.0.1

@@ -167,7 +167,7 @@ jobs:
  -
  name: Upload reports
  if: always()
- uses: actions/upload-artifact@v4
+ uses: actions/upload-artifact@v5
  with:
  name: test-reports-integration-${{ env.TESTREPORTS_NAME }}
  path: /tmp/reports/*

@@ -183,7 +183,7 @@ jobs:
  steps:
  -
  name: Set up Go
- uses: actions/setup-go@v5
+ uses: actions/setup-go@v6
  with:
  go-version: ${{ env.GO_VERSION }}
  cache: false

@@ -192,7 +192,7 @@ jobs:
  run: echo "TESTREPORTS_NAME=$(basename ${{ inputs.template }})*" >> $GITHUB_ENV
  -
  name: Download reports
- uses: actions/download-artifact@v4
+ uses: actions/download-artifact@v6
  with:
  path: /tmp/reports
  pattern: test-reports-integration-${{ env.TESTREPORTS_NAME }}
.github/workflows/.windows.yml (vendored) — 30 lines changed

@@ -28,7 +28,7 @@ on:
  default: false

  env:
- GO_VERSION: "1.25.4"
+ GO_VERSION: "1.25.5"
  GOTESTLIST_VERSION: v0.3.1
  TESTSTAT_VERSION: v0.1.25
  WINDOWS_BASE_IMAGE: mcr.microsoft.com/windows/servercore

@@ -53,7 +53,7 @@ jobs:
  steps:
  -
  name: Checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@v6
  with:
  path: ${{ env.GOPATH }}/src/github.com/docker/docker
  -

@@ -98,7 +98,7 @@ jobs:
  docker cp "${{ env.TEST_CTN_NAME }}`:c`:\containerd\bin\containerd-shim-runhcs-v1.exe" ${{ env.BIN_OUT }}\
  -
  name: Upload artifacts
- uses: actions/upload-artifact@v4
+ uses: actions/upload-artifact@v5
  with:
  name: build-${{ inputs.storage }}-${{ inputs.os }}
  path: ${{ env.BIN_OUT }}/*

@@ -117,7 +117,7 @@ jobs:
  steps:
  -
  name: Checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@v6
  with:
  path: ${{ env.GOPATH }}/src/github.com/docker/docker
  -

@@ -166,7 +166,7 @@ jobs:
  -
  name: Upload reports
  if: always()
- uses: actions/upload-artifact@v4
+ uses: actions/upload-artifact@v5
  with:
  name: ${{ inputs.os }}-${{ inputs.storage }}-unit-reports
  path: ${{ env.GOPATH }}\src\github.com\docker\docker\bundles\*

@@ -181,13 +181,13 @@ jobs:
  steps:
  -
  name: Set up Go
- uses: actions/setup-go@v5
+ uses: actions/setup-go@v6
  with:
  go-version: ${{ env.GO_VERSION }}
  cache: false
  -
  name: Download artifacts
- uses: actions/download-artifact@v4
+ uses: actions/download-artifact@v6
  with:
  name: ${{ inputs.os }}-${{ inputs.storage }}-unit-reports
  path: /tmp/artifacts

@@ -208,10 +208,10 @@ jobs:
  steps:
  -
  name: Checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@v6
  -
  name: Set up Go
- uses: actions/setup-go@v5
+ uses: actions/setup-go@v6
  with:
  go-version: ${{ env.GO_VERSION }}
  cache: false

@@ -264,12 +264,12 @@ jobs:
  steps:
  -
  name: Checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@v6
  with:
  path: ${{ env.GOPATH }}/src/github.com/docker/docker
  -
  name: Set up Go
- uses: actions/setup-go@v5
+ uses: actions/setup-go@v6
  with:
  go-version: ${{ env.GO_VERSION }}
  cache: false

@@ -292,7 +292,7 @@ jobs:
  Get-ChildItem Env: | Out-String
  -
  name: Download artifacts
- uses: actions/download-artifact@v4
+ uses: actions/download-artifact@v6
  with:
  name: build-${{ inputs.storage }}-${{ inputs.os }}
  path: ${{ env.BIN_OUT }}

@@ -469,7 +469,7 @@ jobs:
  -
  name: Upload reports
  if: always()
- uses: actions/upload-artifact@v4
+ uses: actions/upload-artifact@v5
  with:
  name: ${{ inputs.os }}-${{ inputs.storage }}-integration-reports-${{ matrix.runtime }}-${{ env.TESTREPORTS_NAME }}
  path: ${{ env.GOPATH }}\src\github.com\docker\docker\bundles\*

@@ -496,13 +496,13 @@ jobs:
  steps:
  -
  name: Set up Go
- uses: actions/setup-go@v5
+ uses: actions/setup-go@v6
  with:
  go-version: ${{ env.GO_VERSION }}
  cache: false
  -
  name: Download reports
- uses: actions/download-artifact@v4
+ uses: actions/download-artifact@v6
  with:
  path: /tmp/reports
  pattern: ${{ inputs.os }}-${{ inputs.storage }}-integration-reports-${{ matrix.runtime }}-*
.github/workflows/arm64.yml (vendored) — 18 lines changed

@@ -23,7 +23,7 @@ on:
  pull_request:

  env:
- GO_VERSION: "1.25.4"
+ GO_VERSION: "1.25.5"
  TESTSTAT_VERSION: v0.1.25
  DESTDIR: ./build
  SETUP_BUILDX_VERSION: edge

@@ -101,7 +101,7 @@ jobs:
  steps:
  -
  name: Checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@v6
  -
  name: Set up runner
  uses: ./.github/actions/setup-runner

@@ -146,7 +146,7 @@ jobs:
  -
  name: Upload reports
  if: always()
- uses: actions/upload-artifact@v4
+ uses: actions/upload-artifact@v5
  with:
  name: test-reports-unit-arm64-graphdriver
  path: /tmp/reports/*

@@ -162,13 +162,13 @@ jobs:
  steps:
  -
  name: Set up Go
- uses: actions/setup-go@v5
+ uses: actions/setup-go@v6
  with:
  go-version: ${{ env.GO_VERSION }}
  cache: false
  -
  name: Download reports
- uses: actions/download-artifact@v4
+ uses: actions/download-artifact@v6
  with:
  pattern: test-reports-unit-arm64-*
  path: /tmp/reports

@@ -191,7 +191,7 @@ jobs:
  steps:
  -
  name: Checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@v6
  -
  name: Set up runner
  uses: ./.github/actions/setup-runner

@@ -250,7 +250,7 @@ jobs:
  -
  name: Upload reports
  if: always()
- uses: actions/upload-artifact@v4
+ uses: actions/upload-artifact@v5
  with:
  name: test-reports-integration-arm64-graphdriver
  path: /tmp/reports/*

@@ -266,13 +266,13 @@ jobs:
  steps:
  -
  name: Set up Go
- uses: actions/setup-go@v5
+ uses: actions/setup-go@v6
  with:
  go-version: ${{ env.GO_VERSION }}
  cache: false
  -
  name: Download reports
- uses: actions/download-artifact@v4
+ uses: actions/download-artifact@v6
  with:
  path: /tmp/reports
  pattern: test-reports-integration-arm64-*
.github/workflows/bin-image.yml (vendored) — 14 lines changed

@@ -49,7 +49,7 @@ jobs:
  steps:
  -
  name: Checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@v6
  -
  name: Docker meta
  id: meta

@@ -83,7 +83,7 @@ jobs:
  mv "${bakeFile#cwd://}" "/tmp/bake-meta.json"
  -
  name: Upload meta bake definition
- uses: actions/upload-artifact@v4
+ uses: actions/upload-artifact@v5
  with:
  name: bake-meta
  path: /tmp/bake-meta.json

@@ -109,7 +109,7 @@ jobs:
  steps:
  -
  name: Checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@v6
  with:
  fetch-depth: 0
  -

@@ -119,7 +119,7 @@ jobs:
  echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
  -
  name: Download meta bake definition
- uses: actions/download-artifact@v4
+ uses: actions/download-artifact@v6
  with:
  name: bake-meta
  path: /tmp

@@ -164,7 +164,7 @@ jobs:
  -
  name: Upload digest
  if: github.event_name != 'pull_request' && github.repository == 'moby/moby'
- uses: actions/upload-artifact@v4
+ uses: actions/upload-artifact@v5
  with:
  name: digests-${{ env.PLATFORM_PAIR }}
  path: /tmp/digests/*

@@ -180,13 +180,13 @@ jobs:
  steps:
  -
  name: Download meta bake definition
- uses: actions/download-artifact@v4
+ uses: actions/download-artifact@v6
  with:
  name: bake-meta
  path: /tmp
  -
  name: Download digests
- uses: actions/download-artifact@v4
+ uses: actions/download-artifact@v6
  with:
  path: /tmp/digests
  pattern: digests-*
.github/workflows/buildkit.yml (vendored) — 28 lines changed

@@ -23,7 +23,7 @@ on:
  pull_request:

  env:
- GO_VERSION: "1.25.4"
+ GO_VERSION: "1.25.5"
  DESTDIR: ./build
  SETUP_BUILDX_VERSION: edge
  SETUP_BUILDKIT_IMAGE: moby/buildkit:latest

@@ -53,7 +53,7 @@ jobs:
  targets: binary
  -
  name: Upload artifacts
- uses: actions/upload-artifact@v4
+ uses: actions/upload-artifact@v5
  with:
  name: binary
  path: ${{ env.DESTDIR }}

@@ -100,12 +100,12 @@ jobs:
  uses: crazy-max/ghaction-github-runtime@v3
  -
  name: Checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@v6
  with:
  path: moby
  -
  name: Set up Go
- uses: actions/setup-go@v5
+ uses: actions/setup-go@v6
  with:
  go-version: ${{ env.GO_VERSION }}
  cache: false

@@ -116,7 +116,7 @@ jobs:
  working-directory: moby
  -
  name: Checkout BuildKit ${{ env.BUILDKIT_REF }}
- uses: actions/checkout@v4
+ uses: actions/checkout@v6
  with:
  repository: ${{ env.BUILDKIT_REPO }}
  ref: ${{ env.BUILDKIT_REF }}

@@ -133,7 +133,7 @@ jobs:
  buildkitd-flags: --debug
  -
  name: Download binary artifacts
- uses: actions/download-artifact@v4
+ uses: actions/download-artifact@v6
  with:
  name: binary
  path: ./buildkit/build/moby/

@@ -184,7 +184,7 @@ jobs:
  working-directory: ${{ env.GOPATH }}/src/github.com/docker/docker
  steps:
  - name: Checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@v6
  with:
  path: ${{ env.GOPATH }}/src/github.com/docker/docker

@@ -199,7 +199,7 @@ jobs:
  echo "WINDOWS_BASE_IMAGE_TAG=${{ env.WINDOWS_BASE_TAG_2022 }}" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append

  - name: Set up Go
- uses: actions/setup-go@v5
+ uses: actions/setup-go@v6
  with:
  go-version: ${{ env.GO_VERSION }}
  cache: false

@@ -225,7 +225,7 @@ jobs:
  go install github.com/distribution/distribution/v3/cmd/registry@latest

  - name: Checkout BuildKit
- uses: actions/checkout@v4
+ uses: actions/checkout@v6
  with:
  repository: moby/buildkit
  ref: master

@@ -248,7 +248,7 @@ jobs:
  cp ${{ env.GOPATH }}\bin\buildctl.exe ${{ env.BIN_OUT }}

  - name: Upload artifacts
- uses: actions/upload-artifact@v4
+ uses: actions/upload-artifact@v5
  with:
  name: build-windows
  path: ${{ env.BIN_OUT }}/*

@@ -307,12 +307,12 @@ jobs:
  uses: crazy-max/ghaction-github-runtime@v3

  - name: Checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@v6
  with:
  path: moby

  - name: Set up Go
- uses: actions/setup-go@v5
+ uses: actions/setup-go@v6
  with:
  go-version: ${{ env.GO_VERSION }}
  cache: false

@@ -324,14 +324,14 @@ jobs:
  working-directory: moby

  - name: Checkout BuildKit ${{ env.BUILDKIT_REF }}
- uses: actions/checkout@v4
+ uses: actions/checkout@v6
  with:
  repository: ${{ env.BUILDKIT_REPO }}
  ref: ${{ env.BUILDKIT_REF }}
  path: buildkit

  - name: Download Moby artifacts
- uses: actions/download-artifact@v4
+ uses: actions/download-artifact@v6
  with:
  name: build-windows
  path: ${{ env.BIN_OUT }}
.github/workflows/ci.yml (vendored) — 2 lines changed

@@ -75,7 +75,7 @@ jobs:
  steps:
  -
  name: Checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@v6
  -
  name: Create matrix
  id: platforms
.github/workflows/codeql.yml (vendored) — 6 lines changed

@@ -34,7 +34,7 @@ on:
  - cron: '0 9 * * 4'

  env:
- GO_VERSION: "1.25.4"
+ GO_VERSION: "1.25.5"

  jobs:
  codeql:

@@ -47,11 +47,11 @@ jobs:

  steps:
  - name: Checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@v6
  with:
  fetch-depth: 2
  - name: Set up Go
- uses: actions/setup-go@v5
+ uses: actions/setup-go@v6
  with:
  go-version: ${{ env.GO_VERSION }}
  cache: false
.github/workflows/test.yml (vendored) — 34 lines changed

@@ -23,7 +23,7 @@ on:
  pull_request:

  env:
- GO_VERSION: "1.25.4"
+ GO_VERSION: "1.25.5"
  GIT_PAGER: "cat"
  PAGER: "cat"
  SETUP_BUILDX_VERSION: edge

@@ -67,7 +67,14 @@ jobs:
  set: |
  *.cache-from=type=gha,scope=dev${{ matrix.mode }}
  *.cache-to=type=gha,scope=dev${{ matrix.mode }}
- *.output=type=cacheonly
+ ${{ matrix.mode == '' && '*.output=type=docker,dest=/tmp/dev-image.tar' || '*.output=type=cacheonly' }}
+ -
+ name: Cache dev image
+ if: matrix.mode == ''
+ uses: actions/cache/save@v4
+ with:
+ key: dev-image-${{ github.run_id }}
+ path: /tmp/dev-image.tar

  test:
  if: ${{ github.event_name != 'pull_request' || !contains(github.event.pull_request.labels.*.name, 'ci/validate-only') }}

@@ -103,7 +110,7 @@ jobs:
  steps:
  -
  name: Checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@v6
  -
  name: Create matrix
  id: scripts

@@ -122,13 +129,12 @@ jobs:
  - validate-prepare
  - build-dev
  strategy:
  fail-fast: false
  matrix:
  script: ${{ fromJson(needs.validate-prepare.outputs.matrix) }}
  steps:
  -
  name: Checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@v6
  with:
  fetch-depth: 0
  -

@@ -139,15 +145,19 @@ jobs:
  uses: docker/setup-buildx-action@v3
  with:
  version: ${{ env.SETUP_BUILDX_VERSION }}
- driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
+ driver: docker
  buildkitd-flags: --debug
  -
- name: Build dev image
- uses: docker/bake-action@v6
+ name: Restore dev image
+ uses: actions/cache/restore@v4
  with:
- targets: dev
- set: |
- dev.cache-from=type=gha,scope=dev
+ key: dev-image-${{ github.run_id }}
+ path: /tmp/dev-image.tar
+ fail-on-cache-miss: true
+ -
+ name: Load dev image
+ run: |
+ docker load -i /tmp/dev-image.tar
  -
  name: Validate
  run: |

@@ -164,7 +174,7 @@ jobs:
  steps:
  -
  name: Checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@v6
  -
  name: Create matrix
  id: platforms
.github/workflows/validate-pr.yml (vendored) — 19 lines changed

@@ -81,13 +81,24 @@ jobs:
  - name: Check release branch
  id: title_branch
  run: |
  # If PR targets a different branch than master, the PR title should mention the target branch in square brackets, for example:
  # [27.1 backport] Some change that needs backporting to 27.1
  # [27.1] Change directly targeting the 27.1 branch
+ # [docker-29.x] Change directly targeting the docker-29.x branch
+ # [docker-29.x backport] Some change that needs backporting to docker-29.x

- # get the intended major version prefix ("[27.1 backport]" -> "27.") from the PR title.
- [[ "$PR_TITLE" =~ ^\[([0-9]*\.)[^]]*\] ]] && branch="${BASH_REMATCH[1]}"
+ target_branch=$(echo "$PR_TITLE" | sed -nE 's/^\[([^]]+)\].*/\1/p' | sed 's/ backport$//')

- # get major version prefix from the release branch ("27.x -> "27.")
- [[ "$GITHUB_BASE_REF" =~ ^([0-9]*\.) ]] && target_branch="${BASH_REMATCH[1]}" || target_branch="$GITHUB_BASE_REF"
+ echo "target_branch: $target_branch"
+ echo "GITHUB_BASE_REF: $GITHUB_BASE_REF"

- if [[ "$target_branch" != "$branch" ]] && ! [[ "$GITHUB_BASE_REF" == "master" && "$branch" == "" ]]; then
+ # If the PR is opened against the master branch and the target branch is not specified, exit early.
+ if [[ "$GITHUB_BASE_REF" == "master" && "$target_branch" == "" ]]; then
+ exit 0
+ fi

+ if [[ "$target_branch" != "$GITHUB_BASE_REF" ]]; then
  echo "::error::PR is opened against the $GITHUB_BASE_REF branch, but its title suggests otherwise."
  exit 1
  fi
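The rewritten check extracts the bracketed branch name from the PR title and compares it directly against `$GITHUB_BASE_REF`. Purely as an illustration of what the new shell pipeline computes (this Go snippet is not part of the change; the function name and sample titles are made up for the example):

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// targetBranch mirrors the shell pipeline in the new check: it pulls the
// leading "[...]" out of a PR title and drops a trailing " backport" marker.
// It returns "" when the title has no bracketed prefix.
func targetBranch(prTitle string) string {
	re := regexp.MustCompile(`^\[([^]]+)\]`)
	m := re.FindStringSubmatch(prTitle)
	if m == nil {
		return ""
	}
	return strings.TrimSuffix(m[1], " backport")
}

func main() {
	fmt.Println(targetBranch("[docker-29.x backport] Fix port mapping")) // docker-29.x
	fmt.Println(targetBranch("[27.1] Change targeting the 27.1 branch")) // 27.1
	fmt.Println(targetBranch("Regular change against master"))           // (empty)
}
```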
@@ -3,7 +3,7 @@ version: "2"
  run:
  # prevent golangci-lint from deducting the go version to lint for through go.mod,
  # which causes it to fallback to go1.17 semantics.
- go: "1.25.4"
+ go: "1.25.5"
  concurrency: 2
  # Only supported with go modules enabled (build flag -mod=vendor only valid when using modules)
  # modules-download-mode: vendor

@@ -1,6 +1,6 @@
  # syntax=docker/dockerfile:1

- ARG GO_VERSION=1.25.4
+ ARG GO_VERSION=1.25.5
  ARG BASE_DEBIAN_DISTRO="bookworm"
  ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}"

@@ -21,7 +21,7 @@ ARG DOCKERCLI_INTEGRATION_REPOSITORY="https://github.com/docker/cli.git"
  ARG DOCKERCLI_INTEGRATION_VERSION=v25.0.5

  # BUILDX_VERSION is the version of buildx to install in the dev container.
- ARG BUILDX_VERSION=0.29.1
+ ARG BUILDX_VERSION=0.30.1

  # COMPOSE_VERSION is the version of compose to install in the dev container.
  ARG COMPOSE_VERSION=v2.40.0

@@ -254,7 +254,7 @@ RUN git init . && git remote add origin "https://github.com/opencontainers/runc.
  # This version should usually match the version that is used by the containerd version
  # that is used. If you need to update runc, open a pull request in the containerd
  # project first, and update both after that is merged.
- ARG RUNC_VERSION=v1.3.3
+ ARG RUNC_VERSION=v1.3.4
  RUN git fetch -q --depth 1 origin "${RUNC_VERSION}" +refs/tags/*:refs/tags/* && git checkout -q FETCH_HEAD

  FROM base AS runc-build

@@ -5,7 +5,7 @@

  # This represents the bare minimum required to build and test Docker.

- ARG GO_VERSION=1.25.4
+ ARG GO_VERSION=1.25.5

  ARG BASE_DEBIAN_DISTRO="bookworm"
  ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}"

@@ -161,7 +161,7 @@ FROM ${WINDOWS_BASE_IMAGE}:${WINDOWS_BASE_IMAGE_TAG}
  # Use PowerShell as the default shell
  SHELL ["powershell", "-Command", "$ErrorActionPreference = 'Stop'; $ProgressPreference = 'SilentlyContinue';"]

- ARG GO_VERSION=1.25.4
+ ARG GO_VERSION=1.25.5

  # GOTESTSUM_VERSION is the version of gotest.tools/gotestsum to install.
  ARG GOTESTSUM_VERSION=v1.13.0

@@ -556,4 +556,5 @@ if ! command -v "cmd_entrypoint_${command}" > /dev/null 2>&1; then
  fi

  # main
+ shift
  "cmd_entrypoint_${command}" "$@"
@@ -8,6 +8,7 @@ import (

  "github.com/moby/go-archive"
  "github.com/moby/go-archive/chrootarchive"
+ "github.com/moby/go-archive/compression"
  containertypes "github.com/moby/moby/api/types/container"
  "github.com/moby/moby/api/types/events"
  "github.com/moby/moby/v2/daemon/container"

@@ -278,7 +279,7 @@ func (daemon *Daemon) containerCopy(container *container.Container, resource str
  filter = []string{f}
  }
  archv, err := chrootarchive.Tar(basePath, &archive.TarOptions{
- Compression: archive.Uncompressed,
+ Compression: compression.None,
  IncludeFiles: filter,
  }, container.BaseFS)
  if err != nil {
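The same substitution — `archive.Uncompressed` replaced by `compression.None` from `github.com/moby/go-archive/compression` — repeats across the hunks that follow. A minimal sketch of the new call shape, using only the calls shown in these diffs (the helper name and directory argument are illustrative):

```go
package example

import (
	"io"

	"github.com/moby/go-archive"
	"github.com/moby/go-archive/compression"
)

// tarDir produces an uncompressed tar stream of dir, using the
// compression.None constant that replaces archive.Uncompressed.
func tarDir(dir string) (int64, error) {
	rc, err := archive.Tar(dir, compression.None)
	if err != nil {
		return 0, err
	}
	defer rc.Close()
	// Consume the stream here; real callers write it to a file, an HTTP
	// response, or an image store.
	return io.Copy(io.Discard, rc)
}
```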
@@ -7,6 +7,7 @@ import (

  "github.com/moby/buildkit/frontend/dockerfile/instructions"
  "github.com/moby/go-archive"
+ "github.com/moby/go-archive/compression"
  "github.com/moby/moby/v2/daemon/builder/remotecontext"
  "github.com/moby/sys/reexec"
  "gotest.tools/v3/assert"

@@ -105,7 +106,7 @@ func TestDispatch(t *testing.T) {
  createTestTempFile(t, contextDir, filename, content, 0o777)
  }

- tarStream, err := archive.Tar(contextDir, archive.Uncompressed)
+ tarStream, err := archive.Tar(contextDir, compression.None)
  if err != nil {
  t.Fatalf("Error when creating tar stream: %s", err)
  }

@@ -7,6 +7,7 @@ import (
  "testing"

  "github.com/moby/go-archive"
+ "github.com/moby/go-archive/compression"
  "github.com/moby/moby/api/types/container"
  "github.com/moby/moby/api/types/network"
  "github.com/moby/moby/v2/daemon/builder"

@@ -61,7 +62,7 @@ func readAndCheckDockerfile(t *testing.T, testName, contextDir, dockerfilePath,
  if runtime.GOOS != "windows" {
  skip.If(t, os.Getuid() != 0, "skipping test that requires root")
  }
- tarStream, err := archive.Tar(contextDir, archive.Uncompressed)
+ tarStream, err := archive.Tar(contextDir, compression.None)
  assert.NilError(t, err)

  defer func() {

@@ -6,6 +6,7 @@ import (

  "github.com/containerd/log"
  "github.com/moby/go-archive"
+ "github.com/moby/go-archive/compression"
  "github.com/moby/moby/v2/daemon/builder"
  "github.com/moby/moby/v2/daemon/builder/remotecontext/git"
  )

@@ -17,7 +18,7 @@ func MakeGitContext(gitURL string) (builder.Source, error) {
  return nil, err
  }

- c, err := archive.Tar(root, archive.Uncompressed)
+ c, err := archive.Tar(root, compression.None)
  if err != nil {
  return nil, err
  }

@@ -6,6 +6,7 @@ import (
  "testing"

  "github.com/moby/go-archive"
+ "github.com/moby/go-archive/compression"
  "github.com/moby/moby/v2/daemon/builder"
  "github.com/moby/sys/reexec"
  "github.com/pkg/errors"

@@ -128,7 +129,7 @@ func TestRemoveDirectory(t *testing.T) {

  func makeTestArchiveContext(t *testing.T, dir string) builder.Source {
  skip.If(t, os.Getuid() != 0, "skipping test that requires root")
- tarStream, err := archive.Tar(dir, archive.Uncompressed)
+ tarStream, err := archive.Tar(dir, compression.None)
  if err != nil {
  t.Fatalf("error: %s", err)
  }
@@ -335,7 +335,7 @@ func validateHealthCheck(healthConfig *containertypes.HealthConfig) error {

  func validatePortBindings(ports networktypes.PortMap) error {
  for port := range ports {
- if !port.IsValid() {
+ if !port.IsValid() || port.Num() == 0 {
  return errors.Errorf("invalid port specification: %q", port.String())
  }

@@ -647,7 +647,10 @@ func (container *Container) BackfillEmptyPBs() {
  if container.HostConfig == nil {
  return
  }

+ if container.HostConfig.PortBindings == nil {
+ container.HostConfig.PortBindings = networktypes.PortMap{}
+ return
+ }
  for portProto, pb := range container.HostConfig.PortBindings {
  if len(pb) > 0 || pb == nil {
  continue

@@ -2,6 +2,7 @@ package containerd

  import (
  "context"
+ "fmt"
  "sync/atomic"
  "time"
@@ -63,13 +64,6 @@ func (i *ImageService) ImageInspect(ctx context.Context, refOrID string, opts im
  }
  }

- var img dockerspec.DockerOCIImage
- if multi.Best != nil {
- if err := multi.Best.ReadConfig(ctx, &img); err != nil {
- return nil, err
- }
- }

  parent, err := i.getImageLabelByDigest(ctx, target.Digest, imageLabelClassicBuilderParent)
  if err != nil {
  log.G(ctx).WithError(err).Warn("failed to determine Parent property")

@@ -104,29 +98,36 @@ func (i *ImageService) ImageInspect(ctx context.Context, refOrID string, opts im
  GraphDriverLegacy: &storage.DriverData{Name: i.snapshotter},
  }

  var img dockerspec.DockerOCIImage
  if multi.Best != nil {
  imgConfig := img.Config
  resp.Author = img.Author
  resp.Config = &imgConfig
  resp.Architecture = img.Architecture
  resp.Variant = img.Variant
  resp.Os = img.OS
  resp.OsVersion = img.OSVersion
  if err := multi.Best.ReadConfig(ctx, &img); err != nil && !cerrdefs.IsNotFound(err) {
  return nil, fmt.Errorf("failed to read image config: %w", err)
  }
  }

  if len(img.History) > 0 {
  resp.Comment = img.History[len(img.History)-1].Comment
  }
  // Copy the config
  imgConfig := img.Config
  resp.Config = &imgConfig

  if img.Created != nil {
  resp.Created = img.Created.Format(time.RFC3339Nano)
  }
  resp.Author = img.Author
  resp.Architecture = img.Architecture
  resp.Variant = img.Variant
  resp.Os = img.OS
  resp.OsVersion = img.OSVersion

  resp.RootFS = imagetypes.RootFS{
  Type: img.RootFS.Type,
  }
  for _, layer := range img.RootFS.DiffIDs {
  resp.RootFS.Layers = append(resp.RootFS.Layers, layer.String())
  }
  if len(img.History) > 0 {
  resp.Comment = img.History[len(img.History)-1].Comment
  }

  if img.Created != nil {
  resp.Created = img.Created.Format(time.RFC3339Nano)
  }

  resp.RootFS = imagetypes.RootFS{
  Type: img.RootFS.Type,
  }
  for _, layer := range img.RootFS.DiffIDs {
  resp.RootFS.Layers = append(resp.RootFS.Layers, layer.String())
  }

  return resp, nil

@@ -61,6 +61,32 @@ func TestImageInspect(t *testing.T) {
  }
  })

+ t.Run("inspect image with one layer missing", func(t *testing.T) {
+ ctx := logtest.WithT(ctx, t)
+ service := fakeImageService(t, ctx, cs)
+
+ img := toContainerdImage(t, specialimage.MultiLayer)
+
+ _, err := service.images.Create(ctx, img)
+ assert.NilError(t, err)
+
+ // Get the manifest to access the layers
+ mfst, err := c8dimages.Manifest(ctx, cs, img.Target, nil)
+ assert.NilError(t, err)
+ assert.Check(t, len(mfst.Layers) > 0, "image should have at least one layer")
+
+ // Delete the last layer from the content store
+ lastLayer := mfst.Layers[len(mfst.Layers)-1]
+ err = cs.Delete(ctx, lastLayer.Digest)
+ assert.NilError(t, err)
+
+ inspect, err := service.ImageInspect(ctx, img.Name, imagebackend.ImageInspectOpts{})
+ assert.NilError(t, err)
+
+ assert.Check(t, inspect.Config != nil)
+ assert.Check(t, is.Len(inspect.RootFS.Layers, len(mfst.Layers)))
+ })

  t.Run("inspect image with platform parameter", func(t *testing.T) {
  ctx := logtest.WithT(ctx, t)
  service := fakeImageService(t, ctx, cs)
@@ -305,34 +305,33 @@ func (i *ImageService) multiPlatformSummary(ctx context.Context, img c8dimages.I
  mfstSummary.ImageData.Platform = *target.Platform
  }

  if !available {
  return nil
  }

  var dockerImage dockerspec.DockerOCIImage
- if err := img.ReadConfig(ctx, &dockerImage); err != nil {
+ if err := img.ReadConfig(ctx, &dockerImage); err != nil && !cerrdefs.IsNotFound(err) {
  logger.WithError(err).Warn("failed to read image config")
  return nil
  }

  if target.Platform == nil {
  mfstSummary.ImageData.Platform = dockerImage.Platform
  if dockerImage.Platform.OS != "" {
  if target.Platform == nil {
  mfstSummary.ImageData.Platform = dockerImage.Platform
  }
  logger = logger.WithField("platform", mfstSummary.ImageData.Platform)
  }
  logger = logger.WithField("platform", mfstSummary.ImageData.Platform)

  chainIDs := identity.ChainIDs(dockerImage.RootFS.DiffIDs)
  if dockerImage.RootFS.DiffIDs != nil {
  chainIDs := identity.ChainIDs(dockerImage.RootFS.DiffIDs)

  snapshotUsage, err := img.SnapshotUsage(ctx, i.snapshotterService(i.snapshotter))
  if err != nil {
  logger.WithFields(log.Fields{"error": err}).Warn("failed to determine platform specific unpacked size")
  snapshotUsage, err := img.SnapshotUsage(ctx, i.snapshotterService(i.snapshotter))
  if err != nil {
  logger.WithFields(log.Fields{"error": err}).Warn("failed to determine platform specific unpacked size")
  }
  unpackedSize := snapshotUsage.Size

  mfstSummary.ImageData.Size.Unpacked = unpackedSize
  mfstSummary.Size.Total += unpackedSize
  summary.TotalSize += unpackedSize

  summary.AllChainIDs = append(summary.AllChainIDs, chainIDs...)
  }
  unpackedSize := snapshotUsage.Size

  mfstSummary.ImageData.Size.Unpacked = unpackedSize
  mfstSummary.Size.Total += unpackedSize
  summary.TotalSize += unpackedSize

  summary.AllChainIDs = append(summary.AllChainIDs, chainIDs...)

  for _, c := range i.containers.List() {
  if c.ImageManifest != nil && c.ImageManifest.Digest == target.Digest {

@@ -360,8 +359,9 @@ func (i *ImageService) multiPlatformSummary(ctx context.Context, img c8dimages.I
  if err != nil {
  if errors.Is(err, errNotManifestOrIndex) {
  log.G(ctx).WithFields(log.Fields{
- "error": err,
- "image": img.Name,
+ "error": err,
+ "image": img.Name,
+ "descriptor": img.Target,
  }).Warn("unexpected image target (neither a manifest nor index)")
  } else {
  return nil, err

@@ -443,15 +443,6 @@ func (i *ImageService) singlePlatformImage(ctx context.Context, contentStore con
  }
  }

- cfgDesc, err := imageManifest.Image.Config(ctx)
- if err != nil {
- return nil, err
- }
- var cfg configLabels
- if err := readJSON(ctx, contentStore, cfgDesc, &cfg); err != nil {
- return nil, err
- }

  var unpackedSize int64
  if snapshotUsage, err := imageManifest.SnapshotUsage(ctx, i.snapshotterService(i.snapshotter)); err != nil {
  log.G(ctx).WithFields(log.Fields{"image": imageManifest.Name(), "error": err}).Warn("failed to calculate unpacked size of image")

@@ -474,7 +465,6 @@ func (i *ImageService) singlePlatformImage(ctx context.Context, contentStore con
  RepoDigests: repoDigests,
  RepoTags: repoTags,
  Size: totalSize,
- Labels: cfg.Config.Labels,
  // -1 indicates that the value has not been set (avoids ambiguity
  // between 0 (default) and "not set". We cannot use a pointer (nil)
  // for this, as the JSON representation uses "omitempty", which would

@@ -482,9 +472,25 @@ func (i *ImageService) singlePlatformImage(ctx context.Context, contentStore con
  SharedSize: -1,
  Containers: -1,
  }

+ var cfg configLabels
+ if err := imageManifest.ReadConfig(ctx, &cfg); err != nil {
+ if !cerrdefs.IsNotFound(err) {
+ log.G(ctx).WithFields(log.Fields{
+ "image": imageManifest.Name(),
+ "error": err,
+ }).Warn("failed to read image config")
+ }
+ }

+ if cfg.Created != nil {
+ summary.Created = cfg.Created.Unix()
+ }
+ if cfg.Config.Labels != nil {
+ summary.Labels = cfg.Config.Labels
+ } else {
+ summary.Labels = map[string]string{}
+ }

  return summary, nil
  }
@@ -15,6 +15,7 @@ import (
  cerrdefs "github.com/containerd/errdefs"
  "github.com/containerd/platforms"
  "github.com/moby/go-archive"
+ "github.com/moby/go-archive/compression"
  "github.com/moby/moby/v2/daemon/server/imagebackend"
  "github.com/moby/moby/v2/internal/testutil/labelstore"
  "github.com/moby/moby/v2/internal/testutil/specialimage"

@@ -39,7 +40,7 @@ func TestImageLoad(t *testing.T) {
  imgSvc.defaultPlatformOverride = platforms.Only(linuxAmd64)

  tryLoad := func(ctx context.Context, t *testing.T, dir string, platformList []ocispec.Platform) error {
- tarRc, err := archive.Tar(dir, archive.Uncompressed)
+ tarRc, err := archive.Tar(dir, compression.None)
  assert.NilError(t, err)
  defer tarRc.Close()
@@ -641,18 +641,25 @@ func (daemon *Daemon) restore(ctx context.Context, cfg *configStore, containers
  }
  group.Wait()

- for id := range removeContainers {
+ for id, c := range removeContainers {
  group.Add(1)
- go func(cid string) {
+ go func(cid string, c *container.Container) {
  _ = sem.Acquire(context.Background(), 1)
+ defer group.Done()
+ defer sem.Release(1)

+ if c.State.IsDead() {
+ if err := daemon.cleanupContainer(c, backend.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil {
+ log.G(ctx).WithField("container", cid).WithError(err).Error("failed to remove dead container")
+ }
+ return
+ }

  if err := daemon.containerRm(&cfg.Config, cid, &backend.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil {
  log.G(ctx).WithField("container", cid).WithError(err).Error("failed to remove container")
  }

- sem.Release(1)
- group.Done()
- }(id)
+ }(id, c)
  }
  group.Wait()
@@ -300,6 +300,18 @@ func TestValidateContainerIsolation(t *testing.T) {
  assert.Check(t, is.Error(err, "invalid isolation 'invalid' on "+runtime.GOOS))
  }

+ func TestInvalidContainerPort0(t *testing.T) {
+ d := Daemon{}
+
+ hc := containertypes.HostConfig{
+ PortBindings: map[network.Port][]network.PortBinding{
+ network.MustParsePort("0/tcp"): {},
+ },
+ }
+ _, err := d.verifyContainerSettings(&configStore{}, &hc, nil, false)
+ assert.Error(t, err, `invalid port specification: "0/tcp"`)
+ }

  func TestFindNetworkErrorType(t *testing.T) {
  d := Daemon{}
  _, err := d.FindNetwork("fakeNet")
@@ -9,6 +9,7 @@ import (
  "github.com/containerd/log"
  "github.com/moby/go-archive"
  "github.com/moby/go-archive/chrootarchive"
+ "github.com/moby/go-archive/compression"
  "github.com/moby/moby/api/types/events"
  "github.com/moby/moby/v2/daemon/container"
  "github.com/moby/moby/v2/errdefs"

@@ -65,7 +66,7 @@ func (daemon *Daemon) containerExport(ctx context.Context, ctr *container.Contai
  }()

  archv, err := chrootarchive.Tar(basefs, &archive.TarOptions{
- Compression: archive.Uncompressed,
+ Compression: compression.None,
  IDMap: daemon.idMapping,
  }, basefs)
  if err != nil {

@@ -8,6 +8,7 @@ import (
  "github.com/containerd/log"
  "github.com/moby/go-archive"
  "github.com/moby/go-archive/chrootarchive"
+ "github.com/moby/go-archive/compression"
  "github.com/moby/moby/v2/pkg/ioutils"
  "github.com/moby/sys/user"
  )

@@ -64,7 +65,7 @@ func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch io.ReadCloser, retErr
  }()

  if parent == "" {
- tarArchive, err := archive.Tar(layerFs, archive.Uncompressed)
+ tarArchive, err := archive.Tar(layerFs, compression.None)
  if err != nil {
  return nil, err
  }

@@ -19,6 +19,7 @@ import (
  "github.com/docker/go-units"
  "github.com/moby/go-archive"
  "github.com/moby/go-archive/chrootarchive"
+ "github.com/moby/go-archive/compression"
  "github.com/moby/locker"
  "github.com/moby/moby/v2/daemon/graphdriver"
  "github.com/moby/moby/v2/daemon/graphdriver/overlayutils"

@@ -721,7 +722,7 @@ func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) {
  diffPath := d.getDiffPath(id)
  logger.Debugf("Tar with options on %s", diffPath)
  return archive.TarWithOptions(diffPath, &archive.TarOptions{
- Compression: archive.Uncompressed,
+ Compression: compression.None,
  IDMap: d.idMap,
  WhiteoutFormat: archive.OverlayWhiteoutFormat,
  })
@@ -103,8 +103,7 @@ func (i *ImageService) Images(ctx context.Context, opts imagebackend.ListOptions
  }

  var (
- summaries = make([]imagetypes.Summary, 0, len(selectedImages))
- summaryMap = make(map[*image.Image]imagetypes.Summary, len(selectedImages))
+ summaryMap = make(map[*image.Image]*imagetypes.Summary, len(selectedImages))
  allContainers []*container.Container
  )
  for id, img := range selectedImages {

@@ -210,8 +209,7 @@
  }
  }
  summary.Containers = containersCount
- summaryMap[img] = *summary
- summaries = append(summaries, *summary)
+ summaryMap[img] = summary
  }

  if opts.SharedSize {

@@ -257,6 +255,10 @@
  }
  }

+ summaries := make([]imagetypes.Summary, 0, len(summaryMap))
+ for _, summary := range summaryMap {
+ summaries = append(summaries, *summary)
+ }
  sort.Sort(sort.Reverse(byCreated(summaries)))

  return summaries, nil

@@ -17,6 +17,7 @@ import (
  "github.com/distribution/reference"
  "github.com/docker/distribution"
  "github.com/moby/go-archive"
+ "github.com/moby/go-archive/compression"
  "github.com/moby/moby/api/types/events"
  "github.com/moby/moby/v2/daemon/internal/image"
  v1 "github.com/moby/moby/v2/daemon/internal/image/v1"

@@ -395,7 +396,7 @@ func (s *saveSession) writeTar(ctx context.Context, tempDir string, outStream io
  ctx, span := tracing.StartSpan(ctx, "writeTar")
  defer span.End()

- fs, err := archive.Tar(tempDir, archive.Uncompressed)
+ fs, err := archive.Tar(tempDir, compression.None)
  if err != nil {
  span.SetStatus(err)
  return err
@@ -294,12 +294,15 @@ func (ls *layerStore) registerWithDescriptor(ts io.Reader, parent ChainID, descr
  descriptor: descriptor,
  }

  if cErr = ls.driver.Create(layer.cacheID, pid, nil); cErr != nil {
  tx, cErr := ls.store.StartTransaction()
  if cErr != nil {
  return nil, cErr
  }

  tx, cErr := ls.store.StartTransaction()
  if cErr != nil {
  if cErr = ls.driver.Create(layer.cacheID, pid, nil); cErr != nil {
  if err := tx.Cancel(); err != nil {
  log.G(context.TODO()).WithFields(log.Fields{"cache-id": layer.cacheID, "error": err}).Error("Error canceling metadata transaction")
  }
  return nil, cErr
  }

@@ -532,6 +535,9 @@ func (ls *layerStore) CreateRWLayer(name string, parent ChainID, opts *CreateRWL
  return nil, err
  }
  if err := ls.saveMount(m); err != nil {
+ if removeErr := ls.driver.Remove(m.mountID); removeErr != nil {
+ log.G(context.TODO()).WithFields(log.Fields{"mount-id": m.mountID, "error": removeErr}).Error("Failed to clean up RW layer after mount save failure")
+ }
  return nil, err
  }

@@ -642,7 +648,7 @@ func (ls *layerStore) saveMount(mount *mountedLayer) error {
  return nil
  }

- func (ls *layerStore) initMount(graphID, parent, mountLabel string, initFunc MountInit, storageOpt map[string]string) (string, error) {
+ func (ls *layerStore) initMount(graphID, parent, mountLabel string, initFunc MountInit, storageOpt map[string]string) (_ string, retErr error) {
  // Use "<graph-id>-init" to maintain compatibility with graph drivers
  // which are expecting this layer with this special name. If all
  // graph drivers can be updated to not rely on knowing about this layer

@@ -657,6 +663,16 @@ func (ls *layerStore) initMount(graphID, parent, mountLabel string, initFunc Mou
  if err := ls.driver.CreateReadWrite(initID, parent, createOpts); err != nil {
  return "", err
  }

+ // Clean up init layer if any subsequent operation fails
+ defer func() {
+ if retErr != nil {
+ if err := ls.driver.Remove(initID); err != nil {
+ log.G(context.TODO()).WithFields(log.Fields{"init-id": initID, "error": err}).Error("Failed to clean up init layer after error")
+ }
+ }
+ }()

  p, err := ls.driver.Get(initID, "")
  if err != nil {
  return "", err
@@ -12,6 +12,7 @@ import (

  "github.com/containerd/continuity/driver"
  "github.com/moby/go-archive"
+ "github.com/moby/go-archive/compression"
  "github.com/moby/moby/v2/daemon/graphdriver"
  "github.com/moby/moby/v2/daemon/graphdriver/vfs"
  "github.com/moby/moby/v2/daemon/internal/stringid"

@@ -587,7 +588,7 @@ func tarFromFiles(files ...FileApplier) ([]byte, error) {
  }
  }

- r, err := archive.Tar(td, archive.Uncompressed)
+ r, err := archive.Tar(td, compression.None)
  if err != nil {
  return nil, err
  }

@@ -42,6 +42,9 @@ func decodeCreateRequest(src io.Reader) (container.CreateRequest, error) {
  if w.HostConfig == nil {
  w.HostConfig = &container.HostConfig{}
  }
+ if w.HostConfig.PortBindings == nil {
+ w.HostConfig.PortBindings = make(network.PortMap)
+ }
  // Make sure NetworkMode has an acceptable value. We do this to ensure
  // backwards compatible API behavior.
  //
@@ -220,7 +220,7 @@ func TestAddPortMappings(t *testing.T) {
  enableProxy bool
  hairpin bool
  busyPortIPv4 int
- rootless bool
+ newPDC func() nat.PortDriverClient
  hostAddrs []string
  noProxy6To4 bool

@@ -667,7 +667,7 @@
  {PortBinding: types.PortBinding{Proto: types.TCP, Port: 80}},
  },
  enableProxy: true,
- rootless: true,
+ newPDC: func() nat.PortDriverClient { return newMockPortDriverClient(true) },
  expPBs: []types.PortBinding{
  {Proto: types.TCP, IP: ctrIP4.IP, Port: 22, HostIP: net.IPv4zero, HostPort: firstEphemPort},
  {Proto: types.TCP, IP: ctrIP6.IP, Port: 22, HostIP: net.IPv6zero, HostPort: firstEphemPort},

@@ -675,6 +675,21 @@
  {Proto: types.TCP, IP: ctrIP6.IP, Port: 80, HostIP: net.IPv6zero, HostPort: firstEphemPort + 1},
  },
  },
+ {
+ name: "rootless, ipv6 not supported",
+ epAddrV4: ctrIP4,
+ epAddrV6: ctrIP6,
+ cfg: []portmapperapi.PortBindingReq{
+ {PortBinding: types.PortBinding{Proto: types.TCP, Port: 22}},
+ {PortBinding: types.PortBinding{Proto: types.TCP, Port: 80}},
+ },
+ enableProxy: true,
+ newPDC: func() nat.PortDriverClient { return newMockPortDriverClient(false) },
+ expPBs: []types.PortBinding{
+ {Proto: types.TCP, IP: ctrIP4.IP, Port: 22, HostIP: net.IPv4zero, HostPort: firstEphemPort},
+ {Proto: types.TCP, IP: ctrIP4.IP, Port: 80, HostIP: net.IPv4zero, HostPort: firstEphemPort + 1},
+ },
+ },
  {
  name: "rootless without proxy",
  epAddrV4: ctrIP4,

@@ -683,8 +698,8 @@
  {PortBinding: types.PortBinding{Proto: types.TCP, Port: 22}},
  {PortBinding: types.PortBinding{Proto: types.TCP, Port: 80}},
  },
- rootless: true,
- hairpin: true,
+ newPDC: func() nat.PortDriverClient { return newMockPortDriverClient(true) },
+ hairpin: true,
  expPBs: []types.PortBinding{
  {Proto: types.TCP, IP: ctrIP4.IP, Port: 22, HostIP: net.IPv4zero, HostPort: firstEphemPort},
  {Proto: types.TCP, IP: ctrIP6.IP, Port: 22, HostIP: net.IPv6zero, HostPort: firstEphemPort},

@@ -745,8 +760,8 @@
  }

  var pdc nat.PortDriverClient
- if tc.rootless {
- pdc = newMockPortDriverClient()
+ if tc.newPDC != nil {
+ pdc = tc.newPDC()
  }

  pms := &drvregistry.PortMappers{}

@@ -780,7 +795,7 @@
  n.firewallerNetwork = fwn

  expChildIP := func(hostIP net.IP) net.IP {
- if !tc.rootless {
+ if pdc == nil {
  return hostIP
  }
  if hostIP.To4() == nil {

@@ -938,16 +953,21 @@ func (p mockPortDriverPort) String() string {

  type mockPortDriverClient struct {
  openPorts map[mockPortDriverPort]bool
+ supportV6 bool
  }

- func newMockPortDriverClient() *mockPortDriverClient {
+ func newMockPortDriverClient(supportV6 bool) *mockPortDriverClient {
  return &mockPortDriverClient{
  openPorts: map[mockPortDriverPort]bool{},
+ supportV6: supportV6,
  }
  }

- func (c *mockPortDriverClient) ChildHostIP(hostIP netip.Addr) netip.Addr {
+ func (c *mockPortDriverClient) ChildHostIP(proto string, hostIP netip.Addr) netip.Addr {
  if hostIP.Is6() {
+ if !c.supportV6 {
+ return netip.Addr{}
+ }
  return netip.IPv6Loopback()
  }
  return netip.MustParseAddr("127.0.0.1")
@@ -75,14 +75,38 @@ func NewPortDriverClient(ctx context.Context) (*PortDriverClient, error) {
  return pdc, nil
  }

+ // proto normalizes the protocol to match what the rootlesskit API expects.
+ func (c *PortDriverClient) proto(proto string, hostIP netip.Addr) string {
+ // proto is like "tcp", but we need to convert it to "tcp4" or "tcp6" explicitly
+ // for libnetwork >= 20201216
+ //
+ // See https://github.com/moby/libnetwork/pull/2604/files#diff-8fa48beed55dd033bf8e4f8c40b31cf69d0b2cc5d4bb53cde8594670ea6c938aR20
+ // See also https://github.com/rootless-containers/rootlesskit/issues/231
+ apiProto := proto
+ if !strings.HasSuffix(apiProto, "4") && !strings.HasSuffix(apiProto, "6") {
+ if hostIP.Is6() {
+ apiProto += "6"
+ } else {
+ apiProto += "4"
+ }
+ }
+ return apiProto
+ }

  // ChildHostIP returns the address that must be used in the child network
  // namespace in place of hostIP, a host IP address. In particular, port
  // mappings from host IP addresses, and DNAT rules, must use this child
- // address in place of the real host address.
- func (c *PortDriverClient) ChildHostIP(hostIP netip.Addr) netip.Addr {
+ // address in place of the real host address. It may return an invalid
+ // netip.Addr if the proto and IP family aren't supported.
+ func (c *PortDriverClient) ChildHostIP(proto string, hostIP netip.Addr) netip.Addr {
  if c == nil {
  return hostIP
  }
+ if _, ok := c.protos[c.proto(proto, hostIP)]; !ok {
+ // This happens when apiProto="tcp6", portDriverName="slirp4netns",
+ // because "slirp4netns" port driver does not support listening on IPv6 yet.
+ return netip.Addr{}
+ }
  if c.childIP.IsValid() {
  return c.childIP
  }

@@ -117,20 +141,8 @@ func (c *PortDriverClient) AddPort(
  if c == nil {
  return func() error { return nil }, nil
  }
- // proto is like "tcp", but we need to convert it to "tcp4" or "tcp6" explicitly
- // for libnetwork >= 20201216
- //
- // See https://github.com/moby/libnetwork/pull/2604/files#diff-8fa48beed55dd033bf8e4f8c40b31cf69d0b2cc5d4bb53cde8594670ea6c938aR20
- // See also https://github.com/rootless-containers/rootlesskit/issues/231
- apiProto := proto
- if !strings.HasSuffix(apiProto, "4") && !strings.HasSuffix(apiProto, "6") {
- if hostIP.Is6() {
- apiProto += "6"
- } else {
- apiProto += "4"
- }
- }

+ apiProto := c.proto(proto, hostIP)
  if _, ok := c.protos[apiProto]; !ok {
  // This happens when apiProto="tcp6", portDriverName="slirp4netns",
  // because "slirp4netns" port driver does not support listening on IPv6 yet.
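The normalization that `AddPort` previously did inline is now shared through the `proto` helper so `ChildHostIP` can use it too. A self-contained sketch of the same rule, shown only to illustrate the mapping (the function here is a stand-in, not the moby method itself):

```go
package main

import (
	"fmt"
	"net/netip"
	"strings"
)

// apiProto converts "tcp"/"udp" into the explicit "tcp4"/"tcp6" form the
// rootlesskit port API expects, based on the host address family.
func apiProto(proto string, hostIP netip.Addr) string {
	if strings.HasSuffix(proto, "4") || strings.HasSuffix(proto, "6") {
		return proto // already explicit
	}
	if hostIP.Is6() {
		return proto + "6"
	}
	return proto + "4"
}

func main() {
	fmt.Println(apiProto("tcp", netip.MustParseAddr("::1")))     // tcp6
	fmt.Println(apiProto("tcp", netip.MustParseAddr("0.0.0.0"))) // tcp4
}
```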
@@ -251,9 +251,14 @@ func deleteEpFromResolverImpl(
|
||||
}
|
||||
|
||||
func findHNSEp(ip4, ip6 *net.IPNet, hnsEndpoints []hcsshim.HNSEndpoint) *hcsshim.HNSEndpoint {
|
||||
if ip4 == nil && ip6 == nil {
|
||||
return nil
|
||||
}
|
||||
for _, hnsEp := range hnsEndpoints {
|
||||
if (hnsEp.IPAddress != nil && hnsEp.IPAddress.Equal(ip4.IP)) ||
|
||||
(hnsEp.IPv6Address != nil && hnsEp.IPv6Address.Equal(ip6.IP)) {
|
||||
if ip4 != nil && hnsEp.IPAddress != nil && hnsEp.IPAddress.Equal(ip4.IP) {
|
||||
return &hnsEp
|
||||
}
|
||||
if ip6 != nil && hnsEp.IPv6Address != nil && hnsEp.IPv6Address.Equal(ip6.IP) {
|
||||
return &hnsEp
|
||||
}
|
||||
}
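
The reworked findHNSEp checks each address family separately, so a nil ip4 or ip6 request can no longer be dereferenced while matching endpoints. A minimal sketch of that nil-safe lookup pattern using plain net types (the endpoint struct is a stand-in, not the hcsshim type):

package main

import (
	"fmt"
	"net"
)

// endpoint is a simplified stand-in for an HNS endpoint with optional
// IPv4 and IPv6 addresses.
type endpoint struct {
	v4, v6 net.IP
}

// find returns the first endpoint whose IPv4 or IPv6 address matches the
// requested networks; nil requests are skipped instead of dereferenced.
func find(ip4, ip6 *net.IPNet, eps []endpoint) *endpoint {
	if ip4 == nil && ip6 == nil {
		return nil
	}
	for i, ep := range eps {
		if ip4 != nil && ep.v4 != nil && ep.v4.Equal(ip4.IP) {
			return &eps[i]
		}
		if ip6 != nil && ep.v6 != nil && ep.v6.Equal(ip6.IP) {
			return &eps[i]
		}
	}
	return nil
}

func main() {
	_, v4net, _ := net.ParseCIDR("10.0.0.2/32")
	eps := []endpoint{{v4: net.ParseIP("10.0.0.2")}}
	// ip6 is nil here; a combined condition that dereferenced ip6.IP would panic.
	fmt.Println(find(v4net, nil, eps) != nil) // true
}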
@@ -135,6 +135,7 @@ func StartProxy(pb types.PortBinding,
		return nil
	}
	stopped.Store(true)
	log.G(context.Background()).WithField("pb", pb).Debug("Stopping userland proxy")
	if err := cmd.Process.Signal(os.Interrupt); err != nil {
		return err
	}

@@ -6,6 +6,7 @@ import (
	"fmt"
	"net"
	"net/netip"
	"slices"
	"strconv"

	"github.com/containerd/log"
@@ -13,12 +14,13 @@ import (
	"github.com/moby/moby/v2/daemon/libnetwork/portallocator"
	"github.com/moby/moby/v2/daemon/libnetwork/portmapperapi"
	"github.com/moby/moby/v2/daemon/libnetwork/types"
	"github.com/moby/moby/v2/internal/sliceutil"
)

const driverName = "nat"

type PortDriverClient interface {
	ChildHostIP(hostIP netip.Addr) netip.Addr
	ChildHostIP(proto string, hostIP netip.Addr) netip.Addr
	AddPort(ctx context.Context, proto string, hostIP, childIP netip.Addr, hostPort int) (func() error, error)
}

@@ -73,12 +75,18 @@ func (pm PortMapper) MapPorts(ctx context.Context, cfg []portmapperapi.PortBindi
		}
	}()

	addrs := make([]net.IP, 0, len(cfg))
	for i := range cfg {
		cfg[i] = setChildHostIP(pm.pdc, cfg[i])
		addrs = append(addrs, cfg[i].ChildHostIP)
	for i := len(cfg) - 1; i >= 0; i-- {
		var supported bool
		if cfg[i], supported = setChildHostIP(pm.pdc, cfg[i]); !supported {
			cfg = slices.Delete(cfg, i, i+1)
			continue
		}
	}

	addrs := sliceutil.Map(cfg, func(req portmapperapi.PortBindingReq) net.IP {
		return req.ChildHostIP
	})

	pa := portallocator.NewOSAllocator()
	allocatedPort, socks, err := pa.RequestPortsInRange(addrs, proto, int(hostPort), int(hostPortEnd))
	if err != nil {
@@ -127,14 +135,21 @@ func (pm PortMapper) UnmapPorts(ctx context.Context, pbs []portmapperapi.PortBin
	return errors.Join(errs...)
}

func setChildHostIP(pdc PortDriverClient, req portmapperapi.PortBindingReq) portmapperapi.PortBindingReq {
// setChildHostIP returns a modified PortBindingReq that contains the IP
// address that should be used for port allocation, firewall rules, etc. It
// returns false when the PortBindingReq isn't supported by the PortDriverClient.
func setChildHostIP(pdc PortDriverClient, req portmapperapi.PortBindingReq) (portmapperapi.PortBindingReq, bool) {
	if pdc == nil {
		req.ChildHostIP = req.HostIP
		return req
		return req, true
	}
	hip, _ := netip.AddrFromSlice(req.HostIP)
	req.ChildHostIP = pdc.ChildHostIP(hip.Unmap()).AsSlice()
	return req
	chip := pdc.ChildHostIP(req.Proto.String(), hip.Unmap())
	if !chip.IsValid() {
		return req, false
	}
	req.ChildHostIP = chip.AsSlice()
	return req, true
}
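
With setChildHostIP now reporting whether a binding is supported, MapPorts drops unsupported requests instead of mapping them to an invalid child address. A minimal sketch of the delete-while-iterating-backwards pattern it uses, on plain ints (the supported predicate is illustrative):

package main

import (
	"fmt"
	"slices"
)

func main() {
	// Walk backwards so deleting index i does not shift the elements
	// that still need to be visited.
	reqs := []int{80, 443, 8080, 53}
	supported := func(p int) bool { return p != 53 }

	for i := len(reqs) - 1; i >= 0; i-- {
		if !supported(reqs[i]) {
			reqs = slices.Delete(reqs, i, i+1)
		}
	}
	fmt.Println(reqs) // [80 443 8080]
}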
// configPortDriver passes the port binding's details to rootlesskit, and updates the

@@ -264,17 +264,8 @@ func (sb *Sandbox) loadResolvConf(path string) (*resolvconf.ResolvConf, error) {
// be a copy of the host's file, with overrides for nameservers, options and search
// domains applied.
func (sb *Sandbox) setupDNS() error {
	sb.restoreResolvConfPath()

	// When the container is restarted, a new Sandbox is created but the same resolv.conf is re-used. If it was
	// user-modified, do not attempt to overwrite it.
	if !sb.config.useDefaultSandBox {
		if mod, err := resolvconf.UserModified(sb.config.resolvConfPath, sb.config.resolvConfHashFile); err != nil || mod {
			return err
		}
	}

	// Make sure the directory exists.
	sb.restoreResolvConfPath()
	dir, _ := filepath.Split(sb.config.resolvConfPath)
	if err := createBasePath(dir); err != nil {
		return err
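
The setupDNS change relies on resolvconf.UserModified to detect whether the container's resolv.conf was edited since the daemon last wrote it, so a restart does not clobber those edits. A minimal sketch of one way such a check can work, assuming the hash file simply stores a hex SHA-256 of the last written content (this illustrates the idea only and is not the moby resolvconf implementation):

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"errors"
	"fmt"
	"os"
)

// userModified reports whether path no longer matches the checksum recorded
// in hashPath. A missing hash file is treated as "not modified".
func userModified(path, hashPath string) (bool, error) {
	want, err := os.ReadFile(hashPath)
	if errors.Is(err, os.ErrNotExist) {
		return false, nil
	} else if err != nil {
		return false, err
	}
	content, err := os.ReadFile(path)
	if err != nil {
		return false, err
	}
	sum := sha256.Sum256(content)
	return hex.EncodeToString(sum[:]) != string(want), nil
}

func main() {
	dir, _ := os.MkdirTemp("", "resolv")
	defer os.RemoveAll(dir)

	rc, hash := dir+"/resolv.conf", dir+"/resolv.conf.hash"
	_ = os.WriteFile(rc, []byte("nameserver 8.8.8.8\n"), 0o644)
	sum := sha256.Sum256([]byte("nameserver 8.8.8.8\n"))
	_ = os.WriteFile(hash, []byte(hex.EncodeToString(sum[:])), 0o644)

	fmt.Println(userModified(rc, hash)) // false <nil>
	_ = os.WriteFile(rc, []byte("nameserver 1.1.1.1\n"), 0o644)
	fmt.Println(userModified(rc, hash)) // true <nil>
}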
@@ -338,7 +329,15 @@ func (sb *Sandbox) rebuildDNS() error {
	// upstream nameservers.
	sb.setExternalResolvers(extNameServers)

	return rc.WriteFile(sb.config.resolvConfPath, sb.config.resolvConfHashFile, filePerm)
	// Write the file for the container - preserving old behaviour, not updating the
	// hash file (so, no further updates will be made).
	// TODO(robmry) - I think that's probably accidental, I can't find a reason for it,
	// and the old resolvconf.Build() function wrote the file but not the hash, which
	// is surprising. But, before fixing it, a guard/flag needs to be added to
	// sb.updateDNS() to make sure that when an endpoint joins a sandbox that already
	// has an internal resolver, the container's resolv.conf is still (re)configured
	// for an internal resolver.
	return rc.WriteFile(sb.config.resolvConfPath, "", filePerm)
}

func createBasePath(dir string) error {

@@ -14,17 +14,12 @@ import (
	is "gotest.tools/v3/assert/cmp"
)

func getResolvConf(t *testing.T, rcPath string) resolvconf.ResolvConf {
func getResolvConfOptions(t *testing.T, rcPath string) []string {
	t.Helper()
	resolv, err := os.ReadFile(rcPath)
	assert.NilError(t, err)
	rc, err := resolvconf.Parse(bytes.NewBuffer(resolv), "")
	assert.NilError(t, err)
	return rc
}

func getResolvConfOptions(t *testing.T, rcPath string) []string {
	rc := getResolvConf(t, rcPath)
	return rc.Options()
}

@@ -95,60 +90,3 @@ func TestDNSOptions(t *testing.T) {
	dnsOptionsList = getResolvConfOptions(t, sb2.config.resolvConfPath)
	assert.Check(t, is.DeepEqual([]string{"ndots:0"}, dnsOptionsList))
}

func TestNonHostNetDNSRestart(t *testing.T) {
	c, err := New(context.Background(), config.OptionDataDir(t.TempDir()))
	assert.NilError(t, err)

	// Step 1: Create initial sandbox (simulating first container start)
	sb, err := c.NewSandbox(context.Background(), "cnt1")
	assert.NilError(t, err)

	defer func() {
		_ = sb.Delete(context.Background())
	}()

	sb.startResolver(false)

	err = sb.setupDNS()
	assert.NilError(t, err)
	err = sb.rebuildDNS()
	assert.NilError(t, err)

	// Step 2: Simulate user manually overwriting the container's resolv.conf
	resolvConfPath := sb.config.resolvConfPath
	modifiedContent := []byte(`nameserver 1.1.1.1`)
	err = os.WriteFile(resolvConfPath, modifiedContent, 0644)
	assert.NilError(t, err)

	// Step 3: Delete the sandbox (simulating container stop)
	err = sb.Delete(context.Background())
	assert.NilError(t, err)

	// Step 4: Create a new sandbox (simulating container restart)
	sbRestart, err := c.NewSandbox(context.Background(), "cnt1",
		OptionResolvConfPath(resolvConfPath),
	)
	assert.NilError(t, err)
	defer func() {
		if err := sbRestart.Delete(context.Background()); err != nil {
			t.Error(err)
		}
	}()

	sbRestart.startResolver(false)

	// Step 5: Call setupDNS on restart - should preserve user modifications
	err = sbRestart.setupDNS()
	assert.NilError(t, err)

	rc := getResolvConf(t, sbRestart.config.resolvConfPath)
	assert.Check(t, is.Equal("1.1.1.1", rc.NameServers()[0].String()))

	// Step 6: Call rebuildDNS on restart - should preserve user modifications
	err = sbRestart.rebuildDNS()
	assert.NilError(t, err)

	rc = getResolvConf(t, sbRestart.config.resolvConfPath)
	assert.Check(t, is.Equal("1.1.1.1", rc.NameServers()[0].String()))
}

@@ -366,11 +366,6 @@ func (sb *Sandbox) populateNetworkResourcesOS(ctx context.Context, ep *Endpoint)

	if ep.needResolver() {
		sb.startResolver(false)
	} else {
		// Make sure /etc/resolv.conf is set up.
		if err := sb.updateDNS(ep.getNetwork().enableIPv6); err != nil {
			return err
		}
	}

	if i != nil && i.srcName != "" {
@@ -453,6 +448,10 @@ func (sb *Sandbox) populateNetworkResourcesOS(ctx context.Context, ep *Endpoint)
	}

	sb.addHostsEntries(ctx, ep.getEtcHostsAddrs())
	// Make sure /etc/resolv.conf is set up.
	if err := sb.updateDNS(ep.getNetwork().enableIPv6); err != nil {
		return err
	}

	// Populate load balancer only after updating all the other
	// information including gateway and other routes so that

@@ -1038,10 +1038,13 @@ func buildPortsRelatedCreateEndpointOptions(c *container.Container, n *libnetwor
	)

	ports := c.HostConfig.PortBindings
	if c.HostConfig.PublishAllPorts {
	if c.HostConfig.PublishAllPorts && len(c.Config.ExposedPorts) > 0 {
		// Add exposed ports to a copy of the map to make sure a "publishedPorts" entry is created
		// for each exposed port, even if there's no specific binding.
		ports = maps.Clone(c.HostConfig.PortBindings)
		if ports == nil {
			ports = networktypes.PortMap{}
		}
		for p := range c.Config.ExposedPorts {
			if _, exists := ports[p]; !exists {
				ports[p] = nil
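
The publish-all fix clones the user's port-binding map before adding entries for exposed ports, and allocates a fresh map when the original is nil. A minimal sketch of that pattern with plain string keys (the map types are placeholders, not the moby networktypes API):

package main

import (
	"fmt"
	"maps"
)

func main() {
	// The original bindings may be nil when the user only asked to publish
	// all exposed ports; maps.Clone of a nil map returns nil, so a fresh
	// map is allocated before adding entries.
	var bindings map[string][]string // stands in for HostConfig.PortBindings
	exposed := []string{"80/tcp", "443/tcp"}

	ports := maps.Clone(bindings)
	if ports == nil {
		ports = map[string][]string{}
	}
	for _, p := range exposed {
		if _, exists := ports[p]; !exists {
			ports[p] = nil // published with no specific host binding
		}
	}
	fmt.Println(len(ports)) // 2
}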
@@ -2,6 +2,7 @@ package daemon

import (
	"context"
	"crypto/sha256"
	"encoding/hex"
	"os"
	"path/filepath"
@@ -259,10 +260,11 @@ func (daemon *Daemon) registerMountPoints(ctr *container.Container, defaultReadO
		StorageOpt: ctr.HostConfig.StorageOpt,
	}

	// Include the destination in the layer name to make it unique for each mount point and container.
	// Hash the source and destination to create a safe, unique identifier for each mount point and container.
	// This makes sure that the same image can be mounted multiple times with different destinations.
	// Hex encode the destination to create a safe, unique identifier
	layerName := hex.EncodeToString([]byte(ctr.ID + ",src=" + mp.Source + ",dst=" + mp.Destination))
	// We hash it so that the snapshot name is friendly to the underlying filesystem and doesn't exceed path length limits.
	destHash := sha256.Sum256([]byte(ctr.ID + "-src=" + mp.Source + "-dst=" + mp.Destination))
	layerName := hex.EncodeToString(destHash[:])
	layer, err := daemon.imageService.CreateLayerFromImage(img, layerName, rwLayerOpts)
	if err != nil {
		return err
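
The mount-point layer name is now a SHA-256 digest of the container ID, source and destination, so the snapshot key stays short and filesystem-safe regardless of how long the paths are. A minimal sketch comparing the two encodings (the key value is made up):

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func main() {
	key := "ctr123-src=docker.io/library/nginx:latest-dst=/very/long/mount/destination/path"

	// Old style: hex of the raw key; its length grows with the inputs.
	raw := hex.EncodeToString([]byte(key))

	// New style: hex of a SHA-256 digest; always 64 characters.
	sum := sha256.Sum256([]byte(key))
	hashed := hex.EncodeToString(sum[:])

	fmt.Printf("raw=%d hashed=%d\n", len(raw), len(hashed)) // hashed is always 64
}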
go.mod (6 changes)

@@ -56,7 +56,7 @@ require (
	github.com/miekg/dns v1.1.66
	github.com/mistifyio/go-zfs/v3 v3.1.0
	github.com/mitchellh/copystructure v1.2.0
	github.com/moby/buildkit v0.26.2
	github.com/moby/buildkit v0.26.3
	github.com/moby/docker-image-spec v1.3.1
	github.com/moby/go-archive v0.1.0
	github.com/moby/ipvs v1.1.0
@@ -160,7 +160,7 @@ require (
	github.com/containerd/stargz-snapshotter/estargz v0.17.0 // indirect
	github.com/containerd/ttrpc v1.2.7 // indirect
	github.com/containernetworking/cni v1.3.0 // indirect
	github.com/containernetworking/plugins v1.8.0 // indirect
	github.com/containernetworking/plugins v1.9.0 // indirect
	github.com/coreos/go-semver v0.3.1 // indirect
	github.com/cyphar/filepath-securejoin v0.6.0 // indirect
	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
@@ -193,7 +193,7 @@ require (
	github.com/hiddeco/sshsig v0.2.0 // indirect
	github.com/inconshreveable/mousetrap v1.1.0 // indirect
	github.com/jmoiron/sqlx v1.3.3 // indirect
	github.com/klauspost/compress v1.18.1 // indirect
	github.com/klauspost/compress v1.18.2 // indirect
	github.com/mitchellh/hashstructure/v2 v2.0.2 // indirect
	github.com/mitchellh/reflectwalk v1.0.2 // indirect
	github.com/moby/sys/capability v0.4.0 // indirect
go.sum (12 changes)

@@ -168,8 +168,8 @@ github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++
github.com/containerd/typeurl/v2 v2.2.3/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk=
github.com/containernetworking/cni v1.3.0 h1:v6EpN8RznAZj9765HhXQrtXgX+ECGebEYEmnuFjskwo=
github.com/containernetworking/cni v1.3.0/go.mod h1:Bs8glZjjFfGPHMw6hQu82RUgEPNGEaBb9KS5KtNMnJ4=
github.com/containernetworking/plugins v1.8.0 h1:WjGbV/0UQyo8A4qBsAh6GaDAtu1hevxVxsEuqtBqUFk=
github.com/containernetworking/plugins v1.8.0/go.mod h1:JG3BxoJifxxHBhG3hFyxyhid7JgRVBu/wtooGEvWf1c=
github.com/containernetworking/plugins v1.9.0 h1:Mg3SXBdRGkdXyFC4lcwr6u2ZB2SDeL6LC3U+QrEANuQ=
github.com/containernetworking/plugins v1.9.0/go.mod h1:JG3BxoJifxxHBhG3hFyxyhid7JgRVBu/wtooGEvWf1c=
github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
github.com/coreos/go-systemd/v22 v22.6.0 h1:aGVa/v8B7hpb0TKl0MWoAavPDmHvobFe5R5zn0bCJWo=
@@ -375,8 +375,8 @@ github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/u
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co=
github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0=
github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk=
github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
@@ -417,8 +417,8 @@ github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:F
github.com/mitchellh/mapstructure v0.0.0-20170523030023-d0303fe80992/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/moby/buildkit v0.26.2 h1:EIh5j0gzRsCZmQzvgNNWzSDbuKqwUIiBH7ssqLv8RU8=
github.com/moby/buildkit v0.26.2/go.mod h1:ylDa7IqzVJgLdi/wO7H1qLREFQpmhFbw2fbn4yoTw40=
github.com/moby/buildkit v0.26.3 h1:D+ruZVAk/3ipRq5XRxBH9/DIFpRjSlTtMbghT5gQP9g=
github.com/moby/buildkit v0.26.3/go.mod h1:4T4wJzQS4kYWIfFRjsbJry4QoxDBjK+UGOEOs1izL7w=
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
github.com/moby/go-archive v0.1.0 h1:Kk/5rdW/g+H8NHdJW2gsXyZ7UnzvJNOy6VKJqueWdcQ=
@@ -7,7 +7,7 @@ set -e

# The version of runc should match the version that is used by the containerd
# version that is used. If you need to update runc, open a pull request in
# the containerd project first, and update both after that is merged.
: "${RUNC_VERSION:=v1.3.3}"
: "${RUNC_VERSION:=v1.3.4}"

install_runc() {
	RUNC_BUILDTAGS="${RUNC_BUILDTAGS:-"seccomp"}"

@@ -1,6 +1,6 @@
# syntax=docker/dockerfile:1

ARG GO_VERSION=1.25.4
ARG GO_VERSION=1.25.5
ARG BASE_DEBIAN_DISTRO="bookworm"
ARG PROTOC_VERSION=3.11.4

@@ -1,6 +1,6 @@
# syntax=docker/dockerfile:1

ARG GO_VERSION=1.25.4
ARG GO_VERSION=1.25.5
ARG GOVULNCHECK_VERSION=v1.1.4
ARG FORMAT=text
@@ -4,7 +4,15 @@ set -e
SCRIPTDIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "${SCRIPTDIR}/.validate"

api_files=$(validate_diff --diff-filter=ACMR --name-only -- 'api/' || true)
api_files=$(
	validate_diff --diff-filter=ACMR --name-only -- \
		'api/' \
		':(exclude)api/README.md' \
		':(exclude)api/swagger.yaml' \
		':(exclude)api/docs/' \
		|| true
)

client_files=$(validate_diff --diff-filter=ACMR --name-only -- 'client/' || true)

has_errors=0
@@ -2024,11 +2024,11 @@ CMD ["cat", "/foo"]`),
}

func (s *DockerCLIBuildSuite) TestBuildContextTarGzip(c *testing.T) {
	testContextTar(c, archive.Gzip)
	testContextTar(c, compression.Gzip)
}

func (s *DockerCLIBuildSuite) TestBuildContextTarNoCompression(c *testing.T) {
	testContextTar(c, archive.Uncompressed)
	testContextTar(c, compression.None)
}

func (s *DockerCLIBuildSuite) TestBuildNoContext(c *testing.T) {
@@ -8,6 +8,7 @@ import (
	containertypes "github.com/moby/moby/api/types/container"
	"github.com/moby/moby/client"
	"github.com/moby/moby/v2/integration/internal/container"
	"github.com/moby/moby/v2/internal/testutil/daemon"
	"gotest.tools/v3/assert"
	is "gotest.tools/v3/assert/cmp"
	"gotest.tools/v3/fs"
@@ -107,3 +108,26 @@ func TestRemoveInvalidContainer(t *testing.T) {
	assert.Check(t, is.ErrorType(err, cerrdefs.IsNotFound))
	assert.Check(t, is.ErrorContains(err, "No such container"))
}

func TestRemoveDeadContainersOnDaemonRestart(t *testing.T) {
	skip.If(t, testEnv.IsRemoteDaemon)
	skip.If(t, testEnv.DaemonInfo.OSType == "windows", "FIXME: Windows CI does not support multiple daemons yet")

	ctx := setupTest(t)
	d := daemon.New(t)
	d.StartWithBusybox(ctx, t)
	defer d.Stop(t)

	apiClient := d.NewClientT(t)
	container.Run(ctx, t, apiClient, container.WithCmd("top"), container.WithAutoRemove)

	list, err := apiClient.ContainerList(ctx, client.ContainerListOptions{All: true})
	assert.NilError(t, err)
	assert.Check(t, is.Len(list.Items, 1))

	d.Restart(t)

	list, err = apiClient.ContainerList(ctx, client.ContainerListOptions{All: true})
	assert.NilError(t, err)
	assert.Check(t, is.Len(list.Items, 0))
}
@@ -82,6 +82,74 @@ func TestImageInspectDescriptor(t *testing.T) {
	assert.Check(t, inspect.Descriptor.Size > 0)
}

// Regression test for: https://github.com/moby/moby/issues/51566
//
// This can be reproduced when two images that share the same uncompressed layer
// but have different compressed blobs are pulled.
//
// Example:
// ```
// docker pull nginx@sha256:3b7732505933ca591ce4a6d860cb713ad96a3176b82f7979a8dfa9973486a0d6
// docker pull gotenberg/gotenberg@sha256:b116a40a1c24917e2bf3e153692da5acd2e78e7cd67e1b2d243b47c178f31c90
// ```
//
// In this case, it's the debian trixie image that's used as a base.
// They're effectively the same layer (unpacked diff ID
// `sha256:1d46119d249f7719e1820e24a311aa7c453f166f714969cffe89504678eaa447`),
// but different compressed blobs:
//
// # nginx
// {
//   "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
//   "size": 29777766,
//   "digest": "sha256:8c7716127147648c1751940b9709b6325f2256290d3201662eca2701cadb2cdf"
// }
//
// # gotenberg
// {
//   "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
//   "size": 30781333,
//   "digest": "sha256:b96413fb491a5ed179bb2746ff3be6cbddd72e14c6503bea80d58e579a3b92bc"
// },
func TestImageInspectWithoutSomeBlobs(t *testing.T) {
	t.Skip("TODO(vvoland): Come up with minimal images for this test")

	skip.If(t, testEnv.DaemonInfo.OSType != "linux", "The test images are Linux-only")

	ctx := setupTest(t)
	apiClient := testEnv.APIClient()

	const baseImage = "nginx@sha256:3b7732505933ca591ce4a6d860cb713ad96a3176b82f7979a8dfa9973486a0d6"
	const childImage = "gotenberg/gotenberg:8.24@sha256:b116a40a1c24917e2bf3e153692da5acd2e78e7cd67e1b2d243b47c178f31c90"

	// Pull the base image first and then the child image
	for _, image := range []string{baseImage, childImage} {
		rdr, err := apiClient.ImagePull(ctx, image, client.ImagePullOptions{})
		assert.NilError(t, err)
		assert.NilError(t, rdr.Wait(ctx))

		t.Cleanup(func() {
			_, _ = apiClient.ImageRemove(ctx, image, client.ImageRemoveOptions{})
		})
	}

	var raw bytes.Buffer
	inspect, err := apiClient.ImageInspect(ctx, childImage, client.ImageInspectWithRawResponse(&raw))
	assert.NilError(t, err)

	var rawJson map[string]any
	err = json.Unmarshal(raw.Bytes(), &rawJson)
	assert.NilError(t, err)

	configVal, hasConfig := rawJson["Config"]
	assert.Check(t, hasConfig, "Config field should exist in JSON response")
	if assert.Check(t, configVal != nil, "Config should not be null in JSON response") {
		assert.Check(t, is.DeepEqual(inspect.Config.Cmd, []string{"gotenberg"}))
		assert.Check(t, inspect.Os != "")
		assert.Check(t, inspect.Architecture != "")
	}
}

func TestImageInspectWithPlatform(t *testing.T) {
	skip.If(t, testEnv.DaemonInfo.OSType == "windows", "The test image is a Linux image")
	ctx := setupTest(t)
@@ -72,6 +72,13 @@ func WithSysctls(sysctls map[string]string) func(*TestContainerConfig) {
	}
}

// WithPublishAllPorts sets PublishAllPorts.
func WithPublishAllPorts(publishAll bool) func(*TestContainerConfig) {
	return func(c *TestContainerConfig) {
		c.HostConfig.PublishAllPorts = publishAll
	}
}

// WithExposedPorts sets the exposed ports of the container
func WithExposedPorts(ports ...string) func(*TestContainerConfig) {
	return func(c *TestContainerConfig) {
@@ -22,12 +22,14 @@ import (
	"github.com/moby/moby/v2/daemon/libnetwork/drivers/bridge"
	"github.com/moby/moby/v2/daemon/libnetwork/iptables"
	"github.com/moby/moby/v2/daemon/libnetwork/netlabel"
	"github.com/moby/moby/v2/integration/internal/build"
	"github.com/moby/moby/v2/integration/internal/container"
	"github.com/moby/moby/v2/integration/internal/network"
	"github.com/moby/moby/v2/integration/internal/testutils/networking"
	n "github.com/moby/moby/v2/integration/network"
	"github.com/moby/moby/v2/internal/testutil"
	"github.com/moby/moby/v2/internal/testutil/daemon"
	"github.com/moby/moby/v2/internal/testutil/fakecontext"
	"gotest.tools/v3/assert"
	is "gotest.tools/v3/assert/cmp"
	"gotest.tools/v3/icmd"
@@ -2139,3 +2141,18 @@ func TestGatewayErrorOnNetDisconnect(t *testing.T) {
	assert.Check(t, is.Contains(ctrInsp.NetworkSettings.Networks, "n1"))
	assert.Check(t, is.Contains(ctrInsp.NetworkSettings.Networks, "n2"))
}

// Regression test for https://github.com/moby/moby/issues/51620
func TestPublishAllWithNilPortBindings(t *testing.T) {
	ctx := setupTest(t)
	c := testEnv.APIClient()

	imgWithExpose := container.WithImage(build.Do(ctx, t, c,
		fakecontext.New(t, "", fakecontext.WithDockerfile("FROM busybox\nEXPOSE 80/tcp\n"))))

	_ = container.Run(ctx, t, c,
		container.WithAutoRemove,
		container.WithPublishAllPorts(true),
		imgWithExpose,
	)
}
@@ -212,41 +212,3 @@ func TestNslookupWindows(t *testing.T) {
	// can only be changed in daemon.json using feature flag "windows-dns-proxy".
	assert.Check(t, is.Contains(res.Stdout.String(), "Addresses:"))
}

// TestResolvConfPreservedOnRestart verifies that external modifications to
// /etc/resolv.conf are preserved when a non-host network container is restarted.
// Regression test for https://github.com/moby/moby/issues/51490
func TestResolvConfPreservedOnRestart(t *testing.T) {
	skip.If(t, testEnv.DaemonInfo.OSType == "windows", "No /etc/resolv.conf on Windows")

	ctx := setupTest(t)

	d := daemon.New(t, daemon.WithResolvConf(network.GenResolvConf("8.8.8.8")))
	d.StartWithBusybox(ctx, t)
	defer d.Stop(t)

	c := d.NewClientT(t)
	defer c.Close()

	const ctrName = "test-resolvconf-preserved-on-restart"
	id := container.Run(ctx, t, c, container.WithName(ctrName))
	defer c.ContainerRemove(ctx, id, client.ContainerRemoveOptions{Force: true})

	appendContent := `# hello`
	res, err := container.Exec(ctx, c, ctrName, []string{
		"sh", "-c",
		"echo '" + appendContent + "' >> /etc/resolv.conf",
	})
	assert.NilError(t, err)
	assert.Check(t, is.Equal(res.ExitCode, 0))

	// Restart the container.
	_, err = c.ContainerRestart(ctx, ctrName, client.ContainerRestartOptions{})
	assert.Assert(t, is.Nil(err))

	// Verify the modification was preserved
	res, err = container.Exec(ctx, c, ctrName, []string{"tail", "-n", "1", "/etc/resolv.conf"})
	assert.NilError(t, err)
	assert.Check(t, is.Equal(res.ExitCode, 0))
	assert.Check(t, is.Contains(res.Stdout(), appendContent))
}
@@ -10,6 +10,7 @@ import (
	"time"

	"github.com/moby/go-archive"
	"github.com/moby/go-archive/compression"
	"github.com/moby/moby/api/types/events"
	plugintypes "github.com/moby/moby/api/types/plugin"
	"github.com/moby/moby/api/types/registry"
@@ -209,7 +210,7 @@ func makePluginBundle(inPath string, opts ...CreateOpt) (io.ReadCloser, error) {
	if err := archive.NewDefaultArchiver().CopyFileWithTar(cfg.binPath, filepath.Join(inPath, "rootfs", p.Entrypoint[0])); err != nil {
		return nil, errors.Wrap(err, "error copying plugin binary to rootfs path")
	}
	tar, err := archive.Tar(inPath, archive.Uncompressed)
	tar, err := archive.Tar(inPath, compression.None)
	return tar, errors.Wrap(err, "error making plugin archive")
}
@@ -11,6 +11,7 @@ import (
	"github.com/distribution/reference"
	"github.com/google/uuid"
	"github.com/moby/go-archive"
	"github.com/moby/go-archive/compression"
	"github.com/opencontainers/go-digest"
	"github.com/opencontainers/image-spec/specs-go"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
@@ -137,7 +138,7 @@ func fileArchive(dir string, name string, content []byte) (io.ReadCloser, error)
		return nil, err
	}

	return archive.Tar(tmp, archive.Uncompressed)
	return archive.Tar(tmp, compression.None)
}

func writeLayerWithOneFile(dir string, filename string, content []byte) (ocispec.Descriptor, error) {
vendor/github.com/klauspost/compress/README.md (17 changes, generated, vendored)

@@ -27,6 +27,16 @@ Use the links above for more information on each.

# changelog

* Oct 20, 2025 - [1.18.1](https://github.com/klauspost/compress/releases/tag/v1.18.1)
	* zstd: Add simple zstd EncodeTo/DecodeTo functions https://github.com/klauspost/compress/pull/1079
	* zstd: Fix incorrect buffer size in dictionary encodes https://github.com/klauspost/compress/pull/1059
	* s2: check for cap, not len of buffer in EncodeBetter/Best by @vdarulis in https://github.com/klauspost/compress/pull/1080
	* zlib: Avoiding extra allocation in zlib.reader.Reset by @travelpolicy in https://github.com/klauspost/compress/pull/1086
	* gzhttp: remove redundant err check in zstdReader by @ryanfowler in https://github.com/klauspost/compress/pull/1090
	* flate: Faster load+store https://github.com/klauspost/compress/pull/1104
	* flate: Simplify matchlen https://github.com/klauspost/compress/pull/1101
	* flate: Use exact sizes for huffman tables https://github.com/klauspost/compress/pull/1103

* Feb 19th, 2025 - [1.18.0](https://github.com/klauspost/compress/releases/tag/v1.18.0)
	* Add unsafe little endian loaders https://github.com/klauspost/compress/pull/1036
	* fix: check `r.err != nil` but return a nil value error `err` by @alingse in https://github.com/klauspost/compress/pull/1028
@@ -36,6 +46,9 @@ Use the links above for more information on each.
	* flate: Fix matchlen L5+L6 https://github.com/klauspost/compress/pull/1049
	* flate: Cleanup & reduce casts https://github.com/klauspost/compress/pull/1050

<details>
<summary>See changes to v1.17.x</summary>

* Oct 11th, 2024 - [1.17.11](https://github.com/klauspost/compress/releases/tag/v1.17.11)
	* zstd: Fix extra CRC written with multiple Close calls https://github.com/klauspost/compress/pull/1017
	* s2: Don't use stack for index tables https://github.com/klauspost/compress/pull/1014
@@ -102,7 +115,8 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
	* s2: Do 2 overlapping match checks https://github.com/klauspost/compress/pull/839
	* flate: Add amd64 assembly matchlen https://github.com/klauspost/compress/pull/837
	* gzip: Copy bufio.Reader on Reset by @thatguystone in https://github.com/klauspost/compress/pull/860

</details>
<details>
<summary>See changes to v1.16.x</summary>

@@ -669,3 +683,4 @@ Here are other packages of good quality and pure Go (no cgo wrappers or autoconv
# license

This code is licensed under the same conditions as the original Go code. See LICENSE file.
vendor/github.com/moby/buildkit/frontend/gateway/gateway.go (77 changes, generated, vendored)

@@ -653,43 +653,7 @@ func (lbf *llbBridgeForwarder) ResolveSourceMeta(ctx context.Context, req *pb.Re
	if err != nil {
		return nil, err
	}

	r := &pb.ResolveSourceMetaResponse{
		Source: resp.Op,
	}

	if resp.Image != nil {
		r.Image = &pb.ResolveSourceImageResponse{
			Digest: string(resp.Image.Digest),
			Config: resp.Image.Config,
		}
		if resp.Image.AttestationChain != nil {
			r.Image.AttestationChain = toPBAttestationChain(resp.Image.AttestationChain)
		}
	}
	if resp.Git != nil {
		r.Git = &pb.ResolveSourceGitResponse{
			Checksum:       resp.Git.Checksum,
			Ref:            resp.Git.Ref,
			CommitChecksum: resp.Git.CommitChecksum,
			CommitObject:   resp.Git.CommitObject,
			TagObject:      resp.Git.TagObject,
		}
	}
	if resp.HTTP != nil {
		var lastModified *timestamp.Timestamp
		if resp.HTTP.LastModified != nil {
			lastModified = &timestamp.Timestamp{
				Seconds: resp.HTTP.LastModified.Unix(),
			}
		}
		r.HTTP = &pb.ResolveSourceHTTPResponse{
			Checksum:     resp.HTTP.Digest.String(),
			Filename:     resp.HTTP.Filename,
			LastModified: lastModified,
		}
	}
	return r, nil
	return ToPBResolveSourceMetaResponse(resp), nil
}

func (lbf *llbBridgeForwarder) ResolveImageConfig(ctx context.Context, req *pb.ResolveImageConfigRequest) (*pb.ResolveImageConfigResponse, error) {
@@ -1705,6 +1669,45 @@ func getCaps(label string) map[string]struct{} {
	return out
}

func ToPBResolveSourceMetaResponse(in *sourceresolver.MetaResponse) *pb.ResolveSourceMetaResponse {
	r := &pb.ResolveSourceMetaResponse{
		Source: in.Op,
	}

	if in.Image != nil {
		r.Image = &pb.ResolveSourceImageResponse{
			Digest: string(in.Image.Digest),
			Config: in.Image.Config,
		}
		if in.Image.AttestationChain != nil {
			r.Image.AttestationChain = toPBAttestationChain(in.Image.AttestationChain)
		}
	}
	if in.Git != nil {
		r.Git = &pb.ResolveSourceGitResponse{
			Checksum:       in.Git.Checksum,
			Ref:            in.Git.Ref,
			CommitChecksum: in.Git.CommitChecksum,
			CommitObject:   in.Git.CommitObject,
			TagObject:      in.Git.TagObject,
		}
	}
	if in.HTTP != nil {
		var lastModified *timestamp.Timestamp
		if in.HTTP.LastModified != nil {
			lastModified = &timestamp.Timestamp{
				Seconds: in.HTTP.LastModified.Unix(),
			}
		}
		r.HTTP = &pb.ResolveSourceHTTPResponse{
			Checksum:     in.HTTP.Digest.String(),
			Filename:     in.HTTP.Filename,
			LastModified: lastModified,
		}
	}
	return r
}

func toPBAttestationChain(ac *sourceresolver.AttestationChain) *pb.AttestationChain {
	if ac == nil {
		return nil
vendor/github.com/moby/buildkit/solver/llbsolver/policy.go (26 changes, generated, vendored)

@@ -5,6 +5,7 @@ import (
	"strings"

	"github.com/moby/buildkit/client/llb/sourceresolver"
	"github.com/moby/buildkit/frontend/gateway"
	gatewaypb "github.com/moby/buildkit/frontend/gateway/pb"
	"github.com/moby/buildkit/solver/pb"
	"github.com/moby/buildkit/sourcepolicy"
@@ -88,19 +89,26 @@ func (p *policyEvaluator) Evaluate(ctx context.Context, op *pb.Op) (bool, error)
				Platform: toOCIPlatform(metareq.Platform),
			}
		}

		if metareq.Image != nil {
			if op.ImageOpt == nil {
				op.ImageOpt = &sourceresolver.ResolveImageOpt{}
			}
			op.ImageOpt.NoConfig = metareq.Image.NoConfig
			op.ImageOpt.AttestationChain = metareq.Image.AttestationChain
		}

		if metareq.Git != nil {
			op.GitOpt = &sourceresolver.ResolveGitOpt{
				ReturnObject: metareq.Git.ReturnObject,
			}
		}

		resp, err := p.resolveSourceMetadata(ctx, metareq.Source, op, false)
		if err != nil {
			return false, errors.Wrap(err, "error resolving source metadata from policy request")
		}
		req.Source = &gatewaypb.ResolveSourceMetaResponse{
			Source: resp.Op,
		}
		if resp.Image != nil {
			req.Source.Image = &gatewaypb.ResolveSourceImageResponse{
				Digest: resp.Image.Digest.String(),
				Config: resp.Image.Config,
			}
		}
		req.Source = gateway.ToPBResolveSourceMetaResponse(resp)
		continue
	}
vendor/modules.txt (6 changes, vendored)

@@ -525,7 +525,7 @@ github.com/containernetworking/cni/pkg/types/create
github.com/containernetworking/cni/pkg/types/internal
github.com/containernetworking/cni/pkg/utils
github.com/containernetworking/cni/pkg/version
# github.com/containernetworking/plugins v1.8.0
# github.com/containernetworking/plugins v1.9.0
## explicit; go 1.24.2
github.com/containernetworking/plugins/pkg/ns
# github.com/coreos/go-semver v0.3.1
@@ -813,7 +813,7 @@ github.com/ishidawataru/sctp
# github.com/jmoiron/sqlx v1.3.3
## explicit; go 1.10
github.com/jmoiron/sqlx/types
# github.com/klauspost/compress v1.18.1
# github.com/klauspost/compress v1.18.2
## explicit; go 1.23
github.com/klauspost/compress
github.com/klauspost/compress/fse
@@ -838,7 +838,7 @@ github.com/mitchellh/hashstructure/v2
# github.com/mitchellh/reflectwalk v1.0.2
## explicit
github.com/mitchellh/reflectwalk
# github.com/moby/buildkit v0.26.2
# github.com/moby/buildkit v0.26.3
## explicit; go 1.24.3
github.com/moby/buildkit/api/services/control
github.com/moby/buildkit/api/types