Mirror of https://github.com/moby/moby.git (synced 2026-01-15 09:51:27 +00:00)

Compare commits
37 Commits
| Author | SHA1 | Date |
|---|---|---|
| | a312606256 | |
| | 754d8f168e | |
| | 4c31251093 | |
| | 8ce163b7dc | |
| | ac366556b7 | |
| | 54aee20458 | |
| | bd6b5b6d37 | |
| | 7cef0d9cd1 | |
| | 841c4c8057 | |
| | 60b9add796 | |
| | 8ad7f863b3 | |
| | dc2755273e | |
| | 7b570f00ba | |
| | 8cdcc4f3c7 | |
| | ed752f6544 | |
| | 9db1b6f28c | |
| | 6261281247 | |
| | 90355e5166 | |
| | 72615b19db | |
| | 23e7919e0a | |
| | c943936458 | |
| | 8b7940f037 | |
| | 55207ea637 | |
| | a7fa5e1deb | |
| | 8730cccee2 | |
| | 61d547bf00 | |
| | 0c9ff4ca23 | |
| | 46ca4a74b4 | |
| | dea47c0810 | |
| | f3842ab533 | |
| | e0815819de | |
| | 07c797281e | |
| | f2550b3c09 | |
| | fc14d8f932 | |
| | c035ef2283 | |
| | 703f14793e | |
| | 30e94dadbc | |
22  .github/workflows/.test.yml  vendored
@@ -19,8 +19,6 @@ env:
  DOCKER_EXPERIMENTAL: 1
  DOCKER_GRAPHDRIVER: ${{ inputs.storage == 'snapshotter' && 'overlayfs' || 'overlay2' }}
  TEST_INTEGRATION_USE_SNAPSHOTTER: ${{ inputs.storage == 'snapshotter' && '1' || '' }}
  SETUP_BUILDX_VERSION: latest
  SETUP_BUILDKIT_IMAGE: moby/buildkit:latest

jobs:
  unit:
@@ -37,10 +35,6 @@ jobs:
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
        with:
          version: ${{ env.SETUP_BUILDX_VERSION }}
          driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
          buildkitd-flags: --debug
      -
        name: Build dev image
        uses: docker/bake-action@v4
@@ -123,10 +117,6 @@ jobs:
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
        with:
          version: ${{ env.SETUP_BUILDX_VERSION }}
          driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
          buildkitd-flags: --debug
      -
        name: Build dev image
        uses: docker/bake-action@v4
@@ -177,10 +167,6 @@ jobs:
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
        with:
          version: ${{ env.SETUP_BUILDX_VERSION }}
          driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
          buildkitd-flags: --debug
      -
        name: Build dev image
        uses: docker/bake-action@v4
@@ -235,10 +221,6 @@ jobs:
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
        with:
          version: ${{ env.SETUP_BUILDX_VERSION }}
          driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
          buildkitd-flags: --debug
      -
        name: Build dev image
        uses: docker/bake-action@v4
@@ -380,10 +362,6 @@ jobs:
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
        with:
          version: ${{ env.SETUP_BUILDX_VERSION }}
          driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
          buildkitd-flags: --debug
      -
        name: Build dev image
        uses: docker/bake-action@v4
2  .github/workflows/.windows.yml  vendored
@@ -19,7 +19,7 @@ on:
      default: false

env:
  GO_VERSION: "1.21.11"
  GO_VERSION: "1.21.10"
  GOTESTLIST_VERSION: v0.3.1
  TESTSTAT_VERSION: v0.1.25
  WINDOWS_BASE_IMAGE: mcr.microsoft.com/windows/servercore
10  .github/workflows/bin-image.yml  vendored
@@ -21,8 +21,6 @@ env:
  PLATFORM: Moby Engine - Nightly
  PRODUCT: moby-bin
  PACKAGER_NAME: The Moby Project
  SETUP_BUILDX_VERSION: latest
  SETUP_BUILDKIT_IMAGE: moby/buildkit:latest

jobs:
  validate-dco:
@@ -114,10 +112,6 @@ jobs:
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
        with:
          version: ${{ env.SETUP_BUILDX_VERSION }}
          driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
          buildkitd-flags: --debug
      -
        name: Login to Docker Hub
        if: github.event_name != 'pull_request' && github.repository == 'moby/moby'
@@ -177,10 +171,6 @@ jobs:
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
        with:
          version: ${{ env.SETUP_BUILDX_VERSION }}
          driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
          buildkitd-flags: --debug
      -
        name: Login to Docker Hub
        uses: docker/login-action@v3
12  .github/workflows/buildkit.yml  vendored
@@ -13,10 +13,8 @@ on:
  pull_request:

env:
  GO_VERSION: "1.21.11"
  GO_VERSION: "1.21.10"
  DESTDIR: ./build
  SETUP_BUILDX_VERSION: latest
  SETUP_BUILDKIT_IMAGE: moby/buildkit:latest

jobs:
  validate-dco:
@@ -33,10 +31,6 @@ jobs:
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
        with:
          version: ${{ env.SETUP_BUILDX_VERSION }}
          driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
          buildkitd-flags: --debug
      -
        name: Build
        uses: docker/bake-action@v4
@@ -111,10 +105,6 @@ jobs:
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
        with:
          version: ${{ env.SETUP_BUILDX_VERSION }}
          driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
          buildkitd-flags: --debug
      -
        name: Download binary artifacts
        uses: actions/download-artifact@v4
10  .github/workflows/ci.yml  vendored
@@ -14,8 +14,6 @@ on:

env:
  DESTDIR: ./build
  SETUP_BUILDX_VERSION: latest
  SETUP_BUILDKIT_IMAGE: moby/buildkit:latest

jobs:
  validate-dco:
@@ -40,10 +38,6 @@ jobs:
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
        with:
          version: ${{ env.SETUP_BUILDX_VERSION }}
          driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
          buildkitd-flags: --debug
      -
        name: Build
        uses: docker/bake-action@v4
@@ -102,10 +96,6 @@ jobs:
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
        with:
          version: ${{ env.SETUP_BUILDX_VERSION }}
          driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
          buildkitd-flags: --debug
      -
        name: Build
        uses: docker/bake-action@v4
16  .github/workflows/test.yml  vendored
@@ -13,11 +13,9 @@ on:
  pull_request:

env:
  GO_VERSION: "1.21.11"
  GO_VERSION: "1.21.10"
  GIT_PAGER: "cat"
  PAGER: "cat"
  SETUP_BUILDX_VERSION: latest
  SETUP_BUILDKIT_IMAGE: moby/buildkit:latest

jobs:
  validate-dco:
@@ -46,10 +44,6 @@ jobs:
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
        with:
          version: ${{ env.SETUP_BUILDX_VERSION }}
          driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
          buildkitd-flags: --debug
      -
        name: Build dev image
        uses: docker/bake-action@v4
@@ -118,10 +112,6 @@ jobs:
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
        with:
          version: ${{ env.SETUP_BUILDX_VERSION }}
          driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
          buildkitd-flags: --debug
      -
        name: Build dev image
        uses: docker/bake-action@v4
@@ -178,10 +168,6 @@ jobs:
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
        with:
          version: ${{ env.SETUP_BUILDX_VERSION }}
          driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
          buildkitd-flags: --debug
      -
        name: Test
        uses: docker/bake-action@v4
@@ -38,7 +38,7 @@ linters-settings:
    alias:
      # Enforce alias to prevent it accidentally being used instead of our
      # own errdefs package (or vice-versa).
      - pkg: github.com/containerd/errdefs
      - pkg: github.com/containerd/containerd/errdefs
        alias: cerrdefs
      - pkg: github.com/opencontainers/image-spec/specs-go/v1
        alias: ocispec
@@ -51,16 +51,6 @@ linters-settings:
      deny:
        - pkg: io/ioutil
          desc: The io/ioutil package has been deprecated, see https://go.dev/doc/go1.16#ioutil
        - pkg: "github.com/stretchr/testify/assert"
          desc: Use "gotest.tools/v3/assert" instead
        - pkg: "github.com/stretchr/testify/require"
          desc: Use "gotest.tools/v3/assert" instead
        - pkg: "github.com/stretchr/testify/suite"
          desc: Do not use
        - pkg: github.com/containerd/containerd/errdefs
          desc: The errdefs package has moved to a separate module, https://github.com/containerd/errdefs
        - pkg: github.com/containerd/containerd/log
          desc: The logs package has moved to a separate module, https://github.com/containerd/log
  revive:
    rules:
      # FIXME make sure all packages have a description. Currently, there's many packages without.
@@ -134,16 +124,6 @@ issues:
      linters:
        - staticcheck

    - text: "ineffectual assignment to ctx"
      source: "ctx[, ].*=.*\\(ctx[,)]"
      linters:
        - ineffassign

    - text: "SA4006: this value of `ctx` is never used"
      source: "ctx[, ].*=.*\\(ctx[,)]"
      linters:
        - staticcheck

  # Maximum issues count per one linter. Set to 0 to disable. Default is 50.
  max-issues-per-linter: 0
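The linter hunks above enforce how error-definition packages are imported: the containerd errdefs package must be aliased as cerrdefs so it is not confused with Docker's own errdefs package, and testify is denied in favour of gotest.tools. A small illustrative snippet of code that satisfies those rules; the helper function is hypothetical and not taken from the repository, and depending on which side of this compare is built, the containerd import path is either github.com/containerd/errdefs or github.com/containerd/containerd/errdefs:

```go
package demo

import (
	cerrdefs "github.com/containerd/errdefs" // aliased per the lint rule above
	"github.com/docker/docker/errdefs"       // moby's own errdefs package
)

// isAnyNotFound is a hypothetical helper showing the two packages side by
// side; the enforced alias keeps the call sites unambiguous.
func isAnyNotFound(err error) bool {
	return errdefs.IsNotFound(err) || cerrdefs.IsNotFound(err)
}
```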
27  .mailmap
@@ -7,7 +7,6 @@
|
||||
#
|
||||
# For an explanation of this file format, consult gitmailmap(5).
|
||||
|
||||
Aaron Yoshitake <airandfingers@gmail.com>
|
||||
Aaron L. Xu <liker.xu@foxmail.com>
|
||||
Aaron L. Xu <liker.xu@foxmail.com> <likexu@harmonycloud.cn>
|
||||
Aaron Lehmann <alehmann@netflix.com>
|
||||
@@ -31,11 +30,9 @@ Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
|
||||
Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp> <suda.akihiro@lab.ntt.co.jp>
|
||||
Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp> <suda.kyoto@gmail.com>
|
||||
Akshay Moghe <akshay.moghe@gmail.com>
|
||||
Alano Terblanche <alano.terblanche@docker.com>
|
||||
Alano Terblanche <alano.terblanche@docker.com> <18033717+Benehiko@users.noreply.github.com>
|
||||
Albin Kerouanton <albinker@gmail.com>
|
||||
Albin Kerouanton <albinker@gmail.com> <557933+akerouanton@users.noreply.github.com>
|
||||
Albin Kerouanton <albinker@gmail.com> <albin@akerouanton.name>
|
||||
Albin Kerouanton <albinker@gmail.com> <557933+akerouanton@users.noreply.github.com>
|
||||
Aleksa Sarai <asarai@suse.de>
|
||||
Aleksa Sarai <asarai@suse.de> <asarai@suse.com>
|
||||
Aleksa Sarai <asarai@suse.de> <cyphar@cyphar.com>
|
||||
@@ -62,8 +59,6 @@ Allen Sun <allensun.shl@alibaba-inc.com> <allen.sun@daocloud.io>
|
||||
Allen Sun <allensun.shl@alibaba-inc.com> <shlallen1990@gmail.com>
|
||||
Anca Iordache <anca.iordache@docker.com>
|
||||
Andrea Denisse Gómez <crypto.andrea@protonmail.ch>
|
||||
Andrew Baxter <423qpsxzhh8k3h@s.rendaw.me>
|
||||
Andrew Baxter <423qpsxzhh8k3h@s.rendaw.me> andrew <>
|
||||
Andrew Kim <taeyeonkim90@gmail.com>
|
||||
Andrew Kim <taeyeonkim90@gmail.com> <akim01@fortinet.com>
|
||||
Andrew Weiss <andrew.weiss@docker.com> <andrew.weiss@microsoft.com>
|
||||
@@ -124,7 +119,6 @@ Brian Goff <cpuguy83@gmail.com> <bgoff@cpuguy83-mbp.home>
|
||||
Brian Goff <cpuguy83@gmail.com> <bgoff@cpuguy83-mbp.local>
|
||||
Brian Goff <cpuguy83@gmail.com> <brian.goff@microsoft.com>
|
||||
Brian Goff <cpuguy83@gmail.com> <cpuguy@hey.com>
|
||||
Calvin Liu <flycalvin@qq.com>
|
||||
Cameron Sparr <gh@sparr.email>
|
||||
Carlos de Paula <me@carlosedp.com>
|
||||
Chander Govindarajan <chandergovind@gmail.com>
|
||||
@@ -136,7 +130,6 @@ Chen Mingjie <chenmingjie0828@163.com>
|
||||
Chen Qiu <cheney-90@hotmail.com>
|
||||
Chen Qiu <cheney-90@hotmail.com> <21321229@zju.edu.cn>
|
||||
Chengfei Shang <cfshang@alauda.io>
|
||||
Chentianze <cmoman@126.com>
|
||||
Chris Dias <cdias@microsoft.com>
|
||||
Chris McKinnel <chris.mckinnel@tangentlabs.co.uk>
|
||||
Chris Price <cprice@mirantis.com>
|
||||
@@ -145,8 +138,6 @@ Chris Telfer <ctelfer@docker.com>
|
||||
Chris Telfer <ctelfer@docker.com> <ctelfer@users.noreply.github.com>
|
||||
Christopher Biscardi <biscarch@sketcht.com>
|
||||
Christopher Latham <sudosurootdev@gmail.com>
|
||||
Christopher Petito <chrisjpetito@gmail.com>
|
||||
Christopher Petito <chrisjpetito@gmail.com> <47751006+krissetto@users.noreply.github.com>
|
||||
Christy Norman <christy@linux.vnet.ibm.com>
|
||||
Chun Chen <ramichen@tencent.com> <chenchun.feed@gmail.com>
|
||||
Corbin Coleman <corbin.coleman@docker.com>
|
||||
@@ -182,8 +173,6 @@ Dattatraya Kumbhar <dattatraya.kumbhar@gslab.com>
|
||||
Dave Goodchild <buddhamagnet@gmail.com>
|
||||
Dave Henderson <dhenderson@gmail.com> <Dave.Henderson@ca.ibm.com>
|
||||
Dave Tucker <dt@docker.com> <dave@dtucker.co.uk>
|
||||
David Dooling <dooling@gmail.com>
|
||||
David Dooling <dooling@gmail.com> <david.dooling@docker.com>
|
||||
David M. Karr <davidmichaelkarr@gmail.com>
|
||||
David Sheets <dsheets@docker.com> <sheets@alum.mit.edu>
|
||||
David Sissitka <me@dsissitka.com>
|
||||
@@ -230,8 +219,6 @@ Felix Hupfeld <felix@quobyte.com> <quofelix@users.noreply.github.com>
|
||||
Felix Ruess <felix.ruess@gmail.com> <felix.ruess@roboception.de>
|
||||
Feng Yan <fy2462@gmail.com>
|
||||
Fengtu Wang <wangfengtu@huawei.com> <wangfengtu@huawei.com>
|
||||
Filipe Pina <hzlu1ot0@duck.com>
|
||||
Filipe Pina <hzlu1ot0@duck.com> <636320+fopina@users.noreply.github.com>
|
||||
Francisco Carriedo <fcarriedo@gmail.com>
|
||||
Frank Rosquin <frank.rosquin+github@gmail.com> <frank.rosquin@gmail.com>
|
||||
Frank Yang <yyb196@gmail.com>
|
||||
@@ -283,7 +270,6 @@ Hollie Teal <hollie@docker.com> <hollie.teal@docker.com>
|
||||
Hollie Teal <hollie@docker.com> <hollietealok@users.noreply.github.com>
|
||||
hsinko <21551195@zju.edu.cn> <hsinko@users.noreply.github.com>
|
||||
Hu Keping <hukeping@huawei.com>
|
||||
Huajin Tong <fliterdashen@gmail.com>
|
||||
Hui Kang <hkang.sunysb@gmail.com>
|
||||
Hui Kang <hkang.sunysb@gmail.com> <kangh@us.ibm.com>
|
||||
Huu Nguyen <huu@prismskylabs.com> <whoshuu@gmail.com>
|
||||
@@ -350,8 +336,6 @@ John Howard <github@lowenna.com> <john.howard@microsoft.com>
|
||||
John Howard <github@lowenna.com> <john@lowenna.com>
|
||||
John Stephens <johnstep@docker.com> <johnstep@users.noreply.github.com>
|
||||
Jon Surrell <jon.surrell@gmail.com> <jon.surrell@automattic.com>
|
||||
Jonathan A. Sternberg <jonathansternberg@gmail.com>
|
||||
Jonathan A. Sternberg <jonathansternberg@gmail.com> <jonathan.sternberg@docker.com>
|
||||
Jonathan Choy <jonathan.j.choy@gmail.com>
|
||||
Jonathan Choy <jonathan.j.choy@gmail.com> <oni@tetsujinlabs.com>
|
||||
Jordan Arentsen <blissdev@gmail.com>
|
||||
@@ -494,14 +478,14 @@ Mikael Davranche <mikael.davranche@corp.ovh.com> <mikael.davranche@corp.ovh.net>
|
||||
Mike Casas <mkcsas0@gmail.com> <mikecasas@users.noreply.github.com>
|
||||
Mike Goelzer <mike.goelzer@docker.com> <mgoelzer@docker.com>
|
||||
Milas Bowman <devnull@milas.dev>
|
||||
Milas Bowman <devnull@milas.dev> <milas.bowman@docker.com>
|
||||
Milas Bowman <devnull@milas.dev> <milasb@gmail.com>
|
||||
Milas Bowman <devnull@milas.dev> <milas.bowman@docker.com>
|
||||
Milind Chawre <milindchawre@gmail.com>
|
||||
Misty Stanley-Jones <misty@docker.com> <misty@apache.org>
|
||||
Mohammad Banikazemi <MBanikazemi@gmail.com>
|
||||
Mohammad Banikazemi <MBanikazemi@gmail.com> <mb@us.ibm.com>
|
||||
Mohd Sadiq <mohdsadiq058@gmail.com> <42430865+msadiq058@users.noreply.github.com>
|
||||
Mohd Sadiq <mohdsadiq058@gmail.com> <mohdsadiq058@gmail.com>
|
||||
Mohd Sadiq <mohdsadiq058@gmail.com> <42430865+msadiq058@users.noreply.github.com>
|
||||
Mohit Soni <mosoni@ebay.com> <mohitsoni1989@gmail.com>
|
||||
Moorthy RS <rsmoorthy@gmail.com> <rsmoorthy@users.noreply.github.com>
|
||||
Moysés Borges <moysesb@gmail.com>
|
||||
@@ -526,7 +510,6 @@ Olli Janatuinen <olli.janatuinen@gmail.com> <olljanat@users.noreply.github.com>
|
||||
Onur Filiz <onur.filiz@microsoft.com>
|
||||
Onur Filiz <onur.filiz@microsoft.com> <ofiliz@users.noreply.github.com>
|
||||
Ouyang Liduo <oyld0210@163.com>
|
||||
Patrick St. laurent <patrick@saint-laurent.us>
|
||||
Patrick Stapleton <github@gdi2290.com>
|
||||
Paul Liljenberg <liljenberg.paul@gmail.com> <letters@paulnotcom.se>
|
||||
Pavel Tikhomirov <ptikhomirov@virtuozzo.com> <ptikhomirov@parallels.com>
|
||||
@@ -550,8 +533,6 @@ Qin TianHuan <tianhuan@bingotree.cn>
|
||||
Ray Tsang <rayt@google.com> <saturnism@users.noreply.github.com>
|
||||
Renaud Gaubert <rgaubert@nvidia.com> <renaud.gaubert@gmail.com>
|
||||
Richard Scothern <richard.scothern@gmail.com>
|
||||
Rob Murray <rob.murray@docker.com>
|
||||
Rob Murray <rob.murray@docker.com> <148866618+robmry@users.noreply.github.com>
|
||||
Robert Terhaar <rterhaar@atlanticdynamic.com> <robbyt@users.noreply.github.com>
|
||||
Roberto G. Hashioka <roberto.hashioka@docker.com> <roberto_hashioka@hotmail.com>
|
||||
Roberto Muñoz Fernández <robertomf@gmail.com> <roberto.munoz.fernandez.contractor@bbva.com>
|
||||
@@ -562,7 +543,6 @@ Rongxiang Song <tinysong1226@gmail.com>
|
||||
Rony Weng <ronyweng@synology.com>
|
||||
Ross Boucher <rboucher@gmail.com>
|
||||
Rui Cao <ruicao@alauda.io>
|
||||
Rui JingAn <quiterace@gmail.com>
|
||||
Runshen Zhu <runshen.zhu@gmail.com>
|
||||
Ryan Stelly <ryan.stelly@live.com>
|
||||
Ryoga Saito <contact@proelbtn.com>
|
||||
@@ -583,7 +563,6 @@ Sebastiaan van Stijn <github@gone.nl> <sebastiaan@ws-key-sebas3.dpi1.dpi>
|
||||
Sebastiaan van Stijn <github@gone.nl> <thaJeztah@users.noreply.github.com>
|
||||
Sebastian Thomschke <sebthom@users.noreply.github.com>
|
||||
Seongyeol Lim <seongyeol37@gmail.com>
|
||||
Serhii Nakon <serhii.n@thescimus.com>
|
||||
Shaun Kaasten <shaunk@gmail.com>
|
||||
Shawn Landden <shawn@churchofgit.com> <shawnlandden@gmail.com>
|
||||
Shengbo Song <thomassong@tencent.com>
|
||||
|
||||
29  AUTHORS
@@ -10,7 +10,6 @@ Aaron Huslage <huslage@gmail.com>
|
||||
Aaron L. Xu <liker.xu@foxmail.com>
|
||||
Aaron Lehmann <alehmann@netflix.com>
|
||||
Aaron Welch <welch@packet.net>
|
||||
Aaron Yoshitake <airandfingers@gmail.com>
|
||||
Abel Muiño <amuino@gmail.com>
|
||||
Abhijeet Kasurde <akasurde@redhat.com>
|
||||
Abhinandan Prativadi <aprativadi@gmail.com>
|
||||
@@ -63,7 +62,6 @@ alambike <alambike@gmail.com>
|
||||
Alan Hoyle <alan@alanhoyle.com>
|
||||
Alan Scherger <flyinprogrammer@gmail.com>
|
||||
Alan Thompson <cloojure@gmail.com>
|
||||
Alano Terblanche <alano.terblanche@docker.com>
|
||||
Albert Callarisa <shark234@gmail.com>
|
||||
Albert Zhang <zhgwenming@gmail.com>
|
||||
Albin Kerouanton <albinker@gmail.com>
|
||||
@@ -143,7 +141,6 @@ Andreas Tiefenthaler <at@an-ti.eu>
|
||||
Andrei Gherzan <andrei@resin.io>
|
||||
Andrei Ushakov <aushakov@netflix.com>
|
||||
Andrei Vagin <avagin@gmail.com>
|
||||
Andrew Baxter <423qpsxzhh8k3h@s.rendaw.me>
|
||||
Andrew C. Bodine <acbodine@us.ibm.com>
|
||||
Andrew Clay Shafer <andrewcshafer@gmail.com>
|
||||
Andrew Duckworth <grillopress@gmail.com>
|
||||
@@ -196,7 +193,6 @@ Anton Löfgren <anton.lofgren@gmail.com>
|
||||
Anton Nikitin <anton.k.nikitin@gmail.com>
|
||||
Anton Polonskiy <anton.polonskiy@gmail.com>
|
||||
Anton Tiurin <noxiouz@yandex.ru>
|
||||
Antonio Aguilar <antonio@zoftko.com>
|
||||
Antonio Murdaca <antonio.murdaca@gmail.com>
|
||||
Antonis Kalipetis <akalipetis@gmail.com>
|
||||
Antony Messerli <amesserl@rackspace.com>
|
||||
@@ -225,6 +221,7 @@ Avi Das <andas222@gmail.com>
|
||||
Avi Kivity <avi@scylladb.com>
|
||||
Avi Miller <avi.miller@oracle.com>
|
||||
Avi Vaid <avaid1996@gmail.com>
|
||||
ayoshitake <airandfingers@gmail.com>
|
||||
Azat Khuyiyakhmetov <shadow_uz@mail.ru>
|
||||
Bao Yonglei <baoyonglei@huawei.com>
|
||||
Bardia Keyoumarsi <bkeyouma@ucsc.edu>
|
||||
@@ -319,7 +316,6 @@ Burke Libbey <burke@libbey.me>
|
||||
Byung Kang <byung.kang.ctr@amrdec.army.mil>
|
||||
Caleb Spare <cespare@gmail.com>
|
||||
Calen Pennington <cale@edx.org>
|
||||
Calvin Liu <flycalvin@qq.com>
|
||||
Cameron Boehmer <cameron.boehmer@gmail.com>
|
||||
Cameron Sparr <gh@sparr.email>
|
||||
Cameron Spear <cameronspear@gmail.com>
|
||||
@@ -366,7 +362,6 @@ Chen Qiu <cheney-90@hotmail.com>
|
||||
Cheng-mean Liu <soccerl@microsoft.com>
|
||||
Chengfei Shang <cfshang@alauda.io>
|
||||
Chengguang Xu <cgxu519@gmx.com>
|
||||
Chentianze <cmoman@126.com>
|
||||
Chenyang Yan <memory.yancy@gmail.com>
|
||||
chenyuzhu <chenyuzhi@oschina.cn>
|
||||
Chetan Birajdar <birajdar.chetan@gmail.com>
|
||||
@@ -414,7 +409,6 @@ Christopher Crone <christopher.crone@docker.com>
|
||||
Christopher Currie <codemonkey+github@gmail.com>
|
||||
Christopher Jones <tophj@linux.vnet.ibm.com>
|
||||
Christopher Latham <sudosurootdev@gmail.com>
|
||||
Christopher Petito <chrisjpetito@gmail.com>
|
||||
Christopher Rigor <crigor@gmail.com>
|
||||
Christy Norman <christy@linux.vnet.ibm.com>
|
||||
Chun Chen <ramichen@tencent.com>
|
||||
@@ -675,7 +669,6 @@ Erik Hollensbe <github@hollensbe.org>
|
||||
Erik Inge Bolsø <knan@redpill-linpro.com>
|
||||
Erik Kristensen <erik@erikkristensen.com>
|
||||
Erik Sipsma <erik@sipsma.dev>
|
||||
Erik Sjölund <erik.sjolund@gmail.com>
|
||||
Erik St. Martin <alakriti@gmail.com>
|
||||
Erik Weathers <erikdw@gmail.com>
|
||||
Erno Hopearuoho <erno.hopearuoho@gmail.com>
|
||||
@@ -738,7 +731,6 @@ Feroz Salam <feroz.salam@sourcegraph.com>
|
||||
Ferran Rodenas <frodenas@gmail.com>
|
||||
Filipe Brandenburger <filbranden@google.com>
|
||||
Filipe Oliveira <contato@fmoliveira.com.br>
|
||||
Filipe Pina <hzlu1ot0@duck.com>
|
||||
Flavio Castelli <fcastelli@suse.com>
|
||||
Flavio Crisciani <flavio.crisciani@docker.com>
|
||||
Florian <FWirtz@users.noreply.github.com>
|
||||
@@ -783,7 +775,6 @@ Gabriel L. Somlo <gsomlo@gmail.com>
|
||||
Gabriel Linder <linder.gabriel@gmail.com>
|
||||
Gabriel Monroy <gabriel@opdemand.com>
|
||||
Gabriel Nicolas Avellaneda <avellaneda.gabriel@gmail.com>
|
||||
Gabriel Tomitsuka <gabriel@tomitsuka.com>
|
||||
Gaetan de Villele <gdevillele@gmail.com>
|
||||
Galen Sampson <galen.sampson@gmail.com>
|
||||
Gang Qiao <qiaohai8866@gmail.com>
|
||||
@@ -799,7 +790,6 @@ Geoff Levand <geoff@infradead.org>
|
||||
Geoffrey Bachelet <grosfrais@gmail.com>
|
||||
Geon Kim <geon0250@gmail.com>
|
||||
George Kontridze <george@bugsnag.com>
|
||||
George Ma <mayangang@outlook.com>
|
||||
George MacRorie <gmacr31@gmail.com>
|
||||
George Xie <georgexsh@gmail.com>
|
||||
Georgi Hristozov <georgi@forkbomb.nl>
|
||||
@@ -885,8 +875,6 @@ Hsing-Yu (David) Chen <davidhsingyuchen@gmail.com>
|
||||
hsinko <21551195@zju.edu.cn>
|
||||
Hu Keping <hukeping@huawei.com>
|
||||
Hu Tao <hutao@cn.fujitsu.com>
|
||||
Huajin Tong <fliterdashen@gmail.com>
|
||||
huang-jl <1046678590@qq.com>
|
||||
HuanHuan Ye <logindaveye@gmail.com>
|
||||
Huanzhong Zhang <zhanghuanzhong90@gmail.com>
|
||||
Huayi Zhang <irachex@gmail.com>
|
||||
@@ -921,7 +909,6 @@ Illo Abdulrahim <abdulrahim.illo@nokia.com>
|
||||
Ilya Dmitrichenko <errordeveloper@gmail.com>
|
||||
Ilya Gusev <mail@igusev.ru>
|
||||
Ilya Khlopotov <ilya.khlopotov@gmail.com>
|
||||
imalasong <2879499479@qq.com>
|
||||
imre Fitos <imre.fitos+github@gmail.com>
|
||||
inglesp <peter.inglesby@gmail.com>
|
||||
Ingo Gottwald <in.gottwald@gmail.com>
|
||||
@@ -939,7 +926,6 @@ J Bruni <joaohbruni@yahoo.com.br>
|
||||
J. Nunn <jbnunn@gmail.com>
|
||||
Jack Danger Canty <jackdanger@squareup.com>
|
||||
Jack Laxson <jackjrabbit@gmail.com>
|
||||
Jack Walker <90711509+j2walker@users.noreply.github.com>
|
||||
Jacob Atzen <jacob@jacobatzen.dk>
|
||||
Jacob Edelman <edelman.jd@gmail.com>
|
||||
Jacob Tomlinson <jacob@tom.linson.uk>
|
||||
@@ -983,7 +969,6 @@ Jannick Fahlbusch <git@jf-projects.de>
|
||||
Januar Wayong <januar@gmail.com>
|
||||
Jared Biel <jared.biel@bolderthinking.com>
|
||||
Jared Hocutt <jaredh@netapp.com>
|
||||
Jaroslav Jindrak <dzejrou@gmail.com>
|
||||
Jaroslaw Zabiello <hipertracker@gmail.com>
|
||||
Jasmine Hegman <jasmine@jhegman.com>
|
||||
Jason A. Donenfeld <Jason@zx2c4.com>
|
||||
@@ -999,7 +984,6 @@ Jason Shepherd <jason@jasonshepherd.net>
|
||||
Jason Smith <jasonrichardsmith@gmail.com>
|
||||
Jason Sommer <jsdirv@gmail.com>
|
||||
Jason Stangroome <jason@codeassassin.com>
|
||||
Jasper Siepkes <siepkes@serviceplanet.nl>
|
||||
Javier Bassi <javierbassi@gmail.com>
|
||||
jaxgeller <jacksongeller@gmail.com>
|
||||
Jay <teguhwpurwanto@gmail.com>
|
||||
@@ -1028,7 +1012,6 @@ Jeffrey Bolle <jeffreybolle@gmail.com>
|
||||
Jeffrey Morgan <jmorganca@gmail.com>
|
||||
Jeffrey van Gogh <jvg@google.com>
|
||||
Jenny Gebske <jennifer@gebske.de>
|
||||
Jeongseok Kang <piono623@naver.com>
|
||||
Jeremy Chambers <jeremy@thehipbot.com>
|
||||
Jeremy Grosser <jeremy@synack.me>
|
||||
Jeremy Huntwork <jhuntwork@lightcubesolutions.com>
|
||||
@@ -1046,7 +1029,6 @@ Jezeniel Zapanta <jpzapanta22@gmail.com>
|
||||
Jhon Honce <jhonce@redhat.com>
|
||||
Ji.Zhilong <zhilongji@gmail.com>
|
||||
Jian Liao <jliao@alauda.io>
|
||||
Jian Zeng <anonymousknight96@gmail.com>
|
||||
Jian Zhang <zhangjian.fnst@cn.fujitsu.com>
|
||||
Jiang Jinyang <jjyruby@gmail.com>
|
||||
Jianyong Wu <jianyong.wu@arm.com>
|
||||
@@ -1111,7 +1093,6 @@ Jon Johnson <jonjohnson@google.com>
|
||||
Jon Surrell <jon.surrell@gmail.com>
|
||||
Jon Wedaman <jweede@gmail.com>
|
||||
Jonas Dohse <jonas@dohse.ch>
|
||||
Jonas Geiler <git@jonasgeiler.com>
|
||||
Jonas Heinrich <Jonas@JonasHeinrich.com>
|
||||
Jonas Pfenniger <jonas@pfenniger.name>
|
||||
Jonathan A. Schweder <jonathanschweder@gmail.com>
|
||||
@@ -1279,7 +1260,6 @@ Lakshan Perera <lakshan@laktek.com>
|
||||
Lalatendu Mohanty <lmohanty@redhat.com>
|
||||
Lance Chen <cyen0312@gmail.com>
|
||||
Lance Kinley <lkinley@loyaltymethods.com>
|
||||
Lars Andringa <l.s.andringa@rug.nl>
|
||||
Lars Butler <Lars.Butler@gmail.com>
|
||||
Lars Kellogg-Stedman <lars@redhat.com>
|
||||
Lars R. Damerow <lars@pixar.com>
|
||||
@@ -1686,7 +1666,6 @@ Patrick Böänziger <patrick.baenziger@bsi-software.com>
|
||||
Patrick Devine <patrick.devine@docker.com>
|
||||
Patrick Haas <patrickhaas@google.com>
|
||||
Patrick Hemmer <patrick.hemmer@gmail.com>
|
||||
Patrick St. laurent <patrick@saint-laurent.us>
|
||||
Patrick Stapleton <github@gdi2290.com>
|
||||
Patrik Cyvoct <patrik@ptrk.io>
|
||||
pattichen <craftsbear@gmail.com>
|
||||
@@ -1892,7 +1871,6 @@ Royce Remer <royceremer@gmail.com>
|
||||
Rozhnov Alexandr <nox73@ya.ru>
|
||||
Rudolph Gottesheim <r.gottesheim@loot.at>
|
||||
Rui Cao <ruicao@alauda.io>
|
||||
Rui JingAn <quiterace@gmail.com>
|
||||
Rui Lopes <rgl@ruilopes.com>
|
||||
Ruilin Li <liruilin4@huawei.com>
|
||||
Runshen Zhu <runshen.zhu@gmail.com>
|
||||
@@ -1989,7 +1967,6 @@ Sergey Evstifeev <sergey.evstifeev@gmail.com>
|
||||
Sergii Kabashniuk <skabashnyuk@codenvy.com>
|
||||
Sergio Lopez <slp@redhat.com>
|
||||
Serhat Gülçiçek <serhat25@gmail.com>
|
||||
Serhii Nakon <serhii.n@thescimus.com>
|
||||
SeungUkLee <lsy931106@gmail.com>
|
||||
Sevki Hasirci <s@sevki.org>
|
||||
Shane Canon <scanon@lbl.gov>
|
||||
@@ -2199,7 +2176,6 @@ Tomek Mańko <tomek.manko@railgun-solutions.com>
|
||||
Tommaso Visconti <tommaso.visconti@gmail.com>
|
||||
Tomoya Tabuchi <t@tomoyat1.com>
|
||||
Tomáš Hrčka <thrcka@redhat.com>
|
||||
Tomáš Virtus <nechtom@gmail.com>
|
||||
tonic <tonicbupt@gmail.com>
|
||||
Tonny Xu <tonny.xu@gmail.com>
|
||||
Tony Abboud <tdabboud@hotmail.com>
|
||||
@@ -2244,7 +2220,6 @@ Victor I. Wood <viw@t2am.com>
|
||||
Victor Lyuboslavsky <victor@victoreda.com>
|
||||
Victor Marmol <vmarmol@google.com>
|
||||
Victor Palma <palma.victor@gmail.com>
|
||||
Victor Toni <victor.toni@gmail.com>
|
||||
Victor Vieux <victor.vieux@docker.com>
|
||||
Victoria Bialas <victoria.bialas@docker.com>
|
||||
Vijaya Kumar K <vijayak@caviumnetworks.com>
|
||||
@@ -2278,7 +2253,6 @@ VladimirAus <v_roudakov@yahoo.com>
|
||||
Vladislav Kolesnikov <vkolesnikov@beget.ru>
|
||||
Vlastimil Zeman <vlastimil.zeman@diffblue.com>
|
||||
Vojtech Vitek (V-Teq) <vvitek@redhat.com>
|
||||
voloder <110066198+voloder@users.noreply.github.com>
|
||||
Walter Leibbrandt <github@wrl.co.za>
|
||||
Walter Stanish <walter@pratyeka.org>
|
||||
Wang Chao <chao.wang@ucloud.cn>
|
||||
@@ -2296,7 +2270,6 @@ Wassim Dhif <wassimdhif@gmail.com>
|
||||
Wataru Ishida <ishida.wataru@lab.ntt.co.jp>
|
||||
Wayne Chang <wayne@neverfear.org>
|
||||
Wayne Song <wsong@docker.com>
|
||||
weebney <weebney@gmail.com>
|
||||
Weerasak Chongnguluam <singpor@gmail.com>
|
||||
Wei Fu <fuweid89@gmail.com>
|
||||
Wei Wu <wuwei4455@gmail.com>
|
||||
|
||||
@@ -101,7 +101,7 @@ the contributors guide.
        <td>
          <p>
            Register for the Docker Community Slack at
            <a href="https://dockr.ly/comm-slack" target="_blank">https://dockr.ly/comm-slack</a>.
            <a href="https://dockr.ly/slack" target="_blank">https://dockr.ly/slack</a>.
            We use the #moby-project channel for general discussion, and there are separate channels for other Moby projects such as #containerd.
          </p>
        </td>
36  Dockerfile
@@ -1,6 +1,6 @@
# syntax=docker/dockerfile:1.7
# syntax=docker/dockerfile:1

ARG GO_VERSION=1.21.11
ARG GO_VERSION=1.21.10
ARG BASE_DEBIAN_DISTRO="bookworm"
ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}"
ARG XX_VERSION=1.4.0
@@ -8,12 +8,12 @@ ARG XX_VERSION=1.4.0
ARG VPNKIT_VERSION=0.5.0

ARG DOCKERCLI_REPOSITORY="https://github.com/docker/cli.git"
ARG DOCKERCLI_VERSION=v27.0.2
ARG DOCKERCLI_VERSION=v26.0.0
# cli version used for integration-cli tests
ARG DOCKERCLI_INTEGRATION_REPOSITORY="https://github.com/docker/cli.git"
ARG DOCKERCLI_INTEGRATION_VERSION=v17.06.2-ce
ARG BUILDX_VERSION=0.15.1
ARG COMPOSE_VERSION=v2.28.1
ARG BUILDX_VERSION=0.13.1
ARG COMPOSE_VERSION=v2.25.0

ARG SYSTEMD="false"
ARG DOCKER_STATIC=1
@@ -24,12 +24,6 @@ ARG DOCKER_STATIC=1
# specified here should match a current release.
ARG REGISTRY_VERSION=2.8.3

# delve is currently only supported on linux/amd64 and linux/arm64;
# https://github.com/go-delve/delve/blob/v1.8.1/pkg/proc/native/support_sentinel.go#L1-L6
ARG DELVE_SUPPORTED=${TARGETPLATFORM#linux/amd64} DELVE_SUPPORTED=${DELVE_SUPPORTED#linux/arm64}
ARG DELVE_SUPPORTED=${DELVE_SUPPORTED:+"unsupported"}
ARG DELVE_SUPPORTED=${DELVE_SUPPORTED:-"supported"}

# cross compilation helper
FROM --platform=$BUILDPLATFORM tonistiigi/xx:${XX_VERSION} AS xx

@@ -150,7 +144,7 @@ RUN git init . && git remote add origin "https://github.com/go-delve/delve.git"
ARG DELVE_VERSION=v1.21.1
RUN git fetch -q --depth 1 origin "${DELVE_VERSION}" +refs/tags/*:refs/tags/* && git checkout -q FETCH_HEAD

FROM base AS delve-supported
FROM base AS delve-build
WORKDIR /usr/src/delve
ARG TARGETPLATFORM
RUN --mount=from=delve-src,src=/usr/src/delve,rw \
@@ -161,8 +155,16 @@ RUN --mount=from=delve-src,src=/usr/src/delve,rw \
  xx-verify /build/dlv
EOT

FROM binary-dummy AS delve-unsupported
FROM delve-${DELVE_SUPPORTED} AS delve
# delve is currently only supported on linux/amd64 and linux/arm64;
# https://github.com/go-delve/delve/blob/v1.8.1/pkg/proc/native/support_sentinel.go#L1-L6
FROM binary-dummy AS delve-windows
FROM binary-dummy AS delve-linux-arm
FROM binary-dummy AS delve-linux-ppc64le
FROM binary-dummy AS delve-linux-s390x
FROM delve-build AS delve-linux-amd64
FROM delve-build AS delve-linux-arm64
FROM delve-linux-${TARGETARCH} AS delve-linux
FROM delve-${TARGETOS} AS delve

FROM base AS tomll
# GOTOML_VERSION specifies the version of the tomll binary to build and install
@@ -196,7 +198,7 @@ RUN git init . && git remote add origin "https://github.com/containerd/container
# When updating the binary version you may also need to update the vendor
# version to pick up bug fixes or new APIs, however, usually the Go packages
# are built from a commit from the master branch.
ARG CONTAINERD_VERSION=v1.7.18
ARG CONTAINERD_VERSION=v1.7.15
RUN git fetch -q --depth 1 origin "${CONTAINERD_VERSION}" +refs/tags/*:refs/tags/* && git checkout -q FETCH_HEAD

FROM base AS containerd-build
@@ -229,7 +231,7 @@ FROM binary-dummy AS containerd-windows
FROM containerd-${TARGETOS} AS containerd

FROM base AS golangci_lint
ARG GOLANGCI_LINT_VERSION=v1.59.1
ARG GOLANGCI_LINT_VERSION=v1.55.2
RUN --mount=type=cache,target=/root/.cache/go-build \
    --mount=type=cache,target=/go/pkg/mod \
    GOBIN=/build/ GO111MODULE=on go install "github.com/golangci/golangci-lint/cmd/golangci-lint@${GOLANGCI_LINT_VERSION}" \
@@ -287,7 +289,7 @@ RUN git init . && git remote add origin "https://github.com/opencontainers/runc.
# that is used. If you need to update runc, open a pull request in the containerd
# project first, and update both after that is merged. When updating RUNC_VERSION,
# consider updating runc in vendor.mod accordingly.
ARG RUNC_VERSION=v1.1.13
ARG RUNC_VERSION=v1.1.12
RUN git fetch -q --depth 1 origin "${RUNC_VERSION}" +refs/tags/*:refs/tags/* && git checkout -q FETCH_HEAD

FROM base AS runc-build
@@ -5,7 +5,7 @@

# This represents the bare minimum required to build and test Docker.

ARG GO_VERSION=1.21.11
ARG GO_VERSION=1.21.10

ARG BASE_DEBIAN_DISTRO="bookworm"
ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}"

@@ -161,10 +161,10 @@ FROM ${WINDOWS_BASE_IMAGE}:${WINDOWS_BASE_IMAGE_TAG}
# Use PowerShell as the default shell
SHELL ["powershell", "-Command", "$ErrorActionPreference = 'Stop'; $ProgressPreference = 'SilentlyContinue';"]

ARG GO_VERSION=1.21.11
ARG GO_VERSION=1.21.10
ARG GOTESTSUM_VERSION=v1.8.2
ARG GOWINRES_VERSION=v0.3.1
ARG CONTAINERD_VERSION=v1.7.18
ARG CONTAINERD_VERSION=v1.7.15

# Environment variable notes:
# - GO_VERSION must be consistent with 'Dockerfile' used by Linux.
@@ -38,7 +38,6 @@
		"laurazard",
		"mhbauer",
		"neersighted",
		"robmry",
		"rumpl",
		"runcom",
		"samuelkarp",
@@ -76,6 +75,7 @@
		"olljanat",
		"programmerq",
		"ripcurld",
		"robmry",
		"sam-thibault",
		"samwhited",
		"thajeztah"
20  Makefile
@@ -1,3 +1,5 @@
.PHONY: all binary dynbinary build cross help install manpages run shell test test-docker-py test-integration test-unit validate validate-% win

DOCKER ?= docker
BUILDX ?= $(DOCKER) buildx

@@ -155,19 +157,15 @@ BAKE_CMD := $(BUILDX) bake

default: binary

.PHONY: all
all: build ## validate all checks, build linux binaries, run all tests,\ncross build non-linux binaries, and generate archives
	$(DOCKER_RUN_DOCKER) bash -c 'hack/validate/default && hack/make.sh'

.PHONY: binary
binary: bundles ## build statically linked linux binaries
	$(BAKE_CMD) binary

.PHONY: dynbinary
dynbinary: bundles ## build dynamically linked linux binaries
	$(BAKE_CMD) dynbinary

.PHONY: cross
cross: bundles ## cross build the binaries
	$(BAKE_CMD) binary-cross

@@ -181,15 +179,12 @@ clean: clean-cache
clean-cache: ## remove the docker volumes that are used for caching in the dev-container
	docker volume rm -f docker-dev-cache docker-mod-cache

.PHONY: help
help: ## this help
	@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z0-9_-]+:.*?## / {gsub("\\\\n",sprintf("\n%22c",""), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)

.PHONY: install
install: ## install the linux binaries
	KEEPBUNDLE=1 hack/make.sh install-binary

.PHONY: run
run: build ## run the docker daemon in a container
	$(DOCKER_RUN_DOCKER) sh -c "KEEPBUNDLE=1 hack/make.sh install-binary run"

@@ -202,22 +197,17 @@ endif
build: bundles
	$(BUILD_CMD) $(BUILD_OPTS) $(shell_target) --load -t "$(DOCKER_IMAGE)" .

.PHONY: shell
shell: build ## start a shell inside the build env
	$(DOCKER_RUN_DOCKER) bash

.PHONY: test
test: build test-unit ## run the unit, integration and docker-py tests
	$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary test-integration test-docker-py

.PHONY: test-docker-py
test-docker-py: build ## run the docker-py tests
	$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary test-docker-py

.PHONY: test-integration-cli
test-integration-cli: test-integration ## (DEPRECATED) use test-integration

.PHONY: test-integration
ifneq ($(and $(TEST_SKIP_INTEGRATION),$(TEST_SKIP_INTEGRATION_CLI)),)
test-integration:
	@echo Both integrations suites skipped per environment variables
@@ -226,29 +216,23 @@ test-integration: build ## run the integration tests
	$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary test-integration
endif

.PHONY: test-integration-flaky
test-integration-flaky: build ## run the stress test for all new integration tests
	$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary test-integration-flaky

.PHONY: test-unit
test-unit: build ## run the unit tests
	$(DOCKER_RUN_DOCKER) hack/test/unit

.PHONY: validate
validate: build ## validate DCO, Seccomp profile generation, gofmt,\n./pkg/ isolation, golint, tests, tomls, go vet and vendor
	$(DOCKER_RUN_DOCKER) hack/validate/all

.PHONY: validate-generate-files
validate-generate-files:
	$(BUILD_CMD) --target "validate" \
		--output "type=cacheonly" \
		--file "./hack/dockerfiles/generate-files.Dockerfile" .

.PHONY: validate-%
validate-%: build ## validate specific check
	$(DOCKER_RUN_DOCKER) hack/validate/$*

.PHONY: win
win: bundles ## cross build the binary for windows
	$(BAKE_CMD) --set *.platform=windows/amd64 binary
@@ -3,7 +3,7 @@ package api // import "github.com/docker/docker/api"
// Common constants for daemon and client.
const (
	// DefaultVersion of the current REST API.
	DefaultVersion = "1.46"
	DefaultVersion = "1.45"

	// MinSupportedAPIVersion is the minimum API version that can be supported
	// by the API server, specified as "major.minor". Note that the daemon
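DefaultVersion moves between "1.45" and "1.46" in this compare. Behaviour that depends on the negotiated API version is gated in the routers with versions.LessThan, as the getContainersJSON hunk further down shows for HostConfig.Annotations. A minimal sketch of that pattern; the helper name is an illustrative assumption, only the comparison mirrors the repository code:

```go
package demo

import "github.com/docker/docker/api/types/versions"

// annotationsSupported reports whether the negotiated API version is new
// enough to carry HostConfig.Annotations (added in API v1.46).
func annotationsSupported(apiVersion string) bool {
	return !versions.LessThan(apiVersion, "1.46")
}
```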
@@ -5,7 +5,7 @@ import (
	"fmt"
	"net/http"

	cerrdefs "github.com/containerd/errdefs"
	cerrdefs "github.com/containerd/containerd/errdefs"
	"github.com/containerd/log"
	"github.com/docker/distribution/registry/api/errcode"
	"github.com/docker/docker/errdefs"
@@ -24,37 +24,42 @@ func FromError(err error) int {
		return http.StatusInternalServerError
	}

	var statusCode int

	// Stop right there
	// Are you sure you should be adding a new error class here? Do one of the existing ones work?

	// Note that the below functions are already checking the error causal chain for matches.
	switch {
	case errdefs.IsNotFound(err):
		return http.StatusNotFound
		statusCode = http.StatusNotFound
	case errdefs.IsInvalidParameter(err):
		return http.StatusBadRequest
		statusCode = http.StatusBadRequest
	case errdefs.IsConflict(err):
		return http.StatusConflict
		statusCode = http.StatusConflict
	case errdefs.IsUnauthorized(err):
		return http.StatusUnauthorized
		statusCode = http.StatusUnauthorized
	case errdefs.IsUnavailable(err):
		return http.StatusServiceUnavailable
		statusCode = http.StatusServiceUnavailable
	case errdefs.IsForbidden(err):
		return http.StatusForbidden
		statusCode = http.StatusForbidden
	case errdefs.IsNotModified(err):
		return http.StatusNotModified
		statusCode = http.StatusNotModified
	case errdefs.IsNotImplemented(err):
		return http.StatusNotImplemented
		statusCode = http.StatusNotImplemented
	case errdefs.IsSystem(err) || errdefs.IsUnknown(err) || errdefs.IsDataLoss(err) || errdefs.IsDeadline(err) || errdefs.IsCancelled(err):
		return http.StatusInternalServerError
		statusCode = http.StatusInternalServerError
	default:
		if statusCode := statusCodeFromGRPCError(err); statusCode != http.StatusInternalServerError {
		statusCode = statusCodeFromGRPCError(err)
		if statusCode != http.StatusInternalServerError {
			return statusCode
		}
		if statusCode := statusCodeFromContainerdError(err); statusCode != http.StatusInternalServerError {
		statusCode = statusCodeFromContainerdError(err)
		if statusCode != http.StatusInternalServerError {
			return statusCode
		}
		if statusCode := statusCodeFromDistributionError(err); statusCode != http.StatusInternalServerError {
		statusCode = statusCodeFromDistributionError(err)
		if statusCode != http.StatusInternalServerError {
			return statusCode
		}
		if e, ok := err.(causer); ok {
@@ -66,9 +71,13 @@ func FromError(err error) int {
			"error":      err,
			"error_type": fmt.Sprintf("%T", err),
		}).Debug("FIXME: Got an API for which error does not match any expected type!!!")

		return http.StatusInternalServerError
	}

	if statusCode == 0 {
		statusCode = http.StatusInternalServerError
	}

	return statusCode
}

// statusCodeFromGRPCError returns status code according to gRPC error
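The FromError hunks above differ in control flow: one side returns from inside each case, the other assigns the result to a statusCode variable, falls through to a single return, and defaults to 500 when nothing matched. A minimal sketch of the fall-through shape, assuming simplified stand-in error predicates instead of the real errdefs helpers:

```go
package demo

import "net/http"

// isNotFound and isInvalidParameter are stand-ins for the errdefs predicates
// used in the real function; they are assumptions for illustration only.
func isNotFound(err error) bool         { return false }
func isInvalidParameter(err error) bool { return false }

// fromError mirrors the single-return shape: assign a status code per branch,
// then fall through and default to 500 if nothing matched.
func fromError(err error) int {
	if err == nil {
		return http.StatusInternalServerError
	}

	var statusCode int
	switch {
	case isNotFound(err):
		statusCode = http.StatusNotFound
	case isInvalidParameter(err):
		statusCode = http.StatusBadRequest
	default:
		// In the real code, gRPC/containerd/distribution translators run here.
	}

	if statusCode == 0 {
		statusCode = http.StatusInternalServerError
	}
	return statusCode
}
```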
@@ -1,17 +1,12 @@
package httputils // import "github.com/docker/docker/api/server/httputils"

import (
	"encoding/json"
	"fmt"
	"net/http"
	"strconv"
	"strings"

	"github.com/distribution/reference"
	"github.com/docker/docker/errdefs"
	"github.com/pkg/errors"

	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// BoolValue transforms a form value in different formats into a boolean type.
@@ -114,24 +109,3 @@ func ArchiveFormValues(r *http.Request, vars map[string]string) (ArchiveOptions,
	}
	return ArchiveOptions{name, path}, nil
}

// DecodePlatform decodes the OCI platform JSON string into a Platform struct.
func DecodePlatform(platformJSON string) (*ocispec.Platform, error) {
	var p ocispec.Platform

	if err := json.Unmarshal([]byte(platformJSON), &p); err != nil {
		return nil, errdefs.InvalidParameter(errors.Wrap(err, "failed to parse platform"))
	}

	hasAnyOptional := (p.Variant != "" || p.OSVersion != "" || len(p.OSFeatures) > 0)

	if p.OS == "" && p.Architecture == "" && hasAnyOptional {
		return nil, errdefs.InvalidParameter(errors.New("optional platform fields provided, but OS and Architecture are missing"))
	}

	if p.OS == "" || p.Architecture == "" {
		return nil, errdefs.InvalidParameter(errors.New("both OS and Architecture must be provided"))
	}

	return &p, nil
}
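DecodePlatform, shown in the hunk above, rejects a platform JSON string unless both OS and Architecture are present before any optional fields are accepted. A hypothetical caller parsing a `platform` query parameter might look like the sketch below; only the DecodePlatform call corresponds to code from the diff, the handler wiring and route are assumptions:

```go
package demo

import (
	"fmt"
	"net/http"

	"github.com/docker/docker/api/server/httputils"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// getImageHandler is an illustrative route handler. A request such as
// ?platform={"os":"linux"} would be rejected with an invalid-parameter error.
func getImageHandler(w http.ResponseWriter, r *http.Request) {
	var platform *ocispec.Platform
	if v := r.URL.Query().Get("platform"); v != "" {
		p, err := httputils.DecodePlatform(v)
		if err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		platform = p
	}
	fmt.Fprintf(w, "requested platform: %+v\n", platform)
}
```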
@@ -1,16 +1,9 @@
package httputils // import "github.com/docker/docker/api/server/httputils"

import (
	"encoding/json"
	"net/http"
	"net/url"
	"testing"

	"github.com/containerd/containerd/platforms"
	"github.com/docker/docker/errdefs"

	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"gotest.tools/v3/assert"
)

func TestBoolValue(t *testing.T) {
@@ -110,23 +103,3 @@ func TestInt64ValueOrDefaultWithError(t *testing.T) {
		t.Fatal("Expected an error.")
	}
}

func TestParsePlatformInvalid(t *testing.T) {
	for _, tc := range []ocispec.Platform{
		{
			OSVersion:  "1.2.3",
			OSFeatures: []string{"a", "b"},
		},
		{OSVersion: "12.0"},
		{OS: "linux"},
		{Architecture: "amd64"},
	} {
		t.Run(platforms.Format(tc), func(t *testing.T) {
			js, err := json.Marshal(tc)
			assert.NilError(t, err)

			_, err = DecodePlatform(string(js))
			assert.Check(t, errdefs.IsInvalidParameter(err))
		})
	}
}
@@ -4,7 +4,6 @@ import (
	"context"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"sort"

@@ -17,11 +16,7 @@ import (

// WriteLogStream writes an encoded byte stream of log messages from the
// messages channel, multiplexing them with a stdcopy.Writer if mux is true
func WriteLogStream(_ context.Context, w http.ResponseWriter, msgs <-chan *backend.LogMessage, config *container.LogsOptions, mux bool) {
	// See https://github.com/moby/moby/issues/47448
	// Trigger headers to be written immediately.
	w.WriteHeader(http.StatusOK)

func WriteLogStream(_ context.Context, w io.Writer, msgs <-chan *backend.LogMessage, config *container.LogsOptions, mux bool) {
	wf := ioutils.NewWriteFlusher(w)
	defer wf.Close()
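The signature change above gives WriteLogStream an http.ResponseWriter so it can call WriteHeader(http.StatusOK) before any log message arrives; per the issue referenced in the hunk (moby/moby#47448), clients otherwise wait for the response headers until the first message is produced. A stripped-down sketch of that "send headers first, then stream" pattern; the handler shape and the string message channel are illustrative stand-ins for the real backend.LogMessage stream:

```go
package demo

import "net/http"

// streamLogs commits the status and headers immediately, then streams each
// message and flushes so a client following logs on a quiet container is not
// left waiting for the first write.
func streamLogs(w http.ResponseWriter, msgs <-chan string) {
	w.WriteHeader(http.StatusOK)
	if f, ok := w.(http.Flusher); ok {
		f.Flush()
	}

	for m := range msgs {
		_, _ = w.Write([]byte(m + "\n"))
		if f, ok := w.(http.Flusher); ok {
			f.Flush()
		}
	}
}
```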
@@ -10,15 +10,11 @@ import (

// CORSMiddleware injects CORS headers to each request
// when it's configured.
//
// Deprecated: CORS headers should not be set on the API. This feature will be removed in the next release.
type CORSMiddleware struct {
	defaultHeaders string
}

// NewCORSMiddleware creates a new CORSMiddleware with default headers.
//
// Deprecated: CORS headers should not be set on the API. This feature will be removed in the next release.
func NewCORSMiddleware(d string) CORSMiddleware {
	return CORSMiddleware{defaultHeaders: d}
}
@@ -25,6 +25,7 @@ import (
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/progress"
	"github.com/docker/docker/pkg/streamformatter"
	units "github.com/docker/go-units"
	"github.com/pkg/errors"
)

@@ -104,7 +105,7 @@ func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBui
	}

	if ulimitsJSON := r.FormValue("ulimits"); ulimitsJSON != "" {
		buildUlimits := []*container.Ulimit{}
		buildUlimits := []*units.Ulimit{}
		if err := json.Unmarshal([]byte(ulimitsJSON), &buildUlimits); err != nil {
			return nil, invalidParam{errors.Wrap(err, "error reading ulimit settings")}
		}
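The hunk above only changes which package the Ulimit element type is referenced from; the decoding of the `ulimits` form value is the same on both sides. A standalone sketch of that decoding, using a local struct as a stand-in for the API type (field names follow go-units' Ulimit):

```go
package demo

import "encoding/json"

// ulimit is a local stand-in for the API's Ulimit type.
type ulimit struct {
	Name string
	Hard int64
	Soft int64
}

// parseUlimits mirrors the decoding done in newImageBuildOptions: the form
// value is a JSON array of ulimit objects, e.g. [{"Name":"nofile","Soft":1024,"Hard":2048}].
func parseUlimits(ulimitsJSON string) ([]*ulimit, error) {
	var out []*ulimit
	if err := json.Unmarshal([]byte(ulimitsJSON), &out); err != nil {
		return nil, err
	}
	return out, nil
}
```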
@@ -14,19 +14,19 @@ import (

// execBackend includes functions to implement to provide exec functionality.
type execBackend interface {
	ContainerExecCreate(name string, options *container.ExecOptions) (string, error)
	ContainerExecCreate(name string, config *types.ExecConfig) (string, error)
	ContainerExecInspect(id string) (*backend.ExecInspect, error)
	ContainerExecResize(name string, height, width int) error
	ContainerExecStart(ctx context.Context, name string, options backend.ExecStartConfig) error
	ContainerExecStart(ctx context.Context, name string, options container.ExecStartOptions) error
	ExecExists(name string) (bool, error)
}

// copyBackend includes functions to implement to provide container copy functionality.
type copyBackend interface {
	ContainerArchivePath(name string, path string) (content io.ReadCloser, stat *container.PathStat, err error)
	ContainerArchivePath(name string, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error)
	ContainerExport(ctx context.Context, name string, out io.Writer) error
	ContainerExtractToDir(name, path string, copyUIDGID, noOverwriteDirNonDir bool, content io.Reader) error
	ContainerStatPath(name string, path string) (stat *container.PathStat, err error)
	ContainerStatPath(name string, path string) (stat *types.ContainerPathStat, err error)
}

// stateBackend includes functions to implement to provide container state lifecycle functionality.
@@ -62,7 +62,7 @@ type attachBackend interface {

// systemBackend includes functions to implement to provide system wide containers functionality
type systemBackend interface {
	ContainersPrune(ctx context.Context, pruneFilters filters.Args) (*container.PruneReport, error)
	ContainersPrune(ctx context.Context, pruneFilters filters.Args) (*types.ContainersPruneReport, error)
}

type commitBackend interface {
@@ -22,14 +22,11 @@ import (
|
||||
"github.com/docker/docker/api/types/network"
|
||||
"github.com/docker/docker/api/types/versions"
|
||||
containerpkg "github.com/docker/docker/container"
|
||||
networkSettings "github.com/docker/docker/daemon/network"
|
||||
"github.com/docker/docker/errdefs"
|
||||
"github.com/docker/docker/libnetwork/netlabel"
|
||||
"github.com/docker/docker/pkg/ioutils"
|
||||
"github.com/docker/docker/runconfig"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
"go.opentelemetry.io/otel"
|
||||
"golang.org/x/net/websocket"
|
||||
)
|
||||
|
||||
@@ -42,13 +39,6 @@ func (s *containerRouter) postCommit(ctx context.Context, w http.ResponseWriter,
|
||||
return err
|
||||
}
|
||||
|
||||
// FIXME(thaJeztah): change this to unmarshal just [container.Config]:
|
||||
// The commit endpoint accepts a [container.Config], but the decoder uses a
|
||||
// [container.CreateRequest], which is a superset, and also contains
|
||||
// [container.HostConfig] and [network.NetworkConfig]. Those structs
|
||||
// are discarded here, but decoder.DecodeConfig also performs validation,
|
||||
// so a request containing those additional fields would result in a
|
||||
// validation error.
|
||||
config, _, _, err := s.decoder.DecodeConfig(r.Body)
|
||||
if err != nil && !errors.Is(err, io.EOF) { // Do not fail if body is empty.
|
||||
return err
|
||||
@@ -104,15 +94,6 @@ func (s *containerRouter) getContainersJSON(ctx context.Context, w http.Response
|
||||
return err
|
||||
}
|
||||
|
||||
version := httputils.VersionFromContext(ctx)
|
||||
|
||||
if versions.LessThan(version, "1.46") {
|
||||
for _, c := range containers {
|
||||
// Ignore HostConfig.Annotations because it was added in API v1.46.
|
||||
c.HostConfig.Annotations = nil
|
||||
}
|
||||
}
|
||||
|
||||
return httputils.WriteJSON(w, http.StatusOK, containers)
|
||||
}
|
||||
|
||||
@@ -131,18 +112,9 @@ func (s *containerRouter) getContainersStats(ctx context.Context, w http.Respons
|
||||
}
|
||||
|
||||
return s.backend.ContainerStats(ctx, vars["name"], &backend.ContainerStatsConfig{
|
||||
Stream: stream,
|
||||
OneShot: oneShot,
|
||||
OutStream: func() io.Writer {
|
||||
// Assume that when this is called the request is OK.
|
||||
w.WriteHeader(http.StatusOK)
|
||||
if !stream {
|
||||
return w
|
||||
}
|
||||
wf := ioutils.NewWriteFlusher(w)
|
||||
wf.Flush()
|
||||
return wf
|
||||
},
|
||||
Stream: stream,
|
||||
OneShot: oneShot,
|
||||
OutStream: w,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -197,9 +169,6 @@ func (s *containerRouter) getContainersExport(ctx context.Context, w http.Respon
|
||||
}
|
||||
|
||||
func (s *containerRouter) postContainersStart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
ctx, span := otel.Tracer("").Start(ctx, "containerRouter.postContainersStart")
|
||||
defer span.End()
|
||||
|
||||
// If contentLength is -1, we can assumed chunked encoding
|
||||
// or more technically that the length is unknown
|
||||
// https://golang.org/src/pkg/net/http/request.go#L139
|
||||
@@ -487,27 +456,15 @@ func (s *containerRouter) postContainersCreate(ctx context.Context, w http.Respo
|
||||
if hostConfig == nil {
|
||||
hostConfig = &container.HostConfig{}
|
||||
}
|
||||
if hostConfig.NetworkMode == "" {
|
||||
hostConfig.NetworkMode = "default"
|
||||
}
|
||||
if networkingConfig == nil {
|
||||
networkingConfig = &network.NetworkingConfig{}
|
||||
}
|
||||
if networkingConfig.EndpointsConfig == nil {
|
||||
networkingConfig.EndpointsConfig = make(map[string]*network.EndpointSettings)
|
||||
}
|
||||
// The NetworkMode "default" is used as a way to express a container should
|
||||
// be attached to the OS-dependant default network, in an OS-independent
|
||||
// way. Doing this conversion as soon as possible ensures we have less
|
||||
// NetworkMode to handle down the path (including in the
|
||||
// backward-compatibility layer we have just below).
|
||||
//
|
||||
// Note that this is not the only place where this conversion has to be
|
||||
// done (as there are various other places where containers get created).
|
||||
if hostConfig.NetworkMode == "" || hostConfig.NetworkMode.IsDefault() {
|
||||
hostConfig.NetworkMode = networkSettings.DefaultNetwork
|
||||
if nw, ok := networkingConfig.EndpointsConfig[network.NetworkDefault]; ok {
|
||||
networkingConfig.EndpointsConfig[hostConfig.NetworkMode.NetworkName()] = nw
|
||||
delete(networkingConfig.EndpointsConfig, network.NetworkDefault)
|
||||
}
|
||||
}
|
||||
|
||||
version := httputils.VersionFromContext(ctx)
|
||||
|
||||
@@ -650,12 +607,6 @@ func (s *containerRouter) postContainersCreate(ctx context.Context, w http.Respo
|
||||
warnings = append(warnings, warn)
|
||||
}
|
||||
|
||||
if warn, err := handleSysctlBC(hostConfig, networkingConfig, version); err != nil {
|
||||
return err
|
||||
} else if warn != "" {
|
||||
warnings = append(warnings, warn)
|
||||
}
|
||||
|
||||
if hostConfig.PidsLimit != nil && *hostConfig.PidsLimit <= 0 {
|
||||
// Don't set a limit if either no limit was specified, or "unlimited" was
|
||||
// explicitly set.
|
||||
@@ -695,15 +646,27 @@ func handleMACAddressBC(config *container.Config, hostConfig *container.HostConf
|
||||
}
|
||||
return "", nil
|
||||
}
|
||||
if !hostConfig.NetworkMode.IsBridge() && !hostConfig.NetworkMode.IsUserDefined() {
|
||||
if !hostConfig.NetworkMode.IsDefault() && !hostConfig.NetworkMode.IsBridge() && !hostConfig.NetworkMode.IsUserDefined() {
|
||||
return "", runconfig.ErrConflictContainerNetworkAndMac
|
||||
}
|
||||
|
||||
epConfig, err := epConfigForNetMode(version, hostConfig.NetworkMode, networkingConfig)
|
||||
if err != nil {
|
||||
return "", err
|
||||
// There cannot be more than one entry in EndpointsConfig with API < 1.44.
|
||||
|
||||
// If there's no EndpointsConfig, create a place to store the configured address. It is
|
||||
// safe to use NetworkMode as the network name, whether it's a name or id/short-id, as
|
||||
// it will be normalised later and there is no other EndpointSettings object that might
|
||||
// refer to this network/endpoint.
|
||||
if len(networkingConfig.EndpointsConfig) == 0 {
|
||||
nwName := hostConfig.NetworkMode.NetworkName()
|
||||
networkingConfig.EndpointsConfig[nwName] = &network.EndpointSettings{}
|
||||
}
|
||||
// There's exactly one network in EndpointsConfig, either from the API or just-created.
|
||||
// Migrate the container-wide setting to it.
|
||||
// No need to check for a match between NetworkMode and the names/ids in EndpointsConfig,
|
||||
// the old version of the API would have applied the address to this network anyway.
|
||||
for _, ep := range networkingConfig.EndpointsConfig {
|
||||
ep.MacAddress = deprecatedMacAddress
|
||||
}
|
||||
epConfig.MacAddress = deprecatedMacAddress
|
||||
return "", nil
|
||||
}
|
||||
|
||||
@@ -712,17 +675,32 @@ func handleMACAddressBC(config *container.Config, hostConfig *container.HostConf
|
||||
return "", nil
|
||||
}
|
||||
var warning string
|
||||
if hostConfig.NetworkMode.IsBridge() || hostConfig.NetworkMode.IsUserDefined() {
|
||||
ep, err := epConfigForNetMode(version, hostConfig.NetworkMode, networkingConfig)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "unable to migrate container-wide MAC address to a specific network")
|
||||
}
|
||||
// ep is the endpoint that needs the container-wide MAC address; migrate the address
|
||||
// to it, or bail out if there's a mismatch.
|
||||
if ep.MacAddress == "" {
|
||||
ep.MacAddress = deprecatedMacAddress
|
||||
} else if ep.MacAddress != deprecatedMacAddress {
|
||||
return "", errdefs.InvalidParameter(errors.New("the container-wide MAC address must match the endpoint-specific MAC address for the main network, or be left empty"))
|
||||
if hostConfig.NetworkMode.IsDefault() || hostConfig.NetworkMode.IsBridge() || hostConfig.NetworkMode.IsUserDefined() {
|
||||
nwName := hostConfig.NetworkMode.NetworkName()
|
||||
// If there's no endpoint config, create a place to store the configured address.
|
||||
if len(networkingConfig.EndpointsConfig) == 0 {
|
||||
networkingConfig.EndpointsConfig[nwName] = &network.EndpointSettings{
|
||||
MacAddress: deprecatedMacAddress,
|
||||
}
|
||||
} else {
|
||||
// There is existing endpoint config - if it's not indexed by NetworkMode.Name(), we
|
||||
// can't tell which network the container-wide settings was intended for. NetworkMode,
|
||||
// the keys in EndpointsConfig and the NetworkID in EndpointsConfig may mix network
|
||||
// name/id/short-id. It's not safe to create EndpointsConfig under the NetworkMode
|
||||
// name to store the container-wide MAC address, because that may result in two sets
|
||||
// of EndpointsConfig for the same network and one set will be discarded later. So,
|
||||
// reject the request ...
|
||||
ep, ok := networkingConfig.EndpointsConfig[nwName]
|
||||
if !ok {
|
||||
return "", errdefs.InvalidParameter(errors.New("if a container-wide MAC address is supplied, HostConfig.NetworkMode must match the identity of a network in NetworkSettings.Networks"))
|
||||
}
|
||||
// ep is the endpoint that needs the container-wide MAC address; migrate the address
|
||||
// to it, or bail out if there's a mismatch.
|
||||
if ep.MacAddress == "" {
|
||||
ep.MacAddress = deprecatedMacAddress
|
||||
} else if ep.MacAddress != deprecatedMacAddress {
|
||||
return "", errdefs.InvalidParameter(errors.New("the container-wide MAC address must match the endpoint-specific MAC address for the main network, or be left empty"))
|
||||
}
|
||||
}
|
||||
}
|
||||
warning = "The container-wide MacAddress field is now deprecated. It should be specified in EndpointsConfig instead."
|
||||
@@ -731,146 +709,6 @@ func handleMACAddressBC(config *container.Config, hostConfig *container.HostConf
|
||||
return warning, nil
|
||||
}
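A usage sketch for the migration above. The parameter order follows the hunk header and its call site (config, hostConfig, networkingConfig, API version); the network name and MAC address are illustrative:

	config := &container.Config{MacAddress: "02:42:ac:11:00:02"} // deprecated container-wide field
	hostConfig := &container.HostConfig{NetworkMode: "mynet"}
	netCfg := &network.NetworkingConfig{EndpointsConfig: map[string]*network.EndpointSettings{}}

	warn, err := handleMACAddressBC(config, hostConfig, netCfg, "1.43")
	// On success the address is moved onto the endpoint for the selected network:
	//   netCfg.EndpointsConfig["mynet"].MacAddress == "02:42:ac:11:00:02"
	// Judging by the hunks above, API versions >= 1.44 additionally get a deprecation warning.
	_, _ = warn, err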

// handleSysctlBC migrates top-level network endpoint-specific '--sysctl'
// settings to a DriverOpts entry for an endpoint. This is necessary because sysctls
// are applied during container task creation, but sysctls that name an interface
// (for example 'net.ipv6.conf.eth0.forwarding') cannot be applied until the
// interface has been created. So, these settings are removed from hostConfig.Sysctls
// and added to DriverOpts[netlabel.EndpointSysctls].
//
// Because interface names ('ethN') are allocated sequentially, and the order of
// network connections is not deterministic on container restart, only 'eth0'
// would work reliably in a top-level '--sysctl' option, and then only when
// there's a single initial network connection. So, settings for 'eth0' are
// migrated to the primary interface, identified by 'hostConfig.NetworkMode'.
// Settings for other interfaces are treated as errors.
//
// In the DriverOpts, because the interface name cannot be determined in advance, the
// interface name is replaced by "IFNAME". For example, 'net.ipv6.conf.eth0.forwarding'
// becomes 'net.ipv6.conf.IFNAME.forwarding'. The value in DriverOpts is a
// comma-separated list.
//
// A warning is generated when settings are migrated.
func handleSysctlBC(
	hostConfig *container.HostConfig,
	netConfig *network.NetworkingConfig,
	version string,
) (string, error) {
	if !hostConfig.NetworkMode.IsPrivate() {
		return "", nil
	}

	var ep *network.EndpointSettings
	var toDelete []string
	var netIfSysctls []string
	for k, v := range hostConfig.Sysctls {
		// If the sysctl name matches "net.*.*.eth0.*" ...
		if spl := strings.SplitN(k, ".", 5); len(spl) == 5 && spl[0] == "net" && strings.HasPrefix(spl[3], "eth") {
			netIfSysctl := fmt.Sprintf("net.%s.%s.IFNAME.%s=%s", spl[1], spl[2], spl[4], v)
			// Find the EndpointConfig to migrate settings to, if not already found.
			if ep == nil {
				// Per-endpoint sysctls were introduced in API version 1.46. Migration is
				// needed, but refuse to do it automatically for newer versions of the API.
				if versions.GreaterThan(version, "1.46") {
					return "", fmt.Errorf("interface specific sysctl setting %q must be supplied using driver option '%s'",
						k, netlabel.EndpointSysctls)
				}
				var err error
				ep, err = epConfigForNetMode(version, hostConfig.NetworkMode, netConfig)
				if err != nil {
					return "", fmt.Errorf("unable to find a network for sysctl %s: %w", k, err)
				}
			}
			// Only try to migrate settings for "eth0", anything else would always
			// have behaved unpredictably.
			if spl[3] != "eth0" {
				return "", fmt.Errorf(`unable to determine network endpoint for sysctl %s, use driver option '%s' to set per-interface sysctls`,
					k, netlabel.EndpointSysctls)
			}
			// Prepare the migration.
			toDelete = append(toDelete, k)
			netIfSysctls = append(netIfSysctls, netIfSysctl)
		}
	}
	if ep == nil {
		return "", nil
	}

	newDriverOpt := strings.Join(netIfSysctls, ",")
	warning := fmt.Sprintf(`Migrated sysctl %q to DriverOpts{%q:%q}.`,
		strings.Join(toDelete, ","),
		netlabel.EndpointSysctls, newDriverOpt)

	// Append existing per-endpoint sysctls to the migrated sysctls (give priority
	// to per-endpoint settings).
	if ep.DriverOpts == nil {
		ep.DriverOpts = map[string]string{}
	}
	if oldDriverOpt, ok := ep.DriverOpts[netlabel.EndpointSysctls]; ok {
		newDriverOpt += "," + oldDriverOpt
	}
	ep.DriverOpts[netlabel.EndpointSysctls] = newDriverOpt

	// Delete migrated settings from the top-level sysctls.
	for _, k := range toDelete {
		delete(hostConfig.Sysctls, k)
	}

	return warning, nil
}
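A usage sketch, mirroring the "migrate to new ep" test case further down:

	hostCfg := &container.HostConfig{
		NetworkMode: "mynet",
		Sysctls: map[string]string{
			"net.ipv6.conf.all.disable_ipv6": "0", // container-wide, stays in Sysctls
			"net.ipv6.conf.eth0.accept_ra":   "2", // interface-specific, migrated
		},
	}
	netCfg := &network.NetworkingConfig{EndpointsConfig: map[string]*network.EndpointSettings{}}

	warning, err := handleSysctlBC(hostCfg, netCfg, "1.46")
	// err == nil; warning describes the migration, and afterwards:
	//   hostCfg.Sysctls == map[string]string{"net.ipv6.conf.all.disable_ipv6": "0"}
	//   netCfg.EndpointsConfig["mynet"].DriverOpts[netlabel.EndpointSysctls] == "net.ipv6.conf.IFNAME.accept_ra=2"
	_, _ = warning, err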

// epConfigForNetMode finds, or creates, an entry in netConfig.EndpointsConfig
// corresponding to nwMode.
//
// nwMode.NetworkName() may be the network's name, its id, or its short-id.
//
// The corresponding endpoint in netConfig.EndpointsConfig may be keyed on a
// different one of name/id/short-id. If there's any ambiguity (there are
// endpoints but the names don't match), return an error and do not create a new
// endpoint, because it might be a duplicate.
func epConfigForNetMode(
	version string,
	nwMode container.NetworkMode,
	netConfig *network.NetworkingConfig,
) (*network.EndpointSettings, error) {
	nwName := nwMode.NetworkName()

	// It's always safe to create an EndpointsConfig entry under nwName if there are
	// no entries already (because there can't be an entry for this network nwName
	// refers to under any other name/short-id/id).
	if len(netConfig.EndpointsConfig) == 0 {
		es := &network.EndpointSettings{}
		netConfig.EndpointsConfig = map[string]*network.EndpointSettings{
			nwName: es,
		}
		return es, nil
	}

	// There cannot be more than one entry in EndpointsConfig with API < 1.44.
	if versions.LessThan(version, "1.44") {
		// No need to check for a match between NetworkMode and the names/ids in EndpointsConfig,
		// the old version of the API would pick this network anyway.
		for _, ep := range netConfig.EndpointsConfig {
			return ep, nil
		}
	}

	// There is existing endpoint config - if it's not indexed by NetworkMode.Name(), we
	// can't tell which network the container-wide settings are intended for. NetworkMode,
	// the keys in EndpointsConfig and the NetworkID in EndpointsConfig may mix network
	// name/id/short-id. It's not safe to create EndpointsConfig under the NetworkMode
	// name to store the container-wide setting, because that may result in two sets
	// of EndpointsConfig for the same network and one set will be discarded later. So,
	// reject the request ...
	ep, ok := netConfig.EndpointsConfig[nwName]
	if !ok {
		return nil, errdefs.InvalidParameter(
			errors.New("HostConfig.NetworkMode must match the identity of a network in NetworkSettings.Networks"))
	}

	return ep, nil
}
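Behaviour sketch, matching the TestEpConfigForNetMode cases below:

	netCfg := &network.NetworkingConfig{
		EndpointsConfig: map[string]*network.EndpointSettings{
			"anything": {EndpointID: "epone"},
		},
	}

	// API <= 1.43: at most one endpoint can exist, so it is returned regardless of its key.
	ep, err := epConfigForNetMode("1.43", container.NetworkMode("mynet"), netCfg)
	// ep.EndpointID == "epone", err == nil

	// API >= 1.44: the key must match NetworkMode; otherwise an invalid-parameter error is returned.
	_, err = epConfigForNetMode("1.44", container.NetworkMode("mynet"), netCfg)
	// err != nil
	_ = ep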
|
||||
|
||||
func (s *containerRouter) deleteContainers(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
if err := httputils.ParseForm(r); err != nil {
|
||||
return err
|
||||
@@ -925,7 +763,7 @@ func (s *containerRouter) postContainersAttach(ctx context.Context, w http.Respo
|
||||
}
|
||||
|
||||
contentType := types.MediaTypeRawStream
|
||||
setupStreams := func(multiplexed bool, cancel func()) (io.ReadCloser, io.Writer, io.Writer, error) {
|
||||
setupStreams := func(multiplexed bool) (io.ReadCloser, io.Writer, io.Writer, error) {
|
||||
conn, _, err := hijacker.Hijack()
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
@@ -943,8 +781,6 @@ func (s *containerRouter) postContainersAttach(ctx context.Context, w http.Respo
|
||||
fmt.Fprintf(conn, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n")
|
||||
}
|
||||
|
||||
go notifyClosed(ctx, conn, cancel)
|
||||
|
||||
closer := func() error {
|
||||
httputils.CloseStreams(conn)
|
||||
return nil
|
||||
@@ -993,7 +829,7 @@ func (s *containerRouter) wsContainersAttach(ctx context.Context, w http.Respons
|
||||
|
||||
version := httputils.VersionFromContext(ctx)
|
||||
|
||||
setupStreams := func(multiplexed bool, cancel func()) (io.ReadCloser, io.Writer, io.Writer, error) {
|
||||
setupStreams := func(multiplexed bool) (io.ReadCloser, io.Writer, io.Writer, error) {
|
||||
wsChan := make(chan *websocket.Conn)
|
||||
h := func(conn *websocket.Conn) {
|
||||
wsChan <- conn
|
||||
@@ -1012,8 +848,6 @@ func (s *containerRouter) wsContainersAttach(ctx context.Context, w http.Respons
|
||||
if versions.GreaterThanOrEqualTo(version, "1.28") {
|
||||
conn.PayloadType = websocket.BinaryFrame
|
||||
}
|
||||
|
||||
// TODO: Close notifications
|
||||
return conn, conn, conn, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -1,12 +1,10 @@
|
||||
package container
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/network"
|
||||
"github.com/docker/docker/libnetwork/netlabel"
|
||||
"gotest.tools/v3/assert"
|
||||
is "gotest.tools/v3/assert/cmp"
|
||||
)
|
||||
@@ -104,7 +102,7 @@ func TestHandleMACAddressBC(t *testing.T) {
|
||||
ctrWideMAC: "11:22:33:44:55:66",
|
||||
networkMode: "aNetId",
|
||||
epConfig: map[string]*network.EndpointSettings{"aNetName": {}},
|
||||
expError: "unable to migrate container-wide MAC address to a specific network: HostConfig.NetworkMode must match the identity of a network in NetworkSettings.Networks",
|
||||
expError: "if a container-wide MAC address is supplied, HostConfig.NetworkMode must match the identity of a network in NetworkSettings.Networks",
|
||||
expCtrWideMAC: "11:22:33:44:55:66",
|
||||
},
|
||||
{
|
||||
@@ -128,8 +126,8 @@ func TestHandleMACAddressBC(t *testing.T) {
|
||||
}
|
||||
epConfig := make(map[string]*network.EndpointSettings, len(tc.epConfig))
|
||||
for k, v := range tc.epConfig {
|
||||
v := *v
|
||||
epConfig[k] = &v
|
||||
v := v
|
||||
epConfig[k] = v
|
||||
}
|
||||
netCfg := &network.NetworkingConfig{
|
||||
EndpointsConfig: epConfig,
|
||||
@@ -160,191 +158,3 @@ func TestHandleMACAddressBC(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestEpConfigForNetMode(t *testing.T) {
|
||||
testcases := []struct {
|
||||
name string
|
||||
apiVersion string
|
||||
networkMode string
|
||||
epConfig map[string]*network.EndpointSettings
|
||||
expEpId string
|
||||
expNumEps int
|
||||
expError bool
|
||||
}{
|
||||
{
|
||||
name: "old api no eps",
|
||||
apiVersion: "1.43",
|
||||
networkMode: "mynet",
|
||||
expNumEps: 1,
|
||||
},
|
||||
{
|
||||
name: "new api no eps",
|
||||
apiVersion: "1.44",
|
||||
networkMode: "mynet",
|
||||
expNumEps: 1,
|
||||
},
|
||||
{
|
||||
name: "old api with ep",
|
||||
apiVersion: "1.43",
|
||||
networkMode: "mynet",
|
||||
epConfig: map[string]*network.EndpointSettings{
|
||||
"anything": {EndpointID: "epone"},
|
||||
},
|
||||
expEpId: "epone",
|
||||
expNumEps: 1,
|
||||
},
|
||||
{
|
||||
name: "new api with matching ep",
|
||||
apiVersion: "1.44",
|
||||
networkMode: "mynet",
|
||||
epConfig: map[string]*network.EndpointSettings{
|
||||
"mynet": {EndpointID: "epone"},
|
||||
},
|
||||
expEpId: "epone",
|
||||
expNumEps: 1,
|
||||
},
|
||||
{
|
||||
name: "new api with mismatched ep",
|
||||
apiVersion: "1.44",
|
||||
networkMode: "mynet",
|
||||
epConfig: map[string]*network.EndpointSettings{
|
||||
"shortid": {EndpointID: "epone"},
|
||||
},
|
||||
expError: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testcases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
netConfig := &network.NetworkingConfig{
|
||||
EndpointsConfig: tc.epConfig,
|
||||
}
|
||||
ep, err := epConfigForNetMode(tc.apiVersion, container.NetworkMode(tc.networkMode), netConfig)
|
||||
if tc.expError {
|
||||
assert.Check(t, is.ErrorContains(err, "HostConfig.NetworkMode must match the identity of a network in NetworkSettings.Networks"))
|
||||
} else {
|
||||
assert.Assert(t, err)
|
||||
assert.Check(t, is.Equal(ep.EndpointID, tc.expEpId))
|
||||
assert.Check(t, is.Len(netConfig.EndpointsConfig, tc.expNumEps))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleSysctlBC(t *testing.T) {
|
||||
testcases := []struct {
|
||||
name string
|
||||
apiVersion string
|
||||
networkMode string
|
||||
sysctls map[string]string
|
||||
epConfig map[string]*network.EndpointSettings
|
||||
expEpSysctls []string
|
||||
expSysctls map[string]string
|
||||
expWarningContains []string
|
||||
expError string
|
||||
}{
|
||||
{
|
||||
name: "migrate to new ep",
|
||||
apiVersion: "1.46",
|
||||
networkMode: "mynet",
|
||||
sysctls: map[string]string{
|
||||
"net.ipv6.conf.all.disable_ipv6": "0",
|
||||
"net.ipv6.conf.eth0.accept_ra": "2",
|
||||
"net.ipv6.conf.eth0.forwarding": "1",
|
||||
},
|
||||
expSysctls: map[string]string{
|
||||
"net.ipv6.conf.all.disable_ipv6": "0",
|
||||
},
|
||||
expEpSysctls: []string{"net.ipv6.conf.IFNAME.forwarding=1", "net.ipv6.conf.IFNAME.accept_ra=2"},
|
||||
expWarningContains: []string{
|
||||
"Migrated",
|
||||
"net.ipv6.conf.eth0.accept_ra", "net.ipv6.conf.IFNAME.accept_ra=2",
|
||||
"net.ipv6.conf.eth0.forwarding", "net.ipv6.conf.IFNAME.forwarding=1",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "migrate nothing",
|
||||
apiVersion: "1.46",
|
||||
networkMode: "mynet",
|
||||
sysctls: map[string]string{
|
||||
"net.ipv6.conf.all.disable_ipv6": "0",
|
||||
},
|
||||
expSysctls: map[string]string{
|
||||
"net.ipv6.conf.all.disable_ipv6": "0",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "migration disabled for newer api",
|
||||
apiVersion: "1.47",
|
||||
networkMode: "mynet",
|
||||
sysctls: map[string]string{
|
||||
"net.ipv6.conf.eth0.accept_ra": "2",
|
||||
},
|
||||
expError: "must be supplied using driver option 'com.docker.network.endpoint.sysctls'",
|
||||
},
|
||||
{
|
||||
name: "only migrate eth0",
|
||||
apiVersion: "1.46",
|
||||
networkMode: "mynet",
|
||||
sysctls: map[string]string{
|
||||
"net.ipv6.conf.eth1.accept_ra": "2",
|
||||
},
|
||||
expError: "unable to determine network endpoint",
|
||||
},
|
||||
{
|
||||
name: "net name mismatch",
|
||||
apiVersion: "1.46",
|
||||
networkMode: "mynet",
|
||||
epConfig: map[string]*network.EndpointSettings{
|
||||
"shortid": {EndpointID: "epone"},
|
||||
},
|
||||
sysctls: map[string]string{
|
||||
"net.ipv6.conf.eth1.accept_ra": "2",
|
||||
},
|
||||
expError: "unable to find a network for sysctl",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testcases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
hostCfg := &container.HostConfig{
|
||||
NetworkMode: container.NetworkMode(tc.networkMode),
|
||||
Sysctls: map[string]string{},
|
||||
}
|
||||
for k, v := range tc.sysctls {
|
||||
hostCfg.Sysctls[k] = v
|
||||
}
|
||||
netCfg := &network.NetworkingConfig{
|
||||
EndpointsConfig: tc.epConfig,
|
||||
}
|
||||
|
||||
warnings, err := handleSysctlBC(hostCfg, netCfg, tc.apiVersion)
|
||||
|
||||
for _, s := range tc.expWarningContains {
|
||||
assert.Check(t, is.Contains(warnings, s))
|
||||
}
|
||||
|
||||
if tc.expError != "" {
|
||||
assert.Check(t, is.ErrorContains(err, tc.expError))
|
||||
} else {
|
||||
assert.Check(t, err)
|
||||
|
||||
assert.Check(t, is.DeepEqual(hostCfg.Sysctls, tc.expSysctls))
|
||||
|
||||
ep := netCfg.EndpointsConfig[tc.networkMode]
|
||||
if ep == nil {
|
||||
assert.Check(t, is.Nil(tc.expEpSysctls))
|
||||
} else {
|
||||
got, ok := ep.DriverOpts[netlabel.EndpointSysctls]
|
||||
assert.Check(t, ok)
|
||||
// Check for expected ep-sysctls.
|
||||
for _, want := range tc.expEpSysctls {
|
||||
assert.Check(t, is.Contains(got, want))
|
||||
}
|
||||
// Check for unexpected ep-sysctls.
|
||||
assert.Check(t, is.Len(got, len(strings.Join(tc.expEpSysctls, ","))))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -10,12 +10,12 @@ import (
|
||||
"net/http"
|
||||
|
||||
"github.com/docker/docker/api/server/httputils"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types"
|
||||
gddohttputil "github.com/golang/gddo/httputil"
|
||||
)
|
||||
|
||||
// setContainerPathStatHeader encodes the stat to JSON, base64 encode, and place in a header.
|
||||
func setContainerPathStatHeader(stat *container.PathStat, header http.Header) error {
|
||||
func setContainerPathStatHeader(stat *types.ContainerPathStat, header http.Header) error {
|
||||
statJSON, err := json.Marshal(stat)
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
@@ -10,7 +10,6 @@ import (
|
||||
"github.com/containerd/log"
|
||||
"github.com/docker/docker/api/server/httputils"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/backend"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/versions"
|
||||
"github.com/docker/docker/errdefs"
|
||||
@@ -39,7 +38,7 @@ func (s *containerRouter) postContainerExecCreate(ctx context.Context, w http.Re
|
||||
return err
|
||||
}
|
||||
|
||||
execConfig := &container.ExecOptions{}
|
||||
execConfig := &types.ExecConfig{}
|
||||
if err := httputils.ReadJSON(r, execConfig); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -78,8 +77,8 @@ func (s *containerRouter) postContainerExecStart(ctx context.Context, w http.Res
|
||||
stdout, stderr, outStream io.Writer
|
||||
)
|
||||
|
||||
options := &container.ExecStartOptions{}
|
||||
if err := httputils.ReadJSON(r, options); err != nil {
|
||||
execStartCheck := &types.ExecStartCheck{}
|
||||
if err := httputils.ReadJSON(r, execStartCheck); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -87,21 +86,21 @@ func (s *containerRouter) postContainerExecStart(ctx context.Context, w http.Res
|
||||
return err
|
||||
}
|
||||
|
||||
if options.ConsoleSize != nil {
|
||||
if execStartCheck.ConsoleSize != nil {
|
||||
version := httputils.VersionFromContext(ctx)
|
||||
|
||||
// Not supported before 1.42
|
||||
if versions.LessThan(version, "1.42") {
|
||||
options.ConsoleSize = nil
|
||||
execStartCheck.ConsoleSize = nil
|
||||
}
|
||||
|
||||
// No console without tty
|
||||
if !options.Tty {
|
||||
options.ConsoleSize = nil
|
||||
if !execStartCheck.Tty {
|
||||
execStartCheck.ConsoleSize = nil
|
||||
}
|
||||
}
|
||||
|
||||
if !options.Detach {
|
||||
if !execStartCheck.Detach {
|
||||
var err error
|
||||
// Setting up the streaming http interface.
|
||||
inStream, outStream, err = httputils.HijackConnection(w)
|
||||
@@ -112,43 +111,42 @@ func (s *containerRouter) postContainerExecStart(ctx context.Context, w http.Res
|
||||
|
||||
if _, ok := r.Header["Upgrade"]; ok {
|
||||
contentType := types.MediaTypeRawStream
|
||||
if !options.Tty && versions.GreaterThanOrEqualTo(httputils.VersionFromContext(ctx), "1.42") {
|
||||
if !execStartCheck.Tty && versions.GreaterThanOrEqualTo(httputils.VersionFromContext(ctx), "1.42") {
|
||||
contentType = types.MediaTypeMultiplexedStream
|
||||
}
|
||||
_, _ = fmt.Fprint(outStream, "HTTP/1.1 101 UPGRADED\r\nContent-Type: "+contentType+"\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n")
|
||||
fmt.Fprint(outStream, "HTTP/1.1 101 UPGRADED\r\nContent-Type: "+contentType+"\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n")
|
||||
} else {
|
||||
_, _ = fmt.Fprint(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n")
|
||||
fmt.Fprint(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n")
|
||||
}
|
||||
|
||||
// copy headers that were removed as part of hijack
|
||||
if err := w.Header().WriteSubset(outStream, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
_, _ = fmt.Fprint(outStream, "\r\n")
|
||||
fmt.Fprint(outStream, "\r\n")
|
||||
|
||||
stdin = inStream
|
||||
if options.Tty {
|
||||
stdout = outStream
|
||||
} else {
|
||||
stdout = outStream
|
||||
if !execStartCheck.Tty {
|
||||
stderr = stdcopy.NewStdWriter(outStream, stdcopy.Stderr)
|
||||
stdout = stdcopy.NewStdWriter(outStream, stdcopy.Stdout)
|
||||
}
|
||||
}
|
||||
|
||||
// Now run the user process in container.
|
||||
//
|
||||
// TODO: Maybe we should we pass ctx here if we're not detaching?
|
||||
err := s.backend.ContainerExecStart(context.Background(), execName, backend.ExecStartConfig{
|
||||
options := container.ExecStartOptions{
|
||||
Stdin: stdin,
|
||||
Stdout: stdout,
|
||||
Stderr: stderr,
|
||||
ConsoleSize: options.ConsoleSize,
|
||||
})
|
||||
if err != nil {
|
||||
if options.Detach {
|
||||
ConsoleSize: execStartCheck.ConsoleSize,
|
||||
}
|
||||
|
||||
// Now run the user process in container.
|
||||
// Maybe we should we pass ctx here if we're not detaching?
|
||||
if err := s.backend.ContainerExecStart(context.Background(), execName, options); err != nil {
|
||||
if execStartCheck.Detach {
|
||||
return err
|
||||
}
|
||||
_, _ = fmt.Fprintf(stdout, "%v\r\n", err)
|
||||
stdout.Write([]byte(err.Error() + "\r\n"))
|
||||
log.G(ctx).Errorf("Error running exec %s in container: %v", execName, err)
|
||||
}
|
||||
return nil
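On the client side, the non-TTY output produced above is stdcopy-framed (content type application/vnd.docker.multiplexed-stream on API 1.42+) and can be split back into stdout and stderr with the companion reader. A minimal sketch; the source reader is assumed to be the hijacked connection returned by the attach or exec-start call:

	package attachexample

	import (
		"io"
		"os"

		"github.com/docker/docker/pkg/stdcopy"
	)

	// demux copies a multiplexed (stdcopy-framed) attach/exec stream to the local
	// stdout and stderr, returning when the stream ends or a frame is malformed.
	func demux(src io.Reader) error {
		_, err := stdcopy.StdCopy(os.Stdout, os.Stderr, src)
		return err
	}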
|
||||
|
||||
@@ -1,54 +0,0 @@
|
||||
package container
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"syscall"
|
||||
|
||||
"github.com/containerd/log"
|
||||
"github.com/docker/docker/internal/unix_noeintr"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
func notifyClosed(ctx context.Context, conn net.Conn, notify func()) {
|
||||
sc, ok := conn.(syscall.Conn)
|
||||
if !ok {
|
||||
log.G(ctx).Debug("notifyClosed: conn does not support close notifications")
|
||||
return
|
||||
}
|
||||
|
||||
rc, err := sc.SyscallConn()
|
||||
if err != nil {
|
||||
log.G(ctx).WithError(err).Warn("notifyClosed: failed get raw conn for close notifications")
|
||||
return
|
||||
}
|
||||
|
||||
epFd, err := unix_noeintr.EpollCreate()
|
||||
if err != nil {
|
||||
log.G(ctx).WithError(err).Warn("notifyClosed: failed to create epoll fd")
|
||||
return
|
||||
}
|
||||
defer unix.Close(epFd)
|
||||
|
||||
err = rc.Control(func(fd uintptr) {
|
||||
err := unix_noeintr.EpollCtl(epFd, unix.EPOLL_CTL_ADD, int(fd), &unix.EpollEvent{
|
||||
Events: unix.EPOLLHUP,
|
||||
Fd: int32(fd),
|
||||
})
|
||||
if err != nil {
|
||||
log.G(ctx).WithError(err).Warn("notifyClosed: failed to register fd for close notifications")
|
||||
return
|
||||
}
|
||||
|
||||
events := make([]unix.EpollEvent, 1)
|
||||
if _, err := unix_noeintr.EpollWait(epFd, events, -1); err != nil {
|
||||
log.G(ctx).WithError(err).Warn("notifyClosed: failed to wait for close notifications")
|
||||
return
|
||||
}
|
||||
notify()
|
||||
})
|
||||
if err != nil {
|
||||
log.G(ctx).WithError(err).Warn("notifyClosed: failed to register for close notifications")
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -1,10 +0,0 @@
|
||||
//go:build !linux
|
||||
|
||||
package container
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
)
|
||||
|
||||
func notifyClosed(ctx context.Context, conn net.Conn, notify func()) {}
|
||||
@@ -1,21 +1,13 @@
|
||||
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
|
||||
//go:build go1.21
|
||||
|
||||
package grpc // import "github.com/docker/docker/api/server/router/grpc"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/containerd/log"
|
||||
"github.com/docker/docker/api/server/router"
|
||||
grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
|
||||
"github.com/moby/buildkit/util/grpcerrors"
|
||||
"github.com/moby/buildkit/util/stack"
|
||||
"github.com/moby/buildkit/util/tracing"
|
||||
"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
|
||||
"go.opentelemetry.io/otel"
|
||||
"golang.org/x/net/http2"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
@@ -28,15 +20,12 @@ type grpcRouter struct {
|
||||
|
||||
// NewRouter initializes a new grpc http router
|
||||
func NewRouter(backends ...Backend) router.Router {
|
||||
opts := []grpc.ServerOption{
|
||||
grpc.StatsHandler(tracing.ServerStatsHandler(otelgrpc.WithTracerProvider(otel.GetTracerProvider()))),
|
||||
grpc.ChainUnaryInterceptor(unaryInterceptor, grpcerrors.UnaryServerInterceptor),
|
||||
grpc.StreamInterceptor(grpcerrors.StreamServerInterceptor),
|
||||
}
|
||||
unary := grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(unaryInterceptor(), grpcerrors.UnaryServerInterceptor))
|
||||
stream := grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(otelgrpc.StreamServerInterceptor(), grpcerrors.StreamServerInterceptor)) //nolint:staticcheck // TODO(thaJeztah): ignore SA1019 for deprecated options: see https://github.com/moby/moby/issues/47437
|
||||
|
||||
r := &grpcRouter{
|
||||
h2Server: &http2.Server{},
|
||||
grpcServer: grpc.NewServer(opts...),
|
||||
grpcServer: grpc.NewServer(unary, stream),
|
||||
}
|
||||
for _, b := range backends {
|
||||
b.RegisterGRPC(r.grpcServer)
|
||||
@@ -56,20 +45,16 @@ func (gr *grpcRouter) initRoutes() {
|
||||
}
|
||||
}
|
||||
|
||||
func unaryInterceptor(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) {
|
||||
// This method is used by the clients to send their traces to buildkit so they can be included
|
||||
// in the daemon trace and stored in the build history record. This method can not be traced because
|
||||
// it would cause an infinite loop.
|
||||
if strings.HasSuffix(info.FullMethod, "opentelemetry.proto.collector.trace.v1.TraceService/Export") {
|
||||
return handler(ctx, req)
|
||||
}
|
||||
func unaryInterceptor() grpc.UnaryServerInterceptor {
|
||||
withTrace := otelgrpc.UnaryServerInterceptor() //nolint:staticcheck // TODO(thaJeztah): ignore SA1019 for deprecated options: see https://github.com/moby/moby/issues/47437
|
||||
|
||||
resp, err = handler(ctx, req)
|
||||
if err != nil {
|
||||
log.G(ctx).WithError(err).Error(info.FullMethod)
|
||||
if log.GetLevel() >= log.DebugLevel {
|
||||
fmt.Fprintf(os.Stderr, "%+v", stack.Formatter(grpcerrors.FromGRPC(err)))
|
||||
return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) {
|
||||
// This method is used by the clients to send their traces to buildkit so they can be included
|
||||
// in the daemon trace and stored in the build history record. This method can not be traced because
|
||||
// it would cause an infinite loop.
|
||||
if strings.HasSuffix(info.FullMethod, "opentelemetry.proto.collector.trace.v1.TraceService/Export") {
|
||||
return handler(ctx, req)
|
||||
}
|
||||
return withTrace(ctx, req, info, handler)
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"io"
|
||||
|
||||
"github.com/distribution/reference"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/backend"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/docker/docker/api/types/image"
|
||||
@@ -27,7 +28,7 @@ type imageBackend interface {
|
||||
Images(ctx context.Context, opts image.ListOptions) ([]*image.Summary, error)
|
||||
GetImage(ctx context.Context, refOrID string, options backend.GetImageOpts) (*dockerimage.Image, error)
|
||||
TagImage(ctx context.Context, id dockerimage.ID, newRef reference.Named) error
|
||||
ImagesPrune(ctx context.Context, pruneFilters filters.Args) (*image.PruneReport, error)
|
||||
ImagesPrune(ctx context.Context, pruneFilters filters.Args) (*types.ImagesPruneReport, error)
|
||||
}
|
||||
|
||||
type importExportBackend interface {
|
||||
@@ -38,7 +39,7 @@ type importExportBackend interface {
|
||||
|
||||
type registryBackend interface {
|
||||
PullImage(ctx context.Context, ref reference.Named, platform *ocispec.Platform, metaHeaders map[string][]string, authConfig *registry.AuthConfig, outStream io.Writer) error
|
||||
PushImage(ctx context.Context, ref reference.Named, platform *ocispec.Platform, metaHeaders map[string][]string, authConfig *registry.AuthConfig, outStream io.Writer) error
|
||||
PushImage(ctx context.Context, ref reference.Named, metaHeaders map[string][]string, authConfig *registry.AuthConfig, outStream io.Writer) error
|
||||
}
|
||||
|
||||
type Searcher interface {
|
||||
|
||||
@@ -56,7 +56,7 @@ func (ir *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWrit
|
||||
if p := r.FormValue("platform"); p != "" {
|
||||
sp, err := platforms.Parse(p)
|
||||
if err != nil {
|
||||
return errdefs.InvalidParameter(err)
|
||||
return err
|
||||
}
|
||||
platform = &sp
|
||||
}
|
||||
@@ -205,25 +205,7 @@ func (ir *imageRouter) postImagesPush(ctx context.Context, w http.ResponseWriter
|
||||
ref = r
|
||||
}
|
||||
|
||||
var platform *ocispec.Platform
|
||||
// Platform is optional, and only supported in API version 1.46 and later.
|
||||
// However the PushOptions struct previously was an alias for the PullOptions struct
|
||||
// which also contained a Platform field.
|
||||
// This means that older clients may be sending a platform field, even
|
||||
// though it wasn't really supported by the server.
|
||||
// Don't break these clients and just ignore the platform field on older APIs.
|
||||
if versions.GreaterThanOrEqualTo(httputils.VersionFromContext(ctx), "1.46") {
|
||||
if formPlatform := r.Form.Get("platform"); formPlatform != "" {
|
||||
p, err := httputils.DecodePlatform(formPlatform)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
platform = p
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if err := ir.backend.PushImage(ctx, ref, platform, metaHeaders, authConfig, output); err != nil {
|
||||
if err := ir.backend.PushImage(ctx, ref, metaHeaders, authConfig, output); err != nil {
|
||||
if !output.Flushed() {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@ package network // import "github.com/docker/docker/api/server/router/network"
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/backend"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/docker/docker/api/types/network"
|
||||
@@ -11,20 +12,20 @@ import (
|
||||
// Backend is all the methods that need to be implemented
|
||||
// to provide network specific functionality.
|
||||
type Backend interface {
|
||||
GetNetworks(filters.Args, backend.NetworkListConfig) ([]network.Inspect, error)
|
||||
CreateNetwork(nc network.CreateRequest) (*network.CreateResponse, error)
|
||||
ConnectContainerToNetwork(ctx context.Context, containerName, networkName string, endpointConfig *network.EndpointSettings) error
|
||||
GetNetworks(filters.Args, backend.NetworkListConfig) ([]types.NetworkResource, error)
|
||||
CreateNetwork(nc types.NetworkCreateRequest) (*types.NetworkCreateResponse, error)
|
||||
ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error
|
||||
DisconnectContainerFromNetwork(containerName string, networkName string, force bool) error
|
||||
DeleteNetwork(networkID string) error
|
||||
NetworksPrune(ctx context.Context, pruneFilters filters.Args) (*network.PruneReport, error)
|
||||
NetworksPrune(ctx context.Context, pruneFilters filters.Args) (*types.NetworksPruneReport, error)
|
||||
}
|
||||
|
||||
// ClusterBackend is all the methods that need to be implemented
|
||||
// to provide cluster network specific functionality.
|
||||
type ClusterBackend interface {
|
||||
GetNetworks(filters.Args) ([]network.Inspect, error)
|
||||
GetNetwork(name string) (network.Inspect, error)
|
||||
GetNetworksByName(name string) ([]network.Inspect, error)
|
||||
CreateNetwork(nc network.CreateRequest) (string, error)
|
||||
GetNetworks(filters.Args) ([]types.NetworkResource, error)
|
||||
GetNetwork(name string) (types.NetworkResource, error)
|
||||
GetNetworksByName(name string) ([]types.NetworkResource, error)
|
||||
CreateNetwork(nc types.NetworkCreateRequest) (string, error)
|
||||
RemoveNetwork(name string) error
|
||||
}
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/api/server/httputils"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/backend"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/docker/docker/api/types/network"
|
||||
@@ -31,7 +32,7 @@ func (n *networkRouter) getNetworksList(ctx context.Context, w http.ResponseWrit
|
||||
return err
|
||||
}
|
||||
|
||||
var list []network.Summary
|
||||
var list []types.NetworkResource
|
||||
nr, err := n.cluster.GetNetworks(filter)
|
||||
if err == nil {
|
||||
list = nr
|
||||
@@ -59,7 +60,7 @@ func (n *networkRouter) getNetworksList(ctx context.Context, w http.ResponseWrit
|
||||
}
|
||||
|
||||
if list == nil {
|
||||
list = []network.Summary{}
|
||||
list = []types.NetworkResource{}
|
||||
}
|
||||
|
||||
return httputils.WriteJSON(w, http.StatusOK, list)
|
||||
@@ -108,8 +109,8 @@ func (n *networkRouter) getNetwork(ctx context.Context, w http.ResponseWriter, r
|
||||
|
||||
	// For full name and partial ID, save the result first, and process later
	// in case multiple records were found based on the same term.
|
||||
listByFullName := map[string]network.Inspect{}
|
||||
listByPartialID := map[string]network.Inspect{}
|
||||
listByFullName := map[string]types.NetworkResource{}
|
||||
listByPartialID := map[string]types.NetworkResource{}
|
||||
|
||||
// TODO(@cpuguy83): All this logic for figuring out which network to return does not belong here
|
||||
// Instead there should be a backend function to just get one network.
|
||||
@@ -203,7 +204,7 @@ func (n *networkRouter) postNetworkCreate(ctx context.Context, w http.ResponseWr
|
||||
return err
|
||||
}
|
||||
|
||||
var create network.CreateRequest
|
||||
var create types.NetworkCreateRequest
|
||||
if err := httputils.ReadJSON(r, &create); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -225,7 +226,7 @@ func (n *networkRouter) postNetworkCreate(ctx context.Context, w http.ResponseWr
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
nw = &network.CreateResponse{
|
||||
nw = &types.NetworkCreateResponse{
|
||||
ID: id,
|
||||
}
|
||||
}
|
||||
@@ -238,7 +239,7 @@ func (n *networkRouter) postNetworkConnect(ctx context.Context, w http.ResponseW
|
||||
return err
|
||||
}
|
||||
|
||||
var connect network.ConnectOptions
|
||||
var connect types.NetworkConnect
|
||||
if err := httputils.ReadJSON(r, &connect); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -247,7 +248,7 @@ func (n *networkRouter) postNetworkConnect(ctx context.Context, w http.ResponseW
|
||||
	// The reason is that, in the case of an attachable network in swarm scope, the actual local
	// network may not be available at the time. At the same time, inside the daemon,
	// `ConnectContainerToNetwork` does the ambiguity check anyway, so passing the name to the
	// daemon is enough.
|
||||
return n.backend.ConnectContainerToNetwork(ctx, connect.Container, vars["id"], connect.EndpointConfig)
|
||||
return n.backend.ConnectContainerToNetwork(connect.Container, vars["id"], connect.EndpointConfig)
|
||||
}
|
||||
|
||||
func (n *networkRouter) postNetworkDisconnect(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
@@ -255,7 +256,7 @@ func (n *networkRouter) postNetworkDisconnect(ctx context.Context, w http.Respon
|
||||
return err
|
||||
}
|
||||
|
||||
var disconnect network.DisconnectOptions
|
||||
var disconnect types.NetworkDisconnect
|
||||
if err := httputils.ReadJSON(r, &disconnect); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -310,9 +311,9 @@ func (n *networkRouter) postNetworksPrune(ctx context.Context, w http.ResponseWr
|
||||
// For full name and partial ID, save the result first, and process later
// in case multiple records were found based on the same term.
|
||||
// TODO (yongtang): should we wrap with version here for backward compatibility?
|
||||
func (n *networkRouter) findUniqueNetwork(term string) (network.Inspect, error) {
|
||||
listByFullName := map[string]network.Inspect{}
|
||||
listByPartialID := map[string]network.Inspect{}
|
||||
func (n *networkRouter) findUniqueNetwork(term string) (types.NetworkResource, error) {
|
||||
listByFullName := map[string]types.NetworkResource{}
|
||||
listByPartialID := map[string]types.NetworkResource{}
|
||||
|
||||
filter := filters.NewArgs(filters.Arg("idOrName", term))
|
||||
networks, _ := n.backend.GetNetworks(filter, backend.NetworkListConfig{Detailed: true})
|
||||
@@ -362,7 +363,7 @@ func (n *networkRouter) findUniqueNetwork(term string) (network.Inspect, error)
|
||||
}
|
||||
}
|
||||
if len(listByFullName) > 1 {
|
||||
return network.Inspect{}, errdefs.InvalidParameter(errors.Errorf("network %s is ambiguous (%d matches found based on name)", term, len(listByFullName)))
|
||||
return types.NetworkResource{}, errdefs.InvalidParameter(errors.Errorf("network %s is ambiguous (%d matches found based on name)", term, len(listByFullName)))
|
||||
}
|
||||
|
||||
// Find based on partial ID, returns true only if no duplicates
|
||||
@@ -372,8 +373,8 @@ func (n *networkRouter) findUniqueNetwork(term string) (network.Inspect, error)
|
||||
}
|
||||
}
|
||||
if len(listByPartialID) > 1 {
|
||||
return network.Inspect{}, errdefs.InvalidParameter(errors.Errorf("network %s is ambiguous (%d matches found based on ID prefix)", term, len(listByPartialID)))
|
||||
return types.NetworkResource{}, errdefs.InvalidParameter(errors.Errorf("network %s is ambiguous (%d matches found based on ID prefix)", term, len(listByPartialID)))
|
||||
}
|
||||
|
||||
return network.Inspect{}, errdefs.NotFound(libnetwork.ErrNoSuchNetwork(term))
|
||||
return types.NetworkResource{}, errdefs.NotFound(libnetwork.ErrNoSuchNetwork(term))
|
||||
}
|
||||
|
||||
@@ -224,6 +224,14 @@ func (sr *swarmRouter) createService(ctx context.Context, w http.ResponseWriter,
|
||||
adjustForAPIVersion(v, &service)
|
||||
}
|
||||
|
||||
version := httputils.VersionFromContext(ctx)
|
||||
if versions.LessThan(version, "1.44") {
|
||||
if service.TaskTemplate.ContainerSpec != nil && service.TaskTemplate.ContainerSpec.Healthcheck != nil {
|
||||
// StartInterval was added in API 1.44
|
||||
service.TaskTemplate.ContainerSpec.Healthcheck.StartInterval = 0
|
||||
}
|
||||
}
|
||||
|
||||
resp, err := sr.backend.CreateService(service, encodedAuth, queryRegistry)
|
||||
if err != nil {
|
||||
log.G(ctx).WithFields(log.Fields{
|
||||
|
||||
@@ -78,16 +78,6 @@ func adjustForAPIVersion(cliVersion string, service *swarm.ServiceSpec) {
|
||||
if cliVersion == "" {
|
||||
return
|
||||
}
|
||||
if versions.LessThan(cliVersion, "1.46") {
|
||||
if service.TaskTemplate.ContainerSpec != nil {
|
||||
for i, mount := range service.TaskTemplate.ContainerSpec.Mounts {
|
||||
if mount.TmpfsOptions != nil {
|
||||
mount.TmpfsOptions.Options = nil
|
||||
service.TaskTemplate.ContainerSpec.Mounts[i] = mount
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if versions.LessThan(cliVersion, "1.40") {
|
||||
if service.TaskTemplate.ContainerSpec != nil {
|
||||
// Sysctls for docker swarm services weren't supported before
|
||||
@@ -131,25 +121,11 @@ func adjustForAPIVersion(cliVersion string, service *swarm.ServiceSpec) {
|
||||
}
|
||||
|
||||
if versions.LessThan(cliVersion, "1.44") {
|
||||
if service.TaskTemplate.ContainerSpec != nil {
|
||||
// seccomp, apparmor, and no_new_privs were added in 1.44.
|
||||
if service.TaskTemplate.ContainerSpec.Privileges != nil {
|
||||
service.TaskTemplate.ContainerSpec.Privileges.Seccomp = nil
|
||||
service.TaskTemplate.ContainerSpec.Privileges.AppArmor = nil
|
||||
service.TaskTemplate.ContainerSpec.Privileges.NoNewPrivileges = false
|
||||
}
|
||||
if service.TaskTemplate.ContainerSpec.Healthcheck != nil {
|
||||
// StartInterval was added in API 1.44
|
||||
service.TaskTemplate.ContainerSpec.Healthcheck.StartInterval = 0
|
||||
}
|
||||
// seccomp, apparmor, and no_new_privs were added in 1.44.
|
||||
if service.TaskTemplate.ContainerSpec != nil && service.TaskTemplate.ContainerSpec.Privileges != nil {
|
||||
service.TaskTemplate.ContainerSpec.Privileges.Seccomp = nil
|
||||
service.TaskTemplate.ContainerSpec.Privileges.AppArmor = nil
|
||||
service.TaskTemplate.ContainerSpec.Privileges.NoNewPrivileges = false
|
||||
}
|
||||
}
|
||||
|
||||
if versions.LessThan(cliVersion, "1.46") {
|
||||
if service.TaskTemplate.ContainerSpec != nil && service.TaskTemplate.ContainerSpec.OomScoreAdj != 0 {
|
||||
// OomScoreAdj was added in API 1.46
|
||||
service.TaskTemplate.ContainerSpec.OomScoreAdj = 0
|
||||
}
|
||||
}
|
||||
|
||||
}
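A sketch of the effect for an older client (field values are illustrative):

	spec := &swarm.ServiceSpec{
		TaskTemplate: swarm.TaskSpec{
			ContainerSpec: &swarm.ContainerSpec{
				Privileges: &swarm.Privileges{NoNewPrivileges: true},
			},
		},
	}

	adjustForAPIVersion("1.43", spec)
	// Seccomp, AppArmor and NoNewPrivileges only exist from API 1.44, so for a 1.43
	// client they are cleared:
	//   spec.TaskTemplate.ContainerSpec.Privileges.NoNewPrivileges == false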
|
||||
|
||||
@@ -4,9 +4,8 @@ import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/mount"
|
||||
"github.com/docker/docker/api/types/swarm"
|
||||
"github.com/docker/go-units"
|
||||
)
|
||||
|
||||
func TestAdjustForAPIVersion(t *testing.T) {
|
||||
@@ -39,25 +38,13 @@ func TestAdjustForAPIVersion(t *testing.T) {
|
||||
ConfigName: "configRuntime",
|
||||
},
|
||||
},
|
||||
Ulimits: []*container.Ulimit{
|
||||
Ulimits: []*units.Ulimit{
|
||||
{
|
||||
Name: "nofile",
|
||||
Soft: 100,
|
||||
Hard: 200,
|
||||
},
|
||||
},
|
||||
Mounts: []mount.Mount{
|
||||
{
|
||||
Type: mount.TypeTmpfs,
|
||||
Source: "/foo",
|
||||
Target: "/bar",
|
||||
TmpfsOptions: &mount.TmpfsOptions{
|
||||
Options: [][]string{
|
||||
{"exec"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Placement: &swarm.Placement{
|
||||
MaxReplicas: 222,
|
||||
@@ -70,19 +57,6 @@ func TestAdjustForAPIVersion(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
adjustForAPIVersion("1.46", spec)
|
||||
if !reflect.DeepEqual(
|
||||
spec.TaskTemplate.ContainerSpec.Mounts[0].TmpfsOptions.Options,
|
||||
[][]string{{"exec"}},
|
||||
) {
|
||||
t.Error("TmpfsOptions.Options was stripped from spec")
|
||||
}
|
||||
|
||||
adjustForAPIVersion("1.45", spec)
|
||||
if len(spec.TaskTemplate.ContainerSpec.Mounts[0].TmpfsOptions.Options) != 0 {
|
||||
t.Error("TmpfsOptions.Options not stripped from spec")
|
||||
}
|
||||
|
||||
// first, does calling this with a later version correctly NOT strip
|
||||
// fields? do the later version first, so we can reuse this spec in the
|
||||
// next test.
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
|
||||
//go:build go1.21
|
||||
//go:build go1.19
|
||||
|
||||
package system // import "github.com/docker/docker/api/server/router/system"
|
||||
|
||||
|
||||
@@ -97,10 +97,6 @@ func (s *systemRouter) getInfo(ctx context.Context, w http.ResponseWriter, r *ht
|
||||
info.Runtimes[k] = system.RuntimeWithStatus{Runtime: rt.Runtime}
|
||||
}
|
||||
}
|
||||
if versions.LessThan(version, "1.46") {
|
||||
// Containerd field introduced in API v1.46.
|
||||
info.Containerd = nil
|
||||
}
|
||||
if versions.GreaterThanOrEqualTo(version, "1.42") {
|
||||
info.KernelMemory = false
|
||||
}
|
||||
@@ -267,7 +263,6 @@ func (s *systemRouter) getEvents(ctx context.Context, w http.ResponseWriter, r *
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
output := ioutils.NewWriteFlusher(w)
|
||||
defer output.Close()
|
||||
output.Flush()
|
||||
@@ -277,18 +272,7 @@ func (s *systemRouter) getEvents(ctx context.Context, w http.ResponseWriter, r *
|
||||
buffered, l := s.backend.SubscribeToEvents(since, until, ef)
|
||||
defer s.backend.UnsubscribeFromEvents(l)
|
||||
|
||||
shouldSkip := func(ev events.Message) bool { return false }
|
||||
if versions.LessThan(httputils.VersionFromContext(ctx), "1.46") {
|
||||
// Image create events were added in API 1.46
|
||||
shouldSkip = func(ev events.Message) bool {
|
||||
return ev.Type == "image" && ev.Action == "create"
|
||||
}
|
||||
}
|
||||
|
||||
for _, ev := range buffered {
|
||||
if shouldSkip(ev) {
|
||||
continue
|
||||
}
|
||||
if err := enc.Encode(ev); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -306,9 +290,6 @@ func (s *systemRouter) getEvents(ctx context.Context, w http.ResponseWriter, r *
|
||||
log.G(ctx).Warnf("unexpected event message: %q", ev)
|
||||
continue
|
||||
}
|
||||
if shouldSkip(jev) {
|
||||
continue
|
||||
}
|
||||
if err := enc.Encode(jev); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -3,9 +3,11 @@ package volume // import "github.com/docker/docker/api/server/router/volume"
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/docker/docker/volume/service/opts"
|
||||
// TODO return types need to be refactored into pkg
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/docker/docker/api/types/volume"
|
||||
"github.com/docker/docker/volume/service/opts"
|
||||
)
|
||||
|
||||
// Backend is the methods that need to be implemented to provide
|
||||
@@ -15,7 +17,7 @@ type Backend interface {
|
||||
Get(ctx context.Context, name string, opts ...opts.GetOption) (*volume.Volume, error)
|
||||
Create(ctx context.Context, name, driverName string, opts ...opts.CreateOption) (*volume.Volume, error)
|
||||
Remove(ctx context.Context, name string, opts ...opts.RemoveOption) error
|
||||
Prune(ctx context.Context, pruneFilters filters.Args) (*volume.PruneReport, error)
|
||||
Prune(ctx context.Context, pruneFilters filters.Args) (*types.VolumesPruneReport, error)
|
||||
}
|
||||
|
||||
// ClusterBackend is the backend used for Swarm Cluster Volumes. Regular
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
"gotest.tools/v3/assert"
|
||||
|
||||
"github.com/docker/docker/api/server/httputils"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/docker/docker/api/types/volume"
|
||||
"github.com/docker/docker/errdefs"
|
||||
@@ -635,7 +636,7 @@ func (b *fakeVolumeBackend) Remove(_ context.Context, name string, o ...opts.Rem
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *fakeVolumeBackend) Prune(_ context.Context, _ filters.Args) (*volume.PruneReport, error) {
|
||||
func (b *fakeVolumeBackend) Prune(_ context.Context, _ filters.Args) (*types.VolumesPruneReport, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
|
||||
api/swagger.yaml: 714 lines changed (diff suppressed because it is too large).
@@ -1,26 +0,0 @@
|
||||
package auxprogress
|
||||
|
||||
import (
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
// ManifestPushedInsteadOfIndex is a note that is sent when a manifest is pushed
|
||||
// instead of an index. It is sent when the pushed image is an multi-platform
|
||||
// index, but the whole index couldn't be pushed.
|
||||
type ManifestPushedInsteadOfIndex struct {
|
||||
ManifestPushedInsteadOfIndex bool `json:"manifestPushedInsteadOfIndex"` // Always true
|
||||
|
||||
// OriginalIndex is the descriptor of the original image index.
|
||||
OriginalIndex ocispec.Descriptor `json:"originalIndex"`
|
||||
|
||||
// SelectedManifest is the descriptor of the manifest that was pushed instead.
|
||||
SelectedManifest ocispec.Descriptor `json:"selectedManifest"`
|
||||
}
|
||||
|
||||
// ContentMissing is a note that is sent when push fails because the content is missing.
|
||||
type ContentMissing struct {
|
||||
ContentMissing bool `json:"contentMissing"` // Always true
|
||||
|
||||
// Desc is the descriptor of the root object that was attempted to be pushed.
|
||||
Desc ocispec.Descriptor `json:"desc"`
|
||||
}
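For illustration, such a note could be built and serialised like this (the descriptor contents are placeholders; how the note is attached to the progress stream is not shown in this hunk):

	note := auxprogress.ManifestPushedInsteadOfIndex{
		ManifestPushedInsteadOfIndex: true,
		OriginalIndex:    ocispec.Descriptor{MediaType: ocispec.MediaTypeImageIndex},
		SelectedManifest: ocispec.Descriptor{MediaType: ocispec.MediaTypeImageManifest},
	}
	payload, err := json.Marshal(note) // sent to clients as an auxiliary progress message
	_, _ = payload, err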
|
||||
@@ -30,7 +30,7 @@ type ContainerRmConfig struct {
|
||||
|
||||
// ContainerAttachConfig holds the streams to use when connecting to a container to view logs.
|
||||
type ContainerAttachConfig struct {
|
||||
GetStreams func(multiplexed bool, cancel func()) (io.ReadCloser, io.Writer, io.Writer, error)
|
||||
GetStreams func(multiplexed bool) (io.ReadCloser, io.Writer, io.Writer, error)
|
||||
UseStdin bool
|
||||
UseStdout bool
|
||||
UseStderr bool
|
||||
@@ -89,15 +89,7 @@ type LogSelector struct {
|
||||
type ContainerStatsConfig struct {
|
||||
Stream bool
|
||||
OneShot bool
|
||||
OutStream func() io.Writer
|
||||
}
|
||||
|
||||
// ExecStartConfig holds the options to start container's exec.
|
||||
type ExecStartConfig struct {
|
||||
Stdin io.Reader
|
||||
Stdout io.Writer
|
||||
Stderr io.Writer
|
||||
ConsoleSize *[2]uint `json:",omitempty"`
|
||||
OutStream io.Writer
|
||||
}
|
||||
|
||||
// ExecInspect holds information about a running process started
|
||||
|
||||
@@ -2,15 +2,43 @@ package types // import "github.com/docker/docker/api/types"
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"io"
|
||||
"net"
|
||||
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/docker/docker/api/types/registry"
|
||||
units "github.com/docker/go-units"
|
||||
)
|
||||
|
||||
// ContainerExecInspect holds information returned by exec inspect.
|
||||
type ContainerExecInspect struct {
|
||||
ExecID string `json:"ID"`
|
||||
ContainerID string
|
||||
Running bool
|
||||
ExitCode int
|
||||
Pid int
|
||||
}
|
||||
|
||||
// CopyToContainerOptions holds information
|
||||
// about files to copy into a container
|
||||
type CopyToContainerOptions struct {
|
||||
AllowOverwriteDirWithFile bool
|
||||
CopyUIDGID bool
|
||||
}
|
||||
|
||||
// EventsOptions holds parameters to filter events with.
|
||||
type EventsOptions struct {
|
||||
Since string
|
||||
Until string
|
||||
Filters filters.Args
|
||||
}
|
||||
|
||||
// NetworkListOptions holds parameters to filter the list of networks with.
|
||||
type NetworkListOptions struct {
|
||||
Filters filters.Args
|
||||
}
|
||||
|
||||
// NewHijackedResponse intializes a HijackedResponse type
|
||||
func NewHijackedResponse(conn net.Conn, mediaType string) HijackedResponse {
|
||||
return HijackedResponse{Conn: conn, Reader: bufio.NewReader(conn), mediaType: mediaType}
|
||||
@@ -73,7 +101,7 @@ type ImageBuildOptions struct {
|
||||
NetworkMode string
|
||||
ShmSize int64
|
||||
Dockerfile string
|
||||
Ulimits []*container.Ulimit
|
||||
Ulimits []*units.Ulimit
|
||||
// BuildArgs needs to be a *string instead of just a string so that
|
||||
// we can tell the difference between "" (empty string) and no value
|
||||
// at all (nil). See the parsing of buildArgs in
|
||||
@@ -94,7 +122,7 @@ type ImageBuildOptions struct {
|
||||
Target string
|
||||
SessionID string
|
||||
Platform string
|
||||
// Version specifies the version of the underlying builder to use
|
||||
// Version specifies the version of the unerlying builder to use
|
||||
Version BuilderVersion
|
||||
// BuildID is an optional identifier that can be passed together with the
|
||||
// build request. The same identifier can be used to gracefully cancel the
|
||||
@@ -129,13 +157,34 @@ type ImageBuildResponse struct {
|
||||
OSType string
|
||||
}
|
||||
|
||||
// ImageImportSource holds source information for ImageImport
|
||||
type ImageImportSource struct {
|
||||
Source io.Reader // Source is the data to send to the server to create this image from. You must set SourceName to "-" to leverage this.
|
||||
SourceName string // SourceName is the name of the image to pull. Set to "-" to leverage the Source attribute.
|
||||
}
|
||||
|
||||
// ImageLoadResponse returns information to the client about a load process.
|
||||
type ImageLoadResponse struct {
|
||||
// Body must be closed to avoid a resource leak
|
||||
Body io.ReadCloser
|
||||
JSON bool
|
||||
}
|
||||
|
||||
// RequestPrivilegeFunc is a function interface that
|
||||
// clients can supply to retry operations after
|
||||
// getting an authorization error.
|
||||
// This function returns the registry authentication
|
||||
// header value in base 64 format, or an error
|
||||
// if the privilege request fails.
|
||||
type RequestPrivilegeFunc func(context.Context) (string, error)
|
||||
type RequestPrivilegeFunc func() (string, error)
|
||||
|
||||
// ImageSearchOptions holds parameters to search images with.
|
||||
type ImageSearchOptions struct {
|
||||
RegistryAuth string
|
||||
PrivilegeFunc RequestPrivilegeFunc
|
||||
Filters filters.Args
|
||||
Limit int
|
||||
}
|
||||
|
||||
// NodeListOptions holds parameters to list nodes with.
|
||||
type NodeListOptions struct {
|
||||
@@ -240,7 +289,7 @@ type PluginInstallOptions struct {
|
||||
RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
|
||||
RemoteRef string // RemoteRef is the plugin name on the registry
|
||||
PrivilegeFunc RequestPrivilegeFunc
|
||||
AcceptPermissionsFunc func(context.Context, PluginPrivileges) (bool, error)
|
||||
AcceptPermissionsFunc func(PluginPrivileges) (bool, error)
|
||||
Args []string
|
||||
}
|
||||
|
||||
|
||||
api/types/configs.go: new file, 18 lines.
@@ -0,0 +1,18 @@
|
||||
package types // import "github.com/docker/docker/api/types"
|
||||
|
||||
// ExecConfig is a small subset of the Config struct that holds the configuration
|
||||
// for the exec feature of docker.
|
||||
type ExecConfig struct {
|
||||
User string // User that will run the command
|
||||
Privileged bool // Is the container in privileged mode
|
||||
Tty bool // Attach standard streams to a tty.
|
||||
ConsoleSize *[2]uint `json:",omitempty"` // Initial console size [height, width]
|
||||
AttachStdin bool // Attach the standard input, makes possible user interaction
|
||||
AttachStderr bool // Attach the standard error
|
||||
AttachStdout bool // Attach the standard output
|
||||
Detach bool // Execute in detach mode
|
||||
DetachKeys string // Escape keys for detach
|
||||
Env []string // Environment variables
|
||||
WorkingDir string // Working directory
|
||||
Cmd []string // Execution commands and args
|
||||
}
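A usage sketch for this struct; the client-side call in the comment is assumed from the Go client of the same era, not taken from this diff:

	execCfg := types.ExecConfig{
		User:         "root",
		AttachStdout: true,
		AttachStderr: true,
		Env:          []string{"DEBUG=1"},
		WorkingDir:   "/srv",
		Cmd:          []string{"ls", "-l"},
	}
	// Typically passed to the exec-create endpoint, e.g.:
	//   resp, err := cli.ContainerExecCreate(ctx, containerID, execCfg)
	_ = execCfg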
|
||||
@@ -1,6 +1,7 @@
|
||||
package container // import "github.com/docker/docker/api/types/container"
|
||||
|
||||
import (
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types/strslice"
|
||||
@@ -35,6 +36,14 @@ type StopOptions struct {
|
||||
// HealthConfig holds configuration settings for the HEALTHCHECK feature.
|
||||
type HealthConfig = dockerspec.HealthcheckConfig
|
||||
|
||||
// ExecStartOptions holds the options to start container's exec.
|
||||
type ExecStartOptions struct {
|
||||
Stdin io.Reader
|
||||
Stdout io.Writer
|
||||
Stderr io.Writer
|
||||
ConsoleSize *[2]uint `json:",omitempty"`
|
||||
}
|
||||
|
||||
// Config contains the configuration data about a container.
|
||||
// It should hold only portable information about the container.
|
||||
// Here, "portable" means "independent from the host we are running on".
|
||||
|
||||
@@ -1,44 +0,0 @@
|
||||
package container
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
// PruneReport contains the response for Engine API:
|
||||
// POST "/containers/prune"
|
||||
type PruneReport struct {
|
||||
ContainersDeleted []string
|
||||
SpaceReclaimed uint64
|
||||
}
|
||||
|
||||
// PathStat is used to encode the header from
|
||||
// GET "/containers/{name:.*}/archive"
|
||||
// "Name" is the file or directory name.
|
||||
type PathStat struct {
|
||||
Name string `json:"name"`
|
||||
Size int64 `json:"size"`
|
||||
Mode os.FileMode `json:"mode"`
|
||||
Mtime time.Time `json:"mtime"`
|
||||
LinkTarget string `json:"linkTarget"`
|
||||
}
|
||||
|
||||
// CopyToContainerOptions holds information
|
||||
// about files to copy into a container
|
||||
type CopyToContainerOptions struct {
|
||||
AllowOverwriteDirWithFile bool
|
||||
CopyUIDGID bool
|
||||
}
|
||||
|
||||
// StatsResponseReader wraps an io.ReadCloser to read (a stream of) stats
|
||||
// for a container, as produced by the GET "/stats" endpoint.
|
||||
//
|
||||
// The OSType field is set to the server's platform to allow
|
||||
// platform-specific handling of the response.
|
||||
//
|
||||
// TODO(thaJeztah): remove this wrapper, and make OSType part of [StatsResponse].
|
||||
type StatsResponseReader struct {
|
||||
Body io.ReadCloser `json:"body"`
|
||||
OSType string `json:"ostype"`
|
||||
}
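A hedged sketch of consuming the wrapper (imports elided; `cli` and `ctx` assumed): Body is a JSON stream, and OSType can be checked before interpreting platform-specific fields.

func oneStatsSample(ctx context.Context, cli *client.Client, containerID string) error {
	reader, err := cli.ContainerStats(ctx, containerID, false) // false = single sample, no stream
	if err != nil {
		return err
	}
	defer reader.Body.Close()

	var sample container.StatsResponse
	if err := json.NewDecoder(reader.Body).Decode(&sample); err != nil {
		return err
	}
	if reader.OSType == "windows" {
		// Windows only reports commit / private working-set memory stats.
	}
	fmt.Printf("%s: %d bytes memory\n", sample.Name, sample.MemoryStats.Usage)
	return nil
}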
|
||||
@@ -1,13 +0,0 @@
|
||||
package container
|
||||
|
||||
import "github.com/docker/docker/api/types/network"
|
||||
|
||||
// CreateRequest is the request message sent to the server for container
|
||||
// create calls. It is a config wrapper that holds the container [Config]
|
||||
// (portable) and the corresponding [HostConfig] (non-portable) and
|
||||
// [network.NetworkingConfig].
|
||||
type CreateRequest struct {
|
||||
*Config
|
||||
HostConfig *HostConfig `json:"HostConfig,omitempty"`
|
||||
NetworkingConfig *network.NetworkingConfig `json:"NetworkingConfig,omitempty"`
|
||||
}
|
||||
@@ -1,43 +0,0 @@
|
||||
package container
|
||||
|
||||
// ExecOptions is a small subset of the Config struct that holds the configuration
|
||||
// for the exec feature of docker.
|
||||
type ExecOptions struct {
|
||||
User string // User that will run the command
|
||||
Privileged bool // Is the container in privileged mode
|
||||
Tty bool // Attach standard streams to a tty.
|
||||
ConsoleSize *[2]uint `json:",omitempty"` // Initial console size [height, width]
|
||||
AttachStdin bool // Attach the standard input, makes possible user interaction
|
||||
AttachStderr bool // Attach the standard error
|
||||
AttachStdout bool // Attach the standard output
|
||||
Detach bool // Execute in detach mode
|
||||
DetachKeys string // Escape keys for detach
|
||||
Env []string // Environment variables
|
||||
WorkingDir string // Working directory
|
||||
Cmd []string // Execution commands and args
|
||||
}
|
||||
|
||||
// ExecStartOptions is a temp struct used by execStart
|
||||
// Config fields are part of ExecConfig in runconfig package
|
||||
type ExecStartOptions struct {
|
||||
// ExecStart will first check if it's detached
|
||||
Detach bool
|
||||
// Check if there's a tty
|
||||
Tty bool
|
||||
// Terminal size [height, width], unused if Tty == false
|
||||
ConsoleSize *[2]uint `json:",omitempty"`
|
||||
}
|
||||
|
||||
// ExecAttachOptions is a temp struct used by execAttach.
|
||||
//
|
||||
// TODO(thaJeztah): make this a separate type; ContainerExecAttach does not use the Detach option, and cannot run detached.
|
||||
type ExecAttachOptions = ExecStartOptions
|
||||
|
||||
// ExecInspect holds information returned by exec inspect.
|
||||
type ExecInspect struct {
|
||||
ExecID string `json:"ID"`
|
||||
ContainerID string
|
||||
Running bool
|
||||
ExitCode int
|
||||
Pid int
|
||||
}
|
||||
@@ -360,12 +360,6 @@ type LogConfig struct {
|
||||
Config map[string]string
|
||||
}
|
||||
|
||||
// Ulimit is an alias for [units.Ulimit], which may be moving to a different
|
||||
// location or become a local type. This alias helps with that transition.
|
||||
//
|
||||
// Users are recommended to use this alias instead of using [units.Ulimit] directly.
|
||||
type Ulimit = units.Ulimit
|
||||
|
||||
// Resources contains container's resources (cgroups config, ulimits...)
|
||||
type Resources struct {
|
||||
// Applicable to all platforms
|
||||
@@ -393,14 +387,14 @@ type Resources struct {
|
||||
|
||||
// KernelMemory specifies the kernel memory limit (in bytes) for the container.
|
||||
// Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes.
|
||||
KernelMemory int64 `json:",omitempty"`
|
||||
KernelMemoryTCP int64 `json:",omitempty"` // Hard limit for kernel TCP buffer memory (in bytes)
|
||||
MemoryReservation int64 // Memory soft limit (in bytes)
|
||||
MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap
|
||||
MemorySwappiness *int64 // Tuning container memory swappiness behaviour
|
||||
OomKillDisable *bool // Whether to disable OOM Killer or not
|
||||
PidsLimit *int64 // Setting PIDs limit for a container; Set `0` or `-1` for unlimited, or `null` to not change.
|
||||
Ulimits []*Ulimit // List of ulimits to be set in the container
|
||||
KernelMemory int64 `json:",omitempty"`
|
||||
KernelMemoryTCP int64 `json:",omitempty"` // Hard limit for kernel TCP buffer memory (in bytes)
|
||||
MemoryReservation int64 // Memory soft limit (in bytes)
|
||||
MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap
|
||||
MemorySwappiness *int64 // Tuning container memory swappiness behaviour
|
||||
OomKillDisable *bool // Whether to disable OOM Killer or not
|
||||
PidsLimit *int64 // Setting PIDs limit for a container; Set `0` or `-1` for unlimited, or `null` to not change.
|
||||
Ulimits []*units.Ulimit // List of ulimits to be set in the container
|
||||
|
||||
// Applicable to Windows
|
||||
CPUCount int64 `json:"CpuCount"` // CPU count
|
||||
|
||||
@@ -9,6 +9,24 @@ func (i Isolation) IsValid() bool {
|
||||
return i.IsDefault()
|
||||
}
|
||||
|
||||
// NetworkName returns the name of the network stack.
|
||||
func (n NetworkMode) NetworkName() string {
|
||||
if n.IsBridge() {
|
||||
return network.NetworkBridge
|
||||
} else if n.IsHost() {
|
||||
return network.NetworkHost
|
||||
} else if n.IsContainer() {
|
||||
return "container"
|
||||
} else if n.IsNone() {
|
||||
return network.NetworkNone
|
||||
} else if n.IsDefault() {
|
||||
return network.NetworkDefault
|
||||
} else if n.IsUserDefined() {
|
||||
return n.UserDefined()
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// IsBridge indicates whether container uses the bridge network stack
|
||||
func (n NetworkMode) IsBridge() bool {
|
||||
return n == network.NetworkBridge
|
||||
@@ -23,23 +41,3 @@ func (n NetworkMode) IsHost() bool {
|
||||
func (n NetworkMode) IsUserDefined() bool {
|
||||
return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer()
|
||||
}
|
||||
|
||||
// NetworkName returns the name of the network stack.
|
||||
func (n NetworkMode) NetworkName() string {
|
||||
switch {
|
||||
case n.IsDefault():
|
||||
return network.NetworkDefault
|
||||
case n.IsBridge():
|
||||
return network.NetworkBridge
|
||||
case n.IsHost():
|
||||
return network.NetworkHost
|
||||
case n.IsNone():
|
||||
return network.NetworkNone
|
||||
case n.IsContainer():
|
||||
return "container"
|
||||
case n.IsUserDefined():
|
||||
return n.UserDefined()
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,11 +2,6 @@ package container // import "github.com/docker/docker/api/types/container"
|
||||
|
||||
import "github.com/docker/docker/api/types/network"
|
||||
|
||||
// IsValid indicates if an isolation technology is valid
|
||||
func (i Isolation) IsValid() bool {
|
||||
return i.IsDefault() || i.IsHyperV() || i.IsProcess()
|
||||
}
|
||||
|
||||
// IsBridge indicates whether container uses the bridge network stack
|
||||
// in windows it is given the name NAT
|
||||
func (n NetworkMode) IsBridge() bool {
|
||||
@@ -24,24 +19,24 @@ func (n NetworkMode) IsUserDefined() bool {
|
||||
return !n.IsDefault() && !n.IsNone() && !n.IsBridge() && !n.IsContainer()
|
||||
}
|
||||
|
||||
// IsValid indicates if an isolation technology is valid
|
||||
func (i Isolation) IsValid() bool {
|
||||
return i.IsDefault() || i.IsHyperV() || i.IsProcess()
|
||||
}
|
||||
|
||||
// NetworkName returns the name of the network stack.
|
||||
func (n NetworkMode) NetworkName() string {
|
||||
switch {
|
||||
case n.IsDefault():
|
||||
if n.IsDefault() {
|
||||
return network.NetworkDefault
|
||||
case n.IsBridge():
|
||||
} else if n.IsBridge() {
|
||||
return network.NetworkNat
|
||||
case n.IsHost():
|
||||
// Windows currently doesn't support host network-mode, so
|
||||
// this would currently never happen.
|
||||
return network.NetworkHost
|
||||
case n.IsNone():
|
||||
} else if n.IsNone() {
|
||||
return network.NetworkNone
|
||||
case n.IsContainer():
|
||||
} else if n.IsContainer() {
|
||||
return "container"
|
||||
case n.IsUserDefined():
|
||||
} else if n.IsUserDefined() {
|
||||
return n.UserDefined()
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
package events // import "github.com/docker/docker/api/types/events"
|
||||
import "github.com/docker/docker/api/types/filters"
|
||||
|
||||
// Type is used for event-types.
|
||||
type Type string
|
||||
@@ -126,10 +125,3 @@ type Message struct {
|
||||
Time int64 `json:"time,omitempty"`
|
||||
TimeNano int64 `json:"timeNano,omitempty"`
|
||||
}
|
||||
|
||||
// ListOptions holds parameters to filter events with.
|
||||
type ListOptions struct {
|
||||
Since string
|
||||
Until string
|
||||
Filters filters.Args
|
||||
}
|
||||
|
||||
@@ -1,47 +1,9 @@
|
||||
package image
|
||||
|
||||
import (
|
||||
"io"
|
||||
"time"
|
||||
)
|
||||
import "time"
|
||||
|
||||
// Metadata contains engine-local data about the image.
|
||||
type Metadata struct {
|
||||
// LastTagTime is the date and time at which the image was last tagged.
|
||||
LastTagTime time.Time `json:",omitempty"`
|
||||
}
|
||||
|
||||
// PruneReport contains the response for Engine API:
|
||||
// POST "/images/prune"
|
||||
type PruneReport struct {
|
||||
ImagesDeleted []DeleteResponse
|
||||
SpaceReclaimed uint64
|
||||
}
|
||||
|
||||
// LoadResponse returns information to the client about a load process.
|
||||
//
|
||||
// TODO(thaJeztah): remove this type, and just use an io.ReadCloser
|
||||
//
|
||||
// This type was added in https://github.com/moby/moby/pull/18878, related
|
||||
// to https://github.com/moby/moby/issues/19177;
|
||||
//
|
||||
// Make docker load to output json when the response content type is json
|
||||
// Swarm hijacks the response from docker load and returns JSON rather
|
||||
// than plain text like the Engine does. This makes the API library return
|
||||
// information to figure that out.
|
||||
//
|
||||
// However the "load" endpoint unconditionally returns JSON;
|
||||
// https://github.com/moby/moby/blob/7b9d2ef6e5518a3d3f3cc418459f8df786cfbbd1/api/server/router/image/image_routes.go#L248-L255
|
||||
//
|
||||
// PR https://github.com/moby/moby/pull/21959 made the response-type depend
|
||||
// on whether "quiet" was set, but this logic got changed in a follow-up
|
||||
// https://github.com/moby/moby/pull/25557, which made the JSON response-type
|
||||
// unconditional, but the output produced depends on whether "quiet" was set.
|
||||
//
|
||||
// We should deprecate the "quiet" option, as it's really a client
|
||||
// responsibility.
|
||||
type LoadResponse struct {
|
||||
// Body must be closed to avoid a resource leak
|
||||
Body io.ReadCloser
|
||||
JSON bool
|
||||
}
|
||||
|
||||
@@ -1,18 +1,6 @@
|
||||
package image
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
// ImportSource holds source information for ImageImport
|
||||
type ImportSource struct {
|
||||
Source io.Reader // Source is the data to send to the server to create this image from. You must set SourceName to "-" to leverage this.
|
||||
SourceName string // SourceName is the name of the image to pull. Set to "-" to leverage the Source attribute.
|
||||
}
|
||||
import "github.com/docker/docker/api/types/filters"
|
||||
|
||||
// ImportOptions holds information to import images from the client host.
|
||||
type ImportOptions struct {
|
||||
@@ -39,28 +27,12 @@ type PullOptions struct {
|
||||
// privilege request fails.
|
||||
//
|
||||
// Also see [github.com/docker/docker/api/types.RequestPrivilegeFunc].
|
||||
PrivilegeFunc func(context.Context) (string, error)
|
||||
PrivilegeFunc func() (string, error)
|
||||
Platform string
|
||||
}
|
||||
|
||||
// PushOptions holds information to push images.
|
||||
type PushOptions struct {
|
||||
All bool
|
||||
RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
|
||||
|
||||
// PrivilegeFunc is a function that clients can supply to retry operations
|
||||
// after getting an authorization error. This function returns the registry
|
||||
// authentication header value in base64 encoded format, or an error if the
|
||||
// privilege request fails.
|
||||
//
|
||||
// Also see [github.com/docker/docker/api/types.RequestPrivilegeFunc].
|
||||
PrivilegeFunc func(context.Context) (string, error)
|
||||
|
||||
// Platform is an optional field that selects a specific platform to push
|
||||
// when the image is a multi-platform image.
|
||||
// Using this will only push a single platform-specific manifest.
|
||||
Platform *ocispec.Platform `json:",omitempty"`
|
||||
}
|
||||
type PushOptions PullOptions
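A hedged sketch of the new per-platform push field (imports elided; `cli`, `ctx`, and `encodedAuth` are assumed). On clients or daemons without this field the whole multi-platform image is pushed instead:

func pushArm64(ctx context.Context, cli *client.Client, encodedAuth string) error {
	platform := ocispec.Platform{OS: "linux", Architecture: "arm64"}
	rc, err := cli.ImagePush(ctx, "registry.example.com/app:latest", image.PushOptions{
		RegistryAuth: encodedAuth,
		Platform:     &platform, // push only the matching manifest
	})
	if err != nil {
		return err
	}
	defer rc.Close()
	_, err = io.Copy(io.Discard, rc) // drain the progress stream
	return err
}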
|
||||
|
||||
// ListOptions holds parameters to list images with.
|
||||
type ListOptions struct {
|
||||
|
||||
@@ -119,11 +119,7 @@ type TmpfsOptions struct {
|
||||
SizeBytes int64 `json:",omitempty"`
|
||||
// Mode of the tmpfs upon creation
|
||||
Mode os.FileMode `json:",omitempty"`
|
||||
// Options to be passed to the tmpfs mount. An array of arrays. Flag
|
||||
// options should be provided as 1-length arrays. Other types should be
|
||||
// provided as 2-length arrays, where the first item is the key and the
|
||||
// second the value.
|
||||
Options [][]string `json:",omitempty"`
|
||||
|
||||
// TODO(stevvooe): There are several more tmpfs flags, specified in the
|
||||
// daemon, that are accepted. Only the most basic are added for now.
|
||||
//
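As a sketch of the documented layout (only meaningful on API versions that carry the Options field): flag options are 1-element entries, key/value options are 2-element entries.

tmpfs := mount.Mount{
	Type:   mount.TypeTmpfs,
	Target: "/run/cache",
	TmpfsOptions: &mount.TmpfsOptions{
		SizeBytes: 64 * 1024 * 1024,
		Mode:      0o700,
		Options: [][]string{
			{"noexec"},      // flag option: 1-length array
			{"uid", "1000"}, // key/value option: 2-length array
		},
	},
}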
|
||||
|
||||
@@ -1,19 +0,0 @@
|
||||
package network
|
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
// CreateResponse NetworkCreateResponse
|
||||
//
|
||||
// OK response to NetworkCreate operation
|
||||
// swagger:model CreateResponse
|
||||
type CreateResponse struct {
|
||||
|
||||
// The ID of the created network.
|
||||
// Required: true
|
||||
ID string `json:"Id"`
|
||||
|
||||
// Warnings encountered when creating the container
|
||||
// Required: true
|
||||
Warning string `json:"Warning"`
|
||||
}
|
||||
@@ -18,7 +18,6 @@ type EndpointSettings struct {
|
||||
// Once the container is running, it becomes operational data (it may contain a
|
||||
// generated address).
|
||||
MacAddress string
|
||||
DriverOpts map[string]string
|
||||
// Operational data
|
||||
NetworkID string
|
||||
EndpointID string
|
||||
@@ -28,6 +27,7 @@ type EndpointSettings struct {
|
||||
IPv6Gateway string
|
||||
GlobalIPv6Address string
|
||||
GlobalIPv6PrefixLen int
|
||||
DriverOpts map[string]string
|
||||
// DNSNames holds all the (non fully qualified) DNS names associated with this endpoint. The first entry is used to
|
||||
// generate PTR records.
|
||||
DNSNames []string
|
||||
|
||||
@@ -1,8 +1,6 @@
|
||||
package network // import "github.com/docker/docker/api/types/network"
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
)
|
||||
|
||||
@@ -19,82 +17,6 @@ const (
|
||||
NetworkNat = "nat"
|
||||
)
|
||||
|
||||
// CreateRequest is the request message sent to the server for network create call.
|
||||
type CreateRequest struct {
|
||||
CreateOptions
|
||||
Name string // Name is the requested name of the network.
|
||||
|
||||
// Deprecated: CheckDuplicate is deprecated since API v1.44, but it defaults to true when sent by the client
|
||||
// package to older daemons.
|
||||
CheckDuplicate *bool `json:",omitempty"`
|
||||
}
|
||||
|
||||
// CreateOptions holds options to create a network.
|
||||
type CreateOptions struct {
|
||||
Driver string // Driver is the driver-name used to create the network (e.g. `bridge`, `overlay`)
|
||||
Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level).
|
||||
EnableIPv6 *bool `json:",omitempty"` // EnableIPv6 represents whether to enable IPv6.
|
||||
IPAM *IPAM // IPAM is the network's IP Address Management.
|
||||
Internal bool // Internal represents if the network is used internal only.
|
||||
Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode.
|
||||
Ingress bool // Ingress indicates the network is providing the routing-mesh for the swarm cluster.
|
||||
ConfigOnly bool // ConfigOnly creates a config-only network. Config-only networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services.
|
||||
ConfigFrom *ConfigReference // ConfigFrom specifies the source which will provide the configuration for this network. The specified network must be a config-only network; see [CreateOptions.ConfigOnly].
|
||||
Options map[string]string // Options specifies the network-specific options to use when creating the network.
|
||||
Labels map[string]string // Labels holds metadata specific to the network being created.
|
||||
}
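A hedged sketch of creating a network with this split between the name (carried by CreateRequest on the wire) and CreateOptions (used by the client API); imports elided, `cli` and `ctx` assumed, and the label is hypothetical:

func createAppNet(ctx context.Context, cli *client.Client) (string, error) {
	resp, err := cli.NetworkCreate(ctx, "app-net", network.CreateOptions{
		Driver:     "bridge",
		Attachable: true,
		Labels:     map[string]string{"com.example.stack": "demo"}, // hypothetical label
	})
	if err != nil {
		return "", err
	}
	return resp.ID, nil // resp.Warning may carry non-fatal notices
}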
|
||||
|
||||
// ListOptions holds parameters to filter the list of networks with.
|
||||
type ListOptions struct {
|
||||
Filters filters.Args
|
||||
}
|
||||
|
||||
// InspectOptions holds parameters to inspect network.
|
||||
type InspectOptions struct {
|
||||
Scope string
|
||||
Verbose bool
|
||||
}
|
||||
|
||||
// ConnectOptions represents the data to be used to connect a container to the
|
||||
// network.
|
||||
type ConnectOptions struct {
|
||||
Container string
|
||||
EndpointConfig *EndpointSettings `json:",omitempty"`
|
||||
}
|
||||
|
||||
// DisconnectOptions represents the data to be used to disconnect a container
|
||||
// from the network.
|
||||
type DisconnectOptions struct {
|
||||
Container string
|
||||
Force bool
|
||||
}
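These two option structs mirror the client calls; a minimal fragment (imports elided; `cli`, `ctx`, `networkID`, and `containerID` assumed) of connecting and later force-disconnecting a container:

// Attach the container to the network with default endpoint settings.
if err := cli.NetworkConnect(ctx, networkID, containerID, &network.EndpointSettings{}); err != nil {
	return err
}
// ...later, detach it even if the container is still running (force = true).
if err := cli.NetworkDisconnect(ctx, networkID, containerID, true); err != nil {
	return err
}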
|
||||
|
||||
// Inspect is the body of the "get network" http response message.
|
||||
type Inspect struct {
|
||||
Name string // Name is the name of the network
|
||||
ID string `json:"Id"` // ID uniquely identifies a network on a single machine
|
||||
Created time.Time // Created is the time the network was created
|
||||
Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level)
|
||||
Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`)
|
||||
EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6
|
||||
IPAM IPAM // IPAM is the network's IP Address Management
|
||||
Internal bool // Internal represents if the network is used internal only
|
||||
Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode.
|
||||
Ingress bool // Ingress indicates the network is providing the routing-mesh for the swarm cluster.
|
||||
ConfigFrom ConfigReference // ConfigFrom specifies the source which will provide the configuration for this network.
|
||||
ConfigOnly bool // ConfigOnly networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services.
|
||||
Containers map[string]EndpointResource // Containers contains endpoints belonging to the network
|
||||
Options map[string]string // Options holds the network-specific options to use when creating the network
|
||||
Labels map[string]string // Labels holds metadata specific to the network being created
|
||||
Peers []PeerInfo `json:",omitempty"` // List of peer nodes for an overlay network
|
||||
Services map[string]ServiceInfo `json:",omitempty"`
|
||||
}
|
||||
|
||||
// Summary is used as response when listing networks. It currently is an alias
|
||||
// for [Inspect], but may diverge in the future, as not all information may
|
||||
// be included when listing networks.
|
||||
type Summary = Inspect
|
||||
|
||||
// Address represents an IP address
|
||||
type Address struct {
|
||||
Addr string
|
||||
@@ -123,16 +45,6 @@ type ServiceInfo struct {
|
||||
Tasks []Task
|
||||
}
|
||||
|
||||
// EndpointResource contains network resources allocated and used for a
|
||||
// container in a network.
|
||||
type EndpointResource struct {
|
||||
Name string
|
||||
EndpointID string
|
||||
MacAddress string
|
||||
IPv4Address string
|
||||
IPv6Address string
|
||||
}
|
||||
|
||||
// NetworkingConfig represents the container's networking configuration for each of its interfaces
|
||||
// Carries the networking configs specified in the `docker run` and `docker network connect` commands
|
||||
type NetworkingConfig struct {
|
||||
@@ -158,9 +70,3 @@ var acceptedFilters = map[string]bool{
|
||||
func ValidateFilters(filter filters.Args) error {
|
||||
return filter.Validate(acceptedFilters)
|
||||
}
|
||||
|
||||
// PruneReport contains the response for Engine API:
|
||||
// POST "/networks/prune"
|
||||
type PruneReport struct {
|
||||
NetworksDeleted []string
|
||||
}
|
||||
|
||||
@@ -84,6 +84,32 @@ type IndexInfo struct {
|
||||
Official bool
|
||||
}
|
||||
|
||||
// SearchResult describes a search result returned from a registry
|
||||
type SearchResult struct {
|
||||
// StarCount indicates the number of stars this repository has
|
||||
StarCount int `json:"star_count"`
|
||||
// IsOfficial is true if the result is from an official repository.
|
||||
IsOfficial bool `json:"is_official"`
|
||||
// Name is the name of the repository
|
||||
Name string `json:"name"`
|
||||
// IsAutomated indicates whether the result is automated.
|
||||
//
|
||||
// Deprecated: the "is_automated" field is deprecated and will always be "false".
|
||||
IsAutomated bool `json:"is_automated"`
|
||||
// Description is a textual description of the repository
|
||||
Description string `json:"description"`
|
||||
}
|
||||
|
||||
// SearchResults lists a collection of search results returned from a registry
|
||||
type SearchResults struct {
|
||||
// Query contains the query string that generated the search results
|
||||
Query string `json:"query"`
|
||||
// NumResults indicates the number of results the query returned
|
||||
NumResults int `json:"num_results"`
|
||||
// Results is a slice containing the actual results for the search
|
||||
Results []SearchResult `json:"results"`
|
||||
}
|
||||
|
||||
// DistributionInspect describes the result obtained from contacting the
|
||||
// registry to retrieve image metadata
|
||||
type DistributionInspect struct {
|
||||
|
||||
@@ -1,47 +0,0 @@
|
||||
package registry
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
)
|
||||
|
||||
// SearchOptions holds parameters to search images with.
|
||||
type SearchOptions struct {
|
||||
RegistryAuth string
|
||||
|
||||
// PrivilegeFunc is a [types.RequestPrivilegeFunc] the client can
|
||||
// supply to retry operations after getting an authorization error.
|
||||
//
|
||||
// It must return the registry authentication header value in base64
|
||||
// format, or an error if the privilege request fails.
|
||||
PrivilegeFunc func(context.Context) (string, error)
|
||||
Filters filters.Args
|
||||
Limit int
|
||||
}
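A hedged sketch of the corresponding client call after this move (imports elided; `cli` and `ctx` assumed; on older clients the options type is types.ImageSearchOptions instead):

results, err := cli.ImageSearch(ctx, "nginx", registry.SearchOptions{Limit: 10})
if err != nil {
	return err
}
for _, r := range results {
	fmt.Printf("%s (%d stars, official=%v)\n", r.Name, r.StarCount, r.IsOfficial)
}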
|
||||
|
||||
// SearchResult describes a search result returned from a registry
|
||||
type SearchResult struct {
|
||||
// StarCount indicates the number of stars this repository has
|
||||
StarCount int `json:"star_count"`
|
||||
// IsOfficial is true if the result is from an official repository.
|
||||
IsOfficial bool `json:"is_official"`
|
||||
// Name is the name of the repository
|
||||
Name string `json:"name"`
|
||||
// IsAutomated indicates whether the result is automated.
|
||||
//
|
||||
// Deprecated: the "is_automated" field is deprecated and will always be "false".
|
||||
IsAutomated bool `json:"is_automated"`
|
||||
// Description is a textual description of the repository
|
||||
Description string `json:"description"`
|
||||
}
|
||||
|
||||
// SearchResults lists a collection of search results returned from a registry
|
||||
type SearchResults struct {
|
||||
// Query contains the query string that generated the search results
|
||||
Query string `json:"query"`
|
||||
// NumResults indicates the number of results the query returned
|
||||
NumResults int `json:"num_results"`
|
||||
// Results is a slice containing the actual results for the search
|
||||
Results []SearchResult `json:"results"`
|
||||
}
|
||||
@@ -1,4 +1,6 @@
|
||||
package container
|
||||
// Package types is used for API stability in the types and response to the
|
||||
// consumers of the API stats endpoint.
|
||||
package types // import "github.com/docker/docker/api/types"
|
||||
|
||||
import "time"
|
||||
|
||||
@@ -167,10 +169,8 @@ type Stats struct {
|
||||
MemoryStats MemoryStats `json:"memory_stats,omitempty"`
|
||||
}
|
||||
|
||||
// StatsResponse extends Stats with the container name, ID, and the newer Networks map.
|
||||
//
|
||||
// TODO(thaJeztah): unify with [Stats]. This wrapper was to account for pre-api v1.21 changes, see https://github.com/moby/moby/commit/d3379946ec96fb6163cb8c4517d7d5a067045801
|
||||
type StatsResponse struct {
|
||||
// StatsJSON is newly used Networks
|
||||
type StatsJSON struct {
|
||||
Stats
|
||||
|
||||
Name string `json:"name,omitempty"`
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/mount"
|
||||
"github.com/docker/go-units"
|
||||
)
|
||||
|
||||
// DNSConfig specifies DNS related configurations in resolver configuration file (resolv.conf)
|
||||
@@ -114,6 +115,5 @@ type ContainerSpec struct {
|
||||
Sysctls map[string]string `json:",omitempty"`
|
||||
CapabilityAdd []string `json:",omitempty"`
|
||||
CapabilityDrop []string `json:",omitempty"`
|
||||
Ulimits []*container.Ulimit `json:",omitempty"`
|
||||
OomScoreAdj int64 `json:",omitempty"`
|
||||
Ulimits []*units.Ulimit `json:",omitempty"`
|
||||
}
|
||||
|
||||
@@ -75,8 +75,6 @@ type Info struct {
|
||||
DefaultAddressPools []NetworkAddressPool `json:",omitempty"`
|
||||
CDISpecDirs []string
|
||||
|
||||
Containerd *ContainerdInfo `json:",omitempty"`
|
||||
|
||||
// Legacy API fields for older API versions.
|
||||
legacyFields
|
||||
|
||||
@@ -87,43 +85,6 @@ type Info struct {
|
||||
Warnings []string
|
||||
}
|
||||
|
||||
// ContainerdInfo holds information about the containerd instance used by the daemon.
|
||||
type ContainerdInfo struct {
|
||||
// Address is the path to the containerd socket.
|
||||
Address string `json:",omitempty"`
|
||||
// Namespaces is the containerd namespaces used by the daemon.
|
||||
Namespaces ContainerdNamespaces
|
||||
}
|
||||
|
||||
// ContainerdNamespaces reflects the containerd namespaces used by the daemon.
|
||||
//
|
||||
// These namespaces can be configured in the daemon configuration, and are
|
||||
// considered to be used exclusively by the daemon.
|
||||
//
|
||||
// As these namespaces are considered to be exclusively accessed
|
||||
// by the daemon, it is not recommended to change these values,
|
||||
// or to change them to a value that is used by other systems,
|
||||
// such as cri-containerd.
|
||||
type ContainerdNamespaces struct {
|
||||
// Containers holds the default containerd namespace used for
|
||||
// containers managed by the daemon.
|
||||
//
|
||||
// The default namespace for containers is "moby", but will be
|
||||
// suffixed with the `<uid>.<gid>` of the remapped `root` if
|
||||
// user-namespaces are enabled and the containerd image-store
|
||||
// is used.
|
||||
Containers string
|
||||
|
||||
// Plugins holds the default containerd namespace used for
|
||||
// plugins managed by the daemon.
|
||||
//
|
||||
// The default namespace for plugins is "moby", but will be
|
||||
// suffixed with the `<uid>.<gid>` of the remapped `root` if
|
||||
// user-namespaces are enabled and the containerd image-store
|
||||
// is used.
|
||||
Plugins string
|
||||
}
|
||||
|
||||
type legacyFields struct {
|
||||
ExecutionDriver string `json:",omitempty"` // Deprecated: deprecated since API v1.25, but returned for older versions.
|
||||
}
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
package types // import "github.com/docker/docker/api/types"
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types/container"
|
||||
@@ -153,13 +155,36 @@ type Container struct {
|
||||
State string
|
||||
Status string
|
||||
HostConfig struct {
|
||||
NetworkMode string `json:",omitempty"`
|
||||
Annotations map[string]string `json:",omitempty"`
|
||||
NetworkMode string `json:",omitempty"`
|
||||
}
|
||||
NetworkSettings *SummaryNetworkSettings
|
||||
Mounts []MountPoint
|
||||
}
|
||||
|
||||
// CopyConfig contains request body of Engine API:
|
||||
// POST "/containers/"+containerID+"/copy"
|
||||
type CopyConfig struct {
|
||||
Resource string
|
||||
}
|
||||
|
||||
// ContainerPathStat is used to encode the header from
|
||||
// GET "/containers/{name:.*}/archive"
|
||||
// "Name" is the file or directory name.
|
||||
type ContainerPathStat struct {
|
||||
Name string `json:"name"`
|
||||
Size int64 `json:"size"`
|
||||
Mode os.FileMode `json:"mode"`
|
||||
Mtime time.Time `json:"mtime"`
|
||||
LinkTarget string `json:"linkTarget"`
|
||||
}
|
||||
|
||||
// ContainerStats contains response of Engine API:
|
||||
// GET "/stats"
|
||||
type ContainerStats struct {
|
||||
Body io.ReadCloser `json:"body"`
|
||||
OSType string `json:"ostype"`
|
||||
}
|
||||
|
||||
// Ping contains response of Engine API:
|
||||
// GET "/_ping"
|
||||
type Ping struct {
|
||||
@@ -205,6 +230,17 @@ type Version struct {
|
||||
BuildTime string `json:",omitempty"`
|
||||
}
|
||||
|
||||
// ExecStartCheck is a temp struct used by execStart
|
||||
// Config fields are part of ExecConfig in runconfig package
|
||||
type ExecStartCheck struct {
|
||||
// ExecStart will first check if it's detached
|
||||
Detach bool
|
||||
// Check if there's a tty
|
||||
Tty bool
|
||||
// Terminal size [height, width], unused if Tty == false
|
||||
ConsoleSize *[2]uint `json:",omitempty"`
|
||||
}
|
||||
|
||||
// HealthcheckResult stores information about a single run of a healthcheck probe
|
||||
type HealthcheckResult struct {
|
||||
Start time.Time // Start is the time this check started
|
||||
@@ -245,6 +281,18 @@ type ContainerState struct {
|
||||
Health *Health `json:",omitempty"`
|
||||
}
|
||||
|
||||
// ContainerNode stores information about the node that a container
|
||||
// is running on. It's only used by the Docker Swarm standalone API
|
||||
type ContainerNode struct {
|
||||
ID string
|
||||
IPAddress string `json:"IP"`
|
||||
Addr string
|
||||
Name string
|
||||
Cpus int
|
||||
Memory int64
|
||||
Labels map[string]string
|
||||
}
|
||||
|
||||
// ContainerJSONBase contains response of Engine API:
|
||||
// GET "/containers/{name:.*}/json"
|
||||
type ContainerJSONBase struct {
|
||||
@@ -258,7 +306,7 @@ type ContainerJSONBase struct {
|
||||
HostnamePath string
|
||||
HostsPath string
|
||||
LogPath string
|
||||
Node *ContainerNode `json:",omitempty"` // Deprecated: Node was only propagated by Docker Swarm standalone API. It sill be removed in the next release.
|
||||
Node *ContainerNode `json:",omitempty"` // Node is only propagated by Docker Swarm standalone API
|
||||
Name string
|
||||
RestartCount int
|
||||
Driver string
|
||||
@@ -375,6 +423,84 @@ type MountPoint struct {
|
||||
Propagation mount.Propagation
|
||||
}
|
||||
|
||||
// NetworkResource is the body of the "get network" http response message
|
||||
type NetworkResource struct {
|
||||
Name string // Name is the requested name of the network
|
||||
ID string `json:"Id"` // ID uniquely identifies a network on a single machine
|
||||
Created time.Time // Created is the time the network was created
|
||||
Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level)
|
||||
Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`)
|
||||
EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6
|
||||
IPAM network.IPAM // IPAM is the network's IP Address Management
|
||||
Internal bool // Internal represents if the network is used internal only
|
||||
Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode.
|
||||
Ingress bool // Ingress indicates the network is providing the routing-mesh for the swarm cluster.
|
||||
ConfigFrom network.ConfigReference // ConfigFrom specifies the source which will provide the configuration for this network.
|
||||
ConfigOnly bool // ConfigOnly networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services.
|
||||
Containers map[string]EndpointResource // Containers contains endpoints belonging to the network
|
||||
Options map[string]string // Options holds the network-specific options to use when creating the network
|
||||
Labels map[string]string // Labels holds metadata specific to the network being created
|
||||
Peers []network.PeerInfo `json:",omitempty"` // List of peer nodes for an overlay network
|
||||
Services map[string]network.ServiceInfo `json:",omitempty"`
|
||||
}
|
||||
|
||||
// EndpointResource contains network resources allocated and used for a container in a network
|
||||
type EndpointResource struct {
|
||||
Name string
|
||||
EndpointID string
|
||||
MacAddress string
|
||||
IPv4Address string
|
||||
IPv6Address string
|
||||
}
|
||||
|
||||
// NetworkCreate is the expected body of the "create network" http request message
|
||||
type NetworkCreate struct {
|
||||
// Deprecated: CheckDuplicate is deprecated since API v1.44, but it defaults to true when sent by the client
|
||||
// package to older daemons.
|
||||
CheckDuplicate bool `json:",omitempty"`
|
||||
Driver string
|
||||
Scope string
|
||||
EnableIPv6 bool
|
||||
IPAM *network.IPAM
|
||||
Internal bool
|
||||
Attachable bool
|
||||
Ingress bool
|
||||
ConfigOnly bool
|
||||
ConfigFrom *network.ConfigReference
|
||||
Options map[string]string
|
||||
Labels map[string]string
|
||||
}
|
||||
|
||||
// NetworkCreateRequest is the request message sent to the server for network create call.
|
||||
type NetworkCreateRequest struct {
|
||||
NetworkCreate
|
||||
Name string
|
||||
}
|
||||
|
||||
// NetworkCreateResponse is the response message sent by the server for network create call
|
||||
type NetworkCreateResponse struct {
|
||||
ID string `json:"Id"`
|
||||
Warning string
|
||||
}
|
||||
|
||||
// NetworkConnect represents the data to be used to connect a container to the network
|
||||
type NetworkConnect struct {
|
||||
Container string
|
||||
EndpointConfig *network.EndpointSettings `json:",omitempty"`
|
||||
}
|
||||
|
||||
// NetworkDisconnect represents the data to be used to disconnect a container from the network
|
||||
type NetworkDisconnect struct {
|
||||
Container string
|
||||
Force bool
|
||||
}
|
||||
|
||||
// NetworkInspectOptions holds parameters to inspect network
|
||||
type NetworkInspectOptions struct {
|
||||
Scope string
|
||||
Verbose bool
|
||||
}
|
||||
|
||||
// DiskUsageObject represents an object type used for disk usage query filtering.
|
||||
type DiskUsageObject string
|
||||
|
||||
@@ -407,6 +533,27 @@ type DiskUsage struct {
|
||||
BuilderSize int64 `json:",omitempty"` // Deprecated: deprecated in API 1.38, and no longer used since API 1.40.
|
||||
}
|
||||
|
||||
// ContainersPruneReport contains the response for Engine API:
|
||||
// POST "/containers/prune"
|
||||
type ContainersPruneReport struct {
|
||||
ContainersDeleted []string
|
||||
SpaceReclaimed uint64
|
||||
}
|
||||
|
||||
// VolumesPruneReport contains the response for Engine API:
|
||||
// POST "/volumes/prune"
|
||||
type VolumesPruneReport struct {
|
||||
VolumesDeleted []string
|
||||
SpaceReclaimed uint64
|
||||
}
|
||||
|
||||
// ImagesPruneReport contains the response for Engine API:
|
||||
// POST "/images/prune"
|
||||
type ImagesPruneReport struct {
|
||||
ImagesDeleted []image.DeleteResponse
|
||||
SpaceReclaimed uint64
|
||||
}
|
||||
|
||||
// BuildCachePruneReport contains the response for Engine API:
|
||||
// POST "/build/prune"
|
||||
type BuildCachePruneReport struct {
|
||||
@@ -414,6 +561,12 @@ type BuildCachePruneReport struct {
|
||||
SpaceReclaimed uint64
|
||||
}
|
||||
|
||||
// NetworksPruneReport contains the response for Engine API:
|
||||
// POST "/networks/prune"
|
||||
type NetworksPruneReport struct {
|
||||
NetworksDeleted []string
|
||||
}
|
||||
|
||||
// SecretCreateResponse contains the information returned to a client
|
||||
// on the creation of a new secret.
|
||||
type SecretCreateResponse struct {
|
||||
|
||||
@@ -1,210 +1,35 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/events"
|
||||
"github.com/docker/docker/api/types/image"
|
||||
"github.com/docker/docker/api/types/network"
|
||||
"github.com/docker/docker/api/types/registry"
|
||||
"github.com/docker/docker/api/types/volume"
|
||||
)
|
||||
|
||||
// ImagesPruneReport contains the response for Engine API:
|
||||
// POST "/images/prune"
|
||||
// ImageImportOptions holds information to import images from the client host.
|
||||
//
|
||||
// Deprecated: use [image.PruneReport].
|
||||
type ImagesPruneReport = image.PruneReport
|
||||
// Deprecated: use [image.ImportOptions].
|
||||
type ImageImportOptions = image.ImportOptions
|
||||
|
||||
// VolumesPruneReport contains the response for Engine API:
|
||||
// POST "/volumes/prune".
|
||||
// ImageCreateOptions holds information to create images.
|
||||
//
|
||||
// Deprecated: use [volume.PruneReport].
|
||||
type VolumesPruneReport = volume.PruneReport
|
||||
// Deprecated: use [image.CreateOptions].
|
||||
type ImageCreateOptions = image.CreateOptions
|
||||
|
||||
// NetworkCreateRequest is the request message sent to the server for network create call.
|
||||
// ImagePullOptions holds information to pull images.
|
||||
//
|
||||
// Deprecated: use [network.CreateRequest].
|
||||
type NetworkCreateRequest = network.CreateRequest
|
||||
// Deprecated: use [image.PullOptions].
|
||||
type ImagePullOptions = image.PullOptions
|
||||
|
||||
// NetworkCreate is the expected body of the "create network" http request message
|
||||
// ImagePushOptions holds information to push images.
|
||||
//
|
||||
// Deprecated: use [network.CreateOptions].
|
||||
type NetworkCreate = network.CreateOptions
|
||||
// Deprecated: use [image.PushOptions].
|
||||
type ImagePushOptions = image.PushOptions
|
||||
|
||||
// NetworkListOptions holds parameters to filter the list of networks with.
|
||||
// ImageListOptions holds parameters to list images with.
|
||||
//
|
||||
// Deprecated: use [network.ListOptions].
|
||||
type NetworkListOptions = network.ListOptions
|
||||
// Deprecated: use [image.ListOptions].
|
||||
type ImageListOptions = image.ListOptions
|
||||
|
||||
// NetworkCreateResponse is the response message sent by the server for network create call.
|
||||
// ImageRemoveOptions holds parameters to remove images.
|
||||
//
|
||||
// Deprecated: use [network.CreateResponse].
|
||||
type NetworkCreateResponse = network.CreateResponse
|
||||
|
||||
// NetworkInspectOptions holds parameters to inspect network.
|
||||
//
|
||||
// Deprecated: use [network.InspectOptions].
|
||||
type NetworkInspectOptions = network.InspectOptions
|
||||
|
||||
// NetworkConnect represents the data to be used to connect a container to the network
|
||||
//
|
||||
// Deprecated: use [network.ConnectOptions].
|
||||
type NetworkConnect = network.ConnectOptions
|
||||
|
||||
// NetworkDisconnect represents the data to be used to disconnect a container from the network
|
||||
//
|
||||
// Deprecated: use [network.DisconnectOptions].
|
||||
type NetworkDisconnect = network.DisconnectOptions
|
||||
|
||||
// EndpointResource contains network resources allocated and used for a container in a network.
|
||||
//
|
||||
// Deprecated: use [network.EndpointResource].
|
||||
type EndpointResource = network.EndpointResource
|
||||
|
||||
// NetworkResource is the body of the "get network" http response message.
|
||||
//
|
||||
// Deprecated: use [network.Inspect] or [network.Summary] (for list operations).
|
||||
type NetworkResource = network.Inspect
|
||||
|
||||
// NetworksPruneReport contains the response for Engine API:
|
||||
// POST "/networks/prune"
|
||||
//
|
||||
// Deprecated: use [network.PruneReport].
|
||||
type NetworksPruneReport = network.PruneReport
|
||||
|
||||
// ExecConfig is a small subset of the Config struct that holds the configuration
|
||||
// for the exec feature of docker.
|
||||
//
|
||||
// Deprecated: use [container.ExecOptions].
|
||||
type ExecConfig = container.ExecOptions
|
||||
|
||||
// ExecStartCheck is a temp struct used by execStart
|
||||
// Config fields are part of ExecConfig in runconfig package
|
||||
//
|
||||
// Deprecated: use [container.ExecStartOptions] or [container.ExecAttachOptions].
|
||||
type ExecStartCheck = container.ExecStartOptions
|
||||
|
||||
// ContainerExecInspect holds information returned by exec inspect.
|
||||
//
|
||||
// Deprecated: use [container.ExecInspect].
|
||||
type ContainerExecInspect = container.ExecInspect
|
||||
|
||||
// ContainersPruneReport contains the response for Engine API:
|
||||
// POST "/containers/prune"
|
||||
//
|
||||
// Deprecated: use [container.PruneReport].
|
||||
type ContainersPruneReport = container.PruneReport
|
||||
|
||||
// ContainerPathStat is used to encode the header from
|
||||
// GET "/containers/{name:.*}/archive"
|
||||
// "Name" is the file or directory name.
|
||||
//
|
||||
// Deprecated: use [container.PathStat].
|
||||
type ContainerPathStat = container.PathStat
|
||||
|
||||
// CopyToContainerOptions holds information
|
||||
// about files to copy into a container.
|
||||
//
|
||||
// Deprecated: use [container.CopyToContainerOptions].
|
||||
type CopyToContainerOptions = container.CopyToContainerOptions
|
||||
|
||||
// ContainerStats contains response of Engine API:
|
||||
// GET "/stats"
|
||||
//
|
||||
// Deprecated: use [container.StatsResponseReader].
|
||||
type ContainerStats = container.StatsResponseReader
|
||||
|
||||
// ThrottlingData stores CPU throttling stats of one running container.
|
||||
// Not used on Windows.
|
||||
//
|
||||
// Deprecated: use [container.ThrottlingData].
|
||||
type ThrottlingData = container.ThrottlingData
|
||||
|
||||
// CPUUsage stores All CPU stats aggregated since container inception.
|
||||
//
|
||||
// Deprecated: use [container.CPUUsage].
|
||||
type CPUUsage = container.CPUUsage
|
||||
|
||||
// CPUStats aggregates and wraps all CPU related info of container
|
||||
//
|
||||
// Deprecated: use [container.CPUStats].
|
||||
type CPUStats = container.CPUStats
|
||||
|
||||
// MemoryStats aggregates all memory stats since container inception on Linux.
|
||||
// Windows returns stats for commit and private working set only.
|
||||
//
|
||||
// Deprecated: use [container.MemoryStats].
|
||||
type MemoryStats = container.MemoryStats
|
||||
|
||||
// BlkioStatEntry is one small entity to store a piece of Blkio stats
|
||||
// Not used on Windows.
|
||||
//
|
||||
// Deprecated: use [container.BlkioStatEntry].
|
||||
type BlkioStatEntry = container.BlkioStatEntry
|
||||
|
||||
// BlkioStats stores All IO service stats for data read and write.
|
||||
// This is a Linux specific structure as the differences between expressing
|
||||
// block I/O on Windows and Linux are sufficiently significant to make
|
||||
// little sense attempting to morph into a combined structure.
|
||||
//
|
||||
// Deprecated: use [container.BlkioStats].
|
||||
type BlkioStats = container.BlkioStats
|
||||
|
||||
// StorageStats is the disk I/O stats for read/write on Windows.
|
||||
//
|
||||
// Deprecated: use [container.StorageStats].
|
||||
type StorageStats = container.StorageStats
|
||||
|
||||
// NetworkStats aggregates the network stats of one container
|
||||
//
|
||||
// Deprecated: use [container.NetworkStats].
|
||||
type NetworkStats = container.NetworkStats
|
||||
|
||||
// PidsStats contains the stats of a container's pids
|
||||
//
|
||||
// Deprecated: use [container.PidsStats].
|
||||
type PidsStats = container.PidsStats
|
||||
|
||||
// Stats is Ultimate struct aggregating all types of stats of one container
|
||||
//
|
||||
// Deprecated: use [container.Stats].
|
||||
type Stats = container.Stats
|
||||
|
||||
// StatsJSON is newly used Networks
|
||||
//
|
||||
// Deprecated: use [container.StatsResponse].
|
||||
type StatsJSON = container.StatsResponse
|
||||
|
||||
// EventsOptions holds parameters to filter events with.
|
||||
//
|
||||
// Deprecated: use [events.ListOptions].
|
||||
type EventsOptions = events.ListOptions
|
||||
|
||||
// ImageSearchOptions holds parameters to search images with.
|
||||
//
|
||||
// Deprecated: use [registry.SearchOptions].
|
||||
type ImageSearchOptions = registry.SearchOptions
|
||||
|
||||
// ImageImportSource holds source information for ImageImport
|
||||
//
|
||||
// Deprecated: use [image.ImportSource].
|
||||
type ImageImportSource = image.ImportSource
|
||||
|
||||
// ImageLoadResponse returns information to the client about a load process.
|
||||
//
|
||||
// Deprecated: use [image.LoadResponse].
|
||||
type ImageLoadResponse = image.LoadResponse
|
||||
|
||||
// ContainerNode stores information about the node that a container
|
||||
// is running on. It's only used by the Docker Swarm standalone API.
|
||||
//
|
||||
// Deprecated: ContainerNode was used for the classic Docker Swarm standalone API. It will be removed in the next release.
|
||||
type ContainerNode struct {
|
||||
ID string
|
||||
IPAddress string `json:"IP"`
|
||||
Addr string
|
||||
Name string
|
||||
Cpus int
|
||||
Memory int64
|
||||
Labels map[string]string
|
||||
}
|
||||
// Deprecated: use [image.RemoveOptions].
|
||||
type ImageRemoveOptions = image.RemoveOptions
|
||||
|
||||
@@ -6,10 +6,3 @@ import "github.com/docker/docker/api/types/filters"
|
||||
type ListOptions struct {
|
||||
Filters filters.Args
|
||||
}
|
||||
|
||||
// PruneReport contains the response for Engine API:
|
||||
// POST "/volumes/prune"
|
||||
type PruneReport struct {
|
||||
VolumesDeleted []string
|
||||
SpaceReclaimed uint64
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
|
||||
//go:build go1.21
|
||||
//go:build go1.19
|
||||
|
||||
package containerimage
|
||||
|
||||
@@ -15,6 +15,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/containerd/containerd/content"
|
||||
cerrdefs "github.com/containerd/containerd/errdefs"
|
||||
"github.com/containerd/containerd/gc"
|
||||
"github.com/containerd/containerd/images"
|
||||
"github.com/containerd/containerd/leases"
|
||||
@@ -24,7 +25,6 @@ import (
|
||||
"github.com/containerd/containerd/remotes"
|
||||
"github.com/containerd/containerd/remotes/docker"
|
||||
"github.com/containerd/containerd/remotes/docker/schema1" //nolint:staticcheck // Ignore SA1019: "github.com/containerd/containerd/remotes/docker/schema1" is deprecated: use images formatted in Docker Image Manifest v2, Schema 2, or OCI Image Spec v1.
|
||||
cerrdefs "github.com/containerd/errdefs"
|
||||
"github.com/containerd/log"
|
||||
distreference "github.com/distribution/reference"
|
||||
dimages "github.com/docker/docker/daemon/images"
|
||||
|
||||
@@ -7,10 +7,10 @@ import (
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
cerrdefs "github.com/containerd/containerd/errdefs"
|
||||
"github.com/containerd/containerd/leases"
|
||||
"github.com/containerd/containerd/mount"
|
||||
"github.com/containerd/containerd/snapshots"
|
||||
cerrdefs "github.com/containerd/errdefs"
|
||||
"github.com/docker/docker/daemon/graphdriver"
|
||||
"github.com/docker/docker/layer"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
|
||||
@@ -14,7 +14,6 @@ import (
|
||||
"github.com/containerd/containerd/remotes/docker"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/backend"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
timetypes "github.com/docker/docker/api/types/time"
|
||||
"github.com/docker/docker/builder"
|
||||
"github.com/docker/docker/builder/builder-next/exporter"
|
||||
@@ -22,11 +21,11 @@ import (
|
||||
"github.com/docker/docker/builder/builder-next/exporter/overrides"
|
||||
"github.com/docker/docker/daemon/config"
|
||||
"github.com/docker/docker/daemon/images"
|
||||
"github.com/docker/docker/errdefs"
|
||||
"github.com/docker/docker/libnetwork"
|
||||
"github.com/docker/docker/opts"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/docker/docker/pkg/streamformatter"
|
||||
"github.com/docker/go-units"
|
||||
controlapi "github.com/moby/buildkit/api/services/control"
|
||||
"github.com/moby/buildkit/client"
|
||||
"github.com/moby/buildkit/control"
|
||||
@@ -77,24 +76,23 @@ var cacheFields = map[string]bool{
|
||||
|
||||
// Opt is option struct required for creating the builder
|
||||
type Opt struct {
|
||||
SessionManager *session.Manager
|
||||
Root string
|
||||
EngineID string
|
||||
Dist images.DistributionServices
|
||||
ImageTagger mobyexporter.ImageTagger
|
||||
NetworkController *libnetwork.Controller
|
||||
DefaultCgroupParent string
|
||||
RegistryHosts docker.RegistryHosts
|
||||
BuilderConfig config.BuilderConfig
|
||||
Rootless bool
|
||||
IdentityMapping idtools.IdentityMapping
|
||||
DNSConfig config.DNSConfig
|
||||
ApparmorProfile string
|
||||
UseSnapshotter bool
|
||||
Snapshotter string
|
||||
ContainerdAddress string
|
||||
ContainerdNamespace string
|
||||
ImageExportedCallback exporter.ImageExportedByBuildkit
|
||||
SessionManager *session.Manager
|
||||
Root string
|
||||
EngineID string
|
||||
Dist images.DistributionServices
|
||||
ImageTagger mobyexporter.ImageTagger
|
||||
NetworkController *libnetwork.Controller
|
||||
DefaultCgroupParent string
|
||||
RegistryHosts docker.RegistryHosts
|
||||
BuilderConfig config.BuilderConfig
|
||||
Rootless bool
|
||||
IdentityMapping idtools.IdentityMapping
|
||||
DNSConfig config.DNSConfig
|
||||
ApparmorProfile string
|
||||
UseSnapshotter bool
|
||||
Snapshotter string
|
||||
ContainerdAddress string
|
||||
ContainerdNamespace string
|
||||
}
|
||||
|
||||
// Builder can build using BuildKit backend
|
||||
@@ -328,7 +326,7 @@ func (b *Builder) Build(ctx context.Context, opt backend.BuildConfig) (*builder.
|
||||
// TODO: remove once opt.Options.Platform is of type specs.Platform
|
||||
_, err := platforms.Parse(opt.Options.Platform)
|
||||
if err != nil {
|
||||
return nil, errdefs.InvalidParameter(err)
|
||||
return nil, err
|
||||
}
|
||||
frontendAttrs["platform"] = opt.Options.Platform
|
||||
}
|
||||
@@ -393,7 +391,7 @@ func (b *Builder) Build(ctx context.Context, opt backend.BuildConfig) (*builder.
|
||||
req := &controlapi.SolveRequest{
|
||||
Ref: id,
|
||||
Exporters: []*controlapi.Exporter{
|
||||
{Type: exporterName, Attrs: exporterAttrs},
|
||||
&controlapi.Exporter{Type: exporterName, Attrs: exporterAttrs},
|
||||
},
|
||||
Frontend: "dockerfile.v0",
|
||||
FrontendAttrs: frontendAttrs,
|
||||
@@ -613,7 +611,7 @@ func toBuildkitExtraHosts(inp []string, hostGatewayIP net.IP) (string, error) {
|
||||
}
|
||||
|
||||
// toBuildkitUlimits converts ulimits from docker type=soft:hard format to buildkit's csv format
|
||||
func toBuildkitUlimits(inp []*container.Ulimit) (string, error) {
|
||||
func toBuildkitUlimits(inp []*units.Ulimit) (string, error) {
|
||||
if len(inp) == 0 {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
@@ -46,7 +46,6 @@ import (
|
||||
"github.com/moby/buildkit/util/archutil"
|
||||
"github.com/moby/buildkit/util/entitlements"
|
||||
"github.com/moby/buildkit/util/network/netproviders"
|
||||
"github.com/moby/buildkit/util/tracing"
|
||||
"github.com/moby/buildkit/util/tracing/detect"
|
||||
"github.com/moby/buildkit/worker"
|
||||
"github.com/moby/buildkit/worker/containerd"
|
||||
@@ -68,17 +67,11 @@ func newController(ctx context.Context, rt http.RoundTripper, opt Opt) (*control
|
||||
}
|
||||
|
||||
func getTraceExporter(ctx context.Context) trace.SpanExporter {
|
||||
tc := make(tracing.MultiSpanExporter, 0, 2)
|
||||
if detect.Recorder != nil {
|
||||
tc = append(tc, detect.Recorder)
|
||||
}
|
||||
|
||||
if exp, err := detect.NewSpanExporter(ctx); err != nil {
|
||||
span, _, err := detect.Exporter()
|
||||
if err != nil {
|
||||
log.G(ctx).WithError(err).Error("Failed to detect trace exporter for buildkit controller")
|
||||
} else if !detect.IsNoneSpanExporter(exp) {
|
||||
tc = append(tc, exp)
|
||||
}
|
||||
return tc
|
||||
return span
|
||||
}
|
||||
|
||||
func newSnapshotterController(ctx context.Context, rt http.RoundTripper, opt Opt) (*control.Controller, error) {
|
||||
@@ -138,7 +131,7 @@ func newSnapshotterController(ctx context.Context, rt http.RoundTripper, opt Opt
|
||||
}
|
||||
wo.Executor = exec
|
||||
|
||||
w, err := mobyworker.NewContainerdWorker(ctx, wo, opt.ImageExportedCallback)
|
||||
w, err := mobyworker.NewContainerdWorker(ctx, wo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -149,15 +142,9 @@ func newSnapshotterController(ctx context.Context, rt http.RoundTripper, opt Opt
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
gwf, err := gateway.NewGatewayFrontend(wc.Infos(), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
frontends := map[string]frontend.Frontend{
|
||||
"dockerfile.v0": forwarder.NewGatewayForwarder(wc.Infos(), dockerfile.Build),
|
||||
"gateway.v0": gwf,
|
||||
"gateway.v0": gateway.NewGatewayFrontend(wc.Infos()),
|
||||
}
|
||||
|
||||
return control.NewController(control.Opt{
|
||||
@@ -316,12 +303,11 @@ func newGraphDriverController(ctx context.Context, rt http.RoundTripper, opt Opt
|
||||
}
|
||||
|
||||
exp, err := mobyexporter.New(mobyexporter.Opt{
|
||||
ImageStore: dist.ImageStore,
|
||||
ContentStore: store,
|
||||
Differ: differ,
|
||||
ImageTagger: opt.ImageTagger,
|
||||
LeaseManager: lm,
|
||||
ImageExportedCallback: opt.ImageExportedCallback,
|
||||
ImageStore: dist.ImageStore,
|
||||
ContentStore: store,
|
||||
Differ: differ,
|
||||
ImageTagger: opt.ImageTagger,
|
||||
LeaseManager: lm,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -380,14 +366,9 @@ func newGraphDriverController(ctx context.Context, rt http.RoundTripper, opt Opt
|
||||
}
|
||||
wc.Add(w)
|
||||
|
||||
gwf, err := gateway.NewGatewayFrontend(wc.Infos(), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
frontends := map[string]frontend.Frontend{
|
||||
"dockerfile.v0": forwarder.NewGatewayForwarder(wc.Infos(), dockerfile.Build),
|
||||
"gateway.v0": gwf,
|
||||
"gateway.v0": gateway.NewGatewayFrontend(wc.Infos()),
|
||||
}
|
||||
|
||||
return control.NewController(control.Opt{
|
||||
|
||||
@@ -113,20 +113,20 @@ func (iface *lnInterface) init(c *libnetwork.Controller, n *libnetwork.Network)
|
||||
defer close(iface.ready)
|
||||
id := identity.NewID()
|
||||
|
||||
ep, err := n.CreateEndpoint(context.TODO(), id, libnetwork.CreateOptionDisableResolution())
|
||||
ep, err := n.CreateEndpoint(id, libnetwork.CreateOptionDisableResolution())
|
||||
if err != nil {
|
||||
iface.err = err
|
||||
return
|
||||
}
|
||||
|
||||
sbx, err := c.NewSandbox(context.TODO(), id, libnetwork.OptionUseExternalKey(), libnetwork.OptionHostsPath(filepath.Join(iface.provider.Root, id, "hosts")),
|
||||
sbx, err := c.NewSandbox(id, libnetwork.OptionUseExternalKey(), libnetwork.OptionHostsPath(filepath.Join(iface.provider.Root, id, "hosts")),
|
||||
libnetwork.OptionResolvConfPath(filepath.Join(iface.provider.Root, id, "resolv.conf")))
|
||||
if err != nil {
|
||||
iface.err = err
|
||||
return
|
||||
}
|
||||
|
||||
if err := ep.Join(context.TODO(), sbx); err != nil {
|
||||
if err := ep.Join(sbx); err != nil {
|
||||
iface.err = err
|
||||
return
|
||||
}
|
||||
@@ -161,7 +161,7 @@ func (iface *lnInterface) Close() error {
|
||||
<-iface.ready
|
||||
if iface.sbx != nil {
|
||||
go func() {
|
||||
if err := iface.sbx.Delete(context.TODO()); err != nil {
|
||||
if err := iface.sbx.Delete(); err != nil {
|
||||
log.G(context.TODO()).WithError(err).Errorf("failed to delete builder network sandbox")
|
||||
}
|
||||
if err := os.RemoveAll(filepath.Join(iface.provider.Root, iface.sbx.ContainerID())); err != nil {
|
||||
|
||||
@@ -22,11 +22,11 @@ func newExecutor(_, _ string, _ *libnetwork.Controller, _ *oci.DNSConfig, _ bool
|
||||
type stubExecutor struct{}
|
||||
|
||||
func (w *stubExecutor) Run(ctx context.Context, id string, root executor.Mount, mounts []executor.Mount, process executor.ProcessInfo, started chan<- struct{}) (resourcetypes.Recorder, error) {
|
||||
return nil, errors.New("buildkit executor not implemented for " + runtime.GOOS)
|
||||
return nil, errors.New("buildkit executor not implemented for "+runtime.GOOS)
|
||||
}
|
||||
|
||||
func (w *stubExecutor) Exec(ctx context.Context, id string, process executor.ProcessInfo) error {
|
||||
return errors.New("buildkit executor not implemented for " + runtime.GOOS)
|
||||
return errors.New("buildkit executor not implemented for "+runtime.GOOS)
|
||||
}
|
||||
|
||||
func getDNSConfig(config.DNSConfig) *oci.DNSConfig {
|
||||
|
||||
@@ -10,8 +10,8 @@ import (
|
||||
"github.com/containerd/containerd/leases"
|
||||
"github.com/containerd/log"
|
||||
distref "github.com/distribution/reference"
|
||||
builderexporter "github.com/docker/docker/builder/builder-next/exporter"
|
||||
"github.com/docker/docker/image"
|
||||
"github.com/docker/docker/internal/compatcontext"
|
||||
"github.com/docker/docker/layer"
|
||||
"github.com/moby/buildkit/exporter"
|
||||
"github.com/moby/buildkit/exporter/containerimage"
|
||||
@@ -33,12 +33,11 @@ type ImageTagger interface {
|
||||
|
||||
// Opt defines a struct for creating new exporter
|
||||
type Opt struct {
|
||||
ImageStore image.Store
|
||||
Differ Differ
|
||||
ImageTagger ImageTagger
|
||||
ContentStore content.Store
|
||||
LeaseManager leases.Manager
|
||||
ImageExportedCallback builderexporter.ImageExportedByBuildkit
|
||||
ImageStore image.Store
|
||||
Differ Differ
|
||||
ImageTagger ImageTagger
|
||||
ContentStore content.Store
|
||||
LeaseManager leases.Manager
|
||||
}
|
||||
|
||||
type imageExporter struct {
|
||||
@@ -51,13 +50,12 @@ func New(opt Opt) (exporter.Exporter, error) {
|
||||
return im, nil
|
||||
}
|
||||
|
||||
func (e *imageExporter) Resolve(ctx context.Context, id int, attrs map[string]string) (exporter.ExporterInstance, error) {
|
||||
func (e *imageExporter) Resolve(ctx context.Context, id int, opt map[string]string) (exporter.ExporterInstance, error) {
|
||||
i := &imageExporterInstance{
|
||||
imageExporter: e,
|
||||
id: id,
|
||||
attrs: attrs,
|
||||
}
|
||||
for k, v := range attrs {
|
||||
for k, v := range opt {
|
||||
switch exptypes.ImageExporterOptKey(k) {
|
||||
case exptypes.OptKeyName:
|
||||
for _, v := range strings.Split(v, ",") {
|
||||
@@ -82,17 +80,12 @@ type imageExporterInstance struct {
|
||||
id int
|
||||
targetNames []distref.Named
|
||||
meta map[string][]byte
|
||||
attrs map[string]string
|
||||
}
|
||||
|
||||
func (e *imageExporterInstance) ID() int {
|
||||
return e.id
|
||||
}
|
||||
|
||||
func (e *imageExporterInstance) Type() string {
|
||||
return "image"
|
||||
}
|
||||
|
||||
func (e *imageExporterInstance) Name() string {
|
||||
return "exporting to image"
|
||||
}
|
||||
@@ -101,10 +94,6 @@ func (e *imageExporterInstance) Config() *exporter.Config {
|
||||
return exporter.NewConfig()
|
||||
}
|
||||
|
||||
func (e *imageExporterInstance) Attrs() map[string]string {
|
||||
return e.attrs
|
||||
}
|
||||
|
||||
func (e *imageExporterInstance) Export(ctx context.Context, inp *exporter.Source, inlineCache exptypes.InlineCache, sessionID string) (map[string]string, exporter.DescriptorReference, error) {
|
||||
if len(inp.Refs) > 1 {
|
||||
return nil, nil, fmt.Errorf("exporting multiple references to image store is currently unsupported")
|
||||
@@ -228,10 +217,6 @@ func (e *imageExporterInstance) Export(ctx context.Context, inp *exporter.Source
|
||||
return nil, nil, fmt.Errorf("failed to create a temporary descriptor reference: %w", err)
|
||||
}
|
||||
|
||||
if e.opt.ImageExportedCallback != nil {
|
||||
e.opt.ImageExportedCallback(ctx, id.String(), descRef.Descriptor())
|
||||
}
|
||||
|
||||
return resp, descRef, nil
|
||||
}
|
||||
|
||||
@@ -245,7 +230,7 @@ func (e *imageExporterInstance) newTempReference(ctx context.Context, config []b
|
||||
}
|
||||
|
||||
unlease := func(ctx context.Context) error {
|
||||
err := done(context.WithoutCancel(ctx))
|
||||
err := done(compatcontext.WithoutCancel(ctx))
|
||||
if err != nil {
|
||||
log.G(ctx).WithError(err).Error("failed to delete descriptor reference lease")
|
||||
}
|
||||
|
||||
@@ -45,10 +45,6 @@ func patchImageConfig(dt []byte, dps []digest.Digest, history []ocispec.History,
|
||||
return nil, errors.Wrap(err, "failed to parse image config for patch")
|
||||
}
|
||||
|
||||
if m == nil {
|
||||
return nil, errors.New("null image config")
|
||||
}
|
||||
|
||||
var rootFS ocispec.RootFS
|
||||
rootFS.Type = "layers"
|
||||
rootFS.DiffIDs = append(rootFS.DiffIDs, dps...)
|
||||
|
||||
@@ -1,42 +0,0 @@
|
||||
package mobyexporter
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"gotest.tools/v3/assert"
|
||||
)
|
||||
|
||||
func TestPatchImageConfig(t *testing.T) {
|
||||
for _, tc := range []struct {
|
||||
name string
|
||||
cfgJSON string
|
||||
err string
|
||||
}{
|
||||
{
|
||||
name: "empty",
|
||||
cfgJSON: "{}",
|
||||
},
|
||||
{
|
||||
name: "history only",
|
||||
cfgJSON: `{"history": []}`,
|
||||
},
|
||||
{
|
||||
name: "rootfs only",
|
||||
cfgJSON: `{"rootfs": {}}`,
|
||||
},
|
||||
{
|
||||
name: "null",
|
||||
cfgJSON: "null",
|
||||
err: "null image config",
|
||||
},
|
||||
} {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
_, err := patchImageConfig([]byte(tc.cfgJSON), nil, nil, nil)
|
||||
if tc.err == "" {
|
||||
assert.NilError(t, err)
|
||||
} else {
|
||||
assert.ErrorContains(t, err, tc.err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
37
builder/builder-next/exporter/overrides/wrapper.go
Normal file
@@ -0,0 +1,37 @@
package overrides

import (
"context"
"strings"

"github.com/moby/buildkit/exporter"
"github.com/moby/buildkit/exporter/containerimage/exptypes"
)

// Wraps the containerimage exporter's Resolve method to apply moby-specific
// overrides to the exporter attributes.
type imageExporterMobyWrapper struct {
exp exporter.Exporter
}

func NewExporterWrapper(exp exporter.Exporter) (exporter.Exporter, error) {
return &imageExporterMobyWrapper{exp: exp}, nil
}

// Resolve applies moby specific attributes to the request.
func (e *imageExporterMobyWrapper) Resolve(ctx context.Context, id int, exporterAttrs map[string]string) (exporter.ExporterInstance, error) {
if exporterAttrs == nil {
exporterAttrs = make(map[string]string)
}
reposAndTags, err := SanitizeRepoAndTags(strings.Split(exporterAttrs[string(exptypes.OptKeyName)], ","))
if err != nil {
return nil, err
}
exporterAttrs[string(exptypes.OptKeyName)] = strings.Join(reposAndTags, ",")
exporterAttrs[string(exptypes.OptKeyUnpack)] = "true"
if _, has := exporterAttrs[string(exptypes.OptKeyDanglingPrefix)]; !has {
exporterAttrs[string(exptypes.OptKeyDanglingPrefix)] = "moby-dangling"
}

return e.exp.Resolve(ctx, id, exporterAttrs)
}
@@ -1,69 +0,0 @@
package exporter

import (
"context"
"strings"

"github.com/docker/docker/builder/builder-next/exporter/overrides"
"github.com/moby/buildkit/exporter"
"github.com/moby/buildkit/exporter/containerimage/exptypes"

ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

type ImageExportedByBuildkit = func(ctx context.Context, id string, desc ocispec.Descriptor) error

// Wraps the containerimage exporter's Resolve method to apply moby-specific
// overrides to the exporter attributes.
type imageExporterMobyWrapper struct {
exp exporter.Exporter
callback ImageExportedByBuildkit
}

// NewWrapper returns an exporter wrapper that applies moby specific attributes
// and hooks the export process.
func NewWrapper(exp exporter.Exporter, callback ImageExportedByBuildkit) (exporter.Exporter, error) {
return &imageExporterMobyWrapper{exp: exp, callback: callback}, nil
}

// Resolve applies moby specific attributes to the request.
func (e *imageExporterMobyWrapper) Resolve(ctx context.Context, id int, exporterAttrs map[string]string) (exporter.ExporterInstance, error) {
if exporterAttrs == nil {
exporterAttrs = make(map[string]string)
}
reposAndTags, err := overrides.SanitizeRepoAndTags(strings.Split(exporterAttrs[string(exptypes.OptKeyName)], ","))
if err != nil {
return nil, err
}
exporterAttrs[string(exptypes.OptKeyName)] = strings.Join(reposAndTags, ",")
exporterAttrs[string(exptypes.OptKeyUnpack)] = "true"
if _, has := exporterAttrs[string(exptypes.OptKeyDanglingPrefix)]; !has {
exporterAttrs[string(exptypes.OptKeyDanglingPrefix)] = "moby-dangling"
}

inst, err := e.exp.Resolve(ctx, id, exporterAttrs)
if err != nil {
return nil, err
}

return &imageExporterInstanceWrapper{ExporterInstance: inst, callback: e.callback}, nil
}

type imageExporterInstanceWrapper struct {
exporter.ExporterInstance
callback ImageExportedByBuildkit
}

func (i *imageExporterInstanceWrapper) Export(ctx context.Context, src *exporter.Source, inlineCache exptypes.InlineCache, sessionID string) (map[string]string, exporter.DescriptorReference, error) {
out, ref, err := i.ExporterInstance.Export(ctx, src, inlineCache, sessionID)
if err != nil {
return out, ref, err
}

desc := ref.Descriptor()
imageID := out[exptypes.ExporterImageDigestKey]
if i.callback != nil {
i.callback(ctx, imageID, desc)
}
return out, ref, nil
}
@@ -4,6 +4,7 @@ import (
"context"

mobyexporter "github.com/docker/docker/builder/builder-next/exporter"
"github.com/docker/docker/builder/builder-next/exporter/overrides"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/exporter"
"github.com/moby/buildkit/session"
@@ -13,16 +14,15 @@ import (
// ContainerdWorker is a local worker instance with dedicated snapshotter, cache, and so on.
type ContainerdWorker struct {
*base.Worker
callback mobyexporter.ImageExportedByBuildkit
}

// NewContainerdWorker instantiates a local worker.
func NewContainerdWorker(ctx context.Context, wo base.WorkerOpt, callback mobyexporter.ImageExportedByBuildkit) (*ContainerdWorker, error) {
func NewContainerdWorker(ctx context.Context, wo base.WorkerOpt) (*ContainerdWorker, error) {
bw, err := base.NewWorker(ctx, wo)
if err != nil {
return nil, err
}
return &ContainerdWorker{Worker: bw, callback: callback}, nil
return &ContainerdWorker{Worker: bw}, nil
}

// Exporter returns exporter by name
@@ -33,7 +33,7 @@ func (w *ContainerdWorker) Exporter(name string, sm *session.Manager) (exporter.
if err != nil {
return nil, err
}
return mobyexporter.NewWrapper(exp, w.callback)
return overrides.NewExporterWrapper(exp)
default:
return w.Worker.Exporter(name, sm)
}

@@ -4,6 +4,8 @@ import (
"fmt"
"io"
"sort"

"github.com/docker/docker/runconfig/opts"
)

// builtinAllowedBuildArgs is list of built-in allowed build args
@@ -136,7 +138,7 @@ func (b *BuildArgs) getAllFromMapping(source map[string]*string) map[string]stri
// FilterAllowed returns all allowed args without the filtered args
func (b *BuildArgs) FilterAllowed(filter []string) []string {
envs := []string{}
configEnv := convertKVStringsToMap(filter)
configEnv := opts.ConvertKVStringsToMap(filter)

for key, val := range b.GetAllAllowed() {
if _, ok := configEnv[key]; !ok {

@@ -159,7 +159,7 @@ func newBuilder(ctx context.Context, options builderOptions) (*Builder, error) {
if config.Platform != "" {
sp, err := platforms.Parse(config.Platform)
if err != nil {
return nil, errdefs.InvalidParameter(err)
return nil, err
}
b.platform = &sp
}
@@ -187,7 +187,7 @@ func buildLabelOptions(labels map[string]string, stages []instructions.Stage) {
func (b *Builder) build(ctx context.Context, source builder.Source, dockerfile *parser.Result) (*builder.Result, error) {
defer b.imageSources.Unmount()

stages, metaArgs, err := instructions.Parse(dockerfile.AST, nil)
stages, metaArgs, err := instructions.Parse(dockerfile.AST)
if err != nil {
var uiErr *instructions.UnknownInstructionError
if errors.As(err, &uiErr) {
@@ -230,8 +230,7 @@ func processMetaArg(meta instructions.ArgCommand, shlex *shell.Lex, args *BuildA
// shell.Lex currently only support the concatenated string format
envs := convertMapToEnvList(args.GetAllAllowed())
if err := meta.Expand(func(word string) (string, error) {
newword, _, err := shlex.ProcessWord(word, envs)
return newword, err
return shlex.ProcessWord(word, envs)
}); err != nil {
return err
}
@@ -381,14 +380,3 @@ func convertMapToEnvList(m map[string]string) []string {
}
return result
}

// convertKVStringsToMap converts ["key=value"] to {"key":"value"}
func convertKVStringsToMap(values []string) map[string]string {
result := make(map[string]string, len(values))
for _, value := range values {
k, v, _ := strings.Cut(value, "=")
result[k] = v
}

return result
}

@@ -477,7 +477,7 @@ func performCopyForInfo(dest copyInfo, source copyInfo, options copyFileOptions)
}
// dest.path must be used because destPath has already been cleaned of any
// trailing slash
if destExistsAsDir || strings.HasSuffix(dest.path, string(os.PathSeparator)) {
if endsInSlash(dest.path) || destExistsAsDir {
// source.path must be used to get the correct filename when the source
// is a symlink
destPath = filepath.Join(destPath, filepath.Base(source.path))
@@ -524,6 +524,10 @@ func copyFile(archiver *archive.Archiver, source, dest string, identity *idtools
return nil
}

func endsInSlash(path string) bool {
return strings.HasSuffix(path, string(filepath.Separator))
}

// isExistingDirectory returns true if the path exists and is a directory
func isExistingDirectory(path string) (bool, error) {
destStat, err := os.Stat(path)

@@ -166,17 +166,17 @@ func initializeStage(ctx context.Context, d dispatchRequest, cmd *instructions.S

p, err := platforms.Parse(v)
if err != nil {
return errors.Wrapf(errdefs.InvalidParameter(err), "failed to parse platform %s", v)
return errors.Wrapf(err, "failed to parse platform %s", v)
}
platform = &p
}

img, err := d.getFromImage(ctx, d.shlex, cmd.BaseName, platform)
image, err := d.getFromImage(ctx, d.shlex, cmd.BaseName, platform)
if err != nil {
return err
}
state := d.state
if err := state.beginStage(cmd.Name, img); err != nil {
if err := state.beginStage(cmd.Name, image); err != nil {
return err
}
if len(state.runConfig.OnBuild) > 0 {
@@ -224,7 +224,7 @@ func (d *dispatchRequest) getExpandedString(shlex *shell.Lex, str string) (strin
substitutionArgs = append(substitutionArgs, key+"="+value)
}

name, _, err := shlex.ProcessWord(str, substitutionArgs)
name, err := shlex.ProcessWord(str, substitutionArgs)
if err != nil {
return "", err
}

@@ -22,7 +22,7 @@ func normalizeWorkdir(_ string, current string, requested string) (string, error
if !filepath.IsAbs(requested) {
return filepath.Join(string(os.PathSeparator), current, requested), nil
}
return filepath.Clean(requested), nil
return requested, nil
}

// resolveCmdLine takes a command line arg set and optionally prepends a platform-specific

@@ -30,6 +30,7 @@ import (
"github.com/docker/docker/errdefs"
"github.com/docker/docker/image"
"github.com/docker/docker/oci"
"github.com/docker/docker/runconfig/opts"
"github.com/moby/buildkit/frontend/dockerfile/instructions"
"github.com/moby/buildkit/frontend/dockerfile/shell"
"github.com/pkg/errors"
@@ -47,8 +48,7 @@ func dispatch(ctx context.Context, d dispatchRequest, cmd instructions.Command)

if ex, ok := cmd.(instructions.SupportsSingleWordExpansion); ok {
err := ex.Expand(func(word string) (string, error) {
newword, _, err := d.shlex.ProcessWord(word, envs)
return newword, err
return d.shlex.ProcessWord(word, envs)
})
if err != nil {
return errdefs.InvalidParameter(err)
@@ -242,7 +242,7 @@ func (s *dispatchState) setDefaultPath() {
if defaultPath == "" {
return
}
envMap := convertKVStringsToMap(s.runConfig.Env)
envMap := opts.ConvertKVStringsToMap(s.runConfig.Env)
if _, ok := envMap["PATH"]; !ok {
s.runConfig.Env = append(s.runConfig.Env, "PATH="+defaultPath)
}

@@ -15,9 +15,7 @@ import (
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/builder"
networkSettings "github.com/docker/docker/daemon/network"
"github.com/docker/docker/image"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/chrootarchive"
@@ -130,24 +128,26 @@ func (b *Builder) performCopy(ctx context.Context, req dispatchRequest, inst cop
commentStr := fmt.Sprintf("%s %s%s in %s ", inst.cmdName, chownComment, srcHash, inst.dest)

// TODO: should this have been using origPaths instead of srcHash in the comment?
runConfigWithCommentCmd := copyRunConfig(state.runConfig, withCmdCommentString(commentStr, state.operatingSystem))
runConfigWithCommentCmd := copyRunConfig(
state.runConfig,
withCmdCommentString(commentStr, state.operatingSystem))
hit, err := b.probeCache(state, runConfigWithCommentCmd)
if err != nil || hit {
return err
}

imgMount, err := b.imageSources.Get(ctx, state.imageID, true, req.builder.platform)
imageMount, err := b.imageSources.Get(ctx, state.imageID, true, req.builder.platform)
if err != nil {
return errors.Wrapf(err, "failed to get destination image %q", state.imageID)
}

rwLayer, err := imgMount.NewRWLayer()
rwLayer, err := imageMount.NewRWLayer()
if err != nil {
return err
}
defer rwLayer.Release()

destInfo, err := createDestInfo(state.runConfig.WorkingDir, inst, rwLayer)
destInfo, err := createDestInfo(state.runConfig.WorkingDir, inst, rwLayer, state.operatingSystem)
if err != nil {
return err
}
@@ -179,10 +179,10 @@ func (b *Builder) performCopy(ctx context.Context, req dispatchRequest, inst cop
return errors.Wrapf(err, "failed to copy files")
}
}
return b.exportImage(ctx, state, rwLayer, imgMount.Image(), runConfigWithCommentCmd)
return b.exportImage(ctx, state, rwLayer, imageMount.Image(), runConfigWithCommentCmd)
}

func createDestInfo(workingDir string, inst copyInstruction, rwLayer builder.RWLayer) (copyInfo, error) {
func createDestInfo(workingDir string, inst copyInstruction, rwLayer builder.RWLayer, platform string) (copyInfo, error) {
// Twiddle the destination when it's a relative path - meaning, make it
// relative to the WORKINGDIR
dest, err := normalizeDest(workingDir, inst.dest)
@@ -278,38 +278,38 @@ func withoutHealthcheck() runConfigModifier {
}

func copyRunConfig(runConfig *container.Config, modifiers ...runConfigModifier) *container.Config {
cfgCopy := *runConfig
cfgCopy.Cmd = copyStringSlice(runConfig.Cmd)
cfgCopy.Env = copyStringSlice(runConfig.Env)
cfgCopy.Entrypoint = copyStringSlice(runConfig.Entrypoint)
cfgCopy.OnBuild = copyStringSlice(runConfig.OnBuild)
cfgCopy.Shell = copyStringSlice(runConfig.Shell)
copy := *runConfig
copy.Cmd = copyStringSlice(runConfig.Cmd)
copy.Env = copyStringSlice(runConfig.Env)
copy.Entrypoint = copyStringSlice(runConfig.Entrypoint)
copy.OnBuild = copyStringSlice(runConfig.OnBuild)
copy.Shell = copyStringSlice(runConfig.Shell)

if cfgCopy.Volumes != nil {
cfgCopy.Volumes = make(map[string]struct{}, len(runConfig.Volumes))
if copy.Volumes != nil {
copy.Volumes = make(map[string]struct{}, len(runConfig.Volumes))
for k, v := range runConfig.Volumes {
cfgCopy.Volumes[k] = v
copy.Volumes[k] = v
}
}

if cfgCopy.ExposedPorts != nil {
cfgCopy.ExposedPorts = make(nat.PortSet, len(runConfig.ExposedPorts))
if copy.ExposedPorts != nil {
copy.ExposedPorts = make(nat.PortSet, len(runConfig.ExposedPorts))
for k, v := range runConfig.ExposedPorts {
cfgCopy.ExposedPorts[k] = v
copy.ExposedPorts[k] = v
}
}

if cfgCopy.Labels != nil {
cfgCopy.Labels = make(map[string]string, len(runConfig.Labels))
if copy.Labels != nil {
copy.Labels = make(map[string]string, len(runConfig.Labels))
for k, v := range runConfig.Labels {
cfgCopy.Labels[k] = v
copy.Labels[k] = v
}
}

for _, modifier := range modifiers {
modifier(&cfgCopy)
modifier(&copy)
}
return &cfgCopy
return &copy
}

func copyStringSlice(orig []string) []string {
@@ -333,7 +333,7 @@ func (b *Builder) probeCache(dispatchState *dispatchState, runConfig *container.
if cachedID == "" || err != nil {
return false, err
}
_, _ = fmt.Fprintln(b.Stdout, " ---> Using cache")
fmt.Fprint(b.Stdout, " ---> Using cache\n")

dispatchState.imageID = cachedID
return true, nil
@@ -352,15 +352,16 @@ func (b *Builder) create(ctx context.Context, runConfig *container.Config) (stri
log.G(ctx).Debugf("[BUILDER] Command to be executed: %v", runConfig.Cmd)

hostConfig := hostConfigFromOptions(b.options)
ctr, err := b.containerManager.Create(ctx, runConfig, hostConfig)
container, err := b.containerManager.Create(ctx, runConfig, hostConfig)
if err != nil {
return "", err
}
for _, warning := range ctr.Warnings {
_, _ = fmt.Fprintf(b.Stdout, " ---> [Warning] %s\n", warning)
// TODO: could this be moved into containerManager.Create() ?
for _, warning := range container.Warnings {
fmt.Fprintf(b.Stdout, " ---> [Warning] %s\n", warning)
}
_, _ = fmt.Fprintf(b.Stdout, " ---> Running in %s\n", stringid.TruncateID(ctr.ID))
return ctr.ID, nil
fmt.Fprintf(b.Stdout, " ---> Running in %s\n", stringid.TruncateID(container.ID))
return container.ID, nil
}

func hostConfigFromOptions(options *types.ImageBuildOptions) *container.HostConfig {
@@ -376,21 +377,12 @@ func hostConfigFromOptions(options *types.ImageBuildOptions) *container.HostConf
Ulimits: options.Ulimits,
}

// We need to make sure no empty string or "default" NetworkMode is
// provided to the daemon as it doesn't support them.
//
// This is in line with what the ContainerCreate API endpoint does.
networkMode := options.NetworkMode
if networkMode == "" || networkMode == network.NetworkDefault {
networkMode = networkSettings.DefaultNetwork
}

hc := &container.HostConfig{
SecurityOpt: options.SecurityOpt,
Isolation: options.Isolation,
ShmSize: options.ShmSize,
Resources: resources,
NetworkMode: container.NetworkMode(networkMode),
NetworkMode: container.NetworkMode(options.NetworkMode),
// Set a log config to override any default value set on the daemon
LogConfig: defaultLogConfig,
ExtraHosts: options.ExtraHosts,

@@ -10,7 +10,6 @@ import (
"github.com/containerd/containerd/platforms"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/mount"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/jsonmessage"
"golang.org/x/sys/windows"
@@ -63,7 +62,7 @@ func lookupNTAccount(ctx context.Context, builder *Builder, accountName string,

optionsPlatform, err := platforms.Parse(builder.options.Platform)
if err != nil {
return idtools.Identity{}, errdefs.InvalidParameter(err)
return idtools.Identity{}, err
}

runConfig := copyRunConfig(state.runConfig,

@@ -44,8 +44,8 @@ func downloadRemote(remoteURL string) (string, io.ReadCloser, error) {
// GetWithStatusError does an http.Get() and returns an error if the
// status code is 4xx or 5xx.
func GetWithStatusError(address string) (resp *http.Response, err error) {
resp, err = http.Get(address) // #nosec G107 -- ignore G107: Potential HTTP request made with variable url
if err != nil {
// #nosec G107
if resp, err = http.Get(address); err != nil {
if uerr, ok := err.(*url.Error); ok {
if derr, ok := uerr.Err.(*net.DNSError); ok && !derr.IsTimeout {
return nil, errdefs.NotFound(err)

@@ -1,4 +1,4 @@
package debug // import "github.com/docker/docker/cmd/dockerd/debug"
package debug // import "github.com/docker/docker/cli/debug"

import (
"os"
@@ -1,4 +1,4 @@
package debug // import "github.com/docker/docker/cmd/dockerd/debug"
package debug // import "github.com/docker/docker/cli/debug"

import (
"os"
@@ -49,8 +49,6 @@ import (
"net/url"
"path"
"strings"
"sync"
"sync/atomic"
"time"

"github.com/docker/docker/api"
@@ -133,10 +131,7 @@ type Client struct {
negotiateVersion bool

// negotiated indicates that API version negotiation took place
negotiated atomic.Bool

// negotiateLock is used to single-flight the version negotiation process
negotiateLock sync.Mutex
negotiated bool

tp trace.TracerProvider

@@ -271,16 +266,7 @@ func (cli *Client) Close() error {
// be negotiated when making the actual requests, and for which cases
// we cannot do the negotiation lazily.
func (cli *Client) checkVersion(ctx context.Context) error {
if !cli.manualOverride && cli.negotiateVersion && !cli.negotiated.Load() {
// Ensure exclusive write access to version and negotiated fields
cli.negotiateLock.Lock()
defer cli.negotiateLock.Unlock()

// May have been set during last execution of critical zone
if cli.negotiated.Load() {
return nil
}

if !cli.manualOverride && cli.negotiateVersion && !cli.negotiated {
ping, err := cli.Ping(ctx)
if err != nil {
return err
@@ -326,10 +312,6 @@ func (cli *Client) ClientVersion() string {
// added (1.24).
func (cli *Client) NegotiateAPIVersion(ctx context.Context) {
if !cli.manualOverride {
// Avoid concurrent modification of version-related fields
cli.negotiateLock.Lock()
defer cli.negotiateLock.Unlock()

ping, err := cli.Ping(ctx)
if err != nil {
// FIXME(thaJeztah): Ping returns an error when failing to connect to the API; we should not swallow the error here, and instead returning it.
@@ -354,10 +336,6 @@ func (cli *Client) NegotiateAPIVersion(ctx context.Context) {
// added (1.24).
func (cli *Client) NegotiateAPIVersionPing(pingResponse types.Ping) {
if !cli.manualOverride {
// Avoid concurrent modification of version-related fields
cli.negotiateLock.Lock()
defer cli.negotiateLock.Unlock()

cli.negotiateAPIVersionPing(pingResponse)
}
}
@@ -383,7 +361,7 @@ func (cli *Client) negotiateAPIVersionPing(pingResponse types.Ping) {
// Store the results, so that automatic API version negotiation (if enabled)
// won't be performed on the next request.
if cli.negotiateVersion {
cli.negotiated.Store(true)
cli.negotiated = true
}
}

@@ -342,7 +342,7 @@ func TestNegotiateAPIVersion(t *testing.T) {

// TestNegotiateAPIVersionOverride asserts that we honor the DOCKER_API_VERSION
// environment variable when negotiating versions.
func TestNegotiateAPIVersionOverride(t *testing.T) {
func TestNegotiateAPVersionOverride(t *testing.T) {
const expected = "9.99"
t.Setenv("DOCKER_API_VERSION", expected)

@@ -354,9 +354,9 @@ func TestNegotiateAPIVersionOverride(t *testing.T) {
assert.Equal(t, client.ClientVersion(), expected)
}

// TestNegotiateAPIVersionConnectionFailure asserts that we do not modify the
// TestNegotiateAPVersionConnectionFailure asserts that we do not modify the
// API version when failing to connect.
func TestNegotiateAPIVersionConnectionFailure(t *testing.T) {
func TestNegotiateAPVersionConnectionFailure(t *testing.T) {
const expected = "9.99"

client, err := NewClientWithOpts(WithHost("tcp://no-such-host.invalid"))

@@ -11,11 +11,11 @@ import (
"path/filepath"
"strings"

"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types"
)

// ContainerStatPath returns stat information about a path inside the container filesystem.
func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path string) (container.PathStat, error) {
func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path string) (types.ContainerPathStat, error) {
query := url.Values{}
query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API.

@@ -23,14 +23,14 @@ func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path stri
response, err := cli.head(ctx, urlStr, query, nil)
defer ensureReaderClosed(response)
if err != nil {
return container.PathStat{}, err
return types.ContainerPathStat{}, err
}
return getContainerPathStatFromHeader(response.header)
}

// CopyToContainer copies content into the container filesystem.
// Note that `content` must be a Reader for a TAR archive
func (cli *Client) CopyToContainer(ctx context.Context, containerID, dstPath string, content io.Reader, options container.CopyToContainerOptions) error {
func (cli *Client) CopyToContainer(ctx context.Context, containerID, dstPath string, content io.Reader, options types.CopyToContainerOptions) error {
query := url.Values{}
query.Set("path", filepath.ToSlash(dstPath)) // Normalize the paths used in the API.
// Do not allow for an existing directory to be overwritten by a non-directory and vice versa.
@@ -55,14 +55,14 @@ func (cli *Client) CopyToContainer(ctx context.Context, containerID, dstPath str

// CopyFromContainer gets the content from the container and returns it as a Reader
// for a TAR archive to manipulate it in the host. It's up to the caller to close the reader.
func (cli *Client) CopyFromContainer(ctx context.Context, containerID, srcPath string) (io.ReadCloser, container.PathStat, error) {
func (cli *Client) CopyFromContainer(ctx context.Context, containerID, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) {
query := make(url.Values, 1)
query.Set("path", filepath.ToSlash(srcPath)) // Normalize the paths used in the API.

apiPath := "/containers/" + containerID + "/archive"
response, err := cli.get(ctx, apiPath, query, nil)
if err != nil {
return nil, container.PathStat{}, err
return nil, types.ContainerPathStat{}, err
}

// In order to get the copy behavior right, we need to know information
@@ -78,8 +78,8 @@ func (cli *Client) CopyFromContainer(ctx context.Context, containerID, srcPath s
return response.body, stat, err
}

func getContainerPathStatFromHeader(header http.Header) (container.PathStat, error) {
var stat container.PathStat
func getContainerPathStatFromHeader(header http.Header) (types.ContainerPathStat, error) {
var stat types.ContainerPathStat

encodedStat := header.Get("X-Docker-Container-Path-Stat")
statDecoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(encodedStat))

@@ -11,7 +11,7 @@ import (
"strings"
"testing"

"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types"
"github.com/docker/docker/errdefs"
"gotest.tools/v3/assert"
is "gotest.tools/v3/assert/cmp"
@@ -64,7 +64,7 @@ func TestContainerStatPath(t *testing.T) {
if path != expectedPath {
return nil, fmt.Errorf("path not set in URL query properly")
}
content, err := json.Marshal(container.PathStat{
content, err := json.Marshal(types.ContainerPathStat{
Name: "name",
Mode: 0o700,
})
@@ -97,7 +97,7 @@ func TestCopyToContainerError(t *testing.T) {
client := &Client{
client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
}
err := client.CopyToContainer(context.Background(), "container_id", "path/to/file", bytes.NewReader([]byte("")), container.CopyToContainerOptions{})
err := client.CopyToContainer(context.Background(), "container_id", "path/to/file", bytes.NewReader([]byte("")), types.CopyToContainerOptions{})
assert.Check(t, is.ErrorType(err, errdefs.IsSystem))
}

@@ -105,7 +105,7 @@ func TestCopyToContainerNotFoundError(t *testing.T) {
client := &Client{
client: newMockClient(errorMock(http.StatusNotFound, "Not found")),
}
err := client.CopyToContainer(context.Background(), "container_id", "path/to/file", bytes.NewReader([]byte("")), container.CopyToContainerOptions{})
err := client.CopyToContainer(context.Background(), "container_id", "path/to/file", bytes.NewReader([]byte("")), types.CopyToContainerOptions{})
assert.Check(t, is.ErrorType(err, errdefs.IsNotFound))
}

@@ -115,7 +115,7 @@ func TestCopyToContainerEmptyResponse(t *testing.T) {
client := &Client{
client: newMockClient(errorMock(http.StatusNoContent, "No content")),
}
err := client.CopyToContainer(context.Background(), "container_id", "path/to/file", bytes.NewReader([]byte("")), container.CopyToContainerOptions{})
err := client.CopyToContainer(context.Background(), "container_id", "path/to/file", bytes.NewReader([]byte("")), types.CopyToContainerOptions{})
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
@@ -159,7 +159,7 @@ func TestCopyToContainer(t *testing.T) {
}, nil
}),
}
err := client.CopyToContainer(context.Background(), "container_id", expectedPath, bytes.NewReader([]byte("content")), container.CopyToContainerOptions{
err := client.CopyToContainer(context.Background(), "container_id", expectedPath, bytes.NewReader([]byte("content")), types.CopyToContainerOptions{
AllowOverwriteDirWithFile: false,
})
if err != nil {
@@ -188,7 +188,7 @@ func TestCopyFromContainerNotFoundError(t *testing.T) {
func TestCopyFromContainerEmptyResponse(t *testing.T) {
client := &Client{
client: newMockClient(func(req *http.Request) (*http.Response, error) {
content, err := json.Marshal(container.PathStat{
content, err := json.Marshal(types.ContainerPathStat{
Name: "path/to/file",
Mode: 0o700,
})
@@ -242,7 +242,7 @@ func TestCopyFromContainer(t *testing.T) {
return nil, fmt.Errorf("path not set in URL query properly, expected '%s', got %s", expectedPath, path)
}

headercontent, err := json.Marshal(container.PathStat{
headercontent, err := json.Marshal(types.ContainerPathStat{
Name: "name",
Mode: 0o700,
})

@@ -6,12 +6,11 @@ import (
"net/http"

"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/versions"
)

// ContainerExecCreate creates a new exec configuration to run an exec process.
func (cli *Client) ContainerExecCreate(ctx context.Context, container string, options container.ExecOptions) (types.IDResponse, error) {
func (cli *Client) ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) {
var response types.IDResponse

// Make sure we negotiated (if the client is configured to do so),
@@ -23,14 +22,14 @@ func (cli *Client) ContainerExecCreate(ctx context.Context, container string, op
return response, err
}

if err := cli.NewVersionError(ctx, "1.25", "env"); len(options.Env) != 0 && err != nil {
if err := cli.NewVersionError(ctx, "1.25", "env"); len(config.Env) != 0 && err != nil {
return response, err
}
if versions.LessThan(cli.ClientVersion(), "1.42") {
options.ConsoleSize = nil
config.ConsoleSize = nil
}

resp, err := cli.post(ctx, "/containers/"+container+"/exec", nil, options, nil)
resp, err := cli.post(ctx, "/containers/"+container+"/exec", nil, config, nil)
defer ensureReaderClosed(resp)
if err != nil {
return response, err
@@ -40,7 +39,7 @@ func (cli *Client) ContainerExecCreate(ctx context.Context, container string, op
}

// ContainerExecStart starts an exec process already created in the docker host.
func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config container.ExecStartOptions) error {
func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error {
if versions.LessThan(cli.ClientVersion(), "1.42") {
config.ConsoleSize = nil
}
@@ -53,7 +52,7 @@ func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config
// It returns a types.HijackedConnection with the hijacked connection
// and the a reader to get output. It's up to the called to close
// the hijacked connection by calling types.HijackedResponse.Close.
func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config container.ExecAttachOptions) (types.HijackedResponse, error) {
func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error) {
if versions.LessThan(cli.ClientVersion(), "1.42") {
config.ConsoleSize = nil
}
@@ -63,8 +62,8 @@ func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, confi
}

// ContainerExecInspect returns information about a specific exec process on the docker host.
func (cli *Client) ContainerExecInspect(ctx context.Context, execID string) (container.ExecInspect, error) {
var response container.ExecInspect
func (cli *Client) ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) {
var response types.ContainerExecInspect
resp, err := cli.get(ctx, "/exec/"+execID+"/json", nil, nil)
if err != nil {
return response, err

@@ -11,7 +11,6 @@ import (
"testing"

"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/errdefs"
"gotest.tools/v3/assert"
is "gotest.tools/v3/assert/cmp"
@@ -21,7 +20,7 @@ func TestContainerExecCreateError(t *testing.T) {
client := &Client{
client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
}
_, err := client.ContainerExecCreate(context.Background(), "container_id", container.ExecOptions{})
_, err := client.ContainerExecCreate(context.Background(), "container_id", types.ExecConfig{})
assert.Check(t, is.ErrorType(err, errdefs.IsSystem))
}

@@ -33,7 +32,7 @@ func TestContainerExecCreateConnectionError(t *testing.T) {
client, err := NewClientWithOpts(WithAPIVersionNegotiation(), WithHost("tcp://no-such-host.invalid"))
assert.NilError(t, err)

_, err = client.ContainerExecCreate(context.Background(), "", container.ExecOptions{})
_, err = client.ContainerExecCreate(context.Background(), "", types.ExecConfig{})
assert.Check(t, is.ErrorType(err, IsErrConnectionFailed))
}

@@ -51,7 +50,7 @@ func TestContainerExecCreate(t *testing.T) {
if err := req.ParseForm(); err != nil {
return nil, err
}
execConfig := &container.ExecOptions{}
execConfig := &types.ExecConfig{}
if err := json.NewDecoder(req.Body).Decode(execConfig); err != nil {
return nil, err
}
@@ -71,7 +70,7 @@ func TestContainerExecCreate(t *testing.T) {
}),
}

r, err := client.ContainerExecCreate(context.Background(), "container_id", container.ExecOptions{
r, err := client.ContainerExecCreate(context.Background(), "container_id", types.ExecConfig{
User: "user",
})
if err != nil {
@@ -86,7 +85,7 @@ func TestContainerExecStartError(t *testing.T) {
client := &Client{
client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
}
err := client.ContainerExecStart(context.Background(), "nothing", container.ExecStartOptions{})
err := client.ContainerExecStart(context.Background(), "nothing", types.ExecStartCheck{})
assert.Check(t, is.ErrorType(err, errdefs.IsSystem))
}

@@ -100,12 +99,12 @@ func TestContainerExecStart(t *testing.T) {
if err := req.ParseForm(); err != nil {
return nil, err
}
options := &container.ExecStartOptions{}
if err := json.NewDecoder(req.Body).Decode(options); err != nil {
execStartCheck := &types.ExecStartCheck{}
if err := json.NewDecoder(req.Body).Decode(execStartCheck); err != nil {
return nil, err
}
if options.Tty || !options.Detach {
return nil, fmt.Errorf("expected ExecStartOptions{Detach:true,Tty:false}, got %v", options)
if execStartCheck.Tty || !execStartCheck.Detach {
return nil, fmt.Errorf("expected execStartCheck{Detach:true,Tty:false}, got %v", execStartCheck)
}

return &http.Response{
@@ -115,7 +114,7 @@ func TestContainerExecStart(t *testing.T) {
}),
}

err := client.ContainerExecStart(context.Background(), "exec_id", container.ExecStartOptions{
err := client.ContainerExecStart(context.Background(), "exec_id", types.ExecStartCheck{
Detach: true,
Tty: false,
})
@@ -139,7 +138,7 @@ func TestContainerExecInspect(t *testing.T) {
if !strings.HasPrefix(req.URL.Path, expectedURL) {
return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL)
}
b, err := json.Marshal(container.ExecInspect{
b, err := json.Marshal(types.ContainerExecInspect{
ExecID: "exec_id",
ContainerID: "container_id",
})

@@ -83,3 +83,55 @@ func TestContainerInspect(t *testing.T) {
t.Fatalf("expected `name`, got %s", r.Name)
}
}

// TestContainerInspectNode tests that the "Node" field is included in the "inspect"
// output. This information is only present when connected to a Swarm standalone API.
func TestContainerInspectNode(t *testing.T) {
client := &Client{
client: newMockClient(func(req *http.Request) (*http.Response, error) {
content, err := json.Marshal(types.ContainerJSON{
ContainerJSONBase: &types.ContainerJSONBase{
ID: "container_id",
Image: "image",
Name: "name",
Node: &types.ContainerNode{
ID: "container_node_id",
Addr: "container_node",
Labels: map[string]string{"foo": "bar"},
},
},
})
if err != nil {
return nil, err
}
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewReader(content)),
}, nil
}),
}

r, err := client.ContainerInspect(context.Background(), "container_id")
if err != nil {
t.Fatal(err)
}
if r.ID != "container_id" {
t.Fatalf("expected `container_id`, got %s", r.ID)
}
if r.Image != "image" {
t.Fatalf("expected `image`, got %s", r.Image)
}
if r.Name != "name" {
t.Fatalf("expected `name`, got %s", r.Name)
}
if r.Node.ID != "container_node_id" {
t.Fatalf("expected `container_node_id`, got %s", r.Node.ID)
}
if r.Node.Addr != "container_node" {
t.Fatalf("expected `container_node`, got %s", r.Node.Addr)
}
foo, ok := r.Node.Labels["foo"]
if foo != "bar" || !ok {
t.Fatalf("expected `bar` for label `foo`")
}
}

@@ -5,13 +5,13 @@ import (
"encoding/json"
"fmt"

"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
)

// ContainersPrune requests the daemon to delete unused data
func (cli *Client) ContainersPrune(ctx context.Context, pruneFilters filters.Args) (container.PruneReport, error) {
var report container.PruneReport
func (cli *Client) ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) {
var report types.ContainersPruneReport

if err := cli.NewVersionError(ctx, "1.25", "container prune"); err != nil {
return report, err

@@ -10,7 +10,7 @@ import (
"strings"
"testing"

"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/errdefs"
"gotest.tools/v3/assert"
@@ -93,7 +93,7 @@ func TestContainersPrune(t *testing.T) {
actual := query.Get(key)
assert.Check(t, is.Equal(expected, actual))
}
content, err := json.Marshal(container.PruneReport{
content, err := json.Marshal(types.ContainersPruneReport{
ContainersDeleted: []string{"container_id1", "container_id2"},
SpaceReclaimed: 9999,
})
Some files were not shown because too many files have changed in this diff.