Compare commits


365 Commits

Author SHA1 Message Date
Sebastiaan van Stijn
bb2eab21c6 Merge pull request #44560 from vvoland/client-sharedsize-2206
[22.06 backport] client/list: Handle SharedSize
2022-12-01 09:16:40 +01:00
Sebastiaan van Stijn
cfc4677f62 Merge pull request #44557 from thaJeztah/22.06_backport_generate_authors
[22.06 backport] AUTHORS: regenerate, cleanup, refactor
2022-12-01 00:52:00 +01:00
Paweł Gronowski
978690e4f9 client/list: Handle SharedSize
This makes the `ImageList` function add `shared-size=1` to the url
query when the caller sets the SharedSize option.
SharedSize support was introduced in API version 1.42. This field was
added to the options struct, but the client wasn't adjusted.

Signed-off-by: Paweł Gronowski <pawel.gronowski@docker.com>
(cherry picked from commit 3d97f1e22d)
Signed-off-by: Paweł Gronowski <pawel.gronowski@docker.com>
2022-11-30 17:14:21 +01:00
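A minimal sketch of the client-side change described above, with illustrative helper and parameter names rather than the exact moby client code: the option only becomes a `shared-size=1` query parameter when the caller asked for it and the negotiated API version is at least 1.42.

    // Hypothetical sketch; function and field names are assumptions.
    package main

    import (
        "fmt"
        "net/url"

        "github.com/docker/docker/api/types/versions"
    )

    func buildImageListQuery(sharedSize bool, apiVersion string) url.Values {
        query := url.Values{}
        if sharedSize && versions.GreaterThanOrEqualTo(apiVersion, "1.42") {
            // SharedSize was introduced in API v1.42.
            query.Set("shared-size", "1")
        }
        return query
    }

    func main() {
        fmt.Println(buildImageListQuery(true, "1.42").Encode()) // shared-size=1
    }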
Sebastiaan van Stijn
44eb640a1b Merge pull request #44556 from thaJeztah/22.06_backport_ci_fixes
[22.06 backport] assorted CI and packaging fixes
2022-11-30 14:58:52 +01:00
Akihiro Suda
69ef0358c3 Merge pull request #44555 from thaJeztah/22.06_backport_remove_outdated_comment
[22.06 backport] vendor.mod: remove outdated comment about replaced module
2022-11-30 22:46:07 +09:00
Sebastiaan van Stijn
fc72ed9760 Merge pull request #44549 from thaJeztah/22.06_backport_search_remove_id
[22.06 backport] registry: session: remove unused id
2022-11-30 12:09:01 +01:00
Bjorn Neergaard
da6bb8c408 AUTHORS: regenerate
Signed-off-by: Bjorn Neergaard <bneergaard@mirantis.com>
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit e1c3305015)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-30 12:06:21 +01:00
Bjorn Neergaard
a889a17a63 .mailmap: cleanup and additions
Signed-off-by: Bjorn Neergaard <bneergaard@mirantis.com>
(cherry picked from commit 672383bc56)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-30 12:06:21 +01:00
Bjorn Neergaard
6f581c1808 hack/generate-authors.sh: refactor and simplify
Signed-off-by: Bjorn Neergaard <bneergaard@mirantis.com>
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit b94d1604a9)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-30 12:06:21 +01:00
CrazyMax
de3143c6b9 Jenkinsfile: Fix dev image build for ppc64le/s390x archs
Signed-off-by: CrazyMax <crazy-max@users.noreply.github.com>
(cherry picked from commit f7e59cbccc)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-30 11:16:51 +01:00
CrazyMax
f547f2f3c0 Makefile: always use buildx
Signed-off-by: CrazyMax <crazy-max@users.noreply.github.com>
(cherry picked from commit bade242ddd)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-30 11:16:36 +01:00
CrazyMax
0c4b6b1742 ci: define timeout for jobs
Signed-off-by: CrazyMax <crazy-max@users.noreply.github.com>
(cherry picked from commit 426e3926ef)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-30 11:16:24 +01:00
Bjorn Neergaard
f088bcadd5 ignorefiles: cleanup
Signed-off-by: Bjorn Neergaard <bneergaard@mirantis.com>
(cherry picked from commit 4f17d17009)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-30 11:15:56 +01:00
Sebastiaan van Stijn
5770145433 vendor.mod: remove outdated comment about replaced module
The replace was removed in 64f9ea1cf5, but I
forgot to remove the comment.

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 6326ad1729)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-30 11:12:37 +01:00
Sebastiaan van Stijn
d15fe0d782 registry: session: remove unused id
This removes the dependency on github.com/docker/docker/pkg/stringid

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit a44f547343)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-29 19:13:02 +01:00
Sebastiaan van Stijn
98040b95a7 Merge pull request #44536 from thaJeztah/22.06_backport_protobuf_extensions_fix
[22.06 backport] go.mod: golang_protobuf_extensions v1.0.4 - prevent incompatible versions
2022-11-25 17:42:20 +01:00
Sebastiaan van Stijn
546005804c go.mod: golang_protobuf_extensions v1.0.4 - prevent incompatible versions
This module made a whoopsie, and updated to `google.golang.org/protobuf`
in a patch release, but `google.golang.org/protobuf` is not backward
compatible with `github.com/golang/protobuf`.

Updating the minimum version to v1.0.4 which corrects this, to prevent
users of containerd as a module from accidentally pulling in the wrong
version:

- v1.0.3 switched to use `google.golang.org/protobuf`; https://github.com/matttproud/golang_protobuf_extensions/compare/v1.0.2..v1.0.3
- This was reverted in v1.0.4 (which is the same as v1.0.2); https://github.com/matttproud/golang_protobuf_extensions/compare/v1.0.3..v1.0.4
- And a `v2` was created instead; https://github.com/matttproud/golang_protobuf_extensions/releases/tag/v2.0.0

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit e1058e6bc3)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-25 15:20:11 +01:00
Sebastiaan van Stijn
a8184baf3b Merge pull request #44523 from crazy-max/22.06_dockerfile-vpnkit-platform
[22.06 backport] Dockerfile: remove hardcoded platforms for vpnkit stage
2022-11-25 00:47:12 +01:00
Sebastiaan van Stijn
e571db3846 Merge pull request #44522 from crazy-max/22.06_go-autogen
[22.06 backport] hack: remove obsolete sources for go-autogen
2022-11-25 00:46:53 +01:00
Sebastiaan van Stijn
a913b5ad7e Merge pull request #44519 from thaJeztah/22.06_backport_bump_swarmkit3
[22.06 backport] vendor: github.com/moby/swarmkit/v2 v2.0.0-20221123162438-b17f02f0a054
2022-11-24 17:42:42 +01:00
Sebastiaan van Stijn
73a98393c6 vendor: github.com/moby/swarmkit/v2 v2.0.0-20221123162438-b17f02f0a054
Conflicts:
       vendor.mod

Conflict because code.cloudfoundry.org/clock moved to a direct dependency in
vendor.mod on master branch since 342b44bf20

full diff: 6341884e5f...b17f02f0a0

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 64f9ea1cf5)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-24 14:47:30 +01:00
CrazyMax
acb8204a7f Dockerfile: remove hardcoded platforms for vpnkit stage
The current Dockerfile downloads vpnkit for both linux/amd64
and linux/arm64, even if the target platform does not match.
This change downloads vpnkit only if the target platform
matches; otherwise it just uses a dummy scratch stage.

Signed-off-by: CrazyMax <crazy-max@users.noreply.github.com>
(cherry picked from commit 8a46a2a364)
2022-11-24 14:21:28 +01:00
CrazyMax
313f105443 hack: remove obsolete sources for go-autogen
Signed-off-by: CrazyMax <crazy-max@users.noreply.github.com>
(cherry picked from commit 40069797ef)
2022-11-24 14:20:48 +01:00
Sebastiaan van Stijn
2b1ba3ea6b vendor: github.com/prometheus/client_golang v1.13.0
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit a5898e3a2d)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-24 11:00:13 +01:00
Sebastiaan van Stijn
f493b770a9 vendor: github.com/aws/aws-sdk-go v1.37.0
full diff: https://github.com/aws/aws-sdk-go/compare/v1.31.6...v1.37.0

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 2cc6a86fd3)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-24 11:00:13 +01:00
Sebastiaan van Stijn
36430f7970 vendor: google.golang.org/grpc v1.48.0
full diff: https://github.com/grpc/grpc-go/compare/v1.47.0...v1.48.0

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 8f1bc3a3b7)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-24 11:00:13 +01:00
Sebastiaan van Stijn
fb24b99a2b vendor: go.uber.org/zap v1.21.0
full diff: https://github.com/uber-go/zap/compare/v1.17.0...v1.21.0

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit b0e20e1b3c)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-24 11:00:13 +01:00
Sebastiaan van Stijn
0c65191c49 vendor: go.uber.org/multierr v1.8.0
full diff: https://github.com/uber-go/multierr/compare/v1.6.0...v1.8.0

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 066fb6c69e)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-24 11:00:13 +01:00
Sebastiaan van Stijn
bf78bf3458 vendor: go.uber.org/atomic v1.9.0
full diff: https://github.com/uber-go/atomic/compare/v1.7.0...v1.9.0

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit b41580d66e)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-24 11:00:13 +01:00
Sebastiaan van Stijn
dcf06b3f5f vendor: github.com/jmespath/go-jmespath v0.4.0
no code changes in vendored files

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 341c9e77a8)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-24 11:00:12 +01:00
Sebastiaan van Stijn
aba1d597bc vendor: google.golang.org/genproto v0.0.0-20220706185917-7780775163c4
no changes in vendored files

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 9a8b46518b)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-24 11:00:12 +01:00
Sebastiaan van Stijn
e37ac41afb vendor: github.com/fernet/fernet-go v0.0.0-20211208181803-9f70042a33ee
Fixes a potential panic.

full diff: 9eac43b88a...9f70042a33

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 1e48b64538)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-24 11:00:12 +01:00
Sebastiaan van Stijn
963e3ec65c vendor: google.golang.org/protobuf v1.28.1
indirect dependency, but updating it in a separate commit

full diff: https://github.com/protocolbuffers/protobuf-go/compare/v1.28.0...v1.28.1

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 4113a88523)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-24 11:00:12 +01:00
Sebastiaan van Stijn
526203dd7f vendor: github.com/spf13/cobra v1.6.1
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 26c4429f7e)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-24 11:00:12 +01:00
Sebastiaan van Stijn
f528e2ab96 vendor: github.com/moby/term v0.0.0-20221120202655-abb19827d345
no significant changes in vendored code, other than updating build-tags
for go1.17, but removes some dependencies from the module, which can
help with future updates;

full diff: 3f7ff695ad...abb19827d3

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 61f266f660)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-24 10:59:17 +01:00
Sebastiaan van Stijn
3989be2f7b vendor: github.com/bsphere/le_go v0.0.0-20200109081728-fc06dab2caa8
updates the "logentries" dependency;

- checking error when calling output
- Support Go Modules

full diff: 7a984a84b5...fc06dab2ca

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 8d5eebcc6e)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-24 10:59:06 +01:00
Sebastiaan van Stijn
d406a5fd22 Merge pull request #44499 from thaJeztah/22.06_backport_update_go_radix
[22.06 backport] vendor: remove most "replace" rules and update github.com/armon/go-radix
2022-11-23 12:52:58 +01:00
Sebastiaan van Stijn
394f6c14ad Merge pull request #44496 from crazy-max/22.06_frozen-script-variant
[22.06 backport] Dockerfile: variant support in frozen-images stage
2022-11-20 21:12:02 +01:00
Sebastiaan van Stijn
77a01aaec7 Merge pull request #44475 from thaJeztah/22.06_backport_config_fix_panic
[22.06 backport] daemon/config: use strings.Cut(), fix panic in BuilderGCFilter
2022-11-19 00:18:19 +01:00
Sebastiaan van Stijn
df2427022a vendor.mod: add comment about replaced dependency
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 30b0cb0cd4)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-18 17:35:08 +01:00
Sebastiaan van Stijn
9e4c508b55 vendor.mod: remove replace for github.com/rexray/gocsi
While this replace was needed in swarmkit itself, it looks like
it doesn't cause issues when removed in this repository, so
let's remove it here.

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 62a4a45a72)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-18 17:35:07 +01:00
Sebastiaan van Stijn
cb358e8a19 vendor: github.com/armon/go-radix v1.0.1-0.20221118154546-54df44f2176c
Previously we had to use a replace rule, as later versions of this
module resulted in a panic. This issue was fixed in:
f30034d788

Which means we can remove the replace rule, and update the dependency.
No new release was tagged yet, so sticking to a "commit" for now.

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit a2d758acc9)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-18 17:35:07 +01:00
Sebastiaan van Stijn
2bc33b4c26 Merge pull request #44492 from thaJeztah/22.06_backport_update_gowinres
[22.06 backport] update github.com/tc-hib/go-winres v0.3.0 to fix schema version in manifest
2022-11-18 15:44:56 +01:00
CrazyMax
3768c71d9e Dockerfile: variant support in frozen-images stage
Using TARGETVARIANT in the frozen-images stage requires changes to the
`download-frozen-image-v2.sh` script to add support for variants,
so we are able to build against more platforms.

Signed-off-by: CrazyMax <crazy-max@users.noreply.github.com>
(cherry picked from commit 25dc760162)
2022-11-18 15:42:48 +01:00
Sebastiaan van Stijn
7b9e86f789 update github.com/tc-hib/go-winres v0.3.0 to fix schema version in manifest
- Fix xml schema version in manifest
- Provide more verbose error on failed git tag resolution

full diffs:

- https://github.com/tc-hib/go-winres/compare/v0.2.3...v0.3.0
- https://github.com/tc-hib/winres/compare/v0.1.5...v0.1.6

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit ca807edac0)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-18 00:27:16 +01:00
Sebastiaan van Stijn
214e6363b3 Merge pull request #44487 from thaJeztah/22.06_backport_update_gotestsum
[22.06 backport] update gotestsum to v1.8.2
2022-11-18 00:26:15 +01:00
Sebastiaan van Stijn
5052c38846 update gotestsum to v1.8.2
release notes: https://github.com/gotestyourself/gotestsum/releases/tag/v1.8.2

- Show shuffle seed
- Update tests, and cleanup formats
- Update dependencies
- Test against go1.19, remove go1.15
- Add project name to junit.xml output
- Adding in support for s390x and ppc64le

full diff: https://github.com/gotestyourself/gotestsum/compare/v1.8.1...v1.8.2

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 882ddf4b16)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-17 17:47:28 +01:00
Sebastiaan van Stijn
d15be0c54d Merge pull request #44415 from thaJeztah/22.06_backport_pkg_thining
[22.06 backport] clean-up various pkg/ changes
2022-11-17 10:48:50 +01:00
Sebastiaan van Stijn
27982c186e Merge pull request #44480 from neersighted/drop_changelog_22.06
[22.06 backport] cleanup: drop historical CHANGELOG.md
2022-11-17 10:48:08 +01:00
Sebastiaan van Stijn
9d990cbae8 Merge pull request #44471 from thaJeztah/22.06_backport_containerd_v1.6.10
[22.06 backport] update containerd v1.6.10 (binary and vendor)
2022-11-17 10:46:14 +01:00
Bjorn Neergaard
3508cfb149 hack/validate: drop changelog-related steps
Signed-off-by: Bjorn Neergaard <bneergaard@mirantis.com>
(cherry picked from commit be18f92bf4)
Signed-off-by: Bjorn Neergaard <bneergaard@mirantis.com>
2022-11-16 13:35:40 -07:00
Bjorn Neergaard
67633130c6 cleanup: drop historical CHANGELOG.md
The file will still be available in Git history; however, we should drop
it as it is misleading and obsolete.

Signed-off-by: Bjorn Neergaard <bneergaard@mirantis.com>
(cherry picked from commit ec1bb21649)
Signed-off-by: Bjorn Neergaard <bneergaard@mirantis.com>
2022-11-16 13:35:38 -07:00
Sebastiaan van Stijn
2e13f771f3 Merge pull request #44477 from neersighted/drop_derek_22.06
[22.06 backport] cleanup: drop .DEREK.yml
2022-11-16 21:22:43 +01:00
Sebastiaan van Stijn
bbf3f33dc8 Merge pull request #44473 from thaJeztah/22.06_backport_rootlesskit_1.1.0
[22.06 backport] update RootlessKit to v1.1.0
2022-11-16 18:45:07 +01:00
Bjorn Neergaard
2dc7a1dc25 cleanup: drop .DEREK.yml
Signed-off-by: Bjorn Neergaard <bneergaard@mirantis.com>
(cherry picked from commit e8ad01594a)
Signed-off-by: Bjorn Neergaard <bneergaard@mirantis.com>
2022-11-16 09:10:46 -07:00
Sebastiaan van Stijn
fc657692c7 daemon/config: use strings.Cut(), fix panic in BuilderGCFilter
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit b529870558)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-16 15:23:00 +01:00
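The panic class addressed here is the common one where a `key=value` filter is split and index `[1]` is read unconditionally; `strings.Cut()` makes the missing-separator case explicit. An illustration of the pattern (not the actual BuilderGCFilter code):

    package main

    import (
        "fmt"
        "strings"
    )

    // parseFilter splits a "key=value" entry. With strings.Cut the absent "="
    // is reported as a boolean instead of risking an out-of-range panic on
    // parts[1]. Illustrative only.
    func parseFilter(s string) (key, value string, ok bool) {
        return strings.Cut(s, "=")
    }

    func main() {
        fmt.Println(parseFilter("unused-for=24h")) // unused-for 24h true
        fmt.Println(parseFilter("malformed"))      // malformed  false (no panic)
    }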
Sebastiaan van Stijn
e75fa6684c daemon: use strconv instead of fmt.Sprintf()
Also cleaning up some errors

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 56e64270f3)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-16 15:21:05 +01:00
Akihiro Suda
abe6b3dc9b rootlesskit.installer: s/vendor.conf/vendor.mod/
Signed-off-by: Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
(cherry picked from commit 7ca03c1a79)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-16 14:33:40 +01:00
Akihiro Suda
297f224a92 update RootlessKit to v1.1.0
Signed-off-by: Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
(cherry picked from commit d9fb730148)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-16 14:33:40 +01:00
Akihiro Suda
f48f4dde24 vendor: github.com/rootless-containers/rootlesskit v1.1.0
Signed-off-by: Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
(cherry picked from commit 08516af897)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-16 14:33:37 +01:00
Sebastiaan van Stijn
cd8873dd3d Merge pull request #44468 from thaJeztah/22.06_backport_golang_x_releases_step1
[22.06 backport] vendor: golang.org/x/crypto v0.1.0 and other golang.org/x/.. dependencies
2022-11-16 14:11:02 +01:00
Jintao Zhang
2dce69e001 update containerd binary to v1.6.10
Signed-off-by: Jintao Zhang <zhangjintao9020@gmail.com>
(cherry picked from commit a5979a2106)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-16 14:03:34 +01:00
Jintao Zhang
5c4dc48995 vendor: github.com/containerd/containerd v1.6.10
Signed-off-by: Jintao Zhang <zhangjintao9020@gmail.com>
(cherry picked from commit 4e5c3b82cb)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-16 14:03:03 +01:00
Sebastiaan van Stijn
10fa0d5321 vendor: golang.org/x/oauth2 v0.1.0
The golang.org/x/ projects are now doing tagged releases.

Some notable changes:

- authhandler: Add support for PKCE
- Introduce new AuthenticationError type returned by errWrappingTokenSource.Token
- Add support to set JWT Audience in JWTConfigFromJSON()
- google/internal: Add AWS Session Token to Metadata Requests
- go.mod: update vulnerable net library
- google: add support for "impersonated_service_account" credential type.
- google/externalaccount: add support for workforce pool credentials

full diff: https://github.com/golang/oauth2/compare/2bc19b11175f...v0.1.0

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit a6cb8efd81)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-15 17:06:19 +01:00
Sebastiaan van Stijn
356f483038 vendor: golang.org/x/crypto v0.1.0
The golang.org/x/ projects are now doing tagged releases.

full diff: https://github.com/golang/crypto/compare/3147a52a75dd...v0.1.0

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 9d7bd47cb6)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-15 17:05:25 +01:00
Sebastiaan van Stijn
c0edbfd621 vendor: golang.org/x/net v0.1.0
The golang.org/x/ projects are now doing tagged releases.

full diff:

- https://github.com/golang/net/compare/f3363e06e74c...v0.1.0
- https://github.com/golang/text/compare/v0.3.7...v0.4.0

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 79f9ffd401)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-15 14:56:21 +01:00
Sebastiaan van Stijn
e46e43470b vendor: golang.org/x/time v0.1.0
The golang.org/x/ projects are now doing tagged releases.

full diff: https://github.com/golang/time/compare/f0f3c7e86c11...v0.1.0

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 71fa64a272)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-15 14:56:12 +01:00
Sebastiaan van Stijn
d4e2341f93 vendor: golang.org/x/sync v0.1.0
The golang.org/x/ projects are now doing tagged releases.

full diff: https://github.com/golang/sync/compare/036812b2e83c...v0.1.0

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 4965f19626)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-15 14:56:04 +01:00
Sebastiaan van Stijn
e32bfd347c vendor: golang.org/x/sys v0.1.0
The golang.org/x/ projects are now doing tagged releases.

full diff: https://github.com/golang/sys/compare/84dc82d7e875...v0.1.0

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 4bb95eef6f)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-15 14:55:56 +01:00
Sebastiaan van Stijn
65c7f3bac3 pkg/loopback: use ioctl helpers from x/sys/unix
Use the IoctlRetInt, IoctlSetInt and IoctlLoopSetStatus64 helper
functions defined in the golang.org/x/sys/unix package instead of
manually wrapping these using a locally defined function.

Inspired by 3cc3d8a560

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit c7c02eea81)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-15 14:55:17 +01:00
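For context, the x/sys/unix helpers named above wrap the raw ioctl(2) plumbing that the package previously hand-rolled. A small, illustrative use of one of them for loop devices (not the pkg/loopback code itself):

    package main

    import (
        "fmt"
        "os"

        "golang.org/x/sys/unix"
    )

    func main() {
        // Ask loop-control for a free loop device index via the helper,
        // instead of issuing the raw ioctl by hand (Linux only).
        ctrl, err := os.OpenFile("/dev/loop-control", os.O_RDWR, 0)
        if err != nil {
            fmt.Println("open loop-control:", err)
            return
        }
        defer ctrl.Close()

        index, err := unix.IoctlRetInt(int(ctrl.Fd()), unix.LOOP_CTL_GET_FREE)
        if err != nil {
            fmt.Println("LOOP_CTL_GET_FREE:", err)
            return
        }
        fmt.Printf("next free loop device: /dev/loop%d\n", index)
    }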
Sebastiaan van Stijn
5f35b157a3 vendor: golang.org/x/sys v0.0.0-20221006211917-84dc82d7e875
full diff: 3c1f35247d...84dc82d7e8

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 6742f74e0e)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-15 14:54:32 +01:00
Sebastiaan van Stijn
76e132ed56 Merge pull request #44448 from neersighted/actions_deprecations_22.06
[22.06 backport] ci(actions): migrate to file-based commands
2022-11-15 14:45:56 +01:00
Sebastiaan van Stijn
6f7ee1c942 Merge pull request #44449 from thaJeztah/22.06_backport_bump_gotest_tools
[22.06 backport] vendor: gotest.tools/v3 v3.4.0, github.com/google/go-cmp v0.5.9, remove golang.org/x/xerrors
2022-11-15 11:27:11 +01:00
Akihiro Suda
f476deac40 Merge pull request #44456 from thaJeztah/22.06_backport_bump_buildkit_v0.10.6
[22.06 backport] vendor: github.com/moby/buildkit v0.10.6
2022-11-15 10:23:56 +09:00
Sebastiaan van Stijn
11973d0c0a vendor: github.com/moby/buildkit v0.10.6
full diff: https://github.com/moby/buildkit/compare/v0.10.5...v0.10.6

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 06e4b7d1f8)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-14 20:38:51 +01:00
Sebastiaan van Stijn
251610397c vendor: gotest.tools/v3 v3.4.0
- removes github.com/spf13/pflag dependency
- removes use of deprecated io/ioutil package
- drops support for go1.16

full diff: https://github.com/gotestyourself/gotest.tools/compare/v3.3.0...v3.4.0

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit d43bc26717)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-11 18:16:39 +01:00
Sebastiaan van Stijn
83f90039ef vendor: github.com/google/go-cmp v0.5.9 to remove golang.org/x/xerrors dep
full diff: https://github.com/google/go-cmp/compare/v0.5.7...v0.5.9

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 57ba2df970)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-11 18:16:39 +01:00
Sebastiaan van Stijn
2fd846d40f vendor: gotest.tools v3.3.0
full diff: https://github.com/gotestyourself/gotest.tools/compare/v3.2.0...v3.3.0

- golden: accept -update for updating files
- assert: golden variables

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 3e1601a980)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-11 18:16:34 +01:00
Bjorn Neergaard
f9ab209417 chore: update supported go version to 1.18+
The 1.16 `io/fs` compatibility code was being built on 1.18 and 1.19.
Drop it completely as 1.16 is long EOL, and additionally drop 1.17 as it
has been EOL for a month and 1.18 is both the minimum Go supported by
the 20.10 branch, as well as a very easy jump from 1.17.

Signed-off-by: Bjorn Neergaard <bneergaard@mirantis.com>
(cherry picked from commit 85fa72c599)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-11 18:01:58 +01:00
Bjorn Neergaard
bfca3185ee ci(actions): bump outdated actions on Linux
Signed-off-by: Bjorn Neergaard <bneergaard@mirantis.com>
(cherry picked from commit 6a02afa56f)
Signed-off-by: Bjorn Neergaard <bneergaard@mirantis.com>
2022-11-10 16:48:37 -07:00
Bjorn Neergaard
7f45eb041c ci(actions): migrate to file-based commands
Signed-off-by: Bjorn Neergaard <bneergaard@mirantis.com>
(cherry picked from commit 0557569947)
Signed-off-by: Bjorn Neergaard <bneergaard@mirantis.com>
2022-11-10 16:48:33 -07:00
Tianon Gravi
b76a60dee6 Merge pull request #44414 from thaJeztah/22.06_backport_rm_deprecated_arm_fallback
[22.06 backport] Remove long-deprecated "arm" fallback
2022-11-10 12:21:14 -08:00
Sebastiaan van Stijn
4acfbaba1e Merge pull request #44430 from thaJeztah/22.06_swap_digestset
[22.06 backport] replace distribution/digestset with opencontainers/go-digest/digestset
2022-11-10 21:09:01 +01:00
Sebastiaan van Stijn
e749a31322 Merge pull request #44416 from thaJeztah/22.06_backport_enable_deprecated_check
[22.06 backport] Revert "validation: temporarily allows changes in integration-cli"
2022-11-10 18:02:41 +01:00
Sebastiaan van Stijn
7370bbc034 replace distribution/digestset with opencontainers/go-digest/digestset
The digestset package in opencontainers/go-digest is a 1:1 copy of the one in
distribution. It's no longer used in distribution itself, so may be removed
there at some point.

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 6174d00c03)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-09 10:22:38 +01:00
Samuel Karp
38152f4d5b Merge pull request #44411 from thaJeztah/22.06_backport_bump_go1.19.3
fixes https://github.com/golang/go/issues/56309
2022-11-08 19:12:03 -08:00
Sebastiaan van Stijn
21feb1808d Revert "validation: temporarily allows changes in integration-cli"
This reverts commit 7ed823ead9.

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 9b71a46899)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-05 18:35:47 +01:00
Sebastiaan van Stijn
5e15ce3a4a pkg/directory: remove unused MoveToSubdir() utility
This utility was added in 442b45628e as part of
user-namespaces, and first used in 44e1023a93 to
set up the daemon root, and move the existing content;
44e1023a93/daemon/daemon_experimental.go (L68-L71)

A later iteration no longer _moved_ the existing root directory, and removed the
use of `directory.MoveToSubdir()` e8532023f2

It looks like there are no external consumers of this utility, so we should be
safe to remove it.

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 26659d5eb8)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-05 18:31:05 +01:00
Sebastiaan van Stijn
92b96ac2ed pkg/directory: minor refactor of Size()
- separate exported function from implementation, to allow for GoDoc to be
  maintained in a single location.
- don't use named return variables (no "bare" return, and potentially shadowing
  variables)
- reverse the `os.IsNotExist(err) && d != dir` condition, putting the "lighter"
  `d != dir` first.

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit bd6217bb74)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-05 18:31:05 +01:00
Sebastiaan van Stijn
e0b105623e pkg/system: unconvert
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit ab677c41ea)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-05 18:30:44 +01:00
Sebastiaan van Stijn
9d86e1d204 pkg/system: move GetExitCode() to pkg/idtools, and un-export
This utility was only used in a single place, and had no external consumers.
Move it to where it's used.

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 07b1aa822c)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-05 18:30:44 +01:00
Sebastiaan van Stijn
3a946f5291 pkg/system: remove Umask() utility
It was only used in a couple of places, and most of those didn't need the
wrapper, as they were in unix/linux-only files.

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 4347080b46)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-05 18:30:26 +01:00
Sebastiaan van Stijn
cf1e138ab1 pkg/directory: Size(): add back type-casts to account for platform differences
I noticed the comment above this code, but didn't see a corresponding type-cast.
Looking at this file's history, I found that these were removed as part of
2f5f0af3fd, which looks to have overlooked some
deliberate type-casts.

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 0a861e68df)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-05 18:30:16 +01:00
Tianon Gravi
7175841ebd Remove long-deprecated "arm" fallback
This fallback is used when we filter the manifest list by the user-provided platform and find no matches such that we match the previous Docker behavior (before it supported variant matching).  This has been deprecated long enough that I think it's time we finally stop supporting this weird fallback, especially since it makes for buggy behavior like `docker pull --platform linux/arm/v5 alpine:3.16` leading to a `linux/arm/v6` image being pulled (I specified a variant, every manifest list entry specifies a variant, so clearly the only behavior I as a user could reasonably expect is an error that `linux/arm/v5` is not supported, but instead I get an explicitly incompatible image despite doing everything I as a user can to prevent that situation).

Signed-off-by: Tianon Gravi <admwiggin@gmail.com>
(cherry picked from commit 5bc17c3e54)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-05 18:23:43 +01:00
Cory Snider
f3e180b704 Update to Go 1.19.3 to address CVE-2022-41716
On Windows, syscall.StartProcess and os/exec.Cmd did not properly
    check for invalid environment variable values. A malicious
    environment variable value could exploit this behavior to set a
    value for a different environment variable. For example, the
    environment variable string "A=B\x00C=D" set the variables "A=B" and
    "C=D".

    Thanks to RyotaK (https://twitter.com/ryotkak) for reporting this
    issue.

    This is CVE-2022-41716 and Go issue https://go.dev/issue/56284.

This Go release also fixes https://github.com/golang/go/issues/56309, a
runtime bug which can cause random memory corruption when a goroutine
exits with runtime.LockOSThread() set. This fix is necessary to unblock
work to replace certain uses of pkg/reexec with unshared OS threads.

Signed-off-by: Cory Snider <csnider@mirantis.com>
(cherry picked from commit f9d4589976)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-11-05 17:52:37 +01:00
Sebastiaan van Stijn
afdc9a804a Merge pull request #44404 from neersighted/swarmkit_revendor_22.06
[22.06 backport] vendor: github.com/moby/swarmkit/v2 v2.0.0-20221102165002-6341884e5fc9
2022-11-03 22:27:56 +01:00
Sebastiaan van Stijn
e24277883f Merge pull request #44405 from vvoland/oci-artifacts-error-2206
[22.06 backport] distribution: Error when pulling OCI artifacts
2022-11-03 22:27:37 +01:00
Paweł Gronowski
07e84005ac distribution: Error when pulling OCI artifacts
Currently, an attempt to pull a reference which resolves to an OCI
artifact (a Helm chart, for example) results in the rather unrelated error
message `invalid rootfs in image configuration`.

This provides a more meaningful error when a user attempts to
download a media type which isn't image related.

Signed-off-by: Paweł Gronowski <pawel.gronowski@docker.com>
2022-11-03 19:41:51 +01:00
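The kind of check involved can be sketched as a media-type switch on the manifest's config descriptor; the exact types and wording below are illustrative, not the distribution code:

    package main

    import (
        "fmt"

        ocispec "github.com/opencontainers/image-spec/specs-go/v1"
    )

    // checkImageConfig rejects config media types that are not image configs
    // (for example a Helm chart config) with an error naming the media type,
    // instead of failing later with "invalid rootfs in image configuration".
    func checkImageConfig(mediaType string) error {
        switch mediaType {
        case ocispec.MediaTypeImageConfig,
            "application/vnd.docker.container.image.v1+json":
            return nil
        default:
            return fmt.Errorf("unsupported media type %q: not an image", mediaType)
        }
    }

    func main() {
        fmt.Println(checkImageConfig("application/vnd.cncf.helm.config.v1+json"))
    }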
Bjorn Neergaard
39d3d3db56 vendor: github.com/moby/swarmkit/v2 v2.0.0-20221102165002-6341884e5fc9
full diff: 48dd89375d...6341884e5f

Pulls in a set of fixes to SwarmKit's nascent Cluster Volumes support
discovered during subsequent development and testing.

Signed-off-by: Bjorn Neergaard <bneergaard@mirantis.com>
(cherry picked from commit 57c2545cd5)
Signed-off-by: Bjorn Neergaard <bneergaard@mirantis.com>
2022-11-03 12:30:53 -06:00
Cory Snider
4b79d9078a Merge pull request #44400 from corhere/backport-22.06/fix-task-delete-on-failed-start
[22.06 backport] Fix containerd task deletion after failed start
2022-11-02 18:15:19 -04:00
Cory Snider
1e0f2186a9 Fix containerd task deletion after failed start
Deleting a containerd task whose status is Created fails with a
"precondition failed" error. This is because (aside from Windows)
a process is spawned when the task is created, and deleting the task
while the process is running would leak the process if it was allowed.
libcontainerd mistakenly tries to clean up from a failed start by
deleting the created task, which will always fail with the
aforementioned error. Change it to pass the `WithProcessKill` delete
option so the cleanup has a chance to succeed.

Signed-off-by: Cory Snider <csnider@mirantis.com>
(cherry picked from commit 1bef9e3fbf)
Signed-off-by: Cory Snider <csnider@mirantis.com>
2022-11-02 16:59:22 -04:00
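The containerd client call in question looks roughly like the sketch below; the surrounding libcontainerd cleanup logic is more involved, only the delete-option usage is shown:

    package sketch

    import (
        "context"

        "github.com/containerd/containerd"
        "github.com/sirupsen/logrus"
    )

    // cleanupAfterFailedStart deletes a created-but-not-started task. Passing
    // containerd.WithProcessKill lets the delete succeed even though the init
    // process spawned at task creation still exists.
    func cleanupAfterFailedStart(ctx context.Context, task containerd.Task) {
        if _, err := task.Delete(ctx, containerd.WithProcessKill); err != nil {
            logrus.WithError(err).Warn("failed to clean up task after failed start")
        }
    }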
Cory Snider
4404c36460 Merge pull request #44376 from corhere/backport-22.06/gh-44363
[22.06 backport] Fix the max-concurrent-downloads and max-concurrent-uploads configs documentation
2022-10-31 13:00:46 -04:00
Cory Snider
75634f9a1e daemon: fix docs for config-default constants
Signed-off-by: Cory Snider <csnider@mirantis.com>
(cherry picked from commit ad4073edc1)
Signed-off-by: Cory Snider <csnider@mirantis.com>
2022-10-31 11:51:27 -04:00
Luis Henrique Mulinari
ad11d3f232 Fix the max-concurrent-downloads and max-concurrent-uploads configs documentation
This fix tries to address issues raised in #44346.
The max-concurrent-downloads and max-concurrent-uploads limits are applied to the whole engine and not to each pull/push command.

Signed-off-by: Luis Henrique Mulinari <luis.mulinari@gmail.com>
(cherry picked from commit 6c0aa5b00a)
Signed-off-by: Cory Snider <csnider@mirantis.com>
2022-10-31 11:51:27 -04:00
Sebastiaan van Stijn
cbaf1808cb Merge pull request #44360 from neersighted/backport_44224
[22.06 backport] Fix force-remove for cluster volumes
2022-10-26 10:36:39 -04:00
Drew Erny
03015fe6de fix force remove for cluster volumes
Signed-off-by: Drew Erny <derny@mirantis.com>
(cherry picked from commit 3246db3755)
Signed-off-by: Bjorn Neergaard <bneergaard@mirantis.com>
2022-10-25 15:18:34 -06:00
Sebastiaan van Stijn
fa3804f8ba Merge pull request #44357 from thaJeztah/22.06_backport_busybox_w32_img
[22.06 backport] integration: download busybox-w32 from GitHub Release
2022-10-25 07:44:47 -04:00
Sebastiaan van Stijn
4c1a3f096c Merge pull request #44355 from thaJeztah/22.06_vendor_containerd_1.6.9
[22.06 backport] vendor: github.com/containerd/containerd v1.6.9
2022-10-24 17:58:09 -04:00
CrazyMax
09a2f7a667 integration: download busybox-w32 from GitHub Release
Signed-off-by: CrazyMax <crazy-max@users.noreply.github.com>
(cherry picked from commit 4f1d1422de)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-10-24 17:04:10 -04:00
Brian Goff
02e02e512f Merge pull request #44352 from thaJeztah/22.06_update_containerd_binary
[22.06 backport] update containerd binary to v1.6.9
2022-10-24 11:57:06 -07:00
Sebastiaan van Stijn
24de1f7adc vendor: github.com/containerd/containerd v1.6.9
release notes: https://github.com/containerd/containerd/releases/tag/v1.6.9

full diff: https://github.com/containerd/containerd/compare/v1.6.8...v1.6.9

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 04dc007c76)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-10-24 14:24:27 -04:00
Sebastiaan van Stijn
c4685540e4 update containerd binary to v1.6.9
release notes: https://github.com/containerd/containerd/releases/tag/v1.6.9

full diff: containerd/containerd@v1.6.8...v1.6.9

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit ac79a02ace)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-10-24 13:55:25 -04:00
Sebastiaan van Stijn
5aac513617 Merge pull request #44345 from thaJeztah/22.06_backport_go1.18_compat
[22.06 backport] builder/remotecontext/git: allow building on go1.18
2022-10-21 19:39:10 +02:00
Sebastiaan van Stijn
80dc5186ec builder/remotecontext/git: allow building on go1.18
cmd.Environ() is new in go1.19, and not needed for this specific case.
Without this, trying to use this package in code that uses go1.18 will fail;

    builder/remotecontext/git/gitutils.go:216:23: cmd.Environ undefined (type *exec.Cmd has no field or method Environ)

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 4fdc1bb1fb)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-10-21 17:44:20 +02:00
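The go1.18-compatible alternative is the long-standing pattern of extending `os.Environ()` instead of the new `cmd.Environ()`; a generic sketch (not the exact gitutils change):

    package main

    import (
        "fmt"
        "os"
        "os/exec"
    )

    func main() {
        cmd := exec.Command("git", "version")
        // cmd.Environ() only exists in go1.19+; starting from os.Environ()
        // builds on go1.18 as well.
        cmd.Env = append(os.Environ(), "GIT_TERMINAL_PROMPT=0")
        out, err := cmd.CombinedOutput()
        fmt.Println(string(out), err)
    }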
Sebastiaan van Stijn
f9cb47a052 Merge pull request #44341 from thaJeztah/22.06_backport_buildkit_skip_unit
[22.06 backport] gha: buildkit: remove "skip-integration-tests" from matrix
2022-10-21 14:21:14 +02:00
Sebastiaan van Stijn
5202b5c781 Merge pull request #44328 from thaJeztah/22.06_backport_ghsa-ambiguous-pull-by-digest
[22.06 backport] Validate digest in repo for pull by digest
2022-10-21 14:20:22 +02:00
Sebastiaan van Stijn
28c34259c7 Merge pull request #44297 from thaJeztah/22.06_backport_windows_bits
[22.06 backport] windows cleanups
2022-10-21 02:44:02 +02:00
Sebastiaan van Stijn
67ea873f61 Merge pull request #44325 from corhere/backport-22.06/fix-git-file-leak
[22.06 backport] builder: Isolate Git from local system
2022-10-21 02:11:56 +02:00
Sebastiaan van Stijn
f72c96c5c4 gha: buildkit: make checks more readable
GitHub uses these parameters to construct a name; remove the ./ prefix
to make them more readable (and add it back where it's used).

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 0760c6f4e1)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-10-21 02:07:29 +02:00
Sebastiaan van Stijn
1bbb6f2454 gha: buildkit: remove "skip-integration-tests" from matrix
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit cfa2f9a2f2)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-10-21 02:07:26 +02:00
Sebastiaan van Stijn
c0be73f88d skip TestImagePullStoredfDigestForOtherRepo() on Windows and rootless
- On Windows, we don't build and run a local test registry (we're not running
  docker-in-docker), so we need to skip this test.
- On rootless, networking doesn't support this (currently)

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 4f43cb660a)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-10-21 01:50:09 +02:00
Brian Goff
727c4fdee3 Validate digest in repo for pull by digest
This is accomplished by storing the distribution source in the content
labels. If the distribution source is not found, then we check with the
registry to see if the digest exists in the repo; if it does exist, then
the puller will use it.

Signed-off-by: Brian Goff <cpuguy83@gmail.com>
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 27530efedb)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-10-21 01:50:09 +02:00
Sebastiaan van Stijn
b4c4be1f22 Revert "testutil/registry: remove unused WithStdout(), WithStErr() opts"
This reverts commit 1f21c4dd05.

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 92eca900b0)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-10-21 01:50:09 +02:00
Sebastiaan van Stijn
7106874e39 Merge pull request #44338 from thaJeztah/22.06_backport_buildkit_testskips
[22.06 backport] gha: update buildkit to v0.10.5-6-ge27c8e24 to skip some tests
2022-10-21 01:48:00 +02:00
Sebastiaan van Stijn
4bef6f5510 gha: update buildkit to v0.10.5-6-ge27c8e24 to skip some tests
full diff: https://github.com/moby/buildkit/compare/v0.10.5...v0.10.5-6-ge27c8e24

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 201fdf67ac)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-10-20 23:50:16 +02:00
Cory Snider
f056df579a builder: add missing doc comment
Signed-off-by: Cory Snider <csnider@mirantis.com>
2022-10-20 16:46:23 -04:00
Cory Snider
c062238ea4 builder: fix running git commands on Windows
Setting cmd.Env overrides the default of passing through the parent
process' environment, which works out fine most of the time, except when
it doesn't. For whatever reason, leaving out all the environment causes
git-for-windows sh.exe subprocesses to enter an infinite loop of
access violations during Cygwin initialization in certain environments
(specifically, our very own dev container image).

Signed-off-by: Cory Snider <csnider@mirantis.com>
2022-10-20 16:46:23 -04:00
Cory Snider
20ff8a2380 builder: make git config isolation opt-in
While it is undesirable for the system or user git config to be used
when the daemon clones a Git repo, it could break workflows if it was
unconditionally applied to docker/cli as well.

Signed-off-by: Cory Snider <csnider@mirantis.com>
2022-10-20 16:46:23 -04:00
Cory Snider
ca99cab891 builder: isolate git from local system
Prevent git commands we run from reading the user or system
configuration, or cloning submodules from the local filesystem.

Signed-off-by: Cory Snider <csnider@mirantis.com>
2022-10-20 16:46:23 -04:00
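Git's standard isolation knobs are environment- and config-based; a hedged sketch of the general technique (the exact variables and values used by the builder may differ):

    package main

    import (
        "fmt"
        "os"
        "os/exec"
    )

    func main() {
        cmd := exec.Command("git",
            // Block submodule clones from the local filesystem.
            "-c", "protocol.file.allow=user",
            "clone", "--recurse-submodules",
            "https://example.com/repo.git", "/tmp/ctx")
        cmd.Env = append(os.Environ(),
            "GIT_CONFIG_NOSYSTEM=1", // ignore the system gitconfig
            "HOME=/dev/null",        // hide the user's ~/.gitconfig
        )
        out, err := cmd.CombinedOutput()
        fmt.Println(string(out), err)
    }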
Cory Snider
5829b244ec builder: explicitly set CWD for all git commands
Keep It Simple! Set the working directory for git commands by...setting
the git process's working directory. Git commands can be run in the
parent process's working directory by passing the empty string.

Signed-off-by: Cory Snider <csnider@mirantis.com>
2022-10-20 16:46:23 -04:00
Cory Snider
3bc8fccc1b builder: modernize TestCheckoutGit
Make the test more debuggable by logging all git command output and
running each table-driven test case as a subtest.

Signed-off-by: Cory Snider <csnider@mirantis.com>
2022-10-20 16:46:23 -04:00
Sebastiaan van Stijn
4a96094bf5 Merge pull request #44321 from thaJeztah/22.06_backport_bump_buildkit
[22.06 backport] vendor: github.com/moby/buildkit v0.10.5
2022-10-19 10:08:32 +02:00
Sebastiaan van Stijn
00b44caa69 vendor: github.com/moby/buildkit v0.10.5
https://github.com/moby/buildkit/releases/tag/v0.10.5

full diff: https://github.com/moby/buildkit/compare/v0.10.4...v0.10.5

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 0fc17c42af)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-10-18 22:39:56 +02:00
Sebastiaan van Stijn
1fcb1dd728 Merge pull request #44314 from tianon/22.06-distributable
[22.06 backport] registry: allow "allow-nondistributable-artifacts" for Docker Hub
2022-10-18 14:39:53 +02:00
Sebastiaan van Stijn
aaa8f96cc9 registry: allow "allow-nondistributable-artifacts" for Docker Hub
Previously, Docker Hub was excluded when configuring "allow-nondistributable-artifacts".
With the updated policy announced by Microsoft, we can remove this restriction;
https://techcommunity.microsoft.com/t5/containers/announcing-windows-container-base-image-redistribution-rights/ba-p/3645201

There are plans to deprecate support for foreign layers altogether in the OCI,
and we should consider making this option the default, but as that requires
deprecating the option (and possibly keeping an "opt-out" option), we can look
at that separately.

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 30e5333ce3)
2022-10-17 15:04:59 -07:00
Sebastiaan van Stijn
671bf589e2 Change restart delay for Windows service to 15s
Previously we waited for 60 seconds after the service faulted before
restarting it. However, there isn't much benefit to waiting this long. We
expect 15 seconds to be a more reasonable delay.

Co-Authored-by: Kevin Parsons <kevpar@microsoft.com>
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 624daf8d9e)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-10-13 23:08:42 +02:00
Sebastiaan van Stijn
e1b240d6bd cmd/dockerd: use golang.org/x/sys Service.SetRecoveryActions()
This is the equivalent of the local implementation.

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 3c585e6567)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-10-13 23:08:42 +02:00
Sebastiaan van Stijn
18a54ed59c cmd/dockerd: use golang.org/x/sys/windows.SetStdHandle()
golang.org/x/sys/windows now implements this, so we can use that
instead of a local implementation.

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 6176ab5901)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-10-13 23:05:49 +02:00
Sebastiaan van Stijn
0c66bc948a cmd/dockerd: replace deprecated windows.IsAnInteractiveSession()
The `IsAnInteractiveSession` function was deprecated, and `IsWindowsService` is marked
as the recommended replacement.

For details, see 280f808b4a

> CL 244958 includes isWindowsService function that determines if a
> process is running as a service. The code of the function is based on
> public .Net implementation.
>
> IsAnInteractiveSession function implements similar functionality, but
> is based on an old Stackoverflow post, which is not as authoritative
> as code written by Microsoft for their official product.
>
> This change copies CL 244958 isWindowsService function into svc package
> and makes it public. The intention is that future users will prefer
> IsWindowsService to IsAnInteractiveSession.

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit ffcddc908e)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-10-13 23:05:48 +02:00
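For reference, the recommended replacement is a direct call into golang.org/x/sys/windows/svc; a minimal sketch:

    //go:build windows

    package sketch

    import "golang.org/x/sys/windows/svc"

    // runningAsService reports whether the process runs as a Windows service.
    // svc.IsWindowsService replaces the deprecated svc.IsAnInteractiveSession,
    // whose result had the opposite sense (interactive session == not a service).
    func runningAsService() (bool, error) {
        return svc.IsWindowsService()
    }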
Sebastiaan van Stijn
a12d359c1a daemon/graphdriver/windows: Remove() don't use defer() in a loop
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 75bdbf02a6)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-10-13 23:05:48 +02:00
Sebastiaan van Stijn
2d12e69c9f daemon/graphdriver/windows: use go-winio.GetFileSystemType()
go-winio now defines this function, so we can consume that.

Note that there's a difference between the old implementation and the original
one (added in 1cb9e9b44e). The old implementation
had special handling for win32 error codes, which was removed in the go-winio
implementation in 0966e1ad56

As `go-winio.GetFileSystemType()` calls `filepath.VolumeName(path)` internally,
this patch also removes the `string(home[0])`, which is redundant, and could
potentially panic if an empty string would be passed.

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 90431d1857)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-10-13 23:05:45 +02:00
Sebastiaan van Stijn
33ab36d6b3 Merge pull request #44279 from thaJeztah/22.06_backport_overlay_remove_kernel_check_override
[22.06 backport] daemon/graphdriver/overlay2: remove deprecated overrideKernelCheck
2022-10-13 22:35:45 +02:00
Sebastiaan van Stijn
fa10084a82 daemon/graphdriver/overlay2: remove deprecated overrideKernelCheck
Commit 955c1f881a (Docker v17.12.0) replaced
detection of support for multiple lowerdirs (as required by overlay2) to not
depend on the kernel version. The `overlay2.override_kernel_check` was still
used to print a warning that older kernel versions may not have full support.

After this, commit e226aea280 (Docker v20.10.0,
backported to v19.03.7) removed uses of the `overlay2.override_kernel_check`
option altogether, but we were still parsing it.

This patch changes the `parseOptions()` function to not parse the option,
printing a deprecation warning instead. We should change this to be an error,
but the `overlay2.override_kernel_check` option was not deprecated in the
documentation, so keeping it around for one more release.

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit e35700eb50)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-10-10 15:00:30 +02:00
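A sketch of the described parseOptions() behavior, with the option now only producing a deprecation warning (illustrative, not the overlay2 driver code):

    package sketch

    import (
        "strings"

        "github.com/sirupsen/logrus"
    )

    func parseOptions(options []string) error {
        for _, opt := range options {
            key, _, _ := strings.Cut(opt, "=")
            switch strings.ToLower(key) {
            case "overlay2.override_kernel_check":
                // No longer parsed or honored; warn that it is deprecated.
                logrus.Warn("overlay2.override_kernel_check is deprecated and should no longer be used")
            }
        }
        return nil
    }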
Sebastiaan van Stijn
43ce8f7d24 integration/plugin: remove deprecated overlay2.override_kernel_check
It's no longer used since e226aea280

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit b43a7ac530)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-10-10 15:00:29 +02:00
Sebastiaan van Stijn
87d9d96ab0 Merge pull request #44262 from thaJeztah/22.06_backport_bump_selinux
[22.06 backport] vendor: github.com/opencontainers/selinux v1.10.2
2022-10-06 20:54:38 +02:00
Sebastiaan van Stijn
a5ecbf4d22 vendor: github.com/opencontainers/selinux v1.10.2
full diff: https://github.com/opencontainers/selinux/compare/v1.10.1...v1.10.2

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit a5e1baf3ab)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-10-06 16:48:58 +02:00
Sebastiaan van Stijn
99aa9bb766 Merge pull request #44260 from thaJeztah/22.06_backport_deprecate_pkg_fsutil
[22.06 backport] pkg/fsutils: deprecate in favor of containerd/continuity/fs
2022-10-06 02:50:07 +02:00
Brian Goff
6442025060 Merge pull request #44259 from thaJeztah/22.06_backport_volume_unnamed_label
[22.06 backport] Volume prune: only prune anonymous volumes by default
2022-10-05 17:01:24 -07:00
Sebastiaan van Stijn
ac6624773e pkg/fsutils: deprecate in favor of containerd/continuity/fs
The pkg/fsutils package was forked in containerd, and later moved to
containerd/continuity/fs. As we're moving more bits to containerd, let's also
use the same implementation to reduce code-duplication and to prevent them from
diverging.

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 5b6b42162b)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-10-05 23:52:55 +02:00
Brian Goff
4669418731 Volume prune: only prune anonymous volumes by default
This adds a new filter argument to the volume prune endpoint "all".
When this is not set, or it is a false-y value, then only anonymous
volumes are considered for pruning.

When `all` is set to a truth-y value, you get the old behavior.

This is an API change, but I think one that is what most people would
want.

Signed-off-by: Brian Goff <cpuguy83@gmail.com>

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 618f26ccbc)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-10-05 23:50:01 +02:00
Sebastiaan van Stijn
ff07aadeb0 Merge pull request #44245 from thaJeztah/22.06_backport_api_update_swagger_for_builder_version
[22.06 backport] docs: swagger: update description for default builder version
2022-10-05 10:56:20 +02:00
Samuel Karp
cde4767cbd Merge pull request #44248 from thaJeztah/22.06_backport_bump_go_1.19.2
2022-10-05 00:29:49 -07:00
Sebastiaan van Stijn
1fe550cfc7 Update to go 1.19.2 to address CVE-2022-2879, CVE-2022-2880, CVE-2022-41715
From the mailing list:

We have just released Go versions 1.19.2 and 1.18.7, minor point releases.

These minor releases include 3 security fixes following the security policy:

- archive/tar: unbounded memory consumption when reading headers

  Reader.Read did not set a limit on the maximum size of file headers.
  A maliciously crafted archive could cause Read to allocate unbounded
  amounts of memory, potentially causing resource exhaustion or panics.
  Reader.Read now limits the maximum size of header blocks to 1 MiB.

  Thanks to Adam Korczynski (ADA Logics) and OSS-Fuzz for reporting this issue.

  This is CVE-2022-2879 and Go issue https://go.dev/issue/54853.

- net/http/httputil: ReverseProxy should not forward unparseable query parameters

  Requests forwarded by ReverseProxy included the raw query parameters from the
  inbound request, including unparseable parameters rejected by net/http. This
  could permit query parameter smuggling when a Go proxy forwards a parameter
  with an unparseable value.

  ReverseProxy will now sanitize the query parameters in the forwarded query
  when the outbound request's Form field is set after the ReverseProxy.Director
  function returns, indicating that the proxy has parsed the query parameters.
  Proxies which do not parse query parameters continue to forward the original
  query parameters unchanged.

  Thanks to Gal Goldstein (Security Researcher, Oxeye) and
  Daniel Abeles (Head of Research, Oxeye) for reporting this issue.

  This is CVE-2022-2880 and Go issue https://go.dev/issue/54663.

- regexp/syntax: limit memory used by parsing regexps

  The parsed regexp representation is linear in the size of the input,
  but in some cases the constant factor can be as high as 40,000,
  making relatively small regexps consume much larger amounts of memory.

  Each regexp being parsed is now limited to a 256 MB memory footprint.
  Regular expressions whose representation would use more space than that
  are now rejected. Normal use of regular expressions is unaffected.

  Thanks to Adam Korczynski (ADA Logics) and OSS-Fuzz for reporting this issue.

  This is CVE-2022-41715 and Go issue https://go.dev/issue/55949.

View the release notes for more information: https://go.dev/doc/devel/release#go1.19.2

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 7b4e4c08b5)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-10-04 20:42:39 +02:00
Sebastiaan van Stijn
edef49eeac docs: swagger: update description for default builder version
Commit 7b153b9e28 updated the main
swagger file, but didn't update the v1.42 version used for the
documentation as it wasn't created yet at the time.

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 271243d382)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-10-04 19:00:47 +02:00
Sebastiaan van Stijn
dbcd0e7aee Merge pull request #44237 from cpuguy83/22.06_fix_restore_volumerefs
[22.06] Fix live-restore w/ restart policies + volume refs
2022-10-03 23:32:29 +02:00
Brian Goff
0a87dc9f71 Fix live-restore w/ restart policies + volume refs
Before this change restarting the daemon in live-restore with running
containers + a restart policy meant that volume refs were not restored.
This specifically happens when the container is still running *and*
there is a restart policy that would make sure the container was running
again on restart.

The bug allows volumes to be removed even though containers are
referencing them. 😱

Signed-off-by: Brian Goff <cpuguy83@gmail.com>
(cherry picked from commit 4c0e0979b4)
Signed-off-by: Brian Goff <cpuguy83@gmail.com>
2022-10-03 17:36:16 +00:00
Sebastiaan van Stijn
ed3c4e8d8e Merge pull request #44232 from thaJeztah/22.06_backport_resolvconf_deadcode
[22.06 backport] libnetwork/resolvconf: removed unused GetIfChanged() and GetLastModified()
2022-10-03 10:59:39 +02:00
Sebastiaan van Stijn
3956644474 Merge pull request #44230 from thaJeztah/22.06_backport_migrate_filematcher
[22.06 backport] replace pkg/fileutils Matching funcs with github.com/moby/patternmatcher
2022-10-03 10:58:27 +02:00
Sebastiaan van Stijn
262ad3bb2f libnetwork/resolvconf: removed unused GetIfChanged() and GetLastModified()
These functions were used in 63a7ccdd23, which was
part of Docker v1.5.0 and v1.6.0, but removed in Docker v1.7.0 when the network
stack was replaced with libnetwork in d18919e304.

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 49de15cdcc)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-10-01 02:08:44 +02:00
Sebastiaan van Stijn
44d42c2b16 replace pkg/fileutils Matching funcs with github.com/moby/patternmatcher
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 3c69b9f2c5)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-10-01 01:09:36 +02:00
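The extracted package keeps the same matcher shape as the old pkg/fileutils helpers; a hedged usage sketch (constructor and method names assumed from the moby/patternmatcher module):

    package main

    import (
        "fmt"

        "github.com/moby/patternmatcher"
    )

    func main() {
        // .dockerignore-style matching, previously fileutils.NewPatternMatcher.
        pm, err := patternmatcher.New([]string{"*.md", "!README.md"})
        if err != nil {
            panic(err)
        }
        for _, f := range []string{"CHANGELOG.md", "README.md", "main.go"} {
            ok, _ := pm.MatchesOrParentMatches(f)
            fmt.Println(f, ok) // CHANGELOG.md true / README.md false / main.go false
        }
    }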
Sebastiaan van Stijn
14eb977c15 Merge pull request #44229 from thaJeztah/22.06_backport_more_linters_step1
[22.06 backport] fix (whitespace) formatting in preparation of enabling more linters
2022-10-01 01:09:05 +02:00
Sebastiaan van Stijn
f8e5145e96 runconfig, oci, image, layer, distribution: fix empty-lines (revive)
runconfig/config_test.go:23:46: empty-lines: extra empty line at the start of a block (revive)
    runconfig/config_test.go:75:55: empty-lines: extra empty line at the start of a block (revive)

    oci/devices_linux.go:57:34: empty-lines: extra empty line at the start of a block (revive)
    oci/devices_linux.go:60:69: empty-lines: extra empty line at the start of a block (revive)

    image/fs_test.go:53:38: empty-lines: extra empty line at the end of a block (revive)
    image/tarexport/save.go:88:29: empty-lines: extra empty line at the end of a block (revive)

    layer/layer_unix_test.go:21:34: empty-lines: extra empty line at the end of a block (revive)

    distribution/xfer/download.go:302:9: empty-lines: extra empty line at the end of a block (revive)
    distribution/manifest_test.go:154:99: empty-lines: extra empty line at the end of a block (revive)
    distribution/manifest_test.go:329:52: empty-lines: extra empty line at the end of a block (revive)
    distribution/manifest_test.go:354:59: empty-lines: extra empty line at the end of a block (revive)

    registry/config_test.go:323:42: empty-lines: extra empty line at the end of a block (revive)
    registry/config_test.go:350:33: empty-lines: extra empty line at the end of a block (revive)
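
For readers unfamiliar with the rule, a small illustration of what revive's
`empty-lines` check flags (illustrative only, not code from this repository):

```go
package main

import "fmt"

// flagged shows the pattern the empty-lines rule reports: a blank line
// immediately after the opening brace (or before the closing brace) of a block.
func flagged() string {

	return "extra empty line at the start of a block"
}

// fixed is the same function with the blank line removed.
func fixed() string {
	return "no extra empty lines"
}

func main() {
	fmt.Println(flagged())
	fmt.Println(fixed())
}
```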

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 8a2e1245d4)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-10-01 00:01:14 +02:00
Sebastiaan van Stijn
24888a10f6 cmd/dockerd: fix empty-lines (revive)
cmd/dockerd/trap/trap_linux_test.go:29:29: empty-lines: extra empty line at the end of a block (revive)
    cmd/dockerd/daemon.go:327:35: empty-lines: extra empty line at the start of a block (revive)

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit f63dea4337)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-10-01 00:01:14 +02:00
Sebastiaan van Stijn
3a1896db63 client: fix empty-lines (revive)
client/events.go:19:115: empty-lines: extra empty line at the start of a block (revive)
    client/events_test.go:60:31: empty-lines: extra empty line at the start of a block (revive)

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit cd51c9fafb)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-10-01 00:01:13 +02:00
Sebastiaan van Stijn
47319e065d api/server: fix empty-lines (revive)
api/server/router/build/build_routes.go:239:32: empty-lines: extra empty line at the start of a block (revive)
    api/server/middleware/version.go:45:241: empty-lines: extra empty line at the end of a block (revive)
    api/server/router/swarm/helpers_test.go:11:44: empty-lines: extra empty line at the end of a block (revive)

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit f71fe8476a)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-10-01 00:01:13 +02:00
Sebastiaan van Stijn
b9b6e68903 opts: fix empty-lines (revive)
opts/address_pools_test.go:7:39: empty-lines: extra empty line at the end of a block (revive)
    opts/opts_test.go:12:42: empty-lines: extra empty line at the end of a block (revive)
    opts/opts_test.go:60:49: empty-lines: extra empty line at the end of a block (revive)
    opts/opts_test.go:253:37: empty-lines: extra empty line at the end of a block (revive)

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit b04f1416f6)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-10-01 00:01:13 +02:00
Sebastiaan van Stijn
c6c4d07830 daemon: fix empty-lines (revive)
daemon/network/filter_test.go:174:19: empty-lines: extra empty line at the end of a block (revive)
    daemon/restart.go:17:116: empty-lines: extra empty line at the end of a block (revive)
    daemon/daemon_linux_test.go:255:41: empty-lines: extra empty line at the end of a block (revive)
    daemon/reload_test.go:340:58: empty-lines: extra empty line at the end of a block (revive)
    daemon/oci_linux.go:495:101: empty-lines: extra empty line at the end of a block (revive)
    daemon/seccomp_linux_test.go:17:36: empty-lines: extra empty line at the start of a block (revive)
    daemon/container_operations.go:560:73: empty-lines: extra empty line at the end of a block (revive)
    daemon/daemon_unix.go:558:76: empty-lines: extra empty line at the end of a block (revive)
    daemon/daemon_unix.go:1092:64: empty-lines: extra empty line at the start of a block (revive)
    daemon/container_operations.go:587:24: empty-lines: extra empty line at the end of a block (revive)
    daemon/network.go:807:18: empty-lines: extra empty line at the end of a block (revive)
    daemon/network.go:813:42: empty-lines: extra empty line at the end of a block (revive)
    daemon/network.go:872:72: empty-lines: extra empty line at the end of a block (revive)

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit ddb42f3ad2)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-10-01 00:01:13 +02:00
Sebastiaan van Stijn
9136c32327 daemon/images: fix empty-lines (revive)
daemon/images/image_squash.go:17:71: empty-lines: extra empty line at the start of a block (revive)
    daemon/images/store.go:128:27: empty-lines: extra empty line at the end of a block (revive)
    daemon/images/image_list.go:154:55: empty-lines: extra empty line at the start of a block (revive)
    daemon/images/image_delete.go:135:13: empty-lines: extra empty line at the end of a block (revive)
    daemon/images/image_search.go:25:64: empty-lines: extra empty line at the start of a block (revive)

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 05042ce472)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-10-01 00:01:10 +02:00
Sebastiaan van Stijn
7cb488934b daemon/logger: fix empty-lines (revive)
daemon/logger/loggertest/logreader.go:58:43: empty-lines: extra empty line at the end of a block (revive)
    daemon/logger/ring_test.go:119:34: empty-lines: extra empty line at the end of a block (revive)
    daemon/logger/adapter_test.go:37:12: empty-lines: extra empty line at the end of a block (revive)
    daemon/logger/adapter_test.go:41:44: empty-lines: extra empty line at the end of a block (revive)
    daemon/logger/adapter_test.go:170:9: empty-lines: extra empty line at the end of a block (revive)
    daemon/logger/loggerutils/sharedtemp_test.go:152:43: empty-lines: extra empty line at the end of a block (revive)
    daemon/logger/loggerutils/sharedtemp.go:124:117: empty-lines: extra empty line at the end of a block (revive)
    daemon/logger/syslog/syslog.go:249:87: empty-lines: extra empty line at the end of a block (revive)

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 0695a910c6)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-09-30 23:59:31 +02:00
Sebastiaan van Stijn
aea1aa0daa daemon/graphdriver: fix empty-lines (revive)
daemon/graphdriver/aufs/aufs.go:239:80: empty-lines: extra empty line at the start of a block (revive)
    daemon/graphdriver/graphtest/graphbench_unix.go:249:27: empty-lines: extra empty line at the start of a block (revive)
    daemon/graphdriver/graphtest/testutil.go:271:30: empty-lines: extra empty line at the end of a block (revive)
    daemon/graphdriver/graphtest/graphbench_unix.go:179:32: empty-block: this block is empty, you can remove it (revive)
    daemon/graphdriver/zfs/zfs.go:375:48: empty-lines: extra empty line at the end of a block (revive)
    daemon/graphdriver/overlay/overlay.go:248:89: empty-lines: extra empty line at the start of a block (revive)
    daemon/graphdriver/devmapper/deviceset.go:636:21: empty-lines: extra empty line at the end of a block (revive)
    daemon/graphdriver/devmapper/deviceset.go:1150:70: empty-lines: extra empty line at the start of a block (revive)
    daemon/graphdriver/devmapper/deviceset.go:1613:30: empty-lines: extra empty line at the end of a block (revive)
    daemon/graphdriver/devmapper/deviceset.go:1645:65: empty-lines: extra empty line at the start of a block (revive)
    daemon/graphdriver/btrfs/btrfs.go:53:101: empty-lines: extra empty line at the start of a block (revive)
    daemon/graphdriver/devmapper/deviceset.go:1944:89: empty-lines: extra empty line at the start of a block (revive)

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 9d9cca49b4)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-09-30 23:59:31 +02:00
Sebastiaan van Stijn
79caa2f955 daemon/cluster: fix empty-lines (revive)
daemon/cluster/convert/service.go:96:34: empty-lines: extra empty line at the end of a block (revive)
    daemon/cluster/convert/service.go:169:44: empty-lines: extra empty line at the end of a block (revive)
    daemon/cluster/convert/service.go:470:30: empty-lines: extra empty line at the end of a block (revive)
    daemon/cluster/convert/container.go:224:23: empty-lines: extra empty line at the start of a block (revive)
    daemon/cluster/convert/network.go:109:14: empty-lines: extra empty line at the end of a block (revive)
    daemon/cluster/convert/service.go:537:27: empty-lines: extra empty line at the end of a block (revive)
    daemon/cluster/services.go:247:19: empty-lines: extra empty line at the end of a block (revive)
    daemon/cluster/services.go:252:41: empty-lines: extra empty line at the end of a block (revive)
    daemon/cluster/services.go:256:12: empty-lines: extra empty line at the end of a block (revive)
    daemon/cluster/services.go:289:80: empty-lines: extra empty line at the start of a block (revive)
    daemon/cluster/executor/container/health_test.go:18:37: empty-lines: extra empty line at the start of a block (revive)
    daemon/cluster/executor/container/adapter.go:437:68: empty-lines: extra empty line at the end of a block (revive)

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 0c7b930952)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-09-30 23:59:30 +02:00
Sebastiaan van Stijn
87552f2e67 plugin: fix empty-lines (revive)
plugin/v2/settable_test.go:24:29: empty-lines: extra empty line at the end of a block (revive)
    plugin/manager_linux.go:96:6: empty-lines: extra empty line at the end of a block (revive)
    plugin/backend_linux.go:373:16: empty-lines: extra empty line at the start of a block (revive)

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 4eb9b5f20e)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-09-30 23:59:30 +02:00
Sebastiaan van Stijn
aad639c1fa volume: fix empty-lines (revive)
volume/mounts/parser_test.go:42:39: empty-lines: extra empty line at the end of a block (revive)
    volume/mounts/windows_parser.go:129:24: empty-lines: extra empty line at the end of a block (revive)
    volume/local/local_test.go:16:35: empty-lines: extra empty line at the end of a block (revive)
    volume/local/local_unix.go:145:3: early-return: if c {...} else {... return } can be simplified to if !c { ... return } ... (revive)
    volume/service/service_test.go:18:38: empty-lines: extra empty line at the end of a block (revive)

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 188724a597)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-09-30 23:59:30 +02:00
Sebastiaan van Stijn
bee5153c5b testutil: fix empty-lines (revive)
testutil/fixtures/load/frozen.go:141:99: empty-lines: extra empty line at the end of a block (revive)
    testutil/daemon/plugin.go:56:129: empty-lines: extra empty line at the end of a block (revive)

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit e9f1b83a4a)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-09-30 23:59:30 +02:00
Sebastiaan van Stijn
31a938c73c integration: fix empty-lines (revive)
integration/config/config_test.go:106:31: empty-lines: extra empty line at the end of a block (revive)
    integration/secret/secret_test.go:106:31: empty-lines: extra empty line at the end of a block (revive)
    integration/network/service_test.go:58:50: empty-lines: extra empty line at the end of a block (revive)
    integration/network/service_test.go:401:58: empty-lines: extra empty line at the end of a block (revive)
    integration/system/event_test.go:30:38: empty-lines: extra empty line at the end of a block (revive)
    integration/plugin/logging/read_test.go:19:41: empty-lines: extra empty line at the end of a block (revive)
    integration/service/list_test.go:30:48: empty-lines: extra empty line at the end of a block (revive)
    integration/service/create_test.go:400:46: empty-lines: extra empty line at the start of a block (revive)
    integration/container/logs_test.go:156:42: empty-lines: extra empty line at the end of a block (revive)
    integration/container/daemon_linux_test.go:135:44: empty-lines: extra empty line at the end of a block (revive)
    integration/container/restart_test.go:160:62: empty-lines: extra empty line at the end of a block (revive)
    integration/container/wait_test.go:181:47: empty-lines: extra empty line at the end of a block (revive)
    integration/container/restart_test.go:116:30: empty-lines: extra empty line at the end of a block (revive)

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 786e6d80ba)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-09-30 23:59:30 +02:00
Sebastiaan van Stijn
9d44956d8c builder: fix empty-lines (revive)
builder/remotecontext/detect_test.go:64:66: empty-lines: extra empty line at the end of a block (revive)
    builder/remotecontext/detect_test.go:78:46: empty-lines: extra empty line at the end of a block (revive)
    builder/remotecontext/detect_test.go:91:51: empty-lines: extra empty line at the end of a block (revive)
    builder/dockerfile/internals_test.go:95:38: empty-lines: extra empty line at the end of a block (revive)
    builder/dockerfile/copy.go:86:112: empty-lines: extra empty line at the end of a block (revive)
    builder/dockerfile/dispatchers_test.go:286:39: empty-lines: extra empty line at the start of a block (revive)
    builder/dockerfile/builder.go:280:38: empty-lines: extra empty line at the end of a block (revive)
    builder/dockerfile/dispatchers.go:66:85: empty-lines: extra empty line at the start of a block (revive)
    builder/dockerfile/dispatchers.go:559:85: empty-lines: extra empty line at the start of a block (revive)
    builder/builder-next/adapters/localinlinecache/inlinecache.go:26:183: empty-lines: extra empty line at the start of a block (revive)
    builder/builder-next/adapters/containerimage/pull.go:441:9: empty-lines: extra empty line at the start of a block (revive)

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit ecb4ed172b)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-09-30 23:59:29 +02:00
Sebastiaan van Stijn
08d01be870 integration-cli: fix empty-lines (revive)
integration-cli/docker_cli_pull_test.go:55:69: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_exec_test.go:46:64: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_service_health_test.go:86:65: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_api_images_test.go:128:66: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_api_swarm_node_test.go:79:69: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_health_test.go:51:57: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_health_test.go:159:73: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_swarm_unix_test.go:60:67: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_api_inspect_test.go:30:33: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_api_build_test.go:429:71: empty-lines: extra empty line at the start of a block (revive)
    integration-cli/docker_cli_attach_unix_test.go:19:78: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_api_build_test.go:470:70: empty-lines: extra empty line at the start of a block (revive)
    integration-cli/docker_cli_history_test.go:29:64: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_links_test.go:93:86: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_create_test.go:33:61: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_links_test.go:145:78: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_create_test.go:114:70: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_api_attach_test.go:226:153: empty-lines: extra empty line at the start of a block (revive)
    integration-cli/docker_cli_by_digest_test.go:239:71: empty-lines: extra empty line at the start of a block (revive)
    integration-cli/docker_cli_create_test.go:135:49: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_create_test.go:143:75: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_create_test.go:181:71: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_inspect_test.go:72:65: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_api_swarm_service_test.go:98:77: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_api_swarm_service_test.go:144:69: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_rmi_test.go:63:2: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_api_swarm_service_test.go:199:79: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_rmi_test.go:69:2: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_api_swarm_service_test.go:300:75: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_prune_unix_test.go:35:25: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_events_unix_test.go:393:60: empty-lines: extra empty line at the start of a block (revive)
    integration-cli/docker_cli_events_unix_test.go:441:71: empty-lines: extra empty line at the start of a block (revive)
    integration-cli/docker_cli_ps_test.go:33:67: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_ps_test.go:559:67: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_events_test.go:117:75: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_api_containers_test.go:547:74: empty-lines: extra empty line at the start of a block (revive)
    integration-cli/docker_api_containers_test.go:1054:84: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_api_containers_test.go:1076:87: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_api_containers_test.go:1232:72: empty-lines: extra empty line at the start of a block (revive)
    integration-cli/docker_api_containers_test.go:1801:21: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_network_unix_test.go:58:95: empty-lines: extra empty line at the start of a block (revive)
    integration-cli/docker_cli_network_unix_test.go:750:75: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_network_unix_test.go:765:76: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_swarm_test.go:617:100: empty-lines: extra empty line at the start of a block (revive)
    integration-cli/docker_cli_swarm_test.go:892:72: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_daemon_test.go:119:74: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_daemon_test.go:981:68: empty-lines: extra empty line at the start of a block (revive)
    integration-cli/docker_cli_daemon_test.go:1951:87: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_run_test.go:83:66: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_run_test.go:357:72: empty-lines: extra empty line at the start of a block (revive)
    integration-cli/docker_cli_build_test.go:89:83: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_build_test.go:114:83: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_build_test.go:183:80: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_build_test.go:290:71: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_build_test.go:314:65: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_build_test.go:331:67: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_build_test.go:366:76: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_build_test.go:403:67: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_build_test.go:648:67: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_build_test.go:708:72: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_build_test.go:938:66: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_build_test.go:1018:72: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_build_test.go:1097:2: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_build_test.go:1182:62: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_build_test.go:1244:66: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_build_test.go:1524:69: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_build_test.go:1546:80: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_build_test.go:1716:70: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_build_test.go:1730:65: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_build_test.go:2162:74: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_build_test.go:2270:71: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_build_test.go:2288:70: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_build_test.go:3206:65: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_build_test.go:3392:66: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_build_test.go:3433:72: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_build_test.go:3678:76: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_build_test.go:3732:67: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_build_test.go:3759:69: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_build_test.go:3802:61: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_build_test.go:3898:66: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_build_test.go:4107:9: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_build_test.go:4791:74: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_build_test.go:4821:73: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_build_test.go:4854:70: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_build_test.go:5341:74: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_cli_build_test.go:5593:81: empty-lines: extra empty line at the end of a block (revive)
    integration-cli/docker_api_containers_test.go:2145:11: empty-lines: extra empty line at the start of a block (revive)

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit dc0c2340b8)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-09-30 23:59:29 +02:00
Sebastiaan van Stijn
3660ee30e3 api/types: fix empty-lines (revive)
Also renamed variables that collided with an import

     api/types/strslice/strslice_test.go:36:41: empty-lines: extra empty line at the end of a block (revive)

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 31441778fa)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-09-30 23:59:29 +02:00
Sebastiaan van Stijn
3424a7c2e3 pkg/*: fix "empty-lines" (revive)
pkg/directory/directory.go:9:49: empty-lines: extra empty line at the start of a block (revive)
    pkg/pubsub/publisher.go:8:48: empty-lines: extra empty line at the start of a block (revive)
    pkg/loopback/attach_loopback.go:96:69: empty-lines: extra empty line at the start of a block (revive)
    pkg/devicemapper/devmapper_wrapper.go:136:48: empty-lines: extra empty line at the start of a block (revive)
    pkg/devicemapper/devmapper.go:391:35: empty-lines: extra empty line at the end of a block (revive)
    pkg/devicemapper/devmapper.go:676:35: empty-lines: extra empty line at the end of a block (revive)
    pkg/archive/changes_posix_test.go:15:38: empty-lines: extra empty line at the end of a block (revive)
    pkg/devicemapper/devmapper.go:241:51: empty-lines: extra empty line at the start of a block (revive)
    pkg/fileutils/fileutils_test.go:17:47: empty-lines: extra empty line at the end of a block (revive)
    pkg/fileutils/fileutils_test.go:34:48: empty-lines: extra empty line at the end of a block (revive)
    pkg/fileutils/fileutils_test.go:318:32: empty-lines: extra empty line at the end of a block (revive)
    pkg/tailfile/tailfile.go:171:6: empty-lines: extra empty line at the end of a block (revive)
    pkg/tarsum/fileinfosums_test.go:16:41: empty-lines: extra empty line at the end of a block (revive)
    pkg/tarsum/tarsum_test.go:198:42: empty-lines: extra empty line at the start of a block (revive)
    pkg/tarsum/tarsum_test.go:294:25: empty-lines: extra empty line at the start of a block (revive)
    pkg/tarsum/tarsum_test.go:407:34: empty-lines: extra empty line at the end of a block (revive)
    pkg/ioutils/fswriters_test.go:52:45: empty-lines: extra empty line at the end of a block (revive)
    pkg/ioutils/writers_test.go:24:39: empty-lines: extra empty line at the end of a block (revive)
    pkg/ioutils/bytespipe_test.go:78:26: empty-lines: extra empty line at the end of a block (revive)
    pkg/sysinfo/sysinfo_linux_test.go:13:37: empty-lines: extra empty line at the end of a block (revive)
    pkg/archive/archive_linux_test.go:57:64: empty-lines: extra empty line at the end of a block (revive)
    pkg/archive/changes.go:248:72: empty-lines: extra empty line at the start of a block (revive)
    pkg/archive/changes_posix_test.go:15:38: empty-lines: extra empty line at the end of a block (revive)
    pkg/archive/copy.go:248:124: empty-lines: extra empty line at the end of a block (revive)
    pkg/archive/diff_test.go:198:44: empty-lines: extra empty line at the end of a block (revive)
    pkg/archive/archive.go:304:12: empty-lines: extra empty line at the end of a block (revive)
    pkg/archive/archive.go:749:37: empty-lines: extra empty line at the end of a block (revive)
    pkg/archive/archive.go:812:81: empty-lines: extra empty line at the start of a block (revive)
    pkg/archive/copy_unix_test.go:347:34: empty-lines: extra empty line at the end of a block (revive)
    pkg/system/path.go:11:39: empty-lines: extra empty line at the end of a block (revive)
    pkg/system/meminfo_linux.go:29:21: empty-lines: extra empty line at the end of a block (revive)
    pkg/plugins/plugins.go:135:32: empty-lines: extra empty line at the end of a block (revive)
    pkg/authorization/response.go:71:48: empty-lines: extra empty line at the start of a block (revive)
    pkg/authorization/api_test.go:18:51: empty-lines: extra empty line at the end of a block (revive)
    pkg/authorization/middleware_test.go:23:44: empty-lines: extra empty line at the end of a block (revive)
    pkg/authorization/middleware_unix_test.go:17:46: empty-lines: extra empty line at the end of a block (revive)
    pkg/authorization/api_test.go:57:45: empty-lines: extra empty line at the end of a block (revive)
    pkg/authorization/response.go:83:50: empty-lines: extra empty line at the start of a block (revive)
    pkg/authorization/api_test.go:66:47: empty-lines: extra empty line at the end of a block (revive)
    pkg/authorization/middleware_unix_test.go:45:48: empty-lines: extra empty line at the end of a block (revive)
    pkg/authorization/response.go:145:75: empty-lines: extra empty line at the start of a block (revive)
    pkg/authorization/middleware_unix_test.go:56:51: empty-lines: extra empty line at the end of a block (revive)

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 412c650e05)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-09-30 23:59:25 +02:00
Sebastiaan van Stijn
36fda30565 Merge pull request #44228 from thaJeztah/22.06_backport_migrate_pubsub
[22.06 backport] migrate pkg/pubsub to github.com/moby/pubsub
2022-09-30 23:52:43 +02:00
Sebastiaan van Stijn
541fda8e90 migrate pkg/pubsub to github.com/moby/pubsub
This package was moved to a separate repository, using the steps below:

    # install filter-repo (https://github.com/newren/git-filter-repo/blob/main/INSTALL.md)
    brew install git-filter-repo

    cd ~/projects

    # create a temporary clone of docker
    git clone https://github.com/docker/docker.git moby_pubsub_temp
    cd moby_pubsub_temp

    # for reference
    git rev-parse HEAD
    # --> 572ca799db

    # remove all code, except for pkg/pubsub, license, and notice, and rename pkg/pubsub to /
    git filter-repo --path pkg/pubsub/ --path LICENSE --path NOTICE --path-rename pkg/pubsub/:

    # remove canonical imports
    git revert -s -S 585ff0ebbe6bc25b801a0e0087dd5353099cb72e

    # initialize module
    go mod init github.com/moby/pubsub
    go mod tidy

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 0249afc523)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-09-30 22:32:43 +02:00
Sebastiaan van Stijn
a8b1fec072 pkg/fileutils: remove gotest.tools as dependency, use t.TempDir()
In preparation of moving this package to a separate repository.
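
As a rough sketch of the pattern this enables (illustrative, not the actual
test code): the standard library's `t.TempDir()` provides a per-test directory
with automatic cleanup, removing the need for the gotest.tools filesystem
helpers.

```go
package fileutils_test

import (
	"os"
	"path/filepath"
	"testing"
)

// TestWriteInTempDir is an illustrative test showing t.TempDir(): the
// directory is created per test and removed automatically when the test ends.
func TestWriteInTempDir(t *testing.T) {
	dir := t.TempDir()
	name := filepath.Join(dir, "example.txt")
	if err := os.WriteFile(name, []byte("hello"), 0o644); err != nil {
		t.Fatal(err)
	}
}
```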

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 0440ca07ba)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-09-30 22:32:28 +02:00
Sebastiaan van Stijn
70c3d7783f Merge pull request #44217 from thaJeztah/22.06_backport_more_robust_rootless
[22.06 backport] contrib: make dockerd-rootless-setuptool.sh more robust
2022-09-29 16:31:36 +02:00
Sebastiaan van Stijn
fd0904805c contrib: make dockerd-rootless-setuptool.sh more robust
The `docker` CLI currently doesn't handle situations where the current context
(as defined in `~/.docker/config.json`) is invalid or doesn't exist. As loading
(and checking) the context happens during initialization of the CLI, this
prevents `docker context` commands from being used, which makes it complicated
to fix the situation. For example, running `docker context use <correct context>`
would fail, which makes it impossible to update `~/.docker/config.json`
other than by editing it manually.

For example, given the following `~/.docker/config.json`:

```json
{
        "currentContext": "nosuchcontext"
}
```

All of the commands below fail:

```bash
docker context inspect rootless
Current context "nosuchcontext" is not found on the file system, please check your config file at /Users/thajeztah/.docker/config.json

docker context rm --force rootless
Current context "nosuchcontext" is not found on the file system, please check your config file at /Users/thajeztah/.docker/config.json

docker context use default
Current context "nosuchcontext" is not found on the file system, please check your config file at /Users/thajeztah/.docker/config.json
```

While these things should be fixed, this patch updates the script to switch
the context using the `--context` flag; this flag is taken into account when
initializing the CLI, so that having an invalid context configured won't
block `docker context` commands from being executed. Given that all `context`
commands are local operations, "any" context can be used (it doesn't need to
make a connection with the daemon).

With this patch, those commands can now be run (and won't fail for the wrong
reason);

```bash
 docker --context=default context inspect -f "{{.Name}}" rootless
rootless

docker --context=default context inspect -f "{{.Name}}" rootless-doesnt-exist
context "rootless-doesnt-exist" does not exist
```

One other issue may also cause things to fail during uninstall; trying to remove
a context that doesn't exist will fail (even with the `-f` / `--force` option
set);

```bash
docker --context=default context rm blablabla
Error: context "blablabla": not found
```

While this is "ok" in most circumstances, it also means that (potentially) the
current context is not reset to "default", so this patch adds an explicit
`docker context use`, as well as unsetting the `DOCKER_HOST` and `DOCKER_CONTEXT`
environment variables.

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit e2114731e7)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-09-29 10:03:30 +02:00
Sebastiaan van Stijn
3977a3c6e8 Merge pull request #44206 from thaJeztah/22.06_backport_idtools_fix_infinite_loop
[22.06 backport] pkg/idtools: mkdirAs(): fix infinite loops and repeated "chown"
2022-09-27 23:44:44 +02:00
Sebastiaan van Stijn
43cfc50bbb Merge pull request #44207 from neersighted/createImpliedDirectories_22.06
[22.06 backport] refactor(pkg/archive): factor out createImpliedDirectories helper
2022-09-27 23:34:22 +02:00
Bjorn Neergaard
f6ebfaea19 test(pkg/archive): add TestImpliedDirectoryPermissions
Co-authored-by: Cory Snider <csnider@mirantis.com>
Signed-off-by: Bjorn Neergaard <bneergaard@mirantis.com>
(cherry picked from commit 5dff494b87)
2022-09-27 14:01:30 -06:00
Bjorn Neergaard
daa8708601 refactor(pkg/archive): factor out createImpliedDirectories helper
This code was duplicated in two places -- factor it out, add
documentation, and move magic numbers into a constant.

Additionally, use the same permissions (0755) in both code paths, and
ensure that the ID map is used in both code paths.

Co-authored-by: Vasiliy Ulyanov <vulyanov@suse.de>
Signed-off-by: Bjorn Neergaard <bneergaard@mirantis.com>
Signed-off-by: Vasiliy Ulyanov <vulyanov@suse.de>
(cherry picked from commit 4831ff9f27)
2022-09-27 14:01:30 -06:00
Sebastiaan van Stijn
7114360901 pkg/idtools: mkdirAs(): fix infinite loops and repeated "chown"
This fixes an infinite loop in mkdirAs(), used by `MkdirAllAndChown`,
`MkdirAndChown`, and `MkdirAllAndChownNew`, as well as directories being
chown'd multiple times when relative paths are used.

The for loop in this function was incorrectly assuming that;

1. `filepath.Dir()` would always return the parent directory of any given path
2. traversing any given path to ultimately result in "/"

While this is correct for absolute and "cleaned" paths, both assumptions are
incorrect in some variations of "path";

1. for paths with a trailing path-separator ("some/path/"), or dot ("."),
   `filepath.Dir()` considers the (implicit) "." to be a location _within_ the
   directory, and returns "some/path" as the ("parent") directory. This resulted
   in the path itself being included _twice_ in the list of paths to chown.
2. for relative paths ("./some-path", "../some-path"), "traversing" the path
   would never end in "/", causing the for loop to run indefinitely:

    ```go
    // walk back to "/" looking for directories which do not exist
    // and add them to the paths array for chown after creation
    dirPath := path
    for {
        dirPath = filepath.Dir(dirPath)
        if dirPath == "/" {
            break
        }
        if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) {
            paths = append(paths, dirPath)
        }
    }
    ```

A _partial_ mitigation for this would be to use `filepath.Clean()` before using
the path (while `filepath.Dir()` _does_ call `filepath.Clean()`, it only does so
_after_ some processing, so only the result is cleaned). Doing so would prevent
the double chown from happening, but would not prevent the "final" path from
being "." or ".." (in the relative path case), still causing an infinite loop,
or requiring additional checks for "." / "..".

| path           | filepath.Dir(path) | filepath.Dir(filepath.Clean(path)) |
|----------------|--------------------|------------------------------------|
| some-path      | .                  | .                                  |
| ./some-path    | .                  | .                                  |
| ../some-path   | ..                 | ..                                 |
| some/path/     | some/path          | some                               |
| ./some/path/   | some/path          | some                               |
| ../some/path/  | ../some/path       | ../some                            |
| some/path/.    | some/path          | some                               |
| ./some/path/.  | some/path          | some                               |
| ../some/path/. | ../some/path       | ../some                            |
| /some/path/    | /some/path         | /some                              |
| /some/path/.   | /some/path         | /some                              |

Instead, this patch adds a `filepath.Abs()` call to the function, to make sure
that paths are both cleaned and do not result in an infinite loop.
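
A minimal sketch of the fixed walk (simplified from the description above, not
the verbatim moby code), assuming Unix-style path separators:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// missingParents resolves the path to an absolute, cleaned form first, so the
// walk below always terminates at "/" and never records a directory twice.
// Unix-only sketch: on Windows filepath.Dir bottoms out at a drive root, not "/".
func missingParents(path string) ([]string, error) {
	dirPath, err := filepath.Abs(path)
	if err != nil {
		return nil, err
	}
	var paths []string
	for dirPath != "/" {
		dirPath = filepath.Dir(dirPath)
		if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) {
			paths = append(paths, dirPath)
		}
	}
	return paths, nil
}

func main() {
	// Relative input no longer loops forever: "./some/path/" is resolved
	// against the working directory before walking up to "/".
	fmt.Println(missingParents("./some/path/"))
}
```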

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 1e13247d6d)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-09-27 21:59:47 +02:00
Sebastiaan van Stijn
fc6192786a Merge pull request #44195 from thaJeztah/22.06_backport_update_golangci_lint
[22.06 backport] golangci-lint: update to v1.49.0
2022-09-27 18:24:50 +02:00
Sebastiaan van Stijn
3d6a13f072 Merge pull request #44198 from thaJeztah/22.06_backport_jenkinsfile_ubuntu_2004
[22.06 backport] Jenkinsfile: use Ubuntu 20.04 for DCO stage
2022-09-27 17:54:50 +02:00
Sebastiaan van Stijn
5ebe35cc09 Merge pull request #44201 from crazy-max/22.06_backport_api-fix-logo
[22.06 backport] swagger: update links to logo
2022-09-27 17:54:33 +02:00
CrazyMax
5dfec22079 swagger: update links to logo
Signed-off-by: CrazyMax <crazy-max@users.noreply.github.com>
(cherry picked from commit 7f3602f1c9)
2022-09-27 11:56:36 +02:00
Sebastiaan van Stijn
cee2490d84 Jenkinsfile: use Ubuntu 20.04 for DCO stage
Also switching to arm64, as all amd64 stages have moved to GitHub Actions;
using arm64 allows the same machine to be used for tests after the DCO check
has completed.

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 419c47a80a)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-09-26 19:56:46 +02:00
Sebastiaan van Stijn
3ce520ec80 golangci-lint: update to v1.49.0
Remove the "deadcode", "structcheck", and "varcheck" linters, as they are
deprecated:

    WARN [runner] The linter 'deadcode' is deprecated (since v1.49.0) due to: The owner seems to have abandoned the linter.  Replaced by unused.
    WARN [runner] The linter 'structcheck' is deprecated (since v1.49.0) due to: The owner seems to have abandoned the linter.  Replaced by unused.
    WARN [runner] The linter 'varcheck' is deprecated (since v1.49.0) due to: The owner seems to have abandoned the linter.  Replaced by unused.
    WARN [linters context] structcheck is disabled because of generics. You can track the evolution of the generics support by following the https://github.com/golangci/golangci-lint/issues/2649.

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 2f1c382a6d)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-09-26 11:58:07 +02:00
Sebastiaan van Stijn
7772535e79 Merge pull request #44175 from thaJeztah/22.06_backport_fix_g112_slowlorus
[22.06 backport] set ReadHeaderTimeout to address G112: Potential Slowloris Attack (gosec)
2022-09-26 11:54:09 +02:00
Samuel Karp
bebad9e22e Merge pull request #44190 from thaJeztah/22.06_backport_sequential_release 2022-09-25 00:06:26 -07:00
Samuel Karp
b31d51cac6 Merge pull request #44188 from thaJeztah/22.06_backport_bump_go_systemd 2022-09-25 00:02:55 -07:00
Sebastiaan van Stijn
1d7fb64a6e vendor: github.com/moby/sys/sequential v0.5.0
no changes, just updated to use the tagged version;

full diff: https://github.com/moby/sys/compare/b22ba8a69b30...sequential/v0.5.0

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 489e7b61bf)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-09-23 18:42:11 +02:00
Sebastiaan van Stijn
ae65811be2 vendor: github.com/coreos/go-systemd v22.4.0
- dbus: add Connected methods to check connections status
- dbus: add support for querying unit by PID
- dbus: implement support for cgroup freezer APIs
- journal: remove implicit initialization
- login1: add methods to get session/user properties
- login1: add context-aware ListSessions and ListUsers methods

full diff: https://github.com/coreos/go-systemd/compare/v22.3.2...v22.4.0

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 323ab8ef97)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-09-23 17:27:02 +02:00
Sebastiaan van Stijn
0e873d5cd8 Merge pull request #44183 from thaJeztah/22.06_backport_remove_os_check
[22.06 backport] Remove the OS check when creating a container
2022-09-22 21:37:39 +02:00
Djordje Lukic
2bc36de638 Remove the OS check when creating a container
Now that we can pass any custom containerd shim to dockerd, there is no need
for this check. Without it, it becomes possible to use wasm shims, for
example with images that have "wasi" as the OS.

Signed-off-by: Djordje Lukic <djordje.lukic@docker.com>
(cherry picked from commit 1a3d8019d1)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-09-22 19:52:22 +02:00
Sebastiaan van Stijn
aca9143c13 Merge pull request #44126 from thaJeztah/22.06_backport_image_spec_no_literal
[22.06 backport] Update uses of Image platform fields in OCI image-spec
2022-09-22 19:01:11 +02:00
Sebastiaan van Stijn
e143eed8bc Merge pull request #44168 from thaJeztah/22.06_backport_test_summary
[22.06 backport] ci(test): report summary output
2022-09-22 15:18:05 +02:00
Sebastiaan van Stijn
7d621608dd Merge pull request #44173 from crazy-max/22.06_backport_ci-cache
[22.06 backport] ci: reduce cache throttling limits
2022-09-22 13:18:02 +02:00
Sebastiaan van Stijn
997ec12ec8 set ReadHeaderTimeout to address G112: Potential Slowloris Attack (gosec)
After discussing in the maintainers meeting, we concluded that Slowloris attacks
are not a real risk other than potentially having some additional goroutines
lingering around, so we set a long timeout to satisfy the linter, and to at
least have "some" timeout.

    libnetwork/diagnostic/server.go:96:10: G112: Potential Slowloris Attack because ReadHeaderTimeout is not configured in the http.Server (gosec)
        srv := &http.Server{
            Addr:    net.JoinHostPort(ip, strconv.Itoa(port)),
            Handler: s,
        }
    api/server/server.go:60:10: G112: Potential Slowloris Attack because ReadHeaderTimeout is not configured in the http.Server (gosec)
                srv: &http.Server{
                    Addr: addr,
                },
    daemon/metrics_unix.go:34:13: G114: Use of net/http serve function that has no support for setting timeouts (gosec)
            if err := http.Serve(l, mux); err != nil && !strings.Contains(err.Error(), "use of closed network connection") {
                      ^
    cmd/dockerd/metrics.go:27:13: G114: Use of net/http serve function that has no support for setting timeouts (gosec)
            if err := http.Serve(l, mux); err != nil && !strings.Contains(err.Error(), "use of closed network connection") {
                      ^
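
For illustration, a minimal sketch of the kind of configuration that satisfies
G112 (the timeout value here is made up for the example, not the one used in
the daemon):

```go
package main

import (
	"net/http"
	"time"
)

// newServer returns an http.Server with ReadHeaderTimeout set, which is what
// gosec's G112 check looks for: it bounds how long a client may take to send
// the request headers, limiting Slowloris-style slow-header connections.
func newServer(addr string, handler http.Handler) *http.Server {
	return &http.Server{
		Addr:              addr,
		Handler:           handler,
		ReadHeaderTimeout: 5 * time.Minute, // illustrative value
	}
}

func main() {
	srv := newServer("127.0.0.1:0", http.NotFoundHandler())
	_ = srv.ListenAndServe()
}
```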

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 55fd77f724)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-09-22 12:27:32 +02:00
CrazyMax
4a8f744255 ci: reduce cache throttling limits
Signed-off-by: CrazyMax <crazy-max@users.noreply.github.com>
(cherry picked from commit 6d59619d6e)
2022-09-22 11:29:44 +02:00
Sebastiaan van Stijn
49a2f5c55c Merge pull request #44165 from thaJeztah/22.06_backport_sysvinit_debian_restart
[22.06 backport] fix debian sysvinit script fails to restart docker daemon when stopped
2022-09-21 23:32:33 +02:00
CrazyMax
07efcaf3b2 ci(test): report summary output
Signed-off-by: CrazyMax <crazy-max@users.noreply.github.com>
(cherry picked from commit 7b9877bd8a)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-09-21 18:08:57 +02:00
Sebastiaan van Stijn
6b04087d5f Merge pull request #44163 from thaJeztah/22.06_backport_memberlist_transient_bumps
[22.06 backport] chore: bump transient dependencies of memberlist
2022-09-21 18:04:37 +02:00
Sebastiaan van Stijn
d752acd960 Merge pull request #44164 from thaJeztah/22.06_backport_client_kernel_memory
[22.06 backport] client: ignore kernel-memory on API >= 1.42
2022-09-21 18:03:48 +02:00
Yann Autissier
7f94f2b393 fix debian sysvinit script fails to restart docker daemon when stopped
Fixes: #44130
Signed-off-by: Yann Autissier <yann.autissier@gmail.com>
(cherry picked from commit 8ad8c6d887)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-09-21 16:32:57 +02:00
Sebastiaan van Stijn
970c938b56 client: ignore kernel-memory on API >= 1.42
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 2597a71623)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-09-21 16:27:20 +02:00
Bjorn Neergaard
d41ebd79f7 vendor: github.com/armon/go-metrics v0.4.1
Signed-off-by: Bjorn Neergaard <bneergaard@mirantis.com>
(cherry picked from commit c0fa14e8af)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-09-21 16:24:32 +02:00
Bjorn Neergaard
d0fadc859d vendor: github.com/google/btree v1.1.2
Signed-off-by: Bjorn Neergaard <bneergaard@mirantis.com>
(cherry picked from commit 1d7ceb2fee)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-09-21 16:24:29 +02:00
Sebastiaan van Stijn
40b28dc7e1 Merge pull request #44159 from crazy-max/22.06_backport_ci-fix-filter-pattern
[22.06 backport] ci: fix branch filter pattern
2022-09-21 10:58:02 +02:00
CrazyMax
44c5f7721a ci: fix branch filter pattern
Signed-off-by: CrazyMax <crazy-max@users.noreply.github.com>
(cherry picked from commit 5e50d002f1)
2022-09-20 21:27:42 +02:00
Sebastiaan van Stijn
a13cd44a13 Merge pull request #44148 from crazy-max/22.06_backport_cleanup-test-suite
[22.06 backport] integration-cli: remove TestDockerSuite func
2022-09-20 11:50:30 +02:00
Sebastiaan van Stijn
2e89072681 Merge pull request #44154 from crazy-max/22.06_backport_jenkins-rm-validate
[22.06 backport] Jenkinsfile: remove leftover steps and stages
2022-09-20 11:16:38 +02:00
Sebastiaan van Stijn
7b5de59256 Merge pull request #44152 from crazy-max/22.06_backport_gha-validate
[22.06 backport] ci(test): validate job matrix
2022-09-19 17:56:49 +02:00
CrazyMax
00b1722fb4 Jenkinsfile: remove unit-validate stage
The leftover cross step is already in GHA, so we can remove the
unit-validate stage.

Signed-off-by: CrazyMax <crazy-max@users.noreply.github.com>
(cherry picked from commit 8596486743)
2022-09-19 16:34:07 +02:00
CrazyMax
8fdaad4018 Jenkinsfile: remove report bundles creation in unit-validate
This was not removed from the Jenkinsfile when we moved
to GHA for unit and integration tests.

Signed-off-by: CrazyMax <crazy-max@users.noreply.github.com>
(cherry picked from commit cd54f31984)
2022-09-19 16:34:06 +02:00
CrazyMax
fefe6290e5 Jenkinsfile: remove validation steps moved to GHA
Signed-off-by: CrazyMax <crazy-max@users.noreply.github.com>
(cherry picked from commit a354970eaa)
2022-09-19 16:34:06 +02:00
CrazyMax
f925f295f4 ci: reusable dco workflow
Signed-off-by: CrazyMax <crazy-max@users.noreply.github.com>
(cherry picked from commit 7602edfd06)
2022-09-16 19:53:19 +02:00
CrazyMax
cc770330f8 ci(test): dynamic validate matrix
Signed-off-by: CrazyMax <crazy-max@users.noreply.github.com>
(cherry picked from commit 5a98363a92)
2022-09-16 19:53:19 +02:00
CrazyMax
e42f7db450 ci(test): validate job matrix
Signed-off-by: CrazyMax <crazy-max@users.noreply.github.com>
(cherry picked from commit a73d78f8d5)
2022-09-16 19:53:19 +02:00
CrazyMax
9a57be4ac4 integration-cli: remove TestDockerSuite func
Signed-off-by: CrazyMax <crazy-max@users.noreply.github.com>
(cherry picked from commit 9b428a3d33)
2022-09-15 19:10:04 +02:00
Sebastiaan van Stijn
95831246a2 Update uses of Image platform fields in OCI image-spec
The OCI image spec is considering changing the Image struct to embed the
Platform type (see opencontainers/image-spec#959) in the Go implementation.
Moby currently uses some struct-literals to propagate the platform fields,
which will break once those changes in the OCI spec are merged.

Ideally (once that change arrives) we would update the code to set the Platform
information as a whole, instead of assigning related fields individually, but
in some cases in the code, image platform information is only partially set
(for example, OSVersion and OSFeatures are not preserved in all cases). This
may be on purpose, so needs to be reviewed.

This patch keeps the current behavior (assigning only specific fields), but
removes the use of struct-literals to make the code compatible with the
upcoming changes in the image-spec module.
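
A small sketch of the pattern difference (illustrative only; the field names
come from the image-spec Go types, the helper itself is made up):

```go
package main

import (
	"fmt"

	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// fromFields assigns platform fields one by one; this keeps compiling whether
// the fields live directly on Image or are promoted from an embedded Platform.
// A struct literal such as ocispec.Image{Architecture: ..., OS: ...} would
// break once those fields move into the embedded struct.
func fromFields(arch, os string) ocispec.Image {
	var img ocispec.Image
	img.Architecture = arch
	img.OS = os
	return img
}

func main() {
	fmt.Printf("%+v\n", fromFields("amd64", "linux"))
}
```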

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 3cb933db9d)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-09-10 12:19:53 +02:00
Akihiro Suda
8af2e62556 Merge pull request #44117 from thaJeztah/22.06_backport_bump_klauspost_compress
[22.06 backport] vendor: github.com/klauspost/compress v1.15.9
2022-09-09 20:37:31 +09:00
Sebastiaan van Stijn
6236ebaed5 vendor: github.com/klauspost/compress v1.15.9
various fixes in zstd compression

- https://github.com/klauspost/compress/releases/tag/v1.15.9
- https://github.com/klauspost/compress/releases/tag/v1.15.8
- https://github.com/klauspost/compress/releases/tag/v1.15.7
- https://github.com/klauspost/compress/releases/tag/v1.15.6
- https://github.com/klauspost/compress/releases/tag/v1.15.5
- https://github.com/klauspost/compress/releases/tag/v1.15.4
- https://github.com/klauspost/compress/releases/tag/v1.15.3
- https://github.com/klauspost/compress/releases/tag/v1.15.2

full diff: https://github.com/klauspost/compress/compare/v1.15.1...v1.15.9

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit f7277806c8)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-09-09 03:40:36 +02:00
Sebastiaan van Stijn
50d3438b26 Merge pull request #44121 from thaJeztah/22.06_backport_GHSA_rc4r_wh2q_q6c4
[22.06 backport] Updates for supplementary group permissions
2022-09-09 02:11:57 +02:00
Sebastiaan van Stijn
366d551cd2 Update some tests for supplementary group permissions
Update tests checking for groups to adjust for the policy change in
de7af816e7, which caused those tests
to fail:

    === FAIL: amd64.integration-cli TestDockerSwarmSuite/TestSwarmServiceWithGroup (1.94s)
    docker_cli_swarm_test.go:311: assertion failed: uid=0(root) gid=0(root) groups=0(root),10(wheel),29(audio),50(staff),777 (string) != uid=0(root) gid=0(root) groups=10(wheel),29(audio),50(staff),777
(string)
    --- FAIL: TestDockerSwarmSuite/TestSwarmServiceWithGroup (1.94s)

    === FAIL: amd64.integration-cli TestDockerCLIRunSuite/TestRunGroupAdd (0.41s)
    docker_cli_run_test.go:1091: expected output uid=0(root) gid=0(root) groups=10(wheel),29(audio),50(staff),777 received uid=0(root) gid=0(root) groups=0(root),10(wheel),29(audio),50(staff),777
    --- FAIL: TestDockerCLIRunSuite/TestRunGroupAdd (0.41s)

    === FAIL: amd64.integration-cli TestDockerCLIRunSuite/TestRunUserByIDZero (0.41s)
    docker_cli_run_test.go:790: expected daemon user got uid=0(root) gid=0(root) groups=0(root),10(wheel)
    --- FAIL: TestDockerCLIRunSuite/TestRunUserByIDZero (0.41s)

    === FAIL: amd64.integration-cli TestDockerCLIRunSuite (195.70s)

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit c7e77dba7f)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-09-09 00:07:25 +02:00
Nicolas De Loof
393027d1b1 AdditionalGids must include effective group ID
otherwise this one won't be considered for permission checks

Signed-off-by: Nicolas De Loof <nicolas.deloof@gmail.com>
(cherry picked from commit 25345f2c04)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-09-09 00:07:21 +02:00
Sebastiaan van Stijn
21d818be87 Merge pull request #44115 from thaJeztah/22.06_backport_cleanup_jenkins
[22.06 backport] Jenkinsfile: remove steps moved to GitHub Actions
2022-09-08 21:17:48 +02:00
Brian Goff
6d65028804 Merge pull request #44096 from thaJeztah/22.06_backport_bump_units
[22.06 backport] vendor: github.com/docker/go-units v0.5.0
2022-09-08 12:09:35 -07:00
Brian Goff
c0e1c67c78 Merge pull request #44112 from crazy-max/22.06_backport_gha-test
[22.06 backport] ci: gha test workflow for integration and unit test
2022-09-08 11:20:42 -07:00
CrazyMax
b9b8ddc160 Jenkinsfile: remove steps moved to GitHub Actions
Signed-off-by: CrazyMax <crazy-max@users.noreply.github.com>
(cherry picked from commit 9f8bd80487)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-09-08 16:48:15 +02:00
CrazyMax
d96d56ff09 ci: fix .windows workflow name
Signed-off-by: CrazyMax <crazy-max@users.noreply.github.com>
(cherry picked from commit 36688496ca)
2022-09-08 13:50:19 +02:00
CrazyMax
cc7b8cc980 ci(test): send coverage to codecov
Signed-off-by: CrazyMax <crazy-max@users.noreply.github.com>
(cherry picked from commit 807c849431)
2022-09-08 13:50:19 +02:00
CrazyMax
8ca74127d9 ci(test): upload reports
Signed-off-by: CrazyMax <crazy-max@users.noreply.github.com>
(cherry picked from commit 4c8af0e2f9)
2022-09-08 13:50:19 +02:00
CrazyMax
fc2942d4e0 integration-cli: TestPluginInstallImage broken on GitHub Runner
Signed-off-by: CrazyMax <crazy-max@users.noreply.github.com>
(cherry picked from commit 60864229b8)
2022-09-08 13:50:19 +02:00
CrazyMax
874954d8bd integration-cli: TestEventsOOM* broken on GitHub Runner
Signed-off-by: CrazyMax <crazy-max@users.noreply.github.com>
(cherry picked from commit b8bf60c590)
2022-09-08 13:50:19 +02:00
CrazyMax
0bfb1bded3 integration: TestNetworkLoopbackNat is broken on GitHub Runner
Signed-off-by: CrazyMax <crazy-max@users.noreply.github.com>
(cherry picked from commit df731c745a)
2022-09-08 13:50:18 +02:00
CrazyMax
4765040aa3 ci: gha test workflow for integration and unit test
Signed-off-by: CrazyMax <crazy-max@users.noreply.github.com>
(cherry picked from commit 2e04be3fb9)
2022-09-08 13:50:18 +02:00
Sebastiaan van Stijn
de0300b1c6 Merge pull request #44095 from thaJeztah/22.06_fix_linting_issues
[22.06 backport] fix various linting issues in preparation of golangci-lint update
2022-09-08 00:00:08 +02:00
Samuel Karp
4807ef2af0 Merge pull request #44085 from thaJeztah/22.06_backport_bump_go_1.19 2022-09-07 14:25:02 -07:00
Sebastiaan van Stijn
c853881610 Update to go 1.19.1 to address CVE-2022-27664, CVE-2022-32190
From the mailing list:

We have just released Go versions 1.19.1 and 1.18.6, minor point releases.
These minor releases include 2 security fixes following the security policy:

- net/http: handle server errors after sending GOAWAY
  A closing HTTP/2 server connection could hang forever waiting for a clean
  shutdown that was preempted by a subsequent fatal error. This failure mode
  could be exploited to cause a denial of service.

  Thanks to Bahruz Jabiyev, Tommaso Innocenti, Anthony Gavazzi, Steven Sprecher,
  and Kaan Onarlioglu for reporting this.

  This is CVE-2022-27664 and Go issue https://go.dev/issue/54658.

- net/url: JoinPath does not strip relative path components in all circumstances
  JoinPath and URL.JoinPath would not remove `../` path components appended to a
  relative path. For example, `JoinPath("https://go.dev", "../go")` returned the
  URL `https://go.dev/../go`, despite the JoinPath documentation stating that
  `../` path elements are cleaned from the result.

  Thanks to q0jt for reporting this issue.

  This is CVE-2022-32190 and Go issue https://go.dev/issue/54385.

Release notes:

go1.19.1 (released 2022-09-06) includes security fixes to the net/http and
net/url packages, as well as bug fixes to the compiler, the go command, the pprof
command, the linker, the runtime, and the crypto/tls and crypto/x509 packages.
See the Go 1.19.1 milestone on the issue tracker for details.

https://github.com/golang/go/issues?q=milestone%3AGo1.19.1+label%3ACherryPickApproved

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 1eadbdd9fa)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-09-07 22:11:46 +02:00
Sebastiaan van Stijn
2450c5a46b update to golang 1.19
also ran gofmt with go1.19

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 58413c15cb)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-09-07 22:11:46 +02:00
Sebastiaan van Stijn
a490e68553 Merge pull request #44108 from rumpl/22.06-fix-local-context
[22.06 backport] fix local context
2022-09-07 22:07:15 +02:00
Djordje Lukic
5d2b3687b0 Wrap local calls to the content and lease service
The wrapper sets the default namespace in the context if none is
provided; this is needed because we are calling these services directly
and not through gRPC, which has an interceptor that sets the default
namespace on all calls.
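
A minimal sketch of the wrapping pattern, assuming containerd's namespaces
package; the function name and the "moby" namespace below are illustrative,
not the actual code in this commit:

```go
package main

import (
	"context"
	"fmt"

	"github.com/containerd/containerd/namespaces"
)

// ensureNamespace returns a context that carries the given namespace,
// unless the caller already provided one.
func ensureNamespace(ctx context.Context, defaultNS string) context.Context {
	if _, ok := namespaces.Namespace(ctx); !ok {
		return namespaces.WithNamespace(ctx, defaultNS)
	}
	return ctx
}

func main() {
	ctx := ensureNamespace(context.Background(), "moby")
	ns, _ := namespaces.Namespace(ctx)
	fmt.Println(ns) // prints: moby
}
```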

Signed-off-by: Djordje Lukic <djordje.lukic@docker.com>
(cherry picked from commit 878906630b)
Signed-off-by: Djordje Lukic <djordje.lukic@docker.com>
2022-09-07 15:07:16 +02:00
Samuel Karp
d7e36c99fb Merge pull request #44100 from thaJeztah/22.06_backport_bump_golang_1.18.6 2022-09-06 21:43:55 -07:00
Samuel Karp
1249d36bdd Merge pull request #44103 from thaJeztah/22.06_backport_bump_x_net 2022-09-06 20:50:29 -07:00
Sebastiaan van Stijn
287d1656de vendor: golang.org/x/net v0.0.0-20220906165146-f3363e06e74c
Update to the latest version that contains a fix for CVE-2022-27664;
f3363e06e7

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 518179f63e)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-09-06 23:19:13 +02:00
Sebastiaan van Stijn
39976cd2bf Update to go 1.18.6 to address CVE-2022-27664, CVE-2022-32190
From the mailing list:

We have just released Go versions 1.19.1 and 1.18.6, minor point releases.
These minor releases include 2 security fixes following the security policy:

- net/http: handle server errors after sending GOAWAY
  A closing HTTP/2 server connection could hang forever waiting for a clean
  shutdown that was preempted by a subsequent fatal error. This failure mode
  could be exploited to cause a denial of service.

  Thanks to Bahruz Jabiyev, Tommaso Innocenti, Anthony Gavazzi, Steven Sprecher,
  and Kaan Onarlioglu for reporting this.

  This is CVE-2022-27664 and Go issue https://go.dev/issue/54658.

- net/url: JoinPath does not strip relative path components in all circumstances
  JoinPath and URL.JoinPath would not remove `../` path components appended to a
  relative path. For example, `JoinPath("https://go.dev", "../go")` returned the
  URL `https://go.dev/../go`, despite the JoinPath documentation stating that
  `../` path elements are cleaned from the result.

  Thanks to q0jt for reporting this issue.

  This is CVE-2022-32190 and Go issue https://go.dev/issue/54385.

Release notes:

go1.18.6 (released 2022-09-06) includes security fixes to the net/http package,
as well as bug fixes to the compiler, the go command, the pprof command, the
runtime, and the crypto/tls, encoding/xml, and net packages. See the Go 1.18.6
milestone on the issue tracker for details;

https://github.com/golang/go/issues?q=milestone%3AGo1.18.6+label%3ACherryPickApproved

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit cba36a064d)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-09-06 22:26:19 +02:00
Sebastiaan van Stijn
85f1b6ff8f Merge pull request #44094 from thaJeztah/22.06_backport_remove_tereshkova
[22.06] cleanup namesgenerator
2022-09-06 21:58:09 +02:00
Sebastiaan van Stijn
1650fa8889 vendor: github.com/docker/go-units v0.5.0
full diff: https://github.com/docker/go-units/compare/v0.4.0...v0.5.0

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 13f99eb65f)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-09-06 16:04:56 +02:00
Sebastiaan van Stijn
e9e7491f2b linting: host:port in url should be constructed with net.JoinHostPort
integration-cli/docker_cli_daemon_test.go:545:54: host:port in url should be constructed with net.JoinHostPort and not directly with fmt.Sprintf (nosprintfhostport)
            cmdArgs = append(cmdArgs, "--tls=false", "--host", fmt.Sprintf("tcp://%s:%s", l.daemon, l.port))
                                                               ^
    opts/hosts_test.go:35:31: host:port in url should be constructed with net.JoinHostPort and not directly with fmt.Sprintf (nosprintfhostport)
            "tcp://:5555":              fmt.Sprintf("tcp://%s:5555", DefaultHTTPHost),
                                        ^
    opts/hosts_test.go:91:30: host:port in url should be constructed with net.JoinHostPort and not directly with fmt.Sprintf (nosprintfhostport)
            ":5555":                   fmt.Sprintf("tcp://%s:5555", DefaultHTTPHost),
                                       ^
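
For illustration, the fix pattern the linter asks for (values are made up):
net.JoinHostPort brackets IPv6 addresses correctly, which fmt.Sprintf does not.

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// fmt.Sprintf("tcp://%s:%s", host, port) breaks for IPv6 hosts;
	// net.JoinHostPort adds the required brackets.
	fmt.Println("tcp://" + net.JoinHostPort("127.0.0.1", "5555")) // tcp://127.0.0.1:5555
	fmt.Println("tcp://" + net.JoinHostPort("::1", "5555"))       // tcp://[::1]:5555
}
```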

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 306b8c89e8)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-09-06 15:11:42 +02:00
Sebastiaan van Stijn
2609d4e252 linting: gosec: fix or suppress G112, G114 in test code
Updating test-code only; set ReadHeaderTimeout for some, or suppress the linter
error for others.

     contrib/httpserver/server.go:11:12: G114: Use of net/http serve function that has no support for setting timeouts (gosec)
        log.Panic(http.ListenAndServe(":80", nil))
                  ^
     integration/plugin/logging/cmd/close_on_start/main.go:42:12: G112: Potential Slowloris Attack because ReadHeaderTimeout is not configured in the http.Server (gosec)
        server := http.Server{
            Addr:    l.Addr().String(),
            Handler: mux,
        }
     integration/plugin/logging/cmd/discard/main.go:17:12: G112: Potential Slowloris Attack because ReadHeaderTimeout is not configured in the http.Server (gosec)
        server := http.Server{
            Addr:    l.Addr().String(),
            Handler: mux,
        }
     integration/plugin/logging/cmd/dummy/main.go:14:12: G112: Potential Slowloris Attack because ReadHeaderTimeout is not configured in the http.Server (gosec)
        server := http.Server{
            Addr:    l.Addr().String(),
            Handler: http.NewServeMux(),
        }
     integration/plugin/volumes/cmd/dummy/main.go:14:12: G112: Potential Slowloris Attack because ReadHeaderTimeout is not configured in the http.Server (gosec)
        server := http.Server{
            Addr:    l.Addr().String(),
            Handler: http.NewServeMux(),
        }
     testutil/fixtures/plugin/basic/basic.go:25:12: G112: Potential Slowloris Attack because ReadHeaderTimeout is not configured in the http.Server (gosec)
        server := http.Server{
            Addr:    l.Addr().String(),
            Handler: http.NewServeMux(),
        }
     volume/testutils/testutils.go:170:5: G114: Use of net/http serve function that has no support for setting timeouts (gosec)
        go http.Serve(l, mux)
           ^
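
A sketch of the G112 fix shape used in the test helpers; the listener, mux,
and the 5-second value here are illustrative:

```go
package testserver

import (
	"net"
	"net/http"
	"time"
)

// serve sets ReadHeaderTimeout so a slow client cannot hold the connection
// open indefinitely while sending headers (addresses gosec G112).
func serve(l net.Listener, mux *http.ServeMux) error {
	server := http.Server{
		Addr:              l.Addr().String(),
		Handler:           mux,
		ReadHeaderTimeout: 5 * time.Second,
	}
	return server.Serve(l)
}
```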

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 31fb92c609)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-09-06 15:11:42 +02:00
Sebastiaan van Stijn
188c5d4a7c linting: suppress false positive for G404 (gosec)
The linter falsely detects this as using "math/rand":

    libnetwork/networkdb/cluster.go:721:14: G404: Use of weak random number generator (math/rand instead of crypto/rand) (gosec)
       val, err := rand.Int(rand.Reader, big.NewInt(int64(n)))
                   ^
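
For reference, the flagged line uses crypto/rand, so the finding is a false
positive; a suppression would look roughly like this (names are illustrative,
not the exact libnetwork code):

```go
package cluster

import (
	"crypto/rand"
	"math/big"
)

// randomIndex picks a random index in [0, n) using crypto/rand; the
// gosec G404 warning is a false positive triggered by the package name.
func randomIndex(n int) (int64, error) {
	val, err := rand.Int(rand.Reader, big.NewInt(int64(n))) //nolint:gosec
	if err != nil {
		return 0, err
	}
	return val.Int64(), nil
}
```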

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 561a010161)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-09-06 15:11:42 +02:00
Sebastiaan van Stijn
ff4ec67b90 libnetwork/diagnostic: EnableDiagnostic(): use net.JoinHostPort
Use net.JoinHostPort to account for IPv6 addresses.

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit a33d1f9a7c)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-09-06 15:11:39 +02:00
Sebastiaan van Stijn
fee68df273 namesgenerator: remove Valentina Tereshkova
While the name generator has been frozen for new additions in 624b3cfbe8,
this person has become controversial. Our intent is for this list to be inclusive
and non-controversial.

This patch removes the name from the list.

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 0f052eb4f5)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-09-06 13:58:28 +02:00
Sebastiaan van Stijn
b5a0d7a188 Merge pull request #44065 from thaJeztah/22.06_backport_test_updates
[22.06 backport] assorted CI changes
2022-08-31 15:15:23 +02:00
CrazyMax
f7cf9fbe48 ci: move buildkit tests to a dedicated workflow
Signed-off-by: CrazyMax <crazy-max@users.noreply.github.com>
(cherry picked from commit a4d081cc17)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-08-31 11:57:25 +02:00
CrazyMax
ee87eaf9ad ci(windows): move windows-2019 to another workflow
Signed-off-by: CrazyMax <crazy-max@users.noreply.github.com>
(cherry picked from commit 65fdd10d4e)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-08-31 11:57:23 +02:00
Ben Langfeld
09a0b0a84a Upgrades buildx to 0.9.1
v0.9.0 included regressions. Release notes: https://github.com/docker/buildx/releases/tag/v0.9.1

Signed-off-by: Ben Langfeld <blangfeld@powerhrg.com>
(cherry picked from commit 5dcaad0dd3)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-08-31 11:57:21 +02:00
Sebastiaan van Stijn
8e6ed32610 Makefile: update buildx to v0.9.0
release notes: https://github.com/docker/buildx/releases/tag/v0.9.0

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit f27b74f0f7)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-08-31 11:57:18 +02:00
Sebastiaan van Stijn
dfd2f917dc Merge pull request #44055 from thaJeztah/22.06_backport_migrate_sequential
[22.06 backport] replace pkg/system Sequential funcs with moby/sys/sequential
2022-08-30 23:17:23 +02:00
Sebastiaan van Stijn
4f1dd92056 Merge pull request #44049 from thaJeztah/22.06_backport_validate_yaml
[22.06 backport] validate: add additional validation on YAML files
2022-08-30 19:35:22 +02:00
Sebastiaan van Stijn
f10c50958c replace pkg/system Sequential funcs with moby/sys/sequential
Migrating these functions to allow them being shared between moby, docker/cli,
and containerd, and to allow using them without importing all of sys / system,
which (in containerd) also depends on hcsshim and more.

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 509f19f611)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-08-30 09:34:33 +02:00
Sebastiaan van Stijn
40515da6d6 pkg/system: make IsAbs() platform-agnostic
filepath.IsAbs() will short-circuit on Linux/Unix, so having a single
implementation should not affect those platforms.

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 2640aec0d7)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-08-29 23:19:53 +02:00
Sebastiaan van Stijn
3b9370fcf8 validate: address SC2155 (shellcheck)
see https://github.com/koalaman/shellcheck/wiki/SC2155

Looking at how these were used, I don't think we even need to
export them, so removing that.

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 5cfc9c374c)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-08-29 19:54:57 +02:00
Sebastiaan van Stijn
51bf7da729 validate: format vendor script with shfmt
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit b9fd2cf605)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-08-29 19:54:56 +02:00
Sebastiaan van Stijn
f1bd611d41 validate: add yamllint validation
validate other YAML files, such as the ones used in the documentation
and GitHub Actions workflows, to prevent issues such as:

- 30295c1750
- 8e8d9a3650

With this patch:

    hack/validate/yamllint
    Congratulations! yamllint config file formatted correctly
    Congratulations! YAML files are formatted correctly

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 6cef06b940)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-08-29 19:54:54 +02:00
Sebastiaan van Stijn
e9f7c05ae1 validate: yamllint: ignore "truthy value should be one of" warnings
Suppresses warnings like:

    LANG=C.UTF-8 yamllint -c hack/validate/yamllint.yaml -f parsable .github/workflows/*.yml
    .github/workflows/ci.yml:7:1: [warning] truthy value should be one of [false, true] (truthy)
    .github/workflows/windows.yml:7:1: [warning] truthy value should be one of [false, true] (truthy)

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 91bb776bb8)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-08-29 19:54:52 +02:00
Sebastiaan van Stijn
72156dd7a4 validate: yamllint: set locale in config file
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit cc2134ea83)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-08-29 19:54:50 +02:00
Sebastiaan van Stijn
554a933944 validate: yamllint: use "parsable" output
Before:

    10030:81  error    line too long (89 > 80 characters)  (line-length)

After:

    api/swagger.yaml:10030:81: [error] line too long (89 > 80 characters) (line-length)

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit f679d8c821)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-08-29 19:54:48 +02:00
Sebastiaan van Stijn
8d43d7fa6b validate: yamllint rename config-file
Don't make the file hidden, and add .yaml extension, so that editors
pick up the right formatting :)

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 5f114b65b4)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-08-29 19:54:46 +02:00
Sebastiaan van Stijn
a61b411ceb Dockerfile: update yamllint to v1.27.1
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 1d7cd76ee9)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-08-29 19:54:43 +02:00
Sebastiaan van Stijn
d2590dc3cd Merge pull request #44039 from thaJeztah/22.06_backport_update_runc_1.1.4
[22.06 backport] update runc to v1.1.4
2022-08-26 13:29:28 +02:00
Sebastiaan van Stijn
274316f89e Merge pull request #44042 from thaJeztah/22.06_backport_containerd_binary_1.6.8
[22.06 backport] update containerd binary to v1.6.8
2022-08-26 13:07:31 +02:00
Sebastiaan van Stijn
e3e3a31989 Merge pull request #44046 from thaJeztah/22.06_backport_fix_ci_workflow
[22.06 backport] ci: fix broken workflow
2022-08-26 10:39:37 +02:00
CrazyMax
704e7a2d71 ci: fix broken workflow
Signed-off-by: CrazyMax <crazy-max@users.noreply.github.com>
(cherry picked from commit 8e8d9a3650)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-08-26 08:27:00 +02:00
Sebastiaan van Stijn
87b7e40a34 update containerd binary to v1.6.8
release notes: https://github.com/containerd/containerd/releases/tag/v1.6.8

full diff: https://github.com/containerd/containerd/compare/v1.6.7...v1.6.8

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit d52ffce38f)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-08-26 00:34:11 +02:00
Akihiro Suda
901fb577cb update runc to v1.1.4
release notes: https://github.com/opencontainers/runc/releases/tag/v1.1.4

full diff: https://github.com/opencontainers/runc/compare/v1.1.3...v1.1.4

Signed-off-by: Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit bd98bf38e9)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-08-26 00:26:42 +02:00
Sebastiaan van Stijn
fc8b388eac Merge pull request #44029 from thaJeztah/22.06_backport_bump_buildkit
[22.06 backport] vendor: github.com/moby/buildkit v0.10.4
2022-08-26 00:13:00 +02:00
Sebastiaan van Stijn
9aeda305fd vendor: github.com/moby/buildkit v0.10.4
release notes: https://github.com/moby/buildkit/releases/tag/v0.10.4

full diff: https://github.com/moby/buildkit/compare/8e2d9b9006ca...v0.10.4

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit c500d8824d)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-08-25 21:33:53 +02:00
Sebastiaan van Stijn
48e314fbe2 gha: temporarily pin BuildKit integration test version
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 6217f8001e)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-08-25 21:33:51 +02:00
Samuel Karp
29c636bf80 Merge pull request #44022 from thaJeztah/22.06_backport_client_remove_withdialer 2022-08-25 10:35:34 -07:00
Sebastiaan van Stijn
64b0b54fc8 Merge pull request #44027 from thaJeztah/22.06_backport_libnetwork_ipvlan_fix
[22.06 backport] libnetwork/drivers/ipvlan: fix missing IpvlanFlag field in config JSON
2022-08-24 16:56:36 +02:00
Sebastiaan van Stijn
e8d00f02aa Merge pull request #44024 from thaJeztah/22.06_backport_vendor_containerd_1.6.8
[22.06 backport] vendor: github.com/containerd/containerd v1.6.8
2022-08-24 13:50:39 +02:00
Sebastiaan van Stijn
7b086898ee Merge pull request #44025 from thaJeztah/22.06_backport_testfix_TestNetworkDBNodeJoinLeaveIteration
[22.06 backport] Test: wait for network changes in TestNetworkDBNodeJoinLeaveIteration
2022-08-24 13:49:35 +02:00
Youfu Zhang
292d352ee4 libnetwork/drivers/ipvlan: fix missing IpvlanFlag field in config JSON
Fixes #42542

Signed-off-by: Youfu Zhang <zhangyoufu@gmail.com>
(cherry picked from commit 549d24b437)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-08-24 13:46:31 +02:00
David Wang
2293a20972 Test: wait for network changes in TestNetworkDBNodeJoinLeaveIteration
In the network node change test, the expected behavior is focused on how many
nodes are left in networkDB. Besides timing issues, things also get tricky for a
leave-then-join sequence: if the check (counting the nodes) happens before the
first "leave" event, the test case misses its target and reports PASS without
verifying the final result; if the check happens after the 'leave' event but
before the 'join' event, the test reports FAIL unnecessarily.

This change checks both the db changes and the node count, and reports PASS
only when networkDB has indeed changed and the node count is as expected.

Signed-off-by: David Wang <00107082@163.com>
(cherry picked from commit f499c6b9ec)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-08-24 01:45:06 +02:00
Sebastiaan van Stijn
76fa56b62d vendor: github.com/containerd/containerd v1.6.8
no code changes, other than a version bump

full diff: https://github.com/containerd/containerd/compare/v1.6.7...v1.6.8

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit ca52e0a244)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-08-24 01:42:55 +02:00
Sebastiaan van Stijn
e5958a8f08 client: remove deprecated WithDialer() option
It was deprecated in edac92409a, which
was part of 18.09 and up, so it should be safe to remove by now.
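
A hedged example of the replacement: client.WithDialContext covers the
custom-dialer use case that the removed WithDialer() option served (the socket
path and dialing logic below are just examples):

```go
package example

import (
	"context"
	"net"

	"github.com/docker/docker/client"
)

func newClient() (*client.Client, error) {
	dialer := &net.Dialer{}
	return client.NewClientWithOpts(
		client.WithHost("unix:///var/run/docker.sock"),
		client.WithDialContext(func(ctx context.Context, _, _ string) (net.Conn, error) {
			// Custom dialing logic goes here; this simply dials the socket.
			return dialer.DialContext(ctx, "unix", "/var/run/docker.sock")
		}),
	)
}
```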

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit e14924570c)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-08-24 01:36:57 +02:00
Sebastiaan van Stijn
2dc3e510d4 Merge pull request #43994 from corhere/backport-22.06/healthcheck_timeout
[v22.06 backport] don't use canceled context to send KILL signal to healthcheck process
2022-08-24 01:35:09 +02:00
Cory Snider
e7f4963e73 daemon: kill exec process on ctx cancel
Terminating the exec process when the context is canceled has been
broken since Docker v17.11 so nobody has been able to depend upon that
behaviour in five years of releases. We are thus free from backwards-
compatibility constraints.

Co-authored-by: Nicolas De Loof <nicolas.deloof@gmail.com>
Co-authored-by: Sebastiaan van Stijn <github@gone.nl>
Signed-off-by: Nicolas De Loof <nicolas.deloof@gmail.com>
Signed-off-by: Cory Snider <csnider@mirantis.com>
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 4b84a33217)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-08-23 22:09:40 +02:00
Sebastiaan van Stijn
629397f70e Merge pull request #43992 from neersighted/22.06_builder_version
[22.06 backport] api: set default "Builder-Version" to "2" (BuildKit) on Linux
2022-08-18 23:01:12 +02:00
Sebastiaan van Stijn
1e6029e81e Merge pull request #43993 from cpuguy83/22.06_backport_43978_default_runtime
[22.06] Allow containerd shim refs in default-runtime
2022-08-18 23:00:34 +02:00
Sebastiaan van Stijn
2a33c73574 Merge pull request #43988 from thaJeztah/22.06_backport_seccomp_bpfcap
[22.06 backport] seccomp: allow "bpf", "perf_event_open", gated by CAP_BPF, CAP_PERFMON
2022-08-18 22:57:41 +02:00
Sebastiaan van Stijn
4bf8eec265 Merge pull request #43985 from neersighted/22.06_bump_memberlist
[22.06] vendor: bump memberlist
2022-08-18 20:51:48 +02:00
Brian Goff
dfcb3e17ae Allow containerd shim refs in default-runtime
Since runtimes can now just be containerd shims, we need to check if the
reference is possibly a containerd shim.

Signed-off-by: Brian Goff <cpuguy83@gmail.com>
(cherry picked from commit e6ee27a541)
Signed-off-by: Brian Goff <cpuguy83@gmail.com>
2022-08-18 18:51:23 +00:00
Sebastiaan van Stijn
8e9684c029 Merge pull request #43990 from thaJeztah/22.06_backport_deprecate_graph
[22.06 backport] daemon: complete the "--graph" / "-g" deprecation
2022-08-18 20:30:24 +02:00
Sebastiaan van Stijn
2c17e9a333 api: set default "Builder-Version" to "2" (BuildKit) on Linux
Starting with the 22.06 release, buildx is the default client for
docker build, which uses BuildKit as builder.

This patch changes the default builder version as advertised by
the daemon to "2" (BuildKit), so that pre-22.06 CLIs with BuildKit
support (but no buildx installed) also default to using BuildKit
when interacting with a 22.06 (or up) daemon.
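
As a hedged illustration, an API client learns the advertised default builder
from the _ping response; the snippet assumes the Go client and api/types
packages:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}
	ping, err := cli.Ping(context.Background())
	if err != nil {
		panic(err)
	}
	if ping.BuilderVersion == types.BuilderBuildKit { // "2"
		fmt.Println("daemon advertises BuildKit as the default builder")
	}
}
```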

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-08-18 12:18:32 -06:00
Sebastiaan van Stijn
d1d9fd50c2 daemon: complete the "--graph" / "-g" deprecation
The `-g` / `--graph` options were soft-deprecated in favor of `--data-root` in
261ef1fa27 (v17.05.0) and, at the time, were not
intended to be removed. However, with the move towards containerd snapshotters,
keeping these options around adds additional complexity to handle fallbacks for
deprecated (and hidden) flags, so this completes the deprecation.

With this patch:

    dockerd --graph=/var/lib/docker --validate
    Flag --graph has been deprecated, Use --data-root instead
    unable to configure the Docker daemon with file /etc/docker/daemon.json: merged configuration validation from file and command line flags failed: the "graph" config file option is deprecated; use "data-root" instead

    mkdir -p /etc/docker
    echo '{"graph":"/var/lib/docker"}' > /etc/docker/daemon.json

    dockerd --validate
    unable to configure the Docker daemon with file /etc/docker/daemon.json: merged configuration validation from file and command line flags failed: the "graph" config file option is deprecated; use "data-root" instead

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit b58de39ca7)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-08-18 18:44:17 +02:00
Sebastiaan van Stijn
8912c1fade seccomp: allow "bpf", "perf_event_open", gated by CAP_BPF, CAP_PERFMON
Update the profile to make use of CAP_BPF and CAP_PERFMON capabilities. Prior to
kernel 5.8, bpf and perf_event_open required CAP_SYS_ADMIN. This change enables
finer control of the privilege setting, thus allowing us to run certain system
tracing tools with minimal privileges.

Based on the original patch from Henry Wang in the containerd repository.

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 7b7d1132e8)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-08-18 18:36:49 +02:00
Bjorn Neergaard
332de3f1e3 vendor: github.com/hashicorp/memberlist v0.4.0
Signed-off-by: Bjorn Neergaard <bneergaard@mirantis.com>
2022-08-18 09:50:59 -06:00
Akihiro Suda
2160f0041d Merge pull request #43923 from crazy-max/22.06_vendor-buildkit
[22.06 backport] vendor buildkit 8e2d9b9 (v0.10 branch)
2022-08-15 02:32:59 +09:00
Sebastiaan van Stijn
3254fa3b50 Merge pull request #43938 from thaJeztah/22.06_backport_bump_swarmkit3
[22.06 backport] vendor: github.com/moby/swarmkit/v2 v2.0.0-20220721174824-48dd89375d0a, change "csi" to "cluster"
2022-08-11 21:30:49 +02:00
Sebastiaan van Stijn
b73c27ef6b vendor: github.com/moby/swarmkit/v2 v2.0.0-20220721174824-48dd89375d0a
full diff: 6068d1894d...48dd89375d

Finishes off the work to change references to cluster volumes in the API
from using "csi" as the magic word to "cluster". This reflects that the
volumes are "cluster volumes", not "csi volumes".

Notably, there is no change to the plugin definitions being "csinode"
and "csicontroller". This terminology is appropriate with regards to
plugins because it accurates reflects what the plugin is.

Signed-off-by: Drew Erny <derny@mirantis.com>
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 9861dd069b)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-08-09 14:05:30 +02:00
Sebastiaan van Stijn
ec89e7cde1 Merge pull request #43932 from thaJeztah/22.06_backport_bump_containerd_1.6.7_vendor
[22.06 backport] vendor: github.com/containerd/containerd v1.6.7
2022-08-08 18:51:46 +02:00
Sebastiaan van Stijn
15f9cb5c4d vendor: github.com/containerd/containerd v1.6.7
full diff: https://github.com/containerd/containerd/v1.6.6...v1.6.7

Welcome to the v1.6.7 release of containerd!

The seventh patch release for containerd 1.6 contains various fixes,
includes a new version of runc and adds support for ppc64le and riscv64
(requires unreleased runc 1.2) builds.

Notable Updates

- Update runc to v1.1.3
- Seccomp: Allow clock_settime64 with CAP_SYS_TIME
- Fix WWW-Authenticate parsing
- Support RISC-V 64 and ppc64le builds
- Windows: Update hcsshim to v0.9.4 to fix regression with HostProcess stats
- Windows: Fix shim logs going to panic.log file
- Allow ptrace(2) by default for kernels >= 4.8

See the changelog for complete list of changes

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 7376bf948b)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-08-08 11:32:26 +02:00
Tianon Gravi
ebfc35f887 Merge pull request #43915 from thaJeztah/22.06_backport_vendor_hcsshim
[22.06 backport] vendor: github.com/Microsoft/hcsshim v0.9.4
2022-08-05 15:09:34 -07:00
Tianon Gravi
f47d5ced16 Merge pull request #43919 from thaJeztah/22.06_backport_bump_containerd_1.6.7_binary
[22.06 backport] update containerd binary to v1.6.7, runc to v1.1.3
2022-08-05 15:09:16 -07:00
CrazyMax
6c78a1166e vendor buildkit 8e2d9b9 (v0.10 branch)
Signed-off-by: CrazyMax <crazy-max@users.noreply.github.com>
(cherry picked from commit e05f614267)
2022-08-05 14:57:32 +02:00
Sebastiaan van Stijn
8ae63006f1 update containerd binary to v1.6.7
full diff: https://github.com/containerd/containerd/v1.6.6...v1.6.7

Welcome to the v1.6.7 release of containerd!

The seventh patch release for containerd 1.6 contains various fixes,
includes a new version of runc and adds support for ppc64le and riscv64
(requires unreleased runc 1.2) builds.

Notable Updates

- Update runc to v1.1.3
- Seccomp: Allow clock_settime64 with CAP_SYS_TIME
- Fix WWW-Authenticate parsing
- Support RISC-V 64 and ppc64le builds
- Windows: Update hcsshim to v0.9.4 to fix regression with HostProcess stats
- Windows: Fix shim logs going to panic.log file
- Allow ptrace(2) by default for kernels >= 4.8

See the changelog for complete list of changes

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 4e46d9f963)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-08-05 00:26:02 +02:00
Sebastiaan van Stijn
aeb600bc4a update runc binary to v1.1.3
full diff: https://github.com/opencontainers/runc/compare/v1.1.2...v1.1.3

This is the third release of the 1.1.z series of runc, and contains
various minor improvements and bugfixes.

- Our seccomp `-ENOSYS` stub now correctly handles multiplexed syscalls on
  s390 and s390x. This solves the issue where syscalls the host kernel did not
  support would return `-EPERM` despite the existence of the `-ENOSYS` stub
  code (this was due to how s390x does syscall multiplexing).
- Retry on dbus disconnect logic in libcontainer/cgroups/systemd now works as
  intended; this fix does not affect runc binary itself but is important for
  libcontainer users such as Kubernetes.
- Inability to compile with recent clang due to an issue with duplicate
  constants in libseccomp-golang.
- When using systemd cgroup driver, skip adding device paths that don't exist,
  to stop systemd from emitting warnings about those paths.
- Socket activation was failing when more than 3 sockets were used.
- Various CI fixes.
- Allow to bind mount `/proc/sys/kernel/ns_last_pid` to inside container.
- runc static binaries are now linked against libseccomp v2.5.4.

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 2293de1c82)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-08-05 00:26:00 +02:00
Paweł Gronowski
e0d8418ddc vendor: github.com/Microsoft/hcsshim v0.9.4
full diff: https://github.com/microsoft/hcsshim/compare/v0.9.3...v0.9.4

Changes are mostly fixes of unsafe usage of `unsafe.Pointer`

Signed-off-by: Paweł Gronowski <pawel.gronowski@docker.com>
(cherry picked from commit 69f077f1aa)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-08-04 22:43:56 +02:00
Sebastiaan van Stijn
e6a5f44e61 Merge pull request #43914 from thaJeztah/22.06_backport_fix_api_swagger
[22.06 backport] api: swagger: fix invalid example value (API v1.39-v1.41)
2022-08-04 21:02:55 +02:00
Sebastiaan van Stijn
7130076488 Merge pull request #43890 from thaJeztah/22.06_backport_api_fix_missing_platform
[22.06 backport] api: swagger: add missing "platform" query-arg on create
2022-08-04 21:00:55 +02:00
Sebastiaan van Stijn
0133759476 api: swagger: fix invalid example value (API v1.39-v1.41)
This was introduced in 43956c1bfc

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 30295c1750)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-08-04 17:55:21 +02:00
Sebastiaan van Stijn
86839c826f Merge pull request #43908 from thaJeztah/22.06_backport_deprecate_buildcache_parent
[22.06 backport] api: deprecate BuildCache.Parent, add BuildCache.Parents in API >= v1.42
2022-08-04 13:19:37 +02:00
Sebastiaan van Stijn
f93e0ef4d6 Merge pull request #43904 from thaJeztah/22.06_backport_bump_go_1.18.5
[22.06 backport] Update golang to 1.18.5
2022-08-04 11:27:00 +02:00
Sebastiaan van Stijn
572457e265 api: swagger: document BuildCache fields (API v1.39-v1.41)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 43956c1bfc)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-08-04 09:33:39 +02:00
Sebastiaan van Stijn
49377cdd63 api: swagger: document BuildCache fields (API v1.42)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 5371c889a8)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-08-04 09:33:37 +02:00
Sebastiaan van Stijn
910d5c44fc api: add BuildCache.Parents for API >= v1.42
This field was added to replace the deprecated "Parent" field.
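
A rough sketch of the resulting shape in api/types (field list abbreviated;
the JSON tags shown here are illustrative):

```go
package types

// BuildCache (abbreviated) after this change.
type BuildCache struct {
	ID      string
	Parent  string   `json:"Parent,omitempty"` // Deprecated: use Parents (API >= v1.42)
	Parents []string `json:"Parents,omitempty"`
	Type    string
	// ... remaining fields unchanged
}
```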

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit e0db8207f3)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-08-04 09:33:35 +02:00
Sebastiaan van Stijn
0e3d20cb20 api: deprecate BuildCache.Parent in API >= v1.42
This field has been deprecated in BuildKit, so this follows the deprecation
in the Engine API.

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit ebf339628a)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-08-04 09:33:33 +02:00
Sebastiaan van Stijn
a96b75191e api/types: add missing GoDoc for BuildCache fields.
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit e0286d7f4e)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-08-04 09:33:31 +02:00
Sebastiaan van Stijn
a285cd4d88 api: swagger: document BuildCache fields.
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit dc2b34af6a)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-08-04 09:33:28 +02:00
Sebastiaan van Stijn
4f057d8bb6 Merge pull request #43887 from thaJeztah/22.06_backport_implicit_runtime_config
[22.06 backport] daemon: support other containerd runtimes (MVP)
2022-08-03 23:55:51 +02:00
Sebastiaan van Stijn
1240460547 Update golang to 1.18.5
Update Go runtime to 1.18.5 to address CVE-2022-32189.

Full diff: https://github.com/golang/go/compare/go1.18.4...go1.18.5

--------------------------------------------------------

From the security announcement:
https://groups.google.com/g/golang-announce/c/YqYYG87xB10

We have just released Go versions 1.18.5 and 1.17.13, minor point
releases.

These minor releases include 1 security fixes following the security
policy:

encoding/gob & math/big: decoding big.Float and big.Rat can panic

Decoding big.Float and big.Rat types can panic if the encoded message is
too short.

This is CVE-2022-32189 and Go issue https://go.dev/issue/53871.

View the release notes for more information:
https://go.dev/doc/devel/release#go1.18.5

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit f1d71f7cc3)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-08-03 20:09:14 +02:00
Akihiro Suda
d9a6b805b3 Merge pull request #43884 from vvoland/fix-exitcode-wait-22.06
[22.06 backport] state/Wait: Fix race when reading exit status
2022-07-30 15:51:39 +09:00
Sebastiaan van Stijn
e88c28941f docs: api: add missing "platform" query-arg on create (v1.42)
Commit 7a9cb29fb9 added a new "platform" query-
parameter to the `POST /containers/create` endpoint, but did not update the
swagger file and documentation.

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 982f09f837)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-07-29 23:23:02 +02:00
Sebastiaan van Stijn
9c4984db6b docs: api: add missing "platform" query-arg on create (v1.41)
Commit 7a9cb29fb9 added a new "platform" query-
parameter to the `POST /containers/create` endpoint, but did not update the
swagger file and documentation.

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 1000e4ee7d)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-07-29 23:23:00 +02:00
Sebastiaan van Stijn
af7c8ff045 api: swagger: add missing "platform" query-arg on create
Commit 7a9cb29fb9 added a new "platform" query-
parameter to the `POST /containers/create` endpoint, but did not update the
swagger file and documentation.

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 3dae8e9fc2)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-07-29 23:22:58 +02:00
Cory Snider
6de52a29a8 daemon: support other containerd runtimes (MVP)
Contrary to popular belief, the OCI Runtime specification does not
specify the command-line API for runtimes. Looking at containerd's
architecture from the lens of the OCI Runtime spec, the _shim_ is the
OCI Runtime and runC is "just" an implementation detail of the
io.containerd.runc.v2 runtime. When one configures a non-default runtime
in Docker, what they're really doing is instructing Docker to create
containers using the io.containerd.runc.v2 runtime with a configuration
option telling the runtime that the runC binary is at some non-default
path. Consequently, only OCI runtimes which are compatible with the
io.containerd.runc.v2 shim, such as crun, can be used in this manner.
Other OCI runtimes, including kata-containers v2, come with their own
containerd shim and are not compatible with io.containerd.runc.v2.
As Docker has not historically provided a way to select a non-default
runtime which requires its own shim, runtimes such as kata-containers v2
could not be used with Docker.

Allow other containerd shims to be used with Docker; no daemon
configuration required. If the daemon is instructed to create a
container with a runtime name which does not match any of the configured
or stock runtimes, it passes the name along to containerd verbatim. A
user can start a container with the kata-containers runtime, for
example, simply by calling

    docker run --runtime io.containerd.kata.v2

Runtime names which containerd would interpret as a path to an arbitrary
binary are disallowed. While handy for development and testing it is not
strictly necessary and would allow anyone with Engine API access to
trivially execute any binary on the host as root, so we have decided it
would be safest for our users if it was not allowed.

It is not yet possible to set an alternative containerd shim as the
default runtime; it can only be configured per-container.

Signed-off-by: Cory Snider <csnider@mirantis.com>
(cherry picked from commit 547da0d575)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-07-29 20:36:50 +02:00
Sebastiaan van Stijn
ad0ee82f0d Merge pull request #43875 from thaJeztah/22.06_backport_42655_vfs_storage_driver
[22.06 backport] Fix file capabilities dropping in Dockerfile
2022-07-29 18:23:41 +02:00
Paweł Gronowski
85b9568d0e state/Wait: Fix race when reading exit status
Before this change there was a race condition between State.Wait reading
the exit code from State and the State being changed again immediately
after the transition that ended the State.Wait.

Now, each State.Wait has its own channel, which is used to transmit the
desired StateStatus at the time the state transitions to the awaited
one. Wait no longer reads the status by itself, so there is no race.

The issue caused `docker run --restart=always ...` to sometimes exit
with a 0 exit code, because the process had already been restarted by the
time State.Wait got the chance to read the exit code.
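
A minimal sketch of the pattern, with simplified types (illustrative, not the
actual container.State code):

```go
package container

import "sync"

// stateStatus is the snapshot handed to waiters.
type stateStatus struct {
	exitCode int
	err      error
}

type state struct {
	mu      sync.Mutex
	waiters []chan stateStatus
}

// wait registers a buffered per-waiter channel; the buffer guarantees the
// notifier never blocks and the waiter always sees the snapshot taken at
// transition time.
func (s *state) wait() <-chan stateStatus {
	s.mu.Lock()
	defer s.mu.Unlock()
	ch := make(chan stateStatus, 1)
	s.waiters = append(s.waiters, ch)
	return ch
}

// setStopped publishes the exit status captured under the lock, so a
// subsequent restart cannot change what the waiter observes.
func (s *state) setStopped(exitCode int) {
	s.mu.Lock()
	defer s.mu.Unlock()
	st := stateStatus{exitCode: exitCode}
	for _, ch := range s.waiters {
		ch <- st
	}
	s.waiters = nil
}
```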

Test run
--------
Before:
```
$ go test -count 1 -run TestCorrectStateWaitResultAfterRestart .
--- FAIL: TestCorrectStateWaitResultAfterRestart (0.00s)
    state_test.go:198: expected exit code 10, got 0
FAIL
FAIL    github.com/docker/docker/container      0.011s
FAIL

```

After:
```
$ go test -count 1 -run TestCorrectStateWaitResultAfterRestart .
ok      github.com/docker/docker/container      0.011s
```

Signed-off-by: Paweł Gronowski <pawel.gronowski@docker.com>
2022-07-29 16:49:56 +02:00
Paweł Gronowski
826003ecae integration: TestWaitRestartedContainer
Signed-off-by: Paweł Gronowski <pawel.gronowski@docker.com>
2022-07-29 16:49:56 +02:00
Paweł Gronowski
e2bd8edb0d daemon/restart: Don't mutate AutoRemove when restarting
This caused a race condition where AutoRemove could be restored before
container was considered for restart and made autoremove containers
impossible to restart.

```
$ make DOCKER_GRAPHDRIVER=vfs BIND_DIR=. TEST_FILTER='TestContainerWithAutoRemoveCanBeRestarted' TESTFLAGS='-test.count 1' test-integration
...
=== RUN   TestContainerWithAutoRemoveCanBeRestarted
=== RUN   TestContainerWithAutoRemoveCanBeRestarted/kill
=== RUN   TestContainerWithAutoRemoveCanBeRestarted/stop
--- PASS: TestContainerWithAutoRemoveCanBeRestarted (1.61s)
    --- PASS: TestContainerWithAutoRemoveCanBeRestarted/kill (0.70s)
    --- PASS: TestContainerWithAutoRemoveCanBeRestarted/stop (0.86s)
PASS

DONE 3 tests in 3.062s
```

Signed-off-by: Paweł Gronowski <pawel.gronowski@docker.com>
2022-07-29 16:49:56 +02:00
Paweł Gronowski
44fde1bdb7 integration: Add TestContainerWithAutoRemoveCanBeRestarted
Signed-off-by: Paweł Gronowski <pawel.gronowski@docker.com>
2022-07-29 16:49:56 +02:00
Sebastiaan van Stijn
d8f20bfdc1 Merge pull request #43878 from thaJeztah/22.06_backport_containerd_config_v2
[22.06 backport] libcontainerd: switch generated containerd.toml to v2 (v1 is deprecated)
2022-07-28 21:19:38 +02:00
Sebastiaan van Stijn
6ab3b50a3f libcontainerd: switch generated containerd.toml to v2 (v1 is deprecated)
Before this patch:

    INFO[2022-07-27T14:30:06.188762628Z] Starting up
    INFO[2022-07-27T14:30:06.190750725Z] libcontainerd: started new containerd process  pid=2028
    ...
    WARN[0000] containerd config version `1` has been deprecated and will be removed in containerd v2.0, please switch to version `2`, see https://github.com/containerd/containerd/blob/main/docs/PLUGINS.md#version-header
    INFO[2022-07-27T14:30:06.220024286Z] starting containerd                           revision=10c12954828e7c7c9b6e0ea9b0c02b01407d3ae1 version=v1.6.6

With this patch:

    INFO[2022-07-27T14:28:04.025543517Z] Starting up
    INFO[2022-07-27T14:28:04.027447105Z] libcontainerd: started new containerd process  pid=1377
    ...
    INFO[2022-07-27T14:28:04.054483270Z] starting containerd                           revision=10c12954828e7c7c9b6e0ea9b0c02b01407d3ae1 version=v1.6.6

And the generated /var/run/docker/containerd/containerd.toml:

```toml
disabled_plugins = ["io.containerd.grpc.v1.cri"]
imports = []
oom_score = 0
plugin_dir = ""
required_plugins = []
root = "/var/lib/docker/containerd/daemon"
state = "/var/run/docker/containerd/daemon"
temp = ""
version = 2

[cgroup]
  path = ""

[debug]
  address = "/var/run/docker/containerd/containerd-debug.sock"
  format = ""
  gid = 0
  level = "debug"
  uid = 0

[grpc]
  address = "/var/run/docker/containerd/containerd.sock"
  gid = 0
  max_recv_message_size = 16777216
  max_send_message_size = 16777216
  tcp_address = ""
  tcp_tls_ca = ""
  tcp_tls_cert = ""
  tcp_tls_key = ""
  uid = 0

[metrics]
  address = ""
  grpc_histogram = false

[plugins]

[proxy_plugins]

[stream_processors]

[timeouts]

[ttrpc]
  address = ""
  gid = 0
  uid = 0
```

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit ba2ff69894)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-07-28 16:45:26 +02:00
Illo Abdulrahim
6d41219bae Fix file capabilities dropping in Dockerfile
doCopyXattrs() was never reached because the copyXattrs boolean was false;
as a result, file capabilities were not being copied.

Moved copyXattr() out of doCopyXattrs().

Signed-off-by: Illo Abdulrahim <abdulrahim.illo@nokia.com>
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 31f654a704)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-07-28 09:39:21 +02:00
Sebastiaan van Stijn
dcbd68a1d4 Merge pull request #43858 from olljanat/22.06_backport_restore-custom-nat-networks
[22.06 backport] Windows: Re-create custom NAT networks after restart if missing from HNS
2022-07-25 12:23:14 +02:00
Olli Janatuinen
112fb22152 Windows: Re-create custom NAT networks after restart if missing from HNS
Signed-off-by: Olli Janatuinen <olli.janatuinen@gmail.com>
(cherry picked from commit 67c36d5)
Signed-off-by: Olli Janatuinen <olli.janatuinen@gmail.com>
2022-07-23 23:16:23 -07:00
Tianon Gravi
a60b458179 Merge pull request #43841 from thaJeztah/22.06_backport_journald_fix_break
[22.06 backport] logger/journald: fix SA4011: ineffective break statement
2022-07-20 10:13:42 -07:00
Sebastiaan van Stijn
a9081299dd logger/journald: fix SA4011: ineffective break statement
This was introduced in 906b979b88, which changed
a `goto` to a `break`, but afaics, the intent was still to break out of the loop.
(linter didn't catch this before because it didn't have the right build-tag set)

    daemon/logger/journald/read.go:238:4: SA4011: ineffective break statement. Did you mean to break out of the outer loop? (staticcheck)
                break // won't be able to write anything anymore
                ^
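
The usual shape of a fix for this class of warning is a labeled break,
sketched below (illustrative, not the actual journald reader code):

```go
package main

import "fmt"

func drain(events <-chan string, done <-chan struct{}) {
readLoop:
	for {
		select {
		case e := <-events:
			fmt.Println(e)
		case <-done:
			break readLoop // a bare "break" would only leave the select
		}
	}
}

func main() {
	done := make(chan struct{})
	close(done)
	drain(make(chan string), done)
}
```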

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 75577fe7a8)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-07-20 16:57:22 +02:00
Brian Goff
48a144954e Merge pull request #43812 from thaJeztah/22.06_backport_43481_support_pku
[22.06 backport] profiles: seccomp: add syscalls related to PKU in default policy
2022-07-18 15:51:39 -07:00
Sebastiaan van Stijn
c4c8a80958 Merge pull request #43813 from thaJeztah/22.06_backport_fix_43781
[22.06 backport] libnetwork: skip firewalld management for rootless
2022-07-18 09:48:12 +02:00
Sebastiaan van Stijn
1b928c1bd5 Merge pull request #43814 from thaJeztah/22.06_backport_gofmt_119_2
[22.06 backport] fix formatting of "nolint" tags for go1.19
2022-07-18 09:47:27 +02:00
Sebastiaan van Stijn
e34ab5200d fix formatting of "nolint" tags for go1.19
The correct formatting for machine-readable comments is;

    //<some alphanumeric identifier>:<options>[,<option>...][ // comment]

Which basically means:

- MUST NOT have a space before `<identifier>` (e.g. `nolint`)
- Identified MUST be alphanumeric
- MUST be followed by a colon
- MUST be followed by at least one `<option>`
- Optionally additional `<options>` (comma-separated)
- Optionally followed by a comment

Any other format will not be considered a machine-readable comment by `gofmt`,
and thus formatted as a regular comment. Note that this also means that a
`//nolint` (without anything after it) is considered invalid, same for `//#nosec`
(starts with a `#`).
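
For illustration, a recognized machine-readable comment next to one that gofmt
treats as a plain comment (the suppressed rule here is just an example):

```go
package main

import (
	"fmt"
	"math/rand"
)

func main() {
	n := rand.Intn(10) //nolint:gosec // recognized: no space, identifier, colon, option, then a comment
	// m := rand.Intn(10) // nolint:gosec  <- NOT machine-readable (space after "//"), formatted as a regular comment
	fmt.Println(n)
}
```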

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 4f08346686)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-07-15 13:45:13 +02:00
Akihiro Suda
863ca3f185 libnetwork: skip firewalld management for rootless
Fix issue 43781

Co-authored-by: Sebastiaan van Stijn <github@gone.nl>
Signed-off-by: Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
(cherry picked from commit 9464898b47)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-07-15 13:40:07 +02:00
zhubojun
edcc51cbee profiles: seccomp: add syscalls related to PKU in default policy
Add pkey_alloc(2), pkey_free(2) and pkey_mprotect(2) in seccomp default profile.
pkey_alloc(2), pkey_free(2) and pkey_mprotect(2) can only configure
the calling process's own memory, so they are existing "safe for everyone" syscalls.

close issue: #43481

Signed-off-by: zhubojun <bojun.zhu@foxmail.com>
(cherry picked from commit e258d66f17)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-07-15 09:19:57 +02:00
Sebastiaan van Stijn
6408132d74 Merge pull request #43808 from thaJeztah/22.06_backport_client_deadcode
[22.06 backport] client: errors: remove dead code
2022-07-14 22:02:30 +02:00
Sebastiaan van Stijn
d64dd71200 Merge pull request #43809 from thaJeztah/22.06_backport_api_docs
[22.06 backport] docs: add API v1.42
2022-07-14 22:00:20 +02:00
Brian Goff
e0ba440909 Merge pull request #43806 from thaJeztah/22.06_backport_fix_import
[22.06 backport] pkg/parsers/operatingsystem: fix stray import
2022-07-14 08:19:15 -07:00
Sebastiaan van Stijn
269e10a725 Merge pull request #43807 from thaJeztah/22.06_backport_gofmt_119
[22.06 backport] format (GoDoc) comments with Go 1.19 to prepare for future updates
2022-07-14 16:48:12 +02:00
Sebastiaan van Stijn
149b7e7f03 Merge pull request #43805 from thaJeztah/22.06_backport_bump_go_1.18.4
[22.06 backport] update golang to 1.18.4
2022-07-14 16:47:51 +02:00
Sebastiaan van Stijn
c51efa8617 docs: add API v1.42
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 264b41fb9e)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-07-13 22:49:14 +02:00
Sebastiaan van Stijn
52791b1c14 client: errors: remove dead code
- Update IsErrNotFound() to check for the current type before falling back to
  detecting the deprecated type.
- Remove unauthorizedError and notImplementedError types, which were not used.
- IsErrPluginPermissionDenied() was added in 7c36a1af03,
  but not used at the time, and still appears to be unused.
- Deprecate IsErrUnauthorized in favor of errdefs.IsUnauthorized()
- Deprecate IsErrNotImplemented in favor of errdefs.IsNotImplemented() (see the sketch below)
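
A hedged sketch of the errdefs-based checks that callers can use instead of
the deprecated client helpers (the container ID is made up):

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
	"github.com/docker/docker/errdefs"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}
	_, err = cli.ContainerInspect(context.Background(), "no-such-container")
	switch {
	case errdefs.IsNotFound(err):
		fmt.Println("container does not exist")
	case errdefs.IsUnauthorized(err):
		fmt.Println("authentication required")
	}
}
```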

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit ee230d8fdd)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-07-13 22:44:43 +02:00
Sebastiaan van Stijn
cdbca4061b gofmt GoDoc comments with go1.19
Older versions of Go don't format comments, so committing this as
a separate commit, so that we can already make these changes before
we upgrade to Go 1.19.

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 52c1a2fae8)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-07-13 22:42:29 +02:00
Sebastiaan van Stijn
c52e221207 gofmt files
result of:

    gofmt -s -w $(find . -type f -name '*.go' | grep -v "/vendor/")

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 6668801d40)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-07-13 22:42:26 +02:00
Sebastiaan van Stijn
e417e8dfc2 pkg/parsers/operatingsystem: fix stray import
This was caught by goimports;

    goimports -w $(find . -type f -name '*.go'| grep -v "/vendor/")

CI doesn't run on these platforms, so didn't catch it.

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit e4e819b49c)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-07-13 22:40:22 +02:00
Sebastiaan van Stijn
6905fe7488 update golang to 1.18.4
go1.18.4 (released 2022-07-12) includes security fixes to the compress/gzip,
encoding/gob, encoding/xml, go/parser, io/fs, net/http, and path/filepath
packages, as well as bug fixes to the compiler, the go command, the linker,
the runtime, and the runtime/metrics package. See the Go 1.18.4 milestone on the
issue tracker for details:

https://github.com/golang/go/issues?q=milestone%3AGo1.18.4+label%3ACherryPickApproved

This update addresses:

CVE-2022-1705, CVE-2022-1962, CVE-2022-28131, CVE-2022-30630, CVE-2022-30631,
CVE-2022-30632, CVE-2022-30633, CVE-2022-30635, and CVE-2022-32148.

Full diff: https://github.com/golang/go/compare/go1.18.3...go1.18.4

From the security announcement;
https://groups.google.com/g/golang-announce/c/nqrv9fbR0zE

We have just released Go versions 1.18.4 and 1.17.12, minor point releases. These
minor releases include 9 security fixes following the security policy:

- net/http: improper sanitization of Transfer-Encoding header

  The HTTP/1 client accepted some invalid Transfer-Encoding headers as indicating
  a "chunked" encoding. This could potentially allow for request smuggling, but
  only if combined with an intermediate server that also improperly failed to
  reject the header as invalid.

  This is CVE-2022-1705 and https://go.dev/issue/53188.

- When `httputil.ReverseProxy.ServeHTTP` was called with a `Request.Header` map
  containing a nil value for the X-Forwarded-For header, ReverseProxy would set
  the client IP as the value of the X-Forwarded-For header, contrary to its
  documentation. In the more usual case where a Director function set the
  X-Forwarded-For header value to nil, ReverseProxy would leave the header
  unmodified as expected.

  This is https://go.dev/issue/53423 and CVE-2022-32148.

  Thanks to Christian Mehlmauer for reporting this issue.

- compress/gzip: stack exhaustion in Reader.Read

  Calling Reader.Read on an archive containing a large number of concatenated
  0-length compressed files can cause a panic due to stack exhaustion.

  This is CVE-2022-30631 and Go issue https://go.dev/issue/53168.

- encoding/xml: stack exhaustion in Unmarshal

  Calling Unmarshal on a XML document into a Go struct which has a nested field
  that uses the any field tag can cause a panic due to stack exhaustion.

  This is CVE-2022-30633 and Go issue https://go.dev/issue/53611.

- encoding/xml: stack exhaustion in Decoder.Skip

  Calling Decoder.Skip when parsing a deeply nested XML document can cause a
  panic due to stack exhaustion. The Go Security team discovered this issue, and
  it was independently reported by Juho Nurminen of Mattermost.

  This is CVE-2022-28131 and Go issue https://go.dev/issue/53614.

- encoding/gob: stack exhaustion in Decoder.Decode

  Calling Decoder.Decode on a message which contains deeply nested structures
  can cause a panic due to stack exhaustion.

  This is CVE-2022-30635 and Go issue https://go.dev/issue/53615.

- path/filepath: stack exhaustion in Glob

  Calling Glob on a path which contains a large number of path separators can
  cause a panic due to stack exhaustion.

  Thanks to Juho Nurminen of Mattermost for reporting this issue.

  This is CVE-2022-30632 and Go issue https://go.dev/issue/53416.

- io/fs: stack exhaustion in Glob

  Calling Glob on a path which contains a large number of path separators can
  cause a panic due to stack exhaustion.

  This is CVE-2022-30630 and Go issue https://go.dev/issue/53415.

- go/parser: stack exhaustion in all Parse* functions

  Calling any of the Parse functions on Go source code which contains deeply
  nested types or declarations can cause a panic due to stack exhaustion.

  Thanks to Juho Nurminen of Mattermost for reporting this issue.

  This is CVE-2022-1962 and Go issue https://go.dev/issue/53616.

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 34b8670b1a)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2022-07-13 22:37:32 +02:00
9067 changed files with 453274 additions and 1067581 deletions


@@ -1,21 +0,0 @@
{
  "name": "moby",
  "build": {
    "context": "..",
    "dockerfile": "../Dockerfile",
    "target": "devcontainer"
  },
  "workspaceFolder": "/go/src/github.com/docker/docker",
  "workspaceMount": "source=${localWorkspaceFolder},target=/go/src/github.com/docker/docker,type=bind,consistency=cached",
  "remoteUser": "root",
  "runArgs": ["--privileged"],
  "customizations": {
    "vscode": {
      "extensions": [
        "golang.go"
      ]
    }
  }
}


@@ -1,6 +1,4 @@
/.git
# build artifacts
/bundles/
/cli/winresources/dockerd/winres.json
/cli/winresources/dockerd/*.syso
.git
bundles/
cli/winresources/**/winres.json
cli/winresources/**/*.syso

.github/CODEOWNERS

@@ -5,6 +5,8 @@
builder/** @tonistiigi
contrib/mkimage/** @tianon
daemon/graphdriver/devmapper/** @rhvgoyal
daemon/graphdriver/overlay/** @dmcgowan
daemon/graphdriver/overlay2/** @dmcgowan
daemon/graphdriver/windows/** @johnstep
daemon/logger/awslogs/** @samuelkarp

.github/ISSUE_TEMPLATE.md

@@ -0,0 +1,70 @@
<!--
If you are reporting a new issue, make sure that we do not have any duplicates
already open. You can ensure this by searching the issue list for this
repository. If there is a duplicate, please close your issue and add a comment
to the existing issue instead.
If you suspect your issue is a bug, please edit your issue description to
include the BUG REPORT INFORMATION shown below. If you fail to provide this
information within 7 days, we cannot debug your issue and will close it. We
will, however, reopen it if you later provide the information.
For more information about reporting issues, see
https://github.com/moby/moby/blob/master/CONTRIBUTING.md#reporting-other-issues
---------------------------------------------------
GENERAL SUPPORT INFORMATION
---------------------------------------------------
The GitHub issue tracker is for bug reports and feature requests.
General support for **docker** can be found at the following locations:
- Docker Support Forums - https://forums.docker.com
- Slack - community.docker.com #general channel
- Post a question on StackOverflow, using the Docker tag
General support for **moby** can be found at the following locations:
- Moby Project Forums - https://forums.mobyproject.org
- Slack - community.docker.com #moby-project channel
- Post a question on StackOverflow, using the Moby tag
---------------------------------------------------
BUG REPORT INFORMATION
---------------------------------------------------
Use the commands below to provide key information from your environment:
You do NOT have to include this information if this is a FEATURE REQUEST
-->
**Description**
<!--
Briefly describe the problem you are having in a few paragraphs.
-->
**Steps to reproduce the issue:**
1.
2.
3.
**Describe the results you received:**
**Describe the results you expected:**
**Additional information you deem important (e.g. issue happens only occasionally):**
**Output of `docker version`:**
```
(paste your output here)
```
**Output of `docker info`:**
```
(paste your output here)
```
**Additional environment details (AWS, VirtualBox, physical, etc.):**


@@ -1,146 +0,0 @@
name: Bug report
description: Create a report to help us improve
labels:
- kind/bug
- status/0-triage
body:
- type: markdown
attributes:
value: |
Thank you for taking the time to report a bug!
If this is a security issue please report it to the [Docker Security team](mailto:security@docker.com).
- type: textarea
id: description
attributes:
label: Description
description: Please give a clear and concise description of the bug
validations:
required: true
- type: textarea
id: repro
attributes:
label: Reproduce
description: Steps to reproduce the bug
placeholder: |
1. docker run ...
2. docker kill ...
3. docker rm ...
validations:
required: true
- type: textarea
id: expected
attributes:
label: Expected behavior
description: What is the expected behavior?
placeholder: |
E.g. "`docker rm` should remove the container and cleanup all associated data"
- type: textarea
id: version
attributes:
label: docker version
description: Output of `docker version`
render: bash
placeholder: |
Client:
Version: 20.10.17
API version: 1.41
Go version: go1.17.11
Git commit: 100c70180fde3601def79a59cc3e996aa553c9b9
Built: Mon Jun 6 21:36:39 UTC 2022
OS/Arch: linux/amd64
Context: default
Experimental: true
Server:
Engine:
Version: 20.10.17
API version: 1.41 (minimum version 1.12)
Go version: go1.17.11
Git commit: a89b84221c8560e7a3dee2a653353429e7628424
Built: Mon Jun 6 22:32:38 2022
OS/Arch: linux/amd64
Experimental: true
containerd:
Version: 1.6.6
GitCommit: 10c12954828e7c7c9b6e0ea9b0c02b01407d3ae1
runc:
Version: 1.1.2
GitCommit: a916309fff0f838eb94e928713dbc3c0d0ac7aa4
docker-init:
Version: 0.19.0
GitCommit:
validations:
required: true
- type: textarea
id: info
attributes:
label: docker info
description: Output of `docker info`
render: bash
placeholder: |
Client:
Context: default
Debug Mode: false
Plugins:
buildx: Docker Buildx (Docker Inc., 0.8.2)
compose: Docker Compose (Docker Inc., 2.6.0)
Server:
Containers: 4
Running: 2
Paused: 0
Stopped: 2
Images: 80
Server Version: 20.10.17
Storage Driver: overlay2
Backing Filesystem: xfs
Supports d_type: true
Native Overlay Diff: false
userxattr: false
Logging Driver: local
Cgroup Driver: cgroupfs
Cgroup Version: 1
Plugins:
Volume: local
Network: bridge host ipvlan macvlan null overlay
Log: awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog
Swarm: inactive
Runtimes: runc io.containerd.runc.v2 io.containerd.runtime.v1.linux
Default Runtime: runc
Init Binary: docker-init
containerd version: 10c12954828e7c7c9b6e0ea9b0c02b01407d3ae1
runc version: a916309fff0f838eb94e928713dbc3c0d0ac7aa4
init version:
Security Options:
apparmor
seccomp
Profile: default
Kernel Version: 5.13.0-1031-azure
Operating System: Ubuntu 20.04.4 LTS
OSType: linux
Architecture: x86_64
CPUs: 4
Total Memory: 15.63GiB
Name: dev
ID: UC44:2RFL:7NQ5:GGFW:34O5:DYRE:CLOH:VLGZ:64AZ:GFXC:PY6H:SAHY
Docker Root Dir: /var/lib/docker
Debug Mode: true
File Descriptors: 46
Goroutines: 134
System Time: 2022-07-06T18:07:54.812439392Z
EventsListeners: 0
Registry: https://index.docker.io/v1/
Labels:
Experimental: true
Insecure Registries:
127.0.0.0/8
Live Restore Enabled: true
validations:
required: true
- type: textarea
id: additional
attributes:
label: Additional Info
description: Additional info you want to provide such as logs, system info, environment, etc.
validations:
required: false


@@ -1,8 +0,0 @@
blank_issues_enabled: false
contact_links:
- name: Security and Vulnerabilities
url: https://github.com/moby/moby/blob/master/SECURITY.md
about: Please report any security issues or vulnerabilities responsibly to the Docker security team. Please do not use the public issue tracker.
- name: Questions and Discussions
url: https://github.com/moby/moby/discussions/new
about: Use Github Discussions to ask questions and/or open discussion topics.


@@ -1,13 +0,0 @@
name: Feature request
description: Missing functionality? Come tell us about it!
labels:
- kind/feature
- status/0-triage
body:
- type: textarea
id: description
attributes:
label: Description
description: What is the feature you want to see?
validations:
required: true


@@ -19,18 +19,12 @@ Please provide the following information:
**- How to verify it**
**- Human readable description for the release notes**
**- Description for the changelog**
<!--
Write a short (one line) summary that describes the changes in this
pull request for inclusion in the changelog.
It must be placed inside the below triple backticks section.
NOTE: Only fill this section if changes introduced in this PR are user-facing.
The PR must have a relevant impact/ label.
pull request for inclusion in the changelog:
-->
```markdown changelog
```
**- A picture of a cute animal (not mandatory but encouraged)**


@@ -13,7 +13,7 @@ runs:
shell: bash
- run: |
if [ ! -e /etc/docker/daemon.json ]; then
echo '{}' | sudo tee /etc/docker/daemon.json >/dev/null
echo '{}' | tee /etc/docker/daemon.json >/dev/null
fi
DOCKERD_CONFIG=$(jq '.+{"experimental":true,"live-restore":true,"ipv6":true,"fixed-cidr-v6":"2001:db8:1::/64"}' /etc/docker/daemon.json)
sudo tee /etc/docker/daemon.json <<<"$DOCKERD_CONFIG" >/dev/null


@@ -1,14 +0,0 @@
name: 'Setup Tracing'
description: 'Composite action to set up the tracing for test jobs'
runs:
using: composite
steps:
- run: |
set -e
# Jaeger is set up on Windows through an inline run step. If you update Jaeger here, don't forget to update
# the version set in .github/workflows/.windows.yml.
docker run -d --net=host --name jaeger -e COLLECTOR_OTLP_ENABLED=true jaegertracing/all-in-one:1.46
docker0_ip="$(ip -f inet addr show docker0 | grep -Po 'inet \K[\d.]+')"
echo "OTEL_EXPORTER_OTLP_ENDPOINT=http://${docker0_ip}:4318" >> "${GITHUB_ENV}"
shell: bash


@@ -3,41 +3,31 @@ name: .dco
# TODO: hide reusable workflow from the UI. Tracked in https://github.com/community/community/discussions/12025
# Default to 'contents: read', which grants actions to read commits.
#
# If any permission is set, any permission not included in the list is
# implicitly set to "none".
#
# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
permissions:
contents: read
on:
workflow_call:
env:
ALPINE_VERSION: "3.21"
ALPINE_VERSION: 3.16
jobs:
run:
runs-on: ubuntu-24.04
timeout-minutes: 10 # guardrails timeout for the whole job
runs-on: ubuntu-20.04
steps:
-
name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v3
with:
fetch-depth: 0
-
name: Dump context
uses: actions/github-script@v7
uses: actions/github-script@v6
with:
script: |
console.log(JSON.stringify(context, null, 2));
-
name: Get base ref
id: base-ref
uses: actions/github-script@v7
uses: actions/github-script@v6
with:
result-encoding: string
script: |
@@ -49,12 +39,10 @@ jobs:
name: Validate
run: |
docker run --rm \
--quiet \
-v ./:/workspace \
-w /workspace \
-v "$(pwd):/workspace" \
-e VALIDATE_REPO \
-e VALIDATE_BRANCH \
alpine:${{ env.ALPINE_VERSION }} sh -c 'apk add --no-cache -q bash git openssh-client && git config --system --add safe.directory /workspace && hack/validate/dco'
alpine:${{ env.ALPINE_VERSION }} sh -c 'apk add --no-cache -q bash git openssh-client && git config --system --add safe.directory /workspace && cd /workspace && hack/validate/dco'
env:
VALIDATE_REPO: ${{ github.server_url }}/${{ github.repository }}.git
VALIDATE_BRANCH: ${{ steps.base-ref.outputs.result }}


@@ -1,45 +0,0 @@
# reusable workflow
name: .test-prepare
# TODO: hide reusable workflow from the UI. Tracked in https://github.com/community/community/discussions/12025
# Default to 'contents: read', which grants actions to read commits.
#
# If any permission is set, any permission not included in the list is
# implicitly set to "none".
#
# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
permissions:
contents: read
on:
workflow_call:
outputs:
matrix:
description: Test matrix
value: ${{ jobs.run.outputs.matrix }}
jobs:
run:
runs-on: ubuntu-24.04
timeout-minutes: 120 # guardrails timeout for the whole job
outputs:
matrix: ${{ steps.set.outputs.matrix }}
steps:
-
name: Checkout
uses: actions/checkout@v4
-
name: Create matrix
id: set
uses: actions/github-script@v7
with:
script: |
let matrix = ['graphdriver'];
if ("${{ contains(github.event.pull_request.labels.*.name, 'containerd-integration') || github.event_name != 'pull_request' }}" == "true") {
matrix.push('snapshotter');
}
await core.group(`Set matrix`, async () => {
core.info(`matrix: ${JSON.stringify(matrix)}`);
core.setOutput('matrix', JSON.stringify(matrix));
});


@@ -1,123 +0,0 @@
# reusable workflow
name: .test-unit
# TODO: hide reusable workflow from the UI. Tracked in https://github.com/community/community/discussions/12025
# Default to 'contents: read', which grants actions to read commits.
#
# If any permission is set, any permission not included in the list is
# implicitly set to "none".
#
# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
permissions:
contents: read
on:
workflow_call:
env:
GO_VERSION: "1.24.3"
GOTESTLIST_VERSION: v0.3.1
TESTSTAT_VERSION: v0.1.25
SETUP_BUILDX_VERSION: edge
SETUP_BUILDKIT_IMAGE: moby/buildkit:latest
jobs:
unit:
runs-on: ubuntu-24.04
timeout-minutes: 120 # guardrails timeout for the whole job
continue-on-error: ${{ github.event_name != 'pull_request' }}
strategy:
fail-fast: false
matrix:
mode:
- ""
- firewalld
steps:
-
name: Checkout
uses: actions/checkout@v4
-
name: Set up runner
uses: ./.github/actions/setup-runner
-
name: Prepare
run: |
CACHE_DEV_SCOPE=dev
if [[ "${{ matrix.mode }}" == *"firewalld"* ]]; then
echo "FIREWALLD=true" >> $GITHUB_ENV
CACHE_DEV_SCOPE="${CACHE_DEV_SCOPE}firewalld"
fi
echo "CACHE_DEV_SCOPE=${CACHE_DEV_SCOPE}" >> $GITHUB_ENV
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Build dev image
uses: docker/bake-action@v6
with:
targets: dev
set: |
dev.cache-from=type=gha,scope=${{ env.CACHE_DEV_SCOPE }}
-
name: Test
run: |
make -o build test-unit
-
name: Prepare reports
if: always()
run: |
mkdir -p bundles /tmp/reports
find bundles -type f \( -name '*-report.json' -o -name '*.log' -o -name '*.out' -o -name '*.prof' -o -name '*-report.xml' \) -print | xargs sudo tar -czf /tmp/reports.tar.gz
tar -xzf /tmp/reports.tar.gz -C /tmp/reports
sudo chown -R $(id -u):$(id -g) /tmp/reports
tree -nh /tmp/reports
-
name: Send to Codecov
uses: codecov/codecov-action@v4
with:
directory: ./bundles
env_vars: RUNNER_OS
flags: unit
token: ${{ secrets.CODECOV_TOKEN }} # used to upload coverage reports: https://github.com/moby/buildkit/pull/4660#issue-2142122533
-
name: Upload reports
if: always()
uses: actions/upload-artifact@v4
with:
name: test-reports-unit--${{ matrix.mode }}
path: /tmp/reports/*
retention-days: 1
unit-report:
runs-on: ubuntu-24.04
timeout-minutes: 10
continue-on-error: ${{ github.event_name != 'pull_request' }}
if: always()
needs:
- unit
steps:
-
name: Set up Go
uses: actions/setup-go@v5
with:
go-version: ${{ env.GO_VERSION }}
cache-dependency-path: vendor.sum
-
name: Download reports
uses: actions/download-artifact@v4
with:
pattern: test-reports-unit-*
path: /tmp/reports
-
name: Install teststat
run: |
go install github.com/vearutop/teststat@${{ env.TESTSTAT_VERSION }}
-
name: Create summary
run: |
find /tmp/reports -type f -name '*-go-test-report.json' -exec teststat -markdown {} \+ >> $GITHUB_STEP_SUMMARY


@@ -1,472 +0,0 @@
# reusable workflow
name: .test
# TODO: hide reusable workflow from the UI. Tracked in https://github.com/community/community/discussions/12025
# Default to 'contents: read', which grants actions to read commits.
#
# If any permission is set, any permission not included in the list is
# implicitly set to "none".
#
# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
permissions:
contents: read
on:
workflow_call:
inputs:
storage:
required: true
type: string
default: "graphdriver"
env:
GO_VERSION: "1.24.3"
GOTESTLIST_VERSION: v0.3.1
TESTSTAT_VERSION: v0.1.25
ITG_CLI_MATRIX_SIZE: 6
DOCKER_EXPERIMENTAL: 1
DOCKER_GRAPHDRIVER: ${{ inputs.storage == 'snapshotter' && 'overlayfs' || 'overlay2' }}
TEST_INTEGRATION_USE_SNAPSHOTTER: ${{ inputs.storage == 'snapshotter' && '1' || '' }}
SETUP_BUILDX_VERSION: edge
SETUP_BUILDKIT_IMAGE: moby/buildkit:latest
jobs:
docker-py:
runs-on: ubuntu-24.04
timeout-minutes: 120 # guardrails timeout for the whole job
continue-on-error: ${{ github.event_name != 'pull_request' }}
steps:
-
name: Checkout
uses: actions/checkout@v4
-
name: Set up runner
uses: ./.github/actions/setup-runner
-
name: Set up tracing
uses: ./.github/actions/setup-tracing
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Build dev image
uses: docker/bake-action@v6
with:
targets: dev
set: |
dev.cache-from=type=gha,scope=dev
-
name: Test
run: |
make TEST_SKIP_INTEGRATION_CLI=1 -o build test-docker-py
-
name: Prepare reports
if: always()
run: |
mkdir -p bundles /tmp/reports
find bundles -path '*/root/*overlay2' -prune -o -type f \( -name '*-report.json' -o -name '*.log' -o -name '*.out' -o -name '*.prof' -o -name '*-report.xml' \) -print | xargs sudo tar -czf /tmp/reports.tar.gz
tar -xzf /tmp/reports.tar.gz -C /tmp/reports
sudo chown -R $(id -u):$(id -g) /tmp/reports
tree -nh /tmp/reports
curl -sSLf localhost:16686/api/traces?service=integration-test-client > /tmp/reports/jaeger-trace.json
-
name: Test daemon logs
if: always()
run: |
cat bundles/test-docker-py/docker.log
-
name: Upload reports
if: always()
uses: actions/upload-artifact@v4
with:
name: test-reports-docker-py-${{ inputs.storage }}
path: /tmp/reports/*
retention-days: 1
integration-flaky:
runs-on: ubuntu-24.04
timeout-minutes: 120 # guardrails timeout for the whole job
continue-on-error: ${{ github.event_name != 'pull_request' }}
steps:
-
name: Checkout
uses: actions/checkout@v4
-
name: Set up runner
uses: ./.github/actions/setup-runner
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Build dev image
uses: docker/bake-action@v6
with:
targets: dev
set: |
dev.cache-from=type=gha,scope=dev
-
name: Test
run: |
make -o build test-integration-flaky
env:
TEST_SKIP_INTEGRATION_CLI: 1
integration-prepare:
runs-on: ubuntu-24.04
timeout-minutes: 10 # guardrails timeout for the whole job
continue-on-error: ${{ github.event_name != 'pull_request' }}
outputs:
includes: ${{ steps.set.outputs.includes }}
steps:
-
name: Create matrix includes
id: set
uses: actions/github-script@v7
with:
script: |
let includes = [
{ os: 'ubuntu-22.04', mode: '' },
{ os: 'ubuntu-22.04', mode: 'rootless' },
{ os: 'ubuntu-22.04', mode: 'systemd' },
{ os: 'ubuntu-24.04', mode: '' },
// { os: 'ubuntu-24.04', mode: 'rootless' }, // FIXME: https://github.com/moby/moby/pull/49579#issuecomment-2698622223
{ os: 'ubuntu-24.04', mode: 'systemd' },
// { os: 'ubuntu-24.04', mode: 'rootless-systemd' }, // FIXME: https://github.com/moby/moby/issues/44084
];
if ("${{ inputs.storage }}" == "snapshotter") {
includes.push({ os: 'ubuntu-24.04', mode: 'firewalld' });
}
await core.group(`Set matrix`, async () => {
core.info(`matrix: ${JSON.stringify(includes)}`);
core.setOutput('includes', JSON.stringify(includes));
});
-
name: Show matrix
run: |
echo ${{ steps.set.outputs.includes }}
integration:
runs-on: ${{ matrix.os }}
timeout-minutes: 120 # guardrails timeout for the whole job
continue-on-error: ${{ github.event_name != 'pull_request' }}
needs:
- integration-prepare
strategy:
fail-fast: false
matrix:
include: ${{ fromJson(needs.integration-prepare.outputs.includes) }}
steps:
-
name: Checkout
uses: actions/checkout@v4
-
name: Set up runner
uses: ./.github/actions/setup-runner
-
name: Set up tracing
uses: ./.github/actions/setup-tracing
-
name: Prepare
run: |
CACHE_DEV_SCOPE=dev
if [[ "${{ matrix.mode }}" == *"rootless"* ]]; then
echo "DOCKER_ROOTLESS=1" >> $GITHUB_ENV
fi
if [[ "${{ matrix.mode }}" == *"systemd"* ]]; then
echo "SYSTEMD=true" >> $GITHUB_ENV
CACHE_DEV_SCOPE="${CACHE_DEV_SCOPE}systemd"
fi
if [[ "${{ matrix.mode }}" == *"firewalld"* ]]; then
echo "FIREWALLD=true" >> $GITHUB_ENV
CACHE_DEV_SCOPE="${CACHE_DEV_SCOPE}firewalld"
fi
echo "CACHE_DEV_SCOPE=${CACHE_DEV_SCOPE}" >> $GITHUB_ENV
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Build dev image
uses: docker/bake-action@v6
with:
targets: dev
set: |
dev.cache-from=type=gha,scope=${{ env.CACHE_DEV_SCOPE }}
-
name: Test
run: |
make -o build test-integration
env:
TEST_SKIP_INTEGRATION_CLI: 1
TESTCOVERAGE: 1
-
name: Prepare reports
if: always()
run: |
reportsName=${{ matrix.os }}
if [ -n "${{ matrix.mode }}" ]; then
reportsName="$reportsName-${{ matrix.mode }}"
fi
reportsPath="/tmp/reports/$reportsName"
echo "TESTREPORTS_NAME=$reportsName" >> $GITHUB_ENV
mkdir -p bundles $reportsPath
find bundles -path '*/root/*overlay2' -prune -o -type f \( -name '*-report.json' -o -name '*.log' -o -name '*.out' -o -name '*.prof' -o -name '*-report.xml' \) -print | xargs sudo tar -czf /tmp/reports.tar.gz
tar -xzf /tmp/reports.tar.gz -C $reportsPath
sudo chown -R $(id -u):$(id -g) $reportsPath
tree -nh $reportsPath
curl -sSLf localhost:16686/api/traces?service=integration-test-client > $reportsPath/jaeger-trace.json
-
name: Send to Codecov
uses: codecov/codecov-action@v4
with:
directory: ./bundles/test-integration
env_vars: RUNNER_OS
flags: integration,${{ matrix.mode }}
token: ${{ secrets.CODECOV_TOKEN }} # used to upload coverage reports: https://github.com/moby/buildkit/pull/4660#issue-2142122533
-
name: Test daemon logs
if: always()
run: |
cat bundles/test-integration/docker.log
-
name: Upload reports
if: always()
uses: actions/upload-artifact@v4
with:
name: test-reports-integration-${{ inputs.storage }}-${{ env.TESTREPORTS_NAME }}
path: /tmp/reports/*
retention-days: 1
integration-report:
runs-on: ubuntu-24.04
timeout-minutes: 10
continue-on-error: ${{ github.event_name != 'pull_request' }}
if: always()
needs:
- integration
steps:
-
name: Set up Go
uses: actions/setup-go@v5
with:
go-version: ${{ env.GO_VERSION }}
cache-dependency-path: vendor.sum
-
name: Download reports
uses: actions/download-artifact@v4
with:
path: /tmp/reports
pattern: test-reports-integration-${{ inputs.storage }}-*
merge-multiple: true
-
name: Install teststat
run: |
go install github.com/vearutop/teststat@${{ env.TESTSTAT_VERSION }}
-
name: Create summary
run: |
find /tmp/reports -type f -name '*-go-test-report.json' -exec teststat -markdown {} \+ >> $GITHUB_STEP_SUMMARY
integration-cli-prepare:
runs-on: ubuntu-24.04
timeout-minutes: 120 # guardrails timeout for the whole job
continue-on-error: ${{ github.event_name != 'pull_request' }}
outputs:
matrix: ${{ steps.set.outputs.matrix }}
steps:
-
name: Checkout
uses: actions/checkout@v4
-
name: Set up Go
uses: actions/setup-go@v5
with:
go-version: ${{ env.GO_VERSION }}
cache-dependency-path: vendor.sum
-
name: Install gotestlist
run:
go install github.com/crazy-max/gotestlist/cmd/gotestlist@${{ env.GOTESTLIST_VERSION }}
-
name: Create test matrix
id: tests
working-directory: ./integration-cli
run: |
# This step creates a matrix for integration-cli tests. Test suites
# are distributed across the integration-cli job through a matrix. There
# are also overrides added to the matrix, such as "./..." to run the
# "Test integration" step exclusively, and specific test suites that
# take a long time to run.
matrix="$(gotestlist -d ${{ env.ITG_CLI_MATRIX_SIZE }} -o "./..." -o "DockerSwarmSuite" -o "DockerNetworkSuite|DockerExternalVolumeSuite" ./...)"
echo "matrix=$matrix" >> $GITHUB_OUTPUT
-
name: Create gha matrix
id: set
uses: actions/github-script@v7
with:
script: |
let matrix = {
test: ${{ steps.tests.outputs.matrix }},
include: [],
};
// For some reason, GHA doesn't combine a dynamically defined
// 'include' with other matrix variables that aren't part of the
// include items.
// Moreover, since the goal is to run only relevant tests with
// firewalld enabled to minimize the number of CI jobs, we
// statically define the list of test suites that we want to run.
if ("${{ inputs.storage }}" == "snapshotter") {
matrix.include.push({
'mode': 'firewalld',
'test': 'DockerCLINetworkSuite|DockerCLIPortSuite|DockerDaemonSuite'
});
matrix.include.push({
'mode': 'firewalld',
'test': 'DockerSwarmSuite'
});
matrix.include.push({
'mode': 'firewalld',
'test': 'DockerNetworkSuite'
});
}
await core.group(`Set matrix`, async () => {
core.info(`matrix: ${JSON.stringify(matrix)}`);
core.setOutput('matrix', JSON.stringify(matrix));
});
-
name: Show final gha matrix
run: |
echo ${{ steps.set.outputs.matrix }}
integration-cli:
runs-on: ubuntu-24.04
timeout-minutes: 120 # guardrails timeout for the whole job
continue-on-error: ${{ github.event_name != 'pull_request' }}
needs:
- integration-cli-prepare
strategy:
fail-fast: false
matrix: ${{ fromJson(needs.integration-cli-prepare.outputs.matrix) }}
steps:
-
name: Checkout
uses: actions/checkout@v4
-
name: Set up runner
uses: ./.github/actions/setup-runner
-
name: Set up tracing
uses: ./.github/actions/setup-tracing
-
name: Prepare
run: |
CACHE_DEV_SCOPE=dev
if [[ "${{ matrix.mode }}" == *"firewalld"* ]]; then
echo "FIREWALLD=true" >> $GITHUB_ENV
CACHE_DEV_SCOPE="${CACHE_DEV_SCOPE}firewalld"
fi
echo "CACHE_DEV_SCOPE=${CACHE_DEV_SCOPE}" >> $GITHUB_ENV
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Build dev image
uses: docker/bake-action@v6
with:
targets: dev
set: |
dev.cache-from=type=gha,scope=dev
-
name: Test
run: |
make -o build test-integration
env:
TEST_SKIP_INTEGRATION: 1
TESTCOVERAGE: 1
TESTFLAGS: "-test.run (${{ matrix.test }})/"
-
name: Prepare reports
if: always()
run: |
reportsName=$(echo -n "${{ matrix.test }}" | sha256sum | cut -d " " -f 1)
reportsPath=/tmp/reports/$reportsName
echo "TESTREPORTS_NAME=$reportsName" >> $GITHUB_ENV
mkdir -p bundles $reportsPath
echo "${{ matrix.test }}" | tr -s '|' '\n' | tee -a "$reportsPath/tests.txt"
find bundles -path '*/root/*overlay2' -prune -o -type f \( -name '*-report.json' -o -name '*.log' -o -name '*.out' -o -name '*.prof' -o -name '*-report.xml' \) -print | xargs sudo tar -czf /tmp/reports.tar.gz
tar -xzf /tmp/reports.tar.gz -C $reportsPath
sudo chown -R $(id -u):$(id -g) $reportsPath
tree -nh $reportsPath
curl -sSLf localhost:16686/api/traces?service=integration-test-client > $reportsPath/jaeger-trace.json
-
name: Send to Codecov
uses: codecov/codecov-action@v4
with:
directory: ./bundles/test-integration
env_vars: RUNNER_OS
flags: integration-cli
token: ${{ secrets.CODECOV_TOKEN }} # used to upload coverage reports: https://github.com/moby/buildkit/pull/4660#issue-2142122533
-
name: Test daemon logs
if: always()
run: |
cat bundles/test-integration/docker.log
-
name: Upload reports
if: always()
uses: actions/upload-artifact@v4
with:
name: test-reports-integration-cli-${{ inputs.storage }}-${{ env.TESTREPORTS_NAME }}
path: /tmp/reports/*
retention-days: 1
integration-cli-report:
runs-on: ubuntu-24.04
timeout-minutes: 10
continue-on-error: ${{ github.event_name != 'pull_request' }}
if: always()
needs:
- integration-cli
steps:
-
name: Set up Go
uses: actions/setup-go@v5
with:
go-version: ${{ env.GO_VERSION }}
cache-dependency-path: vendor.sum
-
name: Download reports
uses: actions/download-artifact@v4
with:
path: /tmp/reports
pattern: test-reports-integration-cli-${{ inputs.storage }}-*
merge-multiple: true
-
name: Install teststat
run: |
go install github.com/vearutop/teststat@${{ env.TESTSTAT_VERSION }}
-
name: Create summary
run: |
find /tmp/reports -type f -name '*-go-test-report.json' -exec teststat -markdown {} \+ >> $GITHUB_STEP_SUMMARY


@@ -3,34 +3,21 @@ name: .windows
# TODO: hide reusable workflow from the UI. Tracked in https://github.com/community/community/discussions/12025
# Default to 'contents: read', which grants actions to read commits.
#
# If any permission is set, any permission not included in the list is
# implicitly set to "none".
#
# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
permissions:
contents: read
on:
workflow_call:
inputs:
os:
required: true
type: string
storage:
required: true
type: string
default: "graphdriver"
send_coverage:
required: false
type: boolean
default: false
env:
GO_VERSION: "1.24.3"
GOTESTLIST_VERSION: v0.3.1
TESTSTAT_VERSION: v0.1.25
GO_VERSION: 1.19.3
GOTESTLIST_VERSION: v0.2.0
TESTSTAT_VERSION: v0.1.3
WINDOWS_BASE_IMAGE: mcr.microsoft.com/windows/servercore
WINDOWS_BASE_TAG_2019: ltsc2019
WINDOWS_BASE_TAG_2022: ltsc2022
@@ -42,7 +29,6 @@ env:
jobs:
build:
runs-on: ${{ inputs.os }}
timeout-minutes: 120 # guardrails timeout for the whole job
env:
GOPATH: ${{ github.workspace }}\go
GOBIN: ${{ github.workspace }}\go\bin
@@ -53,7 +39,7 @@ jobs:
steps:
-
name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v3
with:
path: ${{ env.GOPATH }}/src/github.com/docker/docker
-
@@ -72,7 +58,7 @@ jobs:
}
-
name: Cache
uses: actions/cache@v4
uses: actions/cache@v3
with:
path: |
~\AppData\Local\go-build
@@ -89,12 +75,9 @@ jobs:
-
name: Build base image
run: |
& docker build `
--build-arg WINDOWS_BASE_IMAGE `
--build-arg WINDOWS_BASE_IMAGE_TAG `
--build-arg GO_VERSION `
-t ${{ env.TEST_IMAGE_NAME }} `
-f Dockerfile.windows .
docker pull ${{ env.WINDOWS_BASE_IMAGE }}:${{ env.WINDOWS_BASE_IMAGE_TAG }}
docker tag ${{ env.WINDOWS_BASE_IMAGE }}:${{ env.WINDOWS_BASE_IMAGE_TAG }} microsoft/windowsservercore
docker build --build-arg GO_VERSION -t ${{ env.TEST_IMAGE_NAME }} -f Dockerfile.windows .
-
name: Build binaries
run: |
@@ -113,16 +96,16 @@ jobs:
docker cp "${{ env.TEST_CTN_NAME }}`:c`:\containerd\bin\containerd-shim-runhcs-v1.exe" ${{ env.BIN_OUT }}\
-
name: Upload artifacts
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v3
with:
name: build-${{ inputs.storage }}-${{ inputs.os }}
name: build-${{ inputs.os }}
path: ${{ env.BIN_OUT }}/*
if-no-files-found: error
retention-days: 2
unit-test:
runs-on: ${{ inputs.os }}
timeout-minutes: 120 # guardrails timeout for the whole job
timeout-minutes: 120
env:
GOPATH: ${{ github.workspace }}\go
GOBIN: ${{ github.workspace }}\go\bin
@@ -132,7 +115,7 @@ jobs:
steps:
-
name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v3
with:
path: ${{ env.GOPATH }}/src/github.com/docker/docker
-
@@ -152,7 +135,7 @@ jobs:
}
-
name: Cache
uses: actions/cache@v4
uses: actions/cache@v3
with:
path: |
~\AppData\Local\go-build
@@ -169,12 +152,9 @@ jobs:
-
name: Build base image
run: |
& docker build `
--build-arg WINDOWS_BASE_IMAGE `
--build-arg WINDOWS_BASE_IMAGE_TAG `
--build-arg GO_VERSION `
-t ${{ env.TEST_IMAGE_NAME }} `
-f Dockerfile.windows .
docker pull ${{ env.WINDOWS_BASE_IMAGE }}:${{ env.WINDOWS_BASE_IMAGE_TAG }}
docker tag ${{ env.WINDOWS_BASE_IMAGE }}:${{ env.WINDOWS_BASE_IMAGE_TAG }} microsoft/windowsservercore
docker build --build-arg GO_VERSION -t ${{ env.TEST_IMAGE_NAME }} -f Dockerfile.windows .
-
name: Test
run: |
@@ -186,40 +166,36 @@ jobs:
-
name: Send to Codecov
if: inputs.send_coverage
uses: codecov/codecov-action@v4
uses: codecov/codecov-action@v3
with:
working-directory: ${{ env.GOPATH }}\src\github.com\docker\docker
directory: bundles
env_vars: RUNNER_OS
flags: unit
token: ${{ secrets.CODECOV_TOKEN }} # used to upload coverage reports: https://github.com/moby/buildkit/pull/4660#issue-2142122533
-
name: Upload reports
if: always()
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v3
with:
name: ${{ inputs.os }}-${{ inputs.storage }}-unit-reports
name: ${{ inputs.os }}-unit-reports
path: ${{ env.GOPATH }}\src\github.com\docker\docker\bundles\*
retention-days: 1
unit-test-report:
runs-on: ubuntu-24.04
timeout-minutes: 120 # guardrails timeout for the whole job
runs-on: ubuntu-latest
if: always()
needs:
- unit-test
steps:
-
name: Set up Go
uses: actions/setup-go@v5
uses: actions/setup-go@v3
with:
go-version: ${{ env.GO_VERSION }}
cache-dependency-path: vendor.sum
-
name: Download artifacts
uses: actions/download-artifact@v4
uses: actions/download-artifact@v3
with:
name: ${{ inputs.os }}-${{ inputs.storage }}-unit-reports
name: ${{ inputs.os }}-unit-reports
path: /tmp/artifacts
-
name: Install teststat
@@ -228,23 +204,21 @@ jobs:
-
name: Create summary
run: |
find /tmp/artifacts -type f -name '*-go-test-report.json' -exec teststat -markdown {} \+ >> $GITHUB_STEP_SUMMARY
teststat -markdown $(find /tmp/artifacts -type f -name '*.json' -print0 | xargs -0) >> $GITHUB_STEP_SUMMARY
integration-test-prepare:
runs-on: ubuntu-24.04
timeout-minutes: 120 # guardrails timeout for the whole job
runs-on: ubuntu-latest
outputs:
matrix: ${{ steps.tests.outputs.matrix }}
steps:
-
name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v3
-
name: Set up Go
uses: actions/setup-go@v5
uses: actions/setup-go@v3
with:
go-version: ${{ env.GO_VERSION }}
cache-dependency-path: vendor.sum
-
name: Install gotestlist
run:
@@ -254,11 +228,10 @@ jobs:
id: tests
working-directory: ./integration-cli
run: |
# This step creates a matrix for integration-cli tests. Test suites
# are distributed across the integration-test job through a matrix. There
# is also an override added to the matrix, "./...", to run the
# "Test integration" step exclusively.
matrix="$(gotestlist -d ${{ env.ITG_CLI_MATRIX_SIZE }} -o "./..." ./...)"
# Distribute integration-cli tests for the matrix in integration-test job.
# Also prepend ./... to the matrix. This is a special case to run "Test integration" step exclusively.
matrix="$(gotestlist -d ${{ env.ITG_CLI_MATRIX_SIZE }} ./...)"
matrix="$(echo "$matrix" | jq -c '. |= ["./..."] + .')"
echo "matrix=$matrix" >> $GITHUB_OUTPUT
-
name: Show matrix
@@ -267,23 +240,17 @@ jobs:
integration-test:
runs-on: ${{ inputs.os }}
timeout-minutes: 120 # guardrails timeout for the whole job
continue-on-error: ${{ inputs.storage == 'snapshotter' && github.event_name != 'pull_request' }}
timeout-minutes: 120
needs:
- build
- integration-test-prepare
strategy:
fail-fast: false
matrix:
storage:
- ${{ inputs.storage }}
runtime:
- builtin
- containerd
test: ${{ fromJson(needs.integration-test-prepare.outputs.matrix) }}
exclude:
- storage: snapshotter
runtime: builtin
env:
GOPATH: ${{ github.workspace }}\go
GOBIN: ${{ github.workspace }}\go\bin
@@ -294,28 +261,18 @@ jobs:
steps:
-
name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v3
with:
path: ${{ env.GOPATH }}/src/github.com/docker/docker
-
name: Set up Jaeger
run: |
# Jaeger is set up on Linux through the setup-tracing action. If you update Jaeger here, don't forget to
# update the version set in .github/actions/setup-tracing/action.yml.
Invoke-WebRequest -Uri "https://github.com/jaegertracing/jaeger/releases/download/v1.46.0/jaeger-1.46.0-windows-amd64.tar.gz" -OutFile ".\jaeger-1.46.0-windows-amd64.tar.gz"
tar -zxvf ".\jaeger-1.46.0-windows-amd64.tar.gz"
Start-Process '.\jaeger-1.46.0-windows-amd64\jaeger-all-in-one.exe'
echo "OTEL_EXPORTER_OTLP_ENDPOINT=http://127.0.0.1:4318" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append
shell: pwsh
-
name: Env
run: |
Get-ChildItem Env: | Out-String
-
name: Download artifacts
uses: actions/download-artifact@v4
uses: actions/download-artifact@v3
with:
name: build-${{ inputs.storage }}-${{ inputs.os }}
name: build-${{ inputs.os }}
path: ${{ env.BIN_OUT }}
-
name: Init
@@ -327,9 +284,6 @@ jobs:
echo "WINDOWS_BASE_IMAGE_TAG=${{ env.WINDOWS_BASE_TAG_2022 }}" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append
}
Write-Output "${{ env.BIN_OUT }}" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
$testName = ([System.BitConverter]::ToString((New-Object System.Security.Cryptography.SHA256Managed).ComputeHash([System.Text.Encoding]::UTF8.GetBytes("${{ matrix.test }}"))) -replace '-').ToLower()
echo "TESTREPORTS_NAME=$testName" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append
-
# removes docker service that is currently installed on the runner. we
# could use Uninstall-Package but not yet available on Windows runners.
@@ -349,12 +303,33 @@ jobs:
$ErrorActionPreference = "Stop"
Write-Host "Service removed"
}
-
name: Starting containerd
if: matrix.runtime == 'containerd'
run: |
Write-Host "Generating config"
& "${{ env.BIN_OUT }}\containerd.exe" config default | Out-File "$env:TEMP\ctn.toml" -Encoding ascii
Write-Host "Creating service"
New-Item -ItemType Directory "$env:TEMP\ctn-root" -ErrorAction SilentlyContinue | Out-Null
New-Item -ItemType Directory "$env:TEMP\ctn-state" -ErrorAction SilentlyContinue | Out-Null
Start-Process -Wait "${{ env.BIN_OUT }}\containerd.exe" `
-ArgumentList "--log-level=debug", `
"--config=$env:TEMP\ctn.toml", `
"--address=\\.\pipe\containerd-containerd", `
"--root=$env:TEMP\ctn-root", `
"--state=$env:TEMP\ctn-state", `
"--log-file=$env:TEMP\ctn.log", `
"--register-service"
Write-Host "Starting service"
Start-Service -Name containerd
Start-Sleep -Seconds 5
Write-Host "Service started successfully!"
-
name: Starting test daemon
run: |
Write-Host "Creating service"
If ("${{ matrix.runtime }}" -eq "containerd") {
$runtimeArg="--default-runtime=io.containerd.runhcs.v1"
$runtimeArg="--containerd=\\.\pipe\containerd-containerd"
echo "DOCKER_WINDOWS_CONTAINERD_RUNTIME=1" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append
}
New-Item -ItemType Directory "$env:TEMP\moby-root" -ErrorAction SilentlyContinue | Out-Null
@@ -366,11 +341,6 @@ jobs:
"--exec-root=$env:TEMP\moby-exec", `
"--pidfile=$env:TEMP\docker.pid", `
"--register-service"
If ("${{ inputs.storage }}" -eq "snapshotter") {
# Make the env-var visible to the service-managed dockerd, as there's no CLI flag for this option.
& reg add "HKLM\SYSTEM\CurrentControlSet\Services\docker" /v Environment /t REG_MULTI_SZ /s '@' /d TEST_INTEGRATION_USE_SNAPSHOTTER=1
echo "TEST_INTEGRATION_USE_SNAPSHOTTER=1" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append
}
Write-Host "Starting service"
Start-Service -Name docker
Write-Host "Service started successfully!"
@@ -394,17 +364,6 @@ jobs:
Start-Sleep -Seconds 1
}
Write-Host "Test daemon started and replied!"
If ("${{ matrix.runtime }}" -eq "containerd") {
$containerdProcesses = Get-Process -Name containerd -ErrorAction:SilentlyContinue
If (-not $containerdProcesses) {
Throw "containerd process is not running"
} else {
foreach ($process in $containerdProcesses) {
$processPath = (Get-Process -Id $process.Id -FileVersionInfo).FileName
Write-Output "Running containerd instance binary Path: $($processPath)"
}
}
}
env:
DOCKER_HOST: npipe:////./pipe/docker_engine
-
@@ -430,10 +389,9 @@ jobs:
DOCKER_HOST: npipe:////./pipe/docker_engine
-
name: Set up Go
uses: actions/setup-go@v5
uses: actions/setup-go@v3
with:
go-version: ${{ env.GO_VERSION }}
cache-dependency-path: vendor.sum
-
name: Test integration
if: matrix.test == './...'
@@ -456,19 +414,31 @@ jobs:
-
name: Send to Codecov
if: inputs.send_coverage
uses: codecov/codecov-action@v4
uses: codecov/codecov-action@v3
with:
working-directory: ${{ env.GOPATH }}\src\github.com\docker\docker
directory: bundles
env_vars: RUNNER_OS
flags: integration,${{ matrix.runtime }}
token: ${{ secrets.CODECOV_TOKEN }} # used to upload coverage reports: https://github.com/moby/buildkit/pull/4660#issue-2142122533
-
name: Docker info
run: |
& "${{ env.BIN_OUT }}\docker" info
env:
DOCKER_HOST: npipe:////./pipe/docker_engine
-
name: Stop containerd
if: always() && matrix.runtime == 'containerd'
run: |
$ErrorActionPreference = "SilentlyContinue"
Stop-Service -Force -Name containerd
$ErrorActionPreference = "Stop"
-
name: Containerd logs
if: always() && matrix.runtime == 'containerd'
run: |
Copy-Item "$env:TEMP\ctn.log" -Destination ".\bundles\containerd.log"
Get-Content "$env:TEMP\ctn.log" | Out-Host
-
name: Stop daemon
if: always()
@@ -484,57 +454,40 @@ jobs:
run: |
Get-WinEvent -ea SilentlyContinue `
-FilterHashtable @{ProviderName= "docker"; LogName = "application"} |
Select-Object -Property TimeCreated, @{N='Detailed Message'; E={$_.Message}} |
Sort-Object @{Expression="TimeCreated";Descending=$false} |
ForEach-Object {"$($_.TimeCreated.ToUniversalTime().ToString("o")) [$($_.LevelDisplayName)] $($_.Message)"} |
Tee-Object -file ".\bundles\daemon.log"
-
name: Download Jaeger traces
if: always()
run: |
Invoke-WebRequest `
-Uri "http://127.0.0.1:16686/api/traces?service=integration-test-client" `
-OutFile ".\bundles\jaeger-trace.json"
Select-Object -ExpandProperty 'Detailed Message' | Tee-Object -file ".\bundles\daemon.log"
-
name: Upload reports
if: always()
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v3
with:
name: ${{ inputs.os }}-${{ inputs.storage }}-integration-reports-${{ matrix.runtime }}-${{ env.TESTREPORTS_NAME }}
name: ${{ inputs.os }}-integration-reports-${{ matrix.runtime }}
path: ${{ env.GOPATH }}\src\github.com\docker\docker\bundles\*
retention-days: 1
integration-test-report:
runs-on: ubuntu-24.04
timeout-minutes: 120 # guardrails timeout for the whole job
continue-on-error: ${{ inputs.storage == 'snapshotter' && github.event_name != 'pull_request' }}
runs-on: ubuntu-latest
if: always()
needs:
- integration-test
strategy:
fail-fast: false
matrix:
storage:
- ${{ inputs.storage }}
runtime:
- builtin
- containerd
exclude:
- storage: snapshotter
runtime: builtin
steps:
-
name: Set up Go
uses: actions/setup-go@v5
uses: actions/setup-go@v3
with:
go-version: ${{ env.GO_VERSION }}
cache-dependency-path: vendor.sum
-
name: Download reports
uses: actions/download-artifact@v4
name: Download artifacts
uses: actions/download-artifact@v3
with:
path: /tmp/reports
pattern: ${{ inputs.os }}-${{ inputs.storage }}-integration-reports-${{ matrix.runtime }}-*
merge-multiple: true
name: ${{ inputs.os }}-integration-reports-${{ matrix.runtime }}
path: /tmp/artifacts
-
name: Install teststat
run: |
@@ -542,4 +495,4 @@ jobs:
-
name: Create summary
run: |
find /tmp/reports -type f -name '*-go-test-report.json' -exec teststat -markdown {} \+ >> $GITHUB_STEP_SUMMARY
teststat -markdown $(find /tmp/artifacts -type f -name '*.json' -print0 | xargs -0) >> $GITHUB_STEP_SUMMARY


@@ -1,276 +0,0 @@
name: arm64
# Default to 'contents: read', which grants actions to read commits.
#
# If any permission is set, any permission not included in the list is
# implicitly set to "none".
#
# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
permissions:
contents: read
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
on:
workflow_dispatch:
push:
branches:
- 'master'
- '[0-9]+.[0-9]+'
- '[0-9]+.x'
pull_request:
env:
GO_VERSION: "1.24.3"
TESTSTAT_VERSION: v0.1.25
DESTDIR: ./build
SETUP_BUILDX_VERSION: edge
SETUP_BUILDKIT_IMAGE: moby/buildkit:latest
DOCKER_EXPERIMENTAL: 1
jobs:
validate-dco:
uses: ./.github/workflows/.dco.yml
build:
runs-on: ubuntu-24.04-arm
timeout-minutes: 20 # guardrails timeout for the whole job
needs:
- validate-dco
strategy:
fail-fast: false
matrix:
target:
- binary
- dynbinary
steps:
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Build
uses: docker/bake-action@v6
with:
targets: ${{ matrix.target }}
-
name: List artifacts
run: |
tree -nh ${{ env.DESTDIR }}
-
name: Check artifacts
run: |
find ${{ env.DESTDIR }} -type f -exec file -e ascii -- {} +
build-dev:
runs-on: ubuntu-24.04-arm
timeout-minutes: 120 # guardrails timeout for the whole job
needs:
- validate-dco
steps:
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Build dev image
uses: docker/bake-action@v6
with:
targets: dev
set: |
*.cache-from=type=gha,scope=dev-arm64
*.cache-to=type=gha,scope=dev-arm64,mode=max
*.output=type=cacheonly
test-unit:
runs-on: ubuntu-24.04-arm
timeout-minutes: 120 # guardrails timeout for the whole job
needs:
- build-dev
steps:
-
name: Checkout
uses: actions/checkout@v4
-
name: Set up runner
uses: ./.github/actions/setup-runner
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Build dev image
uses: docker/bake-action@v6
with:
targets: dev
set: |
dev.cache-from=type=gha,scope=dev-arm64
-
name: Test
run: |
make -o build test-unit
-
name: Prepare reports
if: always()
run: |
mkdir -p bundles /tmp/reports
find bundles -path '*/root/*overlay2' -prune -o -type f \( -name '*-report.json' -o -name '*.log' -o -name '*.out' -o -name '*.prof' -o -name '*-report.xml' \) -print | xargs sudo tar -czf /tmp/reports.tar.gz
tar -xzf /tmp/reports.tar.gz -C /tmp/reports
sudo chown -R $(id -u):$(id -g) /tmp/reports
tree -nh /tmp/reports
-
name: Send to Codecov
uses: codecov/codecov-action@v4
with:
directory: ./bundles
env_vars: RUNNER_OS
flags: unit
token: ${{ secrets.CODECOV_TOKEN }} # used to upload coverage reports: https://github.com/moby/buildkit/pull/4660#issue-2142122533
-
name: Upload reports
if: always()
uses: actions/upload-artifact@v4
with:
name: test-reports-unit-arm64-graphdriver
path: /tmp/reports/*
retention-days: 1
test-unit-report:
runs-on: ubuntu-24.04
timeout-minutes: 10
continue-on-error: ${{ github.event_name != 'pull_request' }}
if: always()
needs:
- test-unit
steps:
-
name: Set up Go
uses: actions/setup-go@v5
with:
go-version: ${{ env.GO_VERSION }}
cache-dependency-path: vendor.sum
-
name: Download reports
uses: actions/download-artifact@v4
with:
pattern: test-reports-unit-arm64-*
path: /tmp/reports
-
name: Install teststat
run: |
go install github.com/vearutop/teststat@${{ env.TESTSTAT_VERSION }}
-
name: Create summary
run: |
find /tmp/reports -type f -name '*-go-test-report.json' -exec teststat -markdown {} \+ >> $GITHUB_STEP_SUMMARY
test-integration:
runs-on: ubuntu-24.04-arm
timeout-minutes: 120 # guardrails timeout for the whole job
continue-on-error: ${{ github.event_name != 'pull_request' }}
needs:
- build-dev
steps:
-
name: Checkout
uses: actions/checkout@v4
-
name: Set up runner
uses: ./.github/actions/setup-runner
-
name: Set up tracing
uses: ./.github/actions/setup-tracing
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Build dev image
uses: docker/bake-action@v6
with:
targets: dev
set: |
dev.cache-from=type=gha,scope=dev-arm64
-
name: Test
run: |
make -o build test-integration
env:
TEST_SKIP_INTEGRATION_CLI: 1
TESTCOVERAGE: 1
-
name: Prepare reports
if: always()
run: |
reportsPath="/tmp/reports/arm64-graphdriver"
mkdir -p bundles $reportsPath
find bundles -path '*/root/*overlay2' -prune -o -type f \( -name '*-report.json' -o -name '*.log' -o -name '*.out' -o -name '*.prof' -o -name '*-report.xml' \) -print | xargs sudo tar -czf /tmp/reports.tar.gz
tar -xzf /tmp/reports.tar.gz -C $reportsPath
sudo chown -R $(id -u):$(id -g) $reportsPath
tree -nh $reportsPath
curl -sSLf localhost:16686/api/traces?service=integration-test-client > $reportsPath/jaeger-trace.json
-
name: Send to Codecov
uses: codecov/codecov-action@v4
with:
directory: ./bundles/test-integration
env_vars: RUNNER_OS
flags: integration
token: ${{ secrets.CODECOV_TOKEN }} # used to upload coverage reports: https://github.com/moby/buildkit/pull/4660#issue-2142122533
-
name: Test daemon logs
if: always()
run: |
cat bundles/test-integration/docker.log
-
name: Upload reports
if: always()
uses: actions/upload-artifact@v4
with:
name: test-reports-integration-arm64-graphdriver
path: /tmp/reports/*
retention-days: 1
test-integration-report:
runs-on: ubuntu-24.04
timeout-minutes: 10
continue-on-error: ${{ github.event_name != 'pull_request' }}
if: always()
needs:
- test-integration
steps:
-
name: Set up Go
uses: actions/setup-go@v5
with:
go-version: ${{ env.GO_VERSION }}
cache-dependency-path: vendor.sum
-
name: Download reports
uses: actions/download-artifact@v4
with:
path: /tmp/reports
pattern: test-reports-integration-arm64-*
merge-multiple: true
-
name: Install teststat
run: |
go install github.com/vearutop/teststat@${{ env.TESTSTAT_VERSION }}
-
name: Create summary
run: |
find /tmp/reports -type f -name '*-go-test-report.json' -exec teststat -markdown {} \+ >> $GITHUB_STEP_SUMMARY


@@ -1,215 +0,0 @@
name: bin-image
# Default to 'contents: read', which grants actions to read commits.
#
# If any permission is set, any permission not included in the list is
# implicitly set to "none".
#
# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
permissions:
contents: read
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
on:
workflow_dispatch:
push:
branches:
- 'master'
- '[0-9]+.[0-9]+'
- '[0-9]+.x'
tags:
- 'v*'
pull_request:
env:
MOBYBIN_REPO_SLUG: moby/moby-bin
DOCKER_GITCOMMIT: ${{ github.sha }}
VERSION: ${{ github.ref }}
PLATFORM: Moby Engine - Nightly
PRODUCT: moby-bin
PACKAGER_NAME: The Moby Project
SETUP_BUILDX_VERSION: edge
SETUP_BUILDKIT_IMAGE: moby/buildkit:latest
jobs:
validate-dco:
if: ${{ !startsWith(github.ref, 'refs/tags/v') }}
uses: ./.github/workflows/.dco.yml
prepare:
runs-on: ubuntu-24.04
timeout-minutes: 20 # guardrails timeout for the whole job
outputs:
platforms: ${{ steps.platforms.outputs.matrix }}
steps:
-
name: Checkout
uses: actions/checkout@v4
-
name: Docker meta
id: meta
uses: docker/metadata-action@v5
with:
images: |
${{ env.MOBYBIN_REPO_SLUG }}
### versioning strategy
## push semver tag v23.0.0
# moby/moby-bin:23.0.0
# moby/moby-bin:latest
## push semver prerelease tag v23.0.0-beta.1
# moby/moby-bin:23.0.0-beta.1
## push on master
# moby/moby-bin:master
## push on 23.0 branch
# moby/moby-bin:23.0
## any push
# moby/moby-bin:sha-ad132f5
tags: |
type=semver,pattern={{version}}
type=ref,event=branch
type=ref,event=pr
type=sha
-
name: Rename meta bake definition file
# see https://github.com/docker/metadata-action/issues/381#issuecomment-1918607161
run: |
bakeFile="${{ steps.meta.outputs.bake-file }}"
mv "${bakeFile#cwd://}" "/tmp/bake-meta.json"
-
name: Upload meta bake definition
uses: actions/upload-artifact@v4
with:
name: bake-meta
path: /tmp/bake-meta.json
if-no-files-found: error
retention-days: 1
-
name: Create platforms matrix
id: platforms
run: |
echo "matrix=$(docker buildx bake bin-image-cross --print | jq -cr '.target."bin-image-cross".platforms')" >>${GITHUB_OUTPUT}
build:
runs-on: ubuntu-24.04
timeout-minutes: 120 # guardrails timeout for the whole job
needs:
- validate-dco
- prepare
if: always() && !contains(needs.*.result, 'failure') && !contains(needs.*.result, 'cancelled')
strategy:
fail-fast: false
matrix:
platform: ${{ fromJson(needs.prepare.outputs.platforms) }}
steps:
-
name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
-
name: Prepare
run: |
platform=${{ matrix.platform }}
echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
-
name: Download meta bake definition
uses: actions/download-artifact@v4
with:
name: bake-meta
path: /tmp
-
name: Set up QEMU
uses: docker/setup-qemu-action@v3
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Login to Docker Hub
if: github.event_name != 'pull_request' && github.repository == 'moby/moby'
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_MOBYBIN_USERNAME }}
password: ${{ secrets.DOCKERHUB_MOBYBIN_TOKEN }}
-
name: Build
id: bake
uses: docker/bake-action@v6
with:
source: .
files: |
./docker-bake.hcl
/tmp/bake-meta.json
targets: bin-image
set: |
*.platform=${{ matrix.platform }}
*.output=type=image,name=${{ env.MOBYBIN_REPO_SLUG }},push-by-digest=true,name-canonical=true,push=${{ github.event_name != 'pull_request' && github.repository == 'moby/moby' }}
*.tags=
-
name: Export digest
if: github.event_name != 'pull_request' && github.repository == 'moby/moby'
run: |
mkdir -p /tmp/digests
digest="${{ fromJSON(steps.bake.outputs.metadata)['bin-image']['containerimage.digest'] }}"
touch "/tmp/digests/${digest#sha256:}"
-
name: Upload digest
if: github.event_name != 'pull_request' && github.repository == 'moby/moby'
uses: actions/upload-artifact@v4
with:
name: digests-${{ env.PLATFORM_PAIR }}
path: /tmp/digests/*
if-no-files-found: error
retention-days: 1
merge:
runs-on: ubuntu-24.04
timeout-minutes: 120 # guardrails timeout for the whole job
needs:
- build
if: always() && !contains(needs.*.result, 'failure') && !contains(needs.*.result, 'cancelled') && github.event_name != 'pull_request' && github.repository == 'moby/moby'
steps:
-
name: Download meta bake definition
uses: actions/download-artifact@v4
with:
name: bake-meta
path: /tmp
-
name: Download digests
uses: actions/download-artifact@v4
with:
path: /tmp/digests
pattern: digests-*
merge-multiple: true
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_MOBYBIN_USERNAME }}
password: ${{ secrets.DOCKERHUB_MOBYBIN_TOKEN }}
-
name: Create manifest list and push
working-directory: /tmp/digests
run: |
set -x
docker buildx imagetools create $(jq -cr '.target."docker-metadata-action".tags | map("-t " + .) | join(" ")' /tmp/bake-meta.json) \
$(printf '${{ env.MOBYBIN_REPO_SLUG }}@sha256:%s ' *)
-
name: Inspect image
run: |
set -x
docker buildx imagetools inspect ${{ env.MOBYBIN_REPO_SLUG }}:$(jq -cr '.target."docker-metadata-action".args.DOCKER_META_VERSION' /tmp/bake-meta.json)


@@ -1,14 +1,5 @@
name: buildkit
# Default to 'contents: read', which grants actions to read commits.
#
# If any permission is set, any permission not included in the list is
# implicitly set to "none".
#
# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
permissions:
contents: read
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
@@ -18,61 +9,49 @@ on:
push:
branches:
- 'master'
- '[0-9]+.[0-9]+'
- '[0-9]+.x'
- '[0-9]+.[0-9]{2}'
pull_request:
env:
GO_VERSION: "1.24.3"
DESTDIR: ./build
SETUP_BUILDX_VERSION: edge
SETUP_BUILDKIT_IMAGE: moby/buildkit:latest
BUNDLES_OUTPUT: ./bundles
jobs:
validate-dco:
uses: ./.github/workflows/.dco.yml
build-linux:
runs-on: ubuntu-24.04
timeout-minutes: 120 # guardrails timeout for the whole job
build:
runs-on: ubuntu-20.04
needs:
- validate-dco
steps:
-
name: Checkout
uses: actions/checkout@v3
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
uses: docker/setup-buildx-action@v2
-
name: Build
uses: docker/bake-action@v6
uses: docker/bake-action@v2
with:
targets: binary
-
name: Upload artifacts
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v3
with:
name: binary
path: ${{ env.DESTDIR }}
path: ${{ env.BUNDLES_OUTPUT }}
if-no-files-found: error
retention-days: 1
test-linux:
runs-on: ubuntu-24.04
timeout-minutes: 120 # guardrails timeout for the whole job
test:
runs-on: ubuntu-20.04
timeout-minutes: 120
needs:
- build-linux
env:
TEST_IMAGE_BUILD: "0"
TEST_IMAGE_ID: "buildkit-tests"
- build
strategy:
fail-fast: false
matrix:
worker:
- dockerd
- dockerd-containerd
pkg:
- client
- cmd/buildctl
@@ -82,75 +61,45 @@ jobs:
typ:
- integration
steps:
-
name: Prepare
run: |
disabledFeatures="cache_backend_azblob,cache_backend_s3"
if [ "${{ matrix.worker }}" = "dockerd" ]; then
disabledFeatures="${disabledFeatures},merge_diff"
fi
echo "BUILDKIT_TEST_DISABLE_FEATURES=${disabledFeatures}" >> $GITHUB_ENV
# Expose `ACTIONS_RUNTIME_TOKEN` and `ACTIONS_CACHE_URL`, which are used
# in BuildKit's test suite to skip/unskip cache exporters:
# https://github.com/moby/buildkit/blob/567a99433ca23402d5e9b9f9124005d2e59b8861/client/client_test.go#L5407-L5411
-
name: Expose GitHub Runtime
uses: crazy-max/ghaction-github-runtime@v3
-
name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v3
with:
path: moby
-
name: Set up Go
uses: actions/setup-go@v5
with:
go-version: ${{ env.GO_VERSION }}
cache-dependency-path: vendor.sum
-
name: BuildKit ref
run: |
echo "$(./hack/buildkit-ref)" >> $GITHUB_ENV
./hack/go-mod-prepare.sh
# FIXME(thaJeztah) temporarily overriding version to use for tests; remove with the next release of buildkit
# echo "BUILDKIT_REF=$(./hack/buildkit-ref)" >> $GITHUB_ENV
echo "BUILDKIT_REF=4febae4f874bd8ef52dec30e988c8fe0bc96b3b9" >> $GITHUB_ENV
working-directory: moby
-
name: Checkout BuildKit ${{ env.BUILDKIT_REF }}
uses: actions/checkout@v4
uses: actions/checkout@v3
with:
repository: ${{ env.BUILDKIT_REPO }}
repository: "moby/buildkit"
ref: ${{ env.BUILDKIT_REF }}
path: buildkit
-
name: Set up QEMU
uses: docker/setup-qemu-action@v3
uses: docker/setup-qemu-action@v2
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
uses: docker/setup-buildx-action@v2
-
name: Download binary artifacts
uses: actions/download-artifact@v4
uses: actions/download-artifact@v3
with:
name: binary
path: ./buildkit/build/moby/
-
name: Update daemon.json
run: |
sudo rm -f /etc/docker/daemon.json
sudo rm /etc/docker/daemon.json
sudo service docker restart
docker version
docker info
-
name: Build test image
uses: docker/bake-action@v6
with:
source: .
workdir: ./buildkit
targets: integration-tests
set: |
*.output=type=docker,name=${{ env.TEST_IMAGE_ID }}
-
name: Test
run: |
@@ -158,217 +107,7 @@ jobs:
env:
CONTEXT: "."
TEST_DOCKERD: "1"
TEST_DOCKERD_BINARY: "./build/moby/dockerd"
TEST_DOCKERD_BINARY: "./build/moby/binary-daemon/dockerd"
TESTPKGS: "./${{ matrix.pkg }}"
TESTFLAGS: "-v --parallel=1 --timeout=30m --run=//worker=${{ matrix.worker }}$"
working-directory: buildkit
build-windows:
runs-on: windows-2022
timeout-minutes: 120
needs:
- validate-dco
env:
GOPATH: ${{ github.workspace }}\go
GOBIN: ${{ github.workspace }}\go\bin
BIN_OUT: ${{ github.workspace }}\out
WINDOWS_BASE_IMAGE: mcr.microsoft.com/windows/servercore
WINDOWS_BASE_TAG_2022: ltsc2022
TEST_IMAGE_NAME: moby:test
TEST_CTN_NAME: moby
defaults:
run:
working-directory: ${{ env.GOPATH }}/src/github.com/docker/docker
steps:
- name: Checkout
uses: actions/checkout@v4
with:
path: ${{ env.GOPATH }}/src/github.com/docker/docker
- name: Env
run: |
Get-ChildItem Env: | Out-String
- name: Moby - Init
run: |
New-Item -ItemType "directory" -Path "${{ github.workspace }}\go-build"
New-Item -ItemType "directory" -Path "${{ github.workspace }}\go\pkg\mod"
echo "WINDOWS_BASE_IMAGE_TAG=${{ env.WINDOWS_BASE_TAG_2022 }}" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: ${{ env.GO_VERSION }}
cache-dependency-path: vendor.sum
- name: Cache
uses: actions/cache@v4
with:
path: |
~\AppData\Local\go-build
~\go\pkg\mod
${{ github.workspace }}\go-build
${{ env.GOPATH }}\pkg\mod
key: ${{ inputs.os }}-${{ github.job }}-${{ hashFiles('**/vendor.sum') }}
restore-keys: |
${{ inputs.os }}-${{ github.job }}-
- name: Docker info
run: |
docker info
- name: Build base image
run: |
& docker build `
--build-arg WINDOWS_BASE_IMAGE `
--build-arg WINDOWS_BASE_IMAGE_TAG `
--build-arg GO_VERSION `
-t ${{ env.TEST_IMAGE_NAME }} `
-f Dockerfile.windows .
- name: Build binaries
run: |
& docker run --name ${{ env.TEST_CTN_NAME }} -e "DOCKER_GITCOMMIT=${{ github.sha }}" `
-v "${{ github.workspace }}\go-build:C:\Users\ContainerAdministrator\AppData\Local\go-build" `
-v "${{ github.workspace }}\go\pkg\mod:C:\gopath\pkg\mod" `
${{ env.TEST_IMAGE_NAME }} hack\make.ps1 -Daemon -Client
go install github.com/distribution/distribution/v3/cmd/registry@latest
- name: Checkout BuildKit
uses: actions/checkout@v4
with:
repository: moby/buildkit
ref: master
path: buildkit
- name: Add buildctl to binaries
run: |
go install ./cmd/buildctl
working-directory: buildkit
- name: Copy artifacts
run: |
New-Item -ItemType "directory" -Path "${{ env.BIN_OUT }}"
docker cp "${{ env.TEST_CTN_NAME }}`:c`:\gopath\src\github.com\docker\docker\bundles\docker.exe" ${{ env.BIN_OUT }}\
docker cp "${{ env.TEST_CTN_NAME }}`:c`:\gopath\src\github.com\docker\docker\bundles\dockerd.exe" ${{ env.BIN_OUT }}\
docker cp "${{ env.TEST_CTN_NAME }}`:c`:\gopath\bin\gotestsum.exe" ${{ env.BIN_OUT }}\
docker cp "${{ env.TEST_CTN_NAME }}`:c`:\containerd\bin\containerd.exe" ${{ env.BIN_OUT }}\
docker cp "${{ env.TEST_CTN_NAME }}`:c`:\containerd\bin\containerd-shim-runhcs-v1.exe" ${{ env.BIN_OUT }}\
cp ${{ env.GOPATH }}\bin\registry.exe ${{ env.BIN_OUT }}
cp ${{ env.GOPATH }}\bin\buildctl.exe ${{ env.BIN_OUT }}
- name: Upload artifacts
uses: actions/upload-artifact@v4
with:
name: build-windows
path: ${{ env.BIN_OUT }}/*
if-no-files-found: error
retention-days: 2
test-windows:
runs-on: windows-2022
timeout-minutes: 120 # guardrails timeout for the whole job
needs:
- build-windows
env:
TEST_IMAGE_BUILD: "0"
TEST_IMAGE_ID: "buildkit-tests"
GOPATH: ${{ github.workspace }}\go
GOBIN: ${{ github.workspace }}\go\bin
BIN_OUT: ${{ github.workspace }}\out
TESTFLAGS: "-v --timeout=90m"
TEST_DOCKERD: "1"
strategy:
fail-fast: false
matrix:
worker:
- dockerd-containerd
pkg:
- ./client#1-4
- ./client#2-4
- ./client#3-4
- ./client#4-4
- ./cmd/buildctl
- ./frontend
- ./frontend/dockerfile#1-12
- ./frontend/dockerfile#2-12
- ./frontend/dockerfile#3-12
- ./frontend/dockerfile#4-12
- ./frontend/dockerfile#5-12
- ./frontend/dockerfile#6-12
- ./frontend/dockerfile#7-12
- ./frontend/dockerfile#8-12
- ./frontend/dockerfile#9-12
- ./frontend/dockerfile#10-12
- ./frontend/dockerfile#11-12
- ./frontend/dockerfile#12-12
steps:
- name: Prepare
shell: bash
run: |
disabledFeatures="cache_backend_azblob,cache_backend_s3"
if [ "${{ matrix.worker }}" = "dockerd" ]; then
disabledFeatures="${disabledFeatures},merge_diff"
fi
echo "BUILDKIT_TEST_DISABLE_FEATURES=${disabledFeatures}" >> $GITHUB_ENV
- name: Expose GitHub Runtime
uses: crazy-max/ghaction-github-runtime@v3
- name: Checkout
uses: actions/checkout@v4
with:
path: moby
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: ${{ env.GO_VERSION }}
cache-dependency-path: vendor.sum
- name: BuildKit ref
shell: bash
run: |
echo "$(./hack/buildkit-ref)" >> $GITHUB_ENV
working-directory: moby
- name: Checkout BuildKit ${{ env.BUILDKIT_REF }}
uses: actions/checkout@v4
with:
repository: ${{ env.BUILDKIT_REPO }}
ref: ${{ env.BUILDKIT_REF }}
path: buildkit
- name: Download Moby artifacts
uses: actions/download-artifact@v4
with:
name: build-windows
path: ${{ env.BIN_OUT }}
- name: Add binaries to PATH
run: |
ls ${{ env.BIN_OUT }}
Write-Output "${{ env.BIN_OUT }}" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
- name: Test Prep
shell: bash
run: |
TESTPKG=$(echo "${{ matrix.pkg }}" | awk '-F#' '{print $1}')
echo "TESTPKG=$TESTPKG" >> $GITHUB_ENV
echo "TEST_REPORT_NAME=${{ github.job }}-$(echo "${{ matrix.pkg }}-${{ matrix.worker }}" | tr -dc '[:alnum:]-\n\r' | tr '[:upper:]' '[:lower:]')" >> $GITHUB_ENV
testFlags="${{ env.TESTFLAGS }}"
testSlice=$(echo "${{ matrix.pkg }}" | awk '-F#' '{print $2}')
testSliceOffset=""
if [ -n "$testSlice" ]; then
testSliceOffset="slice=$testSlice/"
fi
if [ -n "${{ matrix.worker }}" ]; then
testFlags="${testFlags} --run=TestIntegration/$testSliceOffset.*/worker=${{ matrix.worker }}"
fi
echo "TESTFLAGS=${testFlags}" >> $GITHUB_ENV
- name: Test
shell: bash
run: |
mkdir -p ./bin/testreports
gotestsum \
--jsonfile="./bin/testreports/go-test-report-${{ env.TEST_REPORT_NAME }}.json" \
--junitfile="./bin/testreports/junit-report-${{ env.TEST_REPORT_NAME }}.xml" \
--packages="${{ env.TESTPKG }}" \
-- \
"-mod=vendor" \
"-coverprofile" "./bin/testreports/coverage-${{ env.TEST_REPORT_NAME }}.txt" \
"-covermode" "atomic" ${{ env.TESTFLAGS }}
TESTFLAGS: "-v --parallel=1 --timeout=30m --run=//worker=dockerd$"
working-directory: buildkit

@@ -1,14 +1,5 @@
name: ci
# Default to 'contents: read', which grants actions permission to read commits.
#
# If any permission is set, any permission not included in the list is
# implicitly set to "none".
#
# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
permissions:
contents: read
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
@@ -19,21 +10,19 @@ on:
branches:
- 'master'
- '[0-9]+.[0-9]+'
- '[0-9]+.x'
tags:
- 'v*'
pull_request:
env:
DESTDIR: ./build
SETUP_BUILDX_VERSION: edge
SETUP_BUILDKIT_IMAGE: moby/buildkit:latest
BUNDLES_OUTPUT: ./bundles
jobs:
validate-dco:
uses: ./.github/workflows/.dco.yml
build:
runs-on: ubuntu-24.04
timeout-minutes: 20 # guardrails timeout for the whole job
runs-on: ubuntu-20.04
needs:
- validate-dco
strategy:
@@ -44,59 +33,50 @@ jobs:
- dynbinary
steps:
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
name: Checkout
uses: actions/checkout@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
fetch-depth: 0
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
-
name: Build
uses: docker/bake-action@v6
uses: docker/bake-action@v2
with:
targets: ${{ matrix.target }}
-
name: List artifacts
run: |
tree -nh ${{ env.DESTDIR }}
-
name: Check artifacts
run: |
find ${{ env.DESTDIR }} -type f -exec file -e ascii -- {} +
prepare-cross:
runs-on: ubuntu-24.04
timeout-minutes: 20 # guardrails timeout for the whole job
needs:
- validate-dco
outputs:
matrix: ${{ steps.platforms.outputs.matrix }}
steps:
-
name: Checkout
uses: actions/checkout@v4
-
name: Create matrix
id: platforms
run: |
matrix="$(docker buildx bake binary-cross --print | jq -cr '.target."binary-cross".platforms')"
echo "matrix=$matrix" >> $GITHUB_OUTPUT
-
name: Show matrix
run: |
echo ${{ steps.platforms.outputs.matrix }}
name: Upload artifacts
uses: actions/upload-artifact@v3
with:
name: ${{ matrix.target }}
path: ${{ env.BUNDLES_OUTPUT }}
if-no-files-found: error
retention-days: 7
cross:
runs-on: ubuntu-24.04
timeout-minutes: 20 # guardrails timeout for the whole job
runs-on: ubuntu-20.04
needs:
- validate-dco
- prepare-cross
strategy:
fail-fast: false
matrix:
platform: ${{ fromJson(needs.prepare-cross.outputs.matrix) }}
platform:
- linux/amd64
- linux/arm/v5
- linux/arm/v6
- linux/arm/v7
- linux/arm64
- linux/ppc64le
- linux/s390x
- windows/amd64
- windows/arm64
steps:
-
name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 0
-
name: Prepare
run: |
@@ -104,73 +84,19 @@ jobs:
echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
uses: docker/setup-buildx-action@v2
-
name: Build
uses: docker/bake-action@v6
uses: docker/bake-action@v2
with:
targets: all
set: |
*.platform=${{ matrix.platform }}
-
name: List artifacts
run: |
tree -nh ${{ env.DESTDIR }}
-
name: Check artifacts
run: |
find ${{ env.DESTDIR }} -type f -exec file -e ascii -- {} +
govulncheck:
runs-on: ubuntu-24.04
timeout-minutes: 120 # guardrails timeout for the whole job
permissions:
# required to write sarif report
security-events: write
# required to check out the repository
contents: read
steps:
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Run
uses: docker/bake-action@v6
with:
targets: govulncheck
targets: cross
env:
GOVULNCHECK_FORMAT: sarif
DOCKER_CROSSPLATFORMS: ${{ matrix.platform }}
-
name: Upload SARIF report
if: ${{ github.event_name != 'pull_request' && github.repository == 'moby/moby' }}
uses: github/codeql-action/upload-sarif@v3
name: Upload artifacts
uses: actions/upload-artifact@v3
with:
sarif_file: ${{ env.DESTDIR }}/govulncheck.out
build-dind:
runs-on: ubuntu-24.04
needs:
- validate-dco
steps:
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Build dind image
uses: docker/bake-action@v6
with:
targets: dind
set: |
*.output=type=cacheonly
name: cross-${{ env.PLATFORM_PAIR }}
path: ${{ env.BUNDLES_OUTPUT }}
if-no-files-found: error
retention-days: 7

@@ -1,71 +0,0 @@
name: codeql
# Default to 'contents: read', which grants actions permission to read commits.
#
# If any permission is set, any permission not included in the list is
# implicitly set to "none".
#
# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
permissions:
contents: read
on:
push:
branches:
- 'master'
- '[0-9]+.[0-9]+'
- '[0-9]+.x'
tags:
- 'v*'
pull_request:
# The branches below must be a subset of the branches above
branches: ["master"]
schedule:
# ┌───────────── minute (0 - 59)
# │ ┌───────────── hour (0 - 23)
# │ │ ┌───────────── day of the month (1 - 31)
# │ │ │ ┌───────────── month (1 - 12)
# │ │ │ │ ┌───────────── day of the week (0 - 6) (Sunday to Saturday)
# │ │ │ │ │
# │ │ │ │ │
# │ │ │ │ │
# * * * * *
- cron: '0 9 * * 4'
jobs:
codeql:
runs-on: ubuntu-24.04
timeout-minutes: 10
permissions:
actions: read
contents: read
security-events: write
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 2
# CodeQL 2.16.4's auto-build added support for multi-module repositories,
# and tries to be smart by searching for modules in every directory,
# including vendor directories. If no module is found, it creates one,
# which is ... not what we want, so let's give it a "go.mod".
# see: https://github.com/docker/cli/pull/4944#issuecomment-2002034698
- name: Create go.mod
run: |
ln -s vendor.mod go.mod
ln -s vendor.sum go.sum
- name: Update Go
uses: actions/setup-go@v5
with:
go-version: "1.24.3"
- name: Initialize CodeQL
uses: github/codeql-action/init@v3
with:
languages: go
- name: Autobuild
uses: github/codeql-action/autobuild@v3
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v3
with:
category: "/language:go"

@@ -1,14 +1,5 @@
name: test
# Default to 'contents: read', which grants actions permission to read commits.
#
# If any permission is set, any permission not included in the list is
# implicitly set to "none".
#
# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
permissions:
contents: read
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
@@ -19,23 +10,24 @@ on:
branches:
- 'master'
- '[0-9]+.[0-9]+'
- '[0-9]+.x'
tags:
- 'v*'
pull_request:
env:
GO_VERSION: "1.24.3"
GIT_PAGER: "cat"
PAGER: "cat"
SETUP_BUILDX_VERSION: edge
SETUP_BUILDKIT_IMAGE: moby/buildkit:latest
GO_VERSION: 1.19.3
GOTESTLIST_VERSION: v0.2.0
TESTSTAT_VERSION: v0.1.3
ITG_CLI_MATRIX_SIZE: 6
DOCKER_EXPERIMENTAL: 1
DOCKER_GRAPHDRIVER: overlay2
jobs:
validate-dco:
uses: ./.github/workflows/.dco.yml
build-dev:
runs-on: ubuntu-24.04
timeout-minutes: 120 # guardrails timeout for the whole job
runs-on: ubuntu-20.04
needs:
- validate-dco
strategy:
@@ -51,16 +43,15 @@ jobs:
if [ "${{ matrix.mode }}" = "systemd" ]; then
echo "SYSTEMD=true" >> $GITHUB_ENV
fi
-
name: Checkout
uses: actions/checkout@v3
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
uses: docker/setup-buildx-action@v2
-
name: Build dev image
uses: docker/bake-action@v6
uses: docker/bake-action@v2
with:
targets: dev
set: |
@@ -68,31 +59,8 @@ jobs:
*.cache-to=type=gha,scope=dev${{ matrix.mode }},mode=max
*.output=type=cacheonly
test:
needs:
- build-dev
- validate-dco
uses: ./.github/workflows/.test.yml
secrets: inherit
strategy:
fail-fast: false
matrix:
storage:
- graphdriver
- snapshotter
with:
storage: ${{ matrix.storage }}
test-unit:
needs:
- build-dev
- validate-dco
uses: ./.github/workflows/.test-unit.yml
secrets: inherit
validate-prepare:
runs-on: ubuntu-24.04
timeout-minutes: 10 # guardrails timeout for the whole job
runs-on: ubuntu-20.04
needs:
- validate-dco
outputs:
@@ -100,12 +68,12 @@ jobs:
steps:
-
name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v3
-
name: Create matrix
id: scripts
run: |
scripts=$(cd ./hack/validate && jq -nc '$ARGS.positional - ["all", "default", "dco"] | map(select(test("[.]")|not)) + ["generate-files"]' --args *)
scripts=$(jq -ncR '[inputs]' <<< "$(ls -I .validate -I all -I default -I dco -I golangci-lint.yml -I yamllint.yaml -A ./hack/validate/)")
echo "matrix=$scripts" >> $GITHUB_OUTPUT
-
name: Show matrix
@@ -113,8 +81,7 @@ jobs:
echo ${{ steps.scripts.outputs.matrix }}
validate:
runs-on: ubuntu-24.04
timeout-minutes: 30 # guardrails timeout for the whole job
runs-on: ubuntu-20.04
needs:
- validate-prepare
- build-dev
@@ -125,7 +92,7 @@ jobs:
steps:
-
name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v3
with:
fetch-depth: 0
-
@@ -133,14 +100,10 @@ jobs:
uses: ./.github/actions/setup-runner
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
uses: docker/setup-buildx-action@v2
-
name: Build dev image
uses: docker/bake-action@v6
uses: docker/bake-action@v2
with:
targets: dev
set: |
@@ -150,57 +113,392 @@ jobs:
run: |
make -o build validate-${{ matrix.script }}
smoke-prepare:
runs-on: ubuntu-24.04
timeout-minutes: 10 # guardrails timeout for the whole job
unit:
runs-on: ubuntu-20.04
timeout-minutes: 120
needs:
- validate-dco
outputs:
matrix: ${{ steps.platforms.outputs.matrix }}
- build-dev
steps:
-
name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v3
-
name: Set up runner
uses: ./.github/actions/setup-runner
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
-
name: Build dev image
uses: docker/bake-action@v2
with:
targets: dev
set: |
dev.cache-from=type=gha,scope=dev
-
name: Test
run: |
make -o build test-unit
-
name: Prepare reports
if: always()
run: |
mkdir -p bundles /tmp/reports
find bundles -path '*/root/*overlay2' -prune -o -type f \( -name '*-report.json' -o -name '*.log' -o -name '*.out' -o -name '*.prof' -o -name '*-report.xml' \) -print | xargs sudo tar -czf /tmp/reports.tar.gz
tar -xzf /tmp/reports.tar.gz -C /tmp/reports
sudo chown -R $(id -u):$(id -g) /tmp/reports
tree -nh /tmp/reports
-
name: Send to Codecov
uses: codecov/codecov-action@v3
with:
directory: ./bundles
env_vars: RUNNER_OS
flags: unit
-
name: Upload reports
if: always()
uses: actions/upload-artifact@v3
with:
name: unit-reports
path: /tmp/reports/*
unit-report:
runs-on: ubuntu-20.04
if: always()
needs:
- unit
steps:
-
name: Set up Go
uses: actions/setup-go@v3
with:
go-version: ${{ env.GO_VERSION }}
-
name: Download reports
uses: actions/download-artifact@v3
with:
name: unit-reports
path: /tmp/reports
-
name: Install teststat
run: |
go install github.com/vearutop/teststat@${{ env.TESTSTAT_VERSION }}
-
name: Create summary
run: |
teststat -markdown $(find /tmp/reports -type f -name '*.json' -print0 | xargs -0) >> $GITHUB_STEP_SUMMARY
docker-py:
runs-on: ubuntu-20.04
timeout-minutes: 120
needs:
- build-dev
steps:
-
name: Checkout
uses: actions/checkout@v3
-
name: Set up runner
uses: ./.github/actions/setup-runner
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
-
name: Build dev image
uses: docker/bake-action@v2
with:
targets: dev
set: |
dev.cache-from=type=gha,scope=dev
-
name: Test
run: |
make -o build test-docker-py
-
name: Prepare reports
if: always()
run: |
mkdir -p bundles /tmp/reports
find bundles -path '*/root/*overlay2' -prune -o -type f \( -name '*-report.json' -o -name '*.log' -o -name '*.out' -o -name '*.prof' -o -name '*-report.xml' \) -print | xargs sudo tar -czf /tmp/reports.tar.gz
tar -xzf /tmp/reports.tar.gz -C /tmp/reports
sudo chown -R $(id -u):$(id -g) /tmp/reports
tree -nh /tmp/reports
-
name: Test daemon logs
if: always()
run: |
cat bundles/test-docker-py/docker.log
-
name: Upload reports
if: always()
uses: actions/upload-artifact@v3
with:
name: docker-py-reports
path: /tmp/reports/*
integration-flaky:
runs-on: ubuntu-20.04
timeout-minutes: 120
needs:
- build-dev
steps:
-
name: Checkout
uses: actions/checkout@v3
-
name: Set up runner
uses: ./.github/actions/setup-runner
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
-
name: Build dev image
uses: docker/bake-action@v2
with:
targets: dev
set: |
dev.cache-from=type=gha,scope=dev
-
name: Test
run: |
make -o build test-integration-flaky
env:
TEST_SKIP_INTEGRATION_CLI: 1
integration:
runs-on: ${{ matrix.os }}
timeout-minutes: 120
needs:
- build-dev
strategy:
fail-fast: false
matrix:
os:
- ubuntu-20.04
- ubuntu-22.04
mode:
- ""
- rootless
- systemd
#- rootless-systemd FIXME: https://github.com/moby/moby/issues/44084
steps:
-
name: Checkout
uses: actions/checkout@v3
-
name: Set up runner
uses: ./.github/actions/setup-runner
-
name: Prepare
run: |
CACHE_DEV_SCOPE=dev
if [[ "${{ matrix.mode }}" == *"rootless"* ]]; then
echo "DOCKER_ROOTLESS=1" >> $GITHUB_ENV
fi
if [[ "${{ matrix.mode }}" == *"systemd"* ]]; then
echo "SYSTEMD=true" >> $GITHUB_ENV
CACHE_DEV_SCOPE="${CACHE_DEV_SCOPE}systemd"
fi
echo "CACHE_DEV_SCOPE=${CACHE_DEV_SCOPE}" >> $GITHUB_ENV
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
-
name: Build dev image
uses: docker/bake-action@v2
with:
targets: dev
set: |
dev.cache-from=type=gha,scope=${{ env.CACHE_DEV_SCOPE }}
-
name: Test
run: |
make -o build test-integration
env:
TEST_SKIP_INTEGRATION_CLI: 1
TESTCOVERAGE: 1
-
name: Prepare reports
if: always()
run: |
reportsPath="/tmp/reports/${{ matrix.os }}"
if [ -n "${{ matrix.mode }}" ]; then
reportsPath="$reportsPath-${{ matrix.mode }}"
fi
mkdir -p bundles $reportsPath
find bundles -path '*/root/*overlay2' -prune -o -type f \( -name '*-report.json' -o -name '*.log' -o -name '*.out' -o -name '*.prof' -o -name '*-report.xml' \) -print | xargs sudo tar -czf /tmp/reports.tar.gz
tar -xzf /tmp/reports.tar.gz -C $reportsPath
sudo chown -R $(id -u):$(id -g) $reportsPath
tree -nh $reportsPath
-
name: Send to Codecov
uses: codecov/codecov-action@v3
with:
directory: ./bundles/test-integration
env_vars: RUNNER_OS
flags: integration,${{ matrix.mode }}
-
name: Test daemon logs
if: always()
run: |
cat bundles/test-integration/docker.log
-
name: Upload reports
if: always()
uses: actions/upload-artifact@v3
with:
name: integration-reports
path: /tmp/reports/*
integration-report:
runs-on: ubuntu-20.04
if: always()
needs:
- integration
steps:
-
name: Set up Go
uses: actions/setup-go@v3
with:
go-version: ${{ env.GO_VERSION }}
-
name: Download reports
uses: actions/download-artifact@v3
with:
name: integration-reports
path: /tmp/reports
-
name: Install teststat
run: |
go install github.com/vearutop/teststat@${{ env.TESTSTAT_VERSION }}
-
name: Create summary
run: |
teststat -markdown $(find /tmp/reports -type f -name '*.json' -print0 | xargs -0) >> $GITHUB_STEP_SUMMARY
integration-cli-prepare:
runs-on: ubuntu-20.04
needs:
- validate-dco
outputs:
matrix: ${{ steps.tests.outputs.matrix }}
steps:
-
name: Checkout
uses: actions/checkout@v3
-
name: Set up Go
uses: actions/setup-go@v3
with:
go-version: ${{ env.GO_VERSION }}
-
name: Install gotestlist
run:
go install github.com/crazy-max/gotestlist/cmd/gotestlist@${{ env.GOTESTLIST_VERSION }}
-
name: Create matrix
id: platforms
id: tests
working-directory: ./integration-cli
run: |
matrix="$(docker buildx bake binary-smoketest --print | jq -cr '.target."binary-smoketest".platforms')"
# Distribute integration-cli tests for the matrix in integration-test job.
# Also prepend ./... to the matrix. This is a special case to run "Test integration" step exclusively.
matrix="$(gotestlist -d ${{ env.ITG_CLI_MATRIX_SIZE }} ./...)"
matrix="$(echo "$matrix" | jq -c '. |= ["./..."] + .')"
echo "matrix=$matrix" >> $GITHUB_OUTPUT
-
name: Show matrix
run: |
echo ${{ steps.platforms.outputs.matrix }}
echo ${{ steps.tests.outputs.matrix }}
smoke:
runs-on: ubuntu-24.04
timeout-minutes: 20 # guardrails timeout for the whole job
integration-cli:
runs-on: ubuntu-20.04
timeout-minutes: 120
needs:
- smoke-prepare
- build-dev
- integration-cli-prepare
strategy:
fail-fast: false
matrix:
platform: ${{ fromJson(needs.smoke-prepare.outputs.matrix) }}
test: ${{ fromJson(needs.integration-cli-prepare.outputs.matrix) }}
steps:
-
name: Prepare
run: |
platform=${{ matrix.platform }}
echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
name: Checkout
uses: actions/checkout@v3
-
name: Set up QEMU
uses: docker/setup-qemu-action@v3
name: Set up runner
uses: ./.github/actions/setup-runner
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
uses: docker/setup-buildx-action@v2
-
name: Build dev image
uses: docker/bake-action@v2
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
targets: dev
set: |
dev.cache-from=type=gha,scope=dev
-
name: Test
uses: docker/bake-action@v6
run: |
make -o build test-integration
env:
TEST_SKIP_INTEGRATION: 1
TESTCOVERAGE: 1
TESTFLAGS: "-test.run (${{ matrix.test }})/"
-
name: Prepare reports
if: always()
run: |
reportsPath=/tmp/reports/$(echo -n "${{ matrix.test }}" | sha256sum | cut -d " " -f 1)
mkdir -p bundles $reportsPath
echo "${{ matrix.test }}" | tr -s '|' '\n' | tee -a "$reportsPath/tests.txt"
find bundles -path '*/root/*overlay2' -prune -o -type f \( -name '*-report.json' -o -name '*.log' -o -name '*.out' -o -name '*.prof' -o -name '*-report.xml' \) -print | xargs sudo tar -czf /tmp/reports.tar.gz
tar -xzf /tmp/reports.tar.gz -C $reportsPath
sudo chown -R $(id -u):$(id -g) $reportsPath
tree -nh $reportsPath
-
name: Send to Codecov
uses: codecov/codecov-action@v3
with:
targets: binary-smoketest
set: |
*.platform=${{ matrix.platform }}
directory: ./bundles/test-integration
env_vars: RUNNER_OS
flags: integration-cli
-
name: Test daemon logs
if: always()
run: |
cat bundles/test-integration/docker.log
-
name: Upload reports
if: always()
uses: actions/upload-artifact@v3
with:
name: integration-cli-reports
path: /tmp/reports/*
integration-cli-report:
runs-on: ubuntu-20.04
if: always()
needs:
- integration-cli
steps:
-
name: Set up Go
uses: actions/setup-go@v3
with:
go-version: ${{ env.GO_VERSION }}
-
name: Download reports
uses: actions/download-artifact@v3
with:
name: integration-cli-reports
path: /tmp/reports
-
name: Install teststat
run: |
go install github.com/vearutop/teststat@${{ env.TESTSTAT_VERSION }}
-
name: Create summary
run: |
teststat -markdown $(find /tmp/reports -type f -name '*.json' -print0 | xargs -0) >> $GITHUB_STEP_SUMMARY

@@ -1,88 +0,0 @@
name: validate-pr
# Default to 'contents: read', which grants actions permission to read commits.
#
# If any permission is set, any permission not included in the list is
# implicitly set to "none".
#
# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
permissions:
contents: read
on:
pull_request:
types: [opened, edited, labeled, unlabeled, synchronize]
jobs:
check-area-label:
runs-on: ubuntu-24.04
timeout-minutes: 120 # guardrails timeout for the whole job
steps:
- name: Missing `area/` label
if: contains(join(github.event.pull_request.labels.*.name, ','), 'impact/') && !contains(join(github.event.pull_request.labels.*.name, ','), 'area/')
run: |
echo "::error::Every PR with an 'impact/*' label should also have an 'area/*' label"
exit 1
- name: OK
run: exit 0
check-changelog:
runs-on: ubuntu-24.04
timeout-minutes: 120 # guardrails timeout for the whole job
env:
HAS_IMPACT_LABEL: ${{ contains(join(github.event.pull_request.labels.*.name, ','), 'impact/') }}
PR_BODY: |
${{ github.event.pull_request.body }}
steps:
- name: Check changelog description
run: |
# Extract the `markdown changelog` note code block
block=$(echo -n "$PR_BODY" | tr -d '\r' | awk '/^```markdown changelog$/{flag=1;next}/^```$/{flag=0}flag')
# Strip empty lines
desc=$(echo "$block" | awk NF)
if [ "$HAS_IMPACT_LABEL" = "true" ]; then
if [ -z "$desc" ]; then
echo "::error::Changelog section is empty. Please provide a description for the changelog."
exit 1
fi
len=$(echo -n "$desc" | wc -c)
if [[ $len -le 6 ]]; then
echo "::error::Description looks too short: $desc"
exit 1
fi
else
if [ -n "$desc" ]; then
echo "::error::PR has a changelog description, but no changelog label"
echo "::error::Please add the relevant 'impact/' label to the PR or remove the changelog description"
exit 1
fi
fi
echo "This PR will be included in the release notes with the following note:"
echo "$desc"
check-pr-branch:
runs-on: ubuntu-24.04
timeout-minutes: 120 # guardrails timeout for the whole job
env:
PR_TITLE: ${{ github.event.pull_request.title }}
steps:
# Backports or PRs that target a release branch directly should mention the target branch in the title, for example:
# [X.Y backport] Some change that needs backporting to X.Y
# [X.Y] Change directly targeting the X.Y branch
- name: Check release branch
id: title_branch
run: |
# get the intended major version prefix ("[27.1 backport]" -> "27.") from the PR title.
[[ "$PR_TITLE" =~ ^\[([0-9]*\.)[^]]*\] ]] && branch="${BASH_REMATCH[1]}"
# get major version prefix from the release branch ("27.x" -> "27.")
[[ "$GITHUB_BASE_REF" =~ ^([0-9]*\.) ]] && target_branch="${BASH_REMATCH[1]}" || target_branch="$GITHUB_BASE_REF"
if [[ "$target_branch" != "$branch" ]] && ! [[ "$GITHUB_BASE_REF" == "master" && "$branch" == "" ]]; then
echo "::error::PR is opened against the $GITHUB_BASE_REF branch, but its title suggests otherwise."
exit 1
fi
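A minimal sketch of how the title check above behaves, with hypothetical inputs (neither value comes from this repository):
# PR_TITLE='[27.1 backport] Fix restart policy'  GITHUB_BASE_REF='27.x'
[[ "$PR_TITLE" =~ ^\[([0-9]*\.)[^]]*\] ]] && branch="${BASH_REMATCH[1]}"        # branch="27."
[[ "$GITHUB_BASE_REF" =~ ^([0-9]*\.) ]] && target_branch="${BASH_REMATCH[1]}"   # target_branch="27."
# "27." equals "27.", so the check passes; the same title against "master" would
# leave target_branch="master" and make the step fail with the error above.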

@@ -1,14 +1,5 @@
name: windows-2019
# Default to 'contents: read', which grants actions permission to read commits.
#
# If any permission is set, any permission not included in the list is
# implicitly set to "none".
#
# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
permissions:
contents: read
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
@@ -22,21 +13,10 @@ jobs:
validate-dco:
uses: ./.github/workflows/.dco.yml
test-prepare:
uses: ./.github/workflows/.test-prepare.yml
needs:
- validate-dco
run:
needs:
- test-prepare
- validate-dco
uses: ./.github/workflows/.windows.yml
secrets: inherit
strategy:
fail-fast: false
matrix:
storage: ${{ fromJson(needs.test-prepare.outputs.matrix) }}
with:
os: windows-2019
storage: ${{ matrix.storage }}
send_coverage: false

@@ -1,14 +1,5 @@
name: windows-2022
# Default to 'contents: read', which grants actions permission to read commits.
#
# If any permission is set, any permission not included in the list is
# implicitly set to "none".
#
# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
permissions:
contents: read
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
@@ -19,28 +10,16 @@ on:
branches:
- 'master'
- '[0-9]+.[0-9]+'
- '[0-9]+.x'
pull_request:
jobs:
validate-dco:
uses: ./.github/workflows/.dco.yml
test-prepare:
uses: ./.github/workflows/.test-prepare.yml
needs:
- validate-dco
run:
needs:
- test-prepare
- validate-dco
uses: ./.github/workflows/.windows.yml
secrets: inherit
strategy:
fail-fast: false
matrix:
storage: ${{ fromJson(needs.test-prepare.outputs.matrix) }}
with:
os: windows-2022
storage: ${{ matrix.storage }}
send_coverage: true

.gitignore

@@ -13,10 +13,13 @@ thumbs.db
.bashrc
.editorconfig
# top-level go.mod is not meant to be checked in
/go.mod
# build artifacts
/bundles/
/cli/winresources/dockerd/*.syso
/cli/winresources/dockerd/winres.json
bundles/
cli/winresources/*/*.syso
cli/winresources/*/winres.json
contrib/builder/rpm/*/changelog
# ci artifacts
*.exe

@@ -1,263 +0,0 @@
version: "2"
run:
# prevent golangci-lint from deducing the go version to lint for through go.mod,
# which causes it to fall back to go1.17 semantics.
go: "1.24.3"
concurrency: 2
# Only supported with go modules enabled (build flag -mod=vendor only valid when using modules)
# modules-download-mode: vendor
formatters:
enable:
- gofmt
- goimports
linters:
enable:
- asasalint # Detects "[]any" used as argument for variadic "func(...any)".
- copyloopvar # Detects places where loop variables are copied.
- depguard
- dogsled # Detects assignments with too many blank identifiers.
- dupword # Detects duplicate words.
- durationcheck # Detect cases where two time.Duration values are being multiplied in possibly erroneous ways.
- errchkjson # Detects unsupported types passed to json encoding functions and reports if checks for the returned error can be omitted.
- exhaustive # Detects missing options in enum switch statements.
- exptostd # Detects functions from golang.org/x/exp/ that can be replaced by std functions.
- fatcontext # Detects nested contexts in loops and function literals.
- forbidigo
- gocheckcompilerdirectives # Detects invalid go compiler directive comments (//go:).
- gosec # Detects security problems.
- govet
- iface # Detects incorrect use of interfaces. Currently only used for "identical" interfaces in the same package.
- importas
- ineffassign
- makezero # Finds slice declarations with non-zero initial length.
- mirror # Detects wrong mirror patterns of bytes/strings usage.
- misspell # Detects commonly misspelled English words in comments.
- nakedret # Detects uses of naked returns.
- nilnesserr # Detects returning nil errors. It combines the features of nilness and nilerr,
- nosprintfhostport # Detects misuse of Sprintf to construct a host with port in a URL.
- reassign # Detects reassigning a top-level variable in another package.
- revive # Metalinter; drop-in replacement for golint.
- spancheck # Detects mistakes with OpenTelemetry/Census spans.
- staticcheck
- unconvert # Detects unnecessary type conversions.
- unused
- usestdlibvars # Detects the possibility to use variables/constants from the Go standard library.
- wastedassign # Detects wasted assignment statements.
disable:
- errcheck
- spancheck # FIXME
settings:
depguard:
rules:
main:
deny:
- pkg: "github.com/stretchr/testify/assert"
desc: Use "gotest.tools/v3/assert" instead
- pkg: "github.com/stretchr/testify/require"
desc: Use "gotest.tools/v3/assert" instead
- pkg: "github.com/stretchr/testify/suite"
desc: Do not use
- pkg: "github.com/containerd/containerd/pkg/userns"
desc: Use github.com/moby/sys/userns instead.
- pkg: "github.com/tonistiigi/fsutil"
desc: The fsutil module does not have a stable API, so we should not have a direct dependency unless necessary.
dupword:
ignore:
- "true" # some tests use this as expected output
- "false" # some tests use this as expected output
- "root" # for tests using "ls" output with files owned by "root:root"
exhaustive:
# Program elements to check for exhaustiveness.
# Default: [ switch ]
check:
- switch
# - map # TODO(thaJeztah): also enable for maps
# Presence of "default" case in switch statements satisfies exhaustiveness,
# even if all enum members are not listed.
# Default: false
#
# TODO(thaJeztah): consider not allowing this to catch new values being added (and falling through to "default")
default-signifies-exhaustive: true
forbidigo:
forbid:
- pkg: ^sync/atomic$
pattern: ^atomic\.(Add|CompareAndSwap|Load|Store|Swap).
msg: Go 1.19 atomic types should be used instead.
- pkg: ^regexp$
pattern: ^regexp\.MustCompile
msg: Use internal/lazyregexp.New instead.
- pkg: github.com/vishvananda/netlink$
pattern: ^netlink\.(Handle\.)?(AddrList|BridgeVlanList|ChainList|ClassList|ConntrackTableList|ConntrackDeleteFilter$|ConntrackDeleteFilters|DevLinkGetDeviceList|DevLinkGetAllPortList|DevlinkGetDeviceParams|FilterList|FouList|GenlFamilyList|GTPPDPList|LinkByName|LinkByAlias|LinkList|LinkSubscribeWithOptions|NeighList$|NeighProxyList|NeighListExecute|NeighSubscribeWithOptions|LinkGetProtinfo|QdiscList|RdmaLinkList|RdmaLinkByName|RdmaLinkDel|RouteList|RouteListFilteredIter|RuleListFiltered$|RouteSubscribeWithOptions|RuleList$|RuleListFiltered|SocketGet|SocketDiagTCPInfo|SocketDiagTCP|SocketDiagUDPInfo|SocketDiagUDP|UnixSocketDiagInfo|UnixSocketDiag|VDPAGetDevConfigList|VDPAGetDevList|VDPAGetMGMTDevList|XfrmPolicyList|XfrmStateList)
msg: Use internal nlwrap package for EINTR handling.
- pkg: github.com/docker/docker/internal/nlwrap$
pattern: ^nlwrap.Handle.(BridgeVlanList|ChainList|ClassList|ConntrackDeleteFilter$|DevLinkGetDeviceList|DevLinkGetAllPortList|DevlinkGetDeviceParams|FilterList|FouList|GenlFamilyList|GTPPDPList|LinkByAlias|LinkSubscribeWithOptions|NeighList$|NeighProxyList|NeighListExecute|NeighSubscribeWithOptions|LinkGetProtinfo|QdiscList|RdmaLinkList|RdmaLinkByName|RdmaLinkDel|RouteListFilteredIter|RuleListFiltered$|RouteSubscribeWithOptions|RuleList$|RuleListFiltered|SocketGet|SocketDiagTCPInfo|SocketDiagTCP|SocketDiagUDPInfo|SocketDiagUDP|UnixSocketDiagInfo|UnixSocketDiag|VDPAGetDevConfigList|VDPAGetDevList|VDPAGetMGMTDevList)
msg: Add a wrapper to nlwrap.Handle for EINTR handling and update the list in .golangci.yml.
analyze-types: true
gosec:
excludes:
- G104 # G104: Errors unhandled; (TODO: reduce unhandled errors, or explicitly ignore)
- G115 # G115: integer overflow conversion; (TODO: verify these: https://github.com/moby/moby/issues/48358)
- G204 # G204: Subprocess launched with variable; too many false positives.
- G301 # G301: Expect directory permissions to be 0750 or less (also EXC0009); too restrictive
- G302 # G302: Expect file permissions to be 0600 or less (also EXC0009); too restrictive
- G304 # G304: Potential file inclusion via variable.
- G306 # G306: Expect WriteFile permissions to be 0600 or less (too restrictive; also flags "0o644" permissions)
- G307 # G307: Deferring unsafe method "*os.File" on type "Close" (also EXC0008); (TODO: evaluate these and fix where needed: G307: Deferring unsafe method "*os.File" on type "Close")
- G504 # G504: Blocklisted import net/http/cgi: Go versions < 1.6.3 are vulnerable to Httpoxy attack: (CVE-2016-5386); (only affects go < 1.6.3)
govet:
enable-all: true
disable:
- fieldalignment # TODO: evaluate which ones should be updated.
importas:
# Do not allow unaliased imports of aliased packages.
no-unaliased: true
alias:
# Enforce alias to prevent it accidentally being used instead of our
# own errdefs package (or vice-versa).
- pkg: github.com/containerd/errdefs
alias: cerrdefs
- pkg: github.com/containerd/containerd/images
alias: c8dimages
- pkg: github.com/opencontainers/image-spec/specs-go/v1
alias: ocispec
- pkg: go.etcd.io/bbolt
alias: bolt
# Enforce that gotest.tools/v3/assert/cmp is always aliased as "is"
- pkg: gotest.tools/v3/assert/cmp
alias: is
nakedret:
# Disallow naked returns if func has more lines of code than this setting.
# Default: 30
max-func-lines: 0
revive:
rules:
# FIXME make sure all packages have a description. Currently, there are many packages without one.
- name: package-comments
disabled: true
staticcheck:
checks:
- all
- -QF1008 # Omit embedded fields from selector expression; https://staticcheck.dev/docs/checks/#QF1008
- -ST1000 # Incorrect or missing package comment; https://staticcheck.dev/docs/checks/#ST1000
- -ST1003 # Poorly chosen identifier; https://staticcheck.dev/docs/checks/#ST1003
- -ST1005 # Incorrectly formatted error string; https://staticcheck.dev/docs/checks/#ST1005
spancheck:
# Default: ["end"]
checks:
- end # check that `span.End()` is called
- record-error # check that `span.RecordError(err)` is called when an error is returned
- set-status # check that `span.SetStatus(codes.Error, msg)` is called when an error is returned
usestdlibvars:
# Suggest the use of http.MethodXX.
http-method: true
# Suggest the use of http.StatusXX.
http-status-code: true
exclusions:
paths:
- volume/drivers/proxy.go # TODO: this is a generated file but with an invalid header, see https://github.com/moby/moby/pull/46274
rules:
# We prefer to use a "linters.exclusions.rules" section so that new "default" exclusions are not
# automatically inherited. We can decide whether or not to follow upstream
# defaults when updating golangci-lint versions.
# Unfortunately, this means we have to copy the whole exclusion pattern, as
# (unlike the "include" option) the "exclude" option does not take exclusion
# IDs.
#
# These exclusion patterns are copied from the default excludes at:
# https://github.com/golangci/golangci-lint/blob/v1.61.0/pkg/config/issues.go#L11-L104
#
# The default list of exclusions can be found at:
# https://golangci-lint.run/usage/false-positives/#default-exclusions
# Exclude some linters from running on tests files.
- path: _test\.go
linters:
- errcheck
- text: "G404: Use of weak random number generator"
path: _test\.go
linters:
- gosec
# Suppress golint complaining about generated types in api/types/
- text: "type name will be used as (container|volume)\\.(Container|Volume).* by other packages, and that stutters; consider calling this"
path: "api/types/(volume|container)/"
linters:
- revive
# FIXME: ignoring unused assigns to ctx for now; too many hits in libnetwork/xxx functions that setup traces
- text: "assigned to ctx, but never used afterwards"
linters:
- wastedassign
- text: "ineffectual assignment to ctx"
source: "ctx[, ].*=.*\\(ctx[,)]"
linters:
- ineffassign
- text: "SA4006: this value of ctx is never used"
source: "ctx[, ].*=.*\\(ctx[,)]"
linters:
- staticcheck
# FIXME(thaJeztah): ignoring these transitional utilities until BuildKit is vendored with https://github.com/moby/moby/pull/49743
- text: "SA1019: idtools\\.(ToUserIdentityMapping|FromUserIdentityMapping) is deprecated"
linters:
- staticcheck
# Ignore "nested context in function literal (fatcontext)" as we intentionally set up tracing on a base-context for tests.
# FIXME(thaJeztah): see if there's a more idiomatic way to do this.
- text: 'nested context in function literal'
path: '((main|check)_(linux_|)test\.go)|testutil/helpers\.go'
linters:
- fatcontext
- text: '^shadow: declaration of "(ctx|err|ok)" shadows declaration'
linters:
- govet
- text: '^shadow: declaration of "(out)" shadows declaration'
path: _test\.go
linters:
- govet
- text: 'use of `regexp.MustCompile` forbidden'
path: _test\.go
linters:
- forbidigo
- text: 'use of `regexp.MustCompile` forbidden'
path: "internal/lazyregexp"
linters:
- forbidigo
- text: 'use of `regexp.MustCompile` forbidden'
path: "libnetwork/cmd/networkdb-test/dbclient"
linters:
- forbidigo
# Log a warning if an exclusion rule is unused.
# Default: false
warn-unused: true
issues:
# Maximum issues count per one linter. Set to 0 to disable. Default is 50.
max-issues-per-linter: 0
# Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
max-same-issues: 0

@@ -7,7 +7,6 @@
#
# For an explanation of this file format, consult gitmailmap(5).
Aaron Yoshitake <airandfingers@gmail.com>
Aaron L. Xu <liker.xu@foxmail.com>
Aaron L. Xu <liker.xu@foxmail.com> <likexu@harmonycloud.cn>
Aaron Lehmann <alehmann@netflix.com>
@@ -31,10 +30,7 @@ Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp> <suda.akihiro@lab.ntt.co.jp>
Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp> <suda.kyoto@gmail.com>
Akshay Moghe <akshay.moghe@gmail.com>
Alano Terblanche <alano.terblanche@docker.com>
Alano Terblanche <alano.terblanche@docker.com> <18033717+Benehiko@users.noreply.github.com>
Albin Kerouanton <albinker@gmail.com>
Albin Kerouanton <albinker@gmail.com> <557933+akerouanton@users.noreply.github.com>
Albin Kerouanton <albinker@gmail.com> <albin@akerouanton.name>
Aleksa Sarai <asarai@suse.de>
Aleksa Sarai <asarai@suse.de> <asarai@suse.com>
@@ -62,8 +58,6 @@ Allen Sun <allensun.shl@alibaba-inc.com> <allen.sun@daocloud.io>
Allen Sun <allensun.shl@alibaba-inc.com> <shlallen1990@gmail.com>
Anca Iordache <anca.iordache@docker.com>
Andrea Denisse Gómez <crypto.andrea@protonmail.ch>
Andrew Baxter <423qpsxzhh8k3h@s.rendaw.me>
Andrew Baxter <423qpsxzhh8k3h@s.rendaw.me> andrew <>
Andrew Kim <taeyeonkim90@gmail.com>
Andrew Kim <taeyeonkim90@gmail.com> <akim01@fortinet.com>
Andrew Weiss <andrew.weiss@docker.com> <andrew.weiss@microsoft.com>
@@ -94,9 +88,6 @@ Arnaud Rebillout <arnaud.rebillout@collabora.com>
Arnaud Rebillout <arnaud.rebillout@collabora.com> <elboulangero@gmail.com>
Arthur Gautier <baloo@gandi.net> <superbaloo+registrations.github@superbaloo.net>
Artur Meyster <arthurfbi@yahoo.com>
Austin Vazquez <austin.vazquez.dev@gmail.com>
Austin Vazquez <austin.vazquez.dev@gmail.com> <55906459+austinvazquez@users.noreply.github.com>
Austin Vazquez <austin.vazquez.dev@gmail.com> <macedonv@amazon.com>
Avi Miller <avi.miller@oracle.com> <avi.miller@gmail.com>
Ben Bonnefoy <frenchben@docker.com>
Ben Golub <ben.golub@dotcloud.com>
@@ -112,9 +103,6 @@ Bily Zhang <xcoder@tenxcloud.com>
Bin Liu <liubin0329@gmail.com>
Bin Liu <liubin0329@gmail.com> <liubin0329@users.noreply.github.com>
Bingshen Wang <bingshen.wbs@alibaba-inc.com>
Bjorn Neergaard <bjorn@neersighted.com>
Bjorn Neergaard <bjorn@neersighted.com> <bjorn.neergaard@docker.com>
Bjorn Neergaard <bjorn@neersighted.com> <bneergaard@mirantis.com>
Boaz Shuster <ripcurld.github@gmail.com>
Bojun Zhu <bojun.zhu@foxmail.com>
Boqin Qin <bobbqqin@gmail.com>
@@ -127,7 +115,6 @@ Brian Goff <cpuguy83@gmail.com> <bgoff@cpuguy83-mbp.home>
Brian Goff <cpuguy83@gmail.com> <bgoff@cpuguy83-mbp.local>
Brian Goff <cpuguy83@gmail.com> <brian.goff@microsoft.com>
Brian Goff <cpuguy83@gmail.com> <cpuguy@hey.com>
Calvin Liu <flycalvin@qq.com>
Cameron Sparr <gh@sparr.email>
Carlos de Paula <me@carlosedp.com>
Chander Govindarajan <chandergovind@gmail.com>
@@ -139,8 +126,6 @@ Chen Mingjie <chenmingjie0828@163.com>
Chen Qiu <cheney-90@hotmail.com>
Chen Qiu <cheney-90@hotmail.com> <21321229@zju.edu.cn>
Chengfei Shang <cfshang@alauda.io>
Chengyu Zhu <hudson@cyzhu.com>
Chentianze <cmoman@126.com>
Chris Dias <cdias@microsoft.com>
Chris McKinnel <chris.mckinnel@tangentlabs.co.uk>
Chris Price <cprice@mirantis.com>
@@ -149,8 +134,6 @@ Chris Telfer <ctelfer@docker.com>
Chris Telfer <ctelfer@docker.com> <ctelfer@users.noreply.github.com>
Christopher Biscardi <biscarch@sketcht.com>
Christopher Latham <sudosurootdev@gmail.com>
Christopher Petito <chrisjpetito@gmail.com>
Christopher Petito <chrisjpetito@gmail.com> <47751006+krissetto@users.noreply.github.com>
Christy Norman <christy@linux.vnet.ibm.com>
Chun Chen <ramichen@tencent.com> <chenchun.feed@gmail.com>
Corbin Coleman <corbin.coleman@docker.com>
@@ -158,8 +141,6 @@ Cristian Ariza <dev@cristianrz.com>
Cristian Staretu <cristian.staretu@gmail.com>
Cristian Staretu <cristian.staretu@gmail.com> <unclejack@users.noreply.github.com>
Cristian Staretu <cristian.staretu@gmail.com> <unclejacksons@gmail.com>
cui fliter <imcusg@gmail.com>
cui fliter <imcusg@gmail.com> cuishuang <imcusg@gmail.com>
CUI Wei <ghostplant@qq.com> cuiwei13 <cuiwei13@pku.edu.cn>
Daehyeok Mun <daehyeok@gmail.com>
Daehyeok Mun <daehyeok@gmail.com> <daehyeok@daehyeok-ui-MacBook-Air.local>
@@ -186,8 +167,6 @@ Dattatraya Kumbhar <dattatraya.kumbhar@gslab.com>
Dave Goodchild <buddhamagnet@gmail.com>
Dave Henderson <dhenderson@gmail.com> <Dave.Henderson@ca.ibm.com>
Dave Tucker <dt@docker.com> <dave@dtucker.co.uk>
David Dooling <dooling@gmail.com>
David Dooling <dooling@gmail.com> <david.dooling@docker.com>
David M. Karr <davidmichaelkarr@gmail.com>
David Sheets <dsheets@docker.com> <sheets@alum.mit.edu>
David Sissitka <me@dsissitka.com>
@@ -234,8 +213,6 @@ Felix Hupfeld <felix@quobyte.com> <quofelix@users.noreply.github.com>
Felix Ruess <felix.ruess@gmail.com> <felix.ruess@roboception.de>
Feng Yan <fy2462@gmail.com>
Fengtu Wang <wangfengtu@huawei.com> <wangfengtu@huawei.com>
Filipe Pina <hzlu1ot0@duck.com>
Filipe Pina <hzlu1ot0@duck.com> <636320+fopina@users.noreply.github.com>
Francisco Carriedo <fcarriedo@gmail.com>
Frank Rosquin <frank.rosquin+github@gmail.com> <frank.rosquin@gmail.com>
Frank Yang <yyb196@gmail.com>
@@ -287,7 +264,6 @@ Hollie Teal <hollie@docker.com> <hollie.teal@docker.com>
Hollie Teal <hollie@docker.com> <hollietealok@users.noreply.github.com>
hsinko <21551195@zju.edu.cn> <hsinko@users.noreply.github.com>
Hu Keping <hukeping@huawei.com>
Huajin Tong <fliterdashen@gmail.com>
Hui Kang <hkang.sunysb@gmail.com>
Hui Kang <hkang.sunysb@gmail.com> <kangh@us.ibm.com>
Huu Nguyen <huu@prismskylabs.com> <whoshuu@gmail.com>
@@ -354,8 +330,6 @@ John Howard <github@lowenna.com> <john.howard@microsoft.com>
John Howard <github@lowenna.com> <john@lowenna.com>
John Stephens <johnstep@docker.com> <johnstep@users.noreply.github.com>
Jon Surrell <jon.surrell@gmail.com> <jon.surrell@automattic.com>
Jonathan A. Sternberg <jonathansternberg@gmail.com>
Jonathan A. Sternberg <jonathansternberg@gmail.com> <jonathan.sternberg@docker.com>
Jonathan Choy <jonathan.j.choy@gmail.com>
Jonathan Choy <jonathan.j.choy@gmail.com> <oni@tetsujinlabs.com>
Jordan Arentsen <blissdev@gmail.com>
@@ -395,9 +369,7 @@ Ken Cochrane <kencochrane@gmail.com> <KenCochrane@gmail.com>
Ken Herner <kherner@progress.com> <chosenken@gmail.com>
Ken Reese <krrgithub@gmail.com>
Kenfe-Mickaël Laventure <mickael.laventure@gmail.com>
Kevin Alvarez <github@crazymax.dev>
Kevin Alvarez <github@crazymax.dev> <1951866+crazy-max@users.noreply.github.com>
Kevin Alvarez <github@crazymax.dev> <crazy-max@users.noreply.github.com>
Kevin Alvarez <crazy-max@users.noreply.github.com>
Kevin Feyrer <kevin.feyrer@btinternet.com> <kevinfeyrer@users.noreply.github.com>
Kevin Kern <kaiwentan@harmonycloud.cn>
Kevin Meredith <kevin.m.meredith@gmail.com>
@@ -497,21 +469,15 @@ Mikael Davranche <mikael.davranche@corp.ovh.com>
Mikael Davranche <mikael.davranche@corp.ovh.com> <mikael.davranche@corp.ovh.net>
Mike Casas <mkcsas0@gmail.com> <mikecasas@users.noreply.github.com>
Mike Goelzer <mike.goelzer@docker.com> <mgoelzer@docker.com>
Milas Bowman <devnull@milas.dev>
Milas Bowman <devnull@milas.dev> <milas.bowman@docker.com>
Milas Bowman <devnull@milas.dev> <milasb@gmail.com>
Milind Chawre <milindchawre@gmail.com>
Misty Stanley-Jones <misty@docker.com> <misty@apache.org>
Mohammad Banikazemi <MBanikazemi@gmail.com>
Mohammad Banikazemi <MBanikazemi@gmail.com> <mb@us.ibm.com>
Mohd Sadiq <mohdsadiq058@gmail.com> <42430865+msadiq058@users.noreply.github.com>
Mohd Sadiq <mohdsadiq058@gmail.com> <mohdsadiq058@gmail.com>
Mohit Soni <mosoni@ebay.com> <mohitsoni1989@gmail.com>
Moorthy RS <rsmoorthy@gmail.com> <rsmoorthy@users.noreply.github.com>
Moysés Borges <moysesb@gmail.com>
Moysés Borges <moysesb@gmail.com> <moyses.furtado@wplex.com.br>
mrfly <mr.wrfly@gmail.com> <wrfly@users.noreply.github.com>
Myeongjoon Kim <kimmj8409@gmail.com>
Nace Oroz <orkica@gmail.com>
Natasha Jarus <linuxmercedes@gmail.com>
Nathan LeClaire <nathan.leclaire@docker.com> <nathan.leclaire@gmail.com>
@@ -531,11 +497,8 @@ Olli Janatuinen <olli.janatuinen@gmail.com> <olljanat@users.noreply.github.com>
Onur Filiz <onur.filiz@microsoft.com>
Onur Filiz <onur.filiz@microsoft.com> <ofiliz@users.noreply.github.com>
Ouyang Liduo <oyld0210@163.com>
Patrick St. laurent <patrick@saint-laurent.us>
Patrick Stapleton <github@gdi2290.com>
Paul Liljenberg <liljenberg.paul@gmail.com> <letters@paulnotcom.se>
Paweł Gronowski <pawel.gronowski@docker.com>
Paweł Gronowski <pawel.gronowski@docker.com> <me@woland.xyz>
Pavel Tikhomirov <ptikhomirov@virtuozzo.com> <ptikhomirov@parallels.com>
Pawel Konczalski <mail@konczalski.de>
Peter Choi <phkchoi89@gmail.com> <reikani@Peters-MacBook-Pro.local>
@@ -557,21 +520,16 @@ Qin TianHuan <tianhuan@bingotree.cn>
Ray Tsang <rayt@google.com> <saturnism@users.noreply.github.com>
Renaud Gaubert <rgaubert@nvidia.com> <renaud.gaubert@gmail.com>
Richard Scothern <richard.scothern@gmail.com>
Rob Murray <rob.murray@docker.com>
Rob Murray <rob.murray@docker.com> <148866618+robmry@users.noreply.github.com>
Robert Terhaar <rterhaar@atlanticdynamic.com> <robbyt@users.noreply.github.com>
Roberto G. Hashioka <roberto.hashioka@docker.com> <roberto_hashioka@hotmail.com>
Roberto Muñoz Fernández <robertomf@gmail.com> <roberto.munoz.fernandez.contractor@bbva.com>
Robin Thoni <robin@rthoni.com>
Rodrigo Campos <rodrigoca@microsoft.com>
Rodrigo Campos <rodrigoca@microsoft.com> <rodrigo@kinvolk.io>
Roman Dudin <katrmr@gmail.com> <decadent@users.noreply.github.com>
Rong Zhang <rongzhang@alauda.io>
Rongxiang Song <tinysong1226@gmail.com>
Rony Weng <ronyweng@synology.com>
Ross Boucher <rboucher@gmail.com>
Rui Cao <ruicao@alauda.io>
Rui JingAn <quiterace@gmail.com>
Runshen Zhu <runshen.zhu@gmail.com>
Ryan Stelly <ryan.stelly@live.com>
Ryoga Saito <contact@proelbtn.com>
@@ -590,9 +548,7 @@ Sebastiaan van Stijn <github@gone.nl>
Sebastiaan van Stijn <github@gone.nl> <moby@example.com>
Sebastiaan van Stijn <github@gone.nl> <sebastiaan@ws-key-sebas3.dpi1.dpi>
Sebastiaan van Stijn <github@gone.nl> <thaJeztah@users.noreply.github.com>
Sebastian Thomschke <sebthom@users.noreply.github.com>
Seongyeol Lim <seongyeol37@gmail.com>
Serhii Nakon <serhii.n@thescimus.com>
Shaun Kaasten <shaunk@gmail.com>
Shawn Landden <shawn@churchofgit.com> <shawnlandden@gmail.com>
Shengbo Song <thomassong@tencent.com>
@@ -739,8 +695,6 @@ Xiaodong Liu <liuxiaodong@loongson.cn>
Xiaodong Zhang <a4012017@sina.com>
Xiaohua Ding <xiao_hua_ding@sina.cn>
Xiaoyu Zhang <zhang.xiaoyu33@zte.com.cn>
Xinfeng Liu <XinfengLiu@icloud.com>
Xinfeng Liu <XinfengLiu@icloud.com> <xinfeng.liu@gmail.com>
Xuecong Liao <satorulogic@gmail.com>
Yamasaki Masahide <masahide.y@gmail.com>
Yao Zaiyong <yaozaiyong@hotmail.com>

AUTHORS

@@ -2,10 +2,7 @@
# This file lists all contributors to the repository.
# See hack/generate-authors.sh to make modifications.
17neverends <ionianrise@gmail.com>
7sunarni <710720732@qq.com>
Aanand Prasad <aanand.prasad@gmail.com>
Aarni Koskela <akx@iki.fi>
Aaron Davidson <aaron@databricks.com>
Aaron Feng <aaron.feng@gmail.com>
Aaron Hnatiw <aaron@griddio.com>
@@ -13,8 +10,6 @@ Aaron Huslage <huslage@gmail.com>
Aaron L. Xu <liker.xu@foxmail.com>
Aaron Lehmann <alehmann@netflix.com>
Aaron Welch <welch@packet.net>
Aaron Yoshitake <airandfingers@gmail.com>
Abdur Rehman <abdur_rehman@mentor.com>
Abel Muiño <amuino@gmail.com>
Abhijeet Kasurde <akasurde@redhat.com>
Abhinandan Prativadi <aprativadi@gmail.com>
@@ -28,16 +23,12 @@ Adam Avilla <aavilla@yp.com>
Adam Dobrawy <naczelnik@jawnosc.tk>
Adam Eijdenberg <adam.eijdenberg@gmail.com>
Adam Kunk <adam.kunk@tiaa-cref.org>
Adam Lamers <adam.lamers@wmsdev.pl>
Adam Miller <admiller@redhat.com>
Adam Mills <adam@armills.info>
Adam Pointer <adam.pointer@skybettingandgaming.com>
Adam Simon <adamsimon85100@gmail.com>
Adam Singer <financeCoding@gmail.com>
Adam Thornton <adam.thornton@maryville.com>
Adam Walz <adam@adamwalz.net>
Adam Williams <awilliams@mirantis.com>
AdamKorcz <adam@adalogics.com>
Addam Hardy <addam.hardy@gmail.com>
Aditi Rajagopal <arajagopal@us.ibm.com>
Aditya <aditya@netroy.in>
@@ -69,7 +60,6 @@ alambike <alambike@gmail.com>
Alan Hoyle <alan@alanhoyle.com>
Alan Scherger <flyinprogrammer@gmail.com>
Alan Thompson <cloojure@gmail.com>
Alano Terblanche <alano.terblanche@docker.com>
Albert Callarisa <shark234@gmail.com>
Albert Zhang <zhgwenming@gmail.com>
Albin Kerouanton <albinker@gmail.com>
@@ -91,7 +81,6 @@ Alex Goodman <wagoodman@gmail.com>
Alex Nordlund <alexander.nordlund@nasdaq.com>
Alex Olshansky <i@creagenics.com>
Alex Samorukov <samm@os2.kiev.ua>
Alex Stockinger <alex@atomicjar.com>
Alex Warhawk <ax.warhawk@gmail.com>
Alexander Artemenko <svetlyak.40wt@gmail.com>
Alexander Boyd <alex@opengroove.org>
@@ -125,7 +114,6 @@ amangoel <amangoel@gmail.com>
Amen Belayneh <amenbelayneh@gmail.com>
Ameya Gawde <agawde@mirantis.com>
Amir Goldstein <amir73il@aquasec.com>
AmirBuddy <badinlu.amirhossein@gmail.com>
Amit Bakshi <ambakshi@gmail.com>
Amit Krishnan <amit.krishnan@oracle.com>
Amit Shukla <amit.shukla@docker.com>
@@ -150,7 +138,6 @@ Andreas Tiefenthaler <at@an-ti.eu>
Andrei Gherzan <andrei@resin.io>
Andrei Ushakov <aushakov@netflix.com>
Andrei Vagin <avagin@gmail.com>
Andrew Baxter <423qpsxzhh8k3h@s.rendaw.me>
Andrew C. Bodine <acbodine@us.ibm.com>
Andrew Clay Shafer <andrewcshafer@gmail.com>
Andrew Duckworth <grillopress@gmail.com>
@@ -175,7 +162,6 @@ Andrey Kolomentsev <andrey.kolomentsev@docker.com>
Andrey Petrov <andrey.petrov@shazow.net>
Andrey Stolbovsky <andrey.stolbovsky@gmail.com>
André Martins <aanm90@gmail.com>
Andrés Maldonado <maldonado@codelutin.com>
Andy Chambers <anchambers@paypal.com>
andy diller <dillera@gmail.com>
Andy Goldstein <agoldste@redhat.com>
@@ -185,12 +171,10 @@ Andy Rothfusz <github@developersupport.net>
Andy Smith <github@anarkystic.com>
Andy Wilson <wilson.andrew.j+github@gmail.com>
Andy Zhang <andy.zhangtao@hotmail.com>
Aneesh Kulkarni <askthefactorcamera@gmail.com>
Anes Hasicic <anes.hasicic@gmail.com>
Angel Velazquez <angelcar@amazon.com>
Anil Belur <askb23@gmail.com>
Anil Madhavapeddy <anil@recoil.org>
Anirudh Aithal <aithal@amazon.com>
Ankit Jain <ajatkj@yahoo.co.in>
Ankush Agarwal <ankushagarwal11@gmail.com>
Anonmily <michelle@michelleliu.io>
@@ -205,7 +189,6 @@ Anton Löfgren <anton.lofgren@gmail.com>
Anton Nikitin <anton.k.nikitin@gmail.com>
Anton Polonskiy <anton.polonskiy@gmail.com>
Anton Tiurin <noxiouz@yandex.ru>
Antonio Aguilar <antonio@zoftko.com>
Antonio Murdaca <antonio.murdaca@gmail.com>
Antonis Kalipetis <akalipetis@gmail.com>
Antony Messerli <amesserl@rackspace.com>
@@ -215,7 +198,6 @@ Anusha Ragunathan <anusha.ragunathan@docker.com>
Anyu Wang <wanganyu@outlook.com>
apocas <petermdias@gmail.com>
Arash Deshmeh <adeshmeh@ca.ibm.com>
arcosx <arcosx@outlook.com>
ArikaChen <eaglesora@gmail.com>
Arko Dasgupta <arko@tetrate.io>
Arnaud Lefebvre <a.lefebvre@outlook.fr>
@@ -228,13 +210,13 @@ Artur Meyster <arthurfbi@yahoo.com>
Arun Gupta <arun.gupta@gmail.com>
Asad Saeeduddin <masaeedu@gmail.com>
Asbjørn Enge <asbjorn@hanafjedle.net>
Ashly Mathew <ashly.mathew@sap.com>
Austin Vazquez <austin.vazquez.dev@gmail.com>
Austin Vazquez <macedonv@amazon.com>
averagehuman <averagehuman@users.noreply.github.com>
Avi Das <andas222@gmail.com>
Avi Kivity <avi@scylladb.com>
Avi Miller <avi.miller@oracle.com>
Avi Vaid <avaid1996@gmail.com>
ayoshitake <airandfingers@gmail.com>
Azat Khuyiyakhmetov <shadow_uz@mail.ru>
Bao Yonglei <baoyonglei@huawei.com>
Bardia Keyoumarsi <bkeyouma@ucsc.edu>
@@ -251,7 +233,6 @@ Ben Golub <ben.golub@dotcloud.com>
Ben Gould <ben@bengould.co.uk>
Ben Hall <ben@benhall.me.uk>
Ben Langfeld <ben@langfeld.me>
Ben Lovy <ben@deciduously.com>
Ben Sargent <ben@brokendigits.com>
Ben Severson <BenSeverson@users.noreply.github.com>
Ben Toews <mastahyeti@gmail.com>
@@ -260,7 +241,6 @@ Benjamin Atkin <ben@benatkin.com>
Benjamin Baker <Benjamin.baker@utexas.edu>
Benjamin Boudreau <boudreau.benjamin@gmail.com>
Benjamin Böhmke <benjamin@boehmke.net>
Benjamin Wang <wachao@vmware.com>
Benjamin Yolken <yolken@stripe.com>
Benny Ng <benny.tpng@gmail.com>
Benoit Chesneau <bchesneau@gmail.com>
@@ -278,7 +258,7 @@ Billy Ridgway <wrridgwa@us.ibm.com>
Bily Zhang <xcoder@tenxcloud.com>
Bin Liu <liubin0329@gmail.com>
Bingshen Wang <bingshen.wbs@alibaba-inc.com>
Bjorn Neergaard <bjorn@neersighted.com>
Bjorn Neergaard <bneergaard@mirantis.com>
Blake Geno <blakegeno@gmail.com>
Boaz Shuster <ripcurld.github@gmail.com>
bobby abbott <ttobbaybbob@gmail.com>
@@ -295,8 +275,6 @@ Brandon Liu <bdon@bdon.org>
Brandon Philips <brandon.philips@coreos.com>
Brandon Rhodes <brandon@rhodesmill.org>
Brendan Dixon <brendand@microsoft.com>
Brendon Smith <bws@bws.bio>
Brennan Kinney <5098581+polarathene@users.noreply.github.com>
Brent Salisbury <brent.salisbury@docker.com>
Brett Higgins <brhiggins@arbor.net>
Brett Kochendorfer <brett.kochendorfer@gmail.com>
@@ -330,7 +308,6 @@ Burke Libbey <burke@libbey.me>
Byung Kang <byung.kang.ctr@amrdec.army.mil>
Caleb Spare <cespare@gmail.com>
Calen Pennington <cale@edx.org>
Calvin Liu <flycalvin@qq.com>
Cameron Boehmer <cameron.boehmer@gmail.com>
Cameron Sparr <gh@sparr.email>
Cameron Spear <cameronspear@gmail.com>
@@ -350,14 +327,12 @@ Casey Bisson <casey.bisson@joyent.com>
Catalin Pirvu <pirvu.catalin94@gmail.com>
Ce Gao <ce.gao@outlook.com>
Cedric Davies <cedricda@microsoft.com>
Cesar Talledo <cesar.talledo@docker.com>
Cezar Sa Espinola <cezarsa@gmail.com>
Chad Swenson <chadswen@gmail.com>
Chance Zibolski <chance.zibolski@gmail.com>
Chander Govindarajan <chandergovind@gmail.com>
Chanhun Jeong <keyolk@gmail.com>
Chao Wang <wangchao.fnst@cn.fujitsu.com>
Charity Kathure <ckathure@microsoft.com>
Charles Chan <charleswhchan@users.noreply.github.com>
Charles Hooper <charles.hooper@dotcloud.com>
Charles Law <claw@conduce.com>
@@ -379,14 +354,11 @@ Chen Qiu <cheney-90@hotmail.com>
Cheng-mean Liu <soccerl@microsoft.com>
Chengfei Shang <cfshang@alauda.io>
Chengguang Xu <cgxu519@gmx.com>
Chengyu Zhu <hudson@cyzhu.com>
Chentianze <cmoman@126.com>
Chenyang Yan <memory.yancy@gmail.com>
chenyuzhu <chenyuzhi@oschina.cn>
Chetan Birajdar <birajdar.chetan@gmail.com>
Chewey <prosto-chewey@users.noreply.github.com>
Chia-liang Kao <clkao@clkao.org>
Chiranjeevi Tirunagari <vchiranjeeviak.tirunagari@gmail.com>
chli <chli@freewheel.tv>
Cholerae Hu <choleraehyq@gmail.com>
Chris Alfonso <calfonso@redhat.com>
@@ -428,7 +400,6 @@ Christopher Crone <christopher.crone@docker.com>
Christopher Currie <codemonkey+github@gmail.com>
Christopher Jones <tophj@linux.vnet.ibm.com>
Christopher Latham <sudosurootdev@gmail.com>
Christopher Petito <chrisjpetito@gmail.com>
Christopher Rigor <crigor@gmail.com>
Christy Norman <christy@linux.vnet.ibm.com>
Chun Chen <ramichen@tencent.com>
@@ -458,8 +429,8 @@ Cristian Staretu <cristian.staretu@gmail.com>
cristiano balducci <cristiano.balducci@gmail.com>
Cristina Yenyxe Gonzalez Garcia <cristina.yenyxe@gmail.com>
Cruceru Calin-Cristian <crucerucalincristian@gmail.com>
cui fliter <imcusg@gmail.com>
CUI Wei <ghostplant@qq.com>
cuishuang <imcusg@gmail.com>
Cuong Manh Le <cuong.manhle.vn@gmail.com>
Cyprian Gracz <cyprian.gracz@micro-jumbo.eu>
Cyril F <cyrilf7x@gmail.com>
@@ -494,7 +465,6 @@ Daniel Farrell <dfarrell@redhat.com>
Daniel Garcia <daniel@danielgarcia.info>
Daniel Gasienica <daniel@gasienica.ch>
Daniel Grunwell <mwgrunny@gmail.com>
Daniel Guns <danbguns@gmail.com>
Daniel Helfand <helfand.4@gmail.com>
Daniel Hiltgen <daniel.hiltgen@docker.com>
Daniel J Walsh <dwalsh@redhat.com>
@@ -539,7 +509,6 @@ David Dooling <dooling@gmail.com>
David Gageot <david@gageot.net>
David Gebler <davidgebler@gmail.com>
David Glasser <glasser@davidglasser.net>
David Karlsson <35727626+dvdksn@users.noreply.github.com>
David Lawrence <david.lawrence@docker.com>
David Lechner <david@lechnology.com>
David M. Karr <davidmichaelkarr@gmail.com>
@@ -629,7 +598,6 @@ Donald Huang <don.hcd@gmail.com>
Dong Chen <dongluo.chen@docker.com>
Donghwa Kim <shanytt@gmail.com>
Donovan Jones <git@gamma.net.nz>
Dorin Geman <dorin.geman@docker.com>
Doron Podoleanu <doronp@il.ibm.com>
Doug Davis <dug@us.ibm.com>
Doug MacEachern <dougm@vmware.com>
@@ -664,10 +632,8 @@ Emily Rose <emily@contactvibe.com>
Emir Ozer <emirozer@yandex.com>
Eng Zer Jun <engzerjun@gmail.com>
Enguerran <engcolson@gmail.com>
Enrico Weigelt, metux IT consult <info@metux.net>
Eohyung Lee <liquidnuker@gmail.com>
epeterso <epeterson@breakpoint-labs.com>
er0k <er0k@er0k.net>
Eric Barch <barch@tomesoftware.com>
Eric Curtin <ericcurtin17@gmail.com>
Eric G. Noriega <enoriega@vizuri.com>
@@ -690,7 +656,6 @@ Erik Hollensbe <github@hollensbe.org>
Erik Inge Bolsø <knan@redpill-linpro.com>
Erik Kristensen <erik@erikkristensen.com>
Erik Sipsma <erik@sipsma.dev>
Erik Sjölund <erik.sjolund@gmail.com>
Erik St. Martin <alakriti@gmail.com>
Erik Weathers <erikdw@gmail.com>
Erno Hopearuoho <erno.hopearuoho@gmail.com>
@@ -706,7 +671,6 @@ Evan Allrich <evan@unguku.com>
Evan Carmi <carmi@users.noreply.github.com>
Evan Hazlett <ejhazlett@gmail.com>
Evan Krall <krall@yelp.com>
Evan Lezar <elezar@nvidia.com>
Evan Phoenix <evan@fallingsnow.net>
Evan Wies <evan@neomantra.net>
Evelyn Xu <evelynhsu21@gmail.com>
@@ -753,7 +717,6 @@ Feroz Salam <feroz.salam@sourcegraph.com>
Ferran Rodenas <frodenas@gmail.com>
Filipe Brandenburger <filbranden@google.com>
Filipe Oliveira <contato@fmoliveira.com.br>
Filipe Pina <hzlu1ot0@duck.com>
Flavio Castelli <fcastelli@suse.com>
Flavio Crisciani <flavio.crisciani@docker.com>
Florian <FWirtz@users.noreply.github.com>
@@ -776,9 +739,7 @@ Frank Groeneveld <frank@ivaldi.nl>
Frank Herrmann <fgh@4gh.tv>
Frank Macreery <frank@macreery.com>
Frank Rosquin <frank.rosquin+github@gmail.com>
Frank Villaro-Dixon <frank.villarodixon@merkle.com>
Frank Yang <yyb196@gmail.com>
François Scala <github@arcenik.net>
Fred Lifton <fred.lifton@docker.com>
Frederick F. Kautz IV <fkautz@redhat.com>
Frederico F. de Oliveira <FreddieOliveira@users.noreply.github.com>
@@ -793,13 +754,11 @@ Félix Baylac-Jacqué <baylac.felix@gmail.com>
Félix Cantournet <felix.cantournet@cloudwatt.com>
Gabe Rosenhouse <gabe@missionst.com>
Gabor Nagy <mail@aigeruth.hu>
Gabriel Adrian Samfira <gsamfira@cloudbasesolutions.com>
Gabriel Goller <gabrielgoller123@gmail.com>
Gabriel L. Somlo <gsomlo@gmail.com>
Gabriel Linder <linder.gabriel@gmail.com>
Gabriel Monroy <gabriel@opdemand.com>
Gabriel Nicolas Avellaneda <avellaneda.gabriel@gmail.com>
Gabriel Tomitsuka <gabriel@tomitsuka.com>
Gaetan de Villele <gdevillele@gmail.com>
Galen Sampson <galen.sampson@gmail.com>
Gang Qiao <qiaohai8866@gmail.com>
@@ -814,9 +773,7 @@ GennadySpb <lipenkov@gmail.com>
Geoff Levand <geoff@infradead.org>
Geoffrey Bachelet <grosfrais@gmail.com>
Geon Kim <geon0250@gmail.com>
George Adams <georgeadams1995@gmail.com>
George Kontridze <george@bugsnag.com>
George Ma <mayangang@outlook.com>
George MacRorie <gmacr31@gmail.com>
George Xie <georgexsh@gmail.com>
Georgi Hristozov <georgi@forkbomb.nl>
@@ -843,7 +800,6 @@ Gopikannan Venugopalsamy <gopikannan.venugopalsamy@gmail.com>
Gosuke Miyashita <gosukenator@gmail.com>
Gou Rao <gou@portworx.com>
Govinda Fichtner <govinda.fichtner@googlemail.com>
Grace Choi <grace.54109@gmail.com>
Grant Millar <rid@cylo.io>
Grant Reaber <grant.reaber@gmail.com>
Graydon Hoare <graydon@pobox.com>
@@ -899,12 +855,9 @@ Hongbin Lu <hongbin034@gmail.com>
Hongxu Jia <hongxu.jia@windriver.com>
Honza Pokorny <me@honza.ca>
Hsing-Hui Hsu <hsinghui@amazon.com>
Hsing-Yu (David) Chen <davidhsingyuchen@gmail.com>
hsinko <21551195@zju.edu.cn>
Hu Keping <hukeping@huawei.com>
Hu Tao <hutao@cn.fujitsu.com>
Huajin Tong <fliterdashen@gmail.com>
huang-jl <1046678590@qq.com>
HuanHuan Ye <logindaveye@gmail.com>
Huanzhong Zhang <zhanghuanzhong90@gmail.com>
Huayi Zhang <irachex@gmail.com>
@@ -934,12 +887,10 @@ Igor Dolzhikov <bluesriverz@gmail.com>
Igor Karpovich <i.karpovich@currencysolutions.com>
Iliana Weller <iweller@amazon.com>
Ilkka Laukkanen <ilkka@ilkka.io>
Illia Antypenko <ilya@antipenko.pp.ua>
Illo Abdulrahim <abdulrahim.illo@nokia.com>
Ilya Dmitrichenko <errordeveloper@gmail.com>
Ilya Gusev <mail@igusev.ru>
Ilya Khlopotov <ilya.khlopotov@gmail.com>
imalasong <2879499479@qq.com>
imre Fitos <imre.fitos+github@gmail.com>
inglesp <peter.inglesby@gmail.com>
Ingo Gottwald <in.gottwald@gmail.com>
@@ -957,7 +908,6 @@ J Bruni <joaohbruni@yahoo.com.br>
J. Nunn <jbnunn@gmail.com>
Jack Danger Canty <jackdanger@squareup.com>
Jack Laxson <jackjrabbit@gmail.com>
Jack Walker <90711509+j2walker@users.noreply.github.com>
Jacob Atzen <jacob@jacobatzen.dk>
Jacob Edelman <edelman.jd@gmail.com>
Jacob Tomlinson <jacob@tom.linson.uk>
@@ -984,12 +934,10 @@ James Nugent <james@jen20.com>
James Sanders <james3sanders@gmail.com>
James Turnbull <james@lovedthanlost.net>
James Watkins-Harvey <jwatkins@progi-media.com>
Jameson Hyde <jameson.hyde@docker.com>
Jamie Hannaford <jamie@limetree.org>
Jamshid Afshar <jafshar@yahoo.com>
Jan Breig <git@pygos.space>
Jan Chren <dev.rindeal@gmail.com>
Jan Garcia <github-public@n-garcia.com>
Jan Götte <jaseg@jaseg.net>
Jan Keromnes <janx@linux.com>
Jan Koprowski <jan.koprowski@gmail.com>
@@ -1002,7 +950,6 @@ Jannick Fahlbusch <git@jf-projects.de>
Januar Wayong <januar@gmail.com>
Jared Biel <jared.biel@bolderthinking.com>
Jared Hocutt <jaredh@netapp.com>
Jaroslav Jindrak <dzejrou@gmail.com>
Jaroslaw Zabiello <hipertracker@gmail.com>
Jasmine Hegman <jasmine@jhegman.com>
Jason A. Donenfeld <Jason@zx2c4.com>
@@ -1018,7 +965,6 @@ Jason Shepherd <jason@jasonshepherd.net>
Jason Smith <jasonrichardsmith@gmail.com>
Jason Sommer <jsdirv@gmail.com>
Jason Stangroome <jason@codeassassin.com>
Jasper Siepkes <siepkes@serviceplanet.nl>
Javier Bassi <javierbassi@gmail.com>
jaxgeller <jacksongeller@gmail.com>
Jay <teguhwpurwanto@gmail.com>
@@ -1028,7 +974,6 @@ Jean Rouge <rougej+github@gmail.com>
Jean-Baptiste Barth <jeanbaptiste.barth@gmail.com>
Jean-Baptiste Dalido <jeanbaptiste@appgratis.com>
Jean-Christophe Berthon <huygens@berthon.eu>
Jean-Michel Rouet <jm.rouet@gmail.com>
Jean-Paul Calderone <exarkun@twistedmatrix.com>
Jean-Pierre Huynh <jean-pierre.huynh@ounet.fr>
Jean-Tiare Le Bigot <jt@yadutaf.fr>
@@ -1047,7 +992,6 @@ Jeffrey Bolle <jeffreybolle@gmail.com>
Jeffrey Morgan <jmorganca@gmail.com>
Jeffrey van Gogh <jvg@google.com>
Jenny Gebske <jennifer@gebske.de>
Jeongseok Kang <piono623@naver.com>
Jeremy Chambers <jeremy@thehipbot.com>
Jeremy Grosser <jeremy@synack.me>
Jeremy Huntwork <jhuntwork@lightcubesolutions.com>
@@ -1060,12 +1004,10 @@ Jeroen Jacobs <github@jeroenj.be>
Jesse Dearing <jesse.dearing@gmail.com>
Jesse Dubay <jesse@thefortytwo.net>
Jessica Frazelle <jess@oxide.computer>
Jeyanthinath Muthuram <jeyanthinath10@gmail.com>
Jezeniel Zapanta <jpzapanta22@gmail.com>
Jhon Honce <jhonce@redhat.com>
Ji.Zhilong <zhilongji@gmail.com>
Jian Liao <jliao@alauda.io>
Jian Zeng <anonymousknight96@gmail.com>
Jian Zhang <zhangjian.fnst@cn.fujitsu.com>
Jiang Jinyang <jjyruby@gmail.com>
Jianyong Wu <jianyong.wu@arm.com>
@@ -1083,16 +1025,13 @@ Jim Perrin <jperrin@centos.org>
Jimmy Cuadra <jimmy@jimmycuadra.com>
Jimmy Puckett <jimmy.puckett@spinen.com>
Jimmy Song <rootsongjc@gmail.com>
jinjiadu <jinjiadu@aliyun.com>
Jinsoo Park <cellpjs@gmail.com>
Jintao Zhang <zhangjintao9020@gmail.com>
Jiri Appl <jiria@microsoft.com>
Jiri Popelka <jpopelka@redhat.com>
Jiuyue Ma <majiuyue@huawei.com>
Jiří Župka <jzupka@redhat.com>
jjimbo137 <115816493+jjimbo137@users.noreply.github.com>
Joakim Roubert <joakim.roubert@axis.com>
Joan Grau <grautxo.dev@proton.me>
Joao Fernandes <joao.fernandes@docker.com>
Joao Trindade <trindade.joao@gmail.com>
Joe Beda <joe.github@bedafamily.com>
@@ -1133,7 +1072,6 @@ Jon Johnson <jonjohnson@google.com>
Jon Surrell <jon.surrell@gmail.com>
Jon Wedaman <jweede@gmail.com>
Jonas Dohse <jonas@dohse.ch>
Jonas Geiler <git@jonasgeiler.com>
Jonas Heinrich <Jonas@JonasHeinrich.com>
Jonas Pfenniger <jonas@pfenniger.name>
Jonathan A. Schweder <jonathanschweder@gmail.com>
@@ -1177,7 +1115,6 @@ Josiah Kiehl <jkiehl@riotgames.com>
José Tomás Albornoz <jojo@eljojo.net>
Joyce Jang <mail@joycejang.com>
JP <jpellerin@leapfrogonline.com>
JSchltggr <jschltggr@gmail.com>
Julian Taylor <jtaylor.debian@googlemail.com>
Julien Barbier <write0@gmail.com>
Julien Bisconti <veggiemonk@users.noreply.github.com>
@@ -1195,7 +1132,6 @@ junxu <xujun@cmss.chinamobile.com>
Jussi Nummelin <jussi.nummelin@gmail.com>
Justas Brazauskas <brazauskasjustas@gmail.com>
Justen Martin <jmart@the-coder.com>
Justin Chadwell <me@jedevc.com>
Justin Cormack <justin.cormack@docker.com>
Justin Force <justin.force@gmail.com>
Justin Keller <85903732+jk-vb@users.noreply.github.com>
@@ -1212,7 +1148,6 @@ K. Heller <pestophagous@gmail.com>
Kai Blin <kai@samba.org>
Kai Qiang Wu (Kennan) <wkq5325@gmail.com>
Kaijie Chen <chen@kaijie.org>
Kaita Nakamura <kaita.nakamura0830@gmail.com>
Kamil Domański <kamil@domanski.co>
Kamjar Gerami <kami.gerami@gmail.com>
Kanstantsin Shautsou <kanstantsin.sha@gmail.com>
@@ -1239,7 +1174,6 @@ Ke Xu <leonhartx.k@gmail.com>
Kei Ohmura <ohmura.kei@gmail.com>
Keith Hudgins <greenman@greenman.org>
Keli Hu <dev@keli.hu>
Ken Bannister <kb2ma@runbox.com>
Ken Cochrane <kencochrane@gmail.com>
Ken Herner <kherner@progress.com>
Ken ICHIKAWA <ichikawa.ken@jp.fujitsu.com>
@@ -1249,7 +1183,7 @@ Kenjiro Nakayama <nakayamakenjiro@gmail.com>
Kent Johnson <kentoj@gmail.com>
Kenta Tada <Kenta.Tada@sony.com>
Kevin "qwazerty" Houdebert <kevin.houdebert@gmail.com>
Kevin Alvarez <github@crazymax.dev>
Kevin Alvarez <crazy-max@users.noreply.github.com>
Kevin Burke <kev@inburke.com>
Kevin Clark <kevin.clark@gmail.com>
Kevin Feyrer <kevin.feyrer@btinternet.com>
@@ -1272,7 +1206,6 @@ Kimbro Staken <kstaken@kstaken.com>
Kir Kolyshkin <kolyshkin@gmail.com>
Kiran Gangadharan <kiran.daredevil@gmail.com>
Kirill SIbirev <l0kix2@gmail.com>
Kirk Easterson <kirk.easterson@gmail.com>
knappe <tyler.knappe@gmail.com>
Kohei Tsuruta <coheyxyz@gmail.com>
Koichi Shiraishi <k@zchee.io>
@@ -1282,12 +1215,10 @@ Konstantin Gribov <grossws@gmail.com>
Konstantin L <sw.double@gmail.com>
Konstantin Pelykh <kpelykh@zettaset.com>
Kostadin Plachkov <k.n.plachkov@gmail.com>
kpcyrd <git@rxv.cc>
Krasi Georgiev <krasi@vip-consult.solutions>
Krasimir Georgiev <support@vip-consult.co.uk>
Kris-Mikael Krister <krismikael@protonmail.com>
Kristian Haugene <kristian.haugene@capgemini.com>
Kristian Heljas <kristian@kristian.ee>
Kristina Zabunova <triara.xiii@gmail.com>
Krystian Wojcicki <kwojcicki@sympatico.ca>
Kunal Kushwaha <kushwaha_kunal_v7@lab.ntt.co.jp>
@@ -1304,19 +1235,15 @@ Lakshan Perera <lakshan@laktek.com>
Lalatendu Mohanty <lmohanty@redhat.com>
Lance Chen <cyen0312@gmail.com>
Lance Kinley <lkinley@loyaltymethods.com>
Lars Andringa <l.s.andringa@rug.nl>
Lars Butler <Lars.Butler@gmail.com>
Lars Kellogg-Stedman <lars@redhat.com>
Lars R. Damerow <lars@pixar.com>
Lars-Magnus Skog <ralphtheninja@riseup.net>
Laszlo Meszaros <lacienator@gmail.com>
Laura Brehm <laurabrehm@hey.com>
Laura Frank <ljfrank@gmail.com>
Laurent Bernaille <laurent.bernaille@datadoghq.com>
Laurent Erignoux <lerignoux@gmail.com>
Laurent Goderre <laurent.goderre@docker.com>
Laurie Voss <github@seldo.com>
Leandro Motta Barros <lmb@stackedboxes.org>
Leandro Siqueira <leandro.siqueira@gmail.com>
Lee Calcote <leecalcote@gmail.com>
Lee Chao <932819864@qq.com>
@@ -1367,7 +1294,6 @@ Lorenzo Fontana <fontanalorenz@gmail.com>
Lotus Fenn <fenn.lotus@gmail.com>
Louis Delossantos <ldelossa.ld@gmail.com>
Louis Opter <kalessin@kalessin.fr>
Luboslav Pivarc <lpivarc@redhat.com>
Luca Favatella <luca.favatella@erlang-solutions.com>
Luca Marturana <lucamarturana@gmail.com>
Luca Orlandi <luca.orlandi@gmail.com>
@@ -1395,7 +1321,6 @@ Madhan Raj Mookkandy <MadhanRaj.Mookkandy@microsoft.com>
Madhav Puri <madhav.puri@gmail.com>
Madhu Venugopal <mavenugo@gmail.com>
Mageee <fangpuyi@foxmail.com>
maggie44 <64841595+maggie44@users.noreply.github.com>
Mahesh Tiyyagura <tmahesh@gmail.com>
malnick <malnick@gmail..com>
Malte Janduda <mail@janduda.net>
@@ -1407,7 +1332,6 @@ Manuel Meurer <manuel@krautcomputing.com>
Manuel Rüger <manuel@rueg.eu>
Manuel Woelker <github@manuel.woelker.org>
mapk0y <mapk0y@gmail.com>
Marat Radchenko <marat@slonopotamus.org>
Marc Abramowitz <marc@marc-abramowitz.com>
Marc Kuo <kuomarc2@gmail.com>
Marc Tamsky <mtamsky@gmail.com>
@@ -1447,7 +1371,6 @@ Martijn van Oosterhout <kleptog@svana.org>
Martin Braun <braun@neuroforge.de>
Martin Dojcak <martin.dojcak@lablabs.io>
Martin Honermeyer <maze@strahlungsfrei.de>
Martin Jirku <martin@jirku.sk>
Martin Kelly <martin@surround.io>
Martin Mosegaard Amdisen <martin.amdisen@praqma.com>
Martin Muzatko <martin@happy-css.com>
@@ -1489,7 +1412,6 @@ Matthias Kühnle <git.nivoc@neverbox.com>
Matthias Rampke <mr@soundcloud.com>
Matthieu Fronton <m@tthieu.fr>
Matthieu Hauglustaine <matt.hauglustaine@gmail.com>
Matthieu MOREL <matthieu.morel35@gmail.com>
Mattias Jernberg <nostrad@gmail.com>
Mauricio Garavaglia <mauricio@medallia.com>
mauriyouth <mauriyouth@gmail.com>
@@ -1527,7 +1449,6 @@ Michael Holzheu <holzheu@linux.vnet.ibm.com>
Michael Hudson-Doyle <michael.hudson@canonical.com>
Michael Huettermann <michael@huettermann.net>
Michael Irwin <mikesir87@gmail.com>
Michael Kebe <michael.kebe@hkm.de>
Michael Kuehn <micha@kuehn.io>
Michael Käufl <docker@c.michael-kaeufl.de>
Michael Neale <michael.neale@gmail.com>
@@ -1576,11 +1497,10 @@ Mike Lundy <mike@fluffypenguin.org>
Mike MacCana <mike.maccana@gmail.com>
Mike Naberezny <mike@naberezny.com>
Mike Snitzer <snitzer@redhat.com>
Mike Sul <mike.sul@foundries.io>
mikelinjie <294893458@qq.com>
Mikhail Sobolev <mss@mawhrin.net>
Miklos Szegedi <miklos.szegedi@cloudera.com>
Milas Bowman <devnull@milas.dev>
Milas Bowman <milasb@gmail.com>
Milind Chawre <milindchawre@gmail.com>
Miloslav Trmač <mitr@redhat.com>
mingqing <limingqing@cyou-inc.com>
@@ -1592,7 +1512,6 @@ mlarcher <github@ringabell.org>
Mohammad Banikazemi <MBanikazemi@gmail.com>
Mohammad Nasirifar <farnasirim@gmail.com>
Mohammed Aaqib Ansari <maaquib@gmail.com>
Mohd Sadiq <mohdsadiq058@gmail.com>
Mohit Soni <mosoni@ebay.com>
Moorthy RS <rsmoorthy@gmail.com>
Morgan Bauer <mbauer@us.ibm.com>
@@ -1607,7 +1526,6 @@ Muayyad Alsadi <alsadi@gmail.com>
Muhammad Zohaib Aslam <zohaibse011@gmail.com>
Mustafa Akın <mustafa91@gmail.com>
Muthukumar R <muthur@gmail.com>
Myeongjoon Kim <kimmj8409@gmail.com>
Máximo Cuadros <mcuadros@gmail.com>
Médi-Rémi Hashim <medimatrix@users.noreply.github.com>
Nace Oroz <orkica@gmail.com>
@@ -1622,7 +1540,6 @@ Natasha Jarus <linuxmercedes@gmail.com>
Nate Brennand <nate.brennand@clever.com>
Nate Eagleson <nate@nateeag.com>
Nate Jones <nate@endot.org>
Nathan Baulch <nathan.baulch@gmail.com>
Nathan Carlson <carl4403@umn.edu>
Nathan Herald <me@nathanherald.com>
Nathan Hsieh <hsieh.nathan@gmail.com>
@@ -1646,7 +1563,6 @@ Nick Neisen <nwneisen@gmail.com>
Nick Parker <nikaios@gmail.com>
Nick Payne <nick@kurai.co.uk>
Nick Russo <nicholasjamesrusso@gmail.com>
Nick Santos <nick.santos@docker.com>
Nick Stenning <nick.stenning@digital.cabinet-office.gov.uk>
Nick Stinemates <nick@stinemates.org>
Nick Wood <nwood@microsoft.com>
@@ -1668,7 +1584,6 @@ NikolaMandic <mn080202@gmail.com>
Nikolas Garofil <nikolas.garofil@uantwerpen.be>
Nikolay Edigaryev <edigaryev@gmail.com>
Nikolay Milovanov <nmil@itransformers.net>
ningmingxiao <ning.mingxiao@zte.com.cn>
Nirmal Mehta <nirmalkmehta@gmail.com>
Nishant Totla <nishanttotla@gmail.com>
NIWA Hideyuki <niwa.niwa@nifty.ne.jp>
@@ -1677,7 +1592,6 @@ Noah Treuhaft <noah.treuhaft@docker.com>
NobodyOnSE <ich@sektor.selfip.com>
noducks <onemannoducks@gmail.com>
Nolan Darilek <nolan@thewordnerd.info>
Nolan Miles <nolanpmiles@gmail.com>
Noriki Nakamura <noriki.nakamura@miraclelinux.com>
nponeccop <andy.melnikov@gmail.com>
Nurahmadie <nurahmadie@gmail.com>
@@ -1685,7 +1599,6 @@ Nuutti Kotivuori <naked@iki.fi>
nzwsch <hi@nzwsch.com>
O.S. Tezer <ostezer@gmail.com>
objectified <objectified@gmail.com>
Octol1ttle <l1ttleofficial@outlook.com>
Odin Ugedal <odin@ugedal.com>
Oguz Bilgic <fisyonet@gmail.com>
Oh Jinkyun <tintypemolly@gmail.com>
@@ -1702,7 +1615,6 @@ Omri Shiv <Omri.Shiv@teradata.com>
Onur Filiz <onur.filiz@microsoft.com>
Oriol Francès <oriolfa@gmail.com>
Oscar Bonilla <6f6231@gmail.com>
oscar.chen <2972789494@qq.com>
Oskar Niburski <oskarniburski@gmail.com>
Otto Kekäläinen <otto@seravo.fi>
Ouyang Liduo <oyld0210@163.com>
@@ -1717,10 +1629,8 @@ Patrick Böänziger <patrick.baenziger@bsi-software.com>
Patrick Devine <patrick.devine@docker.com>
Patrick Haas <patrickhaas@google.com>
Patrick Hemmer <patrick.hemmer@gmail.com>
Patrick St. laurent <patrick@saint-laurent.us>
Patrick Stapleton <github@gdi2290.com>
Patrik Cyvoct <patrik@ptrk.io>
Patrik Leifert <patrikleifert@hotmail.com>
pattichen <craftsbear@gmail.com>
Paul "TBBle" Hampson <Paul.Hampson@Pobox.com>
Paul <paul9869@gmail.com>
@@ -1736,7 +1646,6 @@ Paul Lietar <paul@lietar.net>
Paul Liljenberg <liljenberg.paul@gmail.com>
Paul Morie <pmorie@gmail.com>
Paul Nasrat <pnasrat@gmail.com>
Paul Seiffert <paul.seiffert@jimdo.com>
Paul Weaver <pauweave@cisco.com>
Paulo Gomes <pjbgf@linux.com>
Paulo Ribeiro <paigr.io@gmail.com>
@@ -1750,7 +1659,6 @@ Pavlos Ratis <dastergon@gentoo.org>
Pavol Vargovcik <pallly.vargovcik@gmail.com>
Pawel Konczalski <mail@konczalski.de>
Paweł Gronowski <pawel.gronowski@docker.com>
payall4u <payall4u@qq.com>
Peeyush Gupta <gpeeyush@linux.vnet.ibm.com>
Peggy Li <peggyli.224@gmail.com>
Pei Su <sillyousu@gmail.com>
@@ -1780,9 +1688,7 @@ Phil Estes <estesp@gmail.com>
Phil Sphicas <phil.sphicas@att.com>
Phil Spitler <pspitler@gmail.com>
Philip Alexander Etling <paetling@gmail.com>
Philip K. Warren <pkwarren@gmail.com>
Philip Monroe <phil@philmonroe.com>
Philipp Fruck <dev@p-fruck.de>
Philipp Gillé <philipp.gille@gmail.com>
Philipp Wahala <philipp.wahala@gmail.com>
Philipp Weissensteiner <mail@philippweissensteiner.com>
@@ -1795,7 +1701,6 @@ Pierre Carrier <pierre@meteor.com>
Pierre Dal-Pra <dalpra.pierre@gmail.com>
Pierre Wacrenier <pierre.wacrenier@gmail.com>
Pierre-Alain RIVIERE <pariviere@ippon.fr>
pinglanlu <pinglanlu@outlook.com>
Piotr Bogdan <ppbogdan@gmail.com>
Piotr Karbowski <piotr.karbowski@protonmail.ch>
Porjo <porjo38@yahoo.com.au>
@@ -1821,9 +1726,7 @@ Quentin Brossard <qbrossard@gmail.com>
Quentin Perez <qperez@ocs.online.net>
Quentin Tayssier <qtayssier@gmail.com>
r0n22 <cameron.regan@gmail.com>
Rachit Sharma <rachitsharma613@gmail.com>
Radostin Stoyanov <rstoyanov1@gmail.com>
Rafael Fernández López <ereslibre@ereslibre.es>
Rafal Jeczalik <rjeczalik@gmail.com>
Rafe Colton <rafael.colton@gmail.com>
Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
@@ -1855,7 +1758,6 @@ Rich Horwood <rjhorwood@apple.com>
Rich Moyse <rich@moyse.us>
Rich Seymour <rseymour@gmail.com>
Richard Burnison <rburnison@ebay.com>
Richard Hansen <rhansen@rhansen.org>
Richard Harvey <richard@squarecows.com>
Richard Mathie <richard.mathie@amey.co.uk>
Richard Metzler <richard@paadee.com>
@@ -1871,7 +1773,6 @@ Ritesh H Shukla <sritesh@vmware.com>
Riyaz Faizullabhoy <riyaz.faizullabhoy@docker.com>
Rob Cowsill <42620235+rcowsill@users.noreply.github.com>
Rob Gulewich <rgulewich@netflix.com>
Rob Murray <rob.murray@docker.com>
Rob Vesse <rvesse@dotnetrdf.org>
Robert Bachmann <rb@robertbachmann.at>
Robert Bittle <guywithnose@gmail.com>
@@ -1879,7 +1780,6 @@ Robert Obryk <robryk@gmail.com>
Robert Schneider <mail@shakeme.info>
Robert Shade <robert.shade@gmail.com>
Robert Stern <lexandro2000@gmail.com>
Robert Sturla <robertsturla@outlook.com>
Robert Terhaar <rterhaar@atlanticdynamic.com>
Robert Wallis <smilingrob@gmail.com>
Robert Wang <robert@arctic.tw>
@@ -1891,7 +1791,7 @@ Robin Speekenbrink <robin@kingsquare.nl>
Robin Thoni <robin@rthoni.com>
robpc <rpcann@gmail.com>
Rodolfo Carvalho <rhcarvalho@gmail.com>
Rodrigo Campos <rodrigoca@microsoft.com>
Rodrigo Campos <rodrigo@kinvolk.io>
Rodrigo Vaz <rodrigo.vaz@gmail.com>
Roel Van Nyen <roel.vannyen@gmail.com>
Roger Peppe <rogpeppe@gmail.com>
@@ -1922,12 +1822,10 @@ Rory Hunter <roryhunter2@gmail.com>
Rory McCune <raesene@gmail.com>
Ross Boucher <rboucher@gmail.com>
Rovanion Luckey <rovanion.luckey@gmail.com>
Roy Reznik <roy@wiz.io>
Royce Remer <royceremer@gmail.com>
Rozhnov Alexandr <nox73@ya.ru>
Rudolph Gottesheim <r.gottesheim@loot.at>
Rui Cao <ruicao@alauda.io>
Rui JingAn <quiterace@gmail.com>
Rui Lopes <rgl@ruilopes.com>
Ruilin Li <liruilin4@huawei.com>
Runshen Zhu <runshen.zhu@gmail.com>
@@ -1955,7 +1853,6 @@ ryancooper7 <ryan.cooper7@gmail.com>
RyanDeng <sheldon.d1018@gmail.com>
Ryo Nakao <nakabonne@gmail.com>
Ryoga Saito <contact@proelbtn.com>
Régis Behmo <regis@behmo.com>
Rémy Greinhofer <remy.greinhofer@livelovely.com>
s. rannou <mxs@sbrk.org>
Sabin Basyal <sabin.basyal@gmail.com>
@@ -1972,7 +1869,6 @@ Sam J Sharpe <sam.sharpe@digital.cabinet-office.gov.uk>
Sam Neirinck <sam@samneirinck.com>
Sam Reis <sreis@atlassian.com>
Sam Rijs <srijs@airpost.net>
Sam Thibault <sam.thibault@docker.com>
Sam Whited <sam@samwhited.com>
Sambuddha Basu <sambuddhabasu1@gmail.com>
Sami Wagiaalla <swagiaal@redhat.com>
@@ -1996,7 +1892,6 @@ Satoshi Tagomori <tagomoris@gmail.com>
Scott Bessler <scottbessler@gmail.com>
Scott Collier <emailscottcollier@gmail.com>
Scott Johnston <scott@docker.com>
Scott Moser <smoser@brickies.net>
Scott Percival <scottp@lastyard.com>
Scott Stamp <scottstamp851@gmail.com>
Scott Walls <sawalls@umich.edu>
@@ -2012,7 +1907,6 @@ Sebastiaan van Steenis <mail@superseb.nl>
Sebastiaan van Stijn <github@gone.nl>
Sebastian Höffner <sebastian.hoeffner@mevis.fraunhofer.de>
Sebastian Radloff <sradloff23@gmail.com>
Sebastian Thomschke <sebthom@users.noreply.github.com>
Sebastien Goasguen <runseb@gmail.com>
Senthil Kumar Selvaraj <senthil.thecoder@gmail.com>
Senthil Kumaran <senthil@uthcode.com>
@@ -2024,13 +1918,11 @@ Sergey Evstifeev <sergey.evstifeev@gmail.com>
Sergii Kabashniuk <skabashnyuk@codenvy.com>
Sergio Lopez <slp@redhat.com>
Serhat Gülçiçek <serhat25@gmail.com>
Serhii Nakon <serhii.n@thescimus.com>
SeungUkLee <lsy931106@gmail.com>
Sevki Hasirci <s@sevki.org>
Shane Canon <scanon@lbl.gov>
Shane da Silva <shane@dasilva.io>
Shaun Kaasten <shaunk@gmail.com>
Shaun Thompson <shaun.thompson@docker.com>
shaunol <shaunol@gmail.com>
Shawn Landden <shawn@churchofgit.com>
Shawn Siefkas <shawn.siefkas@meredith.com>
@@ -2049,7 +1941,6 @@ Shijun Qin <qinshijun16@mails.ucas.ac.cn>
Shishir Mahajan <shishir.mahajan@redhat.com>
Shoubhik Bose <sbose78@gmail.com>
Shourya Sarcar <shourya.sarcar@gmail.com>
Shreenidhi Shedi <shreenidhi.shedi@broadcom.com>
Shu-Wai Chow <shu-wai.chow@seattlechildrens.org>
shuai-z <zs.broccoli@gmail.com>
Shukui Yang <yangshukui@huawei.com>
@@ -2089,7 +1980,6 @@ Stanislav Bondarenko <stanislav.bondarenko@gmail.com>
Stanislav Levin <slev@altlinux.org>
Steeve Morin <steeve.morin@gmail.com>
Stefan Berger <stefanb@linux.vnet.ibm.com>
Stefan Gehrig <stefan.gehrig.hn@googlemail.com>
Stefan J. Wernli <swernli@microsoft.com>
Stefan Praszalowicz <stefan@greplin.com>
Stefan S. <tronicum@user.github.com>
@@ -2097,7 +1987,6 @@ Stefan Scherer <stefan.scherer@docker.com>
Stefan Staudenmeyer <doerte@instana.com>
Stefan Weil <sw@weilnetz.de>
Steffen Butzer <steffen.butzer@outlook.com>
Stephan Henningsen <stephan-henningsen@users.noreply.github.com>
Stephan Spindler <shutefan@gmail.com>
Stephen Benjamin <stephen@redhat.com>
Stephen Crosby <stevecrozz@gmail.com>
@@ -2137,7 +2026,6 @@ Sébastien Stormacq <sebsto@users.noreply.github.com>
Sören Tempel <soeren+git@soeren-tempel.net>
Tabakhase <mail@tabakhase.com>
Tadej Janež <tadej.j@nez.si>
Tadeusz Dudkiewicz <tadeusz.dudkiewicz@rtbhouse.com>
Takuto Sato <tockn.jp@gmail.com>
tang0th <tang0th@gmx.com>
Tangi Colin <tangicolin@gmail.com>
@@ -2145,7 +2033,6 @@ Tatsuki Sugiura <sugi@nemui.org>
Tatsushi Inagaki <e29253@jp.ibm.com>
Taylan Isikdemir <taylani@google.com>
Taylor Jones <monitorjbl@gmail.com>
tcpdumppy <847462026@qq.com>
Ted M. Young <tedyoung@gmail.com>
Tehmasp Chaudhri <tehmasp@gmail.com>
Tejaswini Duggaraju <naduggar@microsoft.com>
@@ -2238,7 +2125,6 @@ Tomek Mańko <tomek.manko@railgun-solutions.com>
Tommaso Visconti <tommaso.visconti@gmail.com>
Tomoya Tabuchi <t@tomoyat1.com>
Tomáš Hrčka <thrcka@redhat.com>
Tomáš Virtus <nechtom@gmail.com>
tonic <tonicbupt@gmail.com>
Tonny Xu <tonny.xu@gmail.com>
Tony Abboud <tdabboud@hotmail.com>
@@ -2283,7 +2169,6 @@ Victor I. Wood <viw@t2am.com>
Victor Lyuboslavsky <victor@victoreda.com>
Victor Marmol <vmarmol@google.com>
Victor Palma <palma.victor@gmail.com>
Victor Toni <victor.toni@gmail.com>
Victor Vieux <victor.vieux@docker.com>
Victoria Bialas <victoria.bialas@docker.com>
Vijaya Kumar K <vijayak@caviumnetworks.com>
@@ -2303,7 +2188,6 @@ Vinod Kulkarni <vinod.kulkarni@gmail.com>
Vishal Doshi <vishal.doshi@gmail.com>
Vishnu Kannan <vishnuk@google.com>
Vitaly Ostrosablin <vostrosablin@virtuozzo.com>
Vitor Anjos <bartier@users.noreply.github.com>
Vitor Monteiro <vmrmonteiro@gmail.com>
Vivek Agarwal <me@vivek.im>
Vivek Dasgupta <vdasgupt@redhat.com>
@@ -2317,7 +2201,6 @@ VladimirAus <v_roudakov@yahoo.com>
Vladislav Kolesnikov <vkolesnikov@beget.ru>
Vlastimil Zeman <vlastimil.zeman@diffblue.com>
Vojtech Vitek (V-Teq) <vvitek@redhat.com>
voloder <110066198+voloder@users.noreply.github.com>
Walter Leibbrandt <github@wrl.co.za>
Walter Stanish <walter@pratyeka.org>
Wang Chao <chao.wang@ucloud.cn>
@@ -2335,7 +2218,6 @@ Wassim Dhif <wassimdhif@gmail.com>
Wataru Ishida <ishida.wataru@lab.ntt.co.jp>
Wayne Chang <wayne@neverfear.org>
Wayne Song <wsong@docker.com>
weebney <weebney@gmail.com>
Weerasak Chongnguluam <singpor@gmail.com>
Wei Fu <fuweid89@gmail.com>
Wei Wu <wuwei4455@gmail.com>
@@ -2352,7 +2234,6 @@ Wenxuan Zhao <viz@linux.com>
Wenyu You <21551128@zju.edu.cn>
Wenzhi Liang <wenzhi.liang@gmail.com>
Wes Morgan <cap10morgan@gmail.com>
Wesley Pettit <wppttt@amazon.com>
Wewang Xiaorenfine <wang.xiaoren@zte.com.cn>
Wiktor Kwapisiewicz <wiktor@metacode.biz>
Will Dietz <w@wdtz.org>
@@ -2390,9 +2271,8 @@ Xiaoyu Zhang <zhang.xiaoyu33@zte.com.cn>
xichengliudui <1693291525@qq.com>
xiekeyang <xiekeyang@huawei.com>
Ximo Guanter Gonzálbez <joaquin.guantergonzalbez@telefonica.com>
xin.li <xin.li@daocloud.io>
Xinbo Weng <xihuanbo_0521@zju.edu.cn>
Xinfeng Liu <XinfengLiu@icloud.com>
Xinfeng Liu <xinfeng.liu@gmail.com>
Xinzi Zhou <imdreamrunner@gmail.com>
Xiuming Chen <cc@cxm.cc>
Xuecong Liao <satorulogic@gmail.com>
@@ -2402,7 +2282,6 @@ Yahya <ya7yaz@gmail.com>
yalpul <yalpul@gmail.com>
YAMADA Tsuyoshi <tyamada@minimum2scp.org>
Yamasaki Masahide <masahide.y@gmail.com>
Yamazaki Masashi <masi19bw@gmail.com>
Yan Feng <yanfeng2@huawei.com>
Yan Zhu <yanzhu@alauda.io>
Yang Bai <hamo.by@gmail.com>
@@ -2430,7 +2309,6 @@ You-Sheng Yang (楊有勝) <vicamo@gmail.com>
youcai <omegacoleman@gmail.com>
Youcef YEKHLEF <yyekhlef@gmail.com>
Youfu Zhang <zhangyoufu@gmail.com>
YR Chen <stevapple@icloud.com>
Yu Changchun <yuchangchun1@huawei.com>
Yu Chengxia <yuchengxia@huawei.com>
Yu Peng <yu.peng36@zte.com.cn>
@@ -2459,7 +2337,6 @@ Zen Lin(Zhinan Lin) <linzhinan@huawei.com>
Zhang Kun <zkazure@gmail.com>
Zhang Wei <zhangwei555@huawei.com>
Zhang Wentao <zhangwentao234@huawei.com>
zhangguanzhang <zhangguanzhang@qq.com>
ZhangHang <stevezhang2014@gmail.com>
zhangxianwei <xianwei.zw@alibaba-inc.com>
Zhenan Ye <21551168@zju.edu.cn>
@@ -2486,7 +2363,6 @@ Zuhayr Elahi <zuhayr.elahi@docker.com>
Zunayed Ali <zunayed@gmail.com>
Álvaro Lázaro <alvaro.lazaro.g@gmail.com>
Átila Camurça Alves <camurca.home@gmail.com>
吴小白 <296015668@qq.com>
尹吉峰 <jifeng.yin@gmail.com>
屈骏 <qujun@tiduyun.com>
徐俊杰 <paco.xu@daocloud.io>


@@ -72,7 +72,7 @@ anybody starts working on it.
We are always thrilled to receive pull requests. We do our best to process them
quickly. If your pull request is not accepted on the first try,
don't get discouraged! Our contributor's guide explains [the review process we
use for simple changes](https://docs.docker.com/contribute/overview/).
use for simple changes](https://docs.docker.com/opensource/workflow/make-a-contribution/).
### Design and cleanup proposals
@@ -101,7 +101,7 @@ the contributors guide.
<td>
<p>
Register for the Docker Community Slack at
<a href="https://dockr.ly/comm-slack" target="_blank">https://dockr.ly/comm-slack</a>.
<a href="https://dockr.ly/slack" target="_blank">https://dockr.ly/slack</a>.
We use the #moby-project channel for general discussion, and there are separate channels for other Moby projects such as #containerd.
</p>
</td>
@@ -309,6 +309,36 @@ Don't forget: being a maintainer is a time investment. Make sure you
will have time to make yourself available. You don't have to be a
maintainer to make a difference on the project!
### Manage issues and pull requests using the Derek bot
If you want to help label, assign, close or reopen issues or pull requests
without commit rights, ask a maintainer to add your GitHub handle to the
`.DEREK.yml` file. [Derek](https://github.com/alexellis/derek) is a bot that extends
GitHub's user permissions to help non-committers manage issues and pull requests simply by commenting. A sketch of what `.DEREK.yml` might contain follows the examples below.
For example:
* Labels
```
Derek add label: kind/question
Derek remove label: status/claimed
```
* Assign work
```
Derek assign: username
Derek unassign: me
```
* Manage issues and PRs
```
Derek close
Derek reopen
```
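The `.DEREK.yml` file itself is a small YAML document. The snippet below is only an illustrative sketch, not the authoritative schema: the key names (`curators`, `features`) and the handle shown are assumptions based on the Derek project's documentation, so check that documentation before adding or editing the file.

```
# Illustrative sketch only; consult the Derek documentation for the exact schema.
curators:
  - your-github-handle    # hypothetical handle; replace with your own
features:
  - comments              # enables the comment commands shown above
```

Once merged, Derek should respond to the commands shown above when they come from the listed handles.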
## Moby community guidelines
We want to keep the Moby community awesome, growing and collaborative. We need
@@ -422,6 +452,6 @@ The rules:
guidelines. Since you've read all the rules, you now know that.
If you are having trouble getting into the mood of idiomatic Go, we recommend
reading through [Effective Go](https://go.dev/doc/effective_go). The
[Go Blog](https://go.dev/blog/) is also a great resource. Drinking the
reading through [Effective Go](https://golang.org/doc/effective_go.html). The
[Go Blog](https://blog.golang.org) is also a great resource. Drinking the
kool-aid is a lot easier than going thirsty.

View File

@@ -1,113 +1,84 @@
# syntax=docker/dockerfile:1
ARG GO_VERSION=1.24.3
ARG BASE_DEBIAN_DISTRO="bookworm"
ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}"
ARG XX_VERSION=1.6.1
# VPNKIT_VERSION is the version of the vpnkit binary which is used as a fallback
# network driver for rootless mode.
ARG VPNKIT_VERSION=0.6.0
# DOCKERCLI_VERSION is the version of the CLI to install in the dev-container.
ARG DOCKERCLI_VERSION=v28.2.0-rc.2
ARG DOCKERCLI_REPOSITORY="https://github.com/docker/cli.git"
# cli version used for integration-cli tests
ARG DOCKERCLI_INTEGRATION_REPOSITORY="https://github.com/docker/cli.git"
ARG DOCKERCLI_INTEGRATION_VERSION=v18.06.3-ce
# BUILDX_VERSION is the version of buildx to install in the dev container.
ARG BUILDX_VERSION=0.24.0
# COMPOSE_VERSION is the version of compose to install in the dev container.
ARG COMPOSE_VERSION=v2.36.2
ARG CROSS="false"
ARG SYSTEMD="false"
ARG FIREWALLD="false"
ARG DOCKER_STATIC=1
ARG GO_VERSION=1.19.3
ARG DEBIAN_FRONTEND=noninteractive
ARG VPNKIT_VERSION=0.5.0
# REGISTRY_VERSION specifies the version of the registry to download from
# https://hub.docker.com/r/distribution/distribution. This version of
# the registry is used to test schema 2 manifests. Generally, the version
# specified here should match a current release.
ARG REGISTRY_VERSION=3.0.0
ARG BASE_DEBIAN_DISTRO="bullseye"
ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}"
# delve is currently only supported on linux/amd64 and linux/arm64;
# https://github.com/go-delve/delve/blob/v1.24.1/pkg/proc/native/support_sentinel.go#L1
# https://github.com/go-delve/delve/blob/v1.24.1/pkg/proc/native/support_sentinel_linux.go#L1
#
# ppc64le support was added in v1.21.1, but is still experimental, and requires
# the "-tags exp.linuxppc64le" build-tag to be set:
# https://github.com/go-delve/delve/commit/71f12207175a1cc09668f856340d8a543c87dcca
ARG DELVE_SUPPORTED=${TARGETPLATFORM#linux/amd64} DELVE_SUPPORTED=${DELVE_SUPPORTED#linux/arm64} DELVE_SUPPORTED=${DELVE_SUPPORTED#linux/ppc64le}
ARG DELVE_SUPPORTED=${DELVE_SUPPORTED:+"unsupported"}
ARG DELVE_SUPPORTED=${DELVE_SUPPORTED:-"supported"}
# cross compilation helper
FROM --platform=$BUILDPLATFORM tonistiigi/xx:${XX_VERSION} AS xx
# dummy stage to make sure the image is built for deps that don't support some
# architectures
FROM --platform=$BUILDPLATFORM busybox AS build-dummy
RUN mkdir -p /build
FROM scratch AS binary-dummy
COPY --from=build-dummy /build /build
# base
FROM --platform=$BUILDPLATFORM ${GOLANG_IMAGE} AS base
COPY --from=xx / /
# Disable collecting local telemetry, as collected by Go and Delve;
#
# - https://github.com/go-delve/delve/blob/v1.24.1/CHANGELOG.md#1231-2024-09-23
# - https://go.dev/doc/telemetry#background
RUN go telemetry off && [ "$(go telemetry)" = "off" ] || { echo "Failed to disable Go telemetry"; exit 1; }
FROM ${GOLANG_IMAGE} AS base
RUN echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
RUN apt-get update && apt-get install --no-install-recommends -y file
ARG APT_MIRROR
RUN sed -ri "s/(httpredir|deb).debian.org/${APT_MIRROR:-deb.debian.org}/g" /etc/apt/sources.list \
&& sed -ri "s/(security).debian.org/${APT_MIRROR:-security.debian.org}/g" /etc/apt/sources.list
ENV GO111MODULE=off
ENV GOTOOLCHAIN=local
FROM base AS criu
ARG DEBIAN_FRONTEND
ADD --chmod=0644 https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_11/Release.key /etc/apt/trusted.gpg.d/criu.gpg.asc
RUN --mount=type=cache,sharing=locked,id=moby-criu-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-criu-aptcache,target=/var/cache/apt \
echo 'deb https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_12/ /' > /etc/apt/sources.list.d/criu.list \
echo 'deb https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_11/ /' > /etc/apt/sources.list.d/criu.list \
&& apt-get update \
&& apt-get install -y --no-install-recommends criu \
&& install -D /usr/sbin/criu /build/criu \
&& /build/criu --version
&& install -D /usr/sbin/criu /build/criu
# registry
FROM distribution/distribution:$REGISTRY_VERSION AS registry
RUN mkdir /build && mv /bin/registry /build/registry
FROM base AS registry
WORKDIR /go/src/github.com/docker/distribution
# go-swagger
FROM base AS swagger-src
WORKDIR /usr/src/swagger
# Currently uses a fork from https://github.com/kolyshkin/go-swagger/tree/golang-1.13-fix
# TODO: move to under moby/ or fix upstream go-swagger to work for us.
RUN git init . && git remote add origin "https://github.com/kolyshkin/go-swagger.git"
# GO_SWAGGER_COMMIT specifies the version of the go-swagger binary to build and
# install. Go-swagger is used in CI for validating swagger.yaml in hack/validate/swagger-gen
ARG GO_SWAGGER_COMMIT=c56166c036004ba7a3a321e5951ba472b9ae298c
RUN git fetch -q --depth 1 origin "${GO_SWAGGER_COMMIT}" && git checkout -q FETCH_HEAD
# REGISTRY_VERSION specifies the version of the registry to build and install
# from the https://github.com/docker/distribution repository. This version of
# the registry is used to test both schema 1 and schema 2 manifests. Generally,
# the version specified here should match a current release.
ARG REGISTRY_VERSION=v2.3.0
# REGISTRY_VERSION_SCHEMA1 specifies the version of the registry to build and
# install from the https://github.com/docker/distribution repository. This is
# an older (pre v2.3.0) version of the registry that only supports schema1
# manifests. This version of the registry does not work on arm64, so installation
# is skipped on that architecture.
ARG REGISTRY_VERSION_SCHEMA1=v2.1.0
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=tmpfs,target=/go/src/ \
set -x \
&& git clone https://github.com/docker/distribution.git . \
&& git checkout -q "$REGISTRY_VERSION" \
&& GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \
go build -buildmode=pie -o /build/registry-v2 github.com/docker/distribution/cmd/registry \
&& case $(dpkg --print-architecture) in \
amd64|armhf|ppc64*|s390x) \
git checkout -q "$REGISTRY_VERSION_SCHEMA1"; \
GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \
go build -buildmode=pie -o /build/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \
;; \
esac
FROM base AS swagger
WORKDIR /go/src/github.com/go-swagger/go-swagger
ARG TARGETPLATFORM
RUN --mount=from=swagger-src,src=/usr/src/swagger,rw \
--mount=type=cache,target=/root/.cache/go-build,id=swagger-build-$TARGETPLATFORM \
WORKDIR $GOPATH/src/github.com/go-swagger/go-swagger
# GO_SWAGGER_COMMIT specifies the version of the go-swagger binary to build and
# install. Go-swagger is used in CI for validating swagger.yaml in hack/validate/swagger-gen
#
# Currently uses a fork from https://github.com/kolyshkin/go-swagger/tree/golang-1.13-fix,
# TODO: move to under moby/ or fix upstream go-swagger to work for us.
ENV GO_SWAGGER_COMMIT c56166c036004ba7a3a321e5951ba472b9ae298c
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=tmpfs,target=/go/src/ <<EOT
set -e
xx-go build -o /build/swagger ./cmd/swagger
xx-verify /build/swagger
EOT
--mount=type=tmpfs,target=/go/src/ \
set -x \
&& git clone https://github.com/kolyshkin/go-swagger.git . \
&& git checkout -q "$GO_SWAGGER_COMMIT" \
&& go build -o /build/swagger github.com/go-swagger/go-swagger/cmd/swagger
# frozen-images
# See also frozenImages in "testutil/environment/protect.go" (which needs to
# be updated when adding images to this list)
FROM debian:${BASE_DEBIAN_DISTRO} AS frozen-images
ARG DEBIAN_FRONTEND
RUN --mount=type=cache,sharing=locked,id=moby-frozen-images-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-frozen-images-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
@@ -121,246 +92,183 @@ ARG TARGETVARIANT
RUN /download-frozen-image-v2.sh /build \
busybox:latest@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209 \
busybox:glibc@sha256:1f81263701cddf6402afe9f33fca0266d9fff379e59b1748f33d3072da71ee85 \
debian:bookworm-slim@sha256:2bc5c236e9b262645a323e9088dfa3bb1ecb16cc75811daf40a23a824d665be9 \
debian:bullseye-slim@sha256:dacf278785a4daa9de07596ec739dbc07131e189942772210709c5c0777e8437 \
hello-world:latest@sha256:d58e752213a51785838f9eed2b7a498ffa1cb3aa7f946dda11af39286c3db9a9 \
arm32v7/hello-world:latest@sha256:50b8560ad574c779908da71f7ce370c0a2471c098d44d1c8f6b513c5a55eeeb1 \
hello-world:amd64@sha256:90659bf80b44ce6be8234e6ff90a1ac34acbeb826903b02cfa0da11c82cbc042 \
hello-world:arm64@sha256:963612c5503f3f1674f315c67089dee577d8cc6afc18565e0b4183ae355fb343
arm32v7/hello-world:latest@sha256:50b8560ad574c779908da71f7ce370c0a2471c098d44d1c8f6b513c5a55eeeb1
# delve
FROM base AS delve-src
WORKDIR /usr/src/delve
RUN git init . && git remote add origin "https://github.com/go-delve/delve.git"
FROM base AS cross-false
FROM --platform=linux/amd64 base AS cross-true
ARG DEBIAN_FRONTEND
RUN dpkg --add-architecture arm64
RUN dpkg --add-architecture armel
RUN dpkg --add-architecture armhf
RUN dpkg --add-architecture ppc64el
RUN dpkg --add-architecture s390x
RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
crossbuild-essential-arm64 \
crossbuild-essential-armel \
crossbuild-essential-armhf \
crossbuild-essential-ppc64el \
crossbuild-essential-s390x
FROM cross-${CROSS} AS dev-base
FROM dev-base AS runtime-dev-cross-false
ARG DEBIAN_FRONTEND
RUN --mount=type=cache,sharing=locked,id=moby-cross-false-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-cross-false-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
binutils-mingw-w64 \
g++-mingw-w64-x86-64 \
libapparmor-dev \
libbtrfs-dev \
libdevmapper-dev \
libseccomp-dev \
libsystemd-dev \
libudev-dev
FROM --platform=linux/amd64 runtime-dev-cross-false AS runtime-dev-cross-true
ARG DEBIAN_FRONTEND
# These crossbuild packages rely on gcc-<arch>, which does not install
# on non-amd64 systems, so other architectures cannot cross-build for amd64.
RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
libapparmor-dev:arm64 \
libapparmor-dev:armel \
libapparmor-dev:armhf \
libapparmor-dev:ppc64el \
libapparmor-dev:s390x \
libseccomp-dev:arm64 \
libseccomp-dev:armel \
libseccomp-dev:armhf \
libseccomp-dev:ppc64el \
libseccomp-dev:s390x
FROM runtime-dev-cross-${CROSS} AS runtime-dev
FROM base AS delve
# DELVE_VERSION specifies the version of the Delve debugger binary
# from the https://github.com/go-delve/delve repository.
# It can be used to run Docker with the possibility of
# attaching a debugger to it.
ARG DELVE_VERSION=v1.24.1
RUN git fetch -q --depth 1 origin "${DELVE_VERSION}" +refs/tags/*:refs/tags/* && git checkout -q FETCH_HEAD
#
ARG DELVE_VERSION=v1.8.1
# Delve on Linux is currently only supported on amd64 and arm64;
# https://github.com/go-delve/delve/blob/v1.8.1/pkg/proc/native/support_sentinel.go#L1-L6
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
case $(dpkg --print-architecture) in \
amd64|arm64) \
GOBIN=/build/ GO111MODULE=on go install "github.com/go-delve/delve/cmd/dlv@${DELVE_VERSION}" \
&& /build/dlv --help \
;; \
*) \
mkdir -p /build/ \
;; \
esac
FROM base AS delve-supported
WORKDIR /usr/src/delve
ARG TARGETPLATFORM
RUN --mount=from=delve-src,src=/usr/src/delve,rw \
--mount=type=cache,target=/root/.cache/go-build,id=delve-build-$TARGETPLATFORM \
--mount=type=cache,target=/go/pkg/mod <<EOT
set -e
GO111MODULE=on xx-go build -o /build/dlv ./cmd/dlv
xx-verify /build/dlv
EOT
FROM binary-dummy AS delve-unsupported
FROM delve-${DELVE_SUPPORTED} AS delve
FROM base AS tomll
# GOTOML_VERSION specifies the version of the tomll binary to build and install
# from the https://github.com/pelletier/go-toml repository. This binary is used
# in CI in the hack/validate/toml script.
#
# When updating this version, consider updating the github.com/pelletier/go-toml
# dependency in vendor.mod accordingly.
ARG GOTOML_VERSION=v1.8.1
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
GOBIN=/build/ GO111MODULE=on go install "github.com/pelletier/go-toml/cmd/tomll@${GOTOML_VERSION}" \
&& /build/tomll --help
FROM base AS gowinres
# GOWINRES_VERSION defines go-winres tool version
ARG GOWINRES_VERSION=v0.3.1
ARG GOWINRES_VERSION=v0.3.0
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
GOBIN=/build/ GO111MODULE=on go install "github.com/tc-hib/go-winres@${GOWINRES_VERSION}" \
&& /build/go-winres --help
# containerd
FROM base AS containerd-src
WORKDIR /usr/src/containerd
RUN git init . && git remote add origin "https://github.com/containerd/containerd.git"
# CONTAINERD_VERSION is used to build containerd binaries, and used for the
# integration tests. The distributed docker .deb and .rpm packages depend on a
# separate (containerd.io) package, which may be a different version than is
# specified here. The containerd golang package is also pinned in vendor.mod.
# When updating the binary version you may also need to update the vendor
# version to pick up bug fixes or new APIs, however, usually the Go packages
# are built from a commit from the master branch.
ARG CONTAINERD_VERSION=v1.7.27
RUN git fetch -q --depth 1 origin "${CONTAINERD_VERSION}" +refs/tags/*:refs/tags/* && git checkout -q FETCH_HEAD
FROM base AS containerd-build
WORKDIR /go/src/github.com/containerd/containerd
ARG TARGETPLATFORM
FROM dev-base AS containerd
ARG DEBIAN_FRONTEND
RUN --mount=type=cache,sharing=locked,id=moby-containerd-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-containerd-aptcache,target=/var/cache/apt \
apt-get update && xx-apt-get install -y --no-install-recommends \
gcc \
pkg-config
ARG DOCKER_STATIC
RUN --mount=from=containerd-src,src=/usr/src/containerd,rw \
--mount=type=cache,target=/root/.cache/go-build,id=containerd-build-$TARGETPLATFORM <<EOT
set -e
export CC=$(xx-info)-gcc
export CGO_ENABLED=$([ "$DOCKER_STATIC" = "1" ] && echo "0" || echo "1")
xx-go --wrap
make $([ "$DOCKER_STATIC" = "1" ] && echo "STATIC=1") binaries
xx-verify $([ "$DOCKER_STATIC" = "1" ] && echo "--static") bin/containerd
xx-verify $([ "$DOCKER_STATIC" = "1" ] && echo "--static") bin/containerd-shim-runc-v2
xx-verify $([ "$DOCKER_STATIC" = "1" ] && echo "--static") bin/ctr
mkdir /build
mv bin/containerd bin/containerd-shim-runc-v2 bin/ctr /build
EOT
FROM containerd-build AS containerd-linux
FROM binary-dummy AS containerd-windows
FROM containerd-${TARGETOS} AS containerd
FROM base AS golangci_lint
ARG GOLANGCI_LINT_VERSION=v2.1.5
apt-get update && apt-get install -y --no-install-recommends \
libbtrfs-dev
ARG CONTAINERD_VERSION
COPY /hack/dockerfile/install/install.sh /hack/dockerfile/install/containerd.installer /
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
GOBIN=/build/ GO111MODULE=on go install "github.com/golangci/golangci-lint/v2/cmd/golangci-lint@${GOLANGCI_LINT_VERSION}" \
PREFIX=/build /install.sh containerd
FROM base AS golangci_lint
# FIXME: when updating golangci-lint, remove the temporary "nolint" in https://github.com/moby/moby/blob/7860686a8df15eea9def9e6189c6f9eca031bb6f/libnetwork/networkdb/cluster.go#L246
ARG GOLANGCI_LINT_VERSION=v1.49.0
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
GOBIN=/build/ GO111MODULE=on go install "github.com/golangci/golangci-lint/cmd/golangci-lint@${GOLANGCI_LINT_VERSION}" \
&& /build/golangci-lint --version
FROM base AS gotestsum
ARG GOTESTSUM_VERSION=v1.12.0
ARG GOTESTSUM_VERSION=v1.8.2
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
GOBIN=/build/ GO111MODULE=on go install "gotest.tools/gotestsum@${GOTESTSUM_VERSION}" \
&& /build/gotestsum --version
FROM base AS shfmt
ARG SHFMT_VERSION=v3.8.0
ARG SHFMT_VERSION=v3.0.2
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
GOBIN=/build/ GO111MODULE=on go install "mvdan.cc/sh/v3/cmd/shfmt@${SHFMT_VERSION}" \
&& /build/shfmt --version
FROM base AS gopls
FROM dev-base AS dockercli
ARG DOCKERCLI_CHANNEL
ARG DOCKERCLI_VERSION
COPY /hack/dockerfile/install/install.sh /hack/dockerfile/install/dockercli.installer /
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
GOBIN=/build/ GO111MODULE=on go install "golang.org/x/tools/gopls@latest" \
&& /build/gopls version
PREFIX=/build /install.sh dockercli
FROM base AS dockercli
WORKDIR /go/src/github.com/docker/cli
ARG DOCKERCLI_REPOSITORY
ARG DOCKERCLI_VERSION
ARG TARGETPLATFORM
RUN --mount=source=hack/dockerfile/cli.sh,target=/download-or-build-cli.sh \
--mount=type=cache,id=dockercli-git-$TARGETPLATFORM,sharing=locked,target=./.git \
--mount=type=cache,target=/root/.cache/go-build,id=dockercli-build-$TARGETPLATFORM \
rm -f ./.git/*.lock \
&& /download-or-build-cli.sh ${DOCKERCLI_VERSION} ${DOCKERCLI_REPOSITORY} /build \
&& /build/docker --version \
&& /build/docker completion bash >/completion.bash
FROM base AS dockercli-integration
WORKDIR /go/src/github.com/docker/cli
ARG DOCKERCLI_INTEGRATION_REPOSITORY
ARG DOCKERCLI_INTEGRATION_VERSION
ARG TARGETPLATFORM
RUN --mount=source=hack/dockerfile/cli.sh,target=/download-or-build-cli.sh \
--mount=type=cache,id=dockercli-git-$TARGETPLATFORM,sharing=locked,target=./.git \
--mount=type=cache,target=/root/.cache/go-build,id=dockercli-build-$TARGETPLATFORM \
rm -f ./.git/*.lock \
&& /download-or-build-cli.sh ${DOCKERCLI_INTEGRATION_VERSION} ${DOCKERCLI_INTEGRATION_REPOSITORY} /build \
&& /build/docker --version
# runc
FROM base AS runc-src
WORKDIR /usr/src/runc
RUN git init . && git remote add origin "https://github.com/opencontainers/runc.git"
# RUNC_VERSION should match the runc version used by the containerd version
# pinned above. If you need to update runc, open a pull request in the containerd
# project first, and update both after that is merged. When updating RUNC_VERSION,
# consider updating runc in vendor.mod accordingly.
ARG RUNC_VERSION=v1.2.6
RUN git fetch -q --depth 1 origin "${RUNC_VERSION}" +refs/tags/*:refs/tags/* && git checkout -q FETCH_HEAD
FROM base AS runc-build
WORKDIR /go/src/github.com/opencontainers/runc
ARG TARGETPLATFORM
RUN --mount=type=cache,sharing=locked,id=moby-runc-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-runc-aptcache,target=/var/cache/apt \
apt-get update && xx-apt-get install -y --no-install-recommends \
gcc \
libc6-dev \
libseccomp-dev \
pkg-config
ARG DOCKER_STATIC
RUN --mount=from=runc-src,src=/usr/src/runc,rw \
--mount=type=cache,target=/root/.cache/go-build,id=runc-build-$TARGETPLATFORM <<EOT
set -e
xx-go --wrap
CGO_ENABLED=1 make "$([ "$DOCKER_STATIC" = "1" ] && echo "static" || echo "runc")"
xx-verify $([ "$DOCKER_STATIC" = "1" ] && echo "--static") runc
mkdir /build
mv runc /build/
EOT
FROM runc-build AS runc-linux
FROM binary-dummy AS runc-windows
FROM runc-${TARGETOS} AS runc
# tini
FROM base AS tini-src
WORKDIR /usr/src/tini
RUN git init . && git remote add origin "https://github.com/krallin/tini.git"
# TINI_VERSION specifies the version of tini (docker-init) to build. This
# binary is used when starting containers with the `--init` option.
ARG TINI_VERSION=v0.19.0
RUN git fetch -q --depth 1 origin "${TINI_VERSION}" +refs/tags/*:refs/tags/* && git checkout -q FETCH_HEAD
FROM base AS tini-build
WORKDIR /go/src/github.com/krallin/tini
RUN --mount=type=cache,sharing=locked,id=moby-tini-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-tini-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends cmake
ARG TARGETPLATFORM
RUN --mount=type=cache,sharing=locked,id=moby-tini-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-tini-aptcache,target=/var/cache/apt \
xx-apt-get install -y --no-install-recommends \
gcc \
libc6-dev \
pkg-config
RUN --mount=from=tini-src,src=/usr/src/tini,rw \
--mount=type=cache,target=/root/.cache/go-build,id=tini-build-$TARGETPLATFORM <<EOT
set -e
CC=$(xx-info)-gcc cmake .
make tini-static
xx-verify --static tini-static
mkdir /build
mv tini-static /build/docker-init
EOT
FROM tini-build AS tini-linux
FROM binary-dummy AS tini-windows
FROM tini-${TARGETOS} AS tini
# rootlesskit
FROM base AS rootlesskit-src
WORKDIR /usr/src/rootlesskit
RUN git init . && git remote add origin "https://github.com/rootless-containers/rootlesskit.git"
# When updating, also update vendor.mod and hack/dockerfile/install/rootlesskit.installer accordingly.
ARG ROOTLESSKIT_VERSION=v2.3.4
RUN git fetch -q --depth 1 origin "${ROOTLESSKIT_VERSION}" +refs/tags/*:refs/tags/* && git checkout -q FETCH_HEAD
FROM base AS rootlesskit-build
WORKDIR /go/src/github.com/rootless-containers/rootlesskit
ARG TARGETPLATFORM
RUN --mount=type=cache,sharing=locked,id=moby-rootlesskit-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-rootlesskit-aptcache,target=/var/cache/apt \
apt-get update && xx-apt-get install -y --no-install-recommends \
gcc \
libc6-dev \
pkg-config
ENV GO111MODULE=on
ARG DOCKER_STATIC
RUN --mount=from=rootlesskit-src,src=/usr/src/rootlesskit,rw \
FROM runtime-dev AS runc
ARG RUNC_VERSION
ARG RUNC_BUILDTAGS
COPY /hack/dockerfile/install/install.sh /hack/dockerfile/install/runc.installer /
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=cache,target=/root/.cache/go-build,id=rootlesskit-build-$TARGETPLATFORM <<EOT
set -e
export CGO_ENABLED=$([ "$DOCKER_STATIC" = "1" ] && echo "0" || echo "1")
xx-go build -o /build/rootlesskit -ldflags="$([ "$DOCKER_STATIC" != "1" ] && echo "-linkmode=external")" ./cmd/rootlesskit
xx-verify $([ "$DOCKER_STATIC" = "1" ] && echo "--static") /build/rootlesskit
EOT
COPY --link ./contrib/dockerd-rootless.sh /build/
COPY --link ./contrib/dockerd-rootless-setuptool.sh /build/
PREFIX=/build /install.sh runc
FROM rootlesskit-build AS rootlesskit-linux
FROM binary-dummy AS rootlesskit-windows
FROM rootlesskit-${TARGETOS} AS rootlesskit
FROM dev-base AS tini
ARG DEBIAN_FRONTEND
ARG TINI_VERSION
RUN --mount=type=cache,sharing=locked,id=moby-tini-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-tini-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
cmake \
vim-common
COPY /hack/dockerfile/install/install.sh /hack/dockerfile/install/tini.installer /
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
PREFIX=/build /install.sh tini
FROM dev-base AS rootlesskit
ARG ROOTLESSKIT_VERSION
ARG PREFIX=/build
COPY /hack/dockerfile/install/install.sh /hack/dockerfile/install/rootlesskit.installer /
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
/install.sh rootlesskit \
&& "${PREFIX}"/rootlesskit --version \
&& "${PREFIX}"/rootlesskit-docker-proxy --help
COPY ./contrib/dockerd-rootless.sh /build
COPY ./contrib/dockerd-rootless-setuptool.sh /build
FROM base AS crun
# CRUN_VERSION is the version of crun to install in the dev-container.
ARG CRUN_VERSION=1.21
ARG CRUN_VERSION=1.4.5
RUN --mount=type=cache,sharing=locked,id=moby-crun-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-crun-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
@@ -372,6 +280,7 @@ RUN --mount=type=cache,sharing=locked,id=moby-crun-aptlib,target=/var/lib/apt \
libseccomp-dev \
libsystemd-dev \
libtool \
libudev-dev \
libyajl-dev \
python3 \
;
@@ -391,72 +300,88 @@ FROM scratch AS vpnkit-linux-arm
FROM scratch AS vpnkit-linux-ppc64le
FROM scratch AS vpnkit-linux-riscv64
FROM scratch AS vpnkit-linux-s390x
FROM moby/vpnkit-bin:${VPNKIT_VERSION} AS vpnkit-linux-amd64
FROM moby/vpnkit-bin:${VPNKIT_VERSION} AS vpnkit-linux-arm64
FROM djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-linux-amd64
FROM djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-linux-arm64
FROM vpnkit-linux-${TARGETARCH} AS vpnkit-linux
FROM vpnkit-${TARGETOS} AS vpnkit
# containerutility
FROM base AS containerutil-src
WORKDIR /usr/src/containerutil
RUN git init . && git remote add origin "https://github.com/docker-archive/windows-container-utility.git"
ARG CONTAINERUTILITY_VERSION=aa1ba87e99b68e0113bd27ec26c60b88f9d4ccd9
RUN git fetch -q --depth 1 origin "${CONTAINERUTILITY_VERSION}" +refs/tags/*:refs/tags/* && git checkout -q FETCH_HEAD
# TODO: Some of this is only really needed for testing; it would be nice to split this up
FROM runtime-dev AS dev-systemd-false
ARG DEBIAN_FRONTEND
RUN groupadd -r docker
RUN useradd --create-home --gid docker unprivilegeduser \
&& mkdir -p /home/unprivilegeduser/.local/share/docker \
&& chown -R unprivilegeduser /home/unprivilegeduser
# Let us use a .bashrc file
RUN ln -sfv /go/src/github.com/docker/docker/.bashrc ~/.bashrc
# Activate bash completion and include Docker's completion if mounted with DOCKER_BASH_COMPLETION_PATH
RUN echo "source /usr/share/bash-completion/bash_completion" >> /etc/bash.bashrc
RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker
RUN ldconfig
# This should only install packages that are specifically needed for the dev environment and nothing else
# Do you really need to add another package here? Can it be done in a different build stage?
RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
apparmor \
bash-completion \
bzip2 \
inetutils-ping \
iproute2 \
iptables \
jq \
libcap2-bin \
libnet1 \
libnl-3-200 \
libprotobuf-c1 \
libyajl2 \
net-tools \
patch \
pigz \
python3-pip \
python3-setuptools \
python3-wheel \
sudo \
thin-provisioning-tools \
uidmap \
vim \
vim-common \
xfsprogs \
xz-utils \
zip \
zstd
FROM base AS containerutil-build
WORKDIR /usr/src/containerutil
ARG TARGETPLATFORM
RUN xx-apt-get install -y --no-install-recommends \
gcc \
g++ \
libc6-dev \
pkg-config
RUN --mount=from=containerutil-src,src=/usr/src/containerutil,rw \
--mount=type=cache,target=/root/.cache/go-build,id=containerutil-build-$TARGETPLATFORM <<EOT
set -e
CC="$(xx-info)-gcc" CXX="$(xx-info)-g++" make
xx-verify --static containerutility.exe
mkdir /build
mv containerutility.exe /build/
EOT
FROM binary-dummy AS containerutil-linux
FROM containerutil-build AS containerutil-windows-amd64
FROM containerutil-windows-${TARGETARCH} AS containerutil-windows
FROM containerutil-${TARGETOS} AS containerutil
FROM docker/buildx-bin:${BUILDX_VERSION} AS buildx
FROM docker/compose-bin:${COMPOSE_VERSION} AS compose
# Switch to use iptables instead of nftables (to match the CI hosts)
# TODO use some kind of runtime auto-detection instead if/when nftables is supported (https://github.com/moby/moby/issues/26824)
RUN update-alternatives --set iptables /usr/sbin/iptables-legacy || true \
&& update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy || true \
&& update-alternatives --set arptables /usr/sbin/arptables-legacy || true
FROM base AS dev-systemd-false
COPY --link --from=frozen-images /build/ /docker-frozen-images
COPY --link --from=swagger /build/ /usr/local/bin/
COPY --link --from=delve /build/ /usr/local/bin/
COPY --link --from=gowinres /build/ /usr/local/bin/
COPY --link --from=tini /build/ /usr/local/bin/
COPY --link --from=registry /build/ /usr/local/bin/
# Skip the CRIU stage for now, as the opensuse package repository is sometimes
# unstable, and we're currently not using it in CI.
#
# FIXME(thaJeztah): re-enable this stage when https://github.com/moby/moby/issues/38963 is resolved (see https://github.com/moby/moby/pull/38984)
# COPY --link --from=criu /build/ /usr/local/bin/
COPY --link --from=gotestsum /build/ /usr/local/bin/
COPY --link --from=golangci_lint /build/ /usr/local/bin/
COPY --link --from=shfmt /build/ /usr/local/bin/
COPY --link --from=runc /build/ /usr/local/bin/
COPY --link --from=containerd /build/ /usr/local/bin/
COPY --link --from=rootlesskit /build/ /usr/local/bin/
COPY --link --from=vpnkit / /usr/local/bin/
COPY --link --from=containerutil /build/ /usr/local/bin/
COPY --link --from=crun /build/ /usr/local/bin/
COPY --link hack/dockerfile/etc/docker/ /etc/docker/
COPY --link --from=buildx /buildx /usr/local/libexec/docker/cli-plugins/docker-buildx
COPY --link --from=compose /docker-compose /usr/libexec/docker/cli-plugins/docker-compose
ARG YAMLLINT_VERSION=1.27.1
RUN pip3 install yamllint==${YAMLLINT_VERSION}
COPY --from=dockercli /build/ /usr/local/cli
COPY --from=frozen-images /build/ /docker-frozen-images
COPY --from=swagger /build/ /usr/local/bin/
COPY --from=delve /build/ /usr/local/bin/
COPY --from=tomll /build/ /usr/local/bin/
COPY --from=gowinres /build/ /usr/local/bin/
COPY --from=tini /build/ /usr/local/bin/
COPY --from=registry /build/ /usr/local/bin/
COPY --from=criu /build/ /usr/local/bin/
COPY --from=gotestsum /build/ /usr/local/bin/
COPY --from=golangci_lint /build/ /usr/local/bin/
COPY --from=shfmt /build/ /usr/local/bin/
COPY --from=runc /build/ /usr/local/bin/
COPY --from=containerd /build/ /usr/local/bin/
COPY --from=rootlesskit /build/ /usr/local/bin/
COPY --from=vpnkit / /usr/local/bin/
COPY --from=crun /build/ /usr/local/bin/
COPY hack/dockerfile/etc/docker/ /etc/docker/
ENV PATH=/usr/local/cli:$PATH
ENV TEST_CLIENT_BINARY=/usr/local/cli-integration/docker
ENV CONTAINERD_ADDRESS=/run/docker/containerd/containerd.sock
ENV CONTAINERD_NAMESPACE=moby
ARG DOCKER_BUILDTAGS
ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}"
WORKDIR /go/src/github.com/docker/docker
VOLUME /var/lib/docker
VOLUME /home/unprivilegeduser/.local/share/docker
@@ -471,176 +396,70 @@ RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \
dbus-user-session \
systemd \
systemd-sysv
RUN mkdir -p hack \
&& curl -o hack/dind-systemd https://raw.githubusercontent.com/AkihiroSuda/containerized-systemd/b70bac0daeea120456764248164c21684ade7d0d/docker-entrypoint.sh \
&& chmod +x hack/dind-systemd
ENTRYPOINT ["hack/dind-systemd"]
FROM dev-systemd-${SYSTEMD} AS dev-firewalld-false
FROM dev-systemd-${SYSTEMD} AS dev
FROM dev-systemd-true AS dev-firewalld-true
RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
firewalld
FROM dev-firewalld-${FIREWALLD} AS dev-base
RUN groupadd -r docker
RUN useradd --create-home --gid docker unprivilegeduser \
&& mkdir -p /home/unprivilegeduser/.local/share/docker \
&& chown -R unprivilegeduser /home/unprivilegeduser
# Let us use a .bashrc file
RUN ln -sfv /go/src/github.com/docker/docker/.bashrc ~/.bashrc
# Activate bash completion
RUN echo "source /usr/share/bash-completion/bash_completion" >> /etc/bash.bashrc
RUN ldconfig
# Set dev environment as safe git directory to prevent "dubious ownership" errors
# when bind-mounting the source into the dev-container. See https://github.com/moby/moby/pull/44930
RUN git config --global --add safe.directory $GOPATH/src/github.com/docker/docker
# This should only install packages that are specifically needed for the dev environment and nothing else
# Do you really need to add another package here? Can it be done in a different build stage?
RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
apparmor \
bash-completion \
bzip2 \
inetutils-ping \
iproute2 \
iptables \
nftables \
jq \
libcap2-bin \
libnet1 \
libnl-3-200 \
libprotobuf-c1 \
libyajl2 \
nano \
net-tools \
netcat-openbsd \
patch \
pigz \
sudo \
systemd-journal-remote \
thin-provisioning-tools \
uidmap \
vim \
vim-common \
xfsprogs \
xz-utils \
zip \
zstd
RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \
apt-get update && apt-get install --no-install-recommends -y \
gcc \
pkg-config \
libseccomp-dev \
libsystemd-dev \
yamllint
COPY --link --from=dockercli /build/ /usr/local/cli
COPY --link --from=dockercli /completion.bash /etc/bash_completion.d/docker
COPY --link --from=dockercli-integration /build/ /usr/local/cli-integration
FROM base AS build
COPY --from=gowinres /build/ /usr/local/bin/
WORKDIR /go/src/github.com/docker/docker
ENV GO111MODULE=off
ENV CGO_ENABLED=1
RUN --mount=type=cache,sharing=locked,id=moby-build-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-build-aptcache,target=/var/cache/apt \
apt-get update && apt-get install --no-install-recommends -y \
clang \
lld \
llvm
ARG TARGETPLATFORM
RUN --mount=type=cache,sharing=locked,id=moby-build-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-build-aptcache,target=/var/cache/apt \
xx-apt-get install --no-install-recommends -y \
gcc \
libc6-dev \
libseccomp-dev \
libsystemd-dev \
pkg-config
ARG DOCKER_BUILDTAGS
ARG DOCKER_DEBUG
FROM runtime-dev AS binary-base
ARG DOCKER_GITCOMMIT=HEAD
ARG DOCKER_LDFLAGS
ARG DOCKER_STATIC
ENV DOCKER_GITCOMMIT=${DOCKER_GITCOMMIT}
ARG VERSION
ENV VERSION=${VERSION}
ARG PLATFORM
ENV PLATFORM=${PLATFORM}
ARG PRODUCT
ENV PRODUCT=${PRODUCT}
ARG DEFAULT_PRODUCT_LICENSE
ENV DEFAULT_PRODUCT_LICENSE=${DEFAULT_PRODUCT_LICENSE}
ARG PACKAGER_NAME
# PREFIX overrides the DEST dir in the make.sh script; otherwise it fails because of
# the read-only mount in the current work dir
ENV PREFIX=/tmp
RUN <<EOT
# in bullseye, the arm64 target does not link with lld, so configure it to use ld instead
if [ "$(xx-info arch)" = "arm64" ]; then
XX_CC_PREFER_LINKER=ld xx-clang --setup-target-triple
fi
EOT
RUN --mount=type=bind,target=.,rw \
ENV PACKAGER_NAME=${PACKAGER_NAME}
ARG DOCKER_BUILDTAGS
ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}"
ENV PREFIX=/build
# TODO: This is here because hack/make.sh binary copies these extra binaries
# from $PATH into the bundles dir.
# It would be nice to handle this in a different way.
COPY --from=tini /build/ /usr/local/bin/
COPY --from=runc /build/ /usr/local/bin/
COPY --from=containerd /build/ /usr/local/bin/
COPY --from=rootlesskit /build/ /usr/local/bin/
COPY --from=vpnkit / /usr/local/bin/
COPY --from=gowinres /build/ /usr/local/bin/
WORKDIR /go/src/github.com/docker/docker
FROM binary-base AS build-binary
RUN --mount=type=cache,target=/root/.cache \
--mount=type=bind,target=.,ro \
--mount=type=tmpfs,target=cli/winresources/dockerd \
--mount=type=cache,target=/root/.cache/go-build,id=moby-build-$TARGETPLATFORM <<EOT
set -e
target=$([ "$DOCKER_STATIC" = "1" ] && echo "binary" || echo "dynbinary")
xx-go --wrap
PKG_CONFIG=$(xx-go env PKG_CONFIG) ./hack/make.sh $target
xx-verify $([ "$DOCKER_STATIC" = "1" ] && echo "--static") /tmp/bundles/${target}-daemon/dockerd$([ "$(xx-info os)" = "windows" ] && echo ".exe")
[ "$(xx-info os)" != "linux" ] || xx-verify $([ "$DOCKER_STATIC" = "1" ] && echo "--static") /tmp/bundles/${target}-daemon/docker-proxy
mkdir /build
mv /tmp/bundles/${target}-daemon/* /build/
EOT
--mount=type=tmpfs,target=cli/winresources/docker-proxy \
hack/make.sh binary
FROM binary-base AS build-dynbinary
RUN --mount=type=cache,target=/root/.cache \
--mount=type=bind,target=.,ro \
--mount=type=tmpfs,target=cli/winresources/dockerd \
--mount=type=tmpfs,target=cli/winresources/docker-proxy \
hack/make.sh dynbinary
FROM binary-base AS build-cross
ARG DOCKER_CROSSPLATFORMS
RUN --mount=type=cache,target=/root/.cache \
--mount=type=bind,target=.,ro \
--mount=type=tmpfs,target=cli/winresources/dockerd \
--mount=type=tmpfs,target=cli/winresources/docker-proxy \
hack/make.sh cross
# usage:
# > docker buildx bake binary
# > DOCKER_STATIC=0 docker buildx bake binary
# or
# > make binary
# > make dynbinary
FROM scratch AS binary
COPY --from=build /build/ /
COPY --from=build-binary /build/bundles/ /
# usage:
# > docker buildx bake all
FROM scratch AS all
COPY --link --from=tini /build/ /
COPY --link --from=runc /build/ /
COPY --link --from=containerd /build/ /
COPY --link --from=rootlesskit /build/ /
COPY --link --from=containerutil /build/ /
COPY --link --from=vpnkit / /
COPY --link --from=build /build /
FROM scratch AS dynbinary
COPY --from=build-dynbinary /build/bundles/ /
# smoke tests
# usage:
# > docker buildx bake binary-smoketest
FROM base AS smoketest
WORKDIR /usr/local/bin
COPY --from=build /build .
RUN <<EOT
set -ex
file dockerd
dockerd --version
file docker-proxy
docker-proxy --version
EOT
FROM scratch AS cross
COPY --from=build-cross /build/bundles/ /
# devcontainer is a stage used by .devcontainer/devcontainer.json
FROM dev-base AS devcontainer
COPY --link . .
COPY --link --from=gopls /build/ /usr/local/bin/
# usage:
# > docker buildx bake dind
# > docker run -d --restart always --privileged --name devdind -p 12375:2375 docker-dind --debug --host=tcp://0.0.0.0:2375 --tlsverify=false
FROM docker:dind AS dind
COPY --link --from=dockercli /build/docker /usr/local/bin/
COPY --link --from=buildx /buildx /usr/local/libexec/docker/cli-plugins/docker-buildx
COPY --link --from=compose /docker-compose /usr/local/libexec/docker/cli-plugins/docker-compose
COPY --link --from=all / /usr/local/bin/
# usage:
# > make shell
# > SYSTEMD=true make shell
FROM dev-base AS dev
COPY --link . .
FROM dev AS final
COPY . /go/src/github.com/docker/docker

Dockerfile.e2e Normal file

@@ -0,0 +1,85 @@
ARG GO_VERSION=1.19.3
FROM golang:${GO_VERSION}-alpine AS base
ENV GO111MODULE=off
RUN apk --no-cache add \
bash \
btrfs-progs-dev \
build-base \
curl \
lvm2-dev \
jq
RUN mkdir -p /build/
RUN mkdir -p /go/src/github.com/docker/docker/
WORKDIR /go/src/github.com/docker/docker/
FROM base AS frozen-images
# Get useful and necessary Hub images so we can "docker load" locally instead of pulling
COPY contrib/download-frozen-image-v2.sh /
RUN /download-frozen-image-v2.sh /build \
busybox:latest@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209 \
busybox:latest@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209 \
debian:bullseye-slim@sha256:dacf278785a4daa9de07596ec739dbc07131e189942772210709c5c0777e8437 \
hello-world:latest@sha256:d58e752213a51785838f9eed2b7a498ffa1cb3aa7f946dda11af39286c3db9a9 \
arm32v7/hello-world:latest@sha256:50b8560ad574c779908da71f7ce370c0a2471c098d44d1c8f6b513c5a55eeeb1
# See also frozenImages in "testutil/environment/protect.go" (which needs to be updated when adding images to this list)
FROM base AS dockercli
COPY hack/dockerfile/install/install.sh ./install.sh
COPY hack/dockerfile/install/dockercli.installer ./
RUN PREFIX=/build ./install.sh dockercli
# TestDockerCLIBuildSuite dependency
FROM base AS contrib
COPY contrib/syscall-test /build/syscall-test
COPY contrib/httpserver/Dockerfile /build/httpserver/Dockerfile
COPY contrib/httpserver contrib/httpserver
RUN CGO_ENABLED=0 go build -buildmode=pie -o /build/httpserver/httpserver github.com/docker/docker/contrib/httpserver
# Build the integration tests and copy the resulting binaries to /build/tests
FROM base AS builder
# Set tag and add sources
COPY . .
# Copy test sources so that tests that use assert can print errors
RUN mkdir -p /build${PWD} && find integration integration-cli -name \*_test.go -exec cp --parents '{}' /build${PWD} \;
# Build and install test binaries
ARG DOCKER_GITCOMMIT=undefined
RUN hack/make.sh build-integration-test-binary
RUN mkdir -p /build/tests && find . -name test.main -exec cp --parents '{}' /build/tests \;
## Generate testing image
FROM alpine:3.10 as runner
ENV DOCKER_REMOTE_DAEMON=1
ENV DOCKER_INTEGRATION_DAEMON_DEST=/
ENTRYPOINT ["/scripts/run.sh"]
# Add an unprivileged user to be used for tests which need it
RUN addgroup docker && adduser -D -G docker unprivilegeduser -s /bin/ash
# GNU tar is used for generating the emptyfs image
RUN apk --no-cache add \
bash \
ca-certificates \
g++ \
git \
inetutils-ping \
iptables \
libcap2-bin \
pigz \
tar \
xz
COPY hack/test/e2e-run.sh /scripts/run.sh
COPY hack/make/.ensure-emptyfs /scripts/ensure-emptyfs.sh
COPY integration/testdata /tests/integration/testdata
COPY integration/build/testdata /tests/integration/build/testdata
COPY integration-cli/fixtures /tests/integration-cli/fixtures
COPY --from=frozen-images /build/ /docker-frozen-images
COPY --from=dockercli /build/ /usr/bin/
COPY --from=contrib /build/ /tests/contrib/
COPY --from=builder /build/ /


@@ -5,14 +5,17 @@
# This represents the bare minimum required to build and test Docker.
ARG GO_VERSION=1.24.3
ARG GO_VERSION=1.19.3
ARG BASE_DEBIAN_DISTRO="bookworm"
ARG BASE_DEBIAN_DISTRO="bullseye"
ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}"
FROM ${GOLANG_IMAGE}
ENV GO111MODULE=off
ENV GOTOOLCHAIN=local
# allow replacing httpredir or deb mirror
ARG APT_MIRROR=deb.debian.org
RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list
# Compile and runtime deps
# https://github.com/docker/docker/blob/master/project/PACKAGERS.md#build-dependencies
@@ -21,7 +24,11 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
build-essential \
curl \
cmake \
gcc \
git \
libapparmor-dev \
libbtrfs-dev \
libdevmapper-dev \
libseccomp-dev \
ca-certificates \
e2fsprogs \
@@ -35,10 +42,10 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
vim-common \
&& rm -rf /var/lib/apt/lists/*
# Install runc, containerd, and tini
# Install runc, containerd, tini and docker-proxy
# Please edit hack/dockerfile/install/<name>.installer to update them.
COPY hack/dockerfile/install hack/dockerfile/install
RUN set -e; for i in runc containerd tini dockercli; \
RUN for i in runc containerd tini proxy dockercli; \
do hack/dockerfile/install/install.sh $i; \
done
ENV PATH=/usr/local/cli:$PATH


@@ -154,19 +154,21 @@
# The number of build steps below is explicitly minimised to improve performance.
ARG WINDOWS_BASE_IMAGE=mcr.microsoft.com/windows/servercore
ARG WINDOWS_BASE_IMAGE_TAG=ltsc2022
FROM ${WINDOWS_BASE_IMAGE}:${WINDOWS_BASE_IMAGE_TAG}
# Extremely important - do not change the following line to reference a "specific" image,
# such as `mcr.microsoft.com/windows/servercore:ltsc2022`. If using this Dockerfile in process
# isolated containers, the kernel of the host must match the container image, and hence
# the build would fail across Windows Server 2016 (aka RS1) and Windows Server 2019 (aka RS5).
# It is expected that the image `microsoft/windowsservercore:latest` is present, and matches
# the host's kernel version before doing a build.
FROM microsoft/windowsservercore
# Use PowerShell as the default shell
SHELL ["powershell", "-Command", "$ErrorActionPreference = 'Stop'; $ProgressPreference = 'SilentlyContinue';"]
ARG GO_VERSION=1.24.3
ARG GOTESTSUM_VERSION=v1.12.0
# GOWINRES_VERSION is the version of go-winres to install.
ARG GOWINRES_VERSION=v0.3.3
ARG CONTAINERD_VERSION=v1.7.27
ARG GO_VERSION=1.19.3
ARG GOTESTSUM_VERSION=v1.8.2
ARG GOWINRES_VERSION=v0.3.0
ARG CONTAINERD_VERSION=v1.6.10
# Environment variable notes:
# - GO_VERSION must be consistent with 'Dockerfile' used by Linux.
@@ -177,7 +179,6 @@ ENV GO_VERSION=${GO_VERSION} `
GIT_VERSION=2.11.1 `
GOPATH=C:\gopath `
GO111MODULE=off `
GOTOOLCHAIN=local `
FROM_DOCKERFILE=1 `
GOTESTSUM_VERSION=${GOTESTSUM_VERSION} `
GOWINRES_VERSION=${GOWINRES_VERSION}
@@ -222,8 +223,8 @@ RUN `
Download-File $location C:\gitsetup.zip; `
`
Write-Host INFO: Downloading go...; `
$dlGoVersion=$Env:GO_VERSION; `
Download-File "https://go.dev/dl/go${dlGoVersion}.windows-amd64.zip" C:\go.zip; `
$dlGoVersion=$Env:GO_VERSION -replace '\.0$',''; `
Download-File "https://golang.org/dl/go${dlGoVersion}.windows-amd64.zip" C:\go.zip; `
`
Write-Host INFO: Downloading compiler 1 of 3...; `
Download-File https://raw.githubusercontent.com/moby/docker-tdmgcc/master/gcc.zip C:\gcc.zip; `

Jenkinsfile vendored Normal file

@@ -0,0 +1,563 @@
#!groovy
pipeline {
agent none
options {
buildDiscarder(logRotator(daysToKeepStr: '30'))
timeout(time: 2, unit: 'HOURS')
timestamps()
}
parameters {
booleanParam(name: 'arm64', defaultValue: true, description: 'ARM (arm64) Build/Test')
booleanParam(name: 's390x', defaultValue: false, description: 'IBM Z (s390x) Build/Test')
booleanParam(name: 'ppc64le', defaultValue: false, description: 'PowerPC (ppc64le) Build/Test')
booleanParam(name: 'dco', defaultValue: true, description: 'Run the DCO check')
}
environment {
DOCKER_BUILDKIT = '1'
DOCKER_EXPERIMENTAL = '1'
DOCKER_GRAPHDRIVER = 'overlay2'
APT_MIRROR = 'cdn-fastly.deb.debian.org'
CHECK_CONFIG_COMMIT = '33a3680e08d1007e72c3b3f1454f823d8e9948ee'
TESTDEBUG = '0'
TIMEOUT = '120m'
}
stages {
stage('pr-hack') {
when { changeRequest() }
steps {
script {
echo "Workaround for PR auto-cancel feature. Borrowed from https://issues.jenkins-ci.org/browse/JENKINS-43353"
def buildNumber = env.BUILD_NUMBER as int
if (buildNumber > 1) milestone(buildNumber - 1)
milestone(buildNumber)
}
}
}
stage('DCO-check') {
when {
beforeAgent true
expression { params.dco }
}
agent { label 'arm64 && ubuntu-2004' }
steps {
sh '''
docker run --rm \
-v "$WORKSPACE:/workspace" \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
alpine sh -c 'apk add --no-cache -q bash git openssh-client && git config --system --add safe.directory /workspace && cd /workspace && hack/validate/dco'
'''
}
}
stage('Build') {
parallel {
stage('s390x') {
when {
beforeAgent true
// Skip this stage on PRs unless the checkbox is selected
anyOf {
not { changeRequest() }
expression { params.s390x }
}
}
agent { label 's390x-ubuntu-2004' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Unit tests") {
steps {
sh '''
sudo modprobe ip6table_filter
'''
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/test/unit
'''
}
post {
always {
junit testResults: 'bundles/junit-report*.xml', allowEmptyResults: true
}
}
}
stage("Integration tests") {
environment { TEST_SKIP_INTEGRATION_CLI = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TESTDEBUG \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=s390x-integration
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('s390x integration-cli') {
when {
beforeAgent true
// Skip this stage on PRs unless the checkbox is selected
anyOf {
not { changeRequest() }
expression { params.s390x }
}
}
agent { label 's390x-ubuntu-2004' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Integration-cli tests") {
environment { TEST_SKIP_INTEGRATION = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TEST_SKIP_INTEGRATION \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=s390x-integration-cli
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('ppc64le') {
when {
beforeAgent true
// Skip this stage on PRs unless the checkbox is selected
anyOf {
not { changeRequest() }
expression { params.ppc64le }
}
}
agent { label 'ppc64le-ubuntu-1604' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
docker buildx build --load --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Unit tests") {
steps {
sh '''
sudo modprobe ip6table_filter
'''
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/test/unit
'''
}
post {
always {
junit testResults: 'bundles/junit-report*.xml', allowEmptyResults: true
}
}
}
stage("Integration tests") {
environment { TEST_SKIP_INTEGRATION_CLI = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TESTDEBUG \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=ppc64le-integration
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('ppc64le integration-cli') {
when {
beforeAgent true
// Skip this stage on PRs unless the checkbox is selected
anyOf {
not { changeRequest() }
expression { params.ppc64le }
}
}
agent { label 'ppc64le-ubuntu-1604' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
docker buildx build --load --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Integration-cli tests") {
environment { TEST_SKIP_INTEGRATION = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TEST_SKIP_INTEGRATION \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=ppc64le-integration-cli
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('arm64') {
when {
beforeAgent true
expression { params.arm64 }
}
agent { label 'arm64 && ubuntu-2004' }
environment {
TEST_SKIP_INTEGRATION_CLI = '1'
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh 'docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .'
}
}
stage("Unit tests") {
steps {
sh '''
sudo modprobe ip6table_filter
'''
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/test/unit
'''
}
post {
always {
junit testResults: 'bundles/junit-report*.xml', allowEmptyResults: true
}
}
}
stage("Integration tests") {
environment { TEST_SKIP_INTEGRATION_CLI = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TESTDEBUG \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=arm64-integration
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
}
}
}
}


@@ -1,33 +1,543 @@
# Moby maintainers file
#
# See project/GOVERNANCE.md for committer versus reviewer roles
# This file describes the maintainer groups within the moby/moby project.
# More detail on Moby project governance is available in the
# project/GOVERNANCE.md file found in this repository.
#
# COMMITTERS
# GitHub ID, Name, Email address, GPG fingerprint
"akerouanton","Albin Kerouanton","albinker@gmail.com"
"AkihiroSuda","Akihiro Suda","akihiro.suda.cz@hco.ntt.co.jp"
"austinvazquez","Austin Vazquez","macedonv@amazon.com"
"corhere","Cory Snider","csnider@mirantis.com"
"cpuguy83","Brian Goff","cpuguy83@gmail.com"
"robmry","Rob Murray","rob.murray@docker.com"
"thaJeztah","Sebastiaan van Stijn","github@gone.nl"
"tianon","Tianon Gravi","admwiggin@gmail.com"
"tonistiigi","Tõnis Tiigi","tonis@docker.com"
"vvoland","Paweł Gronowski","pawel.gronowski@docker.com"
# It is structured to be consumable by both humans and programs.
# To extract its contents programmatically, use any TOML-compliant
# parser.
#
# REVIEWERS
# GitHub ID, Name, Email address, GPG fingerprint
"coolljt0725","Lei Jitang","leijitang@huawei.com"
"crazy-max","Kevin Alvarez","contact@crazymax.dev"
"dmcgowan","Derek McGowan","derek@mcgstyle.net"
"estesp","Phil Estes","estesp@linux.vnet.ibm.com"
"justincormack","Justin Cormack","justin.cormack@docker.com"
"kolyshkin","Kir Kolyshkin","kolyshkin@gmail.com"
"laurazard","Laura Brehm","laurabrehm@hey.com"
"neersighted","Bjorn Neergaard","bjorn@neersighted.com"
"rumpl","Djordje Lukic","djordje.lukic@docker.com"
"samuelkarp","Samuel Karp","me@samuelkarp.com"
"stevvooe","Stephen Day","stephen.day@docker.com"
"thompson-shaun","Shaun Thompson","shaun.thompson@docker.com"
"tiborvass","Tibor Vass","tibor@docker.com"
"unclejack","Cristian Staretu","cristian.staretu@gmail.com"
# TODO(estesp): This file should not necessarily depend on docker/opensource
# This file is compiled into the MAINTAINERS file in docker/opensource.
#
[Org]
[Org."Core maintainers"]
# The Core maintainers are the ghostbusters of the project: when there's a problem others
# can't solve, they show up and fix it with bizarre devices and weaponry.
# They have final say on technical implementation and coding style.
# They are ultimately responsible for quality in all its forms: usability polish,
# bugfixes, performance, stability, etc. When ownership can cleanly be passed to
# a subsystem, they are responsible for doing so and holding the
# subsystem maintainers accountable. If ownership is unclear, they are the de facto owners.
people = [
"akihirosuda",
"anusha",
"coolljt0725",
"cpuguy83",
"estesp",
"johnstep",
"justincormack",
"kolyshkin",
"mhbauer",
"runcom",
"samuelkarp",
"stevvooe",
"thajeztah",
"tianon",
"tibor",
"tonistiigi",
"unclejack",
"vdemeester",
"vieux",
"yongtang"
]
[Org.Curators]
# The curators help ensure that incoming issues and pull requests are properly triaged and
# that our various contribution and reviewing processes are respected. With their knowledge of
# the repository activity, they can also guide contributors to relevant material or
# discussions.
#
# They are neither code nor docs reviewers, so they are never expected to merge. They can
# however:
# - close an issue or pull request when it's an exact duplicate
# - close an issue or pull request when it's inappropriate or off-topic
people = [
"alexellis",
"andrewhsu",
"corhere",
"fntlnz",
"gianarb",
"ndeloof",
"neersighted",
"olljanat",
"programmerq",
"ripcurld",
"rumpl",
"samwhited",
"thajeztah"
]
[Org.Alumni]
# This list contains maintainers that are no longer active on the project.
# It is thanks to these people that the project has become what it is today.
# Thank you!
people = [
# Aaron Lehmann was a maintainer for swarmkit, the registry, and the engine,
# and contributed many improvements, features, and bugfixes in those areas,
# among which "automated service rollbacks", templated secrets and configs,
# and resumable image layer downloads.
"aaronlehmann",
# Harald Albers is the mastermind behind the bash completion scripts for the
# Docker CLI. The completion scripts moved to the Docker CLI repository, so
# you can now find him perform his magic in the https://github.com/docker/cli repository.
"albers",
# Andrea Luzzardi started contributing to the Docker codebase in the "dotCloud"
# era, even before it was called "Docker". He is one of the architects of both
# Swarm and SwarmKit, and its integration into the Docker engine.
"aluzzardi",
# David Calavera contributed many features to Docker, such as an improved
# event system, dynamic configuration reloading, volume plugins, fancy
# new templating options, and an external client credential store. As a
# maintainer, David was release captain for Docker 1.8, and competing
# with Jess Frazelle to be "top dream killer".
# David is now doing amazing stuff as CTO for https://www.netlify.com,
# and tweets as @calavera.
"calavera",
# Michael Crosby was "chief maintainer" of the Docker project.
# During his time as a maintainer, Michael contributed to many
# milestones of the project; he was release captain of Docker v1.0.0,
# started the development of "libcontainer" (what later became runc)
# and containerd, as well as demoing cool hacks such as live migrating
# a game server container with checkpoint/restore.
#
# Michael is currently a maintainer of containerd, but you may see
# him around in other projects on GitHub.
"crosbymichael",
# Before becoming a maintainer, Daniel Nephin was a core contributor
# to "Fig" (now known as Docker Compose). As a maintainer for both the
# Engine and Docker CLI, Daniel contributed many features, among which
# the `docker stack` commands, allowing users to deploy their Docker
# Compose projects as a Swarm service.
"dnephin",
# Doug Davis contributed many features and fixes for the classic builder,
# such as "wildcard" copy, the dockerignore file, custom paths/names
# for the Dockerfile, as well as enhancements to the API and documentation.
# Follow Doug on Twitter, where he tweets as @duginabox.
"duglin",
# As a maintainer, Erik was responsible for the "builder", and
# started the first designs for the new networking model in
# Docker. Erik is now working on all kinds of plugins for Docker
# (https://github.com/contiv) and various open source projects
# in his own repository https://github.com/erikh. You may
# still stumble into him in our issue tracker, or on IRC.
"erikh",
# Evan Hazlett is the creator of the Shipyard and Interlock open source projects,
# and the author of "Orca", which became the foundation of Docker Universal Control
# Plane (UCP). As a maintainer, Evan helped integrating SwarmKit (secrets, tasks)
# into the Docker engine.
"ehazlett",
# Arnaud Porterie (AKA "icecrime") was in charge of maintaining the maintainers.
# As a maintainer, he made life easier for contributors to the Docker open-source
# projects, bringing order in the chaos by designing a triage- and review workflow
# using labels (see https://icecrime.net/technology/a-structured-approach-to-labeling/),
# and automating the hell out of things with his buddies GordonTheTurtle and Poule
# (a chicken!).
#
# A lesser-known fact is that he created the first commit in the libnetwork repository
# even though he didn't know anything about it. Some say, he's now selling stuff on
# the internet ;-)
"icecrime",
# After a false start with his first PR being rejected, James Turnbull became a frequent
# contributor to the documentation, and became a docs maintainer on December 5, 2013. As
# a maintainer, James lifted the docs to a higher standard, and introduced the community
# guidelines ("three strikes"). James is currently changing the world as CTO of https://www.empatico.org,
# meanwhile authoring various books that are worth checking out. You can find him on Twitter,
# rambling as @kartar, and although no longer active as a maintainer, he's always "game" to
# help out reviewing docs PRs, so you may still see him around in the repository.
"jamtur01",
# Jessica Frazelle, also known as the "Keyser Söze of containers",
# runs *everything* in containers. She started contributing to
# Docker with a (fun fun) change involving both iptables and regular
# expressions (coz, YOLO!) on July 10, 2014
# https://github.com/docker/docker/pull/6950/commits/f3a68ffa390fb851115c77783fa4031f1d3b2995.
# Jess was Release Captain for Docker 1.4, 1.6 and 1.7, and contributed
# many features and improvement, among which "seccomp profiles" (making
# containers a lot more secure). Besides being a maintainer, she
# set up the CI infrastructure for the project, giving everyone
# something to shout at if a PR failed ("noooo Janky!").
# Be sure you don't miss her talks at a conference near you (a must-see),
# read her blog at https://blog.jessfraz.com (a must-read), and
# check out her open source projects on GitHub https://github.com/jessfraz (a must-try).
"jessfraz",
# As a maintainer, John Howard managed to make the impossible possible;
# to run Docker on Windows. After facing many challenges, teaching
# fellow-maintainers that 'Windows is not Linux', and many changes in
# Windows Server to facilitate containers, native Windows containers
# saw the light of day in 2015.
#
# John is now enjoying life without containers: playing piano, painting,
# and walking his dogs, but you may occasionally see him drop by on GitHub.
"lowenna",
# Alexander Morozov contributed many features to Docker, worked on the premise of
# what later became containerd (and worked on that too), and made a "stupid" Go
# vendor tool specifically for docker/docker needs: vndr (https://github.com/LK4D4/vndr).
# Not many know that Alexander is a master negotiator, being able to change course
# of action with a single "Nope, we're not gonna do that".
"lk4d4",
# Madhu Venugopal was part of the SocketPlane team that joined Docker.
# As a maintainer, he was working with Jana for the Container Network
# Model (CNM) implemented through libnetwork, and the "routing mesh" powering
# Swarm mode networking.
"mavenugo",
# As a maintainer, Kenfe-Mickaël Laventure worked on the container runtime,
# integrating containerd 1.0 with the daemon, and adding support for custom
# OCI runtimes, as well as implementing the `docker prune` subcommands,
# which was a welcome feature to be added. You can keep up with Mickaél on
# Twitter (@kmlaventure).
"mlaventure",
# As a docs maintainer, Mary Anthony contributed greatly to the Docker
# docs. She wrote the Docker Contributor Guide and Getting Started
# Guides. She helped create a doc build system independent of
# docker/docker project, and implemented a new docs.docker.com theme and
# nav for 2015 Dockercon. Fun fact: the most inherited layer in DockerHub
# public repositories was originally referenced in
# maryatdocker/docker-whale back in May 2015.
"moxiegirl",
# Jana Radhakrishnan was part of the SocketPlane team that joined Docker.
# As a maintainer, he was the lead architect for the Container Network
# Model (CNM) implemented through libnetwork, and the "routing mesh" powering
# Swarm mode networking.
#
# Jana started new adventures in networking, but you can find him tweeting as @mrjana,
# coding on GitHub https://github.com/mrjana, and he may be hiding on the Docker Community
# slack channel :-)
"mrjana",
# Sven Dowideit became a well known person in the Docker ecosphere, building
# boot2docker, and became a regular contributor to the project, starting as
# early as October 2013 (https://github.com/docker/docker/pull/2119), to become
# a maintainer less than two months later (https://github.com/docker/docker/pull/3061).
#
# As a maintainer, Sven took on the task to convert the documentation from
# ReStructuredText to Markdown, migrate to Hugo for generating the docs, and
# writing tooling for building, testing, and publishing them.
#
# If you're not in the occasion to visit "the Australian office", you
# can keep up with Sven on Twitter (@SvenDowideit), his blog http://fosiki.com,
# and of course on GitHub.
"sven",
# Vincent "vbatts!" Batts made his first contribution to the project
# in November 2013, to become a maintainer a few months later, on
# May 10, 2014 (https://github.com/docker/docker/commit/d6e666a87a01a5634c250358a94c814bf26cb778).
# As a maintainer, Vincent made important contributions to core elements
# of Docker, such as "distribution" (tarsum) and graphdrivers (btrfs, devicemapper).
# He also contributed the "tar-split" library, an important element
# for the content-addressable store.
# Vincent is currently a member of the Open Containers Initiative
# Technical Oversight Board (TOB), besides his work at Red Hat and
# Project Atomic. You can still find him regularly hanging out in
# our repository and the #docker-dev and #docker-maintainers IRC channels
# for a chat, as he's always a lot of fun.
"vbatts",
# Vishnu became a maintainer to help out on the daemon codebase and
# libcontainer integration. He's currently involved in the
# Open Containers Initiative, working on the specifications,
# besides his work on cAdvisor and Kubernetes for Google.
"vishh"
]
[people]
# A reference list of all people associated with the project.
# All other sections should refer to people by their canonical key
# in the people section.
# ADD YOURSELF HERE IN ALPHABETICAL ORDER
[people.aaronlehmann]
Name = "Aaron Lehmann"
Email = "aaron.lehmann@docker.com"
GitHub = "aaronlehmann"
[people.alexellis]
Name = "Alex Ellis"
Email = "alexellis2@gmail.com"
GitHub = "alexellis"
[people.akihirosuda]
Name = "Akihiro Suda"
Email = "akihiro.suda.cz@hco.ntt.co.jp"
GitHub = "AkihiroSuda"
[people.aluzzardi]
Name = "Andrea Luzzardi"
Email = "al@docker.com"
GitHub = "aluzzardi"
[people.albers]
Name = "Harald Albers"
Email = "github@albersweb.de"
GitHub = "albers"
[people.andrewhsu]
Name = "Andrew Hsu"
Email = "andrewhsu@docker.com"
GitHub = "andrewhsu"
[people.anusha]
Name = "Anusha Ragunathan"
Email = "anusha@docker.com"
GitHub = "anusha-ragunathan"
[people.calavera]
Name = "David Calavera"
Email = "david.calavera@gmail.com"
GitHub = "calavera"
[people.coolljt0725]
Name = "Lei Jitang"
Email = "leijitang@huawei.com"
GitHub = "coolljt0725"
[people.corhere]
Name = "Cory Snider"
Email = "csnider@mirantis.com"
GitHub = "corhere"
[people.cpuguy83]
Name = "Brian Goff"
Email = "cpuguy83@gmail.com"
GitHub = "cpuguy83"
[people.crosbymichael]
Name = "Michael Crosby"
Email = "crosbymichael@gmail.com"
GitHub = "crosbymichael"
[people.dnephin]
Name = "Daniel Nephin"
Email = "dnephin@gmail.com"
GitHub = "dnephin"
[people.duglin]
Name = "Doug Davis"
Email = "dug@us.ibm.com"
GitHub = "duglin"
[people.ehazlett]
Name = "Evan Hazlett"
Email = "ejhazlett@gmail.com"
GitHub = "ehazlett"
[people.erikh]
Name = "Erik Hollensbe"
Email = "erik@docker.com"
GitHub = "erikh"
[people.estesp]
Name = "Phil Estes"
Email = "estesp@linux.vnet.ibm.com"
GitHub = "estesp"
[people.fntlnz]
Name = "Lorenzo Fontana"
Email = "fontanalorenz@gmail.com"
GitHub = "fntlnz"
[people.gianarb]
Name = "Gianluca Arbezzano"
Email = "ga@thumpflow.com"
GitHub = "gianarb"
[people.icecrime]
Name = "Arnaud Porterie"
Email = "icecrime@gmail.com"
GitHub = "icecrime"
[people.jamtur01]
Name = "James Turnbull"
Email = "james@lovedthanlost.net"
GitHub = "jamtur01"
[people.jessfraz]
Name = "Jessie Frazelle"
Email = "jess@linux.com"
GitHub = "jessfraz"
[people.johnstep]
Name = "John Stephens"
Email = "johnstep@docker.com"
GitHub = "johnstep"
[people.justincormack]
Name = "Justin Cormack"
Email = "justin.cormack@docker.com"
GitHub = "justincormack"
[people.kolyshkin]
Name = "Kir Kolyshkin"
Email = "kolyshkin@gmail.com"
GitHub = "kolyshkin"
[people.lk4d4]
Name = "Alexander Morozov"
Email = "lk4d4@docker.com"
GitHub = "lk4d4"
[people.lowenna]
Name = "John Howard"
Email = "github@lowenna.com"
GitHub = "lowenna"
[people.mavenugo]
Name = "Madhu Venugopal"
Email = "madhu@docker.com"
GitHub = "mavenugo"
[people.mhbauer]
Name = "Morgan Bauer"
Email = "mbauer@us.ibm.com"
GitHub = "mhbauer"
[people.mlaventure]
Name = "Kenfe-Mickaël Laventure"
Email = "mickael.laventure@gmail.com"
GitHub = "mlaventure"
[people.moxiegirl]
Name = "Mary Anthony"
Email = "mary.anthony@docker.com"
GitHub = "moxiegirl"
[people.mrjana]
Name = "Jana Radhakrishnan"
Email = "mrjana@docker.com"
GitHub = "mrjana"
[people.ndeloof]
Name = "Nicolas De Loof"
Email = "nicolas.deloof@gmail.com"
GitHub = "ndeloof"
[people.neersighted]
Name = "Bjorn Neergaard"
Email = "bneergaard@mirantis.com"
GitHub = "neersighted"
[people.olljanat]
Name = "Olli Janatuinen"
Email = "olli.janatuinen@gmail.com"
GitHub = "olljanat"
[people.programmerq]
Name = "Jeff Anderson"
Email = "jeff@docker.com"
GitHub = "programmerq"
[people.ripcurld]
Name = "Boaz Shuster"
Email = "ripcurld.github@gmail.com"
GitHub = "ripcurld"
[people.rumpl]
Name = "Djordje Lukic"
Email = "djordje.lukic@docker.com"
GitHub = "rumpl"
[people.runcom]
Name = "Antonio Murdaca"
Email = "runcom@redhat.com"
GitHub = "runcom"
[people.samuelkarp]
Name = "Samuel Karp"
Email = "me@samuelkarp.com"
GitHub = "samuelkarp"
[people.samwhited]
Name = "Sam Whited"
Email = "sam@samwhited.com"
GitHub = "samwhited"
[people.shykes]
Name = "Solomon Hykes"
Email = "solomon@docker.com"
GitHub = "shykes"
[people.stevvooe]
Name = "Stephen Day"
Email = "stephen.day@docker.com"
GitHub = "stevvooe"
[people.sven]
Name = "Sven Dowideit"
Email = "SvenDowideit@home.org.au"
GitHub = "SvenDowideit"
[people.thajeztah]
Name = "Sebastiaan van Stijn"
Email = "github@gone.nl"
GitHub = "thaJeztah"
[people.tianon]
Name = "Tianon Gravi"
Email = "admwiggin@gmail.com"
GitHub = "tianon"
[people.tibor]
Name = "Tibor Vass"
Email = "tibor@docker.com"
GitHub = "tiborvass"
[people.tonistiigi]
Name = "Tõnis Tiigi"
Email = "tonis@docker.com"
GitHub = "tonistiigi"
[people.unclejack]
Name = "Cristian Staretu"
Email = "cristian.staretu@gmail.com"
GitHub = "unclejack"
[people.vbatts]
Name = "Vincent Batts"
Email = "vbatts@redhat.com"
GitHub = "vbatts"
[people.vdemeester]
Name = "Vincent Demeester"
Email = "vincent@sbr.pm"
GitHub = "vdemeester"
[people.vieux]
Name = "Victor Vieux"
Email = "vieux@docker.com"
GitHub = "vieux"
[people.vishh]
Name = "Vishnu Kannan"
Email = "vishnuk@google.com"
GitHub = "vishh"
[people.yongtang]
Name = "Yong Tang"
Email = "yong.tang.github@outlook.com"
GitHub = "yongtang"

Makefile

@@ -1,7 +1,17 @@
.PHONY: all binary dynbinary build cross help install manpages run shell test test-docker-py test-integration test-unit validate validate-% win
DOCKER ?= docker
BUILDX ?= $(DOCKER) buildx
DOCKER_GITCOMMIT := $(shell git rev-parse HEAD)
# set the graph driver as the current graphdriver if not set
DOCKER_GRAPHDRIVER := $(if $(DOCKER_GRAPHDRIVER),$(DOCKER_GRAPHDRIVER),$(shell docker info 2>&1 | grep "Storage Driver" | sed 's/.*: //'))
export DOCKER_GRAPHDRIVER
# get OS/Arch of docker engine
DOCKER_OSARCH := $(shell bash -c 'source hack/make/.detect-daemon-osarch && echo $${DOCKER_ENGINE_OSARCH}')
DOCKERFILE := $(shell bash -c 'source hack/make/.detect-daemon-osarch && echo $${DOCKERFILE}')
DOCKER_GITCOMMIT := $(shell git rev-parse --short HEAD || echo unsupported)
export DOCKER_GITCOMMIT
# allow overriding the repository and branch that validation scripts run against
@@ -10,9 +20,6 @@ export VALIDATE_REPO
export VALIDATE_BRANCH
export VALIDATE_ORIGIN_BRANCH
export PAGER
export GIT_PAGER
# env vars passed through directly to Docker's build scripts
# to allow things like `make KEEPBUNDLE=1 binary` easily
# `project/PACKAGERS.md` has some limited documentation of some of these
@@ -21,9 +28,11 @@ export GIT_PAGER
# option of "go build". For example, a built-in graphdriver priority list
# can be changed during build time like this:
#
# make DOCKER_LDFLAGS="-X github.com/docker/docker/daemon/graphdriver.priority=overlay2,zfs" dynbinary
# make DOCKER_LDFLAGS="-X github.com/docker/docker/daemon/graphdriver.priority=overlay2,devicemapper" dynbinary
#
DOCKER_ENVS := \
-e DOCKER_CROSSPLATFORMS \
-e BUILD_APT_MIRROR \
-e BUILDFLAGS \
-e KEEPBUNDLE \
-e DOCKER_BUILD_ARGS \
@@ -31,11 +40,8 @@ DOCKER_ENVS := \
-e DOCKER_BUILD_OPTS \
-e DOCKER_BUILD_PKGS \
-e DOCKER_BUILDKIT \
-e DOCKER_BASH_COMPLETION_PATH \
-e DOCKER_CLI_PATH \
-e DOCKERCLI_VERSION \
-e DOCKERCLI_REPOSITORY \
-e DOCKERCLI_INTEGRATION_VERSION \
-e DOCKERCLI_INTEGRATION_REPOSITORY \
-e DOCKER_DEBUG \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT \
@@ -49,12 +55,9 @@ DOCKER_ENVS := \
-e DOCKER_USERLANDPROXY \
-e DOCKERD_ARGS \
-e DELVE_PORT \
-e FIREWALLD \
-e GITHUB_ACTIONS \
-e TEST_FORCE_VALIDATE \
-e TEST_INTEGRATION_DIR \
-e TEST_INTEGRATION_USE_SNAPSHOTTER \
-e TEST_INTEGRATION_FAIL_FAST \
-e TEST_SKIP_INTEGRATION \
-e TEST_SKIP_INTEGRATION_CLI \
-e TESTCOVERAGE \
@@ -72,22 +75,17 @@ DOCKER_ENVS := \
-e PLATFORM \
-e DEFAULT_PRODUCT_LICENSE \
-e PRODUCT \
-e PACKAGER_NAME \
-e PAGER \
-e GIT_PAGER \
-e OTEL_EXPORTER_OTLP_ENDPOINT \
-e OTEL_EXPORTER_OTLP_PROTOCOL \
-e OTEL_SERVICE_NAME
-e PACKAGER_NAME
# note: we _cannot_ add "-e DOCKER_BUILDTAGS" here because even if it's unset in the shell, that would shadow the "ENV DOCKER_BUILDTAGS" set in our Dockerfile, which is very important for our official builds
# to allow `make BIND_DIR=. shell` or `make BIND_DIR= test`
# (default to no bind mount if DOCKER_HOST is set)
# note: BINDDIR is supported for backwards-compatibility here
BIND_DIR := $(if $(BINDDIR),$(BINDDIR),$(if $(DOCKER_HOST),,.))
BIND_DIR := $(if $(BINDDIR),$(BINDDIR),$(if $(DOCKER_HOST),,bundles))
# DOCKER_MOUNT can be overridden, but use at your own risk!
# DOCKER_MOUNT can be overriden, but use at your own risk!
ifndef DOCKER_MOUNT
DOCKER_MOUNT := $(if $(BIND_DIR),-v "$(BIND_DIR):/go/src/github.com/docker/docker/$(BIND_DIR)")
DOCKER_MOUNT := $(if $(BIND_DIR),-v "$(CURDIR)/$(BIND_DIR):/go/src/github.com/docker/docker/$(BIND_DIR)")
DOCKER_MOUNT := $(if $(DOCKER_BINDDIR_MOUNT_OPTS),$(DOCKER_MOUNT):$(DOCKER_BINDDIR_MOUNT_OPTS),$(DOCKER_MOUNT))
# This allows the test suite to run without worrying about the underlying fs used by the container running the daemon (e.g. aufs-on-aufs), so long as the host running the container is running a supported fs.
@@ -97,14 +95,8 @@ DOCKER_MOUNT := $(if $(DOCKER_MOUNT),$(DOCKER_MOUNT),-v /go/src/github.com/docke
DOCKER_MOUNT_CACHE := -v docker-dev-cache:/root/.cache -v docker-mod-cache:/go/pkg/mod/
DOCKER_MOUNT_CLI := $(if $(DOCKER_CLI_PATH),-v $(shell dirname $(DOCKER_CLI_PATH)):/usr/local/cli,)
ifdef BIND_GIT
# Gets the common .git directory (even from inside a git worktree)
GITDIR := $(shell realpath $(shell git rev-parse --git-common-dir))
MOUNT_GITDIR := $(if $(GITDIR),-v "$(GITDIR):$(GITDIR)")
endif
DOCKER_MOUNT := $(DOCKER_MOUNT) $(DOCKER_MOUNT_CACHE) $(DOCKER_MOUNT_CLI) $(DOCKER_MOUNT_BASH_COMPLETION) $(MOUNT_GITDIR)
DOCKER_MOUNT_BASH_COMPLETION := $(if $(DOCKER_BASH_COMPLETION_PATH),-v $(shell dirname $(DOCKER_BASH_COMPLETION_PATH)):/usr/local/completion/bash,)
DOCKER_MOUNT := $(DOCKER_MOUNT) $(DOCKER_MOUNT_CACHE) $(DOCKER_MOUNT_CLI) $(DOCKER_MOUNT_BASH_COMPLETION)
endif # ifndef DOCKER_MOUNT
# This allows setting the docker-dev container name
@@ -115,6 +107,8 @@ DOCKER_PORT_FORWARD := $(if $(DOCKER_PORT),-p "$(DOCKER_PORT)",)
DELVE_PORT_FORWARD := $(if $(DELVE_PORT),-p "$(DELVE_PORT)",)
DOCKER_FLAGS := $(DOCKER) run --rm --privileged $(DOCKER_CONTAINER_NAME) $(DOCKER_ENVS) $(DOCKER_MOUNT) $(DOCKER_PORT_FORWARD) $(DELVE_PORT_FORWARD)
BUILD_APT_MIRROR := $(if $(DOCKER_BUILD_APT_MIRROR),--build-arg APT_MIRROR=$(DOCKER_BUILD_APT_MIRROR))
export BUILD_APT_MIRROR
SWAGGER_DOCS_PORT ?= 9000
@@ -142,38 +136,38 @@ endif
DOCKER_RUN_DOCKER := $(DOCKER_FLAGS) "$(DOCKER_IMAGE)"
DOCKER_BUILD_ARGS += --build-arg=GO_VERSION
DOCKER_BUILD_ARGS += --build-arg=DOCKERCLI_VERSION
DOCKER_BUILD_ARGS += --build-arg=DOCKERCLI_REPOSITORY
DOCKER_BUILD_ARGS += --build-arg=DOCKERCLI_INTEGRATION_VERSION
DOCKER_BUILD_ARGS += --build-arg=DOCKERCLI_INTEGRATION_REPOSITORY
ifdef DOCKER_SYSTEMD
DOCKER_BUILD_ARGS += --build-arg=SYSTEMD=true
endif
ifdef FIREWALLD
DOCKER_BUILD_ARGS += --build-arg=FIREWALLD=true
BUILD_OPTS := ${BUILD_APT_MIRROR} ${DOCKER_BUILD_ARGS} ${DOCKER_BUILD_OPTS} -f "$(DOCKERFILE)"
BUILD_CMD := $(BUILDX) build
# This is used for the legacy "build" target and anything still depending on it
BUILD_CROSS =
ifdef DOCKER_CROSS
BUILD_CROSS = --build-arg CROSS=$(DOCKER_CROSS)
endif
ifdef DOCKER_CROSSPLATFORMS
BUILD_CROSS = --build-arg CROSS=true
endif
BUILD_OPTS := ${DOCKER_BUILD_ARGS} ${DOCKER_BUILD_OPTS}
BUILD_CMD := $(BUILDX) build
BAKE_CMD := $(BUILDX) bake
VERSION_AUTOGEN_ARGS = --build-arg VERSION --build-arg DOCKER_GITCOMMIT --build-arg PRODUCT --build-arg PLATFORM --build-arg DEFAULT_PRODUCT_LICENSE --build-arg PACKAGER_NAME
default: binary
.PHONY: all
all: build ## validate all checks, build linux binaries, run all tests,\ncross build non-linux binaries, and generate archives
$(DOCKER_RUN_DOCKER) bash -c 'hack/validate/default && hack/make.sh'
.PHONY: binary
binary: bundles ## build statically linked linux binaries
$(BAKE_CMD) binary
$(BUILD_CMD) $(BUILD_OPTS) --output=bundles/ --target=$@ $(VERSION_AUTOGEN_ARGS) .
.PHONY: dynbinary
dynbinary: bundles ## build dynamically linked linux binaries
$(BAKE_CMD) dynbinary
$(BUILD_CMD) $(BUILD_OPTS) --output=bundles/ --target=$@ $(VERSION_AUTOGEN_ARGS) .
.PHONY: cross
cross: bundles ## cross build the binaries
$(BAKE_CMD) binary-cross
cross: BUILD_OPTS += --build-arg CROSS=true --build-arg DOCKER_CROSSPLATFORMS
cross: bundles ## cross build the binaries for darwin, freebsd and\nwindows
$(BUILD_CMD) $(BUILD_OPTS) --output=bundles/ --target=$@ $(VERSION_AUTOGEN_ARGS) .
bundles:
mkdir bundles
@@ -185,43 +179,35 @@ clean: clean-cache
clean-cache: ## remove the docker volumes that are used for caching in the dev-container
docker volume rm -f docker-dev-cache docker-mod-cache
.PHONY: help
help: ## this help
@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z0-9_-]+:.*?## / {gsub("\\\\n",sprintf("\n%22c",""), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)
.PHONY: install
install: ## install the linux binaries
KEEPBUNDLE=1 hack/make.sh install-binary
.PHONY: run
run: build ## run the docker daemon in a container
$(DOCKER_RUN_DOCKER) sh -c "KEEPBUNDLE=1 hack/make.sh install-binary run"
.PHONY: build
ifeq ($(BIND_DIR), .)
build: shell_target := --target=dev-base
else
build: shell_target := --target=dev
else
build: shell_target := --target=final
endif
build: bundles
$(BUILD_CMD) $(BUILD_OPTS) $(shell_target) --load -t "$(DOCKER_IMAGE)" .
$(BUILD_CMD) $(BUILD_OPTS) $(shell_target) --load $(BUILD_CROSS) -t "$(DOCKER_IMAGE)" .
.PHONY: shell
shell: build ## start a shell inside the build env
$(DOCKER_RUN_DOCKER) bash
.PHONY: test
test: build test-unit ## run the unit, integration and docker-py tests
$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary test-integration test-docker-py
$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary cross test-integration test-docker-py
.PHONY: test-docker-py
test-docker-py: build ## run the docker-py tests
$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary test-docker-py
.PHONY: test-integration-cli
test-integration-cli: test-integration ## (DEPRECATED) use test-integration
.PHONY: test-integration
ifneq ($(and $(TEST_SKIP_INTEGRATION),$(TEST_SKIP_INTEGRATION_CLI)),)
test-integration:
@echo Both integrations suites skipped per environment variables
@@ -230,31 +216,20 @@ test-integration: build ## run the integration tests
$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary test-integration
endif
.PHONY: test-integration-flaky
test-integration-flaky: build ## run the stress test for all new integration tests
$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary test-integration-flaky
.PHONY: test-unit
test-unit: build ## run the unit tests
$(DOCKER_RUN_DOCKER) hack/test/unit
.PHONY: validate
validate: build ## validate DCO, Seccomp profile generation, gofmt,\n./pkg/ isolation, golint, tests, tomls, go vet and vendor
$(DOCKER_RUN_DOCKER) hack/validate/all
.PHONY: validate-generate-files
validate-generate-files:
$(BUILD_CMD) --target "validate" \
--output "type=cacheonly" \
--file "./hack/dockerfiles/generate-files.Dockerfile" .
.PHONY: validate-%
validate-%: build ## validate specific check
$(DOCKER_RUN_DOCKER) hack/validate/$*
.PHONY: win
win: bundles ## cross build the binary for windows
$(BAKE_CMD) --set *.platform=windows/amd64 binary
win: build ## cross build the binary for windows
$(DOCKER_RUN_DOCKER) DOCKER_CROSSPLATFORMS=windows/amd64 hack/make.sh cross
.PHONY: swagger-gen
swagger-gen:
@@ -271,16 +246,3 @@ swagger-docs: ## preview the API documentation
-e 'REDOC_OPTIONS=hide-hostname="true" lazy-rendering' \
-p $(SWAGGER_DOCS_PORT):80 \
bfirsh/redoc:1.14.0
.PHONY: generate-files
generate-files:
$(eval $@_TMP_OUT := $(shell mktemp -d -t moby-output.XXXXXXXXXX))
@if [ -z "$($@_TMP_OUT)" ]; then \
echo "Temp dir is not set"; \
exit 1; \
fi
$(BUILD_CMD) --target "update" \
--output "type=local,dest=$($@_TMP_OUT)" \
--file "./hack/dockerfiles/generate-files.Dockerfile" .
cp -R "$($@_TMP_OUT)"/. .
rm -rf "$($@_TMP_OUT)"/*


@@ -1,11 +1,6 @@
The Moby Project
================
[![PkgGoDev](https://pkg.go.dev/badge/github.com/docker/docker)](https://pkg.go.dev/github.com/docker/docker)
[![Go Report Card](https://goreportcard.com/badge/github.com/docker/docker)](https://goreportcard.com/report/github.com/docker/docker)
[![OpenSSF Scorecard](https://api.scorecard.dev/projects/github.com/moby/moby/badge)](https://scorecard.dev/viewer/?uri=github.com/moby/moby)
![Moby Project logo](docs/static_files/moby-project-logo.png "The Moby Project")
Moby is an open-source project created by Docker to enable and accelerate software containerization.
@@ -19,7 +14,7 @@ Moby is an open project guided by strong principles, aiming to be modular, flexi
It is open to the community to help set its direction.
- Modular: the project includes lots of components that have well-defined functions and APIs that work together.
- Batteries included but swappable: Moby includes enough components to build fully featured container systems, but its modular architecture ensures that most of the components can be swapped by different implementations.
- Batteries included but swappable: Moby includes enough components to build fully featured container system, but its modular architecture ensures that most of the components can be swapped by different implementations.
- Usable security: Moby provides secure defaults without compromising usability.
- Developer focused: The APIs are intended to be functional and useful to build powerful tools.
They are not necessarily intended as end user tools but as components aimed at developers.
@@ -37,7 +32,7 @@ New projects can be added if they fit with the community goals. Docker is commit
However, other projects are also encouraged to use Moby as an upstream, and to reuse the components in diverse ways, and all these uses will be treated in the same way. External maintainers and contributors are welcomed.
The Moby project is not intended as a location for support or feature requests for Docker products, but as a place for contributors to work on open source code, fix bugs, and make the code more useful.
The releases are supported by the maintainers, community and users, on a best efforts basis only. For customers who want enterprise or commercial support, [Docker Desktop](https://www.docker.com/products/docker-desktop/) and [Mirantis Container Runtime](https://www.mirantis.com/software/mirantis-container-runtime/) are the appropriate products for these use cases.
The releases are supported by the maintainers, community and users, on a best efforts basis only, and are not intended for customers who want enterprise or commercial support; Docker EE is the appropriate product for these use cases.
-----


@@ -1,42 +1,9 @@
# Security Policy
# Reporting security issues
The maintainers of the Moby project take security seriously. If you discover
a security issue, please bring it to their attention right away!
The Moby maintainers take security seriously. If you discover a security issue, please bring it to their attention right away!
## Reporting a Vulnerability
### Reporting a Vulnerability
Please **DO NOT** file a public issue, instead send your report privately
to [security@docker.com](mailto:security@docker.com).
Please **DO NOT** file a public issue, instead send your report privately to security@docker.com.
Reporter(s) can expect a response within 72 hours, acknowledging the issue was
received.
## Review Process
After receiving the report, an initial triage and technical analysis is
performed to confirm the report and determine its scope. We may request
additional information in this stage of the process.
Once a reviewer has confirmed the relevance of the report, a draft security
advisory will be created on GitHub. The draft advisory will be used to discuss
the issue with maintainers, the reporter(s), and where applicable, other
affected parties under embargo.
If the vulnerability is accepted, a timeline for developing a patch, public
disclosure, and patch release will be determined. If there is an embargo period
on public disclosure before the patch release, the reporter(s) are expected to
participate in the discussion of the timeline and abide by agreed upon dates
for public disclosure.
## Accreditation
Security reports are greatly appreciated and we will publicly thank you,
although we will keep your name confidential if you request it. We also like to
send gifts - if you're into swag, make sure to let us know. We do not currently
offer a paid security bounty program at this time.
## Supported Versions
This project uses long-lived branches to maintain releases. Refer to
[BRANCHES-AND-TAGS.md](project/BRANCHES-AND-TAGS.md) in the default branch to
learn about the current maintenance status of each branch.
Security reports are greatly appreciated and we will publicly thank you for it, although we keep your name confidential if you request it. We also like to send gifts—if you're into schwag, make sure to let us know. We currently do not offer a paid security bounty program, but are not ruling it out in the future.


@@ -37,6 +37,6 @@ There is hopefully enough example material in the file for you to copy a similar
When you make edits to `swagger.yaml`, you may want to check the generated API documentation to ensure it renders correctly.
Run `make swagger-docs` and a preview will be running at `http://localhost:9000`. Some of the styling may be incorrect, but you'll be able to ensure that it is generating the correct documentation.
Run `make swagger-docs` and a preview will be running at `http://localhost`. Some of the styling may be incorrect, but you'll be able to ensure that it is generating the correct documentation.
The production documentation is generated by vendoring `swagger.yaml` into [docker/docker.github.io](https://github.com/docker/docker.github.io).


@@ -2,17 +2,8 @@ package api // import "github.com/docker/docker/api"
// Common constants for daemon and client.
const (
// DefaultVersion of the current REST API.
DefaultVersion = "1.50"
// MinSupportedAPIVersion is the minimum API version that can be supported
// by the API server, specified as "major.minor". Note that the daemon
// may be configured with a different minimum API version, as returned
// in [github.com/docker/docker/api/types.Version.MinAPIVersion].
//
// API requests for API versions lower than the configured version produce
// an error.
MinSupportedAPIVersion = "1.24"
// DefaultVersion of Current REST API
DefaultVersion = "1.42"
// NoBaseImageSpecifier is the symbol used by the FROM
// command to specify that no base image is to be used.
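The two constants above bound the API versions the server will accept. A minimal, hypothetical sketch (not part of the diff) of that bounds check, using the helpers from api/types/versions and assuming the newer MinSupportedAPIVersion constant:
package main
import (
    "fmt"
    "github.com/docker/docker/api"
    "github.com/docker/docker/api/types/versions"
)
// supported reports whether a requested API version lies within
// [api.MinSupportedAPIVersion, api.DefaultVersion].
func supported(requested string) bool {
    if requested == "" {
        return true // empty means "use api.DefaultVersion"
    }
    return !versions.LessThan(requested, api.MinSupportedAPIVersion) &&
        !versions.GreaterThan(requested, api.DefaultVersion)
}
func main() {
    fmt.Println(supported("1.24")) // true
    fmt.Println(supported("1.12")) // false: below the minimum supported version
}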

api/common_unix.go Normal file

@@ -0,0 +1,7 @@
//go:build !windows
// +build !windows
package api // import "github.com/docker/docker/api"
// MinVersion represents Minimum REST API version supported
const MinVersion = "1.12"

api/common_windows.go Normal file

@@ -0,0 +1,8 @@
package api // import "github.com/docker/docker/api"
// MinVersion represents Minimum REST API version supported
// Technically the first daemon API version released on Windows is v1.25 in
// engine version 1.13. However, some clients are explicitly using downlevel
// APIs (e.g. docker-compose v2.1 file format) and that is just too restrictive.
// Hence also allowing 1.24 on Windows.
const MinVersion string = "1.24"


@@ -5,9 +5,9 @@ import (
"fmt"
"strconv"
"github.com/distribution/reference"
"github.com/docker/distribution/reference"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/api/types/build"
"github.com/docker/docker/api/types/events"
"github.com/docker/docker/builder"
buildkit "github.com/docker/docker/builder/builder-next"
@@ -21,7 +21,7 @@ import (
// ImageComponent provides an interface for working with images
type ImageComponent interface {
SquashImage(from string, to string) (string, error)
TagImage(context.Context, image.ID, reference.Named) error
TagImageWithReference(image.ID, reference.Named) error
}
// Builder defines interface for running a build
@@ -52,62 +52,64 @@ func (b *Backend) RegisterGRPC(s *grpc.Server) {
// Build builds an image from a Source
func (b *Backend) Build(ctx context.Context, config backend.BuildConfig) (string, error) {
options := config.Options
useBuildKit := options.Version == build.BuilderBuildKit
useBuildKit := options.Version == types.BuilderBuildKit
tags, err := sanitizeRepoAndTags(options.Tags)
tagger, err := NewTagger(b.imageComponent, config.ProgressWriter.StdoutFormatter, options.Tags)
if err != nil {
return "", err
}
var buildResult *builder.Result
var build *builder.Result
if useBuildKit {
buildResult, err = b.buildkit.Build(ctx, config)
build, err = b.buildkit.Build(ctx, config)
if err != nil {
return "", err
}
} else {
buildResult, err = b.builder.Build(ctx, config)
build, err = b.builder.Build(ctx, config)
if err != nil {
return "", err
}
}
if buildResult == nil {
if build == nil {
return "", nil
}
imageID := buildResult.ImageID
var imageID = build.ImageID
if options.Squash {
if imageID, err = squashBuild(buildResult, b.imageComponent); err != nil {
if imageID, err = squashBuild(build, b.imageComponent); err != nil {
return "", err
}
if config.ProgressWriter.AuxFormatter != nil {
if err = config.ProgressWriter.AuxFormatter.Emit("moby.image.id", build.Result{ID: imageID}); err != nil {
if err = config.ProgressWriter.AuxFormatter.Emit("moby.image.id", types.BuildResult{ID: imageID}); err != nil {
return "", err
}
}
}
if imageID != "" && !useBuildKit {
if !useBuildKit {
stdout := config.ProgressWriter.StdoutFormatter
_, _ = fmt.Fprintf(stdout, "Successfully built %s\n", stringid.TruncateID(imageID))
err = tagImages(ctx, b.imageComponent, config.ProgressWriter.StdoutFormatter, image.ID(imageID), tags)
fmt.Fprintf(stdout, "Successfully built %s\n", stringid.TruncateID(imageID))
}
if imageID != "" {
err = tagger.TagImages(image.ID(imageID))
}
return imageID, err
}
// PruneCache removes all cached build sources
func (b *Backend) PruneCache(ctx context.Context, opts build.CachePruneOptions) (*build.CachePruneReport, error) {
func (b *Backend) PruneCache(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error) {
buildCacheSize, cacheIDs, err := b.buildkit.Prune(ctx, opts)
if err != nil {
return nil, errors.Wrap(err, "failed to prune build cache")
}
b.eventsService.Log(events.ActionPrune, events.BuilderEventType, events.Actor{
b.eventsService.Log("prune", events.BuilderEventType, events.Actor{
Attributes: map[string]string{
"reclaimed": strconv.FormatInt(buildCacheSize, 10),
},
})
return &build.CachePruneReport{SpaceReclaimed: uint64(buildCacheSize), CachesDeleted: cacheIDs}, nil
return &types.BuildCachePruneReport{SpaceReclaimed: uint64(buildCacheSize), CachesDeleted: cacheIDs}, nil
}
// Cancel cancels the build by ID


@@ -1,31 +1,55 @@
package build // import "github.com/docker/docker/api/server/backend/build"
import (
"context"
"fmt"
"io"
"github.com/distribution/reference"
"github.com/docker/distribution/reference"
"github.com/docker/docker/image"
"github.com/pkg/errors"
)
// tagImages creates image tags for the imageID.
func tagImages(ctx context.Context, ic ImageComponent, stdout io.Writer, imageID image.ID, repoAndTags []reference.Named) error {
for _, rt := range repoAndTags {
if err := ic.TagImage(ctx, imageID, rt); err != nil {
// Tagger is responsible for tagging an image created by a builder
type Tagger struct {
imageComponent ImageComponent
stdout io.Writer
repoAndTags []reference.Named
}
// NewTagger returns a new Tagger for tagging the images of a build.
// If any of the names are invalid tags an error is returned.
func NewTagger(backend ImageComponent, stdout io.Writer, names []string) (*Tagger, error) {
reposAndTags, err := sanitizeRepoAndTags(names)
if err != nil {
return nil, err
}
return &Tagger{
imageComponent: backend,
stdout: stdout,
repoAndTags: reposAndTags,
}, nil
}
// TagImages creates image tags for the imageID
func (bt *Tagger) TagImages(imageID image.ID) error {
for _, rt := range bt.repoAndTags {
if err := bt.imageComponent.TagImageWithReference(imageID, rt); err != nil {
return err
}
_, _ = fmt.Fprintln(stdout, "Successfully tagged", reference.FamiliarString(rt))
fmt.Fprintf(bt.stdout, "Successfully tagged %s\n", reference.FamiliarString(rt))
}
return nil
}
// sanitizeRepoAndTags parses the raw "t" parameter received from the client
// to a slice of repoAndTag. It removes duplicates, and validates each name
// to not contain a digest.
func sanitizeRepoAndTags(names []string) (repoAndTags []reference.Named, _ error) {
uniqNames := map[string]struct{}{}
// to a slice of repoAndTag.
// It also validates each repoName and tag.
func sanitizeRepoAndTags(names []string) ([]reference.Named, error) {
var (
repoAndTags []reference.Named
// This map is used for deduplicating the "-t" parameter.
uniqNames = make(map[string]struct{})
)
for _, repo := range names {
if repo == "" {
continue
@@ -36,12 +60,14 @@ func sanitizeRepoAndTags(names []string) (repoAndTags []reference.Named, _ error
return nil, err
}
if _, ok := ref.(reference.Digested); ok {
if _, isCanonical := ref.(reference.Canonical); isCanonical {
return nil, errors.New("build tag cannot contain a digest")
}
ref = reference.TagNameOnly(ref)
nameWithTag := ref.String()
if _, exists := uniqNames[nameWithTag]; !exists {
uniqNames[nameWithTag] = struct{}{}
repoAndTags = append(repoAndTags, ref)
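A minimal sketch, assuming the distribution/reference helpers used by sanitizeRepoAndTags above, of how a raw "-t" value is normalized before tagging (the repository name is illustrative):
package main
import (
    "fmt"
    "github.com/distribution/reference"
)
func main() {
    // Parse a user-supplied "-t" value into a fully-qualified reference.
    ref, err := reference.ParseNormalizedNamed("myorg/myapp")
    if err != nil {
        panic(err)
    }
    // Digested references are rejected by sanitizeRepoAndTags; untagged
    // names get the default ":latest" tag.
    ref = reference.TagNameOnly(ref)
    fmt.Println(reference.FamiliarString(ref)) // myorg/myapp:latest
}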


@@ -0,0 +1,34 @@
package server
import (
"net/http"
"github.com/docker/docker/api/server/httpstatus"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/versions"
"github.com/gorilla/mux"
"google.golang.org/grpc/status"
)
// makeErrorHandler makes an HTTP handler that decodes a Docker error and
// returns it in the response.
func makeErrorHandler(err error) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
statusCode := httpstatus.FromError(err)
vars := mux.Vars(r)
if apiVersionSupportsJSONErrors(vars["version"]) {
response := &types.ErrorResponse{
Message: err.Error(),
}
_ = httputils.WriteJSON(w, statusCode, response)
} else {
http.Error(w, status.Convert(err).Message(), statusCode)
}
}
}
func apiVersionSupportsJSONErrors(version string) bool {
const firstAPIVersionWithJSONErrors = "1.23"
return version == "" || versions.GreaterThan(version, firstAPIVersionWithJSONErrors)
}
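An illustrative check of the cut-off used by apiVersionSupportsJSONErrors: requests pinned to an API version newer than 1.23 (or to no version at all) receive a JSON error body, older ones a plain-text http.Error. A sketch assuming the api/types/versions helpers:
package main
import (
    "fmt"
    "github.com/docker/docker/api/types/versions"
)
func main() {
    const firstAPIVersionWithJSONErrors = "1.23"
    for _, v := range []string{"", "1.22", "1.24"} {
        useJSON := v == "" || versions.GreaterThan(v, firstAPIVersionWithJSONErrors)
        fmt.Printf("version %q -> JSON errors: %v\n", v, useJSON)
    }
}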


@@ -1,76 +1,81 @@
package httpstatus // import "github.com/docker/docker/api/server/httpstatus"
import (
"context"
"fmt"
"net/http"
cerrdefs "github.com/containerd/errdefs"
"github.com/containerd/log"
containerderrors "github.com/containerd/containerd/errdefs"
"github.com/docker/distribution/registry/api/errcode"
"github.com/docker/docker/errdefs"
"github.com/sirupsen/logrus"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
type causer interface {
Cause() error
}
// FromError retrieves status code from error message.
func FromError(err error) int {
if err == nil {
log.G(context.TODO()).WithError(err).Error("unexpected HTTP error handling")
logrus.WithFields(logrus.Fields{"error": err}).Error("unexpected HTTP error handling")
return http.StatusInternalServerError
}
// Resolve the error to ensure status is chosen from the first outermost error
rerr := cerrdefs.Resolve(err)
var statusCode int
// Stop right there
// Are you sure you should be adding a new error class here? Do one of the existing ones work?
// Note that the below functions are already checking the error causal chain for matches.
// Only check errors from the errdefs package, no new error type checking may be added
switch {
case cerrdefs.IsNotFound(rerr):
return http.StatusNotFound
case cerrdefs.IsInvalidArgument(rerr):
return http.StatusBadRequest
case cerrdefs.IsConflict(rerr):
return http.StatusConflict
case cerrdefs.IsUnauthorized(rerr):
return http.StatusUnauthorized
case cerrdefs.IsUnavailable(rerr):
return http.StatusServiceUnavailable
case cerrdefs.IsPermissionDenied(rerr):
return http.StatusForbidden
case cerrdefs.IsNotModified(rerr):
return http.StatusNotModified
case cerrdefs.IsNotImplemented(rerr):
return http.StatusNotImplemented
case cerrdefs.IsInternal(rerr) || cerrdefs.IsDataLoss(rerr) || cerrdefs.IsDeadlineExceeded(rerr) || cerrdefs.IsCanceled(rerr):
return http.StatusInternalServerError
case errdefs.IsNotFound(err):
statusCode = http.StatusNotFound
case errdefs.IsInvalidParameter(err):
statusCode = http.StatusBadRequest
case errdefs.IsConflict(err):
statusCode = http.StatusConflict
case errdefs.IsUnauthorized(err):
statusCode = http.StatusUnauthorized
case errdefs.IsUnavailable(err):
statusCode = http.StatusServiceUnavailable
case errdefs.IsForbidden(err):
statusCode = http.StatusForbidden
case errdefs.IsNotModified(err):
statusCode = http.StatusNotModified
case errdefs.IsNotImplemented(err):
statusCode = http.StatusNotImplemented
case errdefs.IsSystem(err) || errdefs.IsUnknown(err) || errdefs.IsDataLoss(err) || errdefs.IsDeadline(err) || errdefs.IsCancelled(err):
statusCode = http.StatusInternalServerError
default:
if statusCode := statusCodeFromGRPCError(err); statusCode != http.StatusInternalServerError {
statusCode = statusCodeFromGRPCError(err)
if statusCode != http.StatusInternalServerError {
return statusCode
}
if statusCode := statusCodeFromDistributionError(err); statusCode != http.StatusInternalServerError {
statusCode = statusCodeFromContainerdError(err)
if statusCode != http.StatusInternalServerError {
return statusCode
}
switch e := err.(type) {
case interface{ Unwrap() error }:
return FromError(e.Unwrap())
case interface{ Unwrap() []error }:
for _, ue := range e.Unwrap() {
if statusCode := FromError(ue); statusCode != http.StatusInternalServerError {
return statusCode
}
}
statusCode = statusCodeFromDistributionError(err)
if statusCode != http.StatusInternalServerError {
return statusCode
}
if e, ok := err.(causer); ok {
return FromError(e.Cause())
}
if !cerrdefs.IsUnknown(err) {
log.G(context.TODO()).WithFields(log.Fields{
"module": "api",
"error": err,
"error_type": fmt.Sprintf("%T", err),
}).Debug("FIXME: Got an API for which error does not match any expected type!!!")
}
return http.StatusInternalServerError
logrus.WithFields(logrus.Fields{
"module": "api",
"error_type": fmt.Sprintf("%T", err),
}).Debugf("FIXME: Got an API for which error does not match any expected type!!!: %+v", err)
}
if statusCode == 0 {
statusCode = http.StatusInternalServerError
}
return statusCode
}
// statusCodeFromGRPCError returns status code according to gRPC error
@@ -122,3 +127,24 @@ func statusCodeFromDistributionError(err error) int {
}
return http.StatusInternalServerError
}
// statusCodeFromContainerdError returns status code for containerd errors when
// consumed directly (not through gRPC)
func statusCodeFromContainerdError(err error) int {
switch {
case containerderrors.IsInvalidArgument(err):
return http.StatusBadRequest
case containerderrors.IsNotFound(err):
return http.StatusNotFound
case containerderrors.IsAlreadyExists(err):
return http.StatusConflict
case containerderrors.IsFailedPrecondition(err):
return http.StatusPreconditionFailed
case containerderrors.IsUnavailable(err):
return http.StatusServiceUnavailable
case containerderrors.IsNotImplemented(err):
return http.StatusNotImplemented
default:
return http.StatusInternalServerError
}
}
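A minimal sketch, assuming the wrappers from github.com/docker/docker/errdefs, of how a classified error resolves to an HTTP status code through FromError:
package main
import (
    "errors"
    "fmt"
    "github.com/docker/docker/api/server/httpstatus"
    "github.com/docker/docker/errdefs"
)
func main() {
    // Wrap a plain error so it carries the "not found" classification.
    err := errdefs.NotFound(errors.New("no such container: web"))
    fmt.Println(httpstatus.FromError(err)) // 404
    // Unclassified errors fall through to 500.
    fmt.Println(httpstatus.FromError(errors.New("boom"))) // 500
}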


@@ -12,4 +12,5 @@ import (
// container configuration.
type ContainerDecoder interface {
DecodeConfig(src io.Reader) (*container.Config, *container.HostConfig, *network.NetworkingConfig, error)
DecodeHostConfig(src io.Reader) (*container.HostConfig, error)
}


@@ -1,27 +1,15 @@
package httputils // import "github.com/docker/docker/api/server/httputils"
import (
"encoding/json"
"fmt"
"net/http"
"strconv"
"strings"
"github.com/distribution/reference"
"github.com/docker/docker/errdefs"
"github.com/pkg/errors"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
// BoolValue transforms a form value in different formats into a boolean type.
func BoolValue(r *http.Request, k string) bool {
switch strings.ToLower(strings.TrimSpace(r.FormValue(k))) {
case "", "0", "no", "false", "none":
return false
default:
return true
}
s := strings.ToLower(strings.TrimSpace(r.FormValue(k)))
return !(s == "" || s == "0" || s == "no" || s == "false" || s == "none")
}
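A short usage sketch for BoolValue (the query values are illustrative): empty, "0", "no", "false" and "none" are treated as false, anything else as true.
package main
import (
    "fmt"
    "net/http"
    "github.com/docker/docker/api/server/httputils"
)
func main() {
    r, _ := http.NewRequest(http.MethodGet, "/containers/json?all=1&size=false", nil)
    fmt.Println(httputils.BoolValue(r, "all"))     // true
    fmt.Println(httputils.BoolValue(r, "size"))    // false
    fmt.Println(httputils.BoolValue(r, "missing")) // false (empty value)
}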
// BoolValueOrDefault returns the default bool passed if the query param is
@@ -33,29 +21,6 @@ func BoolValueOrDefault(r *http.Request, k string, d bool) bool {
return BoolValue(r, k)
}
// Uint32Value parses a form value into an uint32 type. It returns an error
// if the field is not set, empty, incorrectly formatted, or out of range.
func Uint32Value(r *http.Request, field string) (uint32, error) {
// strconv.ParseUint returns an "strconv.ErrSyntax" for negative values,
// not an "out of range". Strip the prefix before parsing, and use it
// later to detect valid, but negative values.
v, isNeg := strings.CutPrefix(r.Form.Get(field), "-")
if v == "" || v[0] == '+' {
// Fast-path for invalid values.
return 0, strconv.ErrSyntax
}
i, err := strconv.ParseUint(v, 10, 32)
if err != nil {
// Unwrap to remove the 'strconv.ParseUint: parsing "some-invalid-value":' prefix.
return 0, errors.Unwrap(err)
}
if isNeg {
return 0, strconv.ErrRange
}
return uint32(i), nil
}
// Int64ValueOrZero parses a form value into an int64 type.
// It returns 0 if the parsing fails.
func Int64ValueOrZero(r *http.Request, k string) int64 {
@@ -76,38 +41,6 @@ func Int64ValueOrDefault(r *http.Request, field string, def int64) (int64, error
return def, nil
}
// RepoTagReference parses form values "repo" and "tag" and returns a valid
// reference with repository and tag.
// If repo is empty, then a nil reference is returned.
// If no tag is given, then the default "latest" tag is set.
func RepoTagReference(repo, tag string) (reference.NamedTagged, error) {
if repo == "" {
return nil, nil
}
ref, err := reference.ParseNormalizedNamed(repo)
if err != nil {
return nil, err
}
if _, isDigested := ref.(reference.Digested); isDigested {
return nil, fmt.Errorf("cannot import digest reference")
}
if tag != "" {
return reference.WithTag(ref, tag)
}
withDefaultTag := reference.TagNameOnly(ref)
namedTagged, ok := withDefaultTag.(reference.NamedTagged)
if !ok {
return nil, fmt.Errorf("unexpected reference: %q", ref.String())
}
return namedTagged, nil
}
// ArchiveOptions stores archive information for different operations.
type ArchiveOptions struct {
Name string
@@ -141,43 +74,3 @@ func ArchiveFormValues(r *http.Request, vars map[string]string) (ArchiveOptions,
}
return ArchiveOptions{name, path}, nil
}
// DecodePlatform decodes the OCI platform JSON string into a Platform struct.
func DecodePlatform(platformJSON string) (*ocispec.Platform, error) {
var p ocispec.Platform
if err := json.Unmarshal([]byte(platformJSON), &p); err != nil {
return nil, errdefs.InvalidParameter(errors.Wrap(err, "failed to parse platform"))
}
hasAnyOptional := (p.Variant != "" || p.OSVersion != "" || len(p.OSFeatures) > 0)
if p.OS == "" && p.Architecture == "" && hasAnyOptional {
return nil, errdefs.InvalidParameter(errors.New("optional platform fields provided, but OS and Architecture are missing"))
}
if p.OS == "" || p.Architecture == "" {
return nil, errdefs.InvalidParameter(errors.New("both OS and Architecture must be provided"))
}
return &p, nil
}
// DecodePlatforms decodes the OCI platform JSON string into a Platform struct.
//
// Typically, the argument is a value of: r.Form["platform"]
func DecodePlatforms(platformJSONs []string) ([]ocispec.Platform, error) {
if len(platformJSONs) == 0 {
return nil, nil
}
var output []ocispec.Platform
for _, platform := range platformJSONs {
p, err := DecodePlatform(platform)
if err != nil {
return nil, err
}
output = append(output, *p)
}
return output, nil
}
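A minimal sketch, assuming the DecodePlatform helper shown above; the JSON is the wire format a client would place in the "platform" form field:
package main
import (
    "fmt"
    "github.com/docker/docker/api/server/httputils"
)
func main() {
    p, err := httputils.DecodePlatform(`{"os":"linux","architecture":"arm64","variant":"v8"}`)
    if err != nil {
        panic(err)
    }
    fmt.Printf("%s/%s/%s\n", p.OS, p.Architecture, p.Variant) // linux/arm64/v8
}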


@@ -1,16 +1,9 @@
package httputils // import "github.com/docker/docker/api/server/httputils"
import (
"math"
"net/http"
"net/url"
"strconv"
"testing"
cerrdefs "github.com/containerd/errdefs"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"gotest.tools/v3/assert"
is "gotest.tools/v3/assert/cmp"
)
func TestBoolValue(t *testing.T) {
@@ -110,126 +103,3 @@ func TestInt64ValueOrDefaultWithError(t *testing.T) {
t.Fatal("Expected an error.")
}
}
func TestUint32Value(t *testing.T) {
const valueNotSet = "unset"
tests := []struct {
value string
expected uint32
expectedErr error
}{
{
value: "0",
expected: 0,
},
{
value: strconv.FormatUint(math.MaxUint32, 10),
expected: math.MaxUint32,
},
{
value: valueNotSet,
expectedErr: strconv.ErrSyntax,
},
{
value: "",
expectedErr: strconv.ErrSyntax,
},
{
value: "-1",
expectedErr: strconv.ErrRange,
},
{
value: "4294967296", // MaxUint32+1
expectedErr: strconv.ErrRange,
},
{
value: "not-a-number",
expectedErr: strconv.ErrSyntax,
},
}
for _, tc := range tests {
t.Run(tc.value, func(t *testing.T) {
r, _ := http.NewRequest(http.MethodPost, "", nil)
r.Form = url.Values{}
if tc.value != valueNotSet {
r.Form.Set("field", tc.value)
}
out, err := Uint32Value(r, "field")
assert.Check(t, is.Equal(tc.expected, out))
assert.Check(t, is.ErrorIs(err, tc.expectedErr))
})
}
}
func TestDecodePlatform(t *testing.T) {
tests := []struct {
doc string
platformJSON string
expected *ocispec.Platform
expectedErr string
}{
{
doc: "empty platform",
expectedErr: `failed to parse platform: unexpected end of JSON input`,
},
{
doc: "not JSON",
platformJSON: `linux/ams64`,
expectedErr: `failed to parse platform: invalid character 'l' looking for beginning of value`,
},
{
doc: "malformed JSON",
platformJSON: `{"architecture"`,
expectedErr: `failed to parse platform: unexpected end of JSON input`,
},
{
doc: "missing os",
platformJSON: `{"architecture":"amd64","os":""}`,
expectedErr: `both OS and Architecture must be provided`,
},
{
doc: "variant without architecture",
platformJSON: `{"architecture":"","os":"","variant":"v7"}`,
expectedErr: `optional platform fields provided, but OS and Architecture are missing`,
},
{
doc: "missing architecture",
platformJSON: `{"architecture":"","os":"linux"}`,
expectedErr: `both OS and Architecture must be provided`,
},
{
doc: "os.version without os and architecture",
platformJSON: `{"architecture":"","os":"","os.version":"12.0"}`,
expectedErr: `optional platform fields provided, but OS and Architecture are missing`,
},
{
doc: "os.features without os and architecture",
platformJSON: `{"architecture":"","os":"","os.features":["a","b"]}`,
expectedErr: `optional platform fields provided, but OS and Architecture are missing`,
},
{
doc: "valid platform",
platformJSON: `{"architecture":"arm64","os":"linux","os.version":"12.0", "os.features":["a","b"], "variant": "v7"}`,
expected: &ocispec.Platform{
Architecture: "arm64",
OS: "linux",
OSVersion: "12.0",
OSFeatures: []string{"a", "b"},
Variant: "v7",
},
},
}
for _, tc := range tests {
t.Run(tc.doc, func(t *testing.T) {
p, err := DecodePlatform(tc.platformJSON)
assert.Check(t, is.DeepEqual(p, tc.expected))
if tc.expectedErr != "" {
assert.Check(t, cerrdefs.IsInvalidArgument(err))
assert.Check(t, is.Error(err, tc.expectedErr))
} else {
assert.Check(t, err)
}
})
}
}


@@ -33,7 +33,7 @@ func TestJsonContentType(t *testing.T) {
func TestReadJSON(t *testing.T) {
t.Run("nil body", func(t *testing.T) {
req, err := http.NewRequest(http.MethodPost, "https://example.com/some/path", nil)
req, err := http.NewRequest("POST", "https://example.com/some/path", nil)
if err != nil {
t.Error(err)
}
@@ -45,7 +45,7 @@ func TestReadJSON(t *testing.T) {
})
t.Run("empty body", func(t *testing.T) {
req, err := http.NewRequest(http.MethodPost, "https://example.com/some/path", strings.NewReader(""))
req, err := http.NewRequest("POST", "https://example.com/some/path", strings.NewReader(""))
if err != nil {
t.Error(err)
}
@@ -60,7 +60,7 @@ func TestReadJSON(t *testing.T) {
})
t.Run("with valid request", func(t *testing.T) {
req, err := http.NewRequest(http.MethodPost, "https://example.com/some/path", strings.NewReader(`{"SomeField":"some value"}`))
req, err := http.NewRequest("POST", "https://example.com/some/path", strings.NewReader(`{"SomeField":"some value"}`))
if err != nil {
t.Error(err)
}
@@ -75,7 +75,7 @@ func TestReadJSON(t *testing.T) {
}
})
t.Run("with whitespace", func(t *testing.T) {
req, err := http.NewRequest(http.MethodPost, "https://example.com/some/path", strings.NewReader(`
req, err := http.NewRequest("POST", "https://example.com/some/path", strings.NewReader(`
{"SomeField":"some value"}
@@ -95,7 +95,7 @@ func TestReadJSON(t *testing.T) {
})
t.Run("with extra content", func(t *testing.T) {
req, err := http.NewRequest(http.MethodPost, "https://example.com/some/path", strings.NewReader(`{"SomeField":"some value"} and more content`))
req, err := http.NewRequest("POST", "https://example.com/some/path", strings.NewReader(`{"SomeField":"some value"} and more content`))
if err != nil {
t.Error(err)
}
@@ -112,7 +112,7 @@ func TestReadJSON(t *testing.T) {
})
t.Run("invalid JSON", func(t *testing.T) {
req, err := http.NewRequest(http.MethodPost, "https://example.com/some/path", strings.NewReader(`{invalid json`))
req, err := http.NewRequest("POST", "https://example.com/some/path", strings.NewReader(`{invalid json`))
if err != nil {
t.Error(err)
}


@@ -4,12 +4,11 @@ import (
"context"
"fmt"
"io"
"net/http"
"net/url"
"sort"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/docker/docker/pkg/stdcopy"
@@ -17,11 +16,7 @@ import (
// WriteLogStream writes an encoded byte stream of log messages from the
// messages channel, multiplexing them with a stdcopy.Writer if mux is true
func WriteLogStream(_ context.Context, w http.ResponseWriter, msgs <-chan *backend.LogMessage, config *container.LogsOptions, mux bool) {
// See https://github.com/moby/moby/issues/47448
// Trigger headers to be written immediately.
w.WriteHeader(http.StatusOK)
func WriteLogStream(_ context.Context, w io.Writer, msgs <-chan *backend.LogMessage, config *types.ContainerLogsOptions, mux bool) {
wf := ioutils.NewWriteFlusher(w)
defer wf.Close()


@@ -1,9 +1,9 @@
package server // import "github.com/docker/docker/api/server"
import (
"github.com/containerd/log"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/server/middleware"
"github.com/sirupsen/logrus"
)
// handlerWithGlobalMiddlewares wraps the handler function for a request with
@@ -16,7 +16,7 @@ func (s *Server) handlerWithGlobalMiddlewares(handler httputils.APIFunc) httputi
next = m.WrapHandler(next)
}
if log.GetLevel() == log.DebugLevel {
if logrus.GetLevel() == logrus.DebugLevel {
next = middleware.DebugRequestMiddleware(next)
}


@@ -0,0 +1,37 @@
package middleware // import "github.com/docker/docker/api/server/middleware"
import (
"context"
"net/http"
"github.com/sirupsen/logrus"
)
// CORSMiddleware injects CORS headers to each request
// when it's configured.
type CORSMiddleware struct {
defaultHeaders string
}
// NewCORSMiddleware creates a new CORSMiddleware with default headers.
func NewCORSMiddleware(d string) CORSMiddleware {
return CORSMiddleware{defaultHeaders: d}
}
// WrapHandler returns a new handler function wrapping the previous one in the request chain.
func (c CORSMiddleware) WrapHandler(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
// If "api-cors-header" is not given, but "api-enable-cors" is true, we set cors to "*"
// otherwise, all head values will be passed to HTTP handler
corsHeaders := c.defaultHeaders
if corsHeaders == "" {
corsHeaders = "*"
}
logrus.Debugf("CORS header is enabled and set to: %s", corsHeaders)
w.Header().Add("Access-Control-Allow-Origin", corsHeaders)
w.Header().Add("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept, X-Registry-Auth")
w.Header().Add("Access-Control-Allow-Methods", "HEAD, GET, POST, DELETE, PUT, OPTIONS")
return handler(ctx, w, r, vars)
}
}


@@ -8,8 +8,6 @@ import (
"net/http"
"strings"
"github.com/containerd/log"
"github.com/docker/docker/api/server/httpstatus"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/pkg/ioutils"
"github.com/sirupsen/logrus"
@@ -18,37 +16,17 @@ import (
// DebugRequestMiddleware dumps the request to logger
func DebugRequestMiddleware(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
logger := log.G(ctx)
// Use a variable for fields to prevent overhead of repeatedly
// calling WithFields.
fields := log.Fields{
"module": "api",
"method": r.Method,
"request-url": r.RequestURI,
"vars": vars,
}
handleWithLogs := func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
logger.WithFields(fields).Debugf("handling %s request", r.Method)
err := handler(ctx, w, r, vars)
if err != nil {
// TODO(thaJeztah): unify this with Server.makeHTTPHandler, which also logs internal server errors as error-log. See https://github.com/moby/moby/pull/48740#discussion_r1816675574
fields["error-response"] = err
fields["status"] = httpstatus.FromError(err)
logger.WithFields(fields).Debugf("error response for %s request", r.Method)
}
return err
}
logrus.Debugf("Calling %s %s", r.Method, r.RequestURI)
if r.Method != http.MethodPost {
return handleWithLogs(ctx, w, r, vars)
return handler(ctx, w, r, vars)
}
if err := httputils.CheckForJSON(r); err != nil {
return handleWithLogs(ctx, w, r, vars)
return handler(ctx, w, r, vars)
}
maxBodySize := 4096 // 4KB
if r.ContentLength > int64(maxBodySize) {
return handleWithLogs(ctx, w, r, vars)
return handler(ctx, w, r, vars)
}
body := r.Body
@@ -58,25 +36,21 @@ func DebugRequestMiddleware(handler func(ctx context.Context, w http.ResponseWri
b, err := bufReader.Peek(maxBodySize)
if err != io.EOF {
// either there was an error reading, or the buffer is full (in which case the request is too large)
return handleWithLogs(ctx, w, r, vars)
return handler(ctx, w, r, vars)
}
var postForm map[string]interface{}
if err := json.Unmarshal(b, &postForm); err == nil {
maskSecretKeys(postForm)
// TODO(thaJeztah): is there a better way to detect if we're using JSON-formatted logs?
if _, ok := logger.Logger.Formatter.(*logrus.JSONFormatter); ok {
fields["form-data"] = postForm
formStr, errMarshal := json.Marshal(postForm)
if errMarshal == nil {
logrus.Debugf("form data: %s", string(formStr))
} else {
if data, err := json.Marshal(postForm); err != nil {
fields["form-data"] = postForm
} else {
fields["form-data"] = string(data)
}
logrus.Debugf("form data: %q", postForm)
}
}
return handleWithLogs(ctx, w, r, vars)
return handler(ctx, w, r, vars)
}
}


@@ -6,7 +6,6 @@ import (
"net/http"
"runtime"
"github.com/docker/docker/api"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/types/versions"
)
@@ -14,40 +13,19 @@ import (
// VersionMiddleware is a middleware that
// validates the client and server versions.
type VersionMiddleware struct {
serverVersion string
// defaultAPIVersion is the default API version provided by the API server,
// specified as "major.minor". It is usually configured to the latest API
// version [github.com/docker/docker/api.DefaultVersion].
//
// API requests for API versions greater than this version are rejected by
// the server and produce a [versionUnsupportedError].
defaultAPIVersion string
// minAPIVersion is the minimum API version provided by the API server,
// specified as "major.minor".
//
// API requests for API versions lower than this version are rejected by
// the server and produce a [versionUnsupportedError].
minAPIVersion string
serverVersion string
defaultVersion string
minVersion string
}
// NewVersionMiddleware creates a VersionMiddleware with the given versions.
func NewVersionMiddleware(serverVersion, defaultAPIVersion, minAPIVersion string) (*VersionMiddleware, error) {
if versions.LessThan(defaultAPIVersion, api.MinSupportedAPIVersion) || versions.GreaterThan(defaultAPIVersion, api.DefaultVersion) {
return nil, fmt.Errorf("invalid default API version (%s): must be between %s and %s", defaultAPIVersion, api.MinSupportedAPIVersion, api.DefaultVersion)
// NewVersionMiddleware creates a new VersionMiddleware
// with the default versions.
func NewVersionMiddleware(s, d, m string) VersionMiddleware {
return VersionMiddleware{
serverVersion: s,
defaultVersion: d,
minVersion: m,
}
if versions.LessThan(minAPIVersion, api.MinSupportedAPIVersion) || versions.GreaterThan(minAPIVersion, api.DefaultVersion) {
return nil, fmt.Errorf("invalid minimum API version (%s): must be between %s and %s", minAPIVersion, api.MinSupportedAPIVersion, api.DefaultVersion)
}
if versions.GreaterThan(minAPIVersion, defaultAPIVersion) {
return nil, fmt.Errorf("invalid API version: the minimum API version (%s) is higher than the default version (%s)", minAPIVersion, defaultAPIVersion)
}
return &VersionMiddleware{
serverVersion: serverVersion,
defaultAPIVersion: defaultAPIVersion,
minAPIVersion: minAPIVersion,
}, nil
}
type versionUnsupportedError struct {
@@ -67,18 +45,18 @@ func (e versionUnsupportedError) InvalidParameter() {}
func (v VersionMiddleware) WrapHandler(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
w.Header().Set("Server", fmt.Sprintf("Docker/%s (%s)", v.serverVersion, runtime.GOOS))
w.Header().Set("Api-Version", v.defaultAPIVersion)
w.Header().Set("Ostype", runtime.GOOS)
w.Header().Set("API-Version", v.defaultVersion)
w.Header().Set("OSType", runtime.GOOS)
apiVersion := vars["version"]
if apiVersion == "" {
apiVersion = v.defaultAPIVersion
apiVersion = v.defaultVersion
}
if versions.LessThan(apiVersion, v.minAPIVersion) {
return versionUnsupportedError{version: apiVersion, minVersion: v.minAPIVersion}
if versions.LessThan(apiVersion, v.minVersion) {
return versionUnsupportedError{version: apiVersion, minVersion: v.minVersion}
}
if versions.GreaterThan(apiVersion, v.defaultAPIVersion) {
return versionUnsupportedError{version: apiVersion, maxVersion: v.defaultAPIVersion}
if versions.GreaterThan(apiVersion, v.defaultVersion) {
return versionUnsupportedError{version: apiVersion, maxVersion: v.defaultVersion}
}
ctx = context.WithValue(ctx, httputils.APIVersionKey{}, apiVersion)
return handler(ctx, w, r, vars)


@@ -2,81 +2,27 @@ package middleware // import "github.com/docker/docker/api/server/middleware"
import (
"context"
"fmt"
"net/http"
"net/http/httptest"
"runtime"
"testing"
"github.com/docker/docker/api"
"github.com/docker/docker/api/server/httputils"
"gotest.tools/v3/assert"
is "gotest.tools/v3/assert/cmp"
)
func TestNewVersionMiddlewareValidation(t *testing.T) {
tests := []struct {
doc, defaultVersion, minVersion, expectedErr string
}{
{
doc: "defaults",
defaultVersion: api.DefaultVersion,
minVersion: api.MinSupportedAPIVersion,
},
{
doc: "invalid default lower than min",
defaultVersion: api.MinSupportedAPIVersion,
minVersion: api.DefaultVersion,
expectedErr: fmt.Sprintf("invalid API version: the minimum API version (%s) is higher than the default version (%s)", api.DefaultVersion, api.MinSupportedAPIVersion),
},
{
doc: "invalid default too low",
defaultVersion: "0.1",
minVersion: api.MinSupportedAPIVersion,
expectedErr: fmt.Sprintf("invalid default API version (0.1): must be between %s and %s", api.MinSupportedAPIVersion, api.DefaultVersion),
},
{
doc: "invalid default too high",
defaultVersion: "9999.9999",
minVersion: api.DefaultVersion,
expectedErr: fmt.Sprintf("invalid default API version (9999.9999): must be between %s and %s", api.MinSupportedAPIVersion, api.DefaultVersion),
},
{
doc: "invalid minimum too low",
defaultVersion: api.MinSupportedAPIVersion,
minVersion: "0.1",
expectedErr: fmt.Sprintf("invalid minimum API version (0.1): must be between %s and %s", api.MinSupportedAPIVersion, api.DefaultVersion),
},
{
doc: "invalid minimum too high",
defaultVersion: api.DefaultVersion,
minVersion: "9999.9999",
expectedErr: fmt.Sprintf("invalid minimum API version (9999.9999): must be between %s and %s", api.MinSupportedAPIVersion, api.DefaultVersion),
},
}
for _, tc := range tests {
t.Run(tc.doc, func(t *testing.T) {
_, err := NewVersionMiddleware("1.2.3", tc.defaultVersion, tc.minVersion)
if tc.expectedErr == "" {
assert.Check(t, err)
} else {
assert.Check(t, is.Error(err, tc.expectedErr))
}
})
}
}
func TestVersionMiddlewareVersion(t *testing.T) {
expectedVersion := "<not set>"
defaultVersion := "1.10.0"
minVersion := "1.2.0"
expectedVersion := defaultVersion
handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
v := httputils.VersionFromContext(ctx)
assert.Check(t, is.Equal(expectedVersion, v))
return nil
}
m, err := NewVersionMiddleware("1.2.3", api.DefaultVersion, api.MinSupportedAPIVersion)
assert.NilError(t, err)
m := NewVersionMiddleware(defaultVersion, defaultVersion, minVersion)
h := m.WrapHandler(handler)
req, _ := http.NewRequest(http.MethodGet, "/containers/json", nil)
@@ -89,19 +35,19 @@ func TestVersionMiddlewareVersion(t *testing.T) {
errString string
}{
{
expectedVersion: api.DefaultVersion,
expectedVersion: "1.10.0",
},
{
reqVersion: api.MinSupportedAPIVersion,
expectedVersion: api.MinSupportedAPIVersion,
reqVersion: "1.9.0",
expectedVersion: "1.9.0",
},
{
reqVersion: "0.1",
errString: fmt.Sprintf("client version 0.1 is too old. Minimum supported API version is %s, please upgrade your client to a newer version", api.MinSupportedAPIVersion),
errString: "client version 0.1 is too old. Minimum supported API version is 1.2.0, please upgrade your client to a newer version",
},
{
reqVersion: "9999.9999",
errString: fmt.Sprintf("client version 9999.9999 is too new. Maximum supported API version is %s", api.DefaultVersion),
errString: "client version 9999.9999 is too new. Maximum supported API version is 1.10.0",
},
}
@@ -125,8 +71,9 @@ func TestVersionMiddlewareWithErrorsReturnsHeaders(t *testing.T) {
return nil
}
m, err := NewVersionMiddleware("1.2.3", api.DefaultVersion, api.MinSupportedAPIVersion)
assert.NilError(t, err)
defaultVersion := "1.10.0"
minVersion := "1.2.0"
m := NewVersionMiddleware(defaultVersion, defaultVersion, minVersion)
h := m.WrapHandler(handler)
req, _ := http.NewRequest(http.MethodGet, "/containers/json", nil)
@@ -134,12 +81,12 @@ func TestVersionMiddlewareWithErrorsReturnsHeaders(t *testing.T) {
ctx := context.Background()
vars := map[string]string{"version": "0.1"}
err = h(ctx, resp, req, vars)
err := h(ctx, resp, req, vars)
assert.Check(t, is.ErrorContains(err, ""))
hdr := resp.Result().Header
assert.Check(t, is.Contains(hdr.Get("Server"), "Docker/1.2.3"))
assert.Check(t, is.Contains(hdr.Get("Server"), "Docker/"+defaultVersion))
assert.Check(t, is.Contains(hdr.Get("Server"), runtime.GOOS))
assert.Check(t, is.Equal(hdr.Get("Api-Version"), api.DefaultVersion))
assert.Check(t, is.Equal(hdr.Get("Ostype"), runtime.GOOS))
assert.Check(t, is.Equal(hdr.Get("API-Version"), defaultVersion))
assert.Check(t, is.Equal(hdr.Get("OSType"), runtime.GOOS))
}


@@ -3,8 +3,8 @@ package build // import "github.com/docker/docker/api/server/router/build"
import (
"context"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/api/types/build"
)
// Backend abstracts an image builder whose only purpose is to build an image referenced by an imageID.
@@ -13,8 +13,9 @@ type Backend interface {
// TODO: make this return a reference instead of string
Build(context.Context, backend.BuildConfig) (string, error)
// PruneCache prunes the build cache.
PruneCache(context.Context, build.CachePruneOptions) (*build.CachePruneReport, error)
// Prune build cache
PruneCache(context.Context, types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error)
Cancel(context.Context, string) error
}


@@ -4,36 +4,38 @@ import (
"runtime"
"github.com/docker/docker/api/server/router"
"github.com/docker/docker/api/types/build"
"github.com/docker/docker/api/types"
)
// buildRouter is a router to talk with the build controller
type buildRouter struct {
backend Backend
daemon experimentalProvider
routes []router.Route
backend Backend
daemon experimentalProvider
routes []router.Route
features *map[string]bool
}
// NewRouter initializes a new build router
func NewRouter(b Backend, d experimentalProvider) router.Router {
func NewRouter(b Backend, d experimentalProvider, features *map[string]bool) router.Router {
r := &buildRouter{
backend: b,
daemon: d,
backend: b,
daemon: d,
features: features,
}
r.initRoutes()
return r
}
// Routes returns the available routers to the build controller
func (br *buildRouter) Routes() []router.Route {
return br.routes
func (r *buildRouter) Routes() []router.Route {
return r.routes
}
func (br *buildRouter) initRoutes() {
br.routes = []router.Route{
router.NewPostRoute("/build", br.postBuild),
router.NewPostRoute("/build/prune", br.postPrune),
router.NewPostRoute("/build/cancel", br.postCancel),
func (r *buildRouter) initRoutes() {
r.routes = []router.Route{
router.NewPostRoute("/build", r.postBuild),
router.NewPostRoute("/build/prune", r.postPrune),
router.NewPostRoute("/build/cancel", r.postCancel),
}
}
@@ -46,22 +48,15 @@ func (br *buildRouter) initRoutes() {
//
// This value is only a recommendation as advertised by the daemon, and it is
// up to the client to choose which builder to use.
func BuilderVersion(features map[string]bool) build.BuilderVersion {
func BuilderVersion(features map[string]bool) types.BuilderVersion {
// TODO(thaJeztah) move the default to daemon/config
bv := build.BuilderBuildKit
if runtime.GOOS == "windows" {
// BuildKit is not yet the default on Windows.
bv = build.BuilderV1
return types.BuilderV1
}
// Allow the features field in the daemon config to override the
// default builder to advertise.
if enable, ok := features["buildkit"]; ok {
if enable {
bv = build.BuilderBuildKit
} else {
bv = build.BuilderV1
}
bv := types.BuilderBuildKit
if v, ok := features["buildkit"]; ok && !v {
bv = types.BuilderV1
}
return bv
}


@@ -13,36 +13,37 @@ import (
"strconv"
"strings"
"sync"
"syscall"
"github.com/containerd/log"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/api/types/build"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/registry"
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/progress"
"github.com/docker/docker/pkg/streamformatter"
units "github.com/docker/go-units"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
type invalidParam struct {
error
type invalidIsolationError string
func (e invalidIsolationError) Error() string {
return fmt.Sprintf("Unsupported isolation: %q", string(e))
}
func (e invalidParam) InvalidParameter() {}
func (e invalidIsolationError) InvalidParameter() {}
func newImageBuildOptions(ctx context.Context, r *http.Request) (*build.ImageBuildOptions, error) {
options := &build.ImageBuildOptions{
Version: build.BuilderV1, // Builder V1 is the default, but can be overridden
func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBuildOptions, error) {
options := &types.ImageBuildOptions{
Version: types.BuilderV1, // Builder V1 is the default, but can be overridden
Dockerfile: r.FormValue("dockerfile"),
SuppressOutput: httputils.BoolValue(r, "q"),
NoCache: httputils.BoolValue(r, "nocache"),
ForceRemove: httputils.BoolValue(r, "forcerm"),
PullParent: httputils.BoolValue(r, "pull"),
MemorySwap: httputils.Int64ValueOrZero(r, "memswap"),
Memory: httputils.Int64ValueOrZero(r, "memory"),
CPUShares: httputils.Int64ValueOrZero(r, "cpushares"),
@@ -63,27 +64,29 @@ func newImageBuildOptions(ctx context.Context, r *http.Request) (*build.ImageBui
}
if runtime.GOOS != "windows" && options.SecurityOpt != nil {
// SecurityOpt only supports "credentials-spec" on Windows, and not used on other platforms.
return nil, invalidParam{errors.New("security options are not supported on " + runtime.GOOS)}
return nil, errdefs.InvalidParameter(errors.New("The daemon on this platform does not support setting security options on build"))
}
if httputils.BoolValue(r, "forcerm") {
version := httputils.VersionFromContext(ctx)
if httputils.BoolValue(r, "forcerm") && versions.GreaterThanOrEqualTo(version, "1.12") {
options.Remove = true
} else if r.FormValue("rm") == "" {
} else if r.FormValue("rm") == "" && versions.GreaterThanOrEqualTo(version, "1.12") {
options.Remove = true
} else {
options.Remove = httputils.BoolValue(r, "rm")
}
version := httputils.VersionFromContext(ctx)
if httputils.BoolValue(r, "pull") && versions.GreaterThanOrEqualTo(version, "1.16") {
options.PullParent = true
}
if versions.GreaterThanOrEqualTo(version, "1.32") {
options.Platform = r.FormValue("platform")
}
if versions.GreaterThanOrEqualTo(version, "1.40") {
outputsJSON := r.FormValue("outputs")
if outputsJSON != "" {
var outputs []build.ImageBuildOutput
var outputs []types.ImageBuildOutput
if err := json.Unmarshal([]byte(outputsJSON), &outputs); err != nil {
return nil, invalidParam{errors.Wrap(err, "invalid outputs specified")}
return nil, err
}
options.Outputs = outputs
}
@@ -100,14 +103,14 @@ func newImageBuildOptions(ctx context.Context, r *http.Request) (*build.ImageBui
if i := r.FormValue("isolation"); i != "" {
options.Isolation = container.Isolation(i)
if !options.Isolation.IsValid() {
return nil, invalidParam{errors.Errorf("unsupported isolation: %q", i)}
return nil, invalidIsolationError(options.Isolation)
}
}
if ulimitsJSON := r.FormValue("ulimits"); ulimitsJSON != "" {
buildUlimits := []*container.Ulimit{}
var buildUlimits = []*units.Ulimit{}
if err := json.Unmarshal([]byte(ulimitsJSON), &buildUlimits); err != nil {
return nil, invalidParam{errors.Wrap(err, "error reading ulimit settings")}
return nil, errors.Wrap(errdefs.InvalidParameter(err), "error reading ulimit settings")
}
options.Ulimits = buildUlimits
}
@@ -125,25 +128,25 @@ func newImageBuildOptions(ctx context.Context, r *http.Request) (*build.ImageBui
// so that it can print a warning about "foo" being unused if there is
// no "ARG foo" in the Dockerfile.
if buildArgsJSON := r.FormValue("buildargs"); buildArgsJSON != "" {
buildArgs := map[string]*string{}
var buildArgs = map[string]*string{}
if err := json.Unmarshal([]byte(buildArgsJSON), &buildArgs); err != nil {
return nil, invalidParam{errors.Wrap(err, "error reading build args")}
return nil, errors.Wrap(errdefs.InvalidParameter(err), "error reading build args")
}
options.BuildArgs = buildArgs
}
if labelsJSON := r.FormValue("labels"); labelsJSON != "" {
labels := map[string]string{}
var labels = map[string]string{}
if err := json.Unmarshal([]byte(labelsJSON), &labels); err != nil {
return nil, invalidParam{errors.Wrap(err, "error reading labels")}
return nil, errors.Wrap(errdefs.InvalidParameter(err), "error reading labels")
}
options.Labels = labels
}
if cacheFromJSON := r.FormValue("cachefrom"); cacheFromJSON != "" {
cacheFrom := []string{}
var cacheFrom = []string{}
if err := json.Unmarshal([]byte(cacheFromJSON), &cacheFrom); err != nil {
return nil, invalidParam{errors.Wrap(err, "error reading cache-from")}
return nil, err
}
options.CacheFrom = cacheFrom
}
@@ -159,14 +162,14 @@ func newImageBuildOptions(ctx context.Context, r *http.Request) (*build.ImageBui
return options, nil
}
func parseVersion(s string) (build.BuilderVersion, error) {
switch build.BuilderVersion(s) {
case build.BuilderV1:
return build.BuilderV1, nil
case build.BuilderBuildKit:
return build.BuilderBuildKit, nil
func parseVersion(s string) (types.BuilderVersion, error) {
switch types.BuilderVersion(s) {
case types.BuilderV1:
return types.BuilderV1, nil
case types.BuilderBuildKit:
return types.BuilderBuildKit, nil
default:
return "", invalidParam{errors.Errorf("invalid version %q", s)}
return "", errors.Errorf("invalid version %q", s)
}
}
@@ -176,58 +179,21 @@ func (br *buildRouter) postPrune(ctx context.Context, w http.ResponseWriter, r *
}
fltrs, err := filters.FromJSON(r.Form.Get("filters"))
if err != nil {
return err
return errors.Wrap(err, "could not parse filters")
}
ksfv := r.FormValue("keep-storage")
if ksfv == "" {
ksfv = "0"
}
ks, err := strconv.Atoi(ksfv)
if err != nil {
return errors.Wrapf(err, "keep-storage is in bytes and expects an integer, got %v", ksfv)
}
opts := build.CachePruneOptions{
All: httputils.BoolValue(r, "all"),
Filters: fltrs,
}
parseBytesFromFormValue := func(name string) (int64, error) {
if fv := r.FormValue(name); fv != "" {
bs, err := strconv.Atoi(fv)
if err != nil {
return 0, invalidParam{errors.Wrapf(err, "%s is in bytes and expects an integer, got %v", name, fv)}
}
return int64(bs), nil
}
return 0, nil
}
version := httputils.VersionFromContext(ctx)
if versions.GreaterThanOrEqualTo(version, "1.48") {
if bs, err := parseBytesFromFormValue("reserved-space"); err != nil {
return err
} else {
if bs == 0 {
// Deprecated parameter. Only checked if reserved-space is not used.
bs, err = parseBytesFromFormValue("keep-storage")
if err != nil {
return err
}
}
opts.ReservedSpace = bs
}
if bs, err := parseBytesFromFormValue("max-used-space"); err != nil {
return err
} else {
opts.MaxUsedSpace = bs
}
if bs, err := parseBytesFromFormValue("min-free-space"); err != nil {
return err
} else {
opts.MinFreeSpace = bs
}
} else {
// Only keep-storage was valid in pre-1.48 versions.
if bs, err := parseBytesFromFormValue("keep-storage"); err != nil {
return err
} else {
opts.ReservedSpace = bs
}
opts := types.BuildCachePruneOptions{
All: httputils.BoolValue(r, "all"),
Filters: fltrs,
KeepStorage: int64(ks),
}
report, err := br.backend.PruneCache(ctx, opts)
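The two sides of this hunk read the prune limits differently: the newer handler accepts `reserved-space` (plus `max-used-space`/`min-free-space`) on API 1.48+ and only falls back to the deprecated `keep-storage` when `reserved-space` is absent, while the older handler parses `keep-storage` alone into `KeepStorage`. A minimal client-side sketch, assuming the handler is mounted at /build/prune and ignoring any /vN.NN version prefix:

```go
package pruneexample

import (
	"context"
	"fmt"
	"net/http"
	"net/url"
	"strconv"
)

// buildCachePrune drives the prune handler purely through URL form values.
// Sending both "reserved-space" and the deprecated "keep-storage" keeps the
// request compatible with daemons on either side of the 1.48 boundary.
func buildCachePrune(ctx context.Context, c *http.Client, host string, reservedBytes int64, all bool) error {
	q := url.Values{}
	q.Set("all", strconv.FormatBool(all))
	q.Set("reserved-space", strconv.FormatInt(reservedBytes, 10))
	q.Set("keep-storage", strconv.FormatInt(reservedBytes, 10))

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, host+"/build/prune?"+q.Encode(), nil)
	if err != nil {
		return err
	}
	resp, err := c.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("prune failed: %s", resp.Status)
	}
	return nil
}
```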
@@ -242,7 +208,7 @@ func (br *buildRouter) postCancel(ctx context.Context, w http.ResponseWriter, r
id := r.FormValue("id")
if id == "" {
return invalidParam{errors.New("build ID not provided")}
return errors.Errorf("build ID not provided")
}
return br.backend.Cancel(ctx, id)
@@ -282,9 +248,8 @@ func (br *buildRouter) postBuild(ctx context.Context, w http.ResponseWriter, r *
return err
}
_, err = output.Write(streamformatter.FormatError(err))
// don't log broken pipe errors as this is the normal case when a client aborts.
if err != nil && !errors.Is(err, syscall.EPIPE) {
log.G(ctx).WithError(err).Warn("could not write error response")
if err != nil {
logrus.Warnf("could not write error response: %v", err)
}
return nil
}
@@ -296,7 +261,7 @@ func (br *buildRouter) postBuild(ctx context.Context, w http.ResponseWriter, r *
buildOptions.AuthConfigs = getAuthConfigs(r.Header)
if buildOptions.Squash && !br.daemon.HasExperimental() {
return invalidParam{errors.New("squash is only supported with experimental mode")}
return errdefs.InvalidParameter(errors.New("squash is only supported with experimental mode"))
}
out := io.Writer(output)
@@ -319,9 +284,6 @@ func (br *buildRouter) postBuild(ctx context.Context, w http.ResponseWriter, r *
ProgressWriter: buildProgressWriter(out, wantAux, createProgressReader),
})
if err != nil {
if errors.Is(err, context.Canceled) {
log.G(ctx).Debug("build canceled")
}
return errf(err)
}
@@ -333,8 +295,8 @@ func (br *buildRouter) postBuild(ctx context.Context, w http.ResponseWriter, r *
return nil
}
func getAuthConfigs(header http.Header) map[string]registry.AuthConfig {
authConfigs := map[string]registry.AuthConfig{}
func getAuthConfigs(header http.Header) map[string]types.AuthConfig {
authConfigs := map[string]types.AuthConfig{}
authConfigsEncoded := header.Get("X-Registry-Config")
if authConfigsEncoded == "" {
@@ -353,14 +315,14 @@ type syncWriter struct {
mu sync.Mutex
}
func (s *syncWriter) Write(b []byte) (int, error) {
func (s *syncWriter) Write(b []byte) (count int, err error) {
s.mu.Lock()
defer s.mu.Unlock()
return s.w.Write(b)
count, err = s.w.Write(b)
s.mu.Unlock()
return
}
func buildProgressWriter(out io.Writer, wantAux bool, createProgressReader func(io.ReadCloser) io.ReadCloser) backend.ProgressWriter {
// see https://github.com/moby/moby/pull/21406
out = &syncWriter{w: out}
var aux *streamformatter.AuxFormatter
@@ -381,12 +343,8 @@ type flusher interface {
Flush()
}
type nopFlusher struct{}
func (f *nopFlusher) Flush() {}
func wrapOutputBufferedUntilRequestRead(rc io.ReadCloser, out io.Writer) (io.ReadCloser, io.Writer) {
var fl flusher = &nopFlusher{}
var fl flusher = &ioutils.NopFlusher{}
if f, ok := out.(flusher); ok {
fl = f
}
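Several of the options decoded above ("buildargs", "labels", "cachefrom", and likewise "ulimits" and "outputs") arrive as JSON strings inside individual query parameters rather than in a request body. A minimal sketch of producing that query on the client side; the parameter names come from the handler, while the POST /build destination and any version prefix are assumptions:

```go
package buildexample

import (
	"encoding/json"
	"net/url"
)

// buildQuery JSON-encodes structured build options into the flat query
// parameters that newImageBuildOptions unmarshals on the server side.
func buildQuery(buildArgs map[string]*string, labels map[string]string, cacheFrom []string) (url.Values, error) {
	q := url.Values{}
	for name, v := range map[string]any{
		"buildargs": buildArgs,
		"labels":    labels,
		"cachefrom": cacheFrom,
	} {
		b, err := json.Marshal(v)
		if err != nil {
			return nil, err
		}
		q.Set(name, string(b))
	}
	// The encoded query is appended to POST /build, with the tar build
	// context sent as the request body.
	return q, nil
}
```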

View File

@@ -1,10 +1,10 @@
package checkpoint // import "github.com/docker/docker/api/server/router/checkpoint"
import "github.com/docker/docker/api/types/checkpoint"
import "github.com/docker/docker/api/types"
// Backend for Checkpoint
type Backend interface {
CheckpointCreate(container string, config checkpoint.CreateOptions) error
CheckpointDelete(container string, config checkpoint.DeleteOptions) error
CheckpointList(container string, config checkpoint.ListOptions) ([]checkpoint.Summary, error)
CheckpointCreate(container string, config types.CheckpointCreateOptions) error
CheckpointDelete(container string, config types.CheckpointDeleteOptions) error
CheckpointList(container string, config types.CheckpointListOptions) ([]types.Checkpoint, error)
}

View File

@@ -23,14 +23,14 @@ func NewRouter(b Backend, decoder httputils.ContainerDecoder) router.Router {
}
// Routes returns the available routers to the checkpoint controller
func (cr *checkpointRouter) Routes() []router.Route {
return cr.routes
func (r *checkpointRouter) Routes() []router.Route {
return r.routes
}
func (cr *checkpointRouter) initRoutes() {
cr.routes = []router.Route{
router.NewGetRoute("/containers/{name:.*}/checkpoints", cr.getContainerCheckpoints, router.Experimental),
router.NewPostRoute("/containers/{name:.*}/checkpoints", cr.postContainerCheckpoint, router.Experimental),
router.NewDeleteRoute("/containers/{name}/checkpoints/{checkpoint}", cr.deleteContainerCheckpoint, router.Experimental),
func (r *checkpointRouter) initRoutes() {
r.routes = []router.Route{
router.NewGetRoute("/containers/{name:.*}/checkpoints", r.getContainerCheckpoints, router.Experimental),
router.NewPostRoute("/containers/{name:.*}/checkpoints", r.postContainerCheckpoint, router.Experimental),
router.NewDeleteRoute("/containers/{name}/checkpoints/{checkpoint}", r.deleteContainerCheckpoint, router.Experimental),
}
}

View File

@@ -5,20 +5,20 @@ import (
"net/http"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/types/checkpoint"
"github.com/docker/docker/api/types"
)
func (cr *checkpointRouter) postContainerCheckpoint(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
func (s *checkpointRouter) postContainerCheckpoint(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
var options checkpoint.CreateOptions
var options types.CheckpointCreateOptions
if err := httputils.ReadJSON(r, &options); err != nil {
return err
}
err := cr.backend.CheckpointCreate(vars["name"], options)
err := s.backend.CheckpointCreate(vars["name"], options)
if err != nil {
return err
}
@@ -27,14 +27,15 @@ func (cr *checkpointRouter) postContainerCheckpoint(ctx context.Context, w http.
return nil
}
func (cr *checkpointRouter) getContainerCheckpoints(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
func (s *checkpointRouter) getContainerCheckpoints(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
checkpoints, err := cr.backend.CheckpointList(vars["name"], checkpoint.ListOptions{
checkpoints, err := s.backend.CheckpointList(vars["name"], types.CheckpointListOptions{
CheckpointDir: r.Form.Get("dir"),
})
if err != nil {
return err
}
@@ -42,15 +43,16 @@ func (cr *checkpointRouter) getContainerCheckpoints(ctx context.Context, w http.
return httputils.WriteJSON(w, http.StatusOK, checkpoints)
}
func (cr *checkpointRouter) deleteContainerCheckpoint(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
func (s *checkpointRouter) deleteContainerCheckpoint(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
err := cr.backend.CheckpointDelete(vars["name"], checkpoint.DeleteOptions{
err := s.backend.CheckpointDelete(vars["name"], types.CheckpointDeleteOptions{
CheckpointDir: r.Form.Get("dir"),
CheckpointID: vars["checkpoint"],
})
if err != nil {
return err
}
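The checkpoint handlers above read JSON options from the request body (create) or a `dir` form value (list/delete), keyed by the container name in the path. A sketch of the create call against the experimental route registered earlier; the option field names mirror the create-options struct used by the handler, while the host value and any API-version prefix are assumptions:

```go
package checkpointexample

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// createCheckpoint posts create options to the experimental checkpoint route.
// Exit:false asks the daemon to keep the container running after the
// checkpoint has been written.
func createCheckpoint(c *http.Client, host, container, checkpointID string) error {
	body, err := json.Marshal(map[string]any{
		"CheckpointID": checkpointID,
		"Exit":         false,
	})
	if err != nil {
		return err
	}
	resp, err := c.Post(host+"/containers/"+container+"/checkpoints", "application/json", bytes.NewReader(body))
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 300 {
		return fmt.Errorf("checkpoint create failed: %s", resp.Status)
	}
	return nil
}
```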

View File

@@ -4,53 +4,57 @@ import (
"context"
"io"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/moby/go-archive"
containerpkg "github.com/docker/docker/container"
"github.com/docker/docker/pkg/archive"
)
// execBackend includes functions to implement to provide exec functionality.
type execBackend interface {
ContainerExecCreate(name string, options *container.ExecOptions) (string, error)
ContainerExecCreate(name string, config *types.ExecConfig) (string, error)
ContainerExecInspect(id string) (*backend.ExecInspect, error)
ContainerExecResize(ctx context.Context, name string, height, width uint32) error
ContainerExecStart(ctx context.Context, name string, options backend.ExecStartConfig) error
ContainerExecResize(name string, height, width int) error
ContainerExecStart(ctx context.Context, name string, options container.ExecStartOptions) error
ExecExists(name string) (bool, error)
}
// copyBackend includes functions to implement to provide container copy functionality.
type copyBackend interface {
ContainerArchivePath(name string, path string) (content io.ReadCloser, stat *container.PathStat, err error)
ContainerExport(ctx context.Context, name string, out io.Writer) error
ContainerArchivePath(name string, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error)
ContainerCopy(name string, res string) (io.ReadCloser, error)
ContainerExport(name string, out io.Writer) error
ContainerExtractToDir(name, path string, copyUIDGID, noOverwriteDirNonDir bool, content io.Reader) error
ContainerStatPath(name string, path string) (stat *container.PathStat, err error)
ContainerStatPath(name string, path string) (stat *types.ContainerPathStat, err error)
}
// stateBackend includes functions to implement to provide container state lifecycle functionality.
type stateBackend interface {
ContainerCreate(ctx context.Context, config backend.ContainerCreateConfig) (container.CreateResponse, error)
ContainerCreate(config types.ContainerCreateConfig) (container.CreateResponse, error)
ContainerKill(name string, signal string) error
ContainerPause(name string) error
ContainerRename(oldName, newName string) error
ContainerResize(ctx context.Context, name string, height, width uint32) error
ContainerResize(name string, height, width int) error
ContainerRestart(ctx context.Context, name string, options container.StopOptions) error
ContainerRm(name string, config *backend.ContainerRmConfig) error
ContainerStart(ctx context.Context, name string, checkpoint string, checkpointDir string) error
ContainerRm(name string, config *types.ContainerRmConfig) error
ContainerStart(name string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error
ContainerStop(ctx context.Context, name string, options container.StopOptions) error
ContainerUnpause(name string) error
ContainerUpdate(name string, hostConfig *container.HostConfig) (container.UpdateResponse, error)
ContainerWait(ctx context.Context, name string, condition container.WaitCondition) (<-chan container.StateStatus, error)
ContainerUpdate(name string, hostConfig *container.HostConfig) (container.ContainerUpdateOKBody, error)
ContainerWait(ctx context.Context, name string, condition containerpkg.WaitCondition) (<-chan containerpkg.StateStatus, error)
}
// monitorBackend includes functions to implement to provide containers monitoring functionality.
type monitorBackend interface {
ContainerChanges(ctx context.Context, name string) ([]archive.Change, error)
ContainerInspect(ctx context.Context, name string, options backend.ContainerInspectOptions) (*container.InspectResponse, error)
ContainerLogs(ctx context.Context, name string, config *container.LogsOptions) (msgs <-chan *backend.LogMessage, tty bool, err error)
ContainerChanges(name string) ([]archive.Change, error)
ContainerInspect(name string, size bool, version string) (interface{}, error)
ContainerLogs(ctx context.Context, name string, config *types.ContainerLogsOptions) (msgs <-chan *backend.LogMessage, tty bool, err error)
ContainerStats(ctx context.Context, name string, config *backend.ContainerStatsConfig) error
ContainerTop(name string, psArgs string) (*container.TopResponse, error)
Containers(ctx context.Context, config *container.ListOptions) ([]*container.Summary, error)
ContainerTop(name string, psArgs string) (*container.ContainerTopOKBody, error)
Containers(config *types.ContainerListOptions) ([]*types.Container, error)
}
// attachBackend includes function to implement to provide container attaching functionality.
@@ -60,11 +64,11 @@ type attachBackend interface {
// systemBackend includes functions to implement to provide system wide containers functionality
type systemBackend interface {
ContainersPrune(ctx context.Context, pruneFilters filters.Args) (*container.PruneReport, error)
ContainersPrune(ctx context.Context, pruneFilters filters.Args) (*types.ContainersPruneReport, error)
}
type commitBackend interface {
CreateImageFromContainer(ctx context.Context, name string, config *backend.CreateImageConfig) (imageID string, err error)
CreateImageFromContainer(name string, config *backend.CreateImageConfig) (imageID string, err error)
}
// Backend is all the methods that need to be implemented to provide container specific functionality.
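The hunk is cut off before the aggregate interface that this comment introduces. In the moby tree, `Backend` simply embeds the role-specific interfaces defined in this file; the sketch below shows that composition with empty stand-in interfaces so it builds on its own (the real method sets are the ones in the diff above):

```go
package container

// Stand-ins so the sketch builds standalone; see the diff above for the
// real method sets.
type (
	commitBackend  interface{}
	execBackend    interface{}
	copyBackend    interface{}
	stateBackend   interface{}
	monitorBackend interface{}
	attachBackend  interface{}
	systemBackend  interface{}
)

// Backend is the single dependency the container router is constructed with;
// each handler file then relies only on the narrow slice of behaviour it needs.
type Backend interface {
	commitBackend
	execBackend
	copyBackend
	stateBackend
	monitorBackend
	attachBackend
	systemBackend
}
```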

View File

@@ -25,47 +25,48 @@ func NewRouter(b Backend, decoder httputils.ContainerDecoder, cgroup2 bool) rout
}
// Routes returns the available routes to the container controller
func (c *containerRouter) Routes() []router.Route {
return c.routes
func (r *containerRouter) Routes() []router.Route {
return r.routes
}
// initRoutes initializes the routes in container router
func (c *containerRouter) initRoutes() {
c.routes = []router.Route{
func (r *containerRouter) initRoutes() {
r.routes = []router.Route{
// HEAD
router.NewHeadRoute("/containers/{name:.*}/archive", c.headContainersArchive),
router.NewHeadRoute("/containers/{name:.*}/archive", r.headContainersArchive),
// GET
router.NewGetRoute("/containers/json", c.getContainersJSON),
router.NewGetRoute("/containers/{name:.*}/export", c.getContainersExport),
router.NewGetRoute("/containers/{name:.*}/changes", c.getContainersChanges),
router.NewGetRoute("/containers/{name:.*}/json", c.getContainersByName),
router.NewGetRoute("/containers/{name:.*}/top", c.getContainersTop),
router.NewGetRoute("/containers/{name:.*}/logs", c.getContainersLogs),
router.NewGetRoute("/containers/{name:.*}/stats", c.getContainersStats),
router.NewGetRoute("/containers/{name:.*}/attach/ws", c.wsContainersAttach),
router.NewGetRoute("/exec/{id:.*}/json", c.getExecByID),
router.NewGetRoute("/containers/{name:.*}/archive", c.getContainersArchive),
router.NewGetRoute("/containers/json", r.getContainersJSON),
router.NewGetRoute("/containers/{name:.*}/export", r.getContainersExport),
router.NewGetRoute("/containers/{name:.*}/changes", r.getContainersChanges),
router.NewGetRoute("/containers/{name:.*}/json", r.getContainersByName),
router.NewGetRoute("/containers/{name:.*}/top", r.getContainersTop),
router.NewGetRoute("/containers/{name:.*}/logs", r.getContainersLogs),
router.NewGetRoute("/containers/{name:.*}/stats", r.getContainersStats),
router.NewGetRoute("/containers/{name:.*}/attach/ws", r.wsContainersAttach),
router.NewGetRoute("/exec/{id:.*}/json", r.getExecByID),
router.NewGetRoute("/containers/{name:.*}/archive", r.getContainersArchive),
// POST
router.NewPostRoute("/containers/create", c.postContainersCreate),
router.NewPostRoute("/containers/{name:.*}/kill", c.postContainersKill),
router.NewPostRoute("/containers/{name:.*}/pause", c.postContainersPause),
router.NewPostRoute("/containers/{name:.*}/unpause", c.postContainersUnpause),
router.NewPostRoute("/containers/{name:.*}/restart", c.postContainersRestart),
router.NewPostRoute("/containers/{name:.*}/start", c.postContainersStart),
router.NewPostRoute("/containers/{name:.*}/stop", c.postContainersStop),
router.NewPostRoute("/containers/{name:.*}/wait", c.postContainersWait),
router.NewPostRoute("/containers/{name:.*}/resize", c.postContainersResize),
router.NewPostRoute("/containers/{name:.*}/attach", c.postContainersAttach),
router.NewPostRoute("/containers/{name:.*}/exec", c.postContainerExecCreate),
router.NewPostRoute("/exec/{name:.*}/start", c.postContainerExecStart),
router.NewPostRoute("/exec/{name:.*}/resize", c.postContainerExecResize),
router.NewPostRoute("/containers/{name:.*}/rename", c.postContainerRename),
router.NewPostRoute("/containers/{name:.*}/update", c.postContainerUpdate),
router.NewPostRoute("/containers/prune", c.postContainersPrune),
router.NewPostRoute("/commit", c.postCommit),
router.NewPostRoute("/containers/create", r.postContainersCreate),
router.NewPostRoute("/containers/{name:.*}/kill", r.postContainersKill),
router.NewPostRoute("/containers/{name:.*}/pause", r.postContainersPause),
router.NewPostRoute("/containers/{name:.*}/unpause", r.postContainersUnpause),
router.NewPostRoute("/containers/{name:.*}/restart", r.postContainersRestart),
router.NewPostRoute("/containers/{name:.*}/start", r.postContainersStart),
router.NewPostRoute("/containers/{name:.*}/stop", r.postContainersStop),
router.NewPostRoute("/containers/{name:.*}/wait", r.postContainersWait),
router.NewPostRoute("/containers/{name:.*}/resize", r.postContainersResize),
router.NewPostRoute("/containers/{name:.*}/attach", r.postContainersAttach),
router.NewPostRoute("/containers/{name:.*}/copy", r.postContainersCopy), // Deprecated since 1.8 (API v1.20), errors out since 1.12 (API v1.24)
router.NewPostRoute("/containers/{name:.*}/exec", r.postContainerExecCreate),
router.NewPostRoute("/exec/{name:.*}/start", r.postContainerExecStart),
router.NewPostRoute("/exec/{name:.*}/resize", r.postContainerExecResize),
router.NewPostRoute("/containers/{name:.*}/rename", r.postContainerRename),
router.NewPostRoute("/containers/{name:.*}/update", r.postContainerUpdate),
router.NewPostRoute("/containers/prune", r.postContainersPrune),
router.NewPostRoute("/commit", r.postCommit),
// PUT
router.NewPutRoute("/containers/{name:.*}/archive", c.putContainersArchive),
router.NewPutRoute("/containers/{name:.*}/archive", r.putContainersArchive),
// DELETE
router.NewDeleteRoute("/containers/{name:.*}", c.deleteContainers),
router.NewDeleteRoute("/containers/{name:.*}", r.deleteContainers),
}
}

File diff suppressed because it is too large

View File

@@ -1,350 +0,0 @@
package container
import (
"strings"
"testing"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/libnetwork/netlabel"
"gotest.tools/v3/assert"
is "gotest.tools/v3/assert/cmp"
)
func TestHandleMACAddressBC(t *testing.T) {
testcases := []struct {
name string
apiVersion string
ctrWideMAC string
networkMode container.NetworkMode
epConfig map[string]*network.EndpointSettings
expEpWithCtrWideMAC string
expEpWithNoMAC string
expCtrWideMAC string
expWarning string
expError string
}{
{
name: "old api ctr-wide mac mix id and name",
apiVersion: "1.43",
ctrWideMAC: "11:22:33:44:55:66",
networkMode: "aNetId",
epConfig: map[string]*network.EndpointSettings{"aNetName": {}},
expEpWithCtrWideMAC: "aNetName",
expCtrWideMAC: "11:22:33:44:55:66",
},
{
name: "old api clear ep mac",
apiVersion: "1.43",
networkMode: "aNetId",
epConfig: map[string]*network.EndpointSettings{"aNetName": {MacAddress: "11:22:33:44:55:66"}},
expEpWithNoMAC: "aNetName",
},
{
name: "old api no-network ctr-wide mac",
apiVersion: "1.43",
networkMode: "none",
ctrWideMAC: "11:22:33:44:55:66",
expError: "conflicting options: mac-address and the network mode",
expCtrWideMAC: "11:22:33:44:55:66",
},
{
name: "old api create ep",
apiVersion: "1.43",
networkMode: "aNetId",
ctrWideMAC: "11:22:33:44:55:66",
epConfig: map[string]*network.EndpointSettings{},
expEpWithCtrWideMAC: "aNetId",
expCtrWideMAC: "11:22:33:44:55:66",
},
{
name: "old api migrate ctr-wide mac",
apiVersion: "1.43",
ctrWideMAC: "11:22:33:44:55:66",
networkMode: "aNetName",
epConfig: map[string]*network.EndpointSettings{"aNetName": {}},
expEpWithCtrWideMAC: "aNetName",
expCtrWideMAC: "11:22:33:44:55:66",
},
{
name: "new api no macs",
apiVersion: "1.44",
networkMode: "aNetId",
epConfig: map[string]*network.EndpointSettings{"aNetName": {}},
},
{
name: "new api ep specific mac",
apiVersion: "1.44",
networkMode: "aNetName",
epConfig: map[string]*network.EndpointSettings{"aNetName": {MacAddress: "11:22:33:44:55:66"}},
},
{
name: "new api migrate ctr-wide mac to new ep",
apiVersion: "1.44",
ctrWideMAC: "11:22:33:44:55:66",
networkMode: "aNetName",
epConfig: map[string]*network.EndpointSettings{},
expEpWithCtrWideMAC: "aNetName",
expWarning: "The container-wide MacAddress field is now deprecated",
expCtrWideMAC: "",
},
{
name: "new api migrate ctr-wide mac to existing ep",
apiVersion: "1.44",
ctrWideMAC: "11:22:33:44:55:66",
networkMode: "aNetName",
epConfig: map[string]*network.EndpointSettings{"aNetName": {}},
expEpWithCtrWideMAC: "aNetName",
expWarning: "The container-wide MacAddress field is now deprecated",
expCtrWideMAC: "",
},
{
name: "new api mode vs name mismatch",
apiVersion: "1.44",
ctrWideMAC: "11:22:33:44:55:66",
networkMode: "aNetId",
epConfig: map[string]*network.EndpointSettings{"aNetName": {}},
expError: "unable to migrate container-wide MAC address to a specific network: HostConfig.NetworkMode must match the identity of a network in NetworkSettings.Networks",
expCtrWideMAC: "11:22:33:44:55:66",
},
{
name: "new api mac mismatch",
apiVersion: "1.44",
ctrWideMAC: "11:22:33:44:55:66",
networkMode: "aNetName",
epConfig: map[string]*network.EndpointSettings{"aNetName": {MacAddress: "00:11:22:33:44:55"}},
expError: "the container-wide MAC address must match the endpoint-specific MAC address",
expCtrWideMAC: "11:22:33:44:55:66",
},
}
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
cfg := &container.Config{
MacAddress: tc.ctrWideMAC, //nolint:staticcheck // ignore SA1019: field is deprecated, but still used on API < v1.44.
}
hostCfg := &container.HostConfig{
NetworkMode: tc.networkMode,
}
epConfig := make(map[string]*network.EndpointSettings, len(tc.epConfig))
for k, v := range tc.epConfig {
v := *v
epConfig[k] = &v
}
netCfg := &network.NetworkingConfig{
EndpointsConfig: epConfig,
}
warning, err := handleMACAddressBC(cfg, hostCfg, netCfg, tc.apiVersion)
if tc.expError == "" {
assert.Check(t, err)
} else {
assert.Check(t, is.ErrorContains(err, tc.expError))
}
if tc.expWarning == "" {
assert.Check(t, is.Equal(warning, ""))
} else {
assert.Check(t, is.Contains(warning, tc.expWarning))
}
if tc.expEpWithCtrWideMAC != "" {
got := netCfg.EndpointsConfig[tc.expEpWithCtrWideMAC].MacAddress
assert.Check(t, is.Equal(got, tc.ctrWideMAC))
}
if tc.expEpWithNoMAC != "" {
got := netCfg.EndpointsConfig[tc.expEpWithNoMAC].MacAddress
assert.Check(t, is.Equal(got, ""))
}
gotCtrWideMAC := cfg.MacAddress //nolint:staticcheck // ignore SA1019: field is deprecated, but still used on API < v1.44.
assert.Check(t, is.Equal(gotCtrWideMAC, tc.expCtrWideMAC))
})
}
}
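The table above pins down the container-wide MacAddress deprecation: API < 1.44 clients set it on Config (and the handler migrates it onto the matching endpoint), while API >= 1.44 clients are expected to set the MAC per endpoint, with the container-wide field surviving only as a warned-about migration path. A sketch of the two request shapes, using the same types the test imports:

```go
package macexample

import (
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/network"
)

// legacyRequest is the API < v1.44 shape: the MAC lives on the container-wide
// (now deprecated) field and is migrated onto the endpoint by the handler.
func legacyRequest() (*container.Config, *network.NetworkingConfig) {
	cfg := &container.Config{
		MacAddress: "11:22:33:44:55:66", //nolint:staticcheck // deprecated; honoured only on API < v1.44
	}
	netCfg := &network.NetworkingConfig{
		EndpointsConfig: map[string]*network.EndpointSettings{"mynet": {}},
	}
	return cfg, netCfg
}

// currentRequest is the API >= v1.44 shape: the MAC is set per endpoint.
func currentRequest() (*container.Config, *network.NetworkingConfig) {
	cfg := &container.Config{}
	netCfg := &network.NetworkingConfig{
		EndpointsConfig: map[string]*network.EndpointSettings{
			"mynet": {MacAddress: "11:22:33:44:55:66"},
		},
	}
	return cfg, netCfg
}
```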
func TestEpConfigForNetMode(t *testing.T) {
testcases := []struct {
name string
apiVersion string
networkMode string
epConfig map[string]*network.EndpointSettings
expEpId string
expNumEps int
expError bool
}{
{
name: "old api no eps",
apiVersion: "1.43",
networkMode: "mynet",
expNumEps: 1,
},
{
name: "new api no eps",
apiVersion: "1.44",
networkMode: "mynet",
expNumEps: 1,
},
{
name: "old api with ep",
apiVersion: "1.43",
networkMode: "mynet",
epConfig: map[string]*network.EndpointSettings{
"anything": {EndpointID: "epone"},
},
expEpId: "epone",
expNumEps: 1,
},
{
name: "new api with matching ep",
apiVersion: "1.44",
networkMode: "mynet",
epConfig: map[string]*network.EndpointSettings{
"mynet": {EndpointID: "epone"},
},
expEpId: "epone",
expNumEps: 1,
},
{
name: "new api with mismatched ep",
apiVersion: "1.44",
networkMode: "mynet",
epConfig: map[string]*network.EndpointSettings{
"shortid": {EndpointID: "epone"},
},
expError: true,
},
}
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
netConfig := &network.NetworkingConfig{
EndpointsConfig: tc.epConfig,
}
ep, err := epConfigForNetMode(tc.apiVersion, container.NetworkMode(tc.networkMode), netConfig)
if tc.expError {
assert.Check(t, is.ErrorContains(err, "HostConfig.NetworkMode must match the identity of a network in NetworkSettings.Networks"))
} else {
assert.Assert(t, err)
assert.Check(t, is.Equal(ep.EndpointID, tc.expEpId))
assert.Check(t, is.Len(netConfig.EndpointsConfig, tc.expNumEps))
}
})
}
}
func TestHandleSysctlBC(t *testing.T) {
testcases := []struct {
name string
apiVersion string
networkMode string
sysctls map[string]string
epConfig map[string]*network.EndpointSettings
expEpSysctls []string
expSysctls map[string]string
expWarningContains []string
expError string
}{
{
name: "migrate to new ep",
apiVersion: "1.46",
networkMode: "mynet",
sysctls: map[string]string{
"net.ipv6.conf.all.disable_ipv6": "0",
"net.ipv6.conf.eth0.accept_ra": "2",
"net.ipv6.conf.eth0.forwarding": "1",
},
expSysctls: map[string]string{
"net.ipv6.conf.all.disable_ipv6": "0",
},
expEpSysctls: []string{"net.ipv6.conf.IFNAME.forwarding=1", "net.ipv6.conf.IFNAME.accept_ra=2"},
expWarningContains: []string{
"Migrated",
"net.ipv6.conf.eth0.accept_ra", "net.ipv6.conf.IFNAME.accept_ra=2",
"net.ipv6.conf.eth0.forwarding", "net.ipv6.conf.IFNAME.forwarding=1",
},
},
{
name: "migrate nothing",
apiVersion: "1.46",
networkMode: "mynet",
sysctls: map[string]string{
"net.ipv6.conf.all.disable_ipv6": "0",
},
expSysctls: map[string]string{
"net.ipv6.conf.all.disable_ipv6": "0",
},
},
{
name: "migration disabled for newer api",
apiVersion: "1.48",
networkMode: "mynet",
sysctls: map[string]string{
"net.ipv6.conf.eth0.accept_ra": "2",
},
expError: "must be supplied using driver option 'com.docker.network.endpoint.sysctls'",
},
{
name: "only migrate eth0",
apiVersion: "1.46",
networkMode: "mynet",
sysctls: map[string]string{
"net.ipv6.conf.eth1.accept_ra": "2",
},
expError: "unable to determine network endpoint",
},
{
name: "net name mismatch",
apiVersion: "1.46",
networkMode: "mynet",
epConfig: map[string]*network.EndpointSettings{
"shortid": {EndpointID: "epone"},
},
sysctls: map[string]string{
"net.ipv6.conf.eth1.accept_ra": "2",
},
expError: "unable to find a network for sysctl",
},
}
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
hostCfg := &container.HostConfig{
NetworkMode: container.NetworkMode(tc.networkMode),
Sysctls: map[string]string{},
}
for k, v := range tc.sysctls {
hostCfg.Sysctls[k] = v
}
netCfg := &network.NetworkingConfig{
EndpointsConfig: tc.epConfig,
}
warnings, err := handleSysctlBC(hostCfg, netCfg, tc.apiVersion)
for _, s := range tc.expWarningContains {
assert.Check(t, is.Contains(warnings, s))
}
if tc.expError != "" {
assert.Check(t, is.ErrorContains(err, tc.expError))
} else {
assert.Check(t, err)
assert.Check(t, is.DeepEqual(hostCfg.Sysctls, tc.expSysctls))
ep := netCfg.EndpointsConfig[tc.networkMode]
if ep == nil {
assert.Check(t, is.Nil(tc.expEpSysctls))
} else {
got, ok := ep.DriverOpts[netlabel.EndpointSysctls]
assert.Check(t, ok)
// Check for expected ep-sysctls.
for _, want := range tc.expEpSysctls {
assert.Check(t, is.Contains(got, want))
}
// Check for unexpected ep-sysctls.
assert.Check(t, is.Len(got, len(strings.Join(tc.expEpSysctls, ","))))
}
}
})
}
}
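The second table covers handleSysctlBC: on the 1.46 cases, interface-specific `net.ipv6.conf.eth0.*` entries are lifted out of HostConfig.Sysctls and attached to the target endpoint as a driver option under the `com.docker.network.endpoint.sysctls` label, rewritten to the IFNAME form, while from 1.48 they are rejected outright. A before/after sketch of the first test case's data (the ordering within the joined option value is not significant):

```go
package sysctlexample

// What an older client still puts in HostConfig.Sysctls.
var before = map[string]string{
	"net.ipv6.conf.all.disable_ipv6": "0",
	"net.ipv6.conf.eth0.accept_ra":   "2",
	"net.ipv6.conf.eth0.forwarding":  "1",
}

// After migration: the non-interface-specific entry stays in Sysctls, and the
// eth0 ones move onto the endpoint's DriverOpts, rewritten to IFNAME.
var (
	afterSysctls = map[string]string{
		"net.ipv6.conf.all.disable_ipv6": "0",
	}
	afterDriverOpts = map[string]string{
		"com.docker.network.endpoint.sysctls": "net.ipv6.conf.IFNAME.accept_ra=2,net.ipv6.conf.IFNAME.forwarding=1",
	}
)
```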

View File

@@ -10,12 +10,51 @@ import (
"net/http"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/versions"
gddohttputil "github.com/golang/gddo/httputil"
)
// setContainerPathStatHeader encodes the stat to JSON, base64 encode, and place in a header.
func setContainerPathStatHeader(stat *container.PathStat, header http.Header) error {
type pathError struct{}
func (pathError) Error() string {
return "Path cannot be empty"
}
func (pathError) InvalidParameter() {}
// postContainersCopy is deprecated in favor of getContainersArchive.
//
// Deprecated since 1.8 (API v1.20), errors out since 1.12 (API v1.24)
func (s *containerRouter) postContainersCopy(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
version := httputils.VersionFromContext(ctx)
if versions.GreaterThanOrEqualTo(version, "1.24") {
w.WriteHeader(http.StatusNotFound)
return nil
}
cfg := types.CopyConfig{}
if err := httputils.ReadJSON(r, &cfg); err != nil {
return err
}
if cfg.Resource == "" {
return pathError{}
}
data, err := s.backend.ContainerCopy(vars["name"], cfg.Resource)
if err != nil {
return err
}
defer data.Close()
w.Header().Set("Content-Type", "application/x-tar")
_, err = io.Copy(w, data)
return err
}
// // Encode the stat to JSON, base64 encode, and place in a header.
func setContainerPathStatHeader(stat *types.ContainerPathStat, header http.Header) error {
statJSON, err := json.Marshal(stat)
if err != nil {
return err
@@ -29,13 +68,13 @@ func setContainerPathStatHeader(stat *container.PathStat, header http.Header) er
return nil
}
func (c *containerRouter) headContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
func (s *containerRouter) headContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
v, err := httputils.ArchiveFormValues(r, vars)
if err != nil {
return err
}
stat, err := c.backend.ContainerStatPath(v.Name, v.Path)
stat, err := s.backend.ContainerStatPath(v.Name, v.Path)
if err != nil {
return err
}
@@ -66,13 +105,13 @@ func writeCompressedResponse(w http.ResponseWriter, r *http.Request, body io.Rea
return err
}
func (c *containerRouter) getContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
func (s *containerRouter) getContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
v, err := httputils.ArchiveFormValues(r, vars)
if err != nil {
return err
}
tarArchive, stat, err := c.backend.ContainerArchivePath(v.Name, v.Path)
tarArchive, stat, err := s.backend.ContainerArchivePath(v.Name, v.Path)
if err != nil {
return err
}
@@ -86,7 +125,7 @@ func (c *containerRouter) getContainersArchive(ctx context.Context, w http.Respo
return writeCompressedResponse(w, r, tarArchive)
}
func (c *containerRouter) putContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
func (s *containerRouter) putContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
v, err := httputils.ArchiveFormValues(r, vars)
if err != nil {
return err
@@ -95,5 +134,5 @@ func (c *containerRouter) putContainersArchive(ctx context.Context, w http.Respo
noOverwriteDirNonDir := httputils.BoolValue(r, "noOverwriteDirNonDir")
copyUIDGID := httputils.BoolValue(r, "copyUIDGID")
return c.backend.ContainerExtractToDir(v.Name, v.Path, copyUIDGID, noOverwriteDirNonDir, r.Body)
return s.backend.ContainerExtractToDir(v.Name, v.Path, copyUIDGID, noOverwriteDirNonDir, r.Body)
}
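The archive handlers above report file metadata out of band: setContainerPathStatHeader marshals the stat to JSON, base64-encodes it, and stores it in a response header. The header name and base64 variant sit outside the visible hunk; the sketch below assumes the values the Docker API uses for this (X-Docker-Container-Path-Stat, standard base64) and trims the stat struct to two fields:

```go
package copyexample

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"net/http"
)

// containerPathStat is a trimmed-down stand-in for the stat type encoded by
// the server.
type containerPathStat struct {
	Name string `json:"name"`
	Size int64  `json:"size"`
}

// pathStatFromResponse is the client-side counterpart of
// setContainerPathStatHeader: it decodes the base64 JSON blob back into a
// struct.
func pathStatFromResponse(resp *http.Response) (*containerPathStat, error) {
	enc := resp.Header.Get("X-Docker-Container-Path-Stat")
	raw, err := base64.StdEncoding.DecodeString(enc)
	if err != nil {
		return nil, fmt.Errorf("decoding path stat header: %w", err)
	}
	var stat containerPathStat
	if err := json.Unmarshal(raw, &stat); err != nil {
		return nil, err
	}
	return &stat, nil
}
```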

View File

@@ -5,20 +5,19 @@ import (
"fmt"
"io"
"net/http"
"strconv"
"github.com/containerd/log"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/pkg/stdcopy"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
func (c *containerRouter) getExecByID(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
eConfig, err := c.backend.ContainerExecInspect(vars["id"])
func (s *containerRouter) getExecByID(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
eConfig, err := s.backend.ContainerExecInspect(vars["id"])
if err != nil {
return err
}
@@ -34,12 +33,12 @@ func (execCommandError) Error() string {
func (execCommandError) InvalidParameter() {}
func (c *containerRouter) postContainerExecCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
func (s *containerRouter) postContainerExecCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
execConfig := &container.ExecOptions{}
execConfig := &types.ExecConfig{}
if err := httputils.ReadJSON(r, execConfig); err != nil {
return err
}
@@ -55,53 +54,60 @@ func (c *containerRouter) postContainerExecCreate(ctx context.Context, w http.Re
}
// Register an instance of Exec in container.
id, err := c.backend.ContainerExecCreate(vars["name"], execConfig)
id, err := s.backend.ContainerExecCreate(vars["name"], execConfig)
if err != nil {
log.G(ctx).Errorf("Error setting up exec command in container %s: %v", vars["name"], err)
logrus.Errorf("Error setting up exec command in container %s: %v", vars["name"], err)
return err
}
return httputils.WriteJSON(w, http.StatusCreated, &container.ExecCreateResponse{
return httputils.WriteJSON(w, http.StatusCreated, &types.IDResponse{
ID: id,
})
}
// TODO(vishh): Refactor the code to avoid having to specify stream config as part of both create and start.
func (c *containerRouter) postContainerExecStart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
func (s *containerRouter) postContainerExecStart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
version := httputils.VersionFromContext(ctx)
if versions.LessThan(version, "1.22") {
// API versions before 1.22 did not enforce application/json content-type.
// Allow older clients to work by patching the content-type.
if r.Header.Get("Content-Type") != "application/json" {
r.Header.Set("Content-Type", "application/json")
}
}
var (
execName = vars["name"]
stdin, inStream io.ReadCloser
stdout, stderr, outStream io.Writer
)
options := &container.ExecStartOptions{}
if err := httputils.ReadJSON(r, options); err != nil {
execStartCheck := &types.ExecStartCheck{}
if err := httputils.ReadJSON(r, execStartCheck); err != nil {
return err
}
if exists, err := c.backend.ExecExists(execName); !exists {
if exists, err := s.backend.ExecExists(execName); !exists {
return err
}
if options.ConsoleSize != nil {
version := httputils.VersionFromContext(ctx)
if execStartCheck.ConsoleSize != nil {
// Not supported before 1.42
if versions.LessThan(version, "1.42") {
options.ConsoleSize = nil
execStartCheck.ConsoleSize = nil
}
// No console without tty
if !options.Tty {
options.ConsoleSize = nil
if !execStartCheck.Tty {
execStartCheck.ConsoleSize = nil
}
}
if !options.Detach {
if !execStartCheck.Detach {
var err error
// Setting up the streaming http interface.
inStream, outStream, err = httputils.HijackConnection(w)
@@ -112,60 +118,59 @@ func (c *containerRouter) postContainerExecStart(ctx context.Context, w http.Res
if _, ok := r.Header["Upgrade"]; ok {
contentType := types.MediaTypeRawStream
if !options.Tty && versions.GreaterThanOrEqualTo(httputils.VersionFromContext(ctx), "1.42") {
if !execStartCheck.Tty && versions.GreaterThanOrEqualTo(httputils.VersionFromContext(ctx), "1.42") {
contentType = types.MediaTypeMultiplexedStream
}
_, _ = fmt.Fprint(outStream, "HTTP/1.1 101 UPGRADED\r\nContent-Type: "+contentType+"\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n")
fmt.Fprint(outStream, "HTTP/1.1 101 UPGRADED\r\nContent-Type: "+contentType+"\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n")
} else {
_, _ = fmt.Fprint(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n")
fmt.Fprint(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n")
}
// copy headers that were removed as part of hijack
if err := w.Header().WriteSubset(outStream, nil); err != nil {
return err
}
_, _ = fmt.Fprint(outStream, "\r\n")
fmt.Fprint(outStream, "\r\n")
stdin = inStream
if options.Tty {
stdout = outStream
} else {
stdout = outStream
if !execStartCheck.Tty {
stderr = stdcopy.NewStdWriter(outStream, stdcopy.Stderr)
stdout = stdcopy.NewStdWriter(outStream, stdcopy.Stdout)
}
}
// Now run the user process in container.
//
// TODO: Maybe we should we pass ctx here if we're not detaching?
err := c.backend.ContainerExecStart(context.Background(), execName, backend.ExecStartConfig{
options := container.ExecStartOptions{
Stdin: stdin,
Stdout: stdout,
Stderr: stderr,
ConsoleSize: options.ConsoleSize,
})
if err != nil {
if options.Detach {
ConsoleSize: execStartCheck.ConsoleSize,
}
// Now run the user process in container.
// Maybe we should we pass ctx here if we're not detaching?
if err := s.backend.ContainerExecStart(context.Background(), execName, options); err != nil {
if execStartCheck.Detach {
return err
}
_, _ = fmt.Fprintf(stdout, "%v\r\n", err)
log.G(ctx).Errorf("Error running exec %s in container: %v", execName, err)
stdout.Write([]byte(err.Error() + "\r\n"))
logrus.Errorf("Error running exec %s in container: %v", execName, err)
}
return nil
}
func (c *containerRouter) postContainerExecResize(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
func (s *containerRouter) postContainerExecResize(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
height, err := httputils.Uint32Value(r, "h")
height, err := strconv.Atoi(r.Form.Get("h"))
if err != nil {
return errdefs.InvalidParameter(errors.Wrapf(err, "invalid resize height %q", r.Form.Get("h")))
return errdefs.InvalidParameter(err)
}
width, err := httputils.Uint32Value(r, "w")
width, err := strconv.Atoi(r.Form.Get("w"))
if err != nil {
return errdefs.InvalidParameter(errors.Wrapf(err, "invalid resize width %q", r.Form.Get("w")))
return errdefs.InvalidParameter(err)
}
return c.backend.ContainerExecResize(ctx, vars["name"], height, width)
return s.backend.ContainerExecResize(vars["name"], height, width)
}
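Exec is a two-step protocol in both versions of this file: POST /containers/{name}/exec creates an exec instance and returns its ID, then POST /exec/{id}/start runs it (hijacking the connection when not detached). A compressed sketch of the detached flow using only net/http; the host value and any /vN.NN prefix are assumptions, and stdio streaming is omitted:

```go
package execexample

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// createAndStartExec runs cmd in a container in detached mode: the daemon
// executes the process without streaming stdio back, so plain POSTs suffice.
func createAndStartExec(c *http.Client, host, container string, cmd []string) error {
	createBody, err := json.Marshal(map[string]any{"Cmd": cmd, "AttachStdout": false})
	if err != nil {
		return err
	}
	resp, err := c.Post(host+"/containers/"+container+"/exec", "application/json", bytes.NewReader(createBody))
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	var created struct {
		ID string `json:"Id"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&created); err != nil {
		return err
	}

	startBody, err := json.Marshal(map[string]any{"Detach": true})
	if err != nil {
		return err
	}
	resp2, err := c.Post(host+"/exec/"+created.ID+"/start", "application/json", bytes.NewReader(startBody))
	if err != nil {
		return err
	}
	defer resp2.Body.Close()
	if resp2.StatusCode >= 300 {
		return fmt.Errorf("exec start failed: %s", resp2.Status)
	}
	return nil
}
```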

View File

@@ -1,6 +1,3 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
//go:build go1.23
package container // import "github.com/docker/docker/api/server/router/container"
import (
@@ -8,34 +5,17 @@ import (
"net/http"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/internal/sliceutil"
"github.com/docker/docker/pkg/stringid"
)
// getContainersByName inspects container's configuration and serializes it as json.
func (c *containerRouter) getContainersByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
ctr, err := c.backend.ContainerInspect(ctx, vars["name"], backend.ContainerInspectOptions{
Size: httputils.BoolValue(r, "size"),
})
func (s *containerRouter) getContainersByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
displaySize := httputils.BoolValue(r, "size")
version := httputils.VersionFromContext(ctx)
json, err := s.backend.ContainerInspect(vars["name"], displaySize, version)
if err != nil {
return err
}
version := httputils.VersionFromContext(ctx)
if versions.LessThan(version, "1.45") {
shortCID := stringid.TruncateID(ctr.ID)
for nwName, ep := range ctr.NetworkSettings.Networks {
if container.NetworkMode(nwName).IsUserDefined() {
ep.Aliases = sliceutil.Dedup(append(ep.Aliases, shortCID, ctr.Config.Hostname))
}
}
}
if versions.LessThan(version, "1.48") {
ctr.ImageManifestDescriptor = nil
}
return httputils.WriteJSON(w, http.StatusOK, ctr)
return httputils.WriteJSON(w, http.StatusOK, json)
}

View File

@@ -1,54 +0,0 @@
package container
import (
"context"
"net"
"syscall"
"github.com/containerd/log"
"github.com/docker/docker/internal/unix_noeintr"
"golang.org/x/sys/unix"
)
func notifyClosed(ctx context.Context, conn net.Conn, notify func()) {
sc, ok := conn.(syscall.Conn)
if !ok {
log.G(ctx).Debug("notifyClosed: conn does not support close notifications")
return
}
rc, err := sc.SyscallConn()
if err != nil {
log.G(ctx).WithError(err).Warn("notifyClosed: failed get raw conn for close notifications")
return
}
epFd, err := unix_noeintr.EpollCreate()
if err != nil {
log.G(ctx).WithError(err).Warn("notifyClosed: failed to create epoll fd")
return
}
defer unix.Close(epFd)
err = rc.Control(func(fd uintptr) {
err := unix_noeintr.EpollCtl(epFd, unix.EPOLL_CTL_ADD, int(fd), &unix.EpollEvent{
Events: unix.EPOLLHUP,
Fd: int32(fd),
})
if err != nil {
log.G(ctx).WithError(err).Warn("notifyClosed: failed to register fd for close notifications")
return
}
events := make([]unix.EpollEvent, 1)
if _, err := unix_noeintr.EpollWait(epFd, events, -1); err != nil {
log.G(ctx).WithError(err).Warn("notifyClosed: failed to wait for close notifications")
return
}
notify()
})
if err != nil {
log.G(ctx).WithError(err).Warn("notifyClosed: failed to register for close notifications")
return
}
}

View File

@@ -1,10 +0,0 @@
//go:build !linux
package container
import (
"context"
"net"
)
func notifyClosed(ctx context.Context, conn net.Conn, notify func()) {}

View File

@@ -24,13 +24,13 @@ type debugRouter struct {
func (r *debugRouter) initRoutes() {
r.routes = []router.Route{
router.NewGetRoute("/debug/vars", frameworkAdaptHandler(expvar.Handler())),
router.NewGetRoute("/debug/pprof/", frameworkAdaptHandlerFunc(pprof.Index)),
router.NewGetRoute("/debug/pprof/cmdline", frameworkAdaptHandlerFunc(pprof.Cmdline)),
router.NewGetRoute("/debug/pprof/profile", frameworkAdaptHandlerFunc(pprof.Profile)),
router.NewGetRoute("/debug/pprof/symbol", frameworkAdaptHandlerFunc(pprof.Symbol)),
router.NewGetRoute("/debug/pprof/trace", frameworkAdaptHandlerFunc(pprof.Trace)),
router.NewGetRoute("/debug/pprof/{name}", handlePprof),
router.NewGetRoute("/vars", frameworkAdaptHandler(expvar.Handler())),
router.NewGetRoute("/pprof/", frameworkAdaptHandlerFunc(pprof.Index)),
router.NewGetRoute("/pprof/cmdline", frameworkAdaptHandlerFunc(pprof.Cmdline)),
router.NewGetRoute("/pprof/profile", frameworkAdaptHandlerFunc(pprof.Profile)),
router.NewGetRoute("/pprof/symbol", frameworkAdaptHandlerFunc(pprof.Symbol)),
router.NewGetRoute("/pprof/trace", frameworkAdaptHandlerFunc(pprof.Trace)),
router.NewGetRoute("/pprof/{name}", handlePprof),
}
}
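One side of this diff registers the profiling routes under an explicit /debug prefix, the other without it (presumably prefixed elsewhere when the router is mounted). A sketch of fetching a goroutine dump over the daemon's unix socket, assuming a debug-enabled daemon, the default socket path, and the /debug/pprof layout:

```go
package pprofexample

import (
	"context"
	"io"
	"net"
	"net/http"
	"os"
)

// dumpGoroutines hits the pprof route over the unix socket and copies the
// text dump to stdout.
func dumpGoroutines(ctx context.Context) error {
	c := &http.Client{
		Transport: &http.Transport{
			DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
				return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
			},
		},
	}
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, "http://docker/debug/pprof/goroutine?debug=2", nil)
	if err != nil {
		return err
	}
	resp, err := c.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	_, err = io.Copy(os.Stdout, resp.Body)
	return err
}
```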

View File

@@ -3,13 +3,13 @@ package distribution // import "github.com/docker/docker/api/server/router/distr
import (
"context"
"github.com/distribution/reference"
"github.com/docker/distribution"
"github.com/docker/docker/api/types/registry"
"github.com/docker/distribution/reference"
"github.com/docker/docker/api/types"
)
// Backend is all the methods that need to be implemented
// to provide image specific functionality.
type Backend interface {
GetRepositories(context.Context, reference.Named, *registry.AuthConfig) ([]distribution.Repository, error)
GetRepository(context.Context, reference.Named, *types.AuthConfig) (distribution.Repository, error)
}

View File

@@ -18,14 +18,14 @@ func NewRouter(backend Backend) router.Router {
}
// Routes returns the available routes
func (dr *distributionRouter) Routes() []router.Route {
return dr.routes
func (r *distributionRouter) Routes() []router.Route {
return r.routes
}
// initRoutes initializes the routes in the distribution router
func (dr *distributionRouter) initRoutes() {
dr.routes = []router.Route{
func (r *distributionRouter) initRoutes() {
r.routes = []router.Route{
// GET
router.NewGetRoute("/distribution/{name:.*}/json", dr.getDistributionInfo),
router.NewGetRoute("/distribution/{name:.*}/json", r.getDistributionInfo),
}
}

View File

@@ -2,33 +2,49 @@ package distribution // import "github.com/docker/docker/api/server/router/distr
import (
"context"
"encoding/base64"
"encoding/json"
"net/http"
"strings"
"github.com/distribution/reference"
"github.com/docker/distribution"
"github.com/docker/distribution/manifest/manifestlist"
"github.com/docker/distribution/manifest/schema1"
"github.com/docker/distribution/manifest/schema2"
"github.com/docker/distribution/reference"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/types/registry"
distributionpkg "github.com/docker/docker/distribution"
"github.com/docker/docker/api/types"
registrytypes "github.com/docker/docker/api/types/registry"
"github.com/docker/docker/errdefs"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
func (dr *distributionRouter) getDistributionInfo(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
func (s *distributionRouter) getDistributionInfo(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
w.Header().Set("Content-Type", "application/json")
imgName := vars["name"]
var (
config = &types.AuthConfig{}
authEncoded = r.Header.Get("X-Registry-Auth")
distributionInspect registrytypes.DistributionInspect
)
if authEncoded != "" {
authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
if err := json.NewDecoder(authJSON).Decode(&config); err != nil {
// for a search it is not an error if no auth was given
// to increase compatibility with the existing api it is defaulting to be empty
config = &types.AuthConfig{}
}
}
image := vars["name"]
// TODO why is reference.ParseAnyReference() / reference.ParseNormalizedNamed() not using the reference.ErrTagInvalidFormat (and so on) errors?
ref, err := reference.ParseAnyReference(imgName)
ref, err := reference.ParseAnyReference(image)
if err != nil {
return errdefs.InvalidParameter(err)
}
@@ -38,58 +54,28 @@ func (dr *distributionRouter) getDistributionInfo(ctx context.Context, w http.Re
// full image ID
return errors.Errorf("no manifest found for full image ID")
}
return errdefs.InvalidParameter(errors.Errorf("unknown image reference format: %s", imgName))
return errdefs.InvalidParameter(errors.Errorf("unknown image reference format: %s", image))
}
// For a search it is not an error if no auth was given. Ignore invalid
// AuthConfig to increase compatibility with the existing API.
authConfig, _ := registry.DecodeAuthConfig(r.Header.Get(registry.AuthHeader))
repos, err := dr.backend.GetRepositories(ctx, namedRef, authConfig)
distrepo, err := s.backend.GetRepository(ctx, namedRef, config)
if err != nil {
return err
}
blobsrvc := distrepo.Blobs(ctx)
// Fetch the manifest; if a mirror is configured, try the mirror first,
// but continue with upstream on failure.
//
// FIXME(thaJeztah): construct "repositories" on-demand;
// GetRepositories() will attempt to connect to all endpoints (registries),
// but we may only need the first one if it contains the manifest we're
// looking for, or if the configured mirror is a pull-through mirror.
//
// Logic for this could be implemented similar to "distribution.Pull()",
// which uses the "pullEndpoints" utility to iterate over the list
// of endpoints;
//
// - https://github.com/moby/moby/blob/12c7411b6b7314bef130cd59f1c7384a7db06d0b/distribution/pull.go#L17-L31
// - https://github.com/moby/moby/blob/12c7411b6b7314bef130cd59f1c7384a7db06d0b/distribution/pull.go#L76-L152
var lastErr error
for _, repo := range repos {
distributionInspect, err := dr.fetchManifest(ctx, repo, namedRef)
if err != nil {
lastErr = err
continue
}
return httputils.WriteJSON(w, http.StatusOK, distributionInspect)
}
return lastErr
}
func (dr *distributionRouter) fetchManifest(ctx context.Context, distrepo distribution.Repository, namedRef reference.Named) (registry.DistributionInspect, error) {
var distributionInspect registry.DistributionInspect
if canonicalRef, ok := namedRef.(reference.Canonical); !ok {
namedRef = reference.TagNameOnly(namedRef)
taggedRef, ok := namedRef.(reference.NamedTagged)
if !ok {
return registry.DistributionInspect{}, errdefs.InvalidParameter(errors.Errorf("image reference not tagged: %s", namedRef))
return errdefs.InvalidParameter(errors.Errorf("image reference not tagged: %s", image))
}
descriptor, err := distrepo.Tags(ctx).Get(ctx, taggedRef.Tag())
if err != nil {
return registry.DistributionInspect{}, err
return err
}
distributionInspect.Descriptor = ocispec.Descriptor{
distributionInspect.Descriptor = v1.Descriptor{
MediaType: descriptor.MediaType,
Digest: descriptor.Digest,
Size: descriptor.Size,
@@ -104,7 +90,7 @@ func (dr *distributionRouter) fetchManifest(ctx context.Context, distrepo distri
// we have a digest, so we can retrieve the manifest
mnfstsrvc, err := distrepo.Manifests(ctx)
if err != nil {
return registry.DistributionInspect{}, err
return err
}
mnfst, err := mnfstsrvc.Get(ctx, distributionInspect.Descriptor.Digest)
if err != nil {
@@ -116,14 +102,14 @@ func (dr *distributionRouter) fetchManifest(ctx context.Context, distrepo distri
reference.ErrNameEmpty,
reference.ErrNameTooLong,
reference.ErrNameNotCanonical:
return registry.DistributionInspect{}, errdefs.InvalidParameter(err)
return errdefs.InvalidParameter(err)
}
return registry.DistributionInspect{}, err
return err
}
mediaType, payload, err := mnfst.Payload()
if err != nil {
return registry.DistributionInspect{}, err
return err
}
// update MediaType because registry might return something incorrect
distributionInspect.Descriptor.MediaType = mediaType
@@ -135,7 +121,7 @@ func (dr *distributionRouter) fetchManifest(ctx context.Context, distrepo distri
switch mnfstObj := mnfst.(type) {
case *manifestlist.DeserializedManifestList:
for _, m := range mnfstObj.Manifests {
distributionInspect.Platforms = append(distributionInspect.Platforms, ocispec.Platform{
distributionInspect.Platforms = append(distributionInspect.Platforms, v1.Platform{
Architecture: m.Platform.Architecture,
OS: m.Platform.OS,
OSVersion: m.Platform.OSVersion,
@@ -144,19 +130,21 @@ func (dr *distributionRouter) fetchManifest(ctx context.Context, distrepo distri
})
}
case *schema2.DeserializedManifest:
blobStore := distrepo.Blobs(ctx)
configJSON, err := blobStore.Get(ctx, mnfstObj.Config.Digest)
var platform ocispec.Platform
configJSON, err := blobsrvc.Get(ctx, mnfstObj.Config.Digest)
var platform v1.Platform
if err == nil {
err := json.Unmarshal(configJSON, &platform)
if err == nil && (platform.OS != "" || platform.Architecture != "") {
distributionInspect.Platforms = append(distributionInspect.Platforms, platform)
}
}
// TODO(thaJeztah); we only use this to produce a nice error, but as a result, we can't remove libtrust as dependency - see if we can reduce the dependencies, but still able to detect it's a deprecated manifest
case *schema1.SignedManifest:
return registry.DistributionInspect{}, distributionpkg.DeprecatedSchema1ImageError(namedRef)
platform := v1.Platform{
Architecture: mnfstObj.Architecture,
OS: "linux",
}
distributionInspect.Platforms = append(distributionInspect.Platforms, platform)
}
return distributionInspect, nil
return httputils.WriteJSON(w, http.StatusOK, distributionInspect)
}
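Both variants of getDistributionInfo take registry credentials from the X-Registry-Auth header, the older one decoding it inline with base64.URLEncoding and silently falling back to empty credentials on a bad value. A sketch of the matching client-side encoding; the trimmed authConfig struct and its lowercase JSON keys follow the usual AuthConfig wire format, which is an assumption here:

```go
package distexample

import (
	"encoding/base64"
	"encoding/json"
	"net/http"
)

// authConfig is a trimmed stand-in for the registry auth payload.
type authConfig struct {
	Username      string `json:"username,omitempty"`
	Password      string `json:"password,omitempty"`
	ServerAddress string `json:"serveraddress,omitempty"`
}

// setRegistryAuth encodes the credentials the way the handler above decodes
// them: JSON, then URL-safe base64, in the X-Registry-Auth header.
func setRegistryAuth(req *http.Request, ac authConfig) error {
	buf, err := json.Marshal(ac)
	if err != nil {
		return err
	}
	req.Header.Set("X-Registry-Auth", base64.URLEncoding.EncodeToString(buf))
	return nil
}
```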

View File

@@ -1,22 +1,8 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
//go:build go1.23
package grpc // import "github.com/docker/docker/api/server/router/grpc"
import (
"context"
"fmt"
"os"
"strings"
"github.com/containerd/containerd/v2/defaults"
"github.com/containerd/log"
"github.com/docker/docker/api/server/router"
"github.com/docker/docker/internal/otelutil"
"github.com/moby/buildkit/util/grpcerrors"
"github.com/moby/buildkit/util/stack"
"github.com/moby/buildkit/util/tracing"
"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
"golang.org/x/net/http2"
"google.golang.org/grpc"
)
@@ -29,18 +15,12 @@ type grpcRouter struct {
// NewRouter initializes a new grpc http router
func NewRouter(backends ...Backend) router.Router {
tp, _ := otelutil.NewTracerProvider(context.Background(), false)
opts := []grpc.ServerOption{
grpc.StatsHandler(tracing.ServerStatsHandler(otelgrpc.WithTracerProvider(tp))),
grpc.ChainUnaryInterceptor(unaryInterceptor, grpcerrors.UnaryServerInterceptor),
grpc.StreamInterceptor(grpcerrors.StreamServerInterceptor),
grpc.MaxRecvMsgSize(defaults.DefaultMaxRecvMsgSize),
grpc.MaxSendMsgSize(defaults.DefaultMaxSendMsgSize),
}
r := &grpcRouter{
h2Server: &http2.Server{},
grpcServer: grpc.NewServer(opts...),
h2Server: &http2.Server{},
grpcServer: grpc.NewServer(
grpc.UnaryInterceptor(grpcerrors.UnaryServerInterceptor),
grpc.StreamInterceptor(grpcerrors.StreamServerInterceptor),
),
}
for _, b := range backends {
b.RegisterGRPC(r.grpcServer)
@@ -59,21 +39,3 @@ func (gr *grpcRouter) initRoutes() {
router.NewPostRoute("/grpc", gr.serveGRPC),
}
}
func unaryInterceptor(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, _ error) {
// This method is used by the clients to send their traces to buildkit so they can be included
// in the daemon trace and stored in the build history record. This method can not be traced because
// it would cause an infinite loop.
if strings.HasSuffix(info.FullMethod, "opentelemetry.proto.collector.trace.v1.TraceService/Export") {
return handler(ctx, req)
}
resp, err := handler(ctx, req)
if err != nil {
log.G(ctx).WithError(err).Error(info.FullMethod)
if log.GetLevel() >= log.DebugLevel {
_, _ = fmt.Fprintf(os.Stderr, "%+v", stack.Formatter(grpcerrors.FromGRPC(err)))
}
}
return resp, err
}

View File

@@ -4,13 +4,12 @@ import (
"context"
"io"
"github.com/distribution/reference"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/image"
"github.com/docker/docker/api/types/registry"
dockerimage "github.com/docker/docker/image"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
specs "github.com/opencontainers/image-spec/specs-go/v1"
)
// Backend is all the methods that need to be implemented
@@ -22,26 +21,22 @@ type Backend interface {
}
type imageBackend interface {
ImageDelete(ctx context.Context, imageRef string, options image.RemoveOptions) ([]image.DeleteResponse, error)
ImageHistory(ctx context.Context, imageName string, platform *ocispec.Platform) ([]*image.HistoryResponseItem, error)
Images(ctx context.Context, opts image.ListOptions) ([]*image.Summary, error)
GetImage(ctx context.Context, refOrID string, options backend.GetImageOpts) (*dockerimage.Image, error)
ImageInspect(ctx context.Context, refOrID string, options backend.ImageInspectOpts) (*image.InspectResponse, error)
TagImage(ctx context.Context, id dockerimage.ID, newRef reference.Named) error
ImagesPrune(ctx context.Context, pruneFilters filters.Args) (*image.PruneReport, error)
ImageDelete(imageRef string, force, prune bool) ([]types.ImageDeleteResponseItem, error)
ImageHistory(imageName string) ([]*image.HistoryResponseItem, error)
Images(ctx context.Context, opts types.ImageListOptions) ([]*types.ImageSummary, error)
GetImage(refOrID string, platform *specs.Platform) (retImg *dockerimage.Image, retErr error)
TagImage(imageName, repository, tag string) (string, error)
ImagesPrune(ctx context.Context, pruneFilters filters.Args) (*types.ImagesPruneReport, error)
}
type importExportBackend interface {
LoadImage(ctx context.Context, inTar io.ReadCloser, platform *ocispec.Platform, outStream io.Writer, quiet bool) error
ImportImage(ctx context.Context, ref reference.Named, platform *ocispec.Platform, msg string, layerReader io.Reader, changes []string) (dockerimage.ID, error)
ExportImage(ctx context.Context, names []string, platform *ocispec.Platform, outStream io.Writer) error
LoadImage(inTar io.ReadCloser, outStream io.Writer, quiet bool) error
ImportImage(src string, repository string, platform *specs.Platform, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error
ExportImage(names []string, outStream io.Writer) error
}
type registryBackend interface {
PullImage(ctx context.Context, ref reference.Named, platform *ocispec.Platform, metaHeaders map[string][]string, authConfig *registry.AuthConfig, outStream io.Writer) error
PushImage(ctx context.Context, ref reference.Named, platform *ocispec.Platform, metaHeaders map[string][]string, authConfig *registry.AuthConfig, outStream io.Writer) error
}
type Searcher interface {
Search(ctx context.Context, searchFilters filters.Args, term string, limit int, authConfig *registry.AuthConfig, headers map[string][]string) ([]registry.SearchResult, error)
PullImage(ctx context.Context, image, tag string, platform *specs.Platform, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error
PushImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error
SearchRegistryForImages(ctx context.Context, searchFilters filters.Args, term string, limit int, authConfig *types.AuthConfig, metaHeaders map[string][]string) (*registry.SearchResults, error)
}
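
Several of the backend methods above take an optional platform as *ocispec.Platform, where nil means "use the daemon's default platform". In the handlers later in this diff, that pointer is only populated when the negotiated API version is new enough and the caller actually sent a "platform" form value. A minimal sketch of that pattern, reusing only helpers that appear in this diff (platforms.Parse, versions.LessThan, errdefs.InvalidParameter); the 1.48 cut-off mirrors the gate used for load/export here and varies per endpoint:

package main

import (
	"github.com/containerd/platforms"
	"github.com/docker/docker/api/types/versions"
	"github.com/docker/docker/errdefs"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// resolvePlatform turns an optional "platform" form value into a
// *ocispec.Platform. It returns nil (daemon default) when the parameter is
// absent or the negotiated API version predates platform support.
func resolvePlatform(apiVersion, formValue string) (*ocispec.Platform, error) {
	if formValue == "" || versions.LessThan(apiVersion, "1.48") {
		return nil, nil
	}
	p, err := platforms.Parse(formValue) // e.g. "linux/arm64"
	if err != nil {
		return nil, errdefs.InvalidParameter(err)
	}
	return &p, nil
}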


@@ -2,47 +2,54 @@ package image // import "github.com/docker/docker/api/server/router/image"
import (
"github.com/docker/docker/api/server/router"
"github.com/docker/docker/image"
"github.com/docker/docker/layer"
"github.com/docker/docker/reference"
)
// imageRouter is a router to talk with the image controller
type imageRouter struct {
backend Backend
searcher Searcher
routes []router.Route
backend Backend
referenceBackend reference.Store
imageStore image.Store
layerStore layer.Store
routes []router.Route
}
// NewRouter initializes a new image router
func NewRouter(backend Backend, searcher Searcher) router.Router {
ir := &imageRouter{
backend: backend,
searcher: searcher,
func NewRouter(backend Backend, referenceBackend reference.Store, imageStore image.Store, layerStore layer.Store) router.Router {
r := &imageRouter{
backend: backend,
referenceBackend: referenceBackend,
imageStore: imageStore,
layerStore: layerStore,
}
ir.initRoutes()
return ir
r.initRoutes()
return r
}
// Routes returns the available routes to the image controller
func (ir *imageRouter) Routes() []router.Route {
return ir.routes
func (r *imageRouter) Routes() []router.Route {
return r.routes
}
// initRoutes initializes the routes in the image router
func (ir *imageRouter) initRoutes() {
ir.routes = []router.Route{
func (r *imageRouter) initRoutes() {
r.routes = []router.Route{
// GET
router.NewGetRoute("/images/json", ir.getImagesJSON),
router.NewGetRoute("/images/search", ir.getImagesSearch),
router.NewGetRoute("/images/get", ir.getImagesGet),
router.NewGetRoute("/images/{name:.*}/get", ir.getImagesGet),
router.NewGetRoute("/images/{name:.*}/history", ir.getImagesHistory),
router.NewGetRoute("/images/{name:.*}/json", ir.getImagesByName),
router.NewGetRoute("/images/json", r.getImagesJSON),
router.NewGetRoute("/images/search", r.getImagesSearch),
router.NewGetRoute("/images/get", r.getImagesGet),
router.NewGetRoute("/images/{name:.*}/get", r.getImagesGet),
router.NewGetRoute("/images/{name:.*}/history", r.getImagesHistory),
router.NewGetRoute("/images/{name:.*}/json", r.getImagesByName),
// POST
router.NewPostRoute("/images/load", ir.postImagesLoad),
router.NewPostRoute("/images/create", ir.postImagesCreate),
router.NewPostRoute("/images/{name:.*}/push", ir.postImagesPush),
router.NewPostRoute("/images/{name:.*}/tag", ir.postImagesTag),
router.NewPostRoute("/images/prune", ir.postImagesPrune),
router.NewPostRoute("/images/load", r.postImagesLoad),
router.NewPostRoute("/images/create", r.postImagesCreate),
router.NewPostRoute("/images/{name:.*}/push", r.postImagesPush),
router.NewPostRoute("/images/{name:.*}/tag", r.postImagesTag),
router.NewPostRoute("/images/prune", r.postImagesPrune),
// DELETE
router.NewDeleteRoute("/images/{name:.*}", ir.deleteImages),
router.NewDeleteRoute("/images/{name:.*}", r.deleteImages),
}
}
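
The imageRouter above follows the small-router convention used throughout api/server/router: a struct holds a []router.Route, initRoutes builds the table with router.NewGetRoute / NewPostRoute / NewDeleteRoute, and Routes() hands it to the API server. A minimal sketch of the same shape with a hypothetical pingRouter (endpoint, package name, and handler are illustrative; only the router helpers visible in this diff are assumed):

package ping

import (
	"context"
	"net/http"

	"github.com/docker/docker/api/server/router"
)

// pingRouter mirrors the structure of imageRouter with a single route.
type pingRouter struct {
	routes []router.Route
}

// NewRouter initializes the hypothetical ping router.
func NewRouter() router.Router {
	pr := &pingRouter{}
	pr.routes = []router.Route{
		// GET
		router.NewGetRoute("/_ping", pr.ping),
	}
	return pr
}

// Routes returns the route table consumed by the API server.
func (pr *pingRouter) Routes() []router.Route {
	return pr.routes
}

func (pr *pingRouter) ping(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	w.WriteHeader(http.StatusOK)
	return nil
}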


@@ -2,52 +2,43 @@ package image // import "github.com/docker/docker/api/server/router/image"
import (
"context"
"fmt"
"io"
"encoding/base64"
"encoding/json"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"github.com/containerd/platforms"
"github.com/distribution/reference"
"github.com/docker/docker/api"
"github.com/containerd/containerd/platforms"
"github.com/docker/distribution/reference"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
imagetypes "github.com/docker/docker/api/types/image"
"github.com/docker/docker/api/types/registry"
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/builder/remotecontext"
"github.com/docker/docker/dockerversion"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/image"
"github.com/docker/docker/layer"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/progress"
"github.com/docker/docker/pkg/streamformatter"
"github.com/docker/go-connections/nat"
dockerspec "github.com/moby/docker-image-spec/specs-go/v1"
"github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
// Creates an image from Pull or from Import
func (ir *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
func (s *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
var (
img = r.Form.Get("fromImage")
image = r.Form.Get("fromImage")
repo = r.Form.Get("repo")
tag = r.Form.Get("tag")
comment = r.Form.Get("message")
message = r.Form.Get("message")
progressErr error
output = ioutils.NewWriteFlusher(w)
platform *ocispec.Platform
platform *specs.Platform
)
defer output.Close()
@@ -58,13 +49,13 @@ func (ir *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWrit
if p := r.FormValue("platform"); p != "" {
sp, err := platforms.Parse(p)
if err != nil {
return errdefs.InvalidParameter(err)
return err
}
platform = &sp
}
}
if img != "" { // pull
if image != "" { // pull
metaHeaders := map[string][]string{}
for k, v := range r.Header {
if strings.HasPrefix(k, "X-Meta-") {
@@ -72,80 +63,20 @@ func (ir *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWrit
}
}
// Special case: "pull -a" may send an image name with a
// trailing :. This is ugly, but let's not break API
// compatibility.
imgName := strings.TrimSuffix(img, ":")
ref, err := reference.ParseNormalizedNamed(imgName)
if err != nil {
return errdefs.InvalidParameter(err)
}
// TODO(thaJeztah) this could use a WithTagOrDigest() utility
if tag != "" {
// The "tag" could actually be a digest.
var dgst digest.Digest
dgst, err = digest.Parse(tag)
if err == nil {
ref, err = reference.WithDigest(reference.TrimNamed(ref), dgst)
} else {
ref, err = reference.WithTag(ref, tag)
}
if err != nil {
return errdefs.InvalidParameter(err)
authEncoded := r.Header.Get("X-Registry-Auth")
authConfig := &types.AuthConfig{}
if authEncoded != "" {
authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil {
// for a pull it is not an error if no auth was given
// to increase compatibility with the existing api it is defaulting to be empty
authConfig = &types.AuthConfig{}
}
}
if err := validateRepoName(ref); err != nil {
return errdefs.Forbidden(err)
}
// For a pull it is not an error if no auth was given. Ignore invalid
// AuthConfig to increase compatibility with the existing API.
authConfig, _ := registry.DecodeAuthConfig(r.Header.Get(registry.AuthHeader))
progressErr = ir.backend.PullImage(ctx, ref, platform, metaHeaders, authConfig, output)
progressErr = s.backend.PullImage(ctx, image, tag, platform, metaHeaders, authConfig, output)
} else { // import
src := r.Form.Get("fromSrc")
tagRef, err := httputils.RepoTagReference(repo, tag)
if err != nil {
return errdefs.InvalidParameter(err)
}
if len(comment) == 0 {
comment = "Imported from " + src
}
var layerReader io.ReadCloser
defer r.Body.Close()
if src == "-" {
layerReader = r.Body
} else {
if len(strings.Split(src, "://")) == 1 {
src = "http://" + src
}
u, err := url.Parse(src)
if err != nil {
return errdefs.InvalidParameter(err)
}
resp, err := remotecontext.GetWithStatusError(u.String())
if err != nil {
return err
}
output.Write(streamformatter.FormatStatus("", "Downloading from %s", u))
progressOutput := streamformatter.NewJSONProgressOutput(output, true)
layerReader = progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Importing")
defer layerReader.Close()
}
var id image.ID
id, progressErr = ir.backend.ImportImage(ctx, tagRef, platform, comment, layerReader, r.Form["changes"])
if progressErr == nil {
_, _ = output.Write(streamformatter.FormatStatus("", "%v", id.String()))
}
progressErr = s.backend.ImportImage(src, repo, platform, tag, message, r.Body, output, r.Form["changes"])
}
if progressErr != nil {
if !output.Flushed() {
@@ -157,7 +88,7 @@ func (ir *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWrit
return nil
}
func (ir *imageRouter) postImagesPush(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
func (s *imageRouter) postImagesPush(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
metaHeaders := map[string][]string{}
for k, v := range r.Header {
if strings.HasPrefix(k, "X-Meta-") {
@@ -167,64 +98,32 @@ func (ir *imageRouter) postImagesPush(ctx context.Context, w http.ResponseWriter
if err := httputils.ParseForm(r); err != nil {
return err
}
authConfig := &types.AuthConfig{}
var authConfig *registry.AuthConfig
if authEncoded := r.Header.Get(registry.AuthHeader); authEncoded != "" {
// the new format is to handle the authConfig as a header. Ignore invalid
// AuthConfig to increase compatibility with the existing API.
authConfig, _ = registry.DecodeAuthConfig(authEncoded)
authEncoded := r.Header.Get("X-Registry-Auth")
if authEncoded != "" {
// the new format is to handle the authConfig as a header
authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil {
// to increase compatibility to existing api it is defaulting to be empty
authConfig = &types.AuthConfig{}
}
} else {
// the old format is supported for compatibility if there was no authConfig header
var err error
authConfig, err = registry.DecodeAuthConfigBody(r.Body)
if err != nil {
return errors.Wrap(err, "bad parameters and missing X-Registry-Auth")
if err := json.NewDecoder(r.Body).Decode(authConfig); err != nil {
return errors.Wrap(errdefs.InvalidParameter(err), "Bad parameters and missing X-Registry-Auth")
}
}
image := vars["name"]
tag := r.Form.Get("tag")
output := ioutils.NewWriteFlusher(w)
defer output.Close()
w.Header().Set("Content-Type", "application/json")
img := vars["name"]
tag := r.Form.Get("tag")
var ref reference.Named
// Tag is empty only in case PushOptions.All is true.
if tag != "" {
r, err := httputils.RepoTagReference(img, tag)
if err != nil {
return errdefs.InvalidParameter(err)
}
ref = r
} else {
r, err := reference.ParseNormalizedNamed(img)
if err != nil {
return errdefs.InvalidParameter(err)
}
ref = r
}
var platform *ocispec.Platform
// Platform is optional, and only supported in API version 1.46 and later.
// However the PushOptions struct previously was an alias for the PullOptions struct
// which also contained a Platform field.
// This means that older clients may be sending a platform field, even
// though it wasn't really supported by the server.
// Don't break these clients and just ignore the platform field on older APIs.
if versions.GreaterThanOrEqualTo(httputils.VersionFromContext(ctx), "1.46") {
if formPlatform := r.Form.Get("platform"); formPlatform != "" {
p, err := httputils.DecodePlatform(formPlatform)
if err != nil {
return err
}
platform = p
}
}
if err := ir.backend.PushImage(ctx, ref, platform, metaHeaders, authConfig, output); err != nil {
if err := s.backend.PushImage(ctx, image, tag, metaHeaders, authConfig, output); err != nil {
if !output.Flushed() {
return err
}
@@ -233,7 +132,7 @@ func (ir *imageRouter) postImagesPush(ctx context.Context, w http.ResponseWriter
return nil
}
func (ir *imageRouter) getImagesGet(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
func (s *imageRouter) getImagesGet(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
@@ -249,22 +148,7 @@ func (ir *imageRouter) getImagesGet(ctx context.Context, w http.ResponseWriter,
names = r.Form["names"]
}
var platform *ocispec.Platform
if versions.GreaterThanOrEqualTo(httputils.VersionFromContext(ctx), "1.48") {
if formPlatforms := r.Form["platform"]; len(formPlatforms) > 1 {
// TODO(thaJeztah): remove once we support multiple platforms: see https://github.com/moby/moby/issues/48759
return errdefs.InvalidParameter(errors.New("multiple platform parameters not supported"))
}
if formPlatform := r.Form.Get("platform"); formPlatform != "" {
p, err := httputils.DecodePlatform(formPlatform)
if err != nil {
return err
}
platform = p
}
}
if err := ir.backend.ExportImage(ctx, names, platform, output); err != nil {
if err := s.backend.ExportImage(names, output); err != nil {
if !output.Flushed() {
return err
}
@@ -273,32 +157,17 @@ func (ir *imageRouter) getImagesGet(ctx context.Context, w http.ResponseWriter,
return nil
}
func (ir *imageRouter) postImagesLoad(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
func (s *imageRouter) postImagesLoad(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
var platform *ocispec.Platform
if versions.GreaterThanOrEqualTo(httputils.VersionFromContext(ctx), "1.48") {
if formPlatforms := r.Form["platform"]; len(formPlatforms) > 1 {
// TODO(thaJeztah): remove once we support multiple platforms: see https://github.com/moby/moby/issues/48759
return errdefs.InvalidParameter(errors.New("multiple platform parameters not supported"))
}
if formPlatform := r.Form.Get("platform"); formPlatform != "" {
p, err := httputils.DecodePlatform(formPlatform)
if err != nil {
return err
}
platform = p
}
}
quiet := httputils.BoolValueOrDefault(r, "quiet", true)
w.Header().Set("Content-Type", "application/json")
output := ioutils.NewWriteFlusher(w)
defer output.Close()
if err := ir.backend.LoadImage(ctx, r.Body, platform, output, quiet); err != nil {
if err := s.backend.LoadImage(r.Body, output, quiet); err != nil {
_, _ = output.Write(streamformatter.FormatError(err))
}
return nil
@@ -312,7 +181,7 @@ func (missingImageError) Error() string {
func (missingImageError) InvalidParameter() {}
func (ir *imageRouter) deleteImages(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
func (s *imageRouter) deleteImages(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
@@ -326,20 +195,7 @@ func (ir *imageRouter) deleteImages(ctx context.Context, w http.ResponseWriter,
force := httputils.BoolValue(r, "force")
prune := !httputils.BoolValue(r, "noprune")
var platforms []ocispec.Platform
if versions.GreaterThanOrEqualTo(httputils.VersionFromContext(ctx), "1.50") {
p, err := httputils.DecodePlatforms(r.Form["platforms"])
if err != nil {
return err
}
platforms = p
}
list, err := ir.backend.ImageDelete(ctx, name, imagetypes.RemoveOptions{
Force: force,
PruneChildren: prune,
Platforms: platforms,
})
list, err := s.backend.ImageDelete(name, force, prune)
if err != nil {
return err
}
@@ -347,78 +203,100 @@ func (ir *imageRouter) deleteImages(ctx context.Context, w http.ResponseWriter,
return httputils.WriteJSON(w, http.StatusOK, list)
}
func (ir *imageRouter) getImagesByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
var manifests bool
if r.Form.Get("manifests") != "" && versions.GreaterThanOrEqualTo(httputils.VersionFromContext(ctx), "1.48") {
manifests = httputils.BoolValue(r, "manifests")
}
var platform *ocispec.Platform
if r.Form.Get("platform") != "" && versions.GreaterThanOrEqualTo(httputils.VersionFromContext(ctx), "1.49") {
p, err := httputils.DecodePlatform(r.Form.Get("platform"))
if err != nil {
return errdefs.InvalidParameter(err)
}
platform = p
}
if manifests && platform != nil {
return errdefs.InvalidParameter(errors.New("conflicting options: manifests and platform options cannot both be set"))
}
imageInspect, err := ir.backend.ImageInspect(ctx, vars["name"], backend.ImageInspectOpts{
Manifests: manifests,
Platform: platform,
})
func (s *imageRouter) getImagesByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
image, err := s.backend.GetImage(vars["name"], nil)
if err != nil {
return err
}
// Make sure we output empty arrays instead of nil. While Go nil slice is functionally equivalent to an empty slice,
// it matters for the JSON representation.
if imageInspect.RepoTags == nil {
imageInspect.RepoTags = []string{}
}
if imageInspect.RepoDigests == nil {
imageInspect.RepoDigests = []string{}
}
version := httputils.VersionFromContext(ctx)
if versions.LessThan(version, "1.44") {
imageInspect.VirtualSize = imageInspect.Size //nolint:staticcheck // ignore SA1019: field is deprecated, but still set on API < v1.44.
if imageInspect.Created == "" {
// backwards compatibility for Created not existing returning "0001-01-01T00:00:00Z"
// https://github.com/moby/moby/issues/47368
imageInspect.Created = time.Time{}.Format(time.RFC3339Nano)
}
}
if versions.GreaterThanOrEqualTo(version, "1.45") {
imageInspect.Container = "" //nolint:staticcheck // ignore SA1019: field is deprecated, but still set on API < v1.45.
imageInspect.ContainerConfig = nil //nolint:staticcheck // ignore SA1019: field is deprecated, but still set on API < v1.45.
}
if versions.LessThan(version, "1.48") {
imageInspect.Descriptor = nil
}
if versions.LessThan(version, "1.50") {
type imageInspectLegacy struct {
imagetypes.InspectResponse
LegacyConfig *container.Config `json:"Config"`
}
return httputils.WriteJSON(w, http.StatusOK, imageInspectLegacy{
InspectResponse: *imageInspect,
LegacyConfig: dockerOCIImageConfigToContainerConfig(*imageInspect.Config),
})
imageInspect, err := s.toImageInspect(image)
if err != nil {
return err
}
return httputils.WriteJSON(w, http.StatusOK, imageInspect)
}
func (ir *imageRouter) getImagesJSON(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
func (s *imageRouter) toImageInspect(img *image.Image) (*types.ImageInspect, error) {
refs := s.referenceBackend.References(img.ID().Digest())
repoTags := []string{}
repoDigests := []string{}
for _, ref := range refs {
switch ref.(type) {
case reference.NamedTagged:
repoTags = append(repoTags, reference.FamiliarString(ref))
case reference.Canonical:
repoDigests = append(repoDigests, reference.FamiliarString(ref))
}
}
var size int64
var layerMetadata map[string]string
layerID := img.RootFS.ChainID()
if layerID != "" {
l, err := s.layerStore.Get(layerID)
if err != nil {
return nil, err
}
defer layer.ReleaseAndLog(s.layerStore, l)
size = l.Size()
layerMetadata, err = l.Metadata()
if err != nil {
return nil, err
}
}
comment := img.Comment
if len(comment) == 0 && len(img.History) > 0 {
comment = img.History[len(img.History)-1].Comment
}
lastUpdated, err := s.imageStore.GetLastUpdated(img.ID())
if err != nil {
return nil, err
}
return &types.ImageInspect{
ID: img.ID().String(),
RepoTags: repoTags,
RepoDigests: repoDigests,
Parent: img.Parent.String(),
Comment: comment,
Created: img.Created.Format(time.RFC3339Nano),
Container: img.Container,
ContainerConfig: &img.ContainerConfig,
DockerVersion: img.DockerVersion,
Author: img.Author,
Config: img.Config,
Architecture: img.Architecture,
Variant: img.Variant,
Os: img.OperatingSystem(),
OsVersion: img.OSVersion,
Size: size,
VirtualSize: size, // TODO: field unused, deprecate
GraphDriver: types.GraphDriverData{
Name: s.layerStore.DriverName(),
Data: layerMetadata,
},
RootFS: rootFSToAPIType(img.RootFS),
Metadata: types.ImageMetadata{
LastTagTime: lastUpdated,
},
}, nil
}
func rootFSToAPIType(rootfs *image.RootFS) types.RootFS {
var layers []string
for _, l := range rootfs.DiffIDs {
layers = append(layers, l.String())
}
return types.RootFS{
Type: rootfs.Type,
Layers: layers,
}
}
func (s *imageRouter) getImagesJSON(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
@@ -443,65 +321,21 @@ func (ir *imageRouter) getImagesJSON(ctx context.Context, w http.ResponseWriter,
sharedSize = httputils.BoolValue(r, "shared-size")
}
var manifests bool
if versions.GreaterThanOrEqualTo(version, "1.47") {
manifests = httputils.BoolValue(r, "manifests")
}
images, err := ir.backend.Images(ctx, imagetypes.ListOptions{
images, err := s.backend.Images(ctx, types.ImageListOptions{
All: httputils.BoolValue(r, "all"),
Filters: imageFilters,
SharedSize: sharedSize,
Manifests: manifests,
})
if err != nil {
return err
}
useNone := versions.LessThan(version, "1.43")
withVirtualSize := versions.LessThan(version, "1.44")
noDescriptor := versions.LessThan(version, "1.48")
for _, img := range images {
if useNone {
if len(img.RepoTags) == 0 && len(img.RepoDigests) == 0 {
img.RepoTags = append(img.RepoTags, "<none>:<none>")
img.RepoDigests = append(img.RepoDigests, "<none>@<none>")
}
} else {
if img.RepoTags == nil {
img.RepoTags = []string{}
}
if img.RepoDigests == nil {
img.RepoDigests = []string{}
}
}
if withVirtualSize {
img.VirtualSize = img.Size //nolint:staticcheck // ignore SA1019: field is deprecated, but still set on API < v1.44.
}
if noDescriptor {
img.Descriptor = nil
}
}
return httputils.WriteJSON(w, http.StatusOK, images)
}
func (ir *imageRouter) getImagesHistory(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
var platform *ocispec.Platform
if versions.GreaterThanOrEqualTo(httputils.VersionFromContext(ctx), "1.48") {
if formPlatform := r.Form.Get("platform"); formPlatform != "" {
p, err := httputils.DecodePlatform(formPlatform)
if err != nil {
return err
}
platform = p
}
}
history, err := ir.backend.ImageHistory(ctx, vars["name"], platform)
func (s *imageRouter) getImagesHistory(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
name := vars["name"]
history, err := s.backend.ImageHistory(name)
if err != nil {
return err
}
@@ -509,37 +343,40 @@ func (ir *imageRouter) getImagesHistory(ctx context.Context, w http.ResponseWrit
return httputils.WriteJSON(w, http.StatusOK, history)
}
func (ir *imageRouter) postImagesTag(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
func (s *imageRouter) postImagesTag(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
ref, err := httputils.RepoTagReference(r.Form.Get("repo"), r.Form.Get("tag"))
if ref == nil || err != nil {
return errdefs.InvalidParameter(err)
}
refName := reference.FamiliarName(ref)
if refName == string(digest.Canonical) {
return errdefs.InvalidParameter(errors.New("refusing to create an ambiguous tag using digest algorithm as name"))
}
img, err := ir.backend.GetImage(ctx, vars["name"], backend.GetImageOpts{})
if err != nil {
return errdefs.NotFound(err)
}
if err := ir.backend.TagImage(ctx, img.ID(), ref); err != nil {
if _, err := s.backend.TagImage(vars["name"], r.Form.Get("repo"), r.Form.Get("tag")); err != nil {
return err
}
w.WriteHeader(http.StatusCreated)
return nil
}
func (ir *imageRouter) getImagesSearch(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
func (s *imageRouter) getImagesSearch(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
var (
config *types.AuthConfig
authEncoded = r.Header.Get("X-Registry-Auth")
headers = map[string][]string{}
)
if authEncoded != "" {
authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
if err := json.NewDecoder(authJSON).Decode(&config); err != nil {
// for a search it is not an error if no auth was given
// to increase compatibility with the existing api it is defaulting to be empty
config = &types.AuthConfig{}
}
}
for k, v := range r.Header {
if strings.HasPrefix(k, "X-Meta-") {
headers[k] = v
}
}
var limit int
if r.Form.Get("limit") != "" {
@@ -554,26 +391,14 @@ func (ir *imageRouter) getImagesSearch(ctx context.Context, w http.ResponseWrite
return err
}
// For a search it is not an error if no auth was given. Ignore invalid
// AuthConfig to increase compatibility with the existing API.
authConfig, _ := registry.DecodeAuthConfig(r.Header.Get(registry.AuthHeader))
headers := http.Header{}
for k, v := range r.Header {
k = http.CanonicalHeaderKey(k)
if strings.HasPrefix(k, "X-Meta-") {
headers[k] = v
}
}
headers.Set("User-Agent", dockerversion.DockerUserAgent(ctx))
res, err := ir.searcher.Search(ctx, searchFilters, r.Form.Get("term"), limit, authConfig, headers)
query, err := s.backend.SearchRegistryForImages(ctx, searchFilters, r.Form.Get("term"), limit, config, headers)
if err != nil {
return err
}
return httputils.WriteJSON(w, http.StatusOK, res)
return httputils.WriteJSON(w, http.StatusOK, query.Results)
}
func (ir *imageRouter) postImagesPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
func (s *imageRouter) postImagesPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
@@ -583,42 +408,9 @@ func (ir *imageRouter) postImagesPrune(ctx context.Context, w http.ResponseWrite
return err
}
pruneReport, err := ir.backend.ImagesPrune(ctx, pruneFilters)
pruneReport, err := s.backend.ImagesPrune(ctx, pruneFilters)
if err != nil {
return err
}
return httputils.WriteJSON(w, http.StatusOK, pruneReport)
}
// validateRepoName validates the name of a repository.
func validateRepoName(name reference.Named) error {
familiarName := reference.FamiliarName(name)
if familiarName == api.NoBaseImageSpecifier {
return fmt.Errorf("'%s' is a reserved name", familiarName)
}
return nil
}
// FIXME(thaJeztah): this is a copy of dockerOCIImageConfigToContainerConfig in daemon/containerd: https://github.com/moby/moby/blob/6b617699c500522aa6526cfcae4558333911b11f/daemon/containerd/imagespec.go#L107-L128
func dockerOCIImageConfigToContainerConfig(cfg dockerspec.DockerOCIImageConfig) *container.Config {
exposedPorts := make(nat.PortSet, len(cfg.ExposedPorts))
for k, v := range cfg.ExposedPorts {
exposedPorts[nat.Port(k)] = v
}
return &container.Config{
Entrypoint: cfg.Entrypoint,
Env: cfg.Env,
Cmd: cfg.Cmd,
User: cfg.User,
WorkingDir: cfg.WorkingDir,
ExposedPorts: exposedPorts,
Volumes: cfg.Volumes,
Labels: cfg.Labels,
ArgsEscaped: cfg.ArgsEscaped, //nolint:staticcheck // Ignore SA1019. Need to keep it in image.
StopSignal: cfg.StopSignal,
Healthcheck: cfg.Healthcheck,
OnBuild: cfg.OnBuild,
Shell: cfg.Shell,
}
}
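
Both sides of this file deal with the same wire format for registry credentials: the client sends a base64url-encoded JSON AuthConfig in the X-Registry-Auth header, and for pull and search a missing or undecodable value is treated as "no auth" rather than an error, for compatibility. The newer code delegates this to registry.DecodeAuthConfig; the older code decodes by hand. A stand-alone sketch of that manual path, kept close to the removed code (the struct is trimmed to two fields purely for illustration):

package main

import (
	"encoding/base64"
	"encoding/json"
	"strings"
)

// authConfig mirrors only the fields needed for the example; the real type
// is AuthConfig from api/types (older code) or api/types/registry (newer code).
type authConfig struct {
	Username string `json:"username"`
	Password string `json:"password"`
}

// decodeAuthHeader decodes an X-Registry-Auth header value. An empty or
// malformed value yields an empty config rather than an error, matching the
// lenient behaviour of the pull and search handlers above.
func decodeAuthHeader(encoded string) *authConfig {
	cfg := &authConfig{}
	if encoded == "" {
		return cfg
	}
	dec := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encoded)))
	if err := dec.Decode(cfg); err != nil {
		return &authConfig{}
	}
	return cfg
}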


@@ -3,28 +3,30 @@ package network // import "github.com/docker/docker/api/server/router/network"
import (
"context"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/libnetwork"
)
// Backend is all the methods that need to be implemented
// to provide network specific functionality.
type Backend interface {
GetNetworks(filters.Args, backend.NetworkListConfig) ([]network.Inspect, error)
CreateNetwork(ctx context.Context, nc network.CreateRequest) (*network.CreateResponse, error)
ConnectContainerToNetwork(ctx context.Context, containerName, networkName string, endpointConfig *network.EndpointSettings) error
FindNetwork(idName string) (libnetwork.Network, error)
GetNetworks(filters.Args, types.NetworkListConfig) ([]types.NetworkResource, error)
CreateNetwork(nc types.NetworkCreateRequest) (*types.NetworkCreateResponse, error)
ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error
DisconnectContainerFromNetwork(containerName string, networkName string, force bool) error
DeleteNetwork(networkID string) error
NetworksPrune(ctx context.Context, pruneFilters filters.Args) (*network.PruneReport, error)
NetworksPrune(ctx context.Context, pruneFilters filters.Args) (*types.NetworksPruneReport, error)
}
// ClusterBackend is all the methods that need to be implemented
// to provide cluster network specific functionality.
type ClusterBackend interface {
GetNetworks(filters.Args) ([]network.Inspect, error)
GetNetwork(name string) (network.Inspect, error)
GetNetworksByName(name string) ([]network.Inspect, error)
CreateNetwork(nc network.CreateRequest) (string, error)
GetNetworks(filters.Args) ([]types.NetworkResource, error)
GetNetwork(name string) (types.NetworkResource, error)
GetNetworksByName(name string) ([]types.NetworkResource, error)
CreateNetwork(nc types.NetworkCreateRequest) (string, error)
RemoveNetwork(name string) error
}
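
The network handlers in the next file wrap backend errors with the errdefs helpers seen throughout this diff (errdefs.InvalidParameter, errdefs.Conflict, errdefs.NotFound), so the HTTP layer can choose a status code without parsing error strings. A rough sketch of that classification, assuming only the Is* predicates from github.com/docker/docker/errdefs; the actual mapping lives in the API server's error middleware, not in these routers:

package main

import (
	"net/http"

	"github.com/docker/docker/errdefs"
)

// statusFor maps an errdefs-classified error onto an HTTP status code,
// which is why handlers wrap errors with errdefs.InvalidParameter,
// errdefs.Conflict and errdefs.NotFound instead of returning them bare.
func statusFor(err error) int {
	switch {
	case err == nil:
		return http.StatusOK
	case errdefs.IsInvalidParameter(err):
		return http.StatusBadRequest
	case errdefs.IsConflict(err):
		return http.StatusConflict
	case errdefs.IsNotFound(err):
		return http.StatusNotFound
	default:
		return http.StatusInternalServerError
	}
}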


@@ -0,0 +1 @@
package network // import "github.com/docker/docker/api/server/router/network"


@@ -22,22 +22,22 @@ func NewRouter(b Backend, c ClusterBackend) router.Router {
}
// Routes returns the available routes to the network controller
func (n *networkRouter) Routes() []router.Route {
return n.routes
func (r *networkRouter) Routes() []router.Route {
return r.routes
}
func (n *networkRouter) initRoutes() {
n.routes = []router.Route{
func (r *networkRouter) initRoutes() {
r.routes = []router.Route{
// GET
router.NewGetRoute("/networks", n.getNetworksList),
router.NewGetRoute("/networks/", n.getNetworksList),
router.NewGetRoute("/networks/{id:.+}", n.getNetwork),
router.NewGetRoute("/networks", r.getNetworksList),
router.NewGetRoute("/networks/", r.getNetworksList),
router.NewGetRoute("/networks/{id:.+}", r.getNetwork),
// POST
router.NewPostRoute("/networks/create", n.postNetworkCreate),
router.NewPostRoute("/networks/{id:.*}/connect", n.postNetworkConnect),
router.NewPostRoute("/networks/{id:.*}/disconnect", n.postNetworkDisconnect),
router.NewPostRoute("/networks/prune", n.postNetworksPrune),
router.NewPostRoute("/networks/create", r.postNetworkCreate),
router.NewPostRoute("/networks/{id:.*}/connect", r.postNetworkConnect),
router.NewPostRoute("/networks/{id:.*}/disconnect", r.postNetworkDisconnect),
router.NewPostRoute("/networks/prune", r.postNetworksPrune),
// DELETE
router.NewDeleteRoute("/networks/{id:.*}", n.deleteNetwork),
router.NewDeleteRoute("/networks/{id:.*}", r.deleteNetwork),
}
}


@@ -7,13 +7,13 @@ import (
"strings"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/libnetwork"
"github.com/docker/docker/libnetwork/scope"
netconst "github.com/docker/docker/libnetwork/datastore"
"github.com/pkg/errors"
)
@@ -31,7 +31,7 @@ func (n *networkRouter) getNetworksList(ctx context.Context, w http.ResponseWrit
return err
}
var list []network.Summary
var list []types.NetworkResource
nr, err := n.cluster.GetNetworks(filter)
if err == nil {
list = nr
@@ -39,7 +39,7 @@ func (n *networkRouter) getNetworksList(ctx context.Context, w http.ResponseWrit
// Combine the network list returned by Docker daemon if it is not already
// returned by the cluster manager
localNetworks, err := n.backend.GetNetworks(filter, backend.NetworkListConfig{Detailed: versions.LessThan(httputils.VersionFromContext(ctx), "1.28")})
localNetworks, err := n.backend.GetNetworks(filter, types.NetworkListConfig{Detailed: versions.LessThan(httputils.VersionFromContext(ctx), "1.28")})
if err != nil {
return err
}
@@ -59,7 +59,7 @@ func (n *networkRouter) getNetworksList(ctx context.Context, w http.ResponseWrit
}
if list == nil {
list = []network.Summary{}
list = []types.NetworkResource{}
}
return httputils.WriteJSON(w, http.StatusOK, list)
@@ -75,13 +75,17 @@ func (e invalidRequestError) Error() string {
func (e invalidRequestError) InvalidParameter() {}
type ambiguousResultsError string
type ambigousResultsError string
func (e ambiguousResultsError) Error() string {
func (e ambigousResultsError) Error() string {
return "network " + string(e) + " is ambiguous"
}
func (ambiguousResultsError) InvalidParameter() {}
func (ambigousResultsError) InvalidParameter() {}
func nameConflict(name string) error {
return errdefs.Conflict(libnetwork.NetworkNameError(name))
}
func (n *networkRouter) getNetwork(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
@@ -98,7 +102,7 @@ func (n *networkRouter) getNetwork(ctx context.Context, w http.ResponseWriter, r
return errors.Wrapf(invalidRequestError{err}, "invalid value for verbose: %s", v)
}
}
networkScope := r.URL.Query().Get("scope")
scope := r.URL.Query().Get("scope")
// In case multiple networks have duplicate names, return error.
// TODO (yongtang): should we wrap with version here for backward compatibility?
@@ -108,29 +112,29 @@ func (n *networkRouter) getNetwork(ctx context.Context, w http.ResponseWriter, r
// For full name and partial ID, save the result first, and process later
// in case multiple records was found based on the same term
listByFullName := map[string]network.Inspect{}
listByPartialID := map[string]network.Inspect{}
listByFullName := map[string]types.NetworkResource{}
listByPartialID := map[string]types.NetworkResource{}
// TODO(@cpuguy83): All this logic for figuring out which network to return does not belong here
// Instead there should be a backend function to just get one network.
filter := filters.NewArgs(filters.Arg("idOrName", term))
if networkScope != "" {
filter.Add("scope", networkScope)
if scope != "" {
filter.Add("scope", scope)
}
networks, _ := n.backend.GetNetworks(filter, backend.NetworkListConfig{Detailed: true, Verbose: verbose})
for _, nw := range networks {
if nw.ID == term {
return httputils.WriteJSON(w, http.StatusOK, nw)
nw, _ := n.backend.GetNetworks(filter, types.NetworkListConfig{Detailed: true, Verbose: verbose})
for _, network := range nw {
if network.ID == term {
return httputils.WriteJSON(w, http.StatusOK, network)
}
if nw.Name == term {
if network.Name == term {
// No need to check the ID collision here as we are still in
// local scope and the network ID is unique in this scope.
listByFullName[nw.ID] = nw
listByFullName[network.ID] = network
}
if strings.HasPrefix(nw.ID, term) {
if strings.HasPrefix(network.ID, term) {
// No need to check the ID collision here as we are still in
// local scope and the network ID is unique in this scope.
listByPartialID[nw.ID] = nw
listByPartialID[network.ID] = network
}
}
@@ -140,37 +144,37 @@ func (n *networkRouter) getNetwork(ctx context.Context, w http.ResponseWriter, r
// or if the get network was passed with a network name and scope as swarm
// return the network. Skipped using isMatchingScope because it is true if the scope
// is not set which would be case if the client API v1.30
if strings.HasPrefix(nwk.ID, term) || networkScope == scope.Swarm {
if strings.HasPrefix(nwk.ID, term) || (netconst.SwarmScope == scope) {
// If we have a previous match "backend", return it, we need verbose when enabled
// ex: overlay/partial_ID or name/swarm_scope
if nwv, ok := listByPartialID[nwk.ID]; ok {
nwk = nwv
} else if nwv, ok = listByFullName[nwk.ID]; ok {
} else if nwv, ok := listByFullName[nwk.ID]; ok {
nwk = nwv
}
return httputils.WriteJSON(w, http.StatusOK, nwk)
}
}
networks, _ = n.cluster.GetNetworks(filter)
for _, nw := range networks {
if nw.ID == term {
return httputils.WriteJSON(w, http.StatusOK, nw)
nr, _ := n.cluster.GetNetworks(filter)
for _, network := range nr {
if network.ID == term {
return httputils.WriteJSON(w, http.StatusOK, network)
}
if nw.Name == term {
if network.Name == term {
// Check the ID collision as we are in swarm scope here, and
// the map (of the listByFullName) may have already had a
// network with the same ID (from local scope previously)
if _, ok := listByFullName[nw.ID]; !ok {
listByFullName[nw.ID] = nw
if _, ok := listByFullName[network.ID]; !ok {
listByFullName[network.ID] = network
}
}
if strings.HasPrefix(nw.ID, term) {
if strings.HasPrefix(network.ID, term) {
// Check the ID collision as we are in swarm scope here, and
// the map (of the listByPartialID) may have already had a
// network with the same ID (from local scope previously)
if _, ok := listByPartialID[nw.ID]; !ok {
listByPartialID[nw.ID] = nw
if _, ok := listByPartialID[network.ID]; !ok {
listByPartialID[network.ID] = network
}
}
}
@@ -182,7 +186,7 @@ func (n *networkRouter) getNetwork(ctx context.Context, w http.ResponseWriter, r
}
}
if len(listByFullName) > 1 {
return errors.Wrapf(ambiguousResultsError(term), "%d matches found based on name", len(listByFullName))
return errors.Wrapf(ambigousResultsError(term), "%d matches found based on name", len(listByFullName))
}
// Find based on partial ID, returns true only if no duplicates
@@ -192,7 +196,7 @@ func (n *networkRouter) getNetwork(ctx context.Context, w http.ResponseWriter, r
}
}
if len(listByPartialID) > 1 {
return errors.Wrapf(ambiguousResultsError(term), "%d matches found based on ID prefix", len(listByPartialID))
return errors.Wrapf(ambigousResultsError(term), "%d matches found based on ID prefix", len(listByPartialID))
}
return libnetwork.ErrNoSuchNetwork(term)
@@ -203,28 +207,27 @@ func (n *networkRouter) postNetworkCreate(ctx context.Context, w http.ResponseWr
return err
}
var create network.CreateRequest
var create types.NetworkCreateRequest
if err := httputils.ReadJSON(r, &create); err != nil {
return err
}
if nws, err := n.cluster.GetNetworksByName(create.Name); err == nil && len(nws) > 0 {
return libnetwork.NetworkNameError(create.Name)
return nameConflict(create.Name)
}
version := httputils.VersionFromContext(ctx)
// EnableIPv4 was introduced in API 1.48.
if versions.LessThan(version, "1.48") {
create.EnableIPv4 = nil
}
// For a Swarm-scoped network, this call to backend.CreateNetwork is used to
// validate the configuration. The network will not be created but, if the
// configuration is valid, ManagerRedirectError will be returned and handled
// below.
nw, err := n.backend.CreateNetwork(ctx, create)
nw, err := n.backend.CreateNetwork(create)
if err != nil {
var warning string
if _, ok := err.(libnetwork.NetworkNameError); ok {
// check if user defined CheckDuplicate, if set true, return err
// otherwise prepare a warning message
if create.CheckDuplicate {
return nameConflict(create.Name)
}
warning = libnetwork.NetworkNameError(create.Name).Error()
}
if _, ok := err.(libnetwork.ManagerRedirectError); !ok {
return err
}
@@ -232,8 +235,9 @@ func (n *networkRouter) postNetworkCreate(ctx context.Context, w http.ResponseWr
if err != nil {
return err
}
nw = &network.CreateResponse{
ID: id,
nw = &types.NetworkCreateResponse{
ID: id,
Warning: warning,
}
}
@@ -245,7 +249,7 @@ func (n *networkRouter) postNetworkConnect(ctx context.Context, w http.ResponseW
return err
}
var connect network.ConnectOptions
var connect types.NetworkConnect
if err := httputils.ReadJSON(r, &connect); err != nil {
return err
}
@@ -254,7 +258,7 @@ func (n *networkRouter) postNetworkConnect(ctx context.Context, w http.ResponseW
// The reason is that, In case of attachable network in swarm scope, the actual local network
// may not be available at the time. At the same time, inside daemon `ConnectContainerToNetwork`
// does the ambiguity check anyway. Therefore, passing the name to daemon would be enough.
return n.backend.ConnectContainerToNetwork(ctx, connect.Container, vars["id"], connect.EndpointConfig)
return n.backend.ConnectContainerToNetwork(connect.Container, vars["id"], connect.EndpointConfig)
}
func (n *networkRouter) postNetworkDisconnect(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
@@ -262,7 +266,7 @@ func (n *networkRouter) postNetworkDisconnect(ctx context.Context, w http.Respon
return err
}
var disconnect network.DisconnectOptions
var disconnect types.NetworkDisconnect
if err := httputils.ReadJSON(r, &disconnect); err != nil {
return err
}
@@ -317,47 +321,47 @@ func (n *networkRouter) postNetworksPrune(ctx context.Context, w http.ResponseWr
// For full name and partial ID, save the result first, and process later
// in case multiple records was found based on the same term
// TODO (yongtang): should we wrap with version here for backward compatibility?
func (n *networkRouter) findUniqueNetwork(term string) (network.Inspect, error) {
listByFullName := map[string]network.Inspect{}
listByPartialID := map[string]network.Inspect{}
func (n *networkRouter) findUniqueNetwork(term string) (types.NetworkResource, error) {
listByFullName := map[string]types.NetworkResource{}
listByPartialID := map[string]types.NetworkResource{}
filter := filters.NewArgs(filters.Arg("idOrName", term))
networks, _ := n.backend.GetNetworks(filter, backend.NetworkListConfig{Detailed: true})
for _, nw := range networks {
if nw.ID == term {
return nw, nil
nw, _ := n.backend.GetNetworks(filter, types.NetworkListConfig{Detailed: true})
for _, network := range nw {
if network.ID == term {
return network, nil
}
if nw.Name == term && !nw.Ingress {
if network.Name == term && !network.Ingress {
// No need to check the ID collision here as we are still in
// local scope and the network ID is unique in this scope.
listByFullName[nw.ID] = nw
listByFullName[network.ID] = network
}
if strings.HasPrefix(nw.ID, term) {
if strings.HasPrefix(network.ID, term) {
// No need to check the ID collision here as we are still in
// local scope and the network ID is unique in this scope.
listByPartialID[nw.ID] = nw
listByPartialID[network.ID] = network
}
}
networks, _ = n.cluster.GetNetworks(filter)
for _, nw := range networks {
if nw.ID == term {
return nw, nil
nr, _ := n.cluster.GetNetworks(filter)
for _, network := range nr {
if network.ID == term {
return network, nil
}
if nw.Name == term {
if network.Name == term {
// Check the ID collision as we are in swarm scope here, and
// the map (of the listByFullName) may have already had a
// network with the same ID (from local scope previously)
if _, ok := listByFullName[nw.ID]; !ok {
listByFullName[nw.ID] = nw
if _, ok := listByFullName[network.ID]; !ok {
listByFullName[network.ID] = network
}
}
if strings.HasPrefix(nw.ID, term) {
if strings.HasPrefix(network.ID, term) {
// Check the ID collision as we are in swarm scope here, and
// the map (of the listByPartialID) may have already had a
// network with the same ID (from local scope previously)
if _, ok := listByPartialID[nw.ID]; !ok {
listByPartialID[nw.ID] = nw
if _, ok := listByPartialID[network.ID]; !ok {
listByPartialID[network.ID] = network
}
}
}
@@ -369,7 +373,7 @@ func (n *networkRouter) findUniqueNetwork(term string) (network.Inspect, error)
}
}
if len(listByFullName) > 1 {
return network.Inspect{}, errdefs.InvalidParameter(errors.Errorf("network %s is ambiguous (%d matches found based on name)", term, len(listByFullName)))
return types.NetworkResource{}, errdefs.InvalidParameter(errors.Errorf("network %s is ambiguous (%d matches found based on name)", term, len(listByFullName)))
}
// Find based on partial ID, returns true only if no duplicates
@@ -379,8 +383,8 @@ func (n *networkRouter) findUniqueNetwork(term string) (network.Inspect, error)
}
}
if len(listByPartialID) > 1 {
return network.Inspect{}, errdefs.InvalidParameter(errors.Errorf("network %s is ambiguous (%d matches found based on ID prefix)", term, len(listByPartialID)))
return types.NetworkResource{}, errdefs.InvalidParameter(errors.Errorf("network %s is ambiguous (%d matches found based on ID prefix)", term, len(listByPartialID)))
}
return network.Inspect{}, errdefs.NotFound(libnetwork.ErrNoSuchNetwork(term))
return types.NetworkResource{}, errdefs.NotFound(libnetwork.ErrNoSuchNetwork(term))
}
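
Both versions of getNetwork and findUniqueNetwork above implement the same resolution order: an exact ID match wins immediately; otherwise candidates are collected by full name and by ID prefix, and more than one candidate in either bucket is reported as ambiguous. A compact sketch of that algorithm over a plain slice, with the types reduced to a minimal struct (the names here are illustrative, not from this diff):

package main

import "strings"

type netSummary struct {
	ID   string
	Name string
}

// findUnique resolves term against a list of networks: exact ID first,
// then a unique full-name match, then a unique ID-prefix match.
func findUnique(networks []netSummary, term string) (netSummary, bool) {
	byName := map[string]netSummary{}
	byPrefix := map[string]netSummary{}
	for _, nw := range networks {
		if nw.ID == term {
			return nw, true // exact ID match always wins
		}
		if nw.Name == term {
			byName[nw.ID] = nw
		}
		if strings.HasPrefix(nw.ID, term) {
			byPrefix[nw.ID] = nw
		}
	}
	if len(byName) == 1 {
		for _, nw := range byName {
			return nw, true
		}
	}
	if len(byPrefix) == 1 {
		for _, nw := range byPrefix {
			return nw, true
		}
	}
	// Zero matches or an ambiguous result; the real handlers distinguish
	// these cases (NotFound vs. InvalidParameter/ambiguous errors).
	return netSummary{}, false
}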


@@ -5,25 +5,23 @@ import (
"io"
"net/http"
"github.com/distribution/reference"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
"github.com/docker/distribution/reference"
enginetypes "github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/registry"
"github.com/docker/docker/plugin"
)
// Backend for Plugin
type Backend interface {
Disable(name string, config *backend.PluginDisableConfig) error
Enable(name string, config *backend.PluginEnableConfig) error
List(filters.Args) ([]types.Plugin, error)
Inspect(name string) (*types.Plugin, error)
Remove(name string, config *backend.PluginRmConfig) error
Disable(name string, config *enginetypes.PluginDisableConfig) error
Enable(name string, config *enginetypes.PluginEnableConfig) error
List(filters.Args) ([]enginetypes.Plugin, error)
Inspect(name string) (*enginetypes.Plugin, error)
Remove(name string, config *enginetypes.PluginRmConfig) error
Set(name string, args []string) error
Privileges(ctx context.Context, ref reference.Named, metaHeaders http.Header, authConfig *registry.AuthConfig) (types.PluginPrivileges, error)
Pull(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *registry.AuthConfig, privileges types.PluginPrivileges, outStream io.Writer, opts ...plugin.CreateOpt) error
Push(ctx context.Context, name string, metaHeaders http.Header, authConfig *registry.AuthConfig, outStream io.Writer) error
Upgrade(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *registry.AuthConfig, privileges types.PluginPrivileges, outStream io.Writer) error
CreateFromContext(ctx context.Context, tarCtx io.ReadCloser, options *types.PluginCreateOptions) error
Privileges(ctx context.Context, ref reference.Named, metaHeaders http.Header, authConfig *enginetypes.AuthConfig) (enginetypes.PluginPrivileges, error)
Pull(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer, opts ...plugin.CreateOpt) error
Push(ctx context.Context, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, outStream io.Writer) error
Upgrade(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer) error
CreateFromContext(ctx context.Context, tarCtx io.ReadCloser, options *enginetypes.PluginCreateOptions) error
}


@@ -18,22 +18,22 @@ func NewRouter(b Backend) router.Router {
}
// Routes returns the available routers to the plugin controller
func (pr *pluginRouter) Routes() []router.Route {
return pr.routes
func (r *pluginRouter) Routes() []router.Route {
return r.routes
}
func (pr *pluginRouter) initRoutes() {
pr.routes = []router.Route{
router.NewGetRoute("/plugins", pr.listPlugins),
router.NewGetRoute("/plugins/{name:.*}/json", pr.inspectPlugin),
router.NewGetRoute("/plugins/privileges", pr.getPrivileges),
router.NewDeleteRoute("/plugins/{name:.*}", pr.removePlugin),
router.NewPostRoute("/plugins/{name:.*}/enable", pr.enablePlugin),
router.NewPostRoute("/plugins/{name:.*}/disable", pr.disablePlugin),
router.NewPostRoute("/plugins/pull", pr.pullPlugin),
router.NewPostRoute("/plugins/{name:.*}/push", pr.pushPlugin),
router.NewPostRoute("/plugins/{name:.*}/upgrade", pr.upgradePlugin),
router.NewPostRoute("/plugins/{name:.*}/set", pr.setPlugin),
router.NewPostRoute("/plugins/create", pr.createPlugin),
func (r *pluginRouter) initRoutes() {
r.routes = []router.Route{
router.NewGetRoute("/plugins", r.listPlugins),
router.NewGetRoute("/plugins/{name:.*}/json", r.inspectPlugin),
router.NewGetRoute("/plugins/privileges", r.getPrivileges),
router.NewDeleteRoute("/plugins/{name:.*}", r.removePlugin),
router.NewPostRoute("/plugins/{name:.*}/enable", r.enablePlugin),
router.NewPostRoute("/plugins/{name:.*}/disable", r.disablePlugin),
router.NewPostRoute("/plugins/pull", r.pullPlugin),
router.NewPostRoute("/plugins/{name:.*}/push", r.pushPlugin),
router.NewPostRoute("/plugins/{name:.*}/upgrade", r.upgradePlugin),
router.NewPostRoute("/plugins/{name:.*}/set", r.setPlugin),
router.NewPostRoute("/plugins/create", r.createPlugin),
}
}


@@ -2,22 +2,23 @@ package plugin // import "github.com/docker/docker/api/server/router/plugin"
import (
"context"
"encoding/base64"
"encoding/json"
"net/http"
"strconv"
"strings"
"github.com/distribution/reference"
"github.com/docker/distribution/reference"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/registry"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/streamformatter"
"github.com/pkg/errors"
)
func parseHeaders(headers http.Header) (map[string][]string, *registry.AuthConfig) {
func parseHeaders(headers http.Header) (map[string][]string, *types.AuthConfig) {
metaHeaders := map[string][]string{}
for k, v := range headers {
if strings.HasPrefix(k, "X-Meta-") {
@@ -25,8 +26,16 @@ func parseHeaders(headers http.Header) (map[string][]string, *registry.AuthConfi
}
}
// Ignore invalid AuthConfig to increase compatibility with the existing API.
authConfig, _ := registry.DecodeAuthConfig(headers.Get(registry.AuthHeader))
// Get X-Registry-Auth
authEncoded := headers.Get("X-Registry-Auth")
authConfig := &types.AuthConfig{}
if authEncoded != "" {
authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil {
authConfig = &types.AuthConfig{}
}
}
return metaHeaders, authConfig
}
@@ -187,8 +196,7 @@ func (pr *pluginRouter) createPlugin(ctx context.Context, w http.ResponseWriter,
}
options := &types.PluginCreateOptions{
RepoName: r.FormValue("name"),
}
RepoName: r.FormValue("name")}
if err := pr.backend.CreateFromContext(ctx, r.Body, options); err != nil {
return err
@@ -208,7 +216,7 @@ func (pr *pluginRouter) enablePlugin(ctx context.Context, w http.ResponseWriter,
if err != nil {
return err
}
config := &backend.PluginEnableConfig{Timeout: timeout}
config := &types.PluginEnableConfig{Timeout: timeout}
return pr.backend.Enable(name, config)
}
@@ -219,7 +227,7 @@ func (pr *pluginRouter) disablePlugin(ctx context.Context, w http.ResponseWriter
}
name := vars["name"]
config := &backend.PluginDisableConfig{
config := &types.PluginDisableConfig{
ForceDisable: httputils.BoolValue(r, "force"),
}
@@ -232,7 +240,7 @@ func (pr *pluginRouter) removePlugin(ctx context.Context, w http.ResponseWriter,
}
name := vars["name"]
config := &backend.PluginRmConfig{
config := &types.PluginRmConfig{
ForceRemove: httputils.BoolValue(r, "force"),
}
return pr.backend.Remove(name, config)


@@ -18,12 +18,12 @@ func NewRouter(b Backend) router.Router {
}
// Routes returns the available routers to the session controller
func (sr *sessionRouter) Routes() []router.Route {
return sr.routes
func (r *sessionRouter) Routes() []router.Route {
return r.routes
}
func (sr *sessionRouter) initRoutes() {
sr.routes = []router.Route{
router.NewPostRoute("/session", sr.startSession),
func (r *sessionRouter) initRoutes() {
r.routes = []router.Route{
router.NewPostRoute("/session", r.startSession),
}
}


@@ -3,40 +3,46 @@ package swarm // import "github.com/docker/docker/api/server/router/swarm"
import (
"context"
basictypes "github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/swarm"
types "github.com/docker/docker/api/types/swarm"
)
// Backend abstracts a swarm manager.
type Backend interface {
Init(req swarm.InitRequest) (string, error)
Join(req swarm.JoinRequest) error
Leave(ctx context.Context, force bool) error
Inspect() (swarm.Swarm, error)
Update(uint64, swarm.Spec, swarm.UpdateFlags) error
Init(req types.InitRequest) (string, error)
Join(req types.JoinRequest) error
Leave(force bool) error
Inspect() (types.Swarm, error)
Update(uint64, types.Spec, types.UpdateFlags) error
GetUnlockKey() (string, error)
UnlockSwarm(req swarm.UnlockRequest) error
GetServices(swarm.ServiceListOptions) ([]swarm.Service, error)
GetService(idOrName string, insertDefaults bool) (swarm.Service, error)
CreateService(swarm.ServiceSpec, string, bool) (*swarm.ServiceCreateResponse, error)
UpdateService(string, uint64, swarm.ServiceSpec, swarm.ServiceUpdateOptions, bool) (*swarm.ServiceUpdateResponse, error)
UnlockSwarm(req types.UnlockRequest) error
GetServices(basictypes.ServiceListOptions) ([]types.Service, error)
GetService(idOrName string, insertDefaults bool) (types.Service, error)
CreateService(types.ServiceSpec, string, bool) (*basictypes.ServiceCreateResponse, error)
UpdateService(string, uint64, types.ServiceSpec, basictypes.ServiceUpdateOptions, bool) (*basictypes.ServiceUpdateResponse, error)
RemoveService(string) error
ServiceLogs(context.Context, *backend.LogSelector, *container.LogsOptions) (<-chan *backend.LogMessage, error)
GetNodes(swarm.NodeListOptions) ([]swarm.Node, error)
GetNode(string) (swarm.Node, error)
UpdateNode(string, uint64, swarm.NodeSpec) error
ServiceLogs(context.Context, *backend.LogSelector, *basictypes.ContainerLogsOptions) (<-chan *backend.LogMessage, error)
GetNodes(basictypes.NodeListOptions) ([]types.Node, error)
GetNode(string) (types.Node, error)
UpdateNode(string, uint64, types.NodeSpec) error
RemoveNode(string, bool) error
GetTasks(swarm.TaskListOptions) ([]swarm.Task, error)
GetTask(string) (swarm.Task, error)
GetSecrets(opts swarm.SecretListOptions) ([]swarm.Secret, error)
CreateSecret(s swarm.SecretSpec) (string, error)
GetTasks(basictypes.TaskListOptions) ([]types.Task, error)
GetTask(string) (types.Task, error)
GetSecrets(opts basictypes.SecretListOptions) ([]types.Secret, error)
CreateSecret(s types.SecretSpec) (string, error)
RemoveSecret(idOrName string) error
GetSecret(id string) (swarm.Secret, error)
UpdateSecret(idOrName string, version uint64, spec swarm.SecretSpec) error
GetConfigs(opts swarm.ConfigListOptions) ([]swarm.Config, error)
CreateConfig(s swarm.ConfigSpec) (string, error)
GetSecret(id string) (types.Secret, error)
UpdateSecret(idOrName string, version uint64, spec types.SecretSpec) error
GetConfigs(opts basictypes.ConfigListOptions) ([]types.Config, error)
CreateConfig(s types.ConfigSpec) (string, error)
RemoveConfig(id string) error
GetConfig(id string) (swarm.Config, error)
UpdateConfig(idOrName string, version uint64, spec swarm.ConfigSpec) error
GetConfig(id string) (types.Config, error)
UpdateConfig(idOrName string, version uint64, spec types.ConfigSpec) error
}


@@ -6,15 +6,15 @@ import (
"net/http"
"strconv"
"github.com/containerd/log"
"github.com/docker/docker/api/server/httputils"
basictypes "github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/registry"
types "github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/errdefs"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
func (sr *swarmRouter) initCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
@@ -35,7 +35,7 @@ func (sr *swarmRouter) initCluster(ctx context.Context, w http.ResponseWriter, r
}
nodeID, err := sr.backend.Init(req)
if err != nil {
log.G(ctx).WithContext(ctx).WithError(err).Debug("Error initializing swarm")
logrus.Errorf("Error initializing swarm: %v", err)
return err
}
return httputils.WriteJSON(w, http.StatusOK, nodeID)
@@ -55,13 +55,13 @@ func (sr *swarmRouter) leaveCluster(ctx context.Context, w http.ResponseWriter,
}
force := httputils.BoolValue(r, "force")
return sr.backend.Leave(ctx, force)
return sr.backend.Leave(force)
}
func (sr *swarmRouter) inspectCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
swarm, err := sr.backend.Inspect()
if err != nil {
log.G(ctx).WithContext(ctx).WithError(err).Debug("Error getting swarm")
logrus.Errorf("Error getting swarm: %v", err)
return err
}
@@ -113,7 +113,7 @@ func (sr *swarmRouter) updateCluster(ctx context.Context, w http.ResponseWriter,
}
if err := sr.backend.Update(version, swarm, flags); err != nil {
log.G(ctx).WithContext(ctx).WithError(err).Debug("Error configuring swarm")
logrus.Errorf("Error configuring swarm: %v", err)
return err
}
return nil
@@ -126,7 +126,7 @@ func (sr *swarmRouter) unlockCluster(ctx context.Context, w http.ResponseWriter,
}
if err := sr.backend.UnlockSwarm(req); err != nil {
log.G(ctx).WithContext(ctx).WithError(err).Debug("Error unlocking swarm")
logrus.Errorf("Error unlocking swarm: %v", err)
return err
}
return nil
@@ -135,11 +135,11 @@ func (sr *swarmRouter) unlockCluster(ctx context.Context, w http.ResponseWriter,
func (sr *swarmRouter) getUnlockKey(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
unlockKey, err := sr.backend.GetUnlockKey()
if err != nil {
log.G(ctx).WithContext(ctx).WithError(err).Debug("Error retrieving swarm unlock key")
logrus.WithError(err).Errorf("Error retrieving swarm unlock key")
return err
}
return httputils.WriteJSON(w, http.StatusOK, &types.UnlockKeyResponse{
return httputils.WriteJSON(w, http.StatusOK, &basictypes.SwarmUnlockKeyResponse{
UnlockKey: unlockKey,
})
}
@@ -165,9 +165,9 @@ func (sr *swarmRouter) getServices(ctx context.Context, w http.ResponseWriter, r
}
}
services, err := sr.backend.GetServices(types.ServiceListOptions{Filters: filter, Status: status})
services, err := sr.backend.GetServices(basictypes.ServiceListOptions{Filters: filter, Status: status})
if err != nil {
log.G(ctx).WithContext(ctx).WithError(err).Debug("Error getting services")
logrus.Errorf("Error getting services: %v", err)
return err
}
@@ -193,10 +193,7 @@ func (sr *swarmRouter) getService(ctx context.Context, w http.ResponseWriter, r
service, err := sr.backend.GetService(vars["id"], insertDefaults)
if err != nil {
log.G(ctx).WithContext(ctx).WithFields(log.Fields{
"error": err,
"service-id": vars["id"],
}).Debug("Error getting service")
logrus.Errorf("Error getting service %s: %v", vars["id"], err)
return err
}
@@ -210,7 +207,7 @@ func (sr *swarmRouter) createService(ctx context.Context, w http.ResponseWriter,
}
// Get returns "" if the header does not exist
encodedAuth := r.Header.Get(registry.AuthHeader)
encodedAuth := r.Header.Get("X-Registry-Auth")
queryRegistry := false
if v := httputils.VersionFromContext(ctx); v != "" {
if versions.LessThan(v, "1.30") {
@@ -218,13 +215,9 @@ func (sr *swarmRouter) createService(ctx context.Context, w http.ResponseWriter,
}
adjustForAPIVersion(v, &service)
}
resp, err := sr.backend.CreateService(service, encodedAuth, queryRegistry)
if err != nil {
log.G(ctx).WithFields(log.Fields{
"error": err,
"service-name": service.Name,
}).Debug("Error creating service")
logrus.Errorf("Error creating service %s: %v", service.Name, err)
return err
}
@@ -244,10 +237,10 @@ func (sr *swarmRouter) updateService(ctx context.Context, w http.ResponseWriter,
return errdefs.InvalidParameter(err)
}
var flags types.ServiceUpdateOptions
var flags basictypes.ServiceUpdateOptions
// Get returns "" if the header does not exist
flags.EncodedRegistryAuth = r.Header.Get(registry.AuthHeader)
flags.EncodedRegistryAuth = r.Header.Get("X-Registry-Auth")
flags.RegistryAuthFrom = r.URL.Query().Get("registryAuthFrom")
flags.Rollback = r.URL.Query().Get("rollback")
queryRegistry := false
@@ -260,10 +253,7 @@ func (sr *swarmRouter) updateService(ctx context.Context, w http.ResponseWriter,
resp, err := sr.backend.UpdateService(vars["id"], version, service, flags, queryRegistry)
if err != nil {
log.G(ctx).WithContext(ctx).WithFields(log.Fields{
"error": err,
"service-id": vars["id"],
}).Debug("Error updating service")
logrus.Errorf("Error updating service %s: %v", vars["id"], err)
return err
}
return httputils.WriteJSON(w, http.StatusOK, resp)
@@ -271,10 +261,7 @@ func (sr *swarmRouter) updateService(ctx context.Context, w http.ResponseWriter,
func (sr *swarmRouter) removeService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := sr.backend.RemoveService(vars["id"]); err != nil {
log.G(ctx).WithContext(ctx).WithFields(log.Fields{
"error": err,
"service-id": vars["id"],
}).Debug("Error removing service")
logrus.Errorf("Error removing service %s: %v", vars["id"], err)
return err
}
return nil
@@ -313,9 +300,9 @@ func (sr *swarmRouter) getNodes(ctx context.Context, w http.ResponseWriter, r *h
return err
}
nodes, err := sr.backend.GetNodes(types.NodeListOptions{Filters: filter})
nodes, err := sr.backend.GetNodes(basictypes.NodeListOptions{Filters: filter})
if err != nil {
log.G(ctx).WithContext(ctx).WithError(err).Debug("Error getting nodes")
logrus.Errorf("Error getting nodes: %v", err)
return err
}
@@ -325,10 +312,7 @@ func (sr *swarmRouter) getNodes(ctx context.Context, w http.ResponseWriter, r *h
func (sr *swarmRouter) getNode(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
node, err := sr.backend.GetNode(vars["id"])
if err != nil {
log.G(ctx).WithContext(ctx).WithFields(log.Fields{
"error": err,
"node-id": vars["id"],
}).Debug("Error getting node")
logrus.Errorf("Error getting node %s: %v", vars["id"], err)
return err
}
@@ -349,10 +333,7 @@ func (sr *swarmRouter) updateNode(ctx context.Context, w http.ResponseWriter, r
}
if err := sr.backend.UpdateNode(vars["id"], version, node); err != nil {
log.G(ctx).WithContext(ctx).WithFields(log.Fields{
"error": err,
"node-id": vars["id"],
}).Debug("Error updating node")
logrus.Errorf("Error updating node %s: %v", vars["id"], err)
return err
}
return nil
@@ -366,10 +347,7 @@ func (sr *swarmRouter) removeNode(ctx context.Context, w http.ResponseWriter, r
force := httputils.BoolValue(r, "force")
if err := sr.backend.RemoveNode(vars["id"], force); err != nil {
log.G(ctx).WithContext(ctx).WithFields(log.Fields{
"error": err,
"node-id": vars["id"],
}).Debug("Error removing node")
logrus.Errorf("Error removing node %s: %v", vars["id"], err)
return err
}
return nil
@@ -384,9 +362,9 @@ func (sr *swarmRouter) getTasks(ctx context.Context, w http.ResponseWriter, r *h
return err
}
tasks, err := sr.backend.GetTasks(types.TaskListOptions{Filters: filter})
tasks, err := sr.backend.GetTasks(basictypes.TaskListOptions{Filters: filter})
if err != nil {
log.G(ctx).WithContext(ctx).WithError(err).Debug("Error getting tasks")
logrus.Errorf("Error getting tasks: %v", err)
return err
}
@@ -396,10 +374,7 @@ func (sr *swarmRouter) getTasks(ctx context.Context, w http.ResponseWriter, r *h
func (sr *swarmRouter) getTask(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
task, err := sr.backend.GetTask(vars["id"])
if err != nil {
log.G(ctx).WithContext(ctx).WithFields(log.Fields{
"error": err,
"task-id": vars["id"],
}).Debug("Error getting task")
logrus.Errorf("Error getting task %s: %v", vars["id"], err)
return err
}
@@ -415,7 +390,7 @@ func (sr *swarmRouter) getSecrets(ctx context.Context, w http.ResponseWriter, r
return err
}
secrets, err := sr.backend.GetSecrets(types.SecretListOptions{Filters: filters})
secrets, err := sr.backend.GetSecrets(basictypes.SecretListOptions{Filters: filters})
if err != nil {
return err
}
@@ -438,7 +413,7 @@ func (sr *swarmRouter) createSecret(ctx context.Context, w http.ResponseWriter,
return err
}
return httputils.WriteJSON(w, http.StatusCreated, &types.SecretCreateResponse{
return httputils.WriteJSON(w, http.StatusCreated, &basictypes.SecretCreateResponse{
ID: id,
})
}
@@ -486,7 +461,7 @@ func (sr *swarmRouter) getConfigs(ctx context.Context, w http.ResponseWriter, r
return err
}
configs, err := sr.backend.GetConfigs(types.ConfigListOptions{Filters: filters})
configs, err := sr.backend.GetConfigs(basictypes.ConfigListOptions{Filters: filters})
if err != nil {
return err
}
@@ -510,7 +485,7 @@ func (sr *swarmRouter) createConfig(ctx context.Context, w http.ResponseWriter,
return err
}
return httputils.WriteJSON(w, http.StatusCreated, &types.ConfigCreateResponse{
return httputils.WriteJSON(w, http.StatusCreated, &basictypes.ConfigCreateResponse{
ID: id,
})
}
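A recurring change in this router is the logging call: package-level logrus.Errorf calls become log.G(ctx) calls from github.com/containerd/log, which carry structured fields and log at Debug level. A minimal, self-contained sketch of that pattern, assuming a hypothetical removeThing operation:

package main

import (
	"context"
	"errors"

	"github.com/containerd/log"
)

// removeThing mimics a handler body: on failure it logs through the
// context-aware logger with structured fields instead of logrus.Errorf.
func removeThing(ctx context.Context, id string) error {
	err := errors.New("not found") // stand-in for a real backend call
	if err != nil {
		log.G(ctx).WithFields(log.Fields{
			"error":    err,
			"thing-id": id,
		}).Debug("Error removing thing")
		return err
	}
	return nil
}

func main() {
	_ = removeThing(context.Background(), "abc123")
}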

View File

@@ -8,7 +8,6 @@ import (
"github.com/docker/docker/api/server/httputils"
basictypes "github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/api/types/versions"
)
@@ -22,13 +21,13 @@ func (sr *swarmRouter) swarmLogs(ctx context.Context, w http.ResponseWriter, r *
// any error after the stream starts (i.e. container not found, wrong parameters)
// with the appropriate status code.
stdout, stderr := httputils.BoolValue(r, "stdout"), httputils.BoolValue(r, "stderr")
if !stdout && !stderr {
if !(stdout || stderr) {
return fmt.Errorf("Bad parameters: you must choose at least one stream")
}
// there is probably a neater way to manufacture the LogsOptions
// there is probably a neater way to manufacture the ContainerLogsOptions
// struct, probably in the caller, to eliminate the dependency on net/http
logsConfig := &container.LogsOptions{
logsConfig := &basictypes.ContainerLogsOptions{
Follow: httputils.BoolValue(r, "follow"),
Timestamps: httputils.BoolValue(r, "timestamps"),
Since: r.Form.Get("since"),
@@ -78,16 +77,6 @@ func adjustForAPIVersion(cliVersion string, service *swarm.ServiceSpec) {
if cliVersion == "" {
return
}
if versions.LessThan(cliVersion, "1.46") {
if service.TaskTemplate.ContainerSpec != nil {
for i, mount := range service.TaskTemplate.ContainerSpec.Mounts {
if mount.TmpfsOptions != nil {
mount.TmpfsOptions.Options = nil
service.TaskTemplate.ContainerSpec.Mounts[i] = mount
}
}
}
}
if versions.LessThan(cliVersion, "1.40") {
if service.TaskTemplate.ContainerSpec != nil {
// Sysctls for docker swarm services weren't supported before
@@ -129,26 +118,4 @@ func adjustForAPIVersion(cliVersion string, service *swarm.ServiceSpec) {
service.Mode.ReplicatedJob = nil
service.Mode.GlobalJob = nil
}
if versions.LessThan(cliVersion, "1.44") {
if service.TaskTemplate.ContainerSpec != nil {
// seccomp, apparmor, and no_new_privs were added in 1.44.
if service.TaskTemplate.ContainerSpec.Privileges != nil {
service.TaskTemplate.ContainerSpec.Privileges.Seccomp = nil
service.TaskTemplate.ContainerSpec.Privileges.AppArmor = nil
service.TaskTemplate.ContainerSpec.Privileges.NoNewPrivileges = false
}
if service.TaskTemplate.ContainerSpec.Healthcheck != nil {
// StartInterval was added in API 1.44
service.TaskTemplate.ContainerSpec.Healthcheck.StartInterval = 0
}
}
}
if versions.LessThan(cliVersion, "1.46") {
if service.TaskTemplate.ContainerSpec != nil && service.TaskTemplate.ContainerSpec.OomScoreAdj != 0 {
// OomScoreAdj was added in API 1.46
service.TaskTemplate.ContainerSpec.OomScoreAdj = 0
}
}
}
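The whole of adjustForAPIVersion is built from one pattern: compare the negotiated client version against the version that introduced a field and zero the field out for older clients. A minimal sketch of that gate using the real versions helper; the spec struct, the adjustForVersion name, and the 1.46 cut-off here are illustrative only.

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/versions"
)

// spec is a stand-in for a service spec; OomScoreAdj mimics a field added in a later API version.
type spec struct {
	OomScoreAdj int
}

// adjustForVersion zeroes fields that the client's API version predates.
func adjustForVersion(cliVersion string, s *spec) {
	if cliVersion == "" {
		return
	}
	if versions.LessThan(cliVersion, "1.46") {
		// Pretend OomScoreAdj was introduced in API 1.46, as in the hunk above.
		s.OomScoreAdj = 0
	}
}

func main() {
	s := &spec{OomScoreAdj: 500}
	adjustForVersion("1.45", s)
	fmt.Println(s.OomScoreAdj) // 0: stripped for older clients
}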

View File

@@ -4,13 +4,14 @@ import (
"reflect"
"testing"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/mount"
"github.com/docker/docker/api/types/swarm"
"github.com/docker/go-units"
)
func TestAdjustForAPIVersion(t *testing.T) {
expectedSysctls := map[string]string{"foo": "bar"}
var (
expectedSysctls = map[string]string{"foo": "bar"}
)
// testing the negative -- does this leave everything else alone? -- is
// prohibitively time-consuming to write, because it would need an object
// with literally every field filled in.
@@ -39,25 +40,13 @@ func TestAdjustForAPIVersion(t *testing.T) {
ConfigName: "configRuntime",
},
},
Ulimits: []*container.Ulimit{
Ulimits: []*units.Ulimit{
{
Name: "nofile",
Soft: 100,
Hard: 200,
},
},
Mounts: []mount.Mount{
{
Type: mount.TypeTmpfs,
Source: "/foo",
Target: "/bar",
TmpfsOptions: &mount.TmpfsOptions{
Options: [][]string{
{"exec"},
},
},
},
},
},
Placement: &swarm.Placement{
MaxReplicas: 222,
@@ -70,19 +59,6 @@ func TestAdjustForAPIVersion(t *testing.T) {
},
}
adjustForAPIVersion("1.46", spec)
if !reflect.DeepEqual(
spec.TaskTemplate.ContainerSpec.Mounts[0].TmpfsOptions.Options,
[][]string{{"exec"}},
) {
t.Error("TmpfsOptions.Options was stripped from spec")
}
adjustForAPIVersion("1.45", spec)
if len(spec.TaskTemplate.ContainerSpec.Mounts[0].TmpfsOptions.Options) != 0 {
t.Error("TmpfsOptions.Options not stripped from spec")
}
// first, does calling this with a later version correctly NOT strip
// fields? do the later version first, so we can reuse this spec in the
// next test.

View File

@@ -5,12 +5,9 @@ import (
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/build"
"github.com/docker/docker/api/types/events"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/registry"
"github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/api/types/system"
)
// DiskUsageOptions holds parameters for system disk usage query.
@@ -28,23 +25,18 @@ type DiskUsageOptions struct {
// Backend is the methods that need to be implemented to provide
// system specific functionality.
type Backend interface {
SystemInfo(context.Context) (*system.Info, error)
SystemVersion(context.Context) (types.Version, error)
SystemDiskUsage(ctx context.Context, opts DiskUsageOptions) (*system.DiskUsage, error)
SystemInfo() *types.Info
SystemVersion() types.Version
SystemDiskUsage(ctx context.Context, opts DiskUsageOptions) (*types.DiskUsage, error)
SubscribeToEvents(since, until time.Time, ef filters.Args) ([]events.Message, chan interface{})
UnsubscribeFromEvents(chan interface{})
AuthenticateToRegistry(ctx context.Context, authConfig *registry.AuthConfig) (string, string, error)
AuthenticateToRegistry(ctx context.Context, authConfig *types.AuthConfig) (string, string, error)
}
// ClusterBackend is all the methods that need to be implemented
// to provide cluster system specific functionality.
type ClusterBackend interface {
Info(context.Context) swarm.Info
}
// BuildBackend provides build specific system information.
type BuildBackend interface {
DiskUsage(context.Context) ([]*build.CacheRecord, error)
Info() swarm.Info
}
// StatusProvider provides methods to get the swarm status of the current node.

View File

@@ -1,39 +0,0 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
//go:build go1.23
package system
import (
"encoding/json"
"github.com/docker/docker/api/types/system"
)
// infoResponse is a wrapper around [system.Info] with a custom
// marshal function for legacy fields.
type infoResponse struct {
*system.Info
// extraFields is for internal use to include deprecated fields on older API versions.
extraFields map[string]any
}
// MarshalJSON implements a custom marshaler to include legacy fields
// in API responses.
func (sc *infoResponse) MarshalJSON() ([]byte, error) {
type tmp *system.Info
base, err := json.Marshal((tmp)(sc.Info))
if err != nil {
return nil, err
}
if len(sc.extraFields) == 0 {
return base, nil
}
var merged map[string]any
_ = json.Unmarshal(base, &merged)
for k, v := range sc.extraFields {
merged[k] = v
}
return json.Marshal(merged)
}
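infoResponse exists so that deprecated fields can be spliced into the /info response for old API versions without living on the Go struct: the base struct is marshalled, the extra fields are merged into the resulting JSON object, and the merged map is marshalled again. A self-contained sketch of the same merge with hypothetical payload/response types:

package main

import (
	"encoding/json"
	"fmt"
)

type payload struct {
	Name string `json:"Name"`
}

// response wraps payload and carries extra legacy fields to inject at marshal time.
type response struct {
	*payload
	extra map[string]any
}

func (r *response) MarshalJSON() ([]byte, error) {
	// Marshal the embedded struct first (it has no custom marshaler of its own),
	// then splice the extra fields into the resulting JSON object.
	base, err := json.Marshal(r.payload)
	if err != nil {
		return nil, err
	}
	if len(r.extra) == 0 {
		return base, nil
	}
	var merged map[string]any
	if err := json.Unmarshal(base, &merged); err != nil {
		return nil, err
	}
	for k, v := range r.extra {
		merged[k] = v
	}
	return json.Marshal(merged)
}

func main() {
	out, _ := json.Marshal(&response{
		payload: &payload{Name: "demo"},
		extra:   map[string]any{"LegacyFlag": true},
	})
	fmt.Println(string(out)) // {"LegacyFlag":true,"Name":"demo"} (map keys are sorted)
}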

View File

@@ -1,33 +0,0 @@
package system
import (
"encoding/json"
"strings"
"testing"
"github.com/docker/docker/api/types/system"
)
func TestLegacyFields(t *testing.T) {
infoResp := &infoResponse{
Info: &system.Info{
Containers: 10,
},
extraFields: map[string]any{
"LegacyFoo": false,
"LegacyBar": true,
},
}
data, err := json.MarshalIndent(infoResp, "", " ")
if err != nil {
t.Fatal(err)
}
if expected := `"LegacyFoo": false`; !strings.Contains(string(data), expected) {
t.Errorf("legacy fields should contain %s: %s", expected, string(data))
}
if expected := `"LegacyBar": true`; !strings.Contains(string(data), expected) {
t.Errorf("legacy fields should contain %s: %s", expected, string(data))
}
}

View File

@@ -1,11 +1,8 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
//go:build go1.23
package system // import "github.com/docker/docker/api/server/router/system"
import (
"github.com/docker/docker/api/server/router"
"resenje.org/singleflight"
buildkit "github.com/docker/docker/builder/builder-next"
)
// systemRouter provides information about the Docker system overall.
@@ -14,17 +11,12 @@ type systemRouter struct {
backend Backend
cluster ClusterBackend
routes []router.Route
builder BuildBackend
features func() map[string]bool
// collectSystemInfo is a single-flight for the /info endpoint,
// unique per API version (as different API versions may return
// a different API response).
collectSystemInfo singleflight.Group[string, *infoResponse]
builder *buildkit.Builder
features *map[string]bool
}
// NewRouter initializes a new system router
func NewRouter(b Backend, c ClusterBackend, builder BuildBackend, features func() map[string]bool) router.Router {
func NewRouter(b Backend, c ClusterBackend, builder *buildkit.Builder, features *map[string]bool) router.Router {
r := &systemRouter{
backend: b,
cluster: c,

View File

@@ -1,6 +1,3 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
//go:build go1.23
package system // import "github.com/docker/docker/api/server/router/system"
import (
@@ -10,20 +7,18 @@ import (
"net/http"
"time"
"github.com/containerd/log"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/server/router/build"
"github.com/docker/docker/api/types"
buildtypes "github.com/docker/docker/api/types/build"
"github.com/docker/docker/api/types/events"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/registry"
"github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/api/types/system"
timetypes "github.com/docker/docker/api/types/time"
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/pkg/ioutils"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
)
@@ -36,7 +31,7 @@ func (s *systemRouter) pingHandler(ctx context.Context, w http.ResponseWriter, r
w.Header().Add("Cache-Control", "no-cache, no-store, must-revalidate")
w.Header().Add("Pragma", "no-cache")
builderVersion := build.BuilderVersion(s.features())
builderVersion := build.BuilderVersion(*s.features)
if bv := builderVersion; bv != "" {
w.Header().Set("Builder-Version", string(bv))
}
@@ -62,93 +57,51 @@ func (s *systemRouter) swarmStatus() string {
}
func (s *systemRouter) getInfo(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
info := s.backend.SystemInfo()
if s.cluster != nil {
info.Swarm = s.cluster.Info()
info.Warnings = append(info.Warnings, info.Swarm.Warnings...)
}
version := httputils.VersionFromContext(ctx)
info, _, _ := s.collectSystemInfo.Do(ctx, version, func(ctx context.Context) (*infoResponse, error) {
info, err := s.backend.SystemInfo(ctx)
if versions.LessThan(version, "1.25") {
// TODO: handle this conversion in engine-api
type oldInfo struct {
*types.Info
ExecutionDriver string
}
old := &oldInfo{
Info: info,
ExecutionDriver: "<not supported>",
}
nameOnlySecurityOptions := []string{}
kvSecOpts, err := types.DecodeSecurityOptions(old.SecurityOptions)
if err != nil {
return nil, err
return err
}
if s.cluster != nil {
info.Swarm = s.cluster.Info(ctx)
info.Warnings = append(info.Warnings, info.Swarm.Warnings...)
for _, s := range kvSecOpts {
nameOnlySecurityOptions = append(nameOnlySecurityOptions, s.Name)
}
if versions.LessThan(version, "1.25") {
// TODO: handle this conversion in engine-api
kvSecOpts, err := system.DecodeSecurityOptions(info.SecurityOptions)
if err != nil {
info.Warnings = append(info.Warnings, err.Error())
}
var nameOnly []string
for _, so := range kvSecOpts {
nameOnly = append(nameOnly, so.Name)
}
info.SecurityOptions = nameOnly
old.SecurityOptions = nameOnlySecurityOptions
return httputils.WriteJSON(w, http.StatusOK, old)
}
if versions.LessThan(version, "1.39") {
if info.KernelVersion == "" {
info.KernelVersion = "<unknown>"
}
if versions.LessThan(version, "1.39") {
if info.KernelVersion == "" {
info.KernelVersion = "<unknown>"
}
if info.OperatingSystem == "" {
info.OperatingSystem = "<unknown>"
}
if info.OperatingSystem == "" {
info.OperatingSystem = "<unknown>"
}
if versions.LessThan(version, "1.44") {
for k, rt := range info.Runtimes {
// Status field introduced in API v1.44.
info.Runtimes[k] = system.RuntimeWithStatus{Runtime: rt.Runtime}
}
}
if versions.LessThan(version, "1.46") {
// Containerd field introduced in API v1.46.
info.Containerd = nil
}
if versions.LessThan(version, "1.47") {
// Field is omitted in API 1.48 and up, but should still be included
// in older versions, even if no values are set.
info.RegistryConfig.ExtraFields = map[string]any{
"AllowNondistributableArtifactsCIDRs": json.RawMessage(nil),
"AllowNondistributableArtifactsHostnames": json.RawMessage(nil),
}
}
if versions.LessThan(version, "1.49") {
// FirewallBackend field introduced in API v1.49.
info.FirewallBackend = nil
}
extraFields := map[string]any{}
if versions.LessThan(version, "1.49") {
// Expected commits are omitted in API 1.49, but should still be
// included in older versions.
info.ContainerdCommit.Expected = info.ContainerdCommit.ID //nolint:staticcheck // ignore SA1019: field is deprecated, but still used on API < v1.49.
info.RuncCommit.Expected = info.RuncCommit.ID //nolint:staticcheck // ignore SA1019: field is deprecated, but still used on API < v1.49.
info.InitCommit.Expected = info.InitCommit.ID //nolint:staticcheck // ignore SA1019: field is deprecated, but still used on API < v1.49.
}
if versions.GreaterThanOrEqualTo(version, "1.42") {
info.KernelMemory = false
}
if versions.LessThan(version, "1.50") {
info.DiscoveredDevices = nil
// These fields are omitted in API > 1.49, and always false
// on older API versions.
extraFields = map[string]any{
"BridgeNfIptables": json.RawMessage("false"),
"BridgeNfIp6tables": json.RawMessage("false"),
}
}
return &infoResponse{Info: info, extraFields: extraFields}, nil
})
}
if versions.GreaterThanOrEqualTo(version, "1.42") {
info.KernelMemory = false
}
return httputils.WriteJSON(w, http.StatusOK, info)
}
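The newer getInfo wraps the info collection in a single-flight group keyed by the negotiated API version, so concurrent /info requests for the same version share one backend call while different versions are computed separately. A minimal sketch of the same resenje.org/singleflight usage; the collect function and its string result are placeholders.

package main

import (
	"context"
	"fmt"
	"sync"

	"resenje.org/singleflight"
)

var group singleflight.Group[string, string]

// collect runs the expensive work once per key; concurrent callers share the result.
func collect(ctx context.Context, version string) (string, error) {
	v, _, err := group.Do(ctx, version, func(ctx context.Context) (string, error) {
		return "info for API " + version, nil
	})
	return v, err
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			v, err := collect(context.Background(), "1.42")
			fmt.Println(v, err)
		}()
	}
	wg.Wait()
}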
func (s *systemRouter) getVersion(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
info, err := s.backend.SystemVersion(ctx)
if err != nil {
return err
}
info := s.backend.SystemVersion()
return httputils.WriteJSON(w, http.StatusOK, info)
}
@@ -163,7 +116,7 @@ func (s *systemRouter) getDiskUsage(ctx context.Context, w http.ResponseWriter,
var getContainers, getImages, getVolumes, getBuildCache bool
typeStrs, ok := r.Form["type"]
if versions.LessThan(version, "1.42") || !ok {
getContainers, getImages, getVolumes, getBuildCache = true, true, true, s.builder != nil
getContainers, getImages, getVolumes, getBuildCache = true, true, true, true
} else {
for _, typ := range typeStrs {
switch types.DiskUsageObject(typ) {
@@ -183,7 +136,7 @@ func (s *systemRouter) getDiskUsage(ctx context.Context, w http.ResponseWriter,
eg, ctx := errgroup.WithContext(ctx)
var systemDiskUsage *system.DiskUsage
var systemDiskUsage *types.DiskUsage
if getContainers || getImages || getVolumes {
eg.Go(func() error {
var err error
@@ -196,7 +149,7 @@ func (s *systemRouter) getDiskUsage(ctx context.Context, w http.ResponseWriter,
})
}
var buildCache []*buildtypes.CacheRecord
var buildCache []*types.BuildCache
if getBuildCache {
eg.Go(func() error {
var err error
@@ -207,7 +160,7 @@ func (s *systemRouter) getDiskUsage(ctx context.Context, w http.ResponseWriter,
if buildCache == nil {
// Ensure empty `BuildCache` field is represented as empty JSON array(`[]`)
// instead of `null` to be consistent with `Images`, `Containers` etc.
buildCache = []*buildtypes.CacheRecord{}
buildCache = []*types.BuildCache{}
}
return nil
})
@@ -232,42 +185,18 @@ func (s *systemRouter) getDiskUsage(ctx context.Context, w http.ResponseWriter,
b.Parent = "" //nolint:staticcheck // ignore SA1019 (Parent field is deprecated)
}
}
if versions.LessThan(version, "1.44") && systemDiskUsage != nil && systemDiskUsage.Images != nil {
for _, b := range systemDiskUsage.Images.Items {
b.VirtualSize = b.Size //nolint:staticcheck // ignore SA1019: field is deprecated, but still set on API < v1.44.
}
}
du := system.DiskUsage{}
if getBuildCache {
du.BuildCache = &buildtypes.CacheDiskUsage{
TotalSize: builderSize,
Items: buildCache,
}
du := types.DiskUsage{
BuildCache: buildCache,
BuilderSize: builderSize,
}
if systemDiskUsage != nil {
du.LayersSize = systemDiskUsage.LayersSize
du.Images = systemDiskUsage.Images
du.Containers = systemDiskUsage.Containers
du.Volumes = systemDiskUsage.Volumes
}
// Use the old struct for the API return value.
var v types.DiskUsage
if du.Images != nil {
v.LayersSize = du.Images.TotalSize
v.Images = du.Images.Items
}
if du.Containers != nil {
v.Containers = du.Containers.Items
}
if du.Volumes != nil {
v.Volumes = du.Volumes.Items
}
if du.BuildCache != nil {
v.BuildCache = du.BuildCache.Items
}
v.BuilderSize = builderSize
return httputils.WriteJSON(w, http.StatusOK, v)
return httputils.WriteJSON(w, http.StatusOK, du)
}
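getDiskUsage collects the container/image/volume usage and the build-cache records concurrently through an errgroup, then assembles the response from whatever was requested. A minimal sketch of that fan-out with placeholder work in place of the backend calls:

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	eg, ctx := errgroup.WithContext(context.Background())

	var layersSize, cacheSize int64

	// Each Go call runs concurrently; the first error cancels ctx for the others.
	eg.Go(func() error {
		if err := ctx.Err(); err != nil {
			return err
		}
		layersSize = 42 // stand-in for the system disk-usage query
		return nil
	})
	eg.Go(func() error {
		if err := ctx.Err(); err != nil {
			return err
		}
		cacheSize = 7 // stand-in for the build-cache query
		return nil
	})

	if err := eg.Wait(); err != nil {
		fmt.Println("disk usage failed:", err)
		return
	}
	fmt.Println("layers:", layersSize, "build cache:", cacheSize)
}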
type invalidRequestError struct {
@@ -321,7 +250,6 @@ func (s *systemRouter) getEvents(ctx context.Context, w http.ResponseWriter, r *
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
output := ioutils.NewWriteFlusher(w)
defer output.Close()
output.Flush()
@@ -331,18 +259,7 @@ func (s *systemRouter) getEvents(ctx context.Context, w http.ResponseWriter, r *
buffered, l := s.backend.SubscribeToEvents(since, until, ef)
defer s.backend.UnsubscribeFromEvents(l)
shouldSkip := func(ev events.Message) bool { return false }
if versions.LessThan(httputils.VersionFromContext(ctx), "1.46") {
// Image create events were added in API 1.46
shouldSkip = func(ev events.Message) bool {
return ev.Type == "image" && ev.Action == "create"
}
}
for _, ev := range buffered {
if shouldSkip(ev) {
continue
}
if err := enc.Encode(ev); err != nil {
return err
}
@@ -357,10 +274,7 @@ func (s *systemRouter) getEvents(ctx context.Context, w http.ResponseWriter, r *
case ev := <-l:
jev, ok := ev.(events.Message)
if !ok {
log.G(ctx).Warnf("unexpected event message: %q", ev)
continue
}
if shouldSkip(jev) {
logrus.Warnf("unexpected event message: %q", ev)
continue
}
if err := enc.Encode(jev); err != nil {
@@ -369,14 +283,14 @@ func (s *systemRouter) getEvents(ctx context.Context, w http.ResponseWriter, r *
case <-timeout:
return nil
case <-ctx.Done():
log.G(ctx).Debug("Client context cancelled, stop sending events")
logrus.Debug("Client context cancelled, stop sending events")
return nil
}
}
}
func (s *systemRouter) postAuth(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
var config *registry.AuthConfig
var config *types.AuthConfig
err := json.NewDecoder(r.Body).Decode(&config)
r.Body.Close()
if err != nil {

View File

@@ -3,9 +3,11 @@ package volume // import "github.com/docker/docker/api/server/router/volume"
import (
"context"
"github.com/docker/docker/volume/service/opts"
// TODO return types need to be refactored into pkg
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/volume"
"github.com/docker/docker/volume/service/opts"
)
// Backend is the methods that need to be implemented to provide
@@ -15,7 +17,7 @@ type Backend interface {
Get(ctx context.Context, name string, opts ...opts.GetOption) (*volume.Volume, error)
Create(ctx context.Context, name, driverName string, opts ...opts.CreateOption) (*volume.Volume, error)
Remove(ctx context.Context, name string, opts ...opts.RemoveOption) error
Prune(ctx context.Context, pruneFilters filters.Args) (*volume.PruneReport, error)
Prune(ctx context.Context, pruneFilters filters.Args) (*types.VolumesPruneReport, error)
}
// ClusterBackend is the backend used for Swarm Cluster Volumes. Regular

View File

@@ -20,21 +20,21 @@ func NewRouter(b Backend, cb ClusterBackend) router.Router {
}
// Routes returns the available routes to the volumes controller
func (v *volumeRouter) Routes() []router.Route {
return v.routes
func (r *volumeRouter) Routes() []router.Route {
return r.routes
}
func (v *volumeRouter) initRoutes() {
v.routes = []router.Route{
func (r *volumeRouter) initRoutes() {
r.routes = []router.Route{
// GET
router.NewGetRoute("/volumes", v.getVolumesList),
router.NewGetRoute("/volumes/{name:.*}", v.getVolumeByName),
router.NewGetRoute("/volumes", r.getVolumesList),
router.NewGetRoute("/volumes/{name:.*}", r.getVolumeByName),
// POST
router.NewPostRoute("/volumes/create", v.postVolumesCreate),
router.NewPostRoute("/volumes/prune", v.postVolumesPrune),
router.NewPostRoute("/volumes/create", r.postVolumesCreate),
router.NewPostRoute("/volumes/prune", r.postVolumesPrune),
// PUT
router.NewPutRoute("/volumes/{name:.*}", v.putVolumesUpdate),
router.NewPutRoute("/volumes/{name:.*}", r.putVolumesUpdate),
// DELETE
router.NewDeleteRoute("/volumes/{name:.*}", v.deleteVolumes),
router.NewDeleteRoute("/volumes/{name:.*}", r.deleteVolumes),
}
}

View File

@@ -6,8 +6,6 @@ import (
"net/http"
"strconv"
cerrdefs "github.com/containerd/errdefs"
"github.com/containerd/log"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/versions"
@@ -15,6 +13,7 @@ import (
"github.com/docker/docker/errdefs"
"github.com/docker/docker/volume/service/opts"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
const (
@@ -70,7 +69,7 @@ func (v *volumeRouter) getVolumeByName(ctx context.Context, w http.ResponseWrite
// if the volume is not found in the regular volume backend, and the client
// is using an API version greater than 1.42 (when cluster volumes were
// introduced), then check if Swarm has the volume.
if cerrdefs.IsNotFound(err) && versions.GreaterThanOrEqualTo(version, clusterVolumesVersion) && v.cluster.IsManager() {
if errdefs.IsNotFound(err) && versions.GreaterThanOrEqualTo(version, clusterVolumesVersion) && v.cluster.IsManager() {
swarmVol, err := v.cluster.GetVolume(vars["name"])
// if swarm returns an error and that error indicates that swarm is not
// initialized, return original NotFound error. Otherwise, we'd return
@@ -117,10 +116,10 @@ func (v *volumeRouter) postVolumesCreate(ctx context.Context, w http.ResponseWri
// Instead, we will allow creating a volume with a duplicate name, which
// should not break anything.
if req.ClusterVolumeSpec != nil && versions.GreaterThanOrEqualTo(version, clusterVolumesVersion) {
log.G(ctx).Debug("using cluster volume")
logrus.Debug("using cluster volume")
vol, err = v.cluster.CreateVolume(req)
} else {
log.G(ctx).Debug("using regular volume")
logrus.Debug("using regular volume")
vol, err = v.backend.Create(ctx, req.Name, req.Driver, opts.WithCreateOptions(req.DriverOpts), opts.WithCreateLabels(req.Labels))
}
@@ -160,29 +159,25 @@ func (v *volumeRouter) deleteVolumes(ctx context.Context, w http.ResponseWriter,
}
force := httputils.BoolValue(r, "force")
// First we try deleting the local volume. The volume may not be found as a
// local volume, but could be a cluster volume, so we ignore "not found"
// errors at this stage. Note that no "not found" error is produced if
// "force" is enabled.
err := v.backend.Remove(ctx, vars["name"], opts.WithPurgeOnError(force))
if err != nil && !cerrdefs.IsNotFound(err) {
return err
}
version := httputils.VersionFromContext(ctx)
// If no volume was found, the volume may be a cluster volume. If force
// is enabled, the volume backend won't return an error for non-existing
// volumes, so we don't know if removal succeeded (or no volume existed).
// In that case we always try to delete cluster volumes as well.
if cerrdefs.IsNotFound(err) || force {
version := httputils.VersionFromContext(ctx)
err := v.backend.Remove(ctx, vars["name"], opts.WithPurgeOnError(force))
// when a removal is forced, if the volume does not exist, no error will be
// returned. this means that to ensure forcing works on swarm volumes as
// well, we should always also force remove against the cluster.
if err != nil || force {
if versions.GreaterThanOrEqualTo(version, clusterVolumesVersion) && v.cluster.IsManager() {
err = v.cluster.RemoveVolume(vars["name"], force)
if errdefs.IsNotFound(err) || force {
err := v.cluster.RemoveVolume(vars["name"], force)
if err != nil {
return err
}
}
} else {
return err
}
}
if err != nil {
return err
}
w.WriteHeader(http.StatusNoContent)
return nil
}
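The newer deleteVolumes flow tries the local volume store first, treats "not found" as non-fatal, and then (when the volume was not found locally, or when force hides whether it existed) also asks the cluster backend to remove it. A minimal sketch of that fallback shape; the store interface, stub type, and errNotFound sentinel are hypothetical stand-ins for the real backends and errdefs helpers.

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

type store interface {
	Remove(name string, force bool) error
}

// removeVolume mirrors the local-first, cluster-fallback removal flow.
func removeVolume(local, cluster store, name string, force bool) error {
	err := local.Remove(name, force)
	if err != nil && !errors.Is(err, errNotFound) {
		return err
	}
	// Not found locally (or force hides whether it existed): try the cluster
	// too, and let its result replace the local "not found".
	if errors.Is(err, errNotFound) || force {
		err = cluster.Remove(name, force)
	}
	return err
}

type stub struct{ err error }

func (s stub) Remove(string, bool) error { return s.err }

func main() {
	// Volume only exists as a cluster volume: local returns "not found",
	// the cluster removal succeeds, and no error reaches the caller.
	fmt.Println(removeVolume(stub{errNotFound}, stub{nil}, "vol1", false)) // <nil>
}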

View File

@@ -5,14 +5,13 @@ import (
"context"
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"testing"
"gotest.tools/v3/assert"
cerrdefs "github.com/containerd/errdefs"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/volume"
"github.com/docker/docker/errdefs"
@@ -22,7 +21,7 @@ import (
func callGetVolume(v *volumeRouter, name string) (*httptest.ResponseRecorder, error) {
ctx := context.WithValue(context.Background(), httputils.APIVersionKey{}, clusterVolumesVersion)
vars := map[string]string{"name": name}
req := httptest.NewRequest(http.MethodGet, fmt.Sprintf("/volumes/%s", name), nil)
req := httptest.NewRequest("GET", fmt.Sprintf("/volumes/%s", name), nil)
resp := httptest.NewRecorder()
err := v.getVolumeByName(ctx, resp, req, vars)
@@ -32,7 +31,7 @@ func callGetVolume(v *volumeRouter, name string) (*httptest.ResponseRecorder, er
func callListVolumes(v *volumeRouter) (*httptest.ResponseRecorder, error) {
ctx := context.WithValue(context.Background(), httputils.APIVersionKey{}, clusterVolumesVersion)
vars := map[string]string{}
req := httptest.NewRequest(http.MethodGet, "/volumes", nil)
req := httptest.NewRequest("GET", "/volumes", nil)
resp := httptest.NewRecorder()
err := v.getVolumesList(ctx, resp, req, vars)
@@ -48,7 +47,7 @@ func TestGetVolumeByNameNotFoundNoSwarm(t *testing.T) {
_, err := callGetVolume(v, "notReal")
assert.Assert(t, err != nil)
assert.Assert(t, cerrdefs.IsNotFound(err))
assert.Assert(t, errdefs.IsNotFound(err))
}
func TestGetVolumeByNameNotFoundNotManager(t *testing.T) {
@@ -60,7 +59,7 @@ func TestGetVolumeByNameNotFoundNotManager(t *testing.T) {
_, err := callGetVolume(v, "notReal")
assert.Assert(t, err != nil)
assert.Assert(t, cerrdefs.IsNotFound(err))
assert.Assert(t, errdefs.IsNotFound(err))
}
func TestGetVolumeByNameNotFound(t *testing.T) {
@@ -72,13 +71,14 @@ func TestGetVolumeByNameNotFound(t *testing.T) {
_, err := callGetVolume(v, "notReal")
assert.Assert(t, err != nil)
assert.Assert(t, cerrdefs.IsNotFound(err))
assert.Assert(t, errdefs.IsNotFound(err))
}
func TestGetVolumeByNameFoundRegular(t *testing.T) {
v := &volumeRouter{
backend: &fakeVolumeBackend{
volumes: map[string]*volume.Volume{
"volume1": {
Name: "volume1",
},
@@ -108,7 +108,6 @@ func TestGetVolumeByNameFoundSwarm(t *testing.T) {
_, err := callGetVolume(v, "volume1")
assert.NilError(t, err)
}
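These tests exercise router handlers directly rather than through a server: a context stamped with the API version, an httptest request/recorder pair, and the mux vars passed as a plain map. A self-contained sketch of that setup; the handler function and apiVersionKey type are hypothetical stand-ins for the real router method and httputils.APIVersionKey.

package main

import (
	"context"
	"fmt"
	"net/http"
	"net/http/httptest"
)

type apiVersionKey struct{}

// handler stands in for a router method such as getVolumeByName.
func handler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	fmt.Fprintf(w, "volume=%s api=%v", vars["name"], ctx.Value(apiVersionKey{}))
	return nil
}

func main() {
	ctx := context.WithValue(context.Background(), apiVersionKey{}, "1.42")
	req := httptest.NewRequest(http.MethodGet, "/volumes/vol1", nil)
	resp := httptest.NewRecorder()

	if err := handler(ctx, resp, req, map[string]string{"name": "vol1"}); err != nil {
		panic(err)
	}
	fmt.Println(resp.Body.String()) // volume=vol1 api=1.42
}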
func TestListVolumes(t *testing.T) {
v := &volumeRouter{
backend: &fakeVolumeBackend{
@@ -190,16 +189,17 @@ func TestCreateRegularVolume(t *testing.T) {
Driver: "foodriver",
}
var buf bytes.Buffer
err := json.NewEncoder(&buf).Encode(volumeCreate)
assert.NilError(t, err)
buf := bytes.Buffer{}
e := json.NewEncoder(&buf)
e.Encode(volumeCreate)
ctx := context.WithValue(context.Background(), httputils.APIVersionKey{}, clusterVolumesVersion)
req := httptest.NewRequest(http.MethodPost, "/volumes/create", &buf)
req := httptest.NewRequest("POST", "/volumes/create", &buf)
req.Header.Add("Content-Type", "application/json")
resp := httptest.NewRecorder()
err = v.postVolumesCreate(ctx, resp, req, nil)
err := v.postVolumesCreate(ctx, resp, req, nil)
assert.NilError(t, err)
respVolume := volume.Volume{}
@@ -228,18 +228,18 @@ func TestCreateSwarmVolumeNoSwarm(t *testing.T) {
Driver: "someCSI",
}
var buf bytes.Buffer
err := json.NewEncoder(&buf).Encode(volumeCreate)
assert.NilError(t, err)
buf := bytes.Buffer{}
json.NewEncoder(&buf).Encode(volumeCreate)
ctx := context.WithValue(context.Background(), httputils.APIVersionKey{}, clusterVolumesVersion)
req := httptest.NewRequest(http.MethodPost, "/volumes/create", &buf)
req := httptest.NewRequest("POST", "/volumes/create", &buf)
req.Header.Add("Content-Type", "application/json")
resp := httptest.NewRecorder()
err = v.postVolumesCreate(ctx, resp, req, nil)
err := v.postVolumesCreate(ctx, resp, req, nil)
assert.Assert(t, err != nil)
assert.Assert(t, cerrdefs.IsUnavailable(err))
assert.Assert(t, errdefs.IsUnavailable(err))
}
func TestCreateSwarmVolumeNotManager(t *testing.T) {
@@ -257,18 +257,18 @@ func TestCreateSwarmVolumeNotManager(t *testing.T) {
Driver: "someCSI",
}
var buf bytes.Buffer
err := json.NewEncoder(&buf).Encode(volumeCreate)
assert.NilError(t, err)
buf := bytes.Buffer{}
json.NewEncoder(&buf).Encode(volumeCreate)
ctx := context.WithValue(context.Background(), httputils.APIVersionKey{}, clusterVolumesVersion)
req := httptest.NewRequest(http.MethodPost, "/volumes/create", &buf)
req := httptest.NewRequest("POST", "/volumes/create", &buf)
req.Header.Add("Content-Type", "application/json")
resp := httptest.NewRecorder()
err = v.postVolumesCreate(ctx, resp, req, nil)
err := v.postVolumesCreate(ctx, resp, req, nil)
assert.Assert(t, err != nil)
assert.Assert(t, cerrdefs.IsUnavailable(err))
assert.Assert(t, errdefs.IsUnavailable(err))
}
func TestCreateVolumeCluster(t *testing.T) {
@@ -289,16 +289,16 @@ func TestCreateVolumeCluster(t *testing.T) {
Driver: "someCSI",
}
var buf bytes.Buffer
err := json.NewEncoder(&buf).Encode(volumeCreate)
assert.NilError(t, err)
buf := bytes.Buffer{}
json.NewEncoder(&buf).Encode(volumeCreate)
ctx := context.WithValue(context.Background(), httputils.APIVersionKey{}, clusterVolumesVersion)
req := httptest.NewRequest(http.MethodPost, "/volumes/create", &buf)
req := httptest.NewRequest("POST", "/volumes/create", &buf)
req.Header.Add("Content-Type", "application/json")
resp := httptest.NewRecorder()
err = v.postVolumesCreate(ctx, resp, req, nil)
err := v.postVolumesCreate(ctx, resp, req, nil)
assert.NilError(t, err)
respVolume := volume.Volume{}
@@ -336,17 +336,15 @@ func TestUpdateVolume(t *testing.T) {
Spec: &volume.ClusterVolumeSpec{},
}
var buf bytes.Buffer
err := json.NewEncoder(&buf).Encode(volumeUpdate)
assert.NilError(t, err)
buf := bytes.Buffer{}
json.NewEncoder(&buf).Encode(volumeUpdate)
ctx := context.WithValue(context.Background(), httputils.APIVersionKey{}, clusterVolumesVersion)
req := httptest.NewRequest(http.MethodPost, "/volumes/vol1/update?version=0", &buf)
req := httptest.NewRequest("POST", "/volumes/vol1/update?version=0", &buf)
req.Header.Add("Content-Type", "application/json")
resp := httptest.NewRecorder()
err = v.putVolumesUpdate(ctx, resp, req, map[string]string{"name": "vol1"})
err := v.putVolumesUpdate(ctx, resp, req, map[string]string{"name": "vol1"})
assert.NilError(t, err)
assert.Equal(t, c.volumes["vol1"].ClusterVolume.Meta.Version.Index, uint64(1))
@@ -365,19 +363,17 @@ func TestUpdateVolumeNoSwarm(t *testing.T) {
Spec: &volume.ClusterVolumeSpec{},
}
var buf bytes.Buffer
err := json.NewEncoder(&buf).Encode(volumeUpdate)
assert.NilError(t, err)
buf := bytes.Buffer{}
json.NewEncoder(&buf).Encode(volumeUpdate)
ctx := context.WithValue(context.Background(), httputils.APIVersionKey{}, clusterVolumesVersion)
req := httptest.NewRequest(http.MethodPost, "/volumes/vol1/update?version=0", &buf)
req := httptest.NewRequest("POST", "/volumes/vol1/update?version=0", &buf)
req.Header.Add("Content-Type", "application/json")
resp := httptest.NewRecorder()
err = v.putVolumesUpdate(ctx, resp, req, map[string]string{"name": "vol1"})
err := v.putVolumesUpdate(ctx, resp, req, map[string]string{"name": "vol1"})
assert.Assert(t, err != nil)
assert.Assert(t, cerrdefs.IsUnavailable(err))
assert.Assert(t, errdefs.IsUnavailable(err))
}
func TestUpdateVolumeNotFound(t *testing.T) {
@@ -397,19 +393,17 @@ func TestUpdateVolumeNotFound(t *testing.T) {
Spec: &volume.ClusterVolumeSpec{},
}
var buf bytes.Buffer
err := json.NewEncoder(&buf).Encode(volumeUpdate)
assert.NilError(t, err)
buf := bytes.Buffer{}
json.NewEncoder(&buf).Encode(volumeUpdate)
ctx := context.WithValue(context.Background(), httputils.APIVersionKey{}, clusterVolumesVersion)
req := httptest.NewRequest(http.MethodPost, "/volumes/vol1/update?version=0", &buf)
req := httptest.NewRequest("POST", "/volumes/vol1/update?version=0", &buf)
req.Header.Add("Content-Type", "application/json")
resp := httptest.NewRecorder()
err = v.putVolumesUpdate(ctx, resp, req, map[string]string{"name": "vol1"})
err := v.putVolumesUpdate(ctx, resp, req, map[string]string{"name": "vol1"})
assert.Assert(t, err != nil)
assert.Assert(t, cerrdefs.IsNotFound(err))
assert.Assert(t, errdefs.IsNotFound(err))
}
func TestVolumeRemove(t *testing.T) {
@@ -428,7 +422,7 @@ func TestVolumeRemove(t *testing.T) {
}
ctx := context.WithValue(context.Background(), httputils.APIVersionKey{}, clusterVolumesVersion)
req := httptest.NewRequest(http.MethodDelete, "/volumes/vol1", nil)
req := httptest.NewRequest("DELETE", "/volumes/vol1", nil)
resp := httptest.NewRecorder()
err := v.deleteVolumes(ctx, resp, req, map[string]string{"name": "vol1"})
@@ -455,7 +449,7 @@ func TestVolumeRemoveSwarm(t *testing.T) {
}
ctx := context.WithValue(context.Background(), httputils.APIVersionKey{}, clusterVolumesVersion)
req := httptest.NewRequest(http.MethodDelete, "/volumes/vol1", nil)
req := httptest.NewRequest("DELETE", "/volumes/vol1", nil)
resp := httptest.NewRecorder()
err := v.deleteVolumes(ctx, resp, req, map[string]string{"name": "vol1"})
@@ -472,12 +466,12 @@ func TestVolumeRemoveNotFoundNoSwarm(t *testing.T) {
}
ctx := context.WithValue(context.Background(), httputils.APIVersionKey{}, clusterVolumesVersion)
req := httptest.NewRequest(http.MethodDelete, "/volumes/vol1", nil)
req := httptest.NewRequest("DELETE", "/volumes/vol1", nil)
resp := httptest.NewRecorder()
err := v.deleteVolumes(ctx, resp, req, map[string]string{"name": "vol1"})
assert.Assert(t, err != nil)
assert.Assert(t, cerrdefs.IsNotFound(err), err.Error())
assert.Assert(t, errdefs.IsNotFound(err), err.Error())
}
func TestVolumeRemoveNotFoundNoManager(t *testing.T) {
@@ -489,12 +483,12 @@ func TestVolumeRemoveNotFoundNoManager(t *testing.T) {
}
ctx := context.WithValue(context.Background(), httputils.APIVersionKey{}, clusterVolumesVersion)
req := httptest.NewRequest(http.MethodDelete, "/volumes/vol1", nil)
req := httptest.NewRequest("DELETE", "/volumes/vol1", nil)
resp := httptest.NewRecorder()
err := v.deleteVolumes(ctx, resp, req, map[string]string{"name": "vol1"})
assert.Assert(t, err != nil)
assert.Assert(t, cerrdefs.IsNotFound(err))
assert.Assert(t, errdefs.IsNotFound(err))
}
func TestVolumeRemoveFoundNoSwarm(t *testing.T) {
@@ -513,7 +507,7 @@ func TestVolumeRemoveFoundNoSwarm(t *testing.T) {
}
ctx := context.WithValue(context.Background(), httputils.APIVersionKey{}, clusterVolumesVersion)
req := httptest.NewRequest(http.MethodDelete, "/volumes/vol1", nil)
req := httptest.NewRequest("DELETE", "/volumes/vol1", nil)
resp := httptest.NewRecorder()
err := v.deleteVolumes(ctx, resp, req, map[string]string{"name": "vol1"})
@@ -536,12 +530,12 @@ func TestVolumeRemoveNoSwarmInUse(t *testing.T) {
}
ctx := context.WithValue(context.Background(), httputils.APIVersionKey{}, clusterVolumesVersion)
req := httptest.NewRequest(http.MethodDelete, "/volumes/inuse", nil)
req := httptest.NewRequest("DELETE", "/volumes/inuse", nil)
resp := httptest.NewRecorder()
err := v.deleteVolumes(ctx, resp, req, map[string]string{"name": "inuse"})
assert.Assert(t, err != nil)
assert.Assert(t, cerrdefs.IsConflict(err))
assert.Assert(t, errdefs.IsConflict(err))
}
func TestVolumeRemoveSwarmForce(t *testing.T) {
@@ -564,16 +558,16 @@ func TestVolumeRemoveSwarmForce(t *testing.T) {
}
ctx := context.WithValue(context.Background(), httputils.APIVersionKey{}, clusterVolumesVersion)
req := httptest.NewRequest(http.MethodDelete, "/volumes/vol1", nil)
req := httptest.NewRequest("DELETE", "/volumes/vol1", nil)
resp := httptest.NewRecorder()
err := v.deleteVolumes(ctx, resp, req, map[string]string{"name": "vol1"})
assert.Assert(t, err != nil)
assert.Assert(t, cerrdefs.IsConflict(err))
assert.Assert(t, errdefs.IsConflict(err))
ctx = context.WithValue(context.Background(), httputils.APIVersionKey{}, clusterVolumesVersion)
req = httptest.NewRequest(http.MethodDelete, "/volumes/vol1?force=1", nil)
req = httptest.NewRequest("DELETE", "/volumes/vol1?force=1", nil)
resp = httptest.NewRecorder()
err = v.deleteVolumes(ctx, resp, req, map[string]string{"name": "vol1"})
@@ -588,7 +582,7 @@ type fakeVolumeBackend struct {
}
func (b *fakeVolumeBackend) List(_ context.Context, _ filters.Args) ([]*volume.Volume, []string, error) {
var volumes []*volume.Volume
volumes := []*volume.Volume{}
for _, v := range b.volumes {
volumes = append(volumes, v)
}
@@ -642,7 +636,7 @@ func (b *fakeVolumeBackend) Remove(_ context.Context, name string, o ...opts.Rem
return nil
}
func (b *fakeVolumeBackend) Prune(_ context.Context, _ filters.Args) (*volume.PruneReport, error) {
func (b *fakeVolumeBackend) Prune(_ context.Context, _ filters.Args) (*types.VolumesPruneReport, error) {
return nil, nil
}
@@ -678,12 +672,13 @@ func (c *fakeClusterBackend) GetVolume(nameOrID string) (volume.Volume, error) {
return volume.Volume{}, errdefs.NotFound(fmt.Errorf("volume %s not found", nameOrID))
}
func (c *fakeClusterBackend) GetVolumes(_ volume.ListOptions) ([]*volume.Volume, error) {
func (c *fakeClusterBackend) GetVolumes(options volume.ListOptions) ([]*volume.Volume, error) {
if err := c.checkSwarm(); err != nil {
return nil, err
}
var volumes []*volume.Volume
volumes := []*volume.Volume{}
for _, v := range c.volumes {
volumes = append(volumes, v)
}

Some files were not shown because too many files have changed in this diff.