Compare commits


65 Commits

Author SHA1 Message Date
Andrew Hsu
cdb0218236 Merge pull request #239 from seemethere/bundle_me_up_1806
[18.06-ce] [ENGSEC-28] CVE-2019-5736 apply fix via git bundle instead of patches
2019-02-06 15:30:06 -08:00
Eli Uriegas
d212dfee1a Switch from applying patches a git bundle
A git bundle allows us to keep the same SHAs, giving us the ability to
validate our patch against a known entity and to push directly from our
private forks to public forks without having to re-apply any patches.

Signed-off-by: Eli Uriegas <eli.uriegas@docker.com>
2019-02-06 22:13:34 +00:00
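A rough sketch of the workflow this describes, using standard git commands (the branch and bundle names here are invented for illustration):

    # on the private fork: pack the fix commits into a bundle, SHAs preserved
    git bundle create cve-fix.bundle master..fix-branch
    # on the receiving side: confirm the bundle is valid and self-contained
    git bundle verify cve-fix.bundle
    # fetch the branch out of the bundle; the resulting commits keep their SHAs
    git fetch cve-fix.bundle fix-branch:fix-branch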
Andrew Hsu
56ad4cdec6 Merge pull request #102 from andrewhsu/grpc1806
[18.06] cluster: set bigger grpc limit for array requests
2018-10-31 13:49:10 -07:00
Tonis Tiigi
cdabbbf33c cluster: set bigger grpc limit for array requests
A 4MB client-side limit was introduced when vendoring go-grpc#1165
(v1.4.0), making these requests likely to produce errors.

Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
(cherry picked from commit 489b8eda66)
Signed-off-by: Andrew Hsu <andrewhsu@docker.com>
2018-10-30 23:06:32 +00:00
Sebastiaan van Stijn
320063a2ad Merge pull request #34 from thaJeztah/18.06-backport-logissue
[18.06] backport select polling based watcher for Windows log watcher
2018-08-16 10:14:46 +02:00
Sebastiaan van Stijn
29871648df Merge pull request #31 from thaJeztah/18.06-backport-jjh.37562
[18.06] backport "don't invoke HCS shutdown if terminate called"
2018-08-16 10:14:03 +02:00
Andrew Hsu
68a4625393 Merge pull request #33 from thaJeztah/18.06-update-containerd
[18.06] backport bump containerd daemon to v1.1.2
2018-08-14 15:07:14 -07:00
Tejaswini Duggaraju
a86643ba40 Select polling based watcher for Windows log watcher
Signed-off-by: Tejaswini Duggaraju <naduggar@microsoft.com>
(cherry picked from commit df84cdd091)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-08-14 14:43:52 +02:00
Sebastiaan van Stijn
460fb308ed Bump containerd daemon to v1.1.2
Updates cri version to 1.0.4, to add `max-container-log-line-size`

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 9e773a12fb)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-08-08 10:48:31 +02:00
Andrew Hsu
c4a6ecfcd5 Merge pull request #25 from thaJeztah/18.06-backport-error_when_base_name_resolved_to_blank
[18.06] Return error if basename is expanded to blank
2018-08-07 16:00:37 -07:00
Andrew Hsu
576e49dd6a Merge pull request #21 from tiborvass/18.06-vendor-buildkit
[18.06] Set BuildKit's ExportedProduct variable to show useful errors in the future
2018-08-07 11:51:29 -07:00
Andrew Hsu
5150e8235c Merge pull request #32 from thaJeztah/18.06-bump_swarmkit
[18.06] Bump SwarmKit to 8852e88
2018-08-06 20:59:01 -07:00
Andrew Hsu
b974b42acf Merge pull request #29 from thaJeztah/18.06-disable-cri
[18.06] disable containerd CRI plugin
2018-08-06 17:39:11 -07:00
Yuichiro Kaneko
226fadc9fc Return error if basename is expanded to blank
Fix: https://github.com/moby/moby/issues/37325

Signed-off-by: Yuichiro Kaneko <spiketeika@gmail.com>
(cherry picked from commit c9542d313e)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-08-06 15:50:22 +02:00
Sebastiaan van Stijn
4f8777f6af Bump SwarmKit to 8852e8840e30d69db0b39a4a3d6447362e17c64f
Relevant changes:

- swarmkit #2593 agent: return error when failing to apply network key
- swarmkit #2645 Replace deprecated grpc functions
- swarmkit #2720 Test if error is nil before to log it
- swarmkit #2712 [orchestrator] Fix task sorting
- swarmkit #2677 [manager/orchestrator/reaper] Fix the condition used for skipping over running tasks

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 660fa129c0)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-08-06 15:05:35 +02:00
John Howard
777d535c23 Don't invoke HCS shutdown if terminate called
Signed-off-by: John Howard <jhoward@microsoft.com>
(cherry picked from commit 5cfededc7c)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-08-04 01:21:32 +02:00
Sebastiaan van Stijn
3dbf955771 18.06: disable containerd CRI plugin
Docker 18.06 does not have a configuration option to
disable the CRI plugin, and the plugin is not very
useful when containerd is not running standalone.

This patch disables the plugin when containerd is running
as a child process of dockerd.

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-07-31 15:32:13 +02:00
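For standalone containerd 1.1, the equivalent knob lives in containerd's own configuration; a minimal sketch, using the conventional config path (this is not the mechanism the patch uses for the dockerd-managed instance):

    # disable the CRI plugin in a standalone containerd 1.1 installation
    cat <<'EOF' | sudo tee /etc/containerd/config.toml
    disabled_plugins = ["cri"]
    EOF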
Tibor Vass
0832d1e1d4 validate: please vet
Signed-off-by: Tibor Vass <tibor@docker.com>
(cherry picked from commit 81599222fc)
Signed-off-by: Tibor Vass <tibor@docker.com>
2018-07-20 23:48:44 +00:00
Tibor Vass
89a8e672e0 builder: set buildkit's exported product variable via PRODUCT
This introduces a PRODUCT environment variable that is used to set a constant
at dockerversion.ProductName.

That is then used to set BuildKit's ExportedProduct variable in order to show
useful error messages to users when a certain version of the product doesn't
support a BuildKit feature.

Signed-off-by: Tibor Vass <tibor@docker.com>
(cherry picked from commit 195919d9d6)
Signed-off-by: Tibor Vass <tibor@docker.com>
2018-07-20 23:48:44 +00:00
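How PRODUCT reaches the constant is not shown in this log; a hypothetical sketch of the general pattern of generating a build-time constant (the file name and contents are assumptions for illustration, not the actual build script):

    # bake the PRODUCT env var into a Go constant before compiling
    cat > dockerversion/product_gen.go <<EOF
    package dockerversion
    const ProductName = "${PRODUCT}"
    EOF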
Tibor Vass
918ea06954 vendor: buildkit to 98f1604134f945d48538ffca0e18662337b4a850
Signed-off-by: Tibor Vass <tibor@docker.com>
(cherry picked from commit 0ab7c1c5ba)
Signed-off-by: Tibor Vass <tibor@docker.com>
2018-07-20 23:48:44 +00:00
Andrew Hsu
a3ef7e9a9b Merge pull request #26 from thaJeztah/18.06-backport-fix_TestExternalGraphDriver_pull
[18.06] Fix flaky TestExternalGraphDriver/pull test
2018-07-18 08:09:40 -07:00
Andrew Hsu
fe1b4aa571 Merge pull request #22 from thaJeztah/18.06-backport-errorfix
[18.06] Fix error string in docker CLI test (Windows RS5)
2018-07-18 08:08:33 -07:00
Andrew Hsu
9475fe4f57 Merge pull request #27 from thaJeztah/18.06-backport-bump_swarmkit
[18.06] Update swarmkit to 68266392a176434d282760d2d6d0ab4c68edcae6
2018-07-18 08:07:57 -07:00
Sebastiaan van Stijn
2211a7dc42 Update swarmkit to 68266392a176434d282760d2d6d0ab4c68edcae6
changes included:

- swarmkit #2706 address unassigned task leak when service is removed
- swarmkit #2676 Fix racy batching on the dispatcher
- swarmkit #2693 Fix linting issues revealed by Go 1.11

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit c9377f4552)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-07-17 20:59:07 +02:00
Sebastiaan van Stijn
8fcc0d53cb Fix flaky TestExternalGraphDriver/pull test
This test occasionally fails on s390x and Power;

    03:16:04 --- FAIL: TestExternalGraphDriver/pull (1.08s)
    03:16:04 external_test.go:402: assertion failed: error is not nil: Error: No such image: busybox:latest

These failures are most likely caused by Docker Hub updating the
busybox:latest image before all architectures become available.

Instead of using `:latest`, pull an image by digest, so that
the test doesn't depend on Docker Hub having all architectures
available for `:latest`.

I selected the same digest as is currently used as "frozen image"
in the Dockerfile.

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 352db26d5f)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-07-17 20:44:02 +02:00
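Pinning by digest looks like the following; the digest shown is a placeholder, not the one the commit selected:

    # immune to Docker Hub re-tagging :latest before all architectures exist
    docker pull busybox@sha256:<digest-of-frozen-image>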
Andrew Hsu
8308a17b6a Merge pull request #24 from thaJeztah/18.06-backport-fix-internal
[18.06] Fix flakiness in TestDockerNetworkInternalMode
2018-07-16 09:23:10 -07:00
Flavio Crisciani
b4c762831c Fix flakiness in TestDockerNetworkInternalMode
Instead of waiting for the DNS to fail, try to access
a specific external IP and verify that 100% of the packets
are being lost.

Signed-off-by: Flavio Crisciani <flavio.crisciani@docker.com>
(cherry picked from commit a2bb2144b3)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-07-15 21:42:00 +02:00
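A minimal sketch of that check (the network name is invented, and 8.8.8.8 stands in for "a specific external IP"):

    docker network create --internal testnet
    # on an internal network, every packet to the outside should be lost
    docker run --rm --network testnet busybox ping -c 4 -W 1 8.8.8.8 \
        || echo "100% packet loss, as expected"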
Sandeep Bansal
d025942c33 Fix error string in docker CLI test
Signed-off-by: Sandeep Bansal <sabansal@microsoft.com>
(cherry picked from commit 76ace9bb5e)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-07-12 13:52:32 +02:00
Andrew Hsu
371b590ace Merge pull request #20 from thaJeztah/18.06-backport-do_not_Healthcheck_RUN_command
[18.06] Ensure RUN instructions run without Healthcheck
2018-07-11 17:47:16 -07:00
Yuichiro Kaneko
160be68bbd Ensure RUN instructions run without Healthcheck
Before this commit, the health check ran during build if a HEALTHCHECK
instruction appeared before a RUN instruction. By passing
`withoutHealthcheck` to `copyRunConfig`, RUN instructions now always
run without a health check.

Fix: https://github.com/moby/moby/issues/37362

Signed-off-by: Yuichiro Kaneko <spiketeika@gmail.com>
(cherry picked from commit 44e08d8a7d)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-07-11 20:39:49 +02:00
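A Dockerfile that triggers the scenario described above (contents invented for illustration):

    cat > Dockerfile <<'EOF'
    FROM busybox
    HEALTHCHECK --interval=5s CMD true
    # before the fix, the build container for this step reportedly ran
    # with the health check active (moby#37362)
    RUN echo hello
    EOF
    docker build .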
Sebastiaan van Stijn
26055afbd4 Merge pull request #19 from thaJeztah/18.06-backport-bump_libnetwork
[18.06] Bump libnetwork to d00ceed44cc447c77f25cdf5d59e83163bdcb4c9
2018-07-11 19:55:11 +02:00
Sebastiaan van Stijn
f2dffe3b15 Merge pull request #18 from tiborvass/18.06-mountable-bugfix
[18.06] builder: fix duplicate calls to mountable
2018-07-11 19:54:44 +02:00
Sebastiaan van Stijn
d65a47f438 Bump libnetwork to d00ceed44cc447c77f25cdf5d59e83163bdcb4c9
The absence of the file /proc/sys/net/ipv6/conf/all/disable_ipv6
doesn't appear to affect functionality, at least at this time.

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit d58c4cbe6c)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-07-11 12:46:51 +02:00
Tonis Tiigi
622743a9b5 builder: fix duplicate calls to mountable
Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
(cherry picked from commit ffa7233d15)
Signed-off-by: Tibor Vass <tibor@docker.com>
2018-07-11 02:34:47 +00:00
Andrew Hsu
4fc6e3ab7b Merge pull request #17 from thaJeztah/18.06-backport-bump_containerd_1.1.1
[18.06] Bump containerd daemon to v1.1.1
2018-07-10 15:25:14 -07:00
Andrew Hsu
6abf11d448 Merge pull request #15 from thaJeztah/18.06-backport-vendor-containerd
[18.06] vendor: update containerd to b41633746
2018-07-10 09:28:55 -07:00
Andrew Hsu
c863835e99 Merge pull request #16 from thaJeztah/18.06-backport-scalable-lb
[18.06] Improve scalability of the Linux load balancing
2018-07-10 09:26:50 -07:00
Brian Goff
df5aa018c6 Bump containerd daemon to v1.1.1
Signed-off-by: Brian Goff <cpuguy83@gmail.com>
(cherry picked from commit c083eb7595)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-07-10 01:50:02 +02:00
Andrew Hsu
22a77b0b92 Merge pull request #11 from thaJeztah/18.06-fix_bindmount_src_create_race
[18.06] Fix bindmount autocreate race
2018-07-09 14:53:50 -07:00
Chris Telfer
1c4232670a Bump libnetwork to 3ac297bc
Bump libnetwork to 3ac297bc7fd0afec9051bbb47024c9bc1d75bf5b in order to
get fix 0c3d9f00, which addresses a flaw that the scalable load
balancing code revealed. Attempting to print sandbox IDs when the
sandbox name was too short resulted in a goroutine panic. In the
previous code this could occur for sandbox names of one or two
characters; due to naming updates in the scalable load balancing code,
it could now also occur for networks whose names were three characters,
and at least one of the integration tests employed such networks
(named 'foo', 'bar', and 'baz').

This update also brings in several other changes:
 * 6c7c6017 - Fix error handling about bridgeSetup
 * 5ed38221 - Optimize networkDB queue
 * cfa9afdb - ndots: produce error on negative numbers
 * 5586e226 - improve error message for invalid ndots number
 * 449672e5 - Allows to set generic knobs on the Sandbox
 * 6b4c4af7 - do not ignore user-provided "ndots:0" option
 * 843a0e42 - Adjust corner case for reconnect logic

Signed-off-by: Chris Telfer <ctelfer@docker.com>
(cherry picked from commit 0e162d9923)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-07-09 20:27:41 +02:00
Chris Telfer
cd3596eabc Update moby to use scalable-lb libnetwork APIs
This patch is required for the updated version of libnetwork and entails
two minor changes.

First, it passes the new libnetwork.NetworkDeleteOptionRemoveLB option
to the network.Delete() method to automatically remove the load
balancing endpoint for ingress networks. This allows removal of the
deleteLoadBalancerSandbox() function, whose functionality now lives in
libnetwork.

The second change is to allocate a load balancer endpoint IP address for
all overlay networks rather than just "ingress" and Windows overlay
networks. Swarmkit is already performing this allocation, but moby was
not making use of these IP addresses for Linux overlay networks (except
ingress). The current version of libnetwork makes use of these IP
addresses by creating a load balancing sandbox and endpoint, similar to
ingress's, for all overlay networks, and putting all load balancing
state for a given node in that sandbox only. This reduces the amount of
Linux kernel state required per node.

In the prior scheme, libnetwork would program each container's network
namespace with every piece of load balancing state for every other
container that shared *any* network with the first container. This
meant that the amount of kernel state on a given node scaled with the
square of the number of services in the cluster and with the square of
the number of containers per service; for example, 100 services of 10
containers each sharing a network implies on the order of a million
state entries spread across a node's namespaces, rather than roughly a
thousand. With the new scheme, kernel state at each node scales
linearly with the number of services and the number of containers per
service. This also reduces the number of system calls required to add
or remove tasks and containers. Previously the number of system calls
required grew linearly with the number of other tasks that shared a
network with the container; now it grows linearly only with the number
of networks that the task/container is attached to. This results in a
significant performance improvement when adding and removing services
in a cluster that is already heavily loaded.

The primary disadvantage to this scheme is that it requires the
allocation of an additional IP address per node per subnet for every
node in the cluster that has a task on the given subnet.  However, as
mentioned, swarmkit is already allocating these IP addresses for every
node and they are going unused.  Future swarmkit modifications should be
examined to only allocate said IP addresses when nodes actually require
them.

Signed-off-by: Chris Telfer <ctelfer@docker.com>
(cherry picked from commit 8e0f6bc903)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-07-09 20:09:14 +02:00
Chris Telfer
0cd9442c75 bump libnetwork to b0186632
Bump libnetwork to b0186632522c68f4e1222c4f6d7dbe518882024f. This
includes the following changes:
 * Dockerize protocol buffer generation and update (78d9390a..e12dd44c)
 * Use new plugin interfaces provided by plugin pkg (be94e134)
 * Improve linux load-balancing scalability (5111c24e..366b9110)

Signed-off-by: Chris Telfer <ctelfer@docker.com>
(cherry picked from commit 92335eaef1)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-07-09 20:08:39 +02:00
Tonis Tiigi
5e14dc7cb7 vendor: update containerd to b41633746
Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
(cherry picked from commit f0e6158266)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-07-09 10:37:32 +02:00
Andrew Hsu
73ca638f39 Merge pull request #13 from thaJeztah/18.06-backport_bump-swarmkit
[18.06] Bump swarmkit to include task reaper fixes and more metrics.
2018-07-08 12:10:20 -07:00
Andrew Hsu
496e208a24 Merge pull request #14 from thaJeztah/18.06-backport-CVE-2018-10892
[18.06] Add /proc/acpi to masked paths
2018-07-06 16:50:13 -07:00
Antonio Murdaca
caf82772b7 Add /proc/acpi to masked paths
The default OCI Linux spec in oci/defaults{_linux}.go in Docker/Moby
from 1.11 to current upstream master does not block /proc/acpi
pathnames, allowing attackers to modify the host's hardware, such as
enabling/disabling Bluetooth or adjusting keyboard brightness. SELinux
prevents all of this if enabled.

Signed-off-by: Antonio Murdaca <runcom@redhat.com>
(cherry picked from commit 569b9702a5)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-07-06 15:59:05 +02:00
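A quick probe for the fix on a host that exposes /proc/acpi (alpine is an arbitrary small image; on a patched daemon the path should appear masked, so the listing should come back empty):

    docker run --rm alpine ls /proc/acpi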
Ying Li
ea4127cc0e Bump swarmkit to include task reaper fixes and more metrics.
This includes the following behavior-modifying PRs:

- docker/swarmkit#2673
- docker/swarmkit#2669
- docker/swarmkit#2675
- docker/swarmkit#2664

Signed-off-by: Ying Li <ying.li@docker.com>
(cherry picked from commit b322705750)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-07-06 00:43:16 +02:00
Brian Goff
8ade047f3c Fix bindmount autocreate race
When using the mounts API, bind mounts are not supposed to be
automatically created.

Before this patch there was a race condition between validating that a
bind path exists and actually setting up the bind mount: the path could
exist at validation time but be removed before mountpoint setup.

This adds a field to the mountpoint struct to ensure that bind paths
requested over the mounts API are never accidentally created.

Signed-off-by: Brian Goff <cpuguy83@gmail.com>
(cherry picked from commit 1caeb79963)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-07-05 21:01:17 +02:00
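The contract at issue, sketched with throwaway paths:

    # the mounts API must not auto-create a missing source...
    docker run --rm --mount type=bind,src=/no/such/dir,dst=/x alpine true   # fails
    # ...while the legacy -v syntax still auto-creates it
    docker run --rm -v /no/such/dir2:/x alpine true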
Andrew Hsu
85b4dbd3db Merge pull request #4 from thaJeztah/18.06-backport-register-oci-mediatypes
[18.06] Register OCI image media types
2018-07-05 11:52:39 -07:00
Andrew Hsu
cba80832a6 Merge pull request #3 from thaJeztah/18.06-backport-update-windows-manifest-sorting
[18.06] LCOW: Prefer Windows over Linux in a manifest list
2018-07-05 11:52:18 -07:00
Andrew Hsu
0d029b0a42 Merge pull request #2 from thaJeztah/18.06-update_go_winio
[18.06] Update Microsoft/go-winio to 0.4.8
2018-07-05 10:21:43 -07:00
Andrew Hsu
c9bfc3c842 Merge pull request #10 from thaJeztah/18.06-update_buildkit
[18.06] vendor: update buildkit to 9acf51e491
2018-07-05 08:41:47 -07:00
Tibor Vass
12eee7ce0e Merge pull request #1 from thaJeztah/18.03-update-containerd-1.1.1-rc.2
[18.06] Update containerd to v1.1.1-rc.2
2018-07-05 08:00:22 -07:00
Andrew Hsu
394fdb711c Merge pull request #7 from tiborvass/18.06-cp
[18.06] cherry-pick set for rc2
2018-07-05 02:00:07 -07:00
Tonis Tiigi
c9757e3efc vendor: update buildkit to 9acf51e491
Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
(cherry picked from commit 6144f50e55)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-07-04 16:34:34 +02:00
Tonis Tiigi
3500f3f27e builder: do not send duplicate status for completed jobs
Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
(cherry picked from commit 6f7dd9428e)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-07-04 16:34:28 +02:00
Tibor Vass
f030a49747 api: Change Platform field back to string (temporary workaround)
This partially reverts https://github.com/moby/moby/pull/37350

Although specs.Platform is desirable in the API, there is more work to
be done on helper functions, namely containerd's platforms.Parse, which
assumes the default platform of the Go runtime.

That prevents a client from using the recommended Parse function to
retrieve a specs.Platform object.

With this change, no parsing is expected from the client.

Signed-off-by: Tibor Vass <tibor@docker.com>
(cherry picked from commit facad55744)
Signed-off-by: Tibor Vass <tibor@docker.com>
2018-07-04 00:28:41 +00:00
Andrew Hsu
d29c1fa7d1 Merge pull request #5 from thaJeztah/18.06-bump-libnetwork
[18.06] bump libnetwork to 430c00a
2018-07-03 16:15:29 -07:00
Tibor Vass
26dd527fab builder: return image ID in API when using buildkit
Signed-off-by: Tibor Vass <tibor@docker.com>
(cherry picked from commit ca8022ec63)
Signed-off-by: Tibor Vass <tibor@docker.com>
2018-07-03 22:47:03 +00:00
Chris Telfer
cc1b68d5f2 Update tests w/ new libnetwork constraints
The TestDockerNetworkIPAMMultipleNetworks test allocates several
networks simultaneously with overlapping IP addresses.  Libnetwork now
forbids this.  Adjust the test case to use distinct IP ranges for the
networks it creates.

Signed-off-by: Chris Telfer <ctelfer@docker.com>
(cherry picked from commit efb7909bef)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-07-03 22:25:41 +02:00
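The new constraint is easy to see from the CLI (subnets and names chosen arbitrarily):

    docker network create --subnet 10.10.0.0/24 net1
    # a second network overlapping the first is now rejected by libnetwork
    docker network create --subnet 10.10.0.0/16 net2   # pool-overlap error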
Derek McGowan
126b5bce9d Register OCI image media types
OCI types are backwards compatible with Docker manifest types;
however, the media types must be registered.

Signed-off-by: Derek McGowan <derek@mcgstyle.net>
(cherry picked from commit c4f0515837)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-07-03 22:25:07 +02:00
John Stephens
605cc35dc6 LCOW: Prefer Windows over Linux in a manifest list
When a manifest list contains both Linux and Windows images, always
prefer Windows when the platform OS is unspecified. Also, filter out any
Windows images with a higher build than the host, since they cannot run.

Signed-off-by: John Stephens <johnstep@docker.com>
(cherry picked from commit ddcdb7255d)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-07-03 22:24:27 +02:00
Sebastiaan van Stijn
d3a04c2092 Update Microsoft/go-winio to 0.4.8
Fixes named pipe support for Hyper-V isolated containers

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 74095588ba)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-07-03 22:23:55 +02:00
Derek McGowan
bcfbeb8998 Update containerd to v1.1.1-rc.2
Signed-off-by: Derek McGowan <derek@mcgstyle.net>
(cherry picked from commit 735517928b)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-07-03 22:23:01 +02:00
Chris Telfer
a0c213f90c bump libnetwork to 430c00a
Bump libnetwork to 430c00a6a6b3dfdd774f21e1abd4ad6b0216c629.  This
includes the following moby-affecting changes:

 * Update vendoring for go-sockaddr (8df9f31a)
 * Fix inconsistent subnet allocation by preventing allocation of
   overlapping subnets (8579c5d2)
 * Handle IPv6 literals correctly in port bindings (474fcaf4)
 * Update vendoring for miekg/dns (8f307ac8)
 * Avoid subnet reallocation until required (9756ff7ed)
 * Bump libnetwork build to use go version 1.10.2 (603d2c1a)
 * Unwrap error type returned by PluginGetter (aacec8e1)
 * Update vendored components to match moby (d768021dd)
 * Add retry field to cluster-peers probe (dbbd06a7)
 * Fix net driver response loss on createEndpoint (1ab6e506)
   (fixes https://github.com/docker/for-linux/issues/348)

Signed-off-by: Chris Telfer <ctelfer@docker.com>
(cherry picked from commit f155f828a2)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-07-03 19:17:51 +02:00
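Two of the listed ndots changes are visible from the CLI (the image and options are chosen for illustration):

    # "ndots:0" is now passed through instead of being silently ignored
    docker run --rm --dns-opt ndots:0 busybox cat /etc/resolv.conf
    # negative values now produce an error instead of being accepted
    docker run --rm --dns-opt ndots:-1 busybox true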
10692 changed files with 705937 additions and 1931854 deletions

.DEREK.yml (new file, +17)

@@ -0,0 +1,17 @@
curators:
- aboch
- alexellis
- andrewhsu
- anonymuse
- chanwit
- ehazlett
- fntlnz
- gianarb
- mgoelzer
- programmerq
- rheinwein
- ripcurld0
- thajeztah
features:
- comments

.devcontainer/devcontainer.json (deleted)

@@ -1,21 +0,0 @@
{
  "name": "moby",
  "build": {
    "context": "..",
    "dockerfile": "../Dockerfile",
    "target": "devcontainer"
  },
  "workspaceFolder": "/go/src/github.com/docker/docker",
  "workspaceMount": "source=${localWorkspaceFolder},target=/go/src/github.com/docker/docker,type=bind,consistency=cached",
  "remoteUser": "root",
  "runArgs": ["--privileged"],
  "customizations": {
    "vscode": {
      "extensions": [
        "golang.go"
      ]
    }
  }
}

.dockerignore (modified)

@@ -1,4 +1,7 @@
bundles
.gopath
vendor/pkg
.go-pkg-cache
.git
bundles/
cli/winresources/**/winres.json
cli/winresources/**/*.syso
hack/integration-cli-on-swarm/integration-cli-on-swarm

.gitattributes vendored (deleted, -3)

@@ -1,3 +0,0 @@
Dockerfile* linguist-language=Dockerfile
vendor.mod linguist-language=Go-Module
vendor.sum linguist-language=Go-Checksums

.github/CODEOWNERS vendored (modified, 9 changes)

@@ -4,10 +4,17 @@
# KEEP THIS FILE SORTED. Order is important. Last match takes precedence.
builder/** @tonistiigi
client/** @dnephin
contrib/mkimage/** @tianon
daemon/graphdriver/devmapper/** @rhvgoyal
daemon/graphdriver/lcow/** @johnstep @jhowardmsft
daemon/graphdriver/overlay/** @dmcgowan
daemon/graphdriver/overlay2/** @dmcgowan
daemon/graphdriver/windows/** @johnstep
daemon/graphdriver/windows/** @johnstep @jhowardmsft
daemon/logger/awslogs/** @samuelkarp
hack/** @tianon
hack/integration-cli-on-swarm/** @AkihiroSuda
integration-cli/** @vdemeester
integration/** @vdemeester
plugin/** @cpuguy83
project/** @thaJeztah

.github/ISSUE_TEMPLATE.md vendored (new file, +70)

@@ -0,0 +1,70 @@
<!--
If you are reporting a new issue, make sure that we do not have any duplicates
already open. You can ensure this by searching the issue list for this
repository. If there is a duplicate, please close your issue and add a comment
to the existing issue instead.
If you suspect your issue is a bug, please edit your issue description to
include the BUG REPORT INFORMATION shown below. If you fail to provide this
information within 7 days, we cannot debug your issue and will close it. We
will, however, reopen it if you later provide the information.
For more information about reporting issues, see
https://github.com/moby/moby/blob/master/CONTRIBUTING.md#reporting-other-issues
---------------------------------------------------
GENERAL SUPPORT INFORMATION
---------------------------------------------------
The GitHub issue tracker is for bug reports and feature requests.
General support for **docker** can be found at the following locations:
- Docker Support Forums - https://forums.docker.com
- Slack - community.docker.com #general channel
- Post a question on StackOverflow, using the Docker tag
General support for **moby** can be found at the following locations:
- Moby Project Forums - https://forums.mobyproject.org
- Slack - community.docker.com #moby-project channel
- Post a question on StackOverflow, using the Moby tag
---------------------------------------------------
BUG REPORT INFORMATION
---------------------------------------------------
Use the commands below to provide key information from your environment:
You do NOT have to include this information if this is a FEATURE REQUEST
-->
**Description**
<!--
Briefly describe the problem you are having in a few paragraphs.
-->
**Steps to reproduce the issue:**
1.
2.
3.
**Describe the results you received:**
**Describe the results you expected:**
**Additional information you deem important (e.g. issue happens only occasionally):**
**Output of `docker version`:**
```
(paste your output here)
```
**Output of `docker info`:**
```
(paste your output here)
```
**Additional environment details (AWS, VirtualBox, physical, etc.):**

.github/ISSUE_TEMPLATE/bug_report.yml (deleted)

@@ -1,146 +0,0 @@
name: Bug report
description: Create a report to help us improve
labels:
- kind/bug
- status/0-triage
body:
- type: markdown
attributes:
value: |
Thank you for taking the time to report a bug!
If this is a security issue please report it to the [Docker Security team](mailto:security@docker.com).
- type: textarea
id: description
attributes:
label: Description
description: Please give a clear and concise description of the bug
validations:
required: true
- type: textarea
id: repro
attributes:
label: Reproduce
description: Steps to reproduce the bug
placeholder: |
1. docker run ...
2. docker kill ...
3. docker rm ...
validations:
required: true
- type: textarea
id: expected
attributes:
label: Expected behavior
description: What is the expected behavior?
placeholder: |
E.g. "`docker rm` should remove the container and cleanup all associated data"
- type: textarea
id: version
attributes:
label: docker version
description: Output of `docker version`
render: bash
placeholder: |
Client:
Version: 20.10.17
API version: 1.41
Go version: go1.17.11
Git commit: 100c70180fde3601def79a59cc3e996aa553c9b9
Built: Mon Jun 6 21:36:39 UTC 2022
OS/Arch: linux/amd64
Context: default
Experimental: true
Server:
Engine:
Version: 20.10.17
API version: 1.41 (minimum version 1.12)
Go version: go1.17.11
Git commit: a89b84221c8560e7a3dee2a653353429e7628424
Built: Mon Jun 6 22:32:38 2022
OS/Arch: linux/amd64
Experimental: true
containerd:
Version: 1.6.6
GitCommit: 10c12954828e7c7c9b6e0ea9b0c02b01407d3ae1
runc:
Version: 1.1.2
GitCommit: a916309fff0f838eb94e928713dbc3c0d0ac7aa4
docker-init:
Version: 0.19.0
GitCommit:
validations:
required: true
- type: textarea
id: info
attributes:
label: docker info
description: Output of `docker info`
render: bash
placeholder: |
Client:
Context: default
Debug Mode: false
Plugins:
buildx: Docker Buildx (Docker Inc., 0.8.2)
compose: Docker Compose (Docker Inc., 2.6.0)
Server:
Containers: 4
Running: 2
Paused: 0
Stopped: 2
Images: 80
Server Version: 20.10.17
Storage Driver: overlay2
Backing Filesystem: xfs
Supports d_type: true
Native Overlay Diff: false
userxattr: false
Logging Driver: local
Cgroup Driver: cgroupfs
Cgroup Version: 1
Plugins:
Volume: local
Network: bridge host ipvlan macvlan null overlay
Log: awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog
Swarm: inactive
Runtimes: runc io.containerd.runc.v2 io.containerd.runtime.v1.linux
Default Runtime: runc
Init Binary: docker-init
containerd version: 10c12954828e7c7c9b6e0ea9b0c02b01407d3ae1
runc version: a916309fff0f838eb94e928713dbc3c0d0ac7aa4
init version:
Security Options:
apparmor
seccomp
Profile: default
Kernel Version: 5.13.0-1031-azure
Operating System: Ubuntu 20.04.4 LTS
OSType: linux
Architecture: x86_64
CPUs: 4
Total Memory: 15.63GiB
Name: dev
ID: UC44:2RFL:7NQ5:GGFW:34O5:DYRE:CLOH:VLGZ:64AZ:GFXC:PY6H:SAHY
Docker Root Dir: /var/lib/docker
Debug Mode: true
File Descriptors: 46
Goroutines: 134
System Time: 2022-07-06T18:07:54.812439392Z
EventsListeners: 0
Registry: https://index.docker.io/v1/
Labels:
Experimental: true
Insecure Registries:
127.0.0.0/8
Live Restore Enabled: true
validations:
required: true
- type: textarea
id: additional
attributes:
label: Additional Info
description: Additional info you want to provide such as logs, system info, environment, etc.
validations:
required: false

.github/ISSUE_TEMPLATE/config.yml (deleted)

@@ -1,8 +0,0 @@
blank_issues_enabled: false
contact_links:
  - name: Security and Vulnerabilities
    url: https://github.com/moby/moby/blob/master/SECURITY.md
    about: Please report any security issues or vulnerabilities responsibly to the Docker security team. Please do not use the public issue tracker.
  - name: Questions and Discussions
    url: https://github.com/moby/moby/discussions/new
    about: Use Github Discussions to ask questions and/or open discussion topics.

.github/ISSUE_TEMPLATE/feature_request.yml (deleted)

@@ -1,13 +0,0 @@
name: Feature request
description: Missing functionality? Come tell us about it!
labels:
  - kind/feature
  - status/0-triage
body:
  - type: textarea
    id: description
    attributes:
      label: Description
      description: What is the feature you want to see?
    validations:
      required: true

.github/PULL_REQUEST_TEMPLATE.md (modified)

@@ -19,18 +19,12 @@ Please provide the following information:
**- How to verify it**
**- Human readable description for the release notes**
**- Description for the changelog**
<!--
Write a short (one line) summary that describes the changes in this
pull request for inclusion in the changelog.
It must be placed inside the below triple backticks section.
NOTE: Only fill this section if changes introduced in this PR are user-facing.
The PR must have a relevant impact/ label.
pull request for inclusion in the changelog:
-->
```markdown changelog
```
**- A picture of a cute animal (not mandatory but encouraged)**

.github/actions/setup-runner/action.yml (deleted)

@@ -1,27 +0,0 @@
name: 'Setup Runner'
description: 'Composite action to set up the GitHub Runner for jobs in the test.yml workflow'

runs:
  using: composite
  steps:
    - run: |
        sudo modprobe ip_vs
        sudo modprobe ipv6
        sudo modprobe ip6table_filter
        sudo modprobe -r overlay
        sudo modprobe overlay redirect_dir=off
      shell: bash
    - run: |
        if [ ! -e /etc/docker/daemon.json ]; then
          echo '{}' | sudo tee /etc/docker/daemon.json >/dev/null
        fi
        DOCKERD_CONFIG=$(jq '.+{"experimental":true,"live-restore":true,"ipv6":true,"fixed-cidr-v6":"2001:db8:1::/64"}' /etc/docker/daemon.json)
        sudo tee /etc/docker/daemon.json <<<"$DOCKERD_CONFIG" >/dev/null
        sudo service docker restart
      shell: bash
    - run: |
        ./contrib/check-config.sh || true
      shell: bash
    - run: |
        docker info
      shell: bash

.github/actions/setup-tracing/action.yml (deleted)

@@ -1,14 +0,0 @@
name: 'Setup Tracing'
description: 'Composite action to set up the tracing for test jobs'

runs:
  using: composite
  steps:
    - run: |
        set -e
        # Jaeger is set up on Windows through an inline run step. If you update Jaeger here, don't forget to update
        # the version set in .github/workflows/.windows.yml.
        docker run -d --net=host --name jaeger -e COLLECTOR_OTLP_ENABLED=true jaegertracing/all-in-one:1.46
        docker0_ip="$(ip -f inet addr show docker0 | grep -Po 'inet \K[\d.]+')"
        echo "OTEL_EXPORTER_OTLP_ENDPOINT=http://${docker0_ip}:4318" >> "${GITHUB_ENV}"
      shell: bash

.github/workflows/.dco.yml (deleted)

@@ -1,60 +0,0 @@
# reusable workflow
name: .dco

# TODO: hide reusable workflow from the UI. Tracked in https://github.com/community/community/discussions/12025

# Default to 'contents: read', which grants actions to read commits.
#
# If any permission is set, any permission not included in the list is
# implicitly set to "none".
#
# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
permissions:
  contents: read

on:
  workflow_call:

env:
  ALPINE_VERSION: "3.20"

jobs:
  run:
    runs-on: ubuntu-20.04
    timeout-minutes: 10 # guardrails timeout for the whole job
    steps:
      -
        name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      -
        name: Dump context
        uses: actions/github-script@v7
        with:
          script: |
            console.log(JSON.stringify(context, null, 2));
      -
        name: Get base ref
        id: base-ref
        uses: actions/github-script@v7
        with:
          result-encoding: string
          script: |
            if (/^refs\/pull\//.test(context.ref) && context.payload?.pull_request?.base?.ref != undefined) {
              return context.payload.pull_request.base.ref;
            }
            return context.ref.replace(/^refs\/heads\//g, '');
      -
        name: Validate
        run: |
          docker run --rm \
            --quiet \
            -v ./:/workspace \
            -w /workspace \
            -e VALIDATE_REPO \
            -e VALIDATE_BRANCH \
            alpine:${{ env.ALPINE_VERSION }} sh -c 'apk add --no-cache -q bash git openssh-client && git config --system --add safe.directory /workspace && hack/validate/dco'
        env:
          VALIDATE_REPO: ${{ github.server_url }}/${{ github.repository }}.git
          VALIDATE_BRANCH: ${{ steps.base-ref.outputs.result }}

.github/workflows/.test-prepare.yml (deleted)

@@ -1,45 +0,0 @@
# reusable workflow
name: .test-prepare

# TODO: hide reusable workflow from the UI. Tracked in https://github.com/community/community/discussions/12025

# Default to 'contents: read', which grants actions to read commits.
#
# If any permission is set, any permission not included in the list is
# implicitly set to "none".
#
# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
permissions:
  contents: read

on:
  workflow_call:
    outputs:
      matrix:
        description: Test matrix
        value: ${{ jobs.run.outputs.matrix }}

jobs:
  run:
    runs-on: ubuntu-20.04
    timeout-minutes: 120 # guardrails timeout for the whole job
    outputs:
      matrix: ${{ steps.set.outputs.matrix }}
    steps:
      -
        name: Checkout
        uses: actions/checkout@v4
      -
        name: Create matrix
        id: set
        uses: actions/github-script@v7
        with:
          script: |
            let matrix = ['graphdriver'];
            if ("${{ contains(github.event.pull_request.labels.*.name, 'containerd-integration') || github.event_name != 'pull_request' }}" == "true") {
              matrix.push('snapshotter');
            }
            await core.group(`Set matrix`, async () => {
              core.info(`matrix: ${JSON.stringify(matrix)}`);
              core.setOutput('matrix', JSON.stringify(matrix));
            });

.github/workflows/.test.yml (deleted)

@@ -1,477 +0,0 @@
# reusable workflow
name: .test
# TODO: hide reusable workflow from the UI. Tracked in https://github.com/community/community/discussions/12025
# Default to 'contents: read', which grants actions to read commits.
#
# If any permission is set, any permission not included in the list is
# implicitly set to "none".
#
# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
permissions:
contents: read
on:
workflow_call:
inputs:
storage:
required: true
type: string
default: "graphdriver"
env:
GO_VERSION: "1.22.12"
GOTESTLIST_VERSION: v0.3.1
TESTSTAT_VERSION: v0.1.25
ITG_CLI_MATRIX_SIZE: 6
DOCKER_EXPERIMENTAL: 1
DOCKER_GRAPHDRIVER: ${{ inputs.storage == 'snapshotter' && 'overlayfs' || 'overlay2' }}
TEST_INTEGRATION_USE_SNAPSHOTTER: ${{ inputs.storage == 'snapshotter' && '1' || '' }}
SETUP_BUILDX_VERSION: latest
SETUP_BUILDKIT_IMAGE: moby/buildkit:latest
jobs:
unit:
runs-on: ubuntu-20.04
timeout-minutes: 120 # guardrails timeout for the whole job
continue-on-error: ${{ github.event_name != 'pull_request' }}
steps:
-
name: Checkout
uses: actions/checkout@v4
-
name: Set up runner
uses: ./.github/actions/setup-runner
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Build dev image
uses: docker/bake-action@v6
with:
targets: dev
set: |
dev.cache-from=type=gha,scope=dev
-
name: Test
run: |
make -o build test-unit
-
name: Prepare reports
if: always()
run: |
mkdir -p bundles /tmp/reports
find bundles -path '*/root/*overlay2' -prune -o -type f \( -name '*-report.json' -o -name '*.log' -o -name '*.out' -o -name '*.prof' -o -name '*-report.xml' \) -print | xargs sudo tar -czf /tmp/reports.tar.gz
tar -xzf /tmp/reports.tar.gz -C /tmp/reports
sudo chown -R $(id -u):$(id -g) /tmp/reports
tree -nh /tmp/reports
-
name: Send to Codecov
uses: codecov/codecov-action@v4
with:
directory: ./bundles
env_vars: RUNNER_OS
flags: unit
token: ${{ secrets.CODECOV_TOKEN }} # used to upload coverage reports: https://github.com/moby/buildkit/pull/4660#issue-2142122533
-
name: Upload reports
if: always()
uses: actions/upload-artifact@v4
with:
name: test-reports-unit-${{ inputs.storage }}
path: /tmp/reports/*
retention-days: 1
unit-report:
runs-on: ubuntu-20.04
timeout-minutes: 10
continue-on-error: ${{ github.event_name != 'pull_request' }}
if: always()
needs:
- unit
steps:
-
name: Set up Go
uses: actions/setup-go@v5
with:
go-version: ${{ env.GO_VERSION }}
-
name: Download reports
uses: actions/download-artifact@v4
with:
name: test-reports-unit-${{ inputs.storage }}
path: /tmp/reports
-
name: Install teststat
run: |
go install github.com/vearutop/teststat@${{ env.TESTSTAT_VERSION }}
-
name: Create summary
run: |
find /tmp/reports -type f -name '*-go-test-report.json' -exec teststat -markdown {} \+ >> $GITHUB_STEP_SUMMARY
docker-py:
runs-on: ubuntu-20.04
timeout-minutes: 120 # guardrails timeout for the whole job
continue-on-error: ${{ github.event_name != 'pull_request' }}
steps:
-
name: Checkout
uses: actions/checkout@v4
-
name: Set up runner
uses: ./.github/actions/setup-runner
-
name: Set up tracing
uses: ./.github/actions/setup-tracing
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Build dev image
uses: docker/bake-action@v6
with:
targets: dev
set: |
dev.cache-from=type=gha,scope=dev
-
name: Test
run: |
make -o build test-docker-py
-
name: Prepare reports
if: always()
run: |
mkdir -p bundles /tmp/reports
find bundles -path '*/root/*overlay2' -prune -o -type f \( -name '*-report.json' -o -name '*.log' -o -name '*.out' -o -name '*.prof' -o -name '*-report.xml' \) -print | xargs sudo tar -czf /tmp/reports.tar.gz
tar -xzf /tmp/reports.tar.gz -C /tmp/reports
sudo chown -R $(id -u):$(id -g) /tmp/reports
tree -nh /tmp/reports
curl -sSLf localhost:16686/api/traces?service=integration-test-client > /tmp/reports/jaeger-trace.json
-
name: Test daemon logs
if: always()
run: |
cat bundles/test-docker-py/docker.log
-
name: Upload reports
if: always()
uses: actions/upload-artifact@v4
with:
name: test-reports-docker-py-${{ inputs.storage }}
path: /tmp/reports/*
retention-days: 1
integration-flaky:
runs-on: ubuntu-20.04
timeout-minutes: 120 # guardrails timeout for the whole job
continue-on-error: ${{ github.event_name != 'pull_request' }}
steps:
-
name: Checkout
uses: actions/checkout@v4
-
name: Set up runner
uses: ./.github/actions/setup-runner
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Build dev image
uses: docker/bake-action@v6
with:
targets: dev
set: |
dev.cache-from=type=gha,scope=dev
-
name: Test
run: |
make -o build test-integration-flaky
env:
TEST_SKIP_INTEGRATION_CLI: 1
integration:
runs-on: ${{ matrix.os }}
timeout-minutes: 120 # guardrails timeout for the whole job
continue-on-error: ${{ github.event_name != 'pull_request' }}
strategy:
fail-fast: false
matrix:
os:
- ubuntu-20.04
- ubuntu-22.04
mode:
- ""
- rootless
- systemd
#- rootless-systemd FIXME: https://github.com/moby/moby/issues/44084
steps:
-
name: Checkout
uses: actions/checkout@v4
-
name: Set up runner
uses: ./.github/actions/setup-runner
-
name: Set up tracing
uses: ./.github/actions/setup-tracing
-
name: Prepare
run: |
CACHE_DEV_SCOPE=dev
if [[ "${{ matrix.mode }}" == *"rootless"* ]]; then
echo "DOCKER_ROOTLESS=1" >> $GITHUB_ENV
fi
if [[ "${{ matrix.mode }}" == *"systemd"* ]]; then
echo "SYSTEMD=true" >> $GITHUB_ENV
CACHE_DEV_SCOPE="${CACHE_DEV_SCOPE}systemd"
fi
echo "CACHE_DEV_SCOPE=${CACHE_DEV_SCOPE}" >> $GITHUB_ENV
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Build dev image
uses: docker/bake-action@v6
with:
targets: dev
set: |
dev.cache-from=type=gha,scope=${{ env.CACHE_DEV_SCOPE }}
-
name: Test
run: |
make -o build test-integration
env:
TEST_SKIP_INTEGRATION_CLI: 1
TESTCOVERAGE: 1
-
name: Prepare reports
if: always()
run: |
reportsName=${{ matrix.os }}
if [ -n "${{ matrix.mode }}" ]; then
reportsName="$reportsName-${{ matrix.mode }}"
fi
reportsPath="/tmp/reports/$reportsName"
echo "TESTREPORTS_NAME=$reportsName" >> $GITHUB_ENV
mkdir -p bundles $reportsPath
find bundles -path '*/root/*overlay2' -prune -o -type f \( -name '*-report.json' -o -name '*.log' -o -name '*.out' -o -name '*.prof' -o -name '*-report.xml' \) -print | xargs sudo tar -czf /tmp/reports.tar.gz
tar -xzf /tmp/reports.tar.gz -C $reportsPath
sudo chown -R $(id -u):$(id -g) $reportsPath
tree -nh $reportsPath
curl -sSLf localhost:16686/api/traces?service=integration-test-client > $reportsPath/jaeger-trace.json
-
name: Send to Codecov
uses: codecov/codecov-action@v4
with:
directory: ./bundles/test-integration
env_vars: RUNNER_OS
flags: integration,${{ matrix.mode }}
token: ${{ secrets.CODECOV_TOKEN }} # used to upload coverage reports: https://github.com/moby/buildkit/pull/4660#issue-2142122533
-
name: Test daemon logs
if: always()
run: |
cat bundles/test-integration/docker.log
-
name: Upload reports
if: always()
uses: actions/upload-artifact@v4
with:
name: test-reports-integration-${{ inputs.storage }}-${{ env.TESTREPORTS_NAME }}
path: /tmp/reports/*
retention-days: 1
integration-report:
runs-on: ubuntu-20.04
timeout-minutes: 10
continue-on-error: ${{ github.event_name != 'pull_request' }}
if: always()
needs:
- integration
steps:
-
name: Set up Go
uses: actions/setup-go@v5
with:
go-version: ${{ env.GO_VERSION }}
-
name: Download reports
uses: actions/download-artifact@v4
with:
path: /tmp/reports
pattern: test-reports-integration-${{ inputs.storage }}-*
merge-multiple: true
-
name: Install teststat
run: |
go install github.com/vearutop/teststat@${{ env.TESTSTAT_VERSION }}
-
name: Create summary
run: |
find /tmp/reports -type f -name '*-go-test-report.json' -exec teststat -markdown {} \+ >> $GITHUB_STEP_SUMMARY
integration-cli-prepare:
runs-on: ubuntu-20.04
timeout-minutes: 120 # guardrails timeout for the whole job
continue-on-error: ${{ github.event_name != 'pull_request' }}
outputs:
matrix: ${{ steps.tests.outputs.matrix }}
steps:
-
name: Checkout
uses: actions/checkout@v4
-
name: Set up Go
uses: actions/setup-go@v5
with:
go-version: ${{ env.GO_VERSION }}
-
name: Install gotestlist
run:
go install github.com/crazy-max/gotestlist/cmd/gotestlist@${{ env.GOTESTLIST_VERSION }}
-
name: Create matrix
id: tests
working-directory: ./integration-cli
run: |
# This step creates a matrix for integration-cli tests. Tests suites
# are distributed in integration-cli job through a matrix. There is
# also overrides being added to the matrix like "./..." to run
# "Test integration" step exclusively and specific tests suites that
# take a long time to run.
matrix="$(gotestlist -d ${{ env.ITG_CLI_MATRIX_SIZE }} -o "./..." -o "DockerSwarmSuite" -o "DockerNetworkSuite|DockerExternalVolumeSuite" ./...)"
echo "matrix=$matrix" >> $GITHUB_OUTPUT
-
name: Show matrix
run: |
echo ${{ steps.tests.outputs.matrix }}
integration-cli:
runs-on: ubuntu-20.04
timeout-minutes: 120 # guardrails timeout for the whole job
continue-on-error: ${{ github.event_name != 'pull_request' }}
needs:
- integration-cli-prepare
strategy:
fail-fast: false
matrix:
test: ${{ fromJson(needs.integration-cli-prepare.outputs.matrix) }}
steps:
-
name: Checkout
uses: actions/checkout@v4
-
name: Set up runner
uses: ./.github/actions/setup-runner
-
name: Set up tracing
uses: ./.github/actions/setup-tracing
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Build dev image
uses: docker/bake-action@v6
with:
targets: dev
set: |
dev.cache-from=type=gha,scope=dev
-
name: Test
run: |
make -o build test-integration
env:
TEST_SKIP_INTEGRATION: 1
TESTCOVERAGE: 1
TESTFLAGS: "-test.run (${{ matrix.test }})/"
-
name: Prepare reports
if: always()
run: |
reportsName=$(echo -n "${{ matrix.test }}" | sha256sum | cut -d " " -f 1)
reportsPath=/tmp/reports/$reportsName
echo "TESTREPORTS_NAME=$reportsName" >> $GITHUB_ENV
mkdir -p bundles $reportsPath
echo "${{ matrix.test }}" | tr -s '|' '\n' | tee -a "$reportsPath/tests.txt"
find bundles -path '*/root/*overlay2' -prune -o -type f \( -name '*-report.json' -o -name '*.log' -o -name '*.out' -o -name '*.prof' -o -name '*-report.xml' \) -print | xargs sudo tar -czf /tmp/reports.tar.gz
tar -xzf /tmp/reports.tar.gz -C $reportsPath
sudo chown -R $(id -u):$(id -g) $reportsPath
tree -nh $reportsPath
curl -sSLf localhost:16686/api/traces?service=integration-test-client > $reportsPath/jaeger-trace.json
-
name: Send to Codecov
uses: codecov/codecov-action@v4
with:
directory: ./bundles/test-integration
env_vars: RUNNER_OS
flags: integration-cli
token: ${{ secrets.CODECOV_TOKEN }} # used to upload coverage reports: https://github.com/moby/buildkit/pull/4660#issue-2142122533
-
name: Test daemon logs
if: always()
run: |
cat bundles/test-integration/docker.log
-
name: Upload reports
if: always()
uses: actions/upload-artifact@v4
with:
name: test-reports-integration-cli-${{ inputs.storage }}-${{ env.TESTREPORTS_NAME }}
path: /tmp/reports/*
retention-days: 1
integration-cli-report:
runs-on: ubuntu-20.04
timeout-minutes: 10
continue-on-error: ${{ github.event_name != 'pull_request' }}
if: always()
needs:
- integration-cli
steps:
-
name: Set up Go
uses: actions/setup-go@v5
with:
go-version: ${{ env.GO_VERSION }}
-
name: Download reports
uses: actions/download-artifact@v4
with:
path: /tmp/reports
pattern: test-reports-integration-cli-${{ inputs.storage }}-*
merge-multiple: true
-
name: Install teststat
run: |
go install github.com/vearutop/teststat@${{ env.TESTSTAT_VERSION }}
-
name: Create summary
run: |
find /tmp/reports -type f -name '*-go-test-report.json' -exec teststat -markdown {} \+ >> $GITHUB_STEP_SUMMARY

.github/workflows/.windows.yml (deleted)

@@ -1,565 +0,0 @@
# reusable workflow
name: .windows
# TODO: hide reusable workflow from the UI. Tracked in https://github.com/community/community/discussions/12025
# Default to 'contents: read', which grants actions to read commits.
#
# If any permission is set, any permission not included in the list is
# implicitly set to "none".
#
# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
permissions:
contents: read
on:
workflow_call:
inputs:
os:
required: true
type: string
storage:
required: true
type: string
default: "graphdriver"
send_coverage:
required: false
type: boolean
default: false
env:
GO_VERSION: "1.22.12"
GOTESTLIST_VERSION: v0.3.1
TESTSTAT_VERSION: v0.1.25
WINDOWS_BASE_IMAGE: mcr.microsoft.com/windows/servercore
WINDOWS_BASE_TAG_2019: ltsc2019
WINDOWS_BASE_TAG_2022: ltsc2022
TEST_IMAGE_NAME: moby:test
TEST_CTN_NAME: moby
DOCKER_BUILDKIT: 0
ITG_CLI_MATRIX_SIZE: 6
jobs:
build:
runs-on: ${{ inputs.os }}
timeout-minutes: 120 # guardrails timeout for the whole job
env:
GOPATH: ${{ github.workspace }}\go
GOBIN: ${{ github.workspace }}\go\bin
BIN_OUT: ${{ github.workspace }}\out
defaults:
run:
working-directory: ${{ env.GOPATH }}/src/github.com/docker/docker
steps:
-
name: Checkout
uses: actions/checkout@v4
with:
path: ${{ env.GOPATH }}/src/github.com/docker/docker
-
name: Env
run: |
Get-ChildItem Env: | Out-String
-
name: Init
run: |
New-Item -ItemType "directory" -Path "${{ github.workspace }}\go-build"
New-Item -ItemType "directory" -Path "${{ github.workspace }}\go\pkg\mod"
If ("${{ inputs.os }}" -eq "windows-2019") {
echo "WINDOWS_BASE_IMAGE_TAG=${{ env.WINDOWS_BASE_TAG_2019 }}" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append
} ElseIf ("${{ inputs.os }}" -eq "windows-2022") {
echo "WINDOWS_BASE_IMAGE_TAG=${{ env.WINDOWS_BASE_TAG_2022 }}" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append
}
-
name: Cache
uses: actions/cache@v4
with:
path: |
~\AppData\Local\go-build
~\go\pkg\mod
${{ github.workspace }}\go-build
${{ env.GOPATH }}\pkg\mod
key: ${{ inputs.os }}-${{ github.job }}-${{ hashFiles('**/vendor.sum') }}
restore-keys: |
${{ inputs.os }}-${{ github.job }}-
-
name: Docker info
run: |
docker info
-
name: Build base image
run: |
& docker build `
--build-arg WINDOWS_BASE_IMAGE `
--build-arg WINDOWS_BASE_IMAGE_TAG `
--build-arg GO_VERSION `
-t ${{ env.TEST_IMAGE_NAME }} `
-f Dockerfile.windows .
-
name: Build binaries
run: |
& docker run --name ${{ env.TEST_CTN_NAME }} -e "DOCKER_GITCOMMIT=${{ github.sha }}" `
-v "${{ github.workspace }}\go-build:C:\Users\ContainerAdministrator\AppData\Local\go-build" `
-v "${{ github.workspace }}\go\pkg\mod:C:\gopath\pkg\mod" `
${{ env.TEST_IMAGE_NAME }} hack\make.ps1 -Daemon -Client
-
name: Copy artifacts
run: |
New-Item -ItemType "directory" -Path "${{ env.BIN_OUT }}"
docker cp "${{ env.TEST_CTN_NAME }}`:c`:\gopath\src\github.com\docker\docker\bundles\docker.exe" ${{ env.BIN_OUT }}\
docker cp "${{ env.TEST_CTN_NAME }}`:c`:\gopath\src\github.com\docker\docker\bundles\dockerd.exe" ${{ env.BIN_OUT }}\
docker cp "${{ env.TEST_CTN_NAME }}`:c`:\gopath\bin\gotestsum.exe" ${{ env.BIN_OUT }}\
docker cp "${{ env.TEST_CTN_NAME }}`:c`:\containerd\bin\containerd.exe" ${{ env.BIN_OUT }}\
docker cp "${{ env.TEST_CTN_NAME }}`:c`:\containerd\bin\containerd-shim-runhcs-v1.exe" ${{ env.BIN_OUT }}\
-
name: Upload artifacts
uses: actions/upload-artifact@v4
with:
name: build-${{ inputs.storage }}-${{ inputs.os }}
path: ${{ env.BIN_OUT }}/*
if-no-files-found: error
retention-days: 2
unit-test:
runs-on: ${{ inputs.os }}
timeout-minutes: 120 # guardrails timeout for the whole job
env:
GOPATH: ${{ github.workspace }}\go
GOBIN: ${{ github.workspace }}\go\bin
defaults:
run:
working-directory: ${{ env.GOPATH }}/src/github.com/docker/docker
steps:
-
name: Checkout
uses: actions/checkout@v4
with:
path: ${{ env.GOPATH }}/src/github.com/docker/docker
-
name: Env
run: |
Get-ChildItem Env: | Out-String
-
name: Init
run: |
New-Item -ItemType "directory" -Path "${{ github.workspace }}\go-build"
New-Item -ItemType "directory" -Path "${{ github.workspace }}\go\pkg\mod"
New-Item -ItemType "directory" -Path "bundles"
If ("${{ inputs.os }}" -eq "windows-2019") {
echo "WINDOWS_BASE_IMAGE_TAG=${{ env.WINDOWS_BASE_TAG_2019 }}" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append
} ElseIf ("${{ inputs.os }}" -eq "windows-2022") {
echo "WINDOWS_BASE_IMAGE_TAG=${{ env.WINDOWS_BASE_TAG_2022 }}" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append
}
-
name: Cache
uses: actions/cache@v4
with:
path: |
~\AppData\Local\go-build
~\go\pkg\mod
${{ github.workspace }}\go-build
${{ env.GOPATH }}\pkg\mod
key: ${{ inputs.os }}-${{ github.job }}-${{ hashFiles('**/vendor.sum') }}
restore-keys: |
${{ inputs.os }}-${{ github.job }}-
-
name: Docker info
run: |
docker info
-
name: Build base image
run: |
& docker build `
--build-arg WINDOWS_BASE_IMAGE `
--build-arg WINDOWS_BASE_IMAGE_TAG `
--build-arg GO_VERSION `
-t ${{ env.TEST_IMAGE_NAME }} `
-f Dockerfile.windows .
-
name: Test
run: |
& docker run --name ${{ env.TEST_CTN_NAME }} -e "DOCKER_GITCOMMIT=${{ github.sha }}" `
-v "${{ github.workspace }}\go-build:C:\Users\ContainerAdministrator\AppData\Local\go-build" `
-v "${{ github.workspace }}\go\pkg\mod:C:\gopath\pkg\mod" `
-v "${{ env.GOPATH }}\src\github.com\docker\docker\bundles:C:\gopath\src\github.com\docker\docker\bundles" `
${{ env.TEST_IMAGE_NAME }} hack\make.ps1 -TestUnit
-
name: Send to Codecov
if: inputs.send_coverage
uses: codecov/codecov-action@v4
with:
working-directory: ${{ env.GOPATH }}\src\github.com\docker\docker
directory: bundles
env_vars: RUNNER_OS
flags: unit
token: ${{ secrets.CODECOV_TOKEN }} # used to upload coverage reports: https://github.com/moby/buildkit/pull/4660#issue-2142122533
-
name: Upload reports
if: always()
uses: actions/upload-artifact@v4
with:
name: ${{ inputs.os }}-${{ inputs.storage }}-unit-reports
path: ${{ env.GOPATH }}\src\github.com\docker\docker\bundles\*
retention-days: 1
unit-test-report:
runs-on: ubuntu-24.04
timeout-minutes: 120 # guardrails timeout for the whole job
if: always()
needs:
- unit-test
steps:
-
name: Set up Go
uses: actions/setup-go@v5
with:
go-version: ${{ env.GO_VERSION }}
-
name: Download artifacts
uses: actions/download-artifact@v4
with:
name: ${{ inputs.os }}-${{ inputs.storage }}-unit-reports
path: /tmp/artifacts
-
name: Install teststat
run: |
go install github.com/vearutop/teststat@${{ env.TESTSTAT_VERSION }}
-
name: Create summary
run: |
find /tmp/artifacts -type f -name '*-go-test-report.json' -exec teststat -markdown {} \+ >> $GITHUB_STEP_SUMMARY
integration-test-prepare:
runs-on: ubuntu-24.04
timeout-minutes: 120 # guardrails timeout for the whole job
outputs:
matrix: ${{ steps.tests.outputs.matrix }}
steps:
-
name: Checkout
uses: actions/checkout@v4
-
name: Set up Go
uses: actions/setup-go@v5
with:
go-version: ${{ env.GO_VERSION }}
-
name: Install gotestlist
run:
go install github.com/crazy-max/gotestlist/cmd/gotestlist@${{ env.GOTESTLIST_VERSION }}
-
name: Create matrix
id: tests
working-directory: ./integration-cli
run: |
# This step creates a matrix for integration-cli tests. Tests suites
# are distributed in integration-test job through a matrix. There is
# also an override being added to the matrix like "./..." to run
# "Test integration" step exclusively.
matrix="$(gotestlist -d ${{ env.ITG_CLI_MATRIX_SIZE }} -o "./..." ./...)"
echo "matrix=$matrix" >> $GITHUB_OUTPUT
-
name: Show matrix
run: |
echo ${{ steps.tests.outputs.matrix }}
integration-test:
runs-on: ${{ inputs.os }}
timeout-minutes: 120 # guardrails timeout for the whole job
continue-on-error: ${{ inputs.storage == 'snapshotter' && github.event_name != 'pull_request' }}
needs:
- build
- integration-test-prepare
strategy:
fail-fast: false
matrix:
storage:
- ${{ inputs.storage }}
runtime:
- builtin
- containerd
test: ${{ fromJson(needs.integration-test-prepare.outputs.matrix) }}
exclude:
- storage: snapshotter
runtime: builtin
env:
GOPATH: ${{ github.workspace }}\go
GOBIN: ${{ github.workspace }}\go\bin
BIN_OUT: ${{ github.workspace }}\out
defaults:
run:
working-directory: ${{ env.GOPATH }}/src/github.com/docker/docker
steps:
-
name: Checkout
uses: actions/checkout@v4
with:
path: ${{ env.GOPATH }}/src/github.com/docker/docker
-
name: Set up Jaeger
run: |
# Jaeger is set up on Linux through the setup-tracing action. If you update Jaeger here, don't forget to
# update the version set in .github/actions/setup-tracing/action.yml.
Invoke-WebRequest -Uri "https://github.com/jaegertracing/jaeger/releases/download/v1.46.0/jaeger-1.46.0-windows-amd64.tar.gz" -OutFile ".\jaeger-1.46.0-windows-amd64.tar.gz"
tar -zxvf ".\jaeger-1.46.0-windows-amd64.tar.gz"
Start-Process '.\jaeger-1.46.0-windows-amd64\jaeger-all-in-one.exe'
echo "OTEL_EXPORTER_OTLP_ENDPOINT=http://127.0.0.1:4318" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append
shell: pwsh
-
name: Env
run: |
Get-ChildItem Env: | Out-String
-
name: Download artifacts
uses: actions/download-artifact@v4
with:
name: build-${{ inputs.storage }}-${{ inputs.os }}
path: ${{ env.BIN_OUT }}
-
name: Init
run: |
New-Item -ItemType "directory" -Path "bundles"
If ("${{ inputs.os }}" -eq "windows-2019") {
echo "WINDOWS_BASE_IMAGE_TAG=${{ env.WINDOWS_BASE_TAG_2019 }}" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append
} ElseIf ("${{ inputs.os }}" -eq "windows-2022") {
echo "WINDOWS_BASE_IMAGE_TAG=${{ env.WINDOWS_BASE_TAG_2022 }}" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append
}
Write-Output "${{ env.BIN_OUT }}" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
$testName = ([System.BitConverter]::ToString((New-Object System.Security.Cryptography.SHA256Managed).ComputeHash([System.Text.Encoding]::UTF8.GetBytes("${{ matrix.test }}"))) -replace '-').ToLower()
echo "TESTREPORTS_NAME=$testName" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append
-
# Removes the Docker service that is currently installed on the runner. We
# could use Uninstall-Package, but it is not yet available on Windows runners.
# more info: https://github.com/actions/virtual-environments/blob/d3a5bad25f3b4326c5666bab0011ac7f1beec95e/images/win/scripts/Installers/Install-Docker.ps1#L11
name: Removing current daemon
run: |
if (Get-Service docker -ErrorAction SilentlyContinue) {
$dockerVersion = (docker version -f "{{.Server.Version}}")
Write-Host "Current installed Docker version: $dockerVersion"
# remove service
Stop-Service -Force -Name docker
Remove-Service -Name docker
# Removes the event log entry. We could use "Remove-EventLog -LogName Application -Source docker",
# but this cmdlet is not available on the runner at the moment.
$ErrorActionPreference = "SilentlyContinue"
& reg delete "HKLM\SYSTEM\CurrentControlSet\Services\EventLog\Application\docker" /f 2>&1 | Out-Null
$ErrorActionPreference = "Stop"
Write-Host "Service removed"
}
-
name: Starting containerd
if: matrix.runtime == 'containerd'
run: |
Write-Host "Generating config"
& "${{ env.BIN_OUT }}\containerd.exe" config default | Out-File "$env:TEMP\ctn.toml" -Encoding ascii
Write-Host "Creating service"
New-Item -ItemType Directory "$env:TEMP\ctn-root" -ErrorAction SilentlyContinue | Out-Null
New-Item -ItemType Directory "$env:TEMP\ctn-state" -ErrorAction SilentlyContinue | Out-Null
Start-Process -Wait "${{ env.BIN_OUT }}\containerd.exe" `
-ArgumentList "--log-level=debug", `
"--config=$env:TEMP\ctn.toml", `
"--address=\\.\pipe\containerd-containerd", `
"--root=$env:TEMP\ctn-root", `
"--state=$env:TEMP\ctn-state", `
"--log-file=$env:TEMP\ctn.log", `
"--register-service"
Write-Host "Starting service"
Start-Service -Name containerd
Start-Sleep -Seconds 5
Write-Host "Service started successfully!"
-
name: Starting test daemon
run: |
Write-Host "Creating service"
If ("${{ matrix.runtime }}" -eq "containerd") {
$runtimeArg="--containerd=\\.\pipe\containerd-containerd"
echo "DOCKER_WINDOWS_CONTAINERD_RUNTIME=1" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append
}
New-Item -ItemType Directory "$env:TEMP\moby-root" -ErrorAction SilentlyContinue | Out-Null
New-Item -ItemType Directory "$env:TEMP\moby-exec" -ErrorAction SilentlyContinue | Out-Null
Start-Process -Wait -NoNewWindow "${{ env.BIN_OUT }}\dockerd" `
-ArgumentList $runtimeArg, "--debug", `
"--host=npipe:////./pipe/docker_engine", `
"--data-root=$env:TEMP\moby-root", `
"--exec-root=$env:TEMP\moby-exec", `
"--pidfile=$env:TEMP\docker.pid", `
"--register-service"
If ("${{ inputs.storage }}" -eq "snapshotter") {
# Make the env-var visible to the service-managed dockerd, as there's no CLI flag for this option.
& reg add "HKLM\SYSTEM\CurrentControlSet\Services\docker" /v Environment /t REG_MULTI_SZ /s '@' /d TEST_INTEGRATION_USE_SNAPSHOTTER=1
echo "TEST_INTEGRATION_USE_SNAPSHOTTER=1" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append
}
Write-Host "Starting service"
Start-Service -Name docker
Write-Host "Service started successfully!"
-
name: Waiting for test daemon to start
run: |
$tries=20
Write-Host "Waiting for the test daemon to start..."
While ($true) {
$ErrorActionPreference = "SilentlyContinue"
& "${{ env.BIN_OUT }}\docker" version
$ErrorActionPreference = "Stop"
If ($LastExitCode -eq 0) {
break
}
$tries--
If ($tries -le 0) {
Throw "Failed to get a response from the daemon"
}
Write-Host -NoNewline "."
Start-Sleep -Seconds 1
}
Write-Host "Test daemon started and replied!"
env:
DOCKER_HOST: npipe:////./pipe/docker_engine
-
name: Docker info
run: |
& "${{ env.BIN_OUT }}\docker" info
env:
DOCKER_HOST: npipe:////./pipe/docker_engine
-
name: Building contrib/busybox
run: |
& "${{ env.BIN_OUT }}\docker" build -t busybox `
--build-arg WINDOWS_BASE_IMAGE `
--build-arg WINDOWS_BASE_IMAGE_TAG `
.\contrib\busybox\
env:
DOCKER_HOST: npipe:////./pipe/docker_engine
-
name: List images
run: |
& "${{ env.BIN_OUT }}\docker" images
env:
DOCKER_HOST: npipe:////./pipe/docker_engine
-
name: Set up Go
uses: actions/setup-go@v5
with:
go-version: ${{ env.GO_VERSION }}
-
name: Test integration
if: matrix.test == './...'
run: |
.\hack\make.ps1 -TestIntegration
env:
DOCKER_HOST: npipe:////./pipe/docker_engine
GO111MODULE: "off"
TEST_CLIENT_BINARY: ${{ env.BIN_OUT }}\docker
-
name: Test integration-cli
if: matrix.test != './...'
run: |
.\hack\make.ps1 -TestIntegrationCli
env:
DOCKER_HOST: npipe:////./pipe/docker_engine
GO111MODULE: "off"
TEST_CLIENT_BINARY: ${{ env.BIN_OUT }}\docker
INTEGRATION_TESTRUN: ${{ matrix.test }}
-
name: Send to Codecov
if: inputs.send_coverage
uses: codecov/codecov-action@v4
with:
working-directory: ${{ env.GOPATH }}\src\github.com\docker\docker
directory: bundles
env_vars: RUNNER_OS
flags: integration,${{ matrix.runtime }}
token: ${{ secrets.CODECOV_TOKEN }} # used to upload coverage reports: https://github.com/moby/buildkit/pull/4660#issue-2142122533
-
name: Docker info
run: |
& "${{ env.BIN_OUT }}\docker" info
env:
DOCKER_HOST: npipe:////./pipe/docker_engine
-
name: Stop containerd
if: always() && matrix.runtime == 'containerd'
run: |
$ErrorActionPreference = "SilentlyContinue"
Stop-Service -Force -Name containerd
$ErrorActionPreference = "Stop"
-
name: Containerd logs
if: always() && matrix.runtime == 'containerd'
run: |
Copy-Item "$env:TEMP\ctn.log" -Destination ".\bundles\containerd.log"
Get-Content "$env:TEMP\ctn.log" | Out-Host
-
name: Stop daemon
if: always()
run: |
$ErrorActionPreference = "SilentlyContinue"
Stop-Service -Force -Name docker
$ErrorActionPreference = "Stop"
-
# As the daemon is registered as a service, we have to check the event
# logs against the docker provider.
name: Daemon event logs
if: always()
run: |
Get-WinEvent -ea SilentlyContinue `
-FilterHashtable @{ProviderName= "docker"; LogName = "application"} |
Sort-Object @{Expression="TimeCreated";Descending=$false} |
ForEach-Object {"$($_.TimeCreated.ToUniversalTime().ToString("o")) [$($_.LevelDisplayName)] $($_.Message)"} |
Tee-Object -file ".\bundles\daemon.log"
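# Illustrative output line (hypothetical): 2024-01-01T12:00:00.0000000Z [Information] Starting up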
-
name: Download Jaeger traces
if: always()
run: |
Invoke-WebRequest `
-Uri "http://127.0.0.1:16686/api/traces?service=integration-test-client" `
-OutFile ".\bundles\jaeger-trace.json"
-
name: Upload reports
if: always()
uses: actions/upload-artifact@v4
with:
name: ${{ inputs.os }}-${{ inputs.storage }}-integration-reports-${{ matrix.runtime }}-${{ env.TESTREPORTS_NAME }}
path: ${{ env.GOPATH }}\src\github.com\docker\docker\bundles\*
retention-days: 1
integration-test-report:
runs-on: ubuntu-24.04
timeout-minutes: 120 # guardrails timeout for the whole job
continue-on-error: ${{ inputs.storage == 'snapshotter' && github.event_name != 'pull_request' }}
if: always()
needs:
- integration-test
strategy:
fail-fast: false
matrix:
storage:
- ${{ inputs.storage }}
runtime:
- builtin
- containerd
exclude:
- storage: snapshotter
runtime: builtin
steps:
-
name: Set up Go
uses: actions/setup-go@v5
with:
go-version: ${{ env.GO_VERSION }}
-
name: Download reports
uses: actions/download-artifact@v4
with:
path: /tmp/reports
pattern: ${{ inputs.os }}-${{ inputs.storage }}-integration-reports-${{ matrix.runtime }}-*
merge-multiple: true
-
name: Install teststat
run: |
go install github.com/vearutop/teststat@${{ env.TESTSTAT_VERSION }}
-
name: Create summary
run: |
find /tmp/reports -type f -name '*-go-test-report.json' -exec teststat -markdown {} \+ >> $GITHUB_STEP_SUMMARY


@@ -1,276 +0,0 @@
name: arm64
# Default to 'contents: read', which allows actions to read commits.
#
# If any permission is set, any permission not included in the list is
# implicitly set to "none".
#
# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
permissions:
contents: read
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
on:
workflow_dispatch:
push:
branches:
- 'master'
- '[0-9]+.[0-9]+'
- '[0-9]+.x'
pull_request:
env:
GO_VERSION: "1.22.12"
TESTSTAT_VERSION: v0.1.25
DESTDIR: ./build
SETUP_BUILDX_VERSION: edge
SETUP_BUILDKIT_IMAGE: moby/buildkit:latest
DOCKER_EXPERIMENTAL: 1
jobs:
validate-dco:
uses: ./.github/workflows/.dco.yml
build:
runs-on: ubuntu-22.04-arm
timeout-minutes: 20 # guardrails timeout for the whole job
needs:
- validate-dco
strategy:
fail-fast: false
matrix:
target:
- binary
- dynbinary
steps:
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Build
uses: docker/bake-action@v6
with:
targets: ${{ matrix.target }}
-
name: List artifacts
run: |
tree -nh ${{ env.DESTDIR }}
-
name: Check artifacts
run: |
find ${{ env.DESTDIR }} -type f -exec file -e ascii -- {} +
build-dev:
runs-on: ubuntu-22.04-arm
timeout-minutes: 120 # guardrails timeout for the whole job
needs:
- validate-dco
steps:
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Build dev image
uses: docker/bake-action@v6
with:
targets: dev
set: |
*.cache-from=type=gha,scope=dev-arm64
*.cache-to=type=gha,scope=dev-arm64,mode=max
*.output=type=cacheonly
test-unit:
runs-on: ubuntu-22.04-arm
timeout-minutes: 120 # guardrails timeout for the whole job
needs:
- build-dev
steps:
-
name: Checkout
uses: actions/checkout@v4
-
name: Set up runner
uses: ./.github/actions/setup-runner
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Build dev image
uses: docker/bake-action@v6
with:
targets: dev
set: |
dev.cache-from=type=gha,scope=dev-arm64
-
name: Test
run: |
make -o build test-unit
-
name: Prepare reports
if: always()
run: |
mkdir -p bundles /tmp/reports
find bundles -path '*/root/*overlay2' -prune -o -type f \( -name '*-report.json' -o -name '*.log' -o -name '*.out' -o -name '*.prof' -o -name '*-report.xml' \) -print | xargs sudo tar -czf /tmp/reports.tar.gz
tar -xzf /tmp/reports.tar.gz -C /tmp/reports
sudo chown -R $(id -u):$(id -g) /tmp/reports
tree -nh /tmp/reports
-
name: Send to Codecov
uses: codecov/codecov-action@v4
with:
directory: ./bundles
env_vars: RUNNER_OS
flags: unit
token: ${{ secrets.CODECOV_TOKEN }} # used to upload coverage reports: https://github.com/moby/buildkit/pull/4660#issue-2142122533
-
name: Upload reports
if: always()
uses: actions/upload-artifact@v4
with:
name: test-reports-unit-arm64-graphdriver
path: /tmp/reports/*
retention-days: 1
test-unit-report:
runs-on: ubuntu-20.04
timeout-minutes: 10
continue-on-error: ${{ github.event_name != 'pull_request' }}
if: always()
needs:
- test-unit
steps:
-
name: Set up Go
uses: actions/setup-go@v5
with:
go-version: ${{ env.GO_VERSION }}
cache-dependency-path: vendor.sum
-
name: Download reports
uses: actions/download-artifact@v4
with:
pattern: test-reports-unit-arm64-*
path: /tmp/reports
-
name: Install teststat
run: |
go install github.com/vearutop/teststat@${{ env.TESTSTAT_VERSION }}
-
name: Create summary
run: |
find /tmp/reports -type f -name '*-go-test-report.json' -exec teststat -markdown {} \+ >> $GITHUB_STEP_SUMMARY
test-integration:
runs-on: ubuntu-22.04-arm
timeout-minutes: 120 # guardrails timeout for the whole job
continue-on-error: ${{ github.event_name != 'pull_request' }}
needs:
- build-dev
steps:
-
name: Checkout
uses: actions/checkout@v4
-
name: Set up runner
uses: ./.github/actions/setup-runner
-
name: Set up tracing
uses: ./.github/actions/setup-tracing
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Build dev image
uses: docker/bake-action@v6
with:
targets: dev
set: |
dev.cache-from=type=gha,scope=dev-arm64
-
name: Test
run: |
make -o build test-integration
env:
TEST_SKIP_INTEGRATION_CLI: 1
TESTCOVERAGE: 1
-
name: Prepare reports
if: always()
run: |
reportsPath="/tmp/reports/arm64-graphdriver"
mkdir -p bundles $reportsPath
find bundles -path '*/root/*overlay2' -prune -o -type f \( -name '*-report.json' -o -name '*.log' -o -name '*.out' -o -name '*.prof' -o -name '*-report.xml' \) -print | xargs sudo tar -czf /tmp/reports.tar.gz
tar -xzf /tmp/reports.tar.gz -C $reportsPath
sudo chown -R $(id -u):$(id -g) $reportsPath
tree -nh $reportsPath
curl -sSLf localhost:16686/api/traces?service=integration-test-client > $reportsPath/jaeger-trace.json
-
name: Send to Codecov
uses: codecov/codecov-action@v4
with:
directory: ./bundles/test-integration
env_vars: RUNNER_OS
flags: integration
token: ${{ secrets.CODECOV_TOKEN }} # used to upload coverage reports: https://github.com/moby/buildkit/pull/4660#issue-2142122533
-
name: Test daemon logs
if: always()
run: |
cat bundles/test-integration/docker.log
-
name: Upload reports
if: always()
uses: actions/upload-artifact@v4
with:
name: test-reports-integration-arm64-graphdriver
path: /tmp/reports/*
retention-days: 1
test-integration-report:
runs-on: ubuntu-20.04
timeout-minutes: 10
continue-on-error: ${{ github.event_name != 'pull_request' }}
if: always()
needs:
- test-integration
steps:
-
name: Set up Go
uses: actions/setup-go@v5
with:
go-version: ${{ env.GO_VERSION }}
cache-dependency-path: vendor.sum
-
name: Download reports
uses: actions/download-artifact@v4
with:
path: /tmp/reports
pattern: test-reports-integration-arm64-*
merge-multiple: true
-
name: Install teststat
run: |
go install github.com/vearutop/teststat@${{ env.TESTSTAT_VERSION }}
-
name: Create summary
run: |
find /tmp/reports -type f -name '*-go-test-report.json' -exec teststat -markdown {} \+ >> $GITHUB_STEP_SUMMARY


@@ -1,215 +0,0 @@
name: bin-image
# Default to 'contents: read', which allows actions to read commits.
#
# If any permission is set, any permission not included in the list is
# implicitly set to "none".
#
# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
permissions:
contents: read
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
on:
workflow_dispatch:
push:
branches:
- 'master'
- '[0-9]+.[0-9]+'
- '[0-9]+.x'
tags:
- 'v*'
pull_request:
env:
MOBYBIN_REPO_SLUG: moby/moby-bin
DOCKER_GITCOMMIT: ${{ github.sha }}
VERSION: ${{ github.ref }}
PLATFORM: Moby Engine - Nightly
PRODUCT: moby-bin
PACKAGER_NAME: The Moby Project
SETUP_BUILDX_VERSION: latest
SETUP_BUILDKIT_IMAGE: moby/buildkit:latest
jobs:
validate-dco:
if: ${{ !startsWith(github.ref, 'refs/tags/v') }}
uses: ./.github/workflows/.dco.yml
prepare:
runs-on: ubuntu-20.04
timeout-minutes: 20 # guardrails timeout for the whole job
outputs:
platforms: ${{ steps.platforms.outputs.matrix }}
steps:
-
name: Checkout
uses: actions/checkout@v4
-
name: Docker meta
id: meta
uses: docker/metadata-action@v5
with:
images: |
${{ env.MOBYBIN_REPO_SLUG }}
### versioning strategy
## push semver tag v23.0.0
# moby/moby-bin:23.0.0
# moby/moby-bin:latest
## push semver prerelease tag v23.0.0-beta.1
# moby/moby-bin:23.0.0-beta.1
## push on master
# moby/moby-bin:master
## push on 23.0 branch
# moby/moby-bin:23.0
## any push
# moby/moby-bin:sha-ad132f5
tags: |
type=semver,pattern={{version}}
type=ref,event=branch
type=ref,event=pr
type=sha
-
name: Rename meta bake definition file
# see https://github.com/docker/metadata-action/issues/381#issuecomment-1918607161
run: |
bakeFile="${{ steps.meta.outputs.bake-file }}"
mv "${bakeFile#cwd://}" "/tmp/bake-meta.json"
-
name: Upload meta bake definition
uses: actions/upload-artifact@v4
with:
name: bake-meta
path: /tmp/bake-meta.json
if-no-files-found: error
retention-days: 1
-
name: Create platforms matrix
id: platforms
run: |
echo "matrix=$(docker buildx bake bin-image-cross --print | jq -cr '.target."bin-image-cross".platforms')" >>${GITHUB_OUTPUT}
build:
runs-on: ubuntu-20.04
timeout-minutes: 120 # guardrails timeout for the whole job
needs:
- validate-dco
- prepare
if: always() && !contains(needs.*.result, 'failure') && !contains(needs.*.result, 'cancelled')
strategy:
fail-fast: false
matrix:
platform: ${{ fromJson(needs.prepare.outputs.platforms) }}
steps:
-
name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
-
name: Prepare
run: |
platform=${{ matrix.platform }}
echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
-
name: Download meta bake definition
uses: actions/download-artifact@v4
with:
name: bake-meta
path: /tmp
-
name: Set up QEMU
uses: docker/setup-qemu-action@v3
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Login to Docker Hub
if: github.event_name != 'pull_request' && github.repository == 'moby/moby'
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_MOBYBIN_USERNAME }}
password: ${{ secrets.DOCKERHUB_MOBYBIN_TOKEN }}
-
name: Build
id: bake
uses: docker/bake-action@v6
with:
source: .
files: |
./docker-bake.hcl
/tmp/bake-meta.json
targets: bin-image
set: |
*.platform=${{ matrix.platform }}
*.output=type=image,name=${{ env.MOBYBIN_REPO_SLUG }},push-by-digest=true,name-canonical=true,push=${{ github.event_name != 'pull_request' && github.repository == 'moby/moby' }}
*.tags=
-
name: Export digest
if: github.event_name != 'pull_request' && github.repository == 'moby/moby'
run: |
mkdir -p /tmp/digests
digest="${{ fromJSON(steps.bake.outputs.metadata)['bin-image']['containerimage.digest'] }}"
touch "/tmp/digests/${digest#sha256:}"
-
name: Upload digest
if: github.event_name != 'pull_request' && github.repository == 'moby/moby'
uses: actions/upload-artifact@v4
with:
name: digests-${{ env.PLATFORM_PAIR }}
path: /tmp/digests/*
if-no-files-found: error
retention-days: 1
merge:
runs-on: ubuntu-20.04
timeout-minutes: 120 # guardrails timeout for the whole job
needs:
- build
if: always() && !contains(needs.*.result, 'failure') && !contains(needs.*.result, 'cancelled') && github.event_name != 'pull_request' && github.repository == 'moby/moby'
steps:
-
name: Download meta bake definition
uses: actions/download-artifact@v4
with:
name: bake-meta
path: /tmp
-
name: Download digests
uses: actions/download-artifact@v4
with:
path: /tmp/digests
pattern: digests-*
merge-multiple: true
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_MOBYBIN_USERNAME }}
password: ${{ secrets.DOCKERHUB_MOBYBIN_TOKEN }}
-
name: Create manifest list and push
working-directory: /tmp/digests
run: |
set -x
docker buildx imagetools create $(jq -cr '.target."docker-metadata-action".tags | map("-t " + .) | join(" ")' /tmp/bake-meta.json) \
$(printf '${{ env.MOBYBIN_REPO_SLUG }}@sha256:%s ' *)
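# Illustrative expansion (hypothetical tags and digests):
#   docker buildx imagetools create -t moby/moby-bin:master -t moby/moby-bin:sha-ad132f5 \
#     moby/moby-bin@sha256:aaaa... moby/moby-bin@sha256:bbbb...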
-
name: Inspect image
run: |
set -x
docker buildx imagetools inspect ${{ env.MOBYBIN_REPO_SLUG }}:$(jq -cr '.target."docker-metadata-action".args.DOCKER_META_VERSION' /tmp/bake-meta.json)


@@ -1,163 +0,0 @@
name: buildkit
# Default to 'contents: read', which allows actions to read commits.
#
# If any permission is set, any permission not included in the list is
# implicitly set to "none".
#
# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
permissions:
contents: read
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
on:
workflow_dispatch:
push:
branches:
- 'master'
- '[0-9]+.[0-9]+'
- '[0-9]+.x'
pull_request:
env:
GO_VERSION: "1.22.12"
DESTDIR: ./build
SETUP_BUILDX_VERSION: latest
SETUP_BUILDKIT_IMAGE: moby/buildkit:latest
jobs:
validate-dco:
uses: ./.github/workflows/.dco.yml
build:
runs-on: ubuntu-20.04
timeout-minutes: 120 # guardrails timeout for the whole job
needs:
- validate-dco
steps:
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Build
uses: docker/bake-action@v6
with:
targets: binary
-
name: Upload artifacts
uses: actions/upload-artifact@v4
with:
name: binary
path: ${{ env.DESTDIR }}
if-no-files-found: error
retention-days: 1
test:
runs-on: ubuntu-20.04
timeout-minutes: 120 # guardrails timeout for the whole job
needs:
- build
env:
TEST_IMAGE_BUILD: "0"
TEST_IMAGE_ID: "buildkit-tests"
strategy:
fail-fast: false
matrix:
worker:
- dockerd
- dockerd-containerd
pkg:
- client
- cmd/buildctl
- solver
- frontend
- frontend/dockerfile
typ:
- integration
steps:
-
name: Prepare
run: |
disabledFeatures="cache_backend_azblob,cache_backend_s3"
if [ "${{ matrix.worker }}" = "dockerd" ]; then
disabledFeatures="${disabledFeatures},merge_diff"
fi
echo "BUILDKIT_TEST_DISABLE_FEATURES=${disabledFeatures}" >> $GITHUB_ENV
# Expose `ACTIONS_RUNTIME_TOKEN` and `ACTIONS_CACHE_URL`, which are used
# in BuildKit's test suite to skip/unskip cache exporters:
# https://github.com/moby/buildkit/blob/567a99433ca23402d5e9b9f9124005d2e59b8861/client/client_test.go#L5407-L5411
-
name: Expose GitHub Runtime
uses: crazy-max/ghaction-github-runtime@v3
-
name: Checkout
uses: actions/checkout@v4
with:
path: moby
-
name: Set up Go
uses: actions/setup-go@v5
with:
go-version: ${{ env.GO_VERSION }}
-
name: BuildKit ref
run: |
echo "$(./hack/buildkit-ref)" >> $GITHUB_ENV
working-directory: moby
-
name: Checkout BuildKit ${{ env.BUILDKIT_REF }}
uses: actions/checkout@v4
with:
repository: ${{ env.BUILDKIT_REPO }}
ref: ${{ env.BUILDKIT_REF }}
path: buildkit
-
name: Set up QEMU
uses: docker/setup-qemu-action@v3
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Download binary artifacts
uses: actions/download-artifact@v4
with:
name: binary
path: ./buildkit/build/moby/
-
name: Update daemon.json
run: |
sudo rm -f /etc/docker/daemon.json
sudo service docker restart
docker version
docker info
-
name: Build test image
uses: docker/bake-action@v6
with:
source: .
workdir: ./buildkit
targets: integration-tests
set: |
*.output=type=docker,name=${{ env.TEST_IMAGE_ID }}
-
name: Test
run: |
./hack/test ${{ matrix.typ }}
env:
CONTEXT: "."
TEST_DOCKERD: "1"
TEST_DOCKERD_BINARY: "./build/moby/dockerd"
TESTPKGS: "./${{ matrix.pkg }}"
TESTFLAGS: "-v --parallel=1 --timeout=30m --run=//worker=${{ matrix.worker }}$"
working-directory: buildkit


@@ -1,156 +0,0 @@
name: ci
# Default to 'contents: read', which allows actions to read commits.
#
# If any permission is set, any permission not included in the list is
# implicitly set to "none".
#
# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
permissions:
contents: read
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
on:
workflow_dispatch:
push:
branches:
- 'master'
- '[0-9]+.[0-9]+'
- '[0-9]+.x'
pull_request:
env:
DESTDIR: ./build
SETUP_BUILDX_VERSION: latest
SETUP_BUILDKIT_IMAGE: moby/buildkit:latest
jobs:
validate-dco:
uses: ./.github/workflows/.dco.yml
build:
runs-on: ubuntu-20.04
timeout-minutes: 20 # guardrails timeout for the whole job
needs:
- validate-dco
strategy:
fail-fast: false
matrix:
target:
- binary
- dynbinary
steps:
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Build
uses: docker/bake-action@v6
with:
targets: ${{ matrix.target }}
-
name: List artifacts
run: |
tree -nh ${{ env.DESTDIR }}
-
name: Check artifacts
run: |
find ${{ env.DESTDIR }} -type f -exec file -e ascii -- {} +
prepare-cross:
runs-on: ubuntu-24.04
timeout-minutes: 20 # guardrails timeout for the whole job
needs:
- validate-dco
outputs:
matrix: ${{ steps.platforms.outputs.matrix }}
steps:
-
name: Checkout
uses: actions/checkout@v4
-
name: Create matrix
id: platforms
run: |
matrix="$(docker buildx bake binary-cross --print | jq -cr '.target."binary-cross".platforms')"
echo "matrix=$matrix" >> $GITHUB_OUTPUT
-
name: Show matrix
run: |
echo ${{ steps.platforms.outputs.matrix }}
cross:
runs-on: ubuntu-20.04
timeout-minutes: 20 # guardrails timeout for the whole job
needs:
- validate-dco
- prepare-cross
strategy:
fail-fast: false
matrix:
platform: ${{ fromJson(needs.prepare-cross.outputs.matrix) }}
steps:
-
name: Prepare
run: |
platform=${{ matrix.platform }}
echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Build
uses: docker/bake-action@v6
with:
targets: all
set: |
*.platform=${{ matrix.platform }}
-
name: List artifacts
run: |
tree -nh ${{ env.DESTDIR }}
-
name: Check artifacts
run: |
find ${{ env.DESTDIR }} -type f -exec file -e ascii -- {} +
govulncheck:
runs-on: ubuntu-24.04
timeout-minutes: 120 # guardrails timeout for the whole job
permissions:
# required to write sarif report
security-events: write
# required to check out the repository
contents: read
steps:
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Run
uses: docker/bake-action@v6
with:
targets: govulncheck
env:
GOVULNCHECK_FORMAT: sarif
-
name: Upload SARIF report
if: ${{ github.event_name != 'pull_request' && github.repository == 'moby/moby' }}
uses: github/codeql-action/upload-sarif@v3
with:
sarif_file: ${{ env.DESTDIR }}/govulncheck.out


@@ -1,71 +0,0 @@
name: codeql
# Default to 'contents: read', which allows actions to read commits.
#
# If any permission is set, any permission not included in the list is
# implicitly set to "none".
#
# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
permissions:
contents: read
on:
push:
branches:
- 'master'
- '[0-9]+.[0-9]+'
- '[0-9]+.x'
tags:
- 'v*'
pull_request:
# The branches below must be a subset of the branches above
branches: ["master"]
schedule:
# ┌───────────── minute (0 - 59)
# │ ┌───────────── hour (0 - 23)
# │ │ ┌───────────── day of the month (1 - 31)
# │ │ │ ┌───────────── month (1 - 12)
# │ │ │ │ ┌───────────── day of the week (0 - 6) (Sunday to Saturday)
# │ │ │ │ │
# │ │ │ │ │
# │ │ │ │ │
# * * * * *
- cron: '0 9 * * 4'
jobs:
codeql:
runs-on: ubuntu-24.04
timeout-minutes: 10
permissions:
actions: read
contents: read
security-events: write
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 2
# CodeQL 2.16.4's auto-build added support for multi-module repositories,
# and tries to be smart by searching for modules in every directory,
# including vendor directories. If no module is found, it creates one,
# which is not what we want, so let's give it a "go.mod".
# see: https://github.com/docker/cli/pull/4944#issuecomment-2002034698
- name: Create go.mod
run: |
ln -s vendor.mod go.mod
ln -s vendor.sum go.sum
- name: Update Go
uses: actions/setup-go@v5
with:
go-version: "1.22.12"
- name: Initialize CodeQL
uses: github/codeql-action/init@v3
with:
languages: go
- name: Autobuild
uses: github/codeql-action/autobuild@v3
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v3
with:
category: "/language:go"


@@ -1,199 +0,0 @@
name: test
# Default to 'contents: read', which allows actions to read commits.
#
# If any permission is set, any permission not included in the list is
# implicitly set to "none".
#
# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
permissions:
contents: read
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
on:
workflow_dispatch:
push:
branches:
- 'master'
- '[0-9]+.[0-9]+'
- '[0-9]+.x'
pull_request:
env:
GO_VERSION: "1.22.12"
GIT_PAGER: "cat"
PAGER: "cat"
SETUP_BUILDX_VERSION: latest
SETUP_BUILDKIT_IMAGE: moby/buildkit:latest
jobs:
validate-dco:
uses: ./.github/workflows/.dco.yml
build-dev:
runs-on: ubuntu-20.04
timeout-minutes: 120 # guardrails timeout for the whole job
needs:
- validate-dco
strategy:
fail-fast: false
matrix:
mode:
- ""
- systemd
steps:
-
name: Prepare
run: |
if [ "${{ matrix.mode }}" = "systemd" ]; then
echo "SYSTEMD=true" >> $GITHUB_ENV
fi
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Build dev image
uses: docker/bake-action@v6
with:
targets: dev
set: |
*.cache-from=type=gha,scope=dev${{ matrix.mode }}
*.cache-to=type=gha,scope=dev${{ matrix.mode }},mode=max
*.output=type=cacheonly
test:
needs:
- build-dev
- validate-dco
uses: ./.github/workflows/.test.yml
secrets: inherit
strategy:
fail-fast: false
matrix:
storage:
- graphdriver
- snapshotter
with:
storage: ${{ matrix.storage }}
validate-prepare:
runs-on: ubuntu-20.04
timeout-minutes: 10 # guardrails timeout for the whole job
needs:
- validate-dco
outputs:
matrix: ${{ steps.scripts.outputs.matrix }}
steps:
-
name: Checkout
uses: actions/checkout@v4
-
name: Create matrix
id: scripts
run: |
scripts=$(cd ./hack/validate && jq -nc '$ARGS.positional - ["all", "default", "dco"] | map(select(test("[.]")|not)) + ["generate-files"]' --args *)
echo "matrix=$scripts" >> $GITHUB_OUTPUT
-
name: Show matrix
run: |
echo ${{ steps.scripts.outputs.matrix }}
validate:
runs-on: ubuntu-20.04
timeout-minutes: 30 # guardrails timeout for the whole job
needs:
- validate-prepare
- build-dev
strategy:
fail-fast: true
matrix:
script: ${{ fromJson(needs.validate-prepare.outputs.matrix) }}
steps:
-
name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
-
name: Set up runner
uses: ./.github/actions/setup-runner
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Build dev image
uses: docker/bake-action@v6
with:
targets: dev
set: |
dev.cache-from=type=gha,scope=dev
-
name: Validate
run: |
make -o build validate-${{ matrix.script }}
smoke-prepare:
runs-on: ubuntu-20.04
timeout-minutes: 10 # guardrails timeout for the whole job
needs:
- validate-dco
outputs:
matrix: ${{ steps.platforms.outputs.matrix }}
steps:
-
name: Checkout
uses: actions/checkout@v4
-
name: Create matrix
id: platforms
run: |
matrix="$(docker buildx bake binary-smoketest --print | jq -cr '.target."binary-smoketest".platforms')"
echo "matrix=$matrix" >> $GITHUB_OUTPUT
-
name: Show matrix
run: |
echo ${{ steps.platforms.outputs.matrix }}
smoke:
runs-on: ubuntu-20.04
timeout-minutes: 20 # guardrails timeout for the whole job
needs:
- smoke-prepare
strategy:
fail-fast: false
matrix:
platform: ${{ fromJson(needs.smoke-prepare.outputs.matrix) }}
steps:
-
name: Prepare
run: |
platform=${{ matrix.platform }}
echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
-
name: Set up QEMU
uses: docker/setup-qemu-action@v3
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
version: ${{ env.SETUP_BUILDX_VERSION }}
driver-opts: image=${{ env.SETUP_BUILDKIT_IMAGE }}
buildkitd-flags: --debug
-
name: Test
uses: docker/bake-action@v6
with:
targets: binary-smoketest
set: |
*.platform=${{ matrix.platform }}


@@ -1,88 +0,0 @@
name: validate-pr
# Default to 'contents: read', which allows actions to read commits.
#
# If any permission is set, any permission not included in the list is
# implicitly set to "none".
#
# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
permissions:
contents: read
on:
pull_request:
types: [opened, edited, labeled, unlabeled, synchronize]
jobs:
check-area-label:
runs-on: ubuntu-20.04
timeout-minutes: 120 # guardrails timeout for the whole job
steps:
- name: Missing `area/` label
if: contains(join(github.event.pull_request.labels.*.name, ','), 'impact/') && !contains(join(github.event.pull_request.labels.*.name, ','), 'area/')
run: |
echo "::error::Every PR with an 'impact/*' label should also have an 'area/*' label"
exit 1
- name: OK
run: exit 0
check-changelog:
runs-on: ubuntu-20.04
timeout-minutes: 120 # guardrails timeout for the whole job
env:
HAS_IMPACT_LABEL: ${{ contains(join(github.event.pull_request.labels.*.name, ','), 'impact/') }}
PR_BODY: |
${{ github.event.pull_request.body }}
steps:
- name: Check changelog description
run: |
# Extract the `markdown changelog` note code block
block=$(echo -n "$PR_BODY" | tr -d '\r' | awk '/^```markdown changelog$/{flag=1;next}/^```$/{flag=0}flag')
# Strip empty lines
desc=$(echo "$block" | awk NF)
if [ "$HAS_IMPACT_LABEL" = "true" ]; then
if [ -z "$desc" ]; then
echo "::error::Changelog section is empty. Please provide a description for the changelog."
exit 1
fi
len=$(echo -n "$desc" | wc -c)
if [[ $len -le 6 ]]; then
echo "::error::Description looks too short: $desc"
exit 1
fi
else
if [ -n "$desc" ]; then
echo "::error::PR has a changelog description, but no changelog label"
echo "::error::Please add the relevant 'impact/' label to the PR or remove the changelog description"
exit 1
fi
fi
echo "This PR will be included in the release notes with the following note:"
echo "$desc"
check-pr-branch:
runs-on: ubuntu-20.04
timeout-minutes: 120 # guardrails timeout for the whole job
env:
PR_TITLE: ${{ github.event.pull_request.title }}
steps:
# Backports or PR that target a release branch directly should mention the target branch in the title, for example:
# [X.Y backport] Some change that needs backporting to X.Y
# [X.Y] Change directly targeting the X.Y branch
- name: Check release branch
id: title_branch
run: |
# get the intended major version prefix ("[27.1 backport]" -> "27.") from the PR title.
[[ "$PR_TITLE" =~ ^\[([0-9]*\.)[^]]*\] ]] && branch="${BASH_REMATCH[1]}"
# get major version prefix from the release branch ("27.x -> "27.")
[[ "$GITHUB_BASE_REF" =~ ^([0-9]*\.) ]] && target_branch="${BASH_REMATCH[1]}" || target_branch="$GITHUB_BASE_REF"
if [[ "$target_branch" != "$branch" ]] && ! [[ "$GITHUB_BASE_REF" == "master" && "$branch" == "" ]]; then
echo "::error::PR is opened against the $GITHUB_BASE_REF branch, but its title suggests otherwise."
exit 1
fi
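# Illustrative cases (hypothetical titles): "[27.1 backport] Fix X" -> branch="27.";
# with GITHUB_BASE_REF="27.x", target_branch="27." and the check passes, while the
# same title against "master" fails because branch is non-empty.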


@@ -1,42 +0,0 @@
name: windows-2019
# Default to 'contents: read', which allows actions to read commits.
#
# If any permission is set, any permission not included in the list is
# implicitly set to "none".
#
# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
permissions:
contents: read
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
on:
schedule:
- cron: '0 10 * * *'
workflow_dispatch:
jobs:
validate-dco:
uses: ./.github/workflows/.dco.yml
test-prepare:
uses: ./.github/workflows/.test-prepare.yml
needs:
- validate-dco
run:
needs:
- test-prepare
uses: ./.github/workflows/.windows.yml
secrets: inherit
strategy:
fail-fast: false
matrix:
storage: ${{ fromJson(needs.test-prepare.outputs.matrix) }}
with:
os: windows-2019
storage: ${{ matrix.storage }}
send_coverage: false


@@ -1,46 +0,0 @@
name: windows-2022
# Default to 'contents: read', which allows actions to read commits.
#
# If any permission is set, any permission not included in the list is
# implicitly set to "none".
#
# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
permissions:
contents: read
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
on:
workflow_dispatch:
push:
branches:
- 'master'
- '[0-9]+.[0-9]+'
- '[0-9]+.x'
pull_request:
jobs:
validate-dco:
uses: ./.github/workflows/.dco.yml
test-prepare:
uses: ./.github/workflows/.test-prepare.yml
needs:
- validate-dco
run:
needs:
- test-prepare
uses: ./.github/workflows/.windows.yml
secrets: inherit
strategy:
fail-fast: false
matrix:
storage: ${{ fromJson(needs.test-prepare.outputs.matrix) }}
with:
os: windows-2022
storage: ${{ matrix.storage }}
send_coverage: true

.gitignore

@@ -1,28 +1,24 @@
# If you want to ignore files created by your editor/tools, please consider a
# [global .gitignore](https://help.github.com/articles/ignoring-files).
*~
*.bak
# Docker project generated files to ignore
# if you want to ignore files created by your editor/tools,
# please consider a global .gitignore https://help.github.com/articles/ignoring-files
*.exe
*.exe~
*.orig
test.main
.*.swp
.DS_Store
thumbs.db
# local repository customization
.envrc
# a .bashrc may be added to customize the build environment
.bashrc
.editorconfig
# build artifacts
.gopath/
.go-pkg-cache/
autogen/
bundles/
cli/winresources/*/*.syso
cli/winresources/*/winres.json
cmd/dockerd/dockerd
contrib/builder/rpm/*/changelog
# ci artifacts
*.exe
*.gz
go-test-report.json
junit-report.xml
dockerversion/version_autogen.go
dockerversion/version_autogen_unix.go
vendor/pkg/
hack/integration-cli-on-swarm/integration-cli-on-swarm
coverage.txt
profile.out
test.main


@@ -1,188 +0,0 @@
linters:
enable:
- depguard
- dupword # Checks for duplicate words in the source code.
- goimports
- gosec
- gosimple
- govet
- forbidigo
- importas
- ineffassign
- misspell
- revive
- staticcheck
- typecheck
- unconvert
- unused
disable:
- errcheck
run:
concurrency: 2
modules-download-mode: vendor
skip-dirs:
- docs
linters-settings:
dupword:
ignore:
- "true" # some tests use this as expected output
- "false" # some tests use this as expected output
- "root" # for tests using "ls" output with files owned by "root:root"
forbidigo:
forbid:
- pkg: github.com/vishvananda/netlink$
p: ^netlink\.(Handle\.)?(AddrList|BridgeVlanList|ChainList|ClassList|ConntrackTableList|ConntrackDeleteFilter$|ConntrackDeleteFilters|DevLinkGetDeviceList|DevLinkGetAllPortList|DevlinkGetDeviceParams|FilterList|FouList|GenlFamilyList|GTPPDPList|LinkByName|LinkByAlias|LinkList|LinkSubscribeWithOptions|NeighList$|NeighProxyList|NeighListExecute|NeighSubscribeWithOptions|LinkGetProtinfo|QdiscList|RdmaLinkList|RdmaLinkByName|RdmaLinkDel|RouteList|RouteListFilteredIter|RuleListFiltered$|RouteSubscribeWithOptions|RuleList$|RuleListFiltered|SocketGet|SocketDiagTCPInfo|SocketDiagTCP|SocketDiagUDPInfo|SocketDiagUDP|UnixSocketDiagInfo|UnixSocketDiag|VDPAGetDevConfigList|VDPAGetDevList|VDPAGetMGMTDevList|XfrmPolicyList|XfrmStateList)
msg: Use internal nlwrap package for EINTR handling.
- pkg: github.com/docker/docker/internal/nlwrap$
p: ^nlwrap.Handle.(BridgeVlanList|ChainList|ClassList|ConntrackDeleteFilter$|DevLinkGetDeviceList|DevLinkGetAllPortList|DevlinkGetDeviceParams|FilterList|FouList|GenlFamilyList|GTPPDPList|LinkByAlias|LinkSubscribeWithOptions|NeighList$|NeighProxyList|NeighListExecute|NeighSubscribeWithOptions|LinkGetProtinfo|QdiscList|RdmaLinkList|RdmaLinkByName|RdmaLinkDel|RouteListFilteredIter|RuleListFiltered$|RouteSubscribeWithOptions|RuleList$|RuleListFiltered|SocketGet|SocketDiagTCPInfo|SocketDiagTCP|SocketDiagUDPInfo|SocketDiagUDP|UnixSocketDiagInfo|UnixSocketDiag|VDPAGetDevConfigList|VDPAGetDevList|VDPAGetMGMTDevList)
msg: Add a wrapper to nlwrap.Handle for EINTR handling and update the list in .golangci.yml.
analyze-types: true
importas:
# Do not allow unaliased imports of aliased packages.
no-unaliased: true
alias:
# Enforce alias to prevent it accidentally being used instead of our
# own errdefs package (or vice-versa).
- pkg: github.com/containerd/errdefs
alias: cerrdefs
- pkg: github.com/opencontainers/image-spec/specs-go/v1
alias: ocispec
govet:
check-shadowing: false
gosec:
excludes:
- G115 # FIXME temporarily suppress 'G115: integer overflow conversion': it produces many hits, some of which may be false positives, and need to be looked at; see https://github.com/moby/moby/issues/48358
depguard:
rules:
main:
deny:
- pkg: io/ioutil
desc: The io/ioutil package has been deprecated, see https://go.dev/doc/go1.16#ioutil
- pkg: "github.com/stretchr/testify/assert"
desc: Use "gotest.tools/v3/assert" instead
- pkg: "github.com/stretchr/testify/require"
desc: Use "gotest.tools/v3/assert" instead
- pkg: "github.com/stretchr/testify/suite"
desc: Do not use
- pkg: "github.com/containerd/containerd/errdefs"
desc: The errdefs package has moved to a separate module, https://github.com/containerd/errdefs
- pkg: "github.com/containerd/containerd/log"
desc: The logs package has moved to a separate module, https://github.com/containerd/log
- pkg: "github.com/containerd/containerd/pkg/userns"
desc: Use github.com/moby/sys/userns instead.
- pkg: "github.com/opencontainers/runc/libcontainer/userns"
desc: Use github.com/moby/sys/userns instead.
- pkg: "github.com/tonistiigi/fsutil"
desc: The fsutil module does not have a stable API, so we should not have a direct dependency unless necessary.
revive:
rules:
# FIXME make sure all packages have a description. Currently, there's many packages without.
- name: package-comments
disabled: true
issues:
# The default exclusion rules are a bit too permissive, so copying the relevant ones below
exclude-use-default: false
exclude-rules:
# We prefer to use an "exclude-list" so that new "default" exclusions are not
# automatically inherited. We can decide whether or not to follow upstream
# defaults when updating golang-ci-lint versions.
# Unfortunately, this means we have to copy the whole exclusion pattern, as
# (unlike the "include" option), the "exclude" option does not take exclusion
# ID's.
#
# These exclusion patterns are copied from the default excludes at:
# https://github.com/golangci/golangci-lint/blob/v1.46.2/pkg/config/issues.go#L10-L104
# EXC0001
- text: "Error return value of .((os\\.)?std(out|err)\\..*|.*Close|.*Flush|os\\.Remove(All)?|.*print(f|ln)?|os\\.(Un)?Setenv). is not checked"
linters:
- errcheck
# EXC0006
- text: "Use of unsafe calls should be audited"
linters:
- gosec
# EXC0007
- text: "Subprocess launch(ed with variable|ing should be audited)"
linters:
- gosec
# EXC0008
# TODO: evaluate these and fix where needed: G307: Deferring unsafe method "*os.File" on type "Close" (gosec)
- text: "(G104|G307)"
linters:
- gosec
# EXC0009
- text: "(Expect directory permissions to be 0750 or less|Expect file permissions to be 0600 or less)"
linters:
- gosec
# EXC0010
- text: "Potential file inclusion via variable"
linters:
- gosec
# Looks like the match in "EXC0007" above doesn't catch this one
# TODO: consider upstreaming this to golangci-lint's default exclusion rules
- text: "G204: Subprocess launched with a potential tainted input or cmd arguments"
linters:
- gosec
# Looks like the match in "EXC0009" above doesn't catch this one
# TODO: consider upstreaming this to golangci-lint's default exclusion rules
- text: "G306: Expect WriteFile permissions to be 0600 or less"
linters:
- gosec
# Exclude some linters from running on tests files.
- path: _test\.go
linters:
- errcheck
- gosec
# Suppress golint complaining about generated types in api/types/
- text: "type name will be used as (container|volume)\\.(Container|Volume).* by other packages, and that stutters; consider calling this"
path: "api/types/(volume|container)/"
linters:
- revive
# FIXME temporarily suppress these until we migrated these to internal.
- text: "SA1019: fileutils\\.GetTotalUsedFds"
linters:
- staticcheck
# FIXME temporarily suppress these (see https://github.com/gotestyourself/gotest.tools/issues/272)
- text: "SA1019: (assert|cmp|is)\\.ErrorType is deprecated"
linters:
- staticcheck
# FIXME temporarily suppress these until https://github.com/moby/moby/pull/49072 is merged, which removes their use.
- text: "SA1019: system\\.(FromStatT|Mkdev|Mknod|StatT)"
path: "pkg/archive/"
linters:
- staticcheck
# FIXME temporarily suppress these until they are moved internal to container/streams.
- text: "SA1019: ioutils\\.(ErrClosed|BytesPipe|NewBytesPipe)"
path: "container/stream/"
linters:
- staticcheck
- text: "ineffectual assignment to ctx"
source: "ctx[, ].*=.*\\(ctx[,)]"
linters:
- ineffassign
- text: "SA4006: this value of `ctx` is never used"
source: "ctx[, ].*=.*\\(ctx[,)]"
linters:
- staticcheck
# Maximum issues count per one linter. Set to 0 to disable. Default is 50.
max-issues-per-linter: 0
# Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
max-same-issues: 0

.mailmap

@@ -1,25 +1,15 @@
# This file lists the canonical name and email of contributors, and is used to
# generate AUTHORS (in hack/generate-authors.sh).
#
# To find new duplicates, regenerate AUTHORS and scan for name duplicates, or
# run the following to find email duplicates:
# git log --format='%aE - %aN' | sort -uf | awk -v IGNORECASE=1 '$1 in a {print a[$1]; print}; {a[$1]=$0}'
#
# For an explanation of this file format, consult gitmailmap(5).
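# Canonical entry form per gitmailmap(5):
#   Proper Name <canonical@example.com> Commit Name <commit@example.com>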
# Generate AUTHORS: hack/generate-authors.sh
Aaron Yoshitake <airandfingers@gmail.com>
# Tip for finding duplicates (besides scanning the output of AUTHORS for name
# duplicates that aren't also email duplicates): scan the output of:
# git log --format='%aE - %aN' | sort -uf
#
# For explanation on this file format: man git-shortlog
<21551195@zju.edu.cn> <hsinko@users.noreply.github.com>
<mr.wrfly@gmail.com> <wrfly@users.noreply.github.com>
Aaron L. Xu <liker.xu@foxmail.com>
Aaron L. Xu <liker.xu@foxmail.com> <likexu@harmonycloud.cn>
Aaron Lehmann <alehmann@netflix.com>
Aaron Lehmann <alehmann@netflix.com> <aaron.lehmann@docker.com>
Abhinandan Prativadi <aprativadi@gmail.com>
Abhinandan Prativadi <aprativadi@gmail.com> <abhi@docker.com>
Abhinandan Prativadi <aprativadi@gmail.com> abhi <user.email>
Abhishek Chanda <abhishek.becs@gmail.com>
Abhishek Chanda <abhishek.becs@gmail.com> <abhishek.chanda@emc.com>
Ada Mancini <ada@docker.com>
Adam Dobrawy <naczelnik@jawnosc.tk>
Adam Dobrawy <naczelnik@jawnosc.tk> <ad-m@users.noreply.github.com>
Abhinandan Prativadi <abhi@docker.com>
Adrien Gallouët <adrien@gallouet.fr> <angt@users.noreply.github.com>
Ahmed Kamal <email.ahmedkamal@googlemail.com>
Ahmet Alp Balkan <ahmetb@microsoft.com> <ahmetalpbalkan@gmail.com>
@@ -27,54 +17,27 @@ AJ Bowen <aj@soulshake.net>
AJ Bowen <aj@soulshake.net> <aj@gandi.net>
AJ Bowen <aj@soulshake.net> <amy@gandi.net>
Akihiro Matsushima <amatsusbit@gmail.com> <amatsus@users.noreply.github.com>
Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp> <suda.akihiro@lab.ntt.co.jp>
Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp> <suda.kyoto@gmail.com>
Akshay Moghe <akshay.moghe@gmail.com>
Alano Terblanche <alano.terblanche@docker.com>
Alano Terblanche <alano.terblanche@docker.com> <18033717+Benehiko@users.noreply.github.com>
Albin Kerouanton <albinker@gmail.com>
Albin Kerouanton <albinker@gmail.com> <557933+akerouanton@users.noreply.github.com>
Albin Kerouanton <albinker@gmail.com> <albin@akerouanton.name>
Akihiro Suda <suda.akihiro@lab.ntt.co.jp> <suda.kyoto@gmail.com>
Aleksa Sarai <asarai@suse.de>
Aleksa Sarai <asarai@suse.de> <asarai@suse.com>
Aleksa Sarai <asarai@suse.de> <cyphar@cyphar.com>
Aleksandrs Fadins <aleks@s-ko.net>
Alessandro Boch <aboch@tetrationanalytics.com>
Alessandro Boch <aboch@tetrationanalytics.com> <aboch@docker.com>
Alessandro Boch <aboch@tetrationanalytics.com> <aboch@socketplane.io>
Alessandro Boch <aboch@tetrationanalytics.com> <aboch@users.noreply.github.com>
Alex Chan <alex@alexwlchan.net>
Alex Chan <alex@alexwlchan.net> <alex.chan@metaswitch.com>
Alex Chen <alexchenunix@gmail.com> <root@localhost.localdomain>
Alex Ellis <alexellis2@gmail.com>
Alex Goodman <wagoodman@gmail.com> <wagoodman@users.noreply.github.com>
Alexander Larsson <alexl@redhat.com> <alexander.larsson@gmail.com>
Alexander Morozov <lk4d4math@gmail.com>
Alexander Morozov <lk4d4math@gmail.com> <lk4d4@docker.com>
Alexander Morozov <lk4d4@docker.com>
Alexander Morozov <lk4d4@docker.com> <lk4d4math@gmail.com>
Alexandre Beslic <alexandre.beslic@gmail.com> <abronan@docker.com>
Alexandre González <agonzalezro@gmail.com>
Alexis Ries <ries.alexis@gmail.com>
Alexis Ries <ries.alexis@gmail.com> <alexis.ries.ext@orange.com>
Alexis Thomas <fr.alexisthomas@gmail.com>
Alicia Lauerman <alicia@eta.im> <allydevour@me.com>
Allen Sun <allensun.shl@alibaba-inc.com> <allen.sun@daocloud.io>
Allen Sun <allensun.shl@alibaba-inc.com> <shlallen1990@gmail.com>
Anca Iordache <anca.iordache@docker.com>
Andrea Denisse Gómez <crypto.andrea@protonmail.ch>
Andrew Baxter <423qpsxzhh8k3h@s.rendaw.me>
Andrew Baxter <423qpsxzhh8k3h@s.rendaw.me> andrew <>
Andrew Kim <taeyeonkim90@gmail.com>
Andrew Kim <taeyeonkim90@gmail.com> <akim01@fortinet.com>
Andrew Weiss <andrew.weiss@docker.com> <andrew.weiss@microsoft.com>
Andrew Weiss <andrew.weiss@docker.com> <andrew.weiss@outlook.com>
Andrey Kolomentsev <andrey.kolomentsev@docker.com>
Andrey Kolomentsev <andrey.kolomentsev@docker.com> <andrey.kolomentsev@gmail.com>
André Martins <aanm90@gmail.com> <martins@noironetworks.com>
Andy Rothfusz <github@developersupport.net> <github@metaliveblog.com>
Andy Smith <github@anarkystic.com>
Andy Zhang <andy.zhangtao@hotmail.com>
Andy Zhang <andy.zhangtao@hotmail.com> <ztao@tibco-support.com>
Ankush Agarwal <ankushagarwal11@gmail.com> <ankushagarwal@users.noreply.github.com>
Antonio Murdaca <antonio.murdaca@gmail.com> <amurdaca@redhat.com>
Antonio Murdaca <antonio.murdaca@gmail.com> <me@runcom.ninja>
@@ -84,49 +47,28 @@ Antonio Murdaca <antonio.murdaca@gmail.com> <runcom@users.noreply.github.com>
Anuj Bahuguna <anujbahuguna.dev@gmail.com>
Anuj Bahuguna <anujbahuguna.dev@gmail.com> <abahuguna@fiberlink.com>
Anusha Ragunathan <anusha.ragunathan@docker.com> <anusha@docker.com>
Anyu Wang <wanganyu@outlook.com>
Arko Dasgupta <arko@tetrate.io>
Arko Dasgupta <arko@tetrate.io> <arko.dasgupta@docker.com>
Arko Dasgupta <arko@tetrate.io> <arkodg@users.noreply.github.com>
Arnaud Porterie <icecrime@gmail.com>
Arnaud Porterie <icecrime@gmail.com> <arnaud.porterie@docker.com>
Arnaud Rebillout <arnaud.rebillout@collabora.com>
Arnaud Rebillout <arnaud.rebillout@collabora.com> <elboulangero@gmail.com>
Arnaud Porterie <arnaud.porterie@docker.com>
Arnaud Porterie <arnaud.porterie@docker.com> <icecrime@gmail.com>
Arthur Gautier <baloo@gandi.net> <superbaloo+registrations.github@superbaloo.net>
Artur Meyster <arthurfbi@yahoo.com>
Avi Miller <avi.miller@oracle.com> <avi.miller@gmail.com>
Ben Bonnefoy <frenchben@docker.com>
Ben Golub <ben.golub@dotcloud.com>
Ben Toews <mastahyeti@gmail.com> <mastahyeti@users.noreply.github.com>
Benny Ng <benny.tpng@gmail.com>
Benoit Chesneau <bchesneau@gmail.com>
Bevisy Zhang <binbin36520@gmail.com>
Bhiraj Butala <abhiraj.butala@gmail.com>
Bhumika Bayani <bhumikabayani@gmail.com>
Bilal Amarni <bilal.amarni@gmail.com> <bamarni@users.noreply.github.com>
Bill Wang <ozbillwang@gmail.com> <SydOps@users.noreply.github.com>
Bily Zhang <xcoder@tenxcloud.com>
Bin Liu <liubin0329@gmail.com>
Bin Liu <liubin0329@gmail.com> <liubin0329@users.noreply.github.com>
Bingshen Wang <bingshen.wbs@alibaba-inc.com>
Bjorn Neergaard <bjorn@neersighted.com>
Bjorn Neergaard <bjorn@neersighted.com> <bjorn.neergaard@docker.com>
Bjorn Neergaard <bjorn@neersighted.com> <bneergaard@mirantis.com>
Boaz Shuster <ripcurld.github@gmail.com>
Bojun Zhu <bojun.zhu@foxmail.com>
Boqin Qin <bobbqqin@gmail.com>
Boshi Lian <farmer1992@gmail.com>
Brandon Philips <brandon.philips@coreos.com> <brandon@ifup.co>
Brandon Philips <brandon.philips@coreos.com> <brandon@ifup.org>
Brent Salisbury <brent.salisbury@docker.com> <brent@docker.com>
Brian Goff <cpuguy83@gmail.com>
Brian Goff <cpuguy83@gmail.com> <bgoff@cpuguy83-mbp.home>
Brian Goff <cpuguy83@gmail.com> <bgoff@cpuguy83-mbp.local>
Brian Goff <cpuguy83@gmail.com> <brian.goff@microsoft.com>
Brian Goff <cpuguy83@gmail.com> <cpuguy@hey.com>
Calvin Liu <flycalvin@qq.com>
Cameron Sparr <gh@sparr.email>
Carlos de Paula <me@carlosedp.com>
Chander Govindarajan <chandergovind@gmail.com>
Chao Wang <wangchao.fnst@cn.fujitsu.com> <chaowang@localhost.localdomain>
Charles Hooper <charles.hooper@dotcloud.com> <chooper@plumata.com>
@@ -135,27 +77,15 @@ Chen Chuanliang <chen.chuanliang@zte.com.cn>
Chen Mingjie <chenmingjie0828@163.com>
Chen Qiu <cheney-90@hotmail.com>
Chen Qiu <cheney-90@hotmail.com> <21321229@zju.edu.cn>
Chengfei Shang <cfshang@alauda.io>
Chentianze <cmoman@126.com>
Chris Dias <cdias@microsoft.com>
Chris McKinnel <chris.mckinnel@tangentlabs.co.uk>
Chris Price <cprice@mirantis.com>
Chris Price <cprice@mirantis.com> <chris.price@docker.com>
Chris Telfer <ctelfer@docker.com>
Chris Telfer <ctelfer@docker.com> <ctelfer@users.noreply.github.com>
Christopher Biscardi <biscarch@sketcht.com>
Christopher Latham <sudosurootdev@gmail.com>
Christopher Petito <chrisjpetito@gmail.com>
Christopher Petito <chrisjpetito@gmail.com> <47751006+krissetto@users.noreply.github.com>
Christy Norman <christy@linux.vnet.ibm.com>
Chun Chen <ramichen@tencent.com> <chenchun.feed@gmail.com>
Corbin Coleman <corbin.coleman@docker.com>
Cristian Ariza <dev@cristianrz.com>
Cristian Staretu <cristian.staretu@gmail.com>
Cristian Staretu <cristian.staretu@gmail.com> <unclejack@users.noreply.github.com>
Cristian Staretu <cristian.staretu@gmail.com> <unclejacksons@gmail.com>
cui fliter <imcusg@gmail.com>
cui fliter <imcusg@gmail.com> cuishuang <imcusg@gmail.com>
CUI Wei <ghostplant@qq.com> cuiwei13 <cuiwei13@pku.edu.cn>
Daehyeok Mun <daehyeok@gmail.com>
Daehyeok Mun <daehyeok@gmail.com> <daehyeok@daehyeok-ui-MacBook-Air.local>
@@ -167,7 +97,6 @@ Daniel Garcia <daniel@danielgarcia.info>
Daniel Gasienica <daniel@gasienica.ch> <dgasienica@zynga.com>
Daniel Goosen <daniel.goosen@surveysampling.com> <djgoosen@users.noreply.github.com>
Daniel Grunwell <mwgrunny@gmail.com>
Daniel Hiltgen <daniel.hiltgen@docker.com> <dhiltgen@users.noreply.github.com>
Daniel J Walsh <dwalsh@redhat.com>
Daniel Mizyrycki <daniel.mizyrycki@dotcloud.com> <daniel@dotcloud.com>
Daniel Mizyrycki <daniel.mizyrycki@dotcloud.com> <mzdaniel@glidelink.net>
@@ -175,42 +104,26 @@ Daniel Mizyrycki <daniel.mizyrycki@dotcloud.com> <root@vagrant-ubuntu-12.10.vagr
Daniel Nephin <dnephin@docker.com> <dnephin@gmail.com>
Daniel Norberg <dano@spotify.com> <daniel.norberg@gmail.com>
Daniel Watkins <daniel@daniel-watkins.co.uk>
Daniel Zhang <jmzwcn@gmail.com>
Danny Yates <danny@codeaholics.org> <Danny.Yates@mailonline.co.uk>
Darren Shepherd <darren.s.shepherd@gmail.com> <darren@rancher.com>
Dattatraya Kumbhar <dattatraya.kumbhar@gslab.com>
Dave Goodchild <buddhamagnet@gmail.com>
Dave Henderson <dhenderson@gmail.com> <Dave.Henderson@ca.ibm.com>
Dave Tucker <dt@docker.com> <dave@dtucker.co.uk>
David Dooling <dooling@gmail.com>
David Dooling <dooling@gmail.com> <david.dooling@docker.com>
David M. Karr <davidmichaelkarr@gmail.com>
David Sheets <dsheets@docker.com> <sheets@alum.mit.edu>
David Sissitka <me@dsissitka.com>
David Williamson <david.williamson@docker.com> <davidwilliamson@users.noreply.github.com>
Derek Ch <denc716@gmail.com>
Derek McGowan <derek@mcg.dev>
Derek McGowan <derek@mcg.dev> <derek@mcgstyle.net>
Deshi Xiao <dxiao@redhat.com> <dsxiao@dataman-inc.com>
Deshi Xiao <dxiao@redhat.com> <xiaods@gmail.com>
Dhilip Kumars <dhilip.kumar.s@huawei.com>
Diego Siqueira <dieg0@live.com>
Diogo Monica <diogo@docker.com> <diogo.monica@gmail.com>
Dmitry Sharshakov <d3dx12.xx@gmail.com>
Dmitry Sharshakov <d3dx12.xx@gmail.com> <sh7dm@outlook.com>
Dmytro Iakovliev <dmytro.iakovliev@zodiacsystems.com>
Dominic Yin <yindongchao@inspur.com>
Dominik Honnef <dominik@honnef.co> <dominikh@fork-bomb.org>
Doug Davis <dug@us.ibm.com> <duglin@users.noreply.github.com>
Doug Tangren <d.tangren@gmail.com>
Drew Erny <derny@mirantis.com>
Drew Erny <derny@mirantis.com> <drew.erny@docker.com>
Elan Ruusamäe <glen@pld-linux.org>
Elan Ruusamäe <glen@pld-linux.org> <glen@delfi.ee>
Elango Sivanandam <elango.siva@docker.com>
Elango Sivanandam <elango.siva@docker.com> <elango@docker.com>
Eli Uriegas <seemethere101@gmail.com>
Eli Uriegas <seemethere101@gmail.com> <eli.uriegas@docker.com>
Eric G. Noriega <enoriega@vizuri.com> <egnoriega@users.noreply.github.com>
Eric Hanchrow <ehanchrow@ine.com> <eric.hanchrow@gmail.com>
Eric Rosenberg <ehaydenr@gmail.com> <ehaydenr@users.noreply.github.com>
@@ -230,109 +143,66 @@ Felix Hupfeld <felix@quobyte.com> <quofelix@users.noreply.github.com>
Felix Ruess <felix.ruess@gmail.com> <felix.ruess@roboception.de>
Feng Yan <fy2462@gmail.com>
Fengtu Wang <wangfengtu@huawei.com> <wangfengtu@huawei.com>
Filipe Pina <hzlu1ot0@duck.com>
Filipe Pina <hzlu1ot0@duck.com> <636320+fopina@users.noreply.github.com>
Francisco Carriedo <fcarriedo@gmail.com>
Frank Rosquin <frank.rosquin+github@gmail.com> <frank.rosquin@gmail.com>
Frank Yang <yyb196@gmail.com>
Frederick F. Kautz IV <fkautz@redhat.com> <fkautz@alumni.cmu.edu>
Fu JinLin <withlin@yeah.net>
Gabriel Goller <gabrielgoller123@gmail.com>
Gabriel Nicolas Avellaneda <avellaneda.gabriel@gmail.com>
Gaetan de Villele <gdevillele@gmail.com>
Gang Qiao <qiaohai8866@gmail.com> <1373319223@qq.com>
Geon Kim <geon0250@gmail.com>
George Kontridze <george@bugsnag.com>
Gerwim Feiken <g.feiken@tfe.nl> <gerwim@gmail.com>
Giampaolo Mancini <giampaolo@trampolineup.com>
Giovan Isa Musthofa <giovanism@outlook.co.id>
Gopikannan Venugopalsamy <gopikannan.venugopalsamy@gmail.com>
Gou Rao <gou@portworx.com> <gourao@users.noreply.github.com>
Grant Millar <rid@cylo.io>
Grant Millar <rid@cylo.io> <grant@cylo.io>
Grant Millar <rid@cylo.io> <grant@seednet.eu>
Greg Stephens <greg@udon.org>
Guillaume J. Charmes <guillaume.charmes@docker.com> <charmes.guillaume@gmail.com>
Guillaume J. Charmes <guillaume.charmes@docker.com> <guillaume.charmes@dotcloud.com>
Guillaume J. Charmes <guillaume.charmes@docker.com> <guillaume@charmes.net>
Guillaume J. Charmes <guillaume.charmes@docker.com> <guillaume@docker.com>
Guillaume J. Charmes <guillaume.charmes@docker.com> <guillaume@dotcloud.com>
Gunadhya S. <6939749+gunadhya@users.noreply.github.com>
Guoqiang QI <guoqiang.qi1@gmail.com>
Guri <odg0318@gmail.com>
Gurjeet Singh <gurjeet@singh.im> <singh.gurjeet@gmail.com>
Gustav Sinder <gustav.sinder@gmail.com>
Günther Jungbluth <gunther@gameslabs.net>
Hakan Özler <hakan.ozler@kodcu.com>
Hao Shu Wei <haoshuwei24@gmail.com>
Hao Shu Wei <haoshuwei24@gmail.com> <haoshuwei1989@163.com>
Hao Shu Wei <haoshuwei24@gmail.com> <haosw@cn.ibm.com>
Hao Shu Wei <haosw@cn.ibm.com>
Hao Shu Wei <haosw@cn.ibm.com> <haoshuwei1989@163.com>
Harald Albers <github@albersweb.de> <albers@users.noreply.github.com>
Harald Niesche <harald@niesche.de>
Harold Cooper <hrldcpr@gmail.com>
Harry Zhang <harryz@hyper.sh>
Harry Zhang <harryz@hyper.sh> <harryzhang@zju.edu.cn>
Harry Zhang <harryz@hyper.sh> <resouer@163.com>
Harry Zhang <harryz@hyper.sh> <resouer@gmail.com>
Harry Zhang <resouer@163.com>
Harshal Patil <harshal.patil@in.ibm.com> <harche@users.noreply.github.com>
He Simei <hesimei@zju.edu.cn>
Helen Xie <chenjg@harmonycloud.cn>
Hiroyuki Sasagawa <hs19870702@gmail.com>
Hollie Teal <hollie@docker.com>
Hollie Teal <hollie@docker.com> <hollie.teal@docker.com>
Hollie Teal <hollie@docker.com> <hollietealok@users.noreply.github.com>
hsinko <21551195@zju.edu.cn> <hsinko@users.noreply.github.com>
Hu Keping <hukeping@huawei.com>
Huajin Tong <fliterdashen@gmail.com>
Hui Kang <hkang.sunysb@gmail.com>
Hui Kang <hkang.sunysb@gmail.com> <kangh@us.ibm.com>
Huu Nguyen <huu@prismskylabs.com> <whoshuu@gmail.com>
Hyeongkyu Lee <hyeongkyu.lee@navercorp.com>
Hyzhou Zhy <hyzhou.zhy@alibaba-inc.com>
Hyzhou Zhy <hyzhou.zhy@alibaba-inc.com> <1187766782@qq.com>
Ian Campbell <ian.campbell@docker.com>
Ian Campbell <ian.campbell@docker.com> <ijc@docker.com>
Ilya Khlopotov <ilya.khlopotov@gmail.com>
Iskander Sharipov <quasilyte@gmail.com>
Ivan Babrou <ibobrik@gmail.com>
Ivan Markin <sw@nogoegst.net> <twim@riseup.net>
Jack Laxson <jackjrabbit@gmail.com>
Jacob Atzen <jacob@jacobatzen.dk> <jatzen@gmail.com>
Jacob Tomlinson <jacob@tom.linson.uk> <jacobtomlinson@users.noreply.github.com>
Jaivish Kothari <janonymous.codevulture@gmail.com>
Jake Moshenko <jake@devtable.com>
Jakub Drahos <jdrahos@pulsepoint.com>
Jakub Drahos <jdrahos@pulsepoint.com> <jack.drahos@gmail.com>
James Nesbitt <jnesbitt@mirantis.com>
James Nesbitt <jnesbitt@mirantis.com> <james.nesbitt@wunderkraut.com>
Jamie Hannaford <jamie@limetree.org> <jamie.hannaford@rackspace.com>
Jan Götte <jaseg@jaseg.net>
Jana Radhakrishnan <mrjana@docker.com>
Jana Radhakrishnan <mrjana@docker.com> <mrjana@socketplane.io>
Javier Bassi <javierbassi@gmail.com>
Javier Bassi <javierbassi@gmail.com> <CrimsonGlory@users.noreply.github.com>
Jay Lim <jay@imjching.com>
Jay Lim <jay@imjching.com> <imjching@hotmail.com>
Jean Rouge <rougej+github@gmail.com> <jer329@cornell.edu>
Jean-Baptiste Barth <jeanbaptiste.barth@gmail.com>
Jean-Baptiste Dalido <jeanbaptiste@appgratis.com>
Jean-Tiare Le Bigot <jt@yadutaf.fr> <admin@jtlebi.fr>
Jeff Anderson <jeff@docker.com> <jefferya@programmerq.net>
Jeff Nickoloff <jeff.nickoloff@gmail.com> <jeff@allingeek.com>
Jeroen Franse <jeroenfranse@gmail.com>
Jessica Frazelle <jess@oxide.computer>
Jessica Frazelle <jess@oxide.computer> <acidburn@docker.com>
Jessica Frazelle <jess@oxide.computer> <acidburn@google.com>
Jessica Frazelle <jess@oxide.computer> <acidburn@microsoft.com>
Jessica Frazelle <jess@oxide.computer> <jess@docker.com>
Jessica Frazelle <jess@oxide.computer> <jess@mesosphere.com>
Jessica Frazelle <jess@oxide.computer> <jessfraz@google.com>
Jessica Frazelle <jess@oxide.computer> <jfrazelle@users.noreply.github.com>
Jessica Frazelle <jess@oxide.computer> <me@jessfraz.com>
Jessica Frazelle <jess@oxide.computer> <princess@docker.com>
Jian Liao <jliao@alauda.io>
Jiang Jinyang <jjyruby@gmail.com>
Jiang Jinyang <jjyruby@gmail.com> <jiangjinyang@outlook.com>
Jessica Frazelle <jessfraz@google.com>
Jessica Frazelle <jessfraz@google.com> <acidburn@docker.com>
Jessica Frazelle <jessfraz@google.com> <acidburn@google.com>
Jessica Frazelle <jessfraz@google.com> <jess@docker.com>
Jessica Frazelle <jessfraz@google.com> <jess@mesosphere.com>
Jessica Frazelle <jessfraz@google.com> <jfrazelle@users.noreply.github.com>
Jessica Frazelle <jessfraz@google.com> <me@jessfraz.com>
Jessica Frazelle <jessfraz@google.com> <princess@docker.com>
Jim Galasyn <jim.galasyn@docker.com>
Jiuyue Ma <majiuyue@huawei.com>
Joey Geiger <jgeiger@gmail.com>
@@ -341,25 +211,19 @@ Joffrey F <joffrey@docker.com> <f.joffrey@gmail.com>
Joffrey F <joffrey@docker.com> <joffrey@dotcloud.com>
Johan Euphrosine <proppy@google.com> <proppy@aminche.com>
John Harris <john@johnharris.io>
John Howard <github@lowenna.com>
John Howard <github@lowenna.com> <10522484+lowenna@users.noreply.github.com>
John Howard <github@lowenna.com> <jhoward@microsoft.com>
John Howard <github@lowenna.com> <jhoward@ntdev.microsoft.com>
John Howard <github@lowenna.com> <jhowardmsft@users.noreply.github.com>
John Howard <github@lowenna.com> <john.howard@microsoft.com>
John Howard <github@lowenna.com> <john@lowenna.com>
John Howard (VM) <John.Howard@microsoft.com>
John Howard (VM) <John.Howard@microsoft.com> <jhoward@microsoft.com>
John Howard (VM) <John.Howard@microsoft.com> <jhoward@ntdev.microsoft.com>
John Howard (VM) <John.Howard@microsoft.com> <jhowardmsft@users.noreply.github.com>
John Howard (VM) <John.Howard@microsoft.com> <john.howard@microsoft.com>
John Stephens <johnstep@docker.com> <johnstep@users.noreply.github.com>
Jon Surrell <jon.surrell@gmail.com> <jon.surrell@automattic.com>
Jonathan A. Sternberg <jonathansternberg@gmail.com>
Jonathan A. Sternberg <jonathansternberg@gmail.com> <jonathan.sternberg@docker.com>
Jonathan Choy <jonathan.j.choy@gmail.com>
Jonathan Choy <jonathan.j.choy@gmail.com> <oni@tetsujinlabs.com>
Jon Surrell <jon.surrell@gmail.com> <jon.surrell@automattic.com>
Jordan Arentsen <blissdev@gmail.com>
Jordan Jennings <jjn2009@gmail.com> <jjn2009@users.noreply.github.com>
Jorit Kleine-Möllhoff <joppich@bricknet.de> <joppich@users.noreply.github.com>
Jose Diaz-Gonzalez <email@josediazgonzalez.com>
Jose Diaz-Gonzalez <email@josediazgonzalez.com> <jose@seatgeek.com>
Jose Diaz-Gonzalez <email@josediazgonzalez.com> <josegonzalez@users.noreply.github.com>
Jose Diaz-Gonzalez <jose@seatgeek.com> <josegonzalez@users.noreply.github.com>
Josh Bonczkowski <josh.bonczkowski@gmail.com>
Josh Eveleth <joshe@opendns.com> <jeveleth@users.noreply.github.com>
Josh Hawn <josh.hawn@docker.com> <jlhawn@berkeley.edu>
@@ -369,14 +233,10 @@ Josh Wilson <josh.wilson@fivestars.com> <jcwilson@users.noreply.github.com>
Joyce Jang <mail@joycejang.com>
Julien Bordellier <julienbordellier@gmail.com> <git@julienbordellier.com>
Julien Bordellier <julienbordellier@gmail.com> <me@julienbordellier.com>
Jun Du <dujun5@huawei.com>
Justin Cormack <justin.cormack@docker.com>
Justin Cormack <justin.cormack@docker.com> <justin.cormack@unikernel.com>
Justin Cormack <justin.cormack@docker.com> <justin@specialbusservice.com>
Justin Keller <85903732+jk-vb@users.noreply.github.com>
Justin Keller <85903732+jk-vb@users.noreply.github.com> <jkeller@vb-jkeller-mbp.local>
Justin Simonelis <justin.p.simonelis@gmail.com> <justin.simonelis@PTS-JSIMON2.toronto.exclamation.com>
Justin Terry <juterry@microsoft.com>
Jérôme Petazzoni <jerome.petazzoni@docker.com> <jerome.petazzoni@dotcloud.com>
Jérôme Petazzoni <jerome.petazzoni@docker.com> <jerome.petazzoni@gmail.com>
Jérôme Petazzoni <jerome.petazzoni@docker.com> <jp@enix.org>
@@ -385,15 +245,9 @@ Kai Qiang Wu (Kennan) <wkq5325@gmail.com>
Kai Qiang Wu (Kennan) <wkq5325@gmail.com> <wkqwu@cn.ibm.com>
Kamil Domański <kamil@domanski.co>
Kamjar Gerami <kami.gerami@gmail.com>
Karthik Nayak <karthik.188@gmail.com>
Karthik Nayak <karthik.188@gmail.com> <Karthik.188@gmail.com>
Ken Cochrane <kencochrane@gmail.com> <KenCochrane@gmail.com>
Ken Herner <kherner@progress.com> <chosenken@gmail.com>
Ken Reese <krrgithub@gmail.com>
Kenfe-Mickaël Laventure <mickael.laventure@gmail.com>
Kevin Alvarez <github@crazymax.dev>
Kevin Alvarez <github@crazymax.dev> <1951866+crazy-max@users.noreply.github.com>
Kevin Alvarez <github@crazymax.dev> <crazy-max@users.noreply.github.com>
Kevin Feyrer <kevin.feyrer@btinternet.com> <kevinfeyrer@users.noreply.github.com>
Kevin Kern <kaiwentan@harmonycloud.cn>
Kevin Meredith <kevin.m.meredith@gmail.com>
@@ -404,25 +258,17 @@ Konrad Kleine <konrad.wilhelm.kleine@gmail.com> <kwk@users.noreply.github.com>
Konstantin Gribov <grossws@gmail.com>
Konstantin Pelykh <kpelykh@zettaset.com>
Kotaro Yoshimatsu <kotaro.yoshimatsu@gmail.com>
Kunal Kushwaha <kushwaha_kunal_v7@lab.ntt.co.jp>
Kunal Kushwaha <kushwaha_kunal_v7@lab.ntt.co.jp> <btkushuwahak@KUNAL-PC.swh.swh.nttdata.co.jp>
Kunal Kushwaha <kushwaha_kunal_v7@lab.ntt.co.jp> <kunal.kushwaha@gmail.com>
Kyle Squizzato <ksquizz@gmail.com>
Kyle Squizzato <ksquizz@gmail.com> <kyle.squizzato@docker.com>
Lajos Papp <lajos.papp@sequenceiq.com> <lalyos@yahoo.com>
Lei Gong <lgong@alauda.io>
Lei Jitang <leijitang@huawei.com>
Lei Jitang <leijitang@huawei.com> <leijitang@gmail.com>
Lei Jitang <leijitang@huawei.com> <leijitang@outlook.com>
Leiiwang <u2takey@gmail.com>
Liang Mingqiang <mqliang.zju@gmail.com>
Liang-Chi Hsieh <viirya@gmail.com>
Liao Qingwei <liaoqingwei@huawei.com>
Linus Heckemann <lheckemann@twig-world.com>
Linus Heckemann <lheckemann@twig-world.com> <anonymouse2048@gmail.com>
Lokesh Mandvekar <lsm5@fedoraproject.org> <lsm5@redhat.com>
Lorenzo Fontana <fontanalorenz@gmail.com> <fontanalorenzo@me.com>
Lorenzo Fontana <fontanalorenz@gmail.com> <lo@linux.com>
Lorenzo Fontana <lo@linux.com> <fontanalorenzo@me.com>
Louis Opter <kalessin@kalessin.fr>
Louis Opter <kalessin@kalessin.fr> <louis@dotcloud.com>
Luca Favatella <luca.favatella@erlang-solutions.com> <lucafavatella@users.noreply.github.com>
@@ -431,11 +277,8 @@ Lyn <energylyn@zju.edu.cn>
Lynda O'Leary <lyndaoleary29@gmail.com>
Lynda O'Leary <lyndaoleary29@gmail.com> <lyndaoleary@hotmail.com>
Ma Müller <mueller-ma@users.noreply.github.com>
Madhan Raj Mookkandy <MadhanRaj.Mookkandy@microsoft.com>
Madhan Raj Mookkandy <MadhanRaj.Mookkandy@microsoft.com> <madhanm@corp.microsoft.com>
Madhan Raj Mookkandy <MadhanRaj.Mookkandy@microsoft.com> <madhanm@microsoft.com>
Madhu Venugopal <mavenugo@gmail.com> <madhu@docker.com>
Madhu Venugopal <mavenugo@gmail.com> <madhu@socketplane.io>
Madhu Venugopal <madhu@socketplane.io> <madhu@docker.com>
Mageee <fangpuyi@foxmail.com> <21521230.zju.edu.cn>
Mansi Nahar <mmn4185@rit.edu> <mansi.nahar@macbookpro-mansinahar.local>
Mansi Nahar <mmn4185@rit.edu> <mansinahar@users.noreply.github.com>
@@ -448,12 +291,10 @@ Markan Patel <mpatel678@gmail.com>
Markus Kortlang <hyp3rdino@googlemail.com> <markus.kortlang@lhsystems.com>
Martin Redmond <redmond.martin@gmail.com> <martin@tinychat.com>
Martin Redmond <redmond.martin@gmail.com> <xgithub@redmond5.com>
Maru Newby <mnewby@thesprawl.net>
Mary Anthony <mary.anthony@docker.com> <mary@docker.com>
Mary Anthony <mary.anthony@docker.com> <moxieandmore@gmail.com>
Mary Anthony <mary.anthony@docker.com> moxiegirl <mary@docker.com>
Masato Ohba <over.rye@gmail.com>
Mathieu Paturel <mathieu.paturel@gmail.com>
Matt Bentley <matt.bentley@docker.com> <mbentley@mbentley.net>
Matt Schurenko <matt.schurenko@gmail.com>
Matt Williams <mattyw@me.com>
@@ -463,52 +304,29 @@ Matthew Mosesohn <raytrac3r@gmail.com>
Matthew Mueller <mattmuelle@gmail.com>
Matthias Kühnle <git.nivoc@neverbox.com> <kuehnle@online.de>
Mauricio Garavaglia <mauricio@medallia.com> <mauriciogaravaglia@gmail.com>
Maxwell <csuhp007@gmail.com>
Maxwell <csuhp007@gmail.com> <csuhqg@foxmail.com>
Menghui Chen <menghui.chen@alibaba-inc.com>
Michael Beskin <mrbeskin@gmail.com>
Michael Crosby <crosbymichael@gmail.com>
Michael Crosby <crosbymichael@gmail.com> <crosby.michael@gmail.com>
Michael Crosby <crosbymichael@gmail.com> <michael@crosbymichael.com>
Michael Crosby <crosbymichael@gmail.com> <michael@docker.com>
Michael Crosby <crosbymichael@gmail.com> <michael@thepasture.io>
Michael Crosby <michael@docker.com> <crosby.michael@gmail.com>
Michael Crosby <michael@docker.com> <crosbymichael@gmail.com>
Michael Crosby <michael@docker.com> <michael@crosbymichael.com>
Michał Gryko <github@odkurzacz.org>
Michael Hudson-Doyle <michael.hudson@canonical.com> <michael.hudson@linaro.org>
Michael Huettermann <michael@huettermann.net>
Michael Käufl <docker@c.michael-kaeufl.de> <michael-k@users.noreply.github.com>
Michael Nussbaum <michael.nussbaum@getbraintree.com>
Michael Nussbaum <michael.nussbaum@getbraintree.com> <code@getbraintree.com>
Michael Spetsiotis <michael_spets@hotmail.com>
Michael Stapelberg <michael+gh@stapelberg.de>
Michael Stapelberg <michael+gh@stapelberg.de> <stapelberg@google.com>
Michal Kostrzewa <michal.kostrzewa@codilime.com>
Michal Kostrzewa <michal.kostrzewa@codilime.com> <kostrzewa.michal@o2.pl>
Michal Minář <miminar@redhat.com>
Michał Gryko <github@odkurzacz.org>
Michiel de Jong <michiel@unhosted.org>
Mickaël Fortunato <morsi.morsicus@gmail.com>
Miguel Angel Alvarez Cabrerizo <doncicuto@gmail.com> <30386061+doncicuto@users.noreply.github.com>
Miguel Angel Fernández <elmendalerenda@gmail.com>
Mihai Borobocea <MihaiBorob@gmail.com> <MihaiBorobocea@gmail.com>
Mikael Davranche <mikael.davranche@corp.ovh.com>
Mikael Davranche <mikael.davranche@corp.ovh.com> <mikael.davranche@corp.ovh.net>
Mike Casas <mkcsas0@gmail.com> <mikecasas@users.noreply.github.com>
Mike Goelzer <mike.goelzer@docker.com> <mgoelzer@docker.com>
Milas Bowman <devnull@milas.dev>
Milas Bowman <devnull@milas.dev> <milas.bowman@docker.com>
Milas Bowman <devnull@milas.dev> <milasb@gmail.com>
Milind Chawre <milindchawre@gmail.com>
Misty Stanley-Jones <misty@docker.com> <misty@apache.org>
Mohammad Banikazemi <MBanikazemi@gmail.com>
Mohammad Banikazemi <MBanikazemi@gmail.com> <mb@us.ibm.com>
Mohd Sadiq <mohdsadiq058@gmail.com> <42430865+msadiq058@users.noreply.github.com>
Mohd Sadiq <mohdsadiq058@gmail.com> <mohdsadiq058@gmail.com>
Mohit Soni <mosoni@ebay.com> <mohitsoni1989@gmail.com>
Moorthy RS <rsmoorthy@gmail.com> <rsmoorthy@users.noreply.github.com>
Moysés Borges <moysesb@gmail.com>
Moysés Borges <moysesb@gmail.com> <moyses.furtado@wplex.com.br>
mrfly <mr.wrfly@gmail.com> <wrfly@users.noreply.github.com>
Nace Oroz <orkica@gmail.com>
Natasha Jarus <linuxmercedes@gmail.com>
Nathan LeClaire <nathan.leclaire@docker.com> <nathan.leclaire@gmail.com>
Nathan LeClaire <nathan.leclaire@docker.com> <nathanleclaire@gmail.com>
Neil Horman <nhorman@tuxdriver.com> <nhorman@hmswarspite.think-freely.org>
@@ -520,13 +338,7 @@ Nolan Darilek <nolan@thewordnerd.info>
O.S. Tezer <ostezer@gmail.com>
O.S. Tezer <ostezer@gmail.com> <ostezer@users.noreply.github.com>
Oh Jinkyun <tintypemolly@gmail.com> <tintypemolly@Ohui-MacBook-Pro.local>
Oliver Reason <oli@overrateddev.co>
Olli Janatuinen <olli.janatuinen@gmail.com>
Olli Janatuinen <olli.janatuinen@gmail.com> <olljanat@users.noreply.github.com>
Onur Filiz <onur.filiz@microsoft.com>
Onur Filiz <onur.filiz@microsoft.com> <ofiliz@users.noreply.github.com>
Ouyang Liduo <oyld0210@163.com>
Patrick St. laurent <patrick@saint-laurent.us>
Patrick Stapleton <github@gdi2290.com>
Paul Liljenberg <liljenberg.paul@gmail.com> <letters@paulnotcom.se>
Pavel Tikhomirov <ptikhomirov@virtuozzo.com> <ptikhomirov@parallels.com>
@@ -536,66 +348,38 @@ Peter Dave Hello <hsu@peterdavehello.org> <PeterDaveHello@users.noreply.github.c
Peter Jaffe <pjaffe@nevo.com>
Peter Nagy <xificurC@gmail.com> <pnagy@gratex.com>
Peter Waller <p@pwaller.net> <peter@scraperwiki.com>
Phil Estes <estesp@gmail.com>
Phil Estes <estesp@gmail.com> <estesp@amazon.com>
Phil Estes <estesp@gmail.com> <estesp@linux.vnet.ibm.com>
Phil Estes <estesp@linux.vnet.ibm.com> <estesp@gmail.com>
Philip Alexander Etling <paetling@gmail.com>
Philipp Gillé <philipp.gille@gmail.com> <philippgille@users.noreply.github.com>
Prasanna Gautam <prasannagautam@gmail.com>
Puneet Pruthi <puneet.pruthi@oracle.com>
Puneet Pruthi <puneet.pruthi@oracle.com> <puneetpruthi@gmail.com>
Qiang Huang <h.huangqiang@huawei.com>
Qiang Huang <h.huangqiang@huawei.com> <qhuang@10.0.2.15>
Qin TianHuan <tianhuan@bingotree.cn>
Ray Tsang <rayt@google.com> <saturnism@users.noreply.github.com>
Renaud Gaubert <rgaubert@nvidia.com> <renaud.gaubert@gmail.com>
Richard Scothern <richard.scothern@gmail.com>
Rob Murray <rob.murray@docker.com>
Rob Murray <rob.murray@docker.com> <148866618+robmry@users.noreply.github.com>
Robert Terhaar <rterhaar@atlanticdynamic.com> <robbyt@users.noreply.github.com>
Roberto G. Hashioka <roberto.hashioka@docker.com> <roberto_hashioka@hotmail.com>
Roberto Muñoz Fernández <robertomf@gmail.com> <roberto.munoz.fernandez.contractor@bbva.com>
Robin Thoni <robin@rthoni.com>
Roman Dudin <katrmr@gmail.com> <decadent@users.noreply.github.com>
Rong Zhang <rongzhang@alauda.io>
Rongxiang Song <tinysong1226@gmail.com>
Rony Weng <ronyweng@synology.com>
Ross Boucher <rboucher@gmail.com>
Rui Cao <ruicao@alauda.io>
Rui JingAn <quiterace@gmail.com>
Runshen Zhu <runshen.zhu@gmail.com>
Ryan Stelly <ryan.stelly@live.com>
Ryoga Saito <contact@proelbtn.com>
Ryoga Saito <contact@proelbtn.com> <proelbtn@users.noreply.github.com>
Sainath Grandhi <sainath.grandhi@intel.com>
Sainath Grandhi <sainath.grandhi@intel.com> <saiallforums@gmail.com>
Sakeven Jiang <jc5930@sina.cn>
Samuel Karp <me@samuelkarp.com> <skarp@amazon.com>
Sandeep Bansal <sabansal@microsoft.com>
Sandeep Bansal <sabansal@microsoft.com> <msabansal@microsoft.com>
Santhosh Manohar <santhosh@docker.com>
Sargun Dhillon <sargun@netflix.com> <sargun@sargun.me>
Satoshi Tagomori <tagomoris@gmail.com>
Sean Lee <seanlee@tw.ibm.com> <scaleoutsean@users.noreply.github.com>
Sebastiaan van Stijn <github@gone.nl>
Sebastiaan van Stijn <github@gone.nl> <moby@example.com>
Sebastiaan van Stijn <github@gone.nl> <sebastiaan@ws-key-sebas3.dpi1.dpi>
Sebastiaan van Stijn <github@gone.nl> <thaJeztah@users.noreply.github.com>
Sebastian Thomschke <sebthom@users.noreply.github.com>
Seongyeol Lim <seongyeol37@gmail.com>
Serhii Nakon <serhii.n@thescimus.com>
Shaun Kaasten <shaunk@gmail.com>
Shawn Landden <shawn@churchofgit.com> <shawnlandden@gmail.com>
Shengbo Song <thomassong@tencent.com>
Shengbo Song <thomassong@tencent.com> <mymneo@163.com>
Shih-Yuan Lee <fourdollars@gmail.com>
Shishir Mahajan <shishir.mahajan@redhat.com> <smahajan@redhat.com>
Shu-Wai Chow <shu-wai.chow@seattlechildrens.org>
Shukui Yang <yangshukui@huawei.com>
Shuwei Hao <haosw@cn.ibm.com>
Shuwei Hao <haosw@cn.ibm.com> <haoshuwei24@gmail.com>
Sidhartha Mani <sidharthamn@gmail.com>
Sjoerd Langkemper <sjoerd-github@linuxonly.nl> <sjoerd@byte.nl>
Smark Meng <smark@freecoop.net>
Smark Meng <smark@freecoop.net> <smarkm@users.noreply.github.com>
Solomon Hykes <solomon@docker.com> <s@docker.com>
Solomon Hykes <solomon@docker.com> <solomon.hykes@dotcloud.com>
Solomon Hykes <solomon@docker.com> <solomon@dotcloud.com>
@@ -609,12 +393,9 @@ Stefan Berger <stefanb@linux.vnet.ibm.com>
Stefan Berger <stefanb@linux.vnet.ibm.com> <stefanb@us.ibm.com>
Stefan J. Wernli <swernli@microsoft.com> <swernli@ntdev.microsoft.com>
Stefan S. <tronicum@user.github.com>
Stefan Scherer <stefan.scherer@docker.com>
Stefan Scherer <stefan.scherer@docker.com> <scherer_stefan@icloud.com>
Stephan Spindler <shutefan@gmail.com> <shutefan@users.noreply.github.com>
Stephen Day <stevvooe@gmail.com>
Stephen Day <stevvooe@gmail.com> <stephen.day@docker.com>
Stephen Day <stevvooe@gmail.com> <stevvooe@users.noreply.github.com>
Stephen Day <stephen.day@docker.com>
Stephen Day <stephen.day@docker.com> <stevvooe@users.noreply.github.com>
Steve Desmond <steve@vtsv.ca> <stevedesmond-ca@users.noreply.github.com>
Sun Gengze <690388648@qq.com>
Sun Jianbo <wonderflow.sun@gmail.com>
@@ -626,52 +407,31 @@ Sven Dowideit <SvenDowideit@home.org.au> <SvenDowideit@fosiki.com>
Sven Dowideit <SvenDowideit@home.org.au> <SvenDowideit@home.org.au>
Sven Dowideit <SvenDowideit@home.org.au> <SvenDowideit@users.noreply.github.com>
Sven Dowideit <SvenDowideit@home.org.au> <¨SvenDowideit@home.org.au¨>
Sylvain Baubeau <lebauce@gmail.com>
Sylvain Baubeau <lebauce@gmail.com> <sbaubeau@redhat.com>
Sylvain Bellemare <sylvain@ascribe.io>
Sylvain Bellemare <sylvain@ascribe.io> <sylvain.bellemare@ezeep.com>
Takuto Sato <tockn.jp@gmail.com>
Tangi Colin <tangicolin@gmail.com>
Tejesh Mehta <tejesh.mehta@gmail.com> <tj@init.me>
Terry Chu <zue.hterry@gmail.com>
Terry Chu <zue.hterry@gmail.com> <jubosh.tw@gmail.com>
Thatcher Peskens <thatcher@docker.com>
Thatcher Peskens <thatcher@docker.com> <thatcher@dotcloud.com>
Thatcher Peskens <thatcher@docker.com> <thatcher@gmx.net>
Thiago Alves Silva <thiago.alves@aurea.com>
Thiago Alves Silva <thiago.alves@aurea.com> <thiagoalves@users.noreply.github.com>
Thomas Gazagnaire <thomas@gazagnaire.org> <thomas@gazagnaire.com>
Thomas Ledos <thomas.ledos92@gmail.com>
Thomas Léveil <thomasleveil@gmail.com>
Thomas Léveil <thomasleveil@gmail.com> <thomasleveil@users.noreply.github.com>
Tibor Vass <teabee89@gmail.com> <tibor@docker.com>
Tibor Vass <teabee89@gmail.com> <tiborvass@users.noreply.github.com>
Till Claassen <pixelistik@users.noreply.github.com>
Tim Bart <tim@fewagainstmany.com>
Tim Bosse <taim@bosboot.org> <maztaim@users.noreply.github.com>
Tim Potter <tpot@hpe.com>
Tim Potter <tpot@hpe.com> <tpot@Tims-MacBook-Pro.local>
Tim Ruffles <oi@truffles.me.uk> <timruffles@googlemail.com>
Tim Terhorst <mynamewastaken+git@gmail.com>
Tim Wagner <tim.wagner@freenet.ag>
Tim Wagner <tim.wagner@freenet.ag> <33624860+herrwagner@users.noreply.github.com>
Tim Zju <21651152@zju.edu.cn>
Timothy Hobbs <timothyhobbs@seznam.cz>
Toli Kuznets <toli@docker.com>
Tom Barlow <tomwbarlow@gmail.com>
Tom Denham <tom@tomdee.co.uk>
Tom Denham <tom@tomdee.co.uk> <tom.denham@metaswitch.com>
Tom Sweeney <tsweeney@redhat.com>
Tom Wilkie <tom.wilkie@gmail.com>
Tom Wilkie <tom.wilkie@gmail.com> <tom@weave.works>
Tõnis Tiigi <tonistiigi@gmail.com>
Trace Andreason <tandreason@gmail.com>
Trapier Marshall <tmarshall@mirantis.com>
Trapier Marshall <tmarshall@mirantis.com> <trapier.marshall@docker.com>
Trishna Guha <trishnaguha17@gmail.com>
Tristan Carel <tristan@cogniteev.com>
Tristan Carel <tristan@cogniteev.com> <tristan.carel@gmail.com>
Tyler Brown <tylers.pile@gmail.com>
Umesh Yadav <umesh4257@gmail.com>
Umesh Yadav <umesh4257@gmail.com> <dungeonmaster18@users.noreply.github.com>
Victor Lyuboslavsky <victor@victoreda.com>
@@ -681,25 +441,15 @@ Victor Vieux <victor.vieux@docker.com> <victor@docker.com>
Victor Vieux <victor.vieux@docker.com> <victor@dotcloud.com>
Victor Vieux <victor.vieux@docker.com> <victorvieux@gmail.com>
Victor Vieux <victor.vieux@docker.com> <vieux@docker.com>
Vikas Choudhary <choudharyvikas16@gmail.com>
Vikram bir Singh <vsingh@mirantis.com>
Vikram bir Singh <vsingh@mirantis.com> <vikrambir.singh@docker.com>
Viktor Vojnovski <viktor.vojnovski@amadeus.com> <vojnovski@gmail.com>
Vincent Batts <vbatts@redhat.com> <vbatts@hashbangbash.com>
Vincent Bernat <vincent@bernat.ch>
Vincent Bernat <vincent@bernat.ch> <bernat@luffy.cx>
Vincent Bernat <vincent@bernat.ch> <Vincent.Bernat@exoscale.ch>
Vincent Bernat <vincent@bernat.ch> <vincent@bernat.im>
Vincent Boulineau <vincent.boulineau@datadoghq.com>
Vincent Bernat <Vincent.Bernat@exoscale.ch> <bernat@luffy.cx>
Vincent Bernat <Vincent.Bernat@exoscale.ch> <vincent@bernat.im>
Vincent Demeester <vincent.demeester@docker.com> <vincent+github@demeester.fr>
Vincent Demeester <vincent.demeester@docker.com> <vincent@demeester.fr>
Vincent Demeester <vincent.demeester@docker.com> <vincent@sbr.pm>
Vishnu Kannan <vishnuk@google.com>
Vitaly Ostrosablin <vostrosablin@virtuozzo.com>
Vitaly Ostrosablin <vostrosablin@virtuozzo.com> <tmp6154@yandex.ru>
Vladimir Rutsky <altsysrq@gmail.com> <iamironbob@gmail.com>
Vladislav Kolesnikov <vkolesnikov@beget.ru>
Vladislav Kolesnikov <vkolesnikov@beget.ru> <prime@vladqa.ru>
Walter Stanish <walter@pratyeka.org>
Wang Chao <chao.wang@ucloud.cn>
Wang Chao <chao.wang@ucloud.cn> <wcwxyz@gmail.com>
@@ -711,27 +461,12 @@ Wang Yuexiao <wang.yuexiao@zte.com.cn>
Wayne Chang <wayne@neverfear.org>
Wayne Song <wsong@docker.com> <wsong@users.noreply.github.com>
Wei Wu <wuwei4455@gmail.com> cizixs <cizixs@163.com>
Wei-Ting Kuo <waitingkuo0527@gmail.com>
Wen Cheng Ma <wenchma@cn.ibm.com>
Wenjun Tang <tangwj2@lenovo.com> <dodia@163.com>
Wewang Xiaorenfine <wang.xiaoren@zte.com.cn>
Will Weaver <monkey@buildingbananas.com>
Wing-Kam Wong <wingkwong.code@gmail.com>
WuLonghui <wlh6666@qq.com>
Xian Chaobo <xianchaobo@huawei.com>
Xian Chaobo <xianchaobo@huawei.com> <jimmyxian2004@yahoo.com.cn>
Xianglin Gao <xlgao@zju.edu.cn>
Xianjie <guxianjie@gmail.com>
Xianjie <guxianjie@gmail.com> <datastream@datastream-laptop.local>
Xianlu Bird <xianlubird@gmail.com>
Xiao YongBiao <xyb4638@gmail.com>
Xiao Zhang <xiaozhang0210@hotmail.com>
Xiaodong Liu <liuxiaodong@loongson.cn>
Xiaodong Zhang <a4012017@sina.com>
Xiaohua Ding <xiao_hua_ding@sina.cn>
Xiaoyu Zhang <zhang.xiaoyu33@zte.com.cn>
Xinfeng Liu <XinfengLiu@icloud.com>
Xinfeng Liu <XinfengLiu@icloud.com> <xinfeng.liu@gmail.com>
Xuecong Liao <satorulogic@gmail.com>
Yamasaki Masahide <masahide.y@gmail.com>
Yao Zaiyong <yaozaiyong@hotmail.com>
@@ -742,29 +477,15 @@ Yi EungJun <eungjun.yi@navercorp.com> <semtlenori@gmail.com>
Ying Li <ying.li@docker.com>
Ying Li <ying.li@docker.com> <cyli@twistedmatrix.com>
Yong Tang <yong.tang.github@outlook.com> <yongtang@users.noreply.github.com>
Yongxin Li <yxli@alauda.io>
Yosef Fertel <yfertel@gmail.com> <frosforever@users.noreply.github.com>
Yu Changchun <yuchangchun1@huawei.com>
Yu Chengxia <yuchengxia@huawei.com>
Yu Peng <yu.peng36@zte.com.cn>
Yu Peng <yu.peng36@zte.com.cn> <yupeng36@zte.com.cn>
Yuan Sun <sunyuan3@huawei.com>
Yue Zhang <zy675793960@yeah.net>
Yufei Xiong <yufei.xiong@qq.com>
Zach Gershman <zachgersh@gmail.com>
Zach Gershman <zachgersh@gmail.com> <zachgersh@users.noreply.github.com>
Zachary Jaffee <zjaffee@us.ibm.com> <zij@case.edu>
Zachary Jaffee <zjaffee@us.ibm.com> <zjaffee@apache.org>
Zhang Kun <zkazure@gmail.com>
Zhang Wentao <zhangwentao234@huawei.com>
ZhangHang <stevezhang2014@gmail.com>
Zhenkun Bi <bi.zhenkun@zte.com.cn>
Zhou Hao <zhouhao@cn.fujitsu.com>
Zhoulin Xie <zhoulin.xie@daocloud.io>
Zhu Kunjia <zhu.kunjia@zte.com.cn>
Ziheng Liu <lzhfromustc@gmail.com>
Zou Yu <zouyu7@huawei.com>
Zuhayr Elahi <zuhayr.elahi@docker.com>
Zuhayr Elahi <zuhayr.elahi@docker.com> <elahi.zuhayr@gmail.com>
정재영 <jjy600901@gmail.com>
정재영 <jjy600901@gmail.com> <43400316+J-jaeyoung@users.noreply.github.com>

AUTHORS (596 changed lines)

File diff suppressed because it is too large

CHANGELOG.md (new file, 3609 lines)

File diff suppressed because it is too large

CONTRIBUTING.md

@@ -27,10 +27,10 @@ issue, please bring it to their attention right away!
Please **DO NOT** file a public issue; instead, send your report privately to
[security@docker.com](mailto:security@docker.com).
Security reports are greatly appreciated and we will publicly thank you for it,
although we keep your name confidential if you request it. We also like to send
gifts&mdash;if you're into schwag, make sure to let us know. We currently do not
offer a paid security bounty program, but are not ruling it out in the future.
Security reports are greatly appreciated and we will publicly thank you for it.
We also like to send gifts&mdash;if you're into schwag, make sure to let
us know. We currently do not offer a paid security bounty program, but are not
ruling it out in the future.
## Reporting other issues
@@ -72,7 +72,7 @@ anybody starts working on it.
We are always thrilled to receive pull requests. We do our best to process them
quickly. If your pull request is not accepted on the first try,
don't get discouraged! Our contributor's guide explains [the review process we
use for simple changes](https://docs.docker.com/contribute/overview/).
use for simple changes](https://docs.docker.com/opensource/workflow/make-a-contribution/).
### Design and cleanup proposals
@@ -101,8 +101,9 @@ the contributors guide.
<td>
<p>
Register for the Docker Community Slack at
<a href="https://dockr.ly/comm-slack" target="_blank">https://dockr.ly/comm-slack</a>.
<a href="https://community.docker.com/registrations/groups/4316" target="_blank">https://community.docker.com/registrations/groups/4316</a>.
We use the #moby-project channel for general discussion, and there are separate channels for other Moby projects such as #containerd.
Archives are available at <a href="https://dockercommunity.slackarchive.io/" target="_blank">https://dockercommunity.slackarchive.io/</a>.
</p>
</td>
</tr>
@@ -309,6 +310,36 @@ Don't forget: being a maintainer is a time investment. Make sure you
will have time to make yourself available. You don't have to be a
maintainer to make a difference on the project!
### Manage issues and pull requests using the Derek bot
If you want to help label, assign, close or reopen issues or pull requests
without commit rights, ask a maintainer to add your GitHub handle to the
`.DEREK.yml` file (a sketch of such an entry follows the examples below).
[Derek](https://github.com/alexellis/derek) is a bot that extends GitHub's
user permissions, helping non-committers manage issues and pull requests simply by commenting.
For example:
* Labels
```
Derek add label: kind/question
Derek remove label: status/claimed
```
* Assign work
```
Derek assign: username
Derek unassign: me
```
* Manage issues and PRs
```
Derek close
Derek reopen
```
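As a rough sketch, the `.DEREK.yml` entry a maintainer would add looks
something like the following. The field names (`maintainers`, `features`,
`comments`) are taken from the Derek project's documentation, not from this
repository's file, so check the actual `.DEREK.yml` before copying:

```yaml
# Hypothetical Derek configuration excerpt.
maintainers:             # users allowed to issue "Derek ..." comment commands
  - your-github-handle
features:
  - comments             # enables the label/assign/close/reopen commands above
```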
## Moby community guidelines
We want to keep the Moby community awesome, growing and collaborative. We need
@@ -422,6 +453,6 @@ The rules:
guidelines. Since you've read all the rules, you now know that.
If you are having trouble getting into the mood of idiomatic Go, we recommend
reading through [Effective Go](https://go.dev/doc/effective_go). The
[Go Blog](https://go.dev/blog/) is also a great resource. Drinking the
reading through [Effective Go](https://golang.org/doc/effective_go.html). The
[Go Blog](https://blog.golang.org) is also a great resource. Drinking the
kool-aid is a lot easier than going thirsty.

Dockerfile

@@ -1,671 +1,241 @@
# syntax=docker/dockerfile:1.7
# This file describes the standard way to build Docker, using docker
#
# Usage:
#
# # Use make to build a development environment image and run it in a container.
# # This is slow the first time.
# make BIND_DIR=. shell
#
# The following commands are executed inside the running container.
ARG GO_VERSION=1.22.12
ARG BASE_DEBIAN_DISTRO="bookworm"
ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}"
ARG XX_VERSION=1.6.1
# # Make a dockerd binary.
# # hack/make.sh binary
#
# # Install dockerd to /usr/local/bin
# # make install
#
# # Run unit tests
# # hack/test/unit
#
# # Run tests e.g. integration, py
# # hack/make.sh binary test-integration test-docker-py
#
# Note: AppArmor used to mess with privileged mode, but this is no longer
# the case. Therefore, you don't have to disable it anymore.
#
ARG VPNKIT_VERSION=0.5.0
ARG DOCKERCLI_REPOSITORY="https://github.com/docker/cli.git"
ARG DOCKERCLI_VERSION=v27.5.0
# cli version used for integration-cli tests
ARG DOCKERCLI_INTEGRATION_REPOSITORY="https://github.com/docker/cli.git"
ARG DOCKERCLI_INTEGRATION_VERSION=v17.06.2-ce
ARG BUILDX_VERSION=0.20.0
ARG COMPOSE_VERSION=v2.32.4
ARG SYSTEMD="false"
ARG DOCKER_STATIC=1
# REGISTRY_VERSION specifies the version of the registry to download from
# https://hub.docker.com/r/distribution/distribution. This version of
# the registry is used to test schema 2 manifests. Generally, the version
# specified here should match a current release.
ARG REGISTRY_VERSION=2.8.3
# delve is currently only supported on linux/amd64 and linux/arm64;
# https://github.com/go-delve/delve/blob/v1.8.1/pkg/proc/native/support_sentinel.go#L1-L6
ARG DELVE_SUPPORTED=${TARGETPLATFORM#linux/amd64} DELVE_SUPPORTED=${DELVE_SUPPORTED#linux/arm64}
ARG DELVE_SUPPORTED=${DELVE_SUPPORTED:+"unsupported"}
ARG DELVE_SUPPORTED=${DELVE_SUPPORTED:-"supported"}
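# How the three ARG lines above work: the first strips a leading "linux/amd64"
# or "linux/arm64" prefix from TARGETPLATFORM, so a supported platform expands
# to the empty string; the ":+" expansion then maps any non-empty remainder to
# "unsupported", and the ":-" expansion maps an empty value to "supported".
# For example, linux/s390x stays non-empty and yields "unsupported", while
# linux/arm64 collapses to "" and yields "supported". The final value selects
# between the delve-supported and delve-unsupported stages further down.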
# cross compilation helper
FROM --platform=$BUILDPLATFORM tonistiigi/xx:${XX_VERSION} AS xx
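# xx ships the cross-compilation wrappers used throughout the stages below
# (xx-go, xx-apt-get, xx-info, xx-verify), which let each stage compile for
# TARGETPLATFORM while running natively on BUILDPLATFORM.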
# dummy stage to make sure the image is built for deps that don't support some
# architectures
FROM --platform=$BUILDPLATFORM busybox AS build-dummy
RUN mkdir -p /build
FROM scratch AS binary-dummy
COPY --from=build-dummy /build /build
# base
FROM --platform=$BUILDPLATFORM ${GOLANG_IMAGE} AS base
COPY --from=xx / /
RUN echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
RUN apt-get update && apt-get install --no-install-recommends -y file
ENV GO111MODULE=off
ENV GOTOOLCHAIN=local
FROM golang:1.10.3 AS base
# FIXME(vdemeester) this is kept for other script depending on it to not fail right away
# Remove this once the other scripts uses something else to detect the version
ENV GO_VERSION 1.10.3
# allow replacing httpredir or deb mirror
ARG APT_MIRROR=deb.debian.org
RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list
FROM base AS criu
ADD --chmod=0644 https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_11/Release.key /etc/apt/trusted.gpg.d/criu.gpg.asc
RUN --mount=type=cache,sharing=locked,id=moby-criu-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-criu-aptcache,target=/var/cache/apt \
echo 'deb https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_12/ /' > /etc/apt/sources.list.d/criu.list \
&& apt-get update \
&& apt-get install -y --no-install-recommends criu \
&& install -D /usr/sbin/criu /build/criu \
&& /build/criu --version
# registry
FROM base AS registry-src
WORKDIR /usr/src/registry
RUN git init . && git remote add origin "https://github.com/distribution/distribution.git"
# Install CRIU for checkpoint/restore support
ENV CRIU_VERSION 3.6
# Install dependency packages specific to criu
RUN apt-get update && apt-get install -y \
libnet-dev \
libprotobuf-c0-dev \
libprotobuf-dev \
libnl-3-dev \
libcap-dev \
protobuf-compiler \
protobuf-c-compiler \
python-protobuf \
&& mkdir -p /usr/src/criu \
&& curl -sSL https://github.com/checkpoint-restore/criu/archive/v${CRIU_VERSION}.tar.gz | tar -C /usr/src/criu/ -xz --strip-components=1 \
&& cd /usr/src/criu \
&& make \
&& make PREFIX=/build/ install-criu
FROM base AS registry
WORKDIR /go/src/github.com/docker/distribution
# Install two versions of the registry. The first is an older version that
# only supports schema1 manifests. The second is a newer version that supports
# both. This allows integration-cli tests to cover push/pull with both schema1
# and schema2 manifests.
ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd
ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \
&& (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \
&& GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \
go build -buildmode=pie -o /build/registry-v2 github.com/docker/distribution/cmd/registry \
&& case $(dpkg --print-architecture) in \
amd64|ppc64*|s390x) \
(cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1"); \
GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \
go build -buildmode=pie -o /build/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \
;; \
esac \
&& rm -rf "$GOPATH"
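# Note: the GOPATH juggling above is the pre-Go-modules vendoring pattern;
# prepending the Godeps/_workspace directory to GOPATH lets "go build" resolve
# the registry's vendored dependencies.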
# REGISTRY_VERSION_SCHEMA1 specifies the version of the registry to build and
# install from the https://github.com/docker/distribution repository. This is
# an older (pre v2.3.0) version of the registry that only supports schema1
# manifests. This version of the registry does not work on arm64, so installation
# is skipped on that architecture.
ARG REGISTRY_VERSION_SCHEMA1=v2.1.0
ARG TARGETPLATFORM
RUN --mount=from=registry-src,src=/usr/src/registry,rw \
--mount=type=cache,target=/root/.cache/go-build,id=registry-build-$TARGETPLATFORM \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=tmpfs,target=/go/src <<EOT
set -ex
export GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"
# Make the /build directory no matter what so that it doesn't fail on arm64 or
# any other platform where we don't build this registry
mkdir /build
case $TARGETPLATFORM in
linux/amd64|linux/arm/v7|linux/ppc64le|linux/s390x)
git fetch -q --depth 1 origin "${REGISTRY_VERSION_SCHEMA1}" +refs/tags/*:refs/tags/*
git checkout -q FETCH_HEAD
CGO_ENABLED=0 xx-go build -o /build/registry-v2-schema1 -v ./cmd/registry
xx-verify /build/registry-v2-schema1
;;
esac
EOT
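# The --mount flags on the RUN above are BuildKit features: the rw bind mount
# exposes the registry-src checkout without copying it into the layer (rw
# because the git fetch/checkout writes to it), the cache mounts persist the
# Go build and module caches across builds, and the tmpfs mount keeps the
# throwaway /go/src tree out of the final image.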
FROM distribution/distribution:$REGISTRY_VERSION AS registry-v2
RUN mkdir /build && mv /bin/registry /build/registry-v2
# go-swagger
FROM base AS swagger-src
WORKDIR /usr/src/swagger
# Currently uses a fork from https://github.com/kolyshkin/go-swagger/tree/golang-1.13-fix
# TODO: move to under moby/ or fix upstream go-swagger to work for us.
RUN git init . && git remote add origin "https://github.com/kolyshkin/go-swagger.git"
# GO_SWAGGER_COMMIT specifies the version of the go-swagger binary to build and
# install. Go-swagger is used in CI for validating swagger.yaml in hack/validate/swagger-gen
ARG GO_SWAGGER_COMMIT=c56166c036004ba7a3a321e5951ba472b9ae298c
RUN git fetch -q --depth 1 origin "${GO_SWAGGER_COMMIT}" && git checkout -q FETCH_HEAD
FROM base AS docker-py
# Get the "docker-py" source so we can run their integration tests
ENV DOCKER_PY_COMMIT 8b246db271a85d6541dc458838627e89c683e42f
RUN git clone https://github.com/docker/docker-py.git /build \
&& cd /build \
&& git checkout -q $DOCKER_PY_COMMIT
FROM base AS swagger
WORKDIR /go/src/github.com/go-swagger/go-swagger
ARG TARGETPLATFORM
RUN --mount=from=swagger-src,src=/usr/src/swagger,rw \
--mount=type=cache,target=/root/.cache/go-build,id=swagger-build-$TARGETPLATFORM \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=tmpfs,target=/go/src/ <<EOT
set -e
xx-go build -o /build/swagger ./cmd/swagger
xx-verify /build/swagger
EOT
# Install go-swagger for validating swagger.yaml
ENV GO_SWAGGER_COMMIT c28258affb0b6251755d92489ef685af8d4ff3eb
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone https://github.com/go-swagger/go-swagger.git "$GOPATH/src/github.com/go-swagger/go-swagger" \
&& (cd "$GOPATH/src/github.com/go-swagger/go-swagger" && git checkout -q "$GO_SWAGGER_COMMIT") \
&& go build -o /build/swagger github.com/go-swagger/go-swagger/cmd/swagger \
&& rm -rf "$GOPATH"
# frozen-images
# See also frozenImages in "testutil/environment/protect.go" (which needs to
# be updated when adding images to this list)
FROM debian:${BASE_DEBIAN_DISTRO} AS frozen-images
RUN --mount=type=cache,sharing=locked,id=moby-frozen-images-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-frozen-images-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
ca-certificates \
curl \
jq
FROM base AS frozen-images
RUN apt-get update && apt-get install -y jq ca-certificates --no-install-recommends
# Get useful and necessary Hub images so we can "docker load" locally instead of pulling
COPY contrib/download-frozen-image-v2.sh /
ARG TARGETARCH
ARG TARGETVARIANT
RUN /download-frozen-image-v2.sh /build \
busybox:latest@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209 \
busybox:glibc@sha256:1f81263701cddf6402afe9f33fca0266d9fff379e59b1748f33d3072da71ee85 \
debian:bookworm-slim@sha256:2bc5c236e9b262645a323e9088dfa3bb1ecb16cc75811daf40a23a824d665be9 \
hello-world:latest@sha256:d58e752213a51785838f9eed2b7a498ffa1cb3aa7f946dda11af39286c3db9a9 \
arm32v7/hello-world:latest@sha256:50b8560ad574c779908da71f7ce370c0a2471c098d44d1c8f6b513c5a55eeeb1
buildpack-deps:jessie@sha256:dd86dced7c9cd2a724e779730f0a53f93b7ef42228d4344b25ce9a42a1486251 \
busybox:latest@sha256:bbc3a03235220b170ba48a157dd097dd1379299370e1ed99ce976df0355d24f0 \
busybox:glibc@sha256:0b55a30394294ab23b9afd58fab94e61a923f5834fba7ddbae7f8e0c11ba85e6 \
debian:jessie@sha256:287a20c5f73087ab406e6b364833e3fb7b3ae63ca0eb3486555dc27ed32c6e60 \
hello-world:latest@sha256:be0cd392e45be79ffeffa6b05338b98ebb16c87b255f48e297ec7f98e123905c
# See also ensureFrozenImagesLinux() in "integration-cli/fixtures_linux_daemon_test.go" (which needs to be updated when adding images to this list)
# delve
FROM base AS delve-src
WORKDIR /usr/src/delve
RUN git init . && git remote add origin "https://github.com/go-delve/delve.git"
# DELVE_VERSION specifies the version of the Delve debugger binary
# from the https://github.com/go-delve/delve repository.
# It can be used to run Docker with the possibility of
# attaching a debugger to it.
ARG DELVE_VERSION=v1.23.0
RUN git fetch -q --depth 1 origin "${DELVE_VERSION}" +refs/tags/*:refs/tags/* && git checkout -q FETCH_HEAD
# Just a little hack so we don't have to install these deps twice, once for runc and once for dockerd
FROM base AS runtime-dev
RUN apt-get update && apt-get install -y \
libapparmor-dev \
libseccomp-dev
FROM base AS delve-supported
WORKDIR /usr/src/delve
ARG TARGETPLATFORM
RUN --mount=from=delve-src,src=/usr/src/delve,rw \
--mount=type=cache,target=/root/.cache/go-build,id=delve-build-$TARGETPLATFORM \
--mount=type=cache,target=/go/pkg/mod <<EOT
set -e
GO111MODULE=on xx-go build -o /build/dlv ./cmd/dlv
xx-verify /build/dlv
EOT
FROM binary-dummy AS delve-unsupported
FROM delve-${DELVE_SUPPORTED} AS delve
FROM base AS tomlv
ENV INSTALL_BINARY_NAME=tomlv
COPY hack/dockerfile/install/install.sh ./install.sh
COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
RUN PREFIX=/build/ ./install.sh $INSTALL_BINARY_NAME
FROM base AS tomll
# GOTOML_VERSION specifies the version of the tomll binary to build and install
# from the https://github.com/pelletier/go-toml repository. This binary is used
# in CI in the hack/validate/toml script.
#
# When updating this version, consider updating the github.com/pelletier/go-toml
# dependency in vendor.mod accordingly.
ARG GOTOML_VERSION=v1.8.1
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
GOBIN=/build/ GO111MODULE=on go install "github.com/pelletier/go-toml/cmd/tomll@${GOTOML_VERSION}" \
&& /build/tomll --help
FROM base AS vndr
ENV INSTALL_BINARY_NAME=vndr
COPY hack/dockerfile/install/install.sh ./install.sh
COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
RUN PREFIX=/build/ ./install.sh $INSTALL_BINARY_NAME
FROM base AS gowinres
# GOWINRES_VERSION defines go-winres tool version
ARG GOWINRES_VERSION=v0.3.1
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
GOBIN=/build/ GO111MODULE=on go install "github.com/tc-hib/go-winres@${GOWINRES_VERSION}" \
&& /build/go-winres --help
FROM base AS containerd
RUN apt-get update && apt-get install -y btrfs-tools
ENV INSTALL_BINARY_NAME=containerd
COPY hack/dockerfile/install/install.sh ./install.sh
COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
RUN PREFIX=/build/ ./install.sh $INSTALL_BINARY_NAME
# containerd
FROM base AS containerd-src
WORKDIR /usr/src/containerd
RUN git init . && git remote add origin "https://github.com/containerd/containerd.git"
# CONTAINERD_VERSION is used to build containerd binaries, and used for the
# integration tests. The distributed docker .deb and .rpm packages depend on a
# separate (containerd.io) package, which may be a different version than the
# one specified here. The containerd golang package is also pinned in vendor.mod.
# When updating the binary version you may also need to update the vendor
# version to pick up bug fixes or new APIs, however, usually the Go packages
# are built from a commit from the master branch.
ARG CONTAINERD_VERSION=v1.7.25
RUN git fetch -q --depth 1 origin "${CONTAINERD_VERSION}" +refs/tags/*:refs/tags/* && git checkout -q FETCH_HEAD
FROM base AS proxy
ENV INSTALL_BINARY_NAME=proxy
COPY hack/dockerfile/install/install.sh ./install.sh
COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
RUN PREFIX=/build/ ./install.sh $INSTALL_BINARY_NAME
FROM base AS containerd-build
WORKDIR /go/src/github.com/containerd/containerd
ARG TARGETPLATFORM
RUN --mount=type=cache,sharing=locked,id=moby-containerd-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-containerd-aptcache,target=/var/cache/apt \
apt-get update && xx-apt-get install -y --no-install-recommends \
gcc \
libbtrfs-dev \
libsecret-1-dev \
pkg-config
ARG DOCKER_STATIC
RUN --mount=from=containerd-src,src=/usr/src/containerd,rw \
--mount=type=cache,target=/root/.cache/go-build,id=containerd-build-$TARGETPLATFORM <<EOT
set -e
export CC=$(xx-info)-gcc
export CGO_ENABLED=$([ "$DOCKER_STATIC" = "1" ] && echo "0" || echo "1")
xx-go --wrap
make $([ "$DOCKER_STATIC" = "1" ] && echo "STATIC=1") binaries
xx-verify $([ "$DOCKER_STATIC" = "1" ] && echo "--static") bin/containerd
xx-verify $([ "$DOCKER_STATIC" = "1" ] && echo "--static") bin/containerd-shim-runc-v2
xx-verify $([ "$DOCKER_STATIC" = "1" ] && echo "--static") bin/ctr
mkdir /build
mv bin/containerd bin/containerd-shim-runc-v2 bin/ctr /build
EOT
FROM containerd-build AS containerd-linux
FROM binary-dummy AS containerd-windows
FROM containerd-${TARGETOS} AS containerd
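# Stage dispatch: FROM containerd-${TARGETOS} resolves to containerd-linux
# (the real build) or containerd-windows (an alias of binary-dummy), so
# Windows targets get an empty /build directory instead of a failing build.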
FROM base AS golangci_lint
ARG GOLANGCI_LINT_VERSION=v1.60.2
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
GOBIN=/build/ GO111MODULE=on go install "github.com/golangci/golangci-lint/cmd/golangci-lint@${GOLANGCI_LINT_VERSION}" \
&& /build/golangci-lint --version
FROM base AS gotestsum
ARG GOTESTSUM_VERSION=v1.8.2
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
GOBIN=/build/ GO111MODULE=on go install "gotest.tools/gotestsum@${GOTESTSUM_VERSION}" \
&& /build/gotestsum --version
FROM base AS shfmt
ARG SHFMT_VERSION=v3.8.0
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
GOBIN=/build/ GO111MODULE=on go install "mvdan.cc/sh/v3/cmd/shfmt@${SHFMT_VERSION}" \
&& /build/shfmt --version
FROM base AS gopls
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
GOBIN=/build/ GO111MODULE=on go install "golang.org/x/tools/gopls@latest" \
&& /build/gopls version
FROM base AS gometalinter
ENV INSTALL_BINARY_NAME=gometalinter
COPY hack/dockerfile/install/install.sh ./install.sh
COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
RUN PREFIX=/build/ ./install.sh $INSTALL_BINARY_NAME
FROM base AS dockercli
WORKDIR /go/src/github.com/docker/cli
ARG DOCKERCLI_REPOSITORY
ARG DOCKERCLI_VERSION
ARG TARGETPLATFORM
RUN --mount=source=hack/dockerfile/cli.sh,target=/download-or-build-cli.sh \
--mount=type=cache,id=dockercli-git-$TARGETPLATFORM,sharing=locked,target=./.git \
--mount=type=cache,target=/root/.cache/go-build,id=dockercli-build-$TARGETPLATFORM \
rm -f ./.git/*.lock \
&& /download-or-build-cli.sh ${DOCKERCLI_VERSION} ${DOCKERCLI_REPOSITORY} /build \
&& /build/docker --version
ENV INSTALL_BINARY_NAME=dockercli
COPY hack/dockerfile/install/install.sh ./install.sh
COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
RUN PREFIX=/build/ ./install.sh $INSTALL_BINARY_NAME
FROM base AS dockercli-integration
WORKDIR /go/src/github.com/docker/cli
ARG DOCKERCLI_INTEGRATION_REPOSITORY
ARG DOCKERCLI_INTEGRATION_VERSION
ARG TARGETPLATFORM
RUN --mount=source=hack/dockerfile/cli.sh,target=/download-or-build-cli.sh \
--mount=type=cache,id=dockercli-git-$TARGETPLATFORM,sharing=locked,target=./.git \
--mount=type=cache,target=/root/.cache/go-build,id=dockercli-build-$TARGETPLATFORM \
rm -f ./.git/*.lock \
&& /download-or-build-cli.sh ${DOCKERCLI_INTEGRATION_VERSION} ${DOCKERCLI_INTEGRATION_REPOSITORY} /build \
&& /build/docker --version
FROM runtime-dev AS runc
ENV INSTALL_BINARY_NAME=runc
COPY hack/dockerfile/install/install.sh ./install.sh
COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
COPY git-bundles git-bundles
RUN PREFIX=/build/ ./install.sh $INSTALL_BINARY_NAME
# runc
FROM base AS runc-src
WORKDIR /usr/src/runc
RUN git init . && git remote add origin "https://github.com/opencontainers/runc.git"
# RUNC_VERSION should match the version that is used by the containerd version
# that is used. If you need to update runc, open a pull request in the containerd
# project first, and update both after that is merged. When updating RUNC_VERSION,
# consider updating runc in vendor.mod accordingly.
ARG RUNC_VERSION=v1.2.5
RUN git fetch -q --depth 1 origin "${RUNC_VERSION}" +refs/tags/*:refs/tags/* && git checkout -q FETCH_HEAD
FROM base AS tini
RUN apt-get update && apt-get install -y cmake vim-common
COPY hack/dockerfile/install/install.sh ./install.sh
ENV INSTALL_BINARY_NAME=tini
COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
RUN PREFIX=/build/ ./install.sh $INSTALL_BINARY_NAME
FROM base AS runc-build
WORKDIR /go/src/github.com/opencontainers/runc
ARG TARGETPLATFORM
RUN --mount=type=cache,sharing=locked,id=moby-runc-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-runc-aptcache,target=/var/cache/apt \
apt-get update && xx-apt-get install -y --no-install-recommends \
dpkg-dev \
gcc \
libc6-dev \
libseccomp-dev \
pkg-config
ARG DOCKER_STATIC
RUN --mount=from=runc-src,src=/usr/src/runc,rw \
--mount=type=cache,target=/root/.cache/go-build,id=runc-build-$TARGETPLATFORM <<EOT
set -e
xx-go --wrap
CGO_ENABLED=1 make "$([ "$DOCKER_STATIC" = "1" ] && echo "static" || echo "runc")"
xx-verify $([ "$DOCKER_STATIC" = "1" ] && echo "--static") runc
mkdir /build
mv runc /build/
EOT
FROM runc-build AS runc-linux
FROM binary-dummy AS runc-windows
FROM runc-${TARGETOS} AS runc
# tini
FROM base AS tini-src
WORKDIR /usr/src/tini
RUN git init . && git remote add origin "https://github.com/krallin/tini.git"
# TINI_VERSION specifies the version of tini (docker-init) to build. This
# binary is used when starting containers with the `--init` option.
ARG TINI_VERSION=v0.19.0
RUN git fetch -q --depth 1 origin "${TINI_VERSION}" +refs/tags/*:refs/tags/* && git checkout -q FETCH_HEAD
FROM base AS tini-build
WORKDIR /go/src/github.com/krallin/tini
RUN --mount=type=cache,sharing=locked,id=moby-tini-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-tini-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends cmake
ARG TARGETPLATFORM
RUN --mount=type=cache,sharing=locked,id=moby-tini-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-tini-aptcache,target=/var/cache/apt \
xx-apt-get install -y --no-install-recommends \
gcc \
libc6-dev \
pkg-config
RUN --mount=from=tini-src,src=/usr/src/tini,rw \
--mount=type=cache,target=/root/.cache/go-build,id=tini-build-$TARGETPLATFORM <<EOT
set -e
CC=$(xx-info)-gcc cmake .
make tini-static
xx-verify --static tini-static
mkdir /build
mv tini-static /build/docker-init
EOT
FROM tini-build AS tini-linux
FROM binary-dummy AS tini-windows
FROM tini-${TARGETOS} AS tini
# rootlesskit
FROM base AS rootlesskit-src
WORKDIR /usr/src/rootlesskit
RUN git init . && git remote add origin "https://github.com/rootless-containers/rootlesskit.git"
# When updating, also update vendor.mod and hack/dockerfile/install/rootlesskit.installer accordingly.
ARG ROOTLESSKIT_VERSION=v2.3.2
RUN git fetch -q --depth 1 origin "${ROOTLESSKIT_VERSION}" +refs/tags/*:refs/tags/* && git checkout -q FETCH_HEAD
FROM base AS rootlesskit-build
WORKDIR /go/src/github.com/rootless-containers/rootlesskit
ARG TARGETPLATFORM
RUN --mount=type=cache,sharing=locked,id=moby-rootlesskit-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-rootlesskit-aptcache,target=/var/cache/apt \
apt-get update && xx-apt-get install -y --no-install-recommends \
gcc \
libc6-dev \
pkg-config
ENV GO111MODULE=on
ARG DOCKER_STATIC
RUN --mount=from=rootlesskit-src,src=/usr/src/rootlesskit,rw \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=cache,target=/root/.cache/go-build,id=rootlesskit-build-$TARGETPLATFORM <<EOT
set -e
export CGO_ENABLED=$([ "$DOCKER_STATIC" = "1" ] && echo "0" || echo "1")
xx-go build -o /build/rootlesskit -ldflags="$([ "$DOCKER_STATIC" != "1" ] && echo "-linkmode=external")" ./cmd/rootlesskit
xx-verify $([ "$DOCKER_STATIC" = "1" ] && echo "--static") /build/rootlesskit
xx-go build -o /build/rootlesskit-docker-proxy -ldflags="$([ "$DOCKER_STATIC" != "1" ] && echo "-linkmode=external")" ./cmd/rootlesskit-docker-proxy
xx-verify $([ "$DOCKER_STATIC" = "1" ] && echo "--static") /build/rootlesskit-docker-proxy
EOT
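# Note that -linkmode=external is only passed for non-static builds: with
# CGO_ENABLED=1 the Go toolchain hands linking to the system linker so the
# binaries link against the platform libc; static builds set CGO_ENABLED=0
# and omit the flag.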
COPY --link ./contrib/dockerd-rootless.sh /build/
COPY --link ./contrib/dockerd-rootless-setuptool.sh /build/
FROM rootlesskit-build AS rootlesskit-linux
FROM binary-dummy AS rootlesskit-windows
FROM rootlesskit-${TARGETOS} AS rootlesskit
FROM base AS crun
ARG CRUN_VERSION=1.12
RUN --mount=type=cache,sharing=locked,id=moby-crun-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-crun-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
autoconf \
automake \
build-essential \
libcap-dev \
libprotobuf-c-dev \
libseccomp-dev \
libsystemd-dev \
libtool \
libudev-dev \
libyajl-dev \
python3 \
;
RUN --mount=type=tmpfs,target=/tmp/crun-build \
git clone https://github.com/containers/crun.git /tmp/crun-build && \
cd /tmp/crun-build && \
git checkout -q "${CRUN_VERSION}" && \
./autogen.sh && \
./configure --bindir=/build && \
make -j install
# vpnkit
# use dummy scratch stages to avoid the build failing for unsupported platforms
FROM scratch AS vpnkit-windows
FROM scratch AS vpnkit-linux-386
FROM scratch AS vpnkit-linux-arm
FROM scratch AS vpnkit-linux-ppc64le
FROM scratch AS vpnkit-linux-riscv64
FROM scratch AS vpnkit-linux-s390x
FROM djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-linux-amd64
FROM djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-linux-arm64
FROM vpnkit-linux-${TARGETARCH} AS vpnkit-linux
FROM vpnkit-${TARGETOS} AS vpnkit
# containerutility
FROM base AS containerutil-src
WORKDIR /usr/src/containerutil
RUN git init . && git remote add origin "https://github.com/docker-archive/windows-container-utility.git"
ARG CONTAINERUTILITY_VERSION=aa1ba87e99b68e0113bd27ec26c60b88f9d4ccd9
RUN git fetch -q --depth 1 origin "${CONTAINERUTILITY_VERSION}" +refs/tags/*:refs/tags/* && git checkout -q FETCH_HEAD
FROM base AS containerutil-build
WORKDIR /usr/src/containerutil
ARG TARGETPLATFORM
RUN xx-apt-get install -y --no-install-recommends \
gcc \
g++ \
libc6-dev \
pkg-config
RUN --mount=from=containerutil-src,src=/usr/src/containerutil,rw \
--mount=type=cache,target=/root/.cache/go-build,id=containerutil-build-$TARGETPLATFORM <<EOT
set -e
CC="$(xx-info)-gcc" CXX="$(xx-info)-g++" make
xx-verify --static containerutility.exe
mkdir /build
mv containerutility.exe /build/
EOT
FROM binary-dummy AS containerutil-linux
FROM containerutil-build AS containerutil-windows-amd64
FROM containerutil-windows-${TARGETARCH} AS containerutil-windows
FROM containerutil-${TARGETOS} AS containerutil
FROM docker/buildx-bin:${BUILDX_VERSION} as buildx
FROM docker/compose-bin:${COMPOSE_VERSION} as compose
FROM base AS dev-systemd-false
COPY --link --from=frozen-images /build/ /docker-frozen-images
COPY --link --from=swagger /build/ /usr/local/bin/
COPY --link --from=delve /build/ /usr/local/bin/
COPY --link --from=tomll /build/ /usr/local/bin/
COPY --link --from=gowinres /build/ /usr/local/bin/
COPY --link --from=tini /build/ /usr/local/bin/
COPY --link --from=registry /build/ /usr/local/bin/
COPY --link --from=registry-v2 /build/ /usr/local/bin/
# Skip the CRIU stage for now, as the opensuse package repository is sometimes
# unstable, and we're currently not using it in CI.
#
# FIXME(thaJeztah): re-enable this stage when https://github.com/moby/moby/issues/38963 is resolved (see https://github.com/moby/moby/pull/38984)
# COPY --link --from=criu /build/ /usr/local/bin/
COPY --link --from=gotestsum /build/ /usr/local/bin/
COPY --link --from=golangci_lint /build/ /usr/local/bin/
COPY --link --from=shfmt /build/ /usr/local/bin/
COPY --link --from=runc /build/ /usr/local/bin/
COPY --link --from=containerd /build/ /usr/local/bin/
COPY --link --from=rootlesskit /build/ /usr/local/bin/
COPY --link --from=vpnkit / /usr/local/bin/
COPY --link --from=containerutil /build/ /usr/local/bin/
COPY --link --from=crun /build/ /usr/local/bin/
COPY --link hack/dockerfile/etc/docker/ /etc/docker/
COPY --link --from=buildx /buildx /usr/local/libexec/docker/cli-plugins/docker-buildx
COPY --link --from=compose /docker-compose /usr/libexec/docker/cli-plugins/docker-compose
ENV PATH=/usr/local/cli:$PATH
ENV TEST_CLIENT_BINARY=/usr/local/cli-integration/docker
ENV CONTAINERD_ADDRESS=/run/docker/containerd/containerd.sock
ENV CONTAINERD_NAMESPACE=moby
WORKDIR /go/src/github.com/docker/docker
VOLUME /var/lib/docker
VOLUME /home/unprivilegeduser/.local/share/docker
# Wrap all commands in the "docker-in-docker" script to allow nested containers
ENTRYPOINT ["hack/dind"]
FROM dev-systemd-false AS dev-systemd-true
RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
dbus \
dbus-user-session \
systemd \
systemd-sysv
ENTRYPOINT ["hack/dind-systemd"]
FROM dev-systemd-${SYSTEMD} AS dev-base
# TODO: Some of this is only really needed for testing, it would be nice to split this up
FROM runtime-dev AS dev
RUN groupadd -r docker
RUN useradd --create-home --gid docker unprivilegeduser \
&& mkdir -p /home/unprivilegeduser/.local/share/docker \
&& chown -R unprivilegeduser /home/unprivilegeduser
# Let us use a .bashrc file
RUN ln -sfv /go/src/github.com/docker/docker/.bashrc ~/.bashrc
# Activate bash completion and include Docker's completion if mounted with DOCKER_BASH_COMPLETION_PATH
RUN echo "source /usr/share/bash-completion/bash_completion" >> /etc/bash.bashrc
RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker
RUN ldconfig
# Set dev environment as safe git directory to prevent "dubious ownership" errors
# when bind-mounting the source into the dev-container. See https://github.com/moby/moby/pull/44930
RUN git config --global --add safe.directory $GOPATH/src/github.com/docker/docker
# This should only install packages that are specifically needed for the dev environment and nothing else
# Do you really need to add another package here? Can it be done in a different build stage?
RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
apparmor \
bash-completion \
bzip2 \
inetutils-ping \
iproute2 \
iptables \
jq \
libcap2-bin \
libnet1 \
libnl-3-200 \
libprotobuf-c1 \
libyajl2 \
net-tools \
patch \
pigz \
sudo \
systemd-journal-remote \
thin-provisioning-tools \
uidmap \
vim \
vim-common \
xfsprogs \
xz-utils \
zip \
zstd
# Switch to use iptables instead of nftables (to match the CI hosts)
# TODO use some kind of runtime auto-detection instead if/when nftables is supported (https://github.com/moby/moby/issues/26824)
RUN update-alternatives --set iptables /usr/sbin/iptables-legacy || true \
&& update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy || true \
&& update-alternatives --set arptables /usr/sbin/arptables-legacy || true
RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \
apt-get update && apt-get install --no-install-recommends -y \
gcc \
pkg-config \
dpkg-dev \
libapparmor-dev \
libseccomp-dev \
libsecret-1-dev \
libsystemd-dev \
libudev-dev \
yamllint
COPY --link --from=dockercli /build/ /usr/local/cli
COPY --link --from=dockercli-integration /build/ /usr/local/cli-integration
RUN apt-get update && apt-get install -y \
apparmor \
aufs-tools \
bash-completion \
btrfs-tools \
iptables \
jq \
libdevmapper-dev \
libudev-dev \
libsystemd-dev \
binutils-mingw-w64 \
g++-mingw-w64-x86-64 \
net-tools \
pigz \
python-backports.ssl-match-hostname \
python-dev \
python-mock \
python-pip \
python-requests \
python-setuptools \
python-websocket \
python-wheel \
thin-provisioning-tools \
vim \
vim-common \
xfsprogs \
zip \
bzip2 \
xz-utils \
--no-install-recommends
COPY --from=swagger /build/swagger* /usr/local/bin/
COPY --from=frozen-images /build/ /docker-frozen-images
COPY --from=gometalinter /build/ /usr/local/bin/
COPY --from=tomlv /build/ /usr/local/bin/
COPY --from=vndr /build/ /usr/local/bin/
COPY --from=tini /build/ /usr/local/bin/
COPY --from=runc /build/ /usr/local/bin/
COPY --from=containerd /build/ /usr/local/bin/
COPY --from=proxy /build/ /usr/local/bin/
COPY --from=dockercli /build/ /usr/local/cli
COPY --from=registry /build/registry* /usr/local/bin/
COPY --from=criu /build/ /usr/local/
COPY --from=docker-py /build/ /docker-py
# TODO: This is for the docker-py tests, which shouldn't really be needed for
# this image, but currently CI is expecting to run this image. This should be
# split out into a separate image, including all the `python-*` deps installed
# above.
RUN cd /docker-py \
&& pip install docker-pycreds==0.2.1 \
&& pip install yamllint==1.5.0 \
&& pip install -r test-requirements.txt
FROM base AS build
COPY --from=gowinres /build/ /usr/local/bin/
ENV PATH=/usr/local/cli:$PATH
ENV DOCKER_BUILDTAGS apparmor seccomp selinux
# Options for hack/validate/gometalinter
ENV GOMETALINTER_OPTS="--deadline=2m"
WORKDIR /go/src/github.com/docker/docker
ENV GO111MODULE=off
ENV CGO_ENABLED=1
RUN --mount=type=cache,sharing=locked,id=moby-build-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-build-aptcache,target=/var/cache/apt \
apt-get update && apt-get install --no-install-recommends -y \
clang \
lld \
llvm
ARG TARGETPLATFORM
RUN --mount=type=cache,sharing=locked,id=moby-build-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-build-aptcache,target=/var/cache/apt \
xx-apt-get install --no-install-recommends -y \
dpkg-dev \
gcc \
libapparmor-dev \
libc6-dev \
libseccomp-dev \
libsecret-1-dev \
libsystemd-dev \
libudev-dev \
pkg-config
ARG DOCKER_BUILDTAGS
ARG DOCKER_DEBUG
ARG DOCKER_GITCOMMIT=HEAD
ARG DOCKER_LDFLAGS
ARG DOCKER_STATIC
ARG VERSION
ARG PLATFORM
ARG PRODUCT
ARG DEFAULT_PRODUCT_LICENSE
ARG PACKAGER_NAME
# PREFIX overrides the DEST dir in the make.sh script; without it the build fails
# because of the read-only bind mount of the current working dir
ENV PREFIX=/tmp
RUN <<EOT
# on bullseye the arm64 target does not link with lld, so configure it to use ld instead
if [ "$(xx-info arch)" = "arm64" ]; then
XX_CC_PREFER_LINKER=ld xx-clang --setup-target-triple
fi
EOT
RUN --mount=type=bind,target=.,rw \
--mount=type=tmpfs,target=cli/winresources/dockerd \
--mount=type=tmpfs,target=cli/winresources/docker-proxy \
--mount=type=cache,target=/root/.cache/go-build,id=moby-build-$TARGETPLATFORM <<EOT
set -e
target=$([ "$DOCKER_STATIC" = "1" ] && echo "binary" || echo "dynbinary")
xx-go --wrap
PKG_CONFIG=$(xx-go env PKG_CONFIG) ./hack/make.sh $target
xx-verify $([ "$DOCKER_STATIC" = "1" ] && echo "--static") /tmp/bundles/${target}-daemon/dockerd$([ "$(xx-info os)" = "windows" ] && echo ".exe")
xx-verify $([ "$DOCKER_STATIC" = "1" ] && echo "--static") /tmp/bundles/${target}-daemon/docker-proxy$([ "$(xx-info os)" = "windows" ] && echo ".exe")
mkdir /build
mv /tmp/bundles/${target}-daemon/* /build/
EOT
# usage:
# > docker buildx bake binary
# > DOCKER_STATIC=0 docker buildx bake binary
# or
# > make binary
# > make dynbinary
FROM scratch AS binary
COPY --from=build /build/ /
# usage:
# > docker buildx bake all
FROM scratch AS all
COPY --link --from=tini /build/ /
COPY --link --from=runc /build/ /
COPY --link --from=containerd /build/ /
COPY --link --from=rootlesskit /build/ /
COPY --link --from=containerutil /build/ /
COPY --link --from=vpnkit / /
COPY --link --from=build /build /
# smoke tests
# usage:
# > docker buildx bake binary-smoketest
FROM --platform=$TARGETPLATFORM base AS smoketest
WORKDIR /usr/local/bin
COPY --from=build /build .
RUN <<EOT
set -ex
file dockerd
dockerd --version
file docker-proxy
docker-proxy --version
EOT
# devcontainer is a stage used by .devcontainer/devcontainer.json
FROM dev-base AS devcontainer
COPY --link . .
COPY --link --from=gopls /build/ /usr/local/bin/
# usage:
# > make shell
# > SYSTEMD=true make shell
FROM dev-base AS dev
COPY --link . .
VOLUME /var/lib/docker
# Wrap all commands in the "docker-in-docker" script to allow nested containers
ENTRYPOINT ["hack/dind"]
# Upload docker source
COPY . /go/src/github.com/docker/docker

Dockerfile.e2e (new file, 74 lines)

@@ -0,0 +1,74 @@
## Step 1: Build tests
FROM golang:1.10.3-alpine3.7 as builder
RUN apk add --update \
bash \
btrfs-progs-dev \
build-base \
curl \
lvm2-dev \
jq \
&& rm -rf /var/cache/apk/*
RUN mkdir -p /go/src/github.com/docker/docker/
WORKDIR /go/src/github.com/docker/docker/
# Generate frozen images
COPY contrib/download-frozen-image-v2.sh contrib/download-frozen-image-v2.sh
RUN contrib/download-frozen-image-v2.sh /output/docker-frozen-images \
buildpack-deps:jessie@sha256:dd86dced7c9cd2a724e779730f0a53f93b7ef42228d4344b25ce9a42a1486251 \
busybox:latest@sha256:bbc3a03235220b170ba48a157dd097dd1379299370e1ed99ce976df0355d24f0 \
busybox:glibc@sha256:0b55a30394294ab23b9afd58fab94e61a923f5834fba7ddbae7f8e0c11ba85e6 \
debian:jessie@sha256:287a20c5f73087ab406e6b364833e3fb7b3ae63ca0eb3486555dc27ed32c6e60 \
hello-world:latest@sha256:be0cd392e45be79ffeffa6b05338b98ebb16c87b255f48e297ec7f98e123905c
# Install dockercli
# Please edit hack/dockerfile/install/<name>.installer to update them.
COPY hack/dockerfile/install hack/dockerfile/install
RUN ./hack/dockerfile/install/install.sh dockercli
# Set tag and add sources
ARG DOCKER_GITCOMMIT
ENV DOCKER_GITCOMMIT=${DOCKER_GITCOMMIT:-undefined}
ADD . .
# Build DockerSuite.TestBuild* dependency
RUN CGO_ENABLED=0 go build -buildmode=pie -o /output/httpserver github.com/docker/docker/contrib/httpserver
# Build the integration tests and copy the resulting binaries to /output/tests
RUN hack/make.sh build-integration-test-binary
RUN mkdir -p /output/tests && find . -name test.main -exec cp --parents '{}' /output/tests \;
## Step 2: Generate testing image
FROM alpine:3.7 as runner
# GNU tar is used for generating the emptyfs image
RUN apk add --update \
bash \
ca-certificates \
g++ \
git \
iptables \
pigz \
tar \
xz \
&& rm -rf /var/cache/apk/*
# Add an unprivileged user to be used for tests which need it
RUN addgroup docker && adduser -D -G docker unprivilegeduser -s /bin/ash
COPY contrib/httpserver/Dockerfile /tests/contrib/httpserver/Dockerfile
COPY contrib/syscall-test /tests/contrib/syscall-test
COPY integration-cli/fixtures /tests/integration-cli/fixtures
COPY hack/test/e2e-run.sh /scripts/run.sh
COPY hack/make/.ensure-emptyfs /scripts/ensure-emptyfs.sh
COPY --from=builder /output/docker-frozen-images /docker-frozen-images
COPY --from=builder /output/httpserver /tests/contrib/httpserver/httpserver
COPY --from=builder /output/tests /tests
COPY --from=builder /usr/local/bin/docker /usr/bin/docker
ENV DOCKER_REMOTE_DAEMON=1 DOCKER_INTEGRATION_DAEMON_DEST=/
ENTRYPOINT ["/scripts/run.sh"]


@@ -5,24 +5,24 @@
# This represents the bare minimum required to build and test Docker.
ARG GO_VERSION=1.22.12
FROM debian:stretch
ARG BASE_DEBIAN_DISTRO="bookworm"
ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}"
FROM ${GOLANG_IMAGE}
ENV GO111MODULE=off
ENV GOTOOLCHAIN=local
# allow replacing httpredir or deb mirror
ARG APT_MIRROR=deb.debian.org
RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list
# Compile and runtime deps
# https://github.com/docker/docker/blob/master/project/PACKAGERS.md#build-dependencies
# https://github.com/docker/docker/blob/master/project/PACKAGERS.md#runtime-dependencies
RUN apt-get update && apt-get install -y --no-install-recommends \
btrfs-tools \
build-essential \
curl \
cmake \
gcc \
git \
libapparmor-dev \
libdevmapper-dev \
libseccomp-dev \
ca-certificates \
e2fsprogs \
@@ -33,9 +33,22 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
xfsprogs \
xz-utils \
\
aufs-tools \
vim-common \
&& rm -rf /var/lib/apt/lists/*
# Install Go
# IMPORTANT: If the version of Go is updated, the Windows to Linux CI machines
# will need updating, to avoid errors. Ping #docker-maintainers on IRC
# with a heads-up.
# IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored
ENV GO_VERSION 1.10.3
RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" \
| tar -xzC /usr/local
ENV PATH /go/bin:/usr/local/go/bin:$PATH
ENV GOPATH /go
ENV CGO_LDFLAGS -L/lib
# Install runc, containerd, tini and docker-proxy
# Please edit hack/dockerfile/install/<name>.installer to update them.
COPY hack/dockerfile/install hack/dockerfile/install


@@ -45,8 +45,8 @@
#
# 1. Clone the sources from github.com:
#
# >> git clone https://github.com/docker/docker.git C:\gopath\src\github.com\docker\docker
# >> Cloning into 'C:\gopath\src\github.com\docker\docker'...
# >> git clone https://github.com/docker/docker.git C:\go\src\github.com\docker\docker
# >> Cloning into 'C:\go\src\github.com\docker\docker'...
# >> remote: Counting objects: 186216, done.
# >> remote: Compressing objects: 100% (21/21), done.
# >> remote: Total 186216 (delta 5), reused 0 (delta 0), pack-reused 186195
@@ -59,7 +59,7 @@
#
# 2. Change directory to the cloned docker sources:
#
# >> cd C:\gopath\src\github.com\docker\docker
# >> cd C:\go\src\github.com\docker\docker
#
#
# 3. Build a docker image with the components required to build the docker binaries from source
@@ -79,8 +79,8 @@
# 5. Copy the binaries out of the container, replacing HostPath with an appropriate destination
# folder on the host system where you want the binaries to be located.
#
# >> docker cp binaries:C:\gopath\src\github.com\docker\docker\bundles\docker.exe C:\HostPath\docker.exe
# >> docker cp binaries:C:\gopath\src\github.com\docker\docker\bundles\dockerd.exe C:\HostPath\dockerd.exe
# >> docker cp binaries:C:\go\src\github.com\docker\docker\bundles\docker.exe C:\HostPath\docker.exe
# >> docker cp binaries:C:\go\src\github.com\docker\docker\bundles\dockerd.exe C:\HostPath\dockerd.exe
#
#
# 6. (Optional) Remove the interim container holding the built executable binaries:
@@ -147,38 +147,24 @@
# The docker integration tests do not currently run in a container on Windows, predominantly
# due to Windows not supporting privileged mode, so anything using a volume would fail.
# They (along with the rest of the docker CI suite) can be run using
# https://github.com/kevpar/docker-w2wCIScripts/blob/master/runCI/Invoke-DockerCI.ps1.
# https://github.com/jhowardmsft/docker-w2wCIScripts/blob/master/runCI/Invoke-DockerCI.ps1.
#
# -----------------------------------------------------------------------------------------
# The number of build steps below is explicitly minimised to improve performance.
ARG WINDOWS_BASE_IMAGE=mcr.microsoft.com/windows/servercore
ARG WINDOWS_BASE_IMAGE_TAG=ltsc2022
FROM ${WINDOWS_BASE_IMAGE}:${WINDOWS_BASE_IMAGE_TAG}
FROM microsoft/windowsservercore
# Use PowerShell as the default shell
SHELL ["powershell", "-Command", "$ErrorActionPreference = 'Stop'; $ProgressPreference = 'SilentlyContinue';"]
ARG GO_VERSION=1.22.12
ARG GOTESTSUM_VERSION=v1.8.2
ARG GOWINRES_VERSION=v0.3.1
ARG CONTAINERD_VERSION=v1.7.25
# Environment variable notes:
# - GO_VERSION must be consistent with 'Dockerfile' used by Linux.
# - CONTAINERD_VERSION must be consistent with 'hack/dockerfile/install/containerd.installer' used by Linux.
# - FROM_DOCKERFILE is used for detection of building within a container.
ENV GO_VERSION=${GO_VERSION} `
CONTAINERD_VERSION=${CONTAINERD_VERSION} `
ENV GO_VERSION=1.10.3 `
GIT_VERSION=2.11.1 `
GOPATH=C:\gopath `
GO111MODULE=off `
GOTOOLCHAIN=local `
FROM_DOCKERFILE=1 `
GOTESTSUM_VERSION=${GOTESTSUM_VERSION} `
GOWINRES_VERSION=${GOWINRES_VERSION}
GOPATH=C:\go `
FROM_DOCKERFILE=1
RUN `
Function Test-Nano() { `
@@ -207,30 +193,28 @@ RUN `
Throw ("Failed to download " + $source) `
}`
} else { `
[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; `
$webClient = New-Object System.Net.WebClient; `
$webClient.DownloadFile($source, $target); `
} `
} `
`
setx /M PATH $('C:\git\cmd;C:\git\usr\bin;'+$Env:PATH+';C:\gcc\bin;C:\go\bin;C:\containerd\bin'); `
setx /M PATH $('C:\git\cmd;C:\git\usr\bin;'+$Env:PATH+';C:\gcc\bin;C:\go\bin'); `
`
Write-Host INFO: Downloading git...; `
$location='https://www.nuget.org/api/v2/package/GitForWindows/'+$Env:GIT_VERSION; `
Download-File $location C:\gitsetup.zip; `
`
Write-Host INFO: Downloading go...; `
$dlGoVersion=$Env:GO_VERSION; `
Download-File "https://go.dev/dl/go${dlGoVersion}.windows-amd64.zip" C:\go.zip; `
Download-File $('https://golang.org/dl/go'+$Env:GO_VERSION+'.windows-amd64.zip') C:\go.zip; `
`
Write-Host INFO: Downloading compiler 1 of 3...; `
Download-File https://raw.githubusercontent.com/moby/docker-tdmgcc/master/gcc.zip C:\gcc.zip; `
Download-File https://raw.githubusercontent.com/jhowardmsft/docker-tdmgcc/master/gcc.zip C:\gcc.zip; `
`
Write-Host INFO: Downloading compiler 2 of 3...; `
Download-File https://raw.githubusercontent.com/moby/docker-tdmgcc/master/runtime.zip C:\runtime.zip; `
Download-File https://raw.githubusercontent.com/jhowardmsft/docker-tdmgcc/master/runtime.zip C:\runtime.zip; `
`
Write-Host INFO: Downloading compiler 3 of 3...; `
Download-File https://raw.githubusercontent.com/moby/docker-tdmgcc/master/binutils.zip C:\binutils.zip; `
Download-File https://raw.githubusercontent.com/jhowardmsft/docker-tdmgcc/master/binutils.zip C:\binutils.zip; `
`
Write-Host INFO: Extracting git...; `
Expand-Archive C:\gitsetup.zip C:\git-tmp; `
@@ -254,61 +238,19 @@ RUN `
Remove-Item C:\binutils.zip; `
Remove-Item C:\gitsetup.zip; `
`
Write-Host INFO: Downloading containerd; `
Install-Package -Force 7Zip4PowerShell; `
$location='https://github.com/containerd/containerd/releases/download/'+$Env:CONTAINERD_VERSION+'/containerd-'+$Env:CONTAINERD_VERSION.TrimStart('v')+'-windows-amd64.tar.gz'; `
Download-File $location C:\containerd.tar.gz; `
New-Item -Path C:\containerd -ItemType Directory; `
Expand-7Zip C:\containerd.tar.gz C:\; `
Expand-7Zip C:\containerd.tar C:\containerd; `
Remove-Item C:\containerd.tar.gz; `
Remove-Item C:\containerd.tar; `
`
# Ensure all directories exist that we will require below....
$srcDir = """$Env:GOPATH`\src\github.com\docker\docker\bundles"""; `
Write-Host INFO: Ensuring existence of directory $srcDir...; `
New-Item -Force -ItemType Directory -Path $srcDir | Out-Null; `
Write-Host INFO: Creating source directory...; `
New-Item -ItemType Directory -Path C:\go\src\github.com\docker\docker | Out-Null; `
`
Write-Host INFO: Configuring git core.autocrlf...; `
C:\git\cmd\git config --global core.autocrlf true;
RUN `
Function Install-GoTestSum() { `
$Env:GO111MODULE = 'on'; `
$tmpGobin = "${Env:GOBIN_TMP}"; `
$Env:GOBIN = """${Env:GOPATH}`\bin"""; `
Write-Host "INFO: Installing gotestsum version $Env:GOTESTSUM_VERSION in $Env:GOBIN"; `
&go install "gotest.tools/gotestsum@${Env:GOTESTSUM_VERSION}"; `
$Env:GOBIN = "${tmpGobin}"; `
$Env:GO111MODULE = 'off'; `
if ($LASTEXITCODE -ne 0) { `
Throw '"gotestsum install failed..."'; `
} `
} `
C:\git\cmd\git config --global core.autocrlf true; `
`
Install-GoTestSum
RUN `
Function Install-GoWinres() { `
$Env:GO111MODULE = 'on'; `
$tmpGobin = "${Env:GOBIN_TMP}"; `
$Env:GOBIN = """${Env:GOPATH}`\bin"""; `
Write-Host "INFO: Installing go-winres version $Env:GOWINRES_VERSION in $Env:GOBIN"; `
&go install "github.com/tc-hib/go-winres@${Env:GOWINRES_VERSION}"; `
$Env:GOBIN = "${tmpGobin}"; `
$Env:GO111MODULE = 'off'; `
if ($LASTEXITCODE -ne 0) { `
Throw '"go-winres install failed..."'; `
} `
} `
`
Install-GoWinres
Write-Host INFO: Completed
# Make PowerShell the default entrypoint
ENTRYPOINT ["powershell.exe"]
# Set the working directory to the location of the sources
WORKDIR ${GOPATH}\src\github.com\docker\docker
WORKDIR C:\go\src\github.com\docker\docker
# Copy the sources into the container
COPY . .


@@ -176,7 +176,7 @@
END OF TERMS AND CONDITIONS
Copyright 2013-2018 Docker, Inc.
Copyright 2013-2017 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.


@@ -24,24 +24,21 @@
# subsystem maintainers accountable. If ownership is unclear, they are the de facto owners.
people = [
"akerouanton",
"aaronlehmann",
"akihirosuda",
"anusha",
"coolljt0725",
"corhere",
"cpuguy83",
"crazy-max",
"crosbymichael",
"dnephin",
"duglin",
"estesp",
"jhowardmsft",
"johnstep",
"justincormack",
"kolyshkin",
"laurazard",
"mhbauer",
"neersighted",
"robmry",
"rumpl",
"mlaventure",
"runcom",
"samuelkarp",
"stevvooe",
"thajeztah",
"tianon",
@@ -50,10 +47,18 @@
"unclejack",
"vdemeester",
"vieux",
"vvoland",
"yongtang"
]
[Org."Docs maintainers"]
# TODO Describe the docs maintainers role.
people = [
"misty",
"thajeztah"
]
[Org.Curators]
# The curators help ensure that incoming issues and pull requests are properly triaged and
@@ -69,15 +74,13 @@
people = [
"alexellis",
"andrewhsu",
"bsousaa",
"dmcgowan",
"anonymuse",
"chanwit",
"fntlnz",
"gianarb",
"olljanat",
"programmerq",
"rheinwein",
"ripcurld",
"sam-thibault",
"samwhited",
"thajeztah"
]
@@ -88,12 +91,6 @@
# Thank you!
people = [
# Aaron Lehmann was a maintainer for swarmkit, the registry, and the engine,
# and contributed many improvements, features, and bugfixes in those areas,
# among which "automated service rollbacks", templated secrets and configs,
# and resumable image layer downloads.
"aaronlehmann",
# Harald Albers is the mastermind behind the bash completion scripts for the
# Docker CLI. The completion scripts moved to the Docker CLI repository, so
# you can now find him perform his magic in the https://github.com/docker/cli repository.
@@ -113,30 +110,6 @@
# and tweets as @calavera.
"calavera",
# Michael Crosby was "chief maintainer" of the Docker project.
# During his time as a maintainer, Michael contributed to many
# milestones of the project; he was release captain of Docker v1.0.0,
# started the development of "libcontainer" (what later became runc)
# and containerd, as well as demoing cool hacks such as live migrating
# a game server container with checkpoint/restore.
#
# Michael is currently a maintainer of containerd, but you may see
# him around in other projects on GitHub.
"crosbymichael",
# Before becoming a maintainer, Daniel Nephin was a core contributor
# to "Fig" (now known as Docker Compose). As a maintainer for both the
# Engine and Docker CLI, Daniel contributed many features, among which
# the `docker stack` commands, allowing users to deploy their Docker
# Compose projects as a Swarm service.
"dnephin",
# Doug Davis contributed many features and fixes for the classic builder,
# such as "wildcard" copy, the dockerignore file, custom paths/names
# for the Dockerfile, as well as enhancements to the API and documentation.
# Follow Doug on Twitter, where he tweets as @duginabox.
"duglin",
# As a maintainer, Erik was responsible for the "builder", and
# started the first designs for the new networking model in
# Docker. Erik is now working on all kinds of plugins for Docker
@@ -145,7 +118,7 @@
# still stumble into him in our issue tracker, or on IRC.
"erikh",
# Evan Hazlett is the creator of the Shipyard and Interlock open source projects,
# Evan Hazlett is the creator of of the Shipyard and Interlock open source projects,
# and the author of "Orca", which became the foundation of Docker Universal Control
# Plane (UCP). As a maintainer, Evan helped integrating SwarmKit (secrets, tasks)
# into the Docker engine.
@@ -187,19 +160,9 @@
# check out her open source projects on GitHub https://github.com/jessfraz (a must-try).
"jessfraz",
# As a maintainer, John Howard managed to make the impossible possible;
# to run Docker on Windows. After facing many challenges, teaching
# fellow-maintainers that 'Windows is not Linux', and many changes in
# Windows Server to facilitate containers, native Windows containers
# saw the light of day in 2015.
#
# John is now enjoying life without containers: playing piano, painting,
# and walking his dogs, but you may occasionally see him drop by on GitHub.
"lowenna",
# Alexander Morozov contributed many features to Docker, worked on the premise of
# what later became containerd (and worked on that too), and made a "stupid" Go
# vendor tool specifically for docker/docker needs: vndr (https://github.com/LK4D4/vndr).
# vendor tool specificaly for docker/docker needs: vndr (https://github.com/LK4D4/vndr).
# Not many know that Alexander is a master negotiator, being able to change course
# of action with a single "Nope, we're not gonna do that".
"lk4d4",
@@ -210,13 +173,6 @@
# Swarm mode networking.
"mavenugo",
# As a maintainer, Kenfe-Mickaël Laventure worked on the container runtime,
# integrating containerd 1.0 with the daemon, and adding support for custom
# OCI runtimes, as well as implementing the `docker prune` subcommands,
# which was a welcome feature to be added. You can keep up with Mickaél on
# Twitter (@kmlaventure).
"mlaventure",
# As a docs maintainer, Mary Anthony contributed greatly to the Docker
# docs. She wrote the Docker Contributor Guide and Getting Started
# Guides. She helped create a doc build system independent of
@@ -284,11 +240,6 @@
Email = "aaron.lehmann@docker.com"
GitHub = "aaronlehmann"
[people.akerouanton]
Name = "Albin Kerouanton"
Email = "albinker@gmail.com"
GitHub = "akerouanton"
[people.alexellis]
Name = "Alex Ellis"
Email = "alexellis2@gmail.com"
@@ -296,7 +247,7 @@
[people.akihirosuda]
Name = "Akihiro Suda"
Email = "akihiro.suda.cz@hco.ntt.co.jp"
Email = "suda.akihiro@lab.ntt.co.jp"
GitHub = "AkihiroSuda"
[people.aluzzardi]
@@ -314,15 +265,15 @@
Email = "andrewhsu@docker.com"
GitHub = "andrewhsu"
[people.anonymuse]
Name = "Jesse White"
Email = "anonymuse@gmail.com"
GitHub = "anonymuse"
[people.anusha]
Name = "Anusha Ragunathan"
Email = "anusha@docker.com"
GitHub = "anusha-ragunathan"
[people.bsousaa]
Name = "Bruno de Sousa"
Email = "bruno.sousa@docker.com"
GitHub = "bsousaa"
[people.calavera]
Name = "David Calavera"
@@ -334,20 +285,15 @@
Email = "leijitang@huawei.com"
GitHub = "coolljt0725"
[people.corhere]
Name = "Cory Snider"
Email = "csnider@mirantis.com"
GitHub = "corhere"
[people.cpuguy83]
Name = "Brian Goff"
Email = "cpuguy83@gmail.com"
GitHub = "cpuguy83"
[people.crazy-max]
Name = "Kevin Alvarez"
Email = "contact@crazymax.dev"
GitHub = "crazy-max"
[people.chanwit]
Name = "Chanwit Kaewkasi"
Email = "chanwit@gmail.com"
GitHub = "chanwit"
[people.crosbymichael]
Name = "Michael Crosby"
@@ -359,11 +305,6 @@
Email = "dnephin@gmail.com"
GitHub = "dnephin"
[people.dmcgowan]
Name = "Derek McGowan"
Email = "derek@mcgstyle.net"
GitHub = "dmcgowan"
[people.duglin]
Name = "Doug Davis"
Email = "dug@us.ibm.com"
@@ -404,6 +345,11 @@
Email = "james@lovedthanlost.net"
GitHub = "jamtur01"
[people.jhowardmsft]
Name = "John Howard"
Email = "jhoward@microsoft.com"
GitHub = "jhowardmsft"
[people.jessfraz]
Name = "Jessie Frazelle"
Email = "jess@linux.com"
@@ -419,26 +365,11 @@
Email = "justin.cormack@docker.com"
GitHub = "justincormack"
[people.kolyshkin]
Name = "Kir Kolyshkin"
Email = "kolyshkin@gmail.com"
GitHub = "kolyshkin"
[people.laurazard]
Name = "Laura Brehm"
Email = "laura.brehm@docker.com"
GitHub = "laurazard"
[people.lk4d4]
Name = "Alexander Morozov"
Email = "lk4d4@docker.com"
GitHub = "lk4d4"
[people.lowenna]
Name = "John Howard"
Email = "github@lowenna.com"
GitHub = "lowenna"
[people.mavenugo]
Name = "Madhu Venugopal"
Email = "madhu@docker.com"
@@ -449,6 +380,11 @@
Email = "mbauer@us.ibm.com"
GitHub = "mhbauer"
[people.misty]
Name = "Misty Stanley-Jones"
Email = "misty@docker.com"
GitHub = "mistyhacks"
[people.mlaventure]
Name = "Kenfe-Mickaël Laventure"
Email = "mickael.laventure@gmail.com"
@@ -464,56 +400,26 @@
Email = "mrjana@docker.com"
GitHub = "mrjana"
[people.neersighted]
Name = "Bjorn Neergaard"
Email = "bjorn@neersighted.com"
GitHub = "neersighted"
[people.olljanat]
Name = "Olli Janatuinen"
Email = "olli.janatuinen@gmail.com"
GitHub = "olljanat"
[people.programmerq]
Name = "Jeff Anderson"
Email = "jeff@docker.com"
GitHub = "programmerq"
[people.robmry]
Name = "Rob Murray"
Email = "rob.murray@docker.com"
GitHub = "robmry"
[people.rheinwein]
Name = "Laura Frank"
Email = "laura@codeship.com"
GitHub = "rheinwein"
[people.ripcurld]
Name = "Boaz Shuster"
Email = "ripcurld.github@gmail.com"
GitHub = "ripcurld"
[people.rumpl]
Name = "Djordje Lukic"
Email = "djordje.lukic@docker.com"
GitHub = "rumpl"
[people.runcom]
Name = "Antonio Murdaca"
Email = "runcom@redhat.com"
GitHub = "runcom"
[people.sam-thibault]
Name = "Sam Thibault"
Email = "sam.thibault@docker.com"
GitHub = "sam-thibault"
[people.samuelkarp]
Name = "Samuel Karp"
Email = "me@samuelkarp.com"
GitHub = "samuelkarp"
[people.samwhited]
Name = "Sam Whited"
Email = "sam@samwhited.com"
GitHub = "samwhited"
[people.shykes]
Name = "Solomon Hykes"
Email = "solomon@docker.com"
@@ -573,11 +479,6 @@
Name = "Vishnu Kannan"
Email = "vishnuk@google.com"
GitHub = "vishh"
[people.vvoland]
Name = "Paweł Gronowski"
Email = "pawel.gronowski@docker.com"
GitHub = "vvoland"
[people.yongtang]
Name = "Yong Tang"

Makefile (232 lines changed)

@@ -1,22 +1,18 @@
DOCKER ?= docker
BUILDX ?= $(DOCKER) buildx
.PHONY: all binary dynbinary build cross help init-go-pkg-cache install manpages run shell test test-docker-py test-integration test-unit validate win
# set the graph driver as the current graphdriver if not set
DOCKER_GRAPHDRIVER := $(if $(DOCKER_GRAPHDRIVER),$(DOCKER_GRAPHDRIVER),$(shell docker info -f '{{ .Driver }}' 2>&1))
DOCKER_GRAPHDRIVER := $(if $(DOCKER_GRAPHDRIVER),$(DOCKER_GRAPHDRIVER),$(shell docker info 2>&1 | grep "Storage Driver" | sed 's/.*: //'))
export DOCKER_GRAPHDRIVER
DOCKER_INCREMENTAL_BINARY := $(if $(DOCKER_INCREMENTAL_BINARY),$(DOCKER_INCREMENTAL_BINARY),1)
export DOCKER_INCREMENTAL_BINARY
DOCKER_GITCOMMIT := $(shell git rev-parse HEAD)
# get OS/Arch of docker engine
DOCKER_OSARCH := $(shell bash -c 'source hack/make/.detect-daemon-osarch && echo $${DOCKER_ENGINE_OSARCH}')
DOCKERFILE := $(shell bash -c 'source hack/make/.detect-daemon-osarch && echo $${DOCKERFILE}')
DOCKER_GITCOMMIT := $(shell git rev-parse --short HEAD || echo unsupported)
export DOCKER_GITCOMMIT
# allow overriding the repository and branch that validation scripts are running
# against; these are used in hack/validate/.validate to check what changed in the PR.
export VALIDATE_REPO
export VALIDATE_BRANCH
export VALIDATE_ORIGIN_BRANCH
export PAGER
export GIT_PAGER
# env vars passed through directly to Docker's build scripts
# to allow things like `make KEEPBUNDLE=1 binary` easily
# `project/PACKAGERS.md` has some limited documentation of some of these
@@ -25,98 +21,82 @@ export GIT_PAGER
# option of "go build". For example, a built-in graphdriver priority list
# can be changed during build time like this:
#
# make DOCKER_LDFLAGS="-X github.com/docker/docker/daemon/graphdriver.priority=overlay2,zfs" dynbinary
# make DOCKER_LDFLAGS="-X github.com/docker/docker/daemon/graphdriver.priority=overlay2,devicemapper" dynbinary
#
DOCKER_ENVS := \
-e DOCKER_CROSSPLATFORMS \
-e BUILD_APT_MIRROR \
-e BUILDFLAGS \
-e KEEPBUNDLE \
-e DOCKER_BUILD_ARGS \
-e DOCKER_BUILD_GOGC \
-e DOCKER_BUILD_OPTS \
-e DOCKER_BUILD_PKGS \
-e DOCKER_BUILDKIT \
-e DOCKER_BASH_COMPLETION_PATH \
-e DOCKER_CLI_PATH \
-e DOCKERCLI_VERSION \
-e DOCKERCLI_REPOSITORY \
-e DOCKERCLI_INTEGRATION_VERSION \
-e DOCKERCLI_INTEGRATION_REPOSITORY \
-e DOCKER_DEBUG \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT \
-e DOCKER_GRAPHDRIVER \
-e DOCKER_INCREMENTAL_BINARY \
-e DOCKER_LDFLAGS \
-e DOCKER_PORT \
-e DOCKER_REMAP_ROOT \
-e DOCKER_ROOTLESS \
-e DOCKER_STORAGE_OPTS \
-e DOCKER_TEST_HOST \
-e DOCKER_USERLANDPROXY \
-e DOCKERD_ARGS \
-e DELVE_PORT \
-e GITHUB_ACTIONS \
-e TEST_FORCE_VALIDATE \
-e TEST_INTEGRATION_DIR \
-e TEST_INTEGRATION_USE_SNAPSHOTTER \
-e TEST_INTEGRATION_FAIL_FAST \
-e TEST_SKIP_INTEGRATION \
-e TEST_SKIP_INTEGRATION_CLI \
-e TEST_IGNORE_CGROUP_CHECK \
-e TESTCOVERAGE \
-e TESTDEBUG \
-e TESTDIRS \
-e TESTFLAGS \
-e TESTFLAGS_INTEGRATION \
-e TESTFLAGS_INTEGRATION_CLI \
-e TEST_FILTER \
-e TIMEOUT \
-e VALIDATE_REPO \
-e VALIDATE_BRANCH \
-e VALIDATE_ORIGIN_BRANCH \
-e HTTP_PROXY \
-e HTTPS_PROXY \
-e NO_PROXY \
-e http_proxy \
-e https_proxy \
-e no_proxy \
-e VERSION \
-e PLATFORM \
-e DEFAULT_PRODUCT_LICENSE \
-e PRODUCT \
-e PACKAGER_NAME \
-e PAGER \
-e GIT_PAGER \
-e OTEL_EXPORTER_OTLP_ENDPOINT \
-e OTEL_EXPORTER_OTLP_PROTOCOL \
-e OTEL_SERVICE_NAME
-e PRODUCT
# note: we _cannot_ add "-e DOCKER_BUILDTAGS" here because even if it's unset in the shell, that would shadow the "ENV DOCKER_BUILDTAGS" set in our Dockerfile, which is very important for our official builds
# to allow `make BIND_DIR=. shell` or `make BIND_DIR= test`
# (default to no bind mount if DOCKER_HOST is set)
# note: BINDDIR is supported for backwards-compatibility here
BIND_DIR := $(if $(BINDDIR),$(BINDDIR),$(if $(DOCKER_HOST),,bundles))
# DOCKER_MOUNT can be overridden, but use at your own risk!
ifndef DOCKER_MOUNT
DOCKER_MOUNT := $(if $(BIND_DIR),-v "$(CURDIR)/$(BIND_DIR):/go/src/github.com/docker/docker/$(BIND_DIR)")
DOCKER_MOUNT := $(if $(DOCKER_BINDDIR_MOUNT_OPTS),$(DOCKER_MOUNT):$(DOCKER_BINDDIR_MOUNT_OPTS),$(DOCKER_MOUNT))
# This allows the test suite to run without worrying about the underlying fs used by the container running the daemon (e.g. aufs-on-aufs), so long as the host running the container is running a supported fs.
# The volume will be cleaned up when the container is removed due to `--rm`.
# Note that `BIND_DIR` will already be set to `bundles` if `DOCKER_HOST` is not set (see above BIND_DIR line), in such case this will do nothing since `DOCKER_MOUNT` will already be set.
DOCKER_MOUNT := $(if $(DOCKER_MOUNT),$(DOCKER_MOUNT),-v /go/src/github.com/docker/docker/bundles) -v "$(CURDIR)/.git:/go/src/github.com/docker/docker/.git"
DOCKER_MOUNT_CACHE := -v docker-dev-cache:/root/.cache -v docker-mod-cache:/go/pkg/mod/
DOCKER_MOUNT_CLI := $(if $(DOCKER_CLI_PATH),-v $(shell dirname $(DOCKER_CLI_PATH)):/usr/local/cli,)
DOCKER_MOUNT_BASH_COMPLETION := $(if $(DOCKER_BASH_COMPLETION_PATH),-v $(shell dirname $(DOCKER_BASH_COMPLETION_PATH)):/usr/local/completion/bash,)
DOCKER_MOUNT := $(DOCKER_MOUNT) $(DOCKER_MOUNT_CACHE) $(DOCKER_MOUNT_CLI) $(DOCKER_MOUNT_BASH_COMPLETION)
endif # ifndef DOCKER_MOUNT
# This allows setting the docker-dev container name
DOCKER_CONTAINER_NAME := $(if $(CONTAINER_NAME),--name $(CONTAINER_NAME),)
DOCKER_IMAGE := docker-dev
DOCKER_PORT_FORWARD := $(if $(DOCKER_PORT),-p "$(DOCKER_PORT)",)
DELVE_PORT_FORWARD := $(if $(DELVE_PORT),-p "$(DELVE_PORT)",)
# enable package cache if DOCKER_INCREMENTAL_BINARY and DOCKER_MOUNT (i.e. DOCKER_HOST) are set
PKGCACHE_MAP := gopath:/go/pkg goroot-linux_amd64:/usr/local/go/pkg/linux_amd64 goroot-linux_amd64_netgo:/usr/local/go/pkg/linux_amd64_netgo
PKGCACHE_VOLROOT := dockerdev-go-pkg-cache
PKGCACHE_VOL := $(if $(PKGCACHE_DIR),$(CURDIR)/$(PKGCACHE_DIR)/,$(PKGCACHE_VOLROOT)-)
DOCKER_MOUNT_PKGCACHE := $(if $(DOCKER_INCREMENTAL_BINARY),$(shell echo $(PKGCACHE_MAP) | sed -E 's@([^ ]*)@-v "$(PKGCACHE_VOL)\1"@g'),)
DOCKER_MOUNT_CLI := $(if $(DOCKER_CLI_PATH),-v $(shell dirname $(DOCKER_CLI_PATH)):/usr/local/cli,)
DOCKER_MOUNT_BASH_COMPLETION := $(if $(DOCKER_BASH_COMPLETION_PATH),-v $(shell dirname $(DOCKER_BASH_COMPLETION_PATH)):/usr/local/completion/bash,)
DOCKER_MOUNT := $(DOCKER_MOUNT) $(DOCKER_MOUNT_PKGCACHE) $(DOCKER_MOUNT_CLI) $(DOCKER_MOUNT_BASH_COMPLETION)
DOCKER_FLAGS := $(DOCKER) run --rm --privileged $(DOCKER_CONTAINER_NAME) $(DOCKER_ENVS) $(DOCKER_MOUNT) $(DOCKER_PORT_FORWARD) $(DELVE_PORT_FORWARD)
GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
GIT_BRANCH_CLEAN := $(shell echo $(GIT_BRANCH) | sed -e "s/[^[:alnum:]]/-/g")
DOCKER_IMAGE := docker-dev$(if $(GIT_BRANCH_CLEAN),:$(GIT_BRANCH_CLEAN))
DOCKER_PORT_FORWARD := $(if $(DOCKER_PORT),-p "$(DOCKER_PORT)",)
DOCKER_FLAGS := docker run --rm -i --privileged $(DOCKER_CONTAINER_NAME) $(DOCKER_ENVS) $(DOCKER_MOUNT) $(DOCKER_PORT_FORWARD)
BUILD_APT_MIRROR := $(if $(DOCKER_BUILD_APT_MIRROR),--build-arg APT_MIRROR=$(DOCKER_BUILD_APT_MIRROR))
export BUILD_APT_MIRROR
SWAGGER_DOCS_PORT ?= 9000
INTEGRATION_CLI_MASTER_IMAGE := $(if $(INTEGRATION_CLI_MASTER_IMAGE), $(INTEGRATION_CLI_MASTER_IMAGE), integration-cli-master)
INTEGRATION_CLI_WORKER_IMAGE := $(if $(INTEGRATION_CLI_WORKER_IMAGE), $(INTEGRATION_CLI_WORKER_IMAGE), integration-cli-worker)
define \n
@@ -130,127 +110,70 @@ ifeq ($(INTERACTIVE), 1)
DOCKER_FLAGS += -t
endif
# on GitHub Runners the input device is not a TTY, so we allocate a pseudo-one;
# otherwise keep STDIN open even if not attached when not on a GitHub Runner.
ifeq ($(GITHUB_ACTIONS),true)
DOCKER_FLAGS += -t
else
DOCKER_FLAGS += -i
endif
DOCKER_RUN_DOCKER := $(DOCKER_FLAGS) "$(DOCKER_IMAGE)"
DOCKER_BUILD_ARGS += --build-arg=GO_VERSION
DOCKER_BUILD_ARGS += --build-arg=DOCKERCLI_VERSION
DOCKER_BUILD_ARGS += --build-arg=DOCKERCLI_REPOSITORY
DOCKER_BUILD_ARGS += --build-arg=DOCKERCLI_INTEGRATION_VERSION
DOCKER_BUILD_ARGS += --build-arg=DOCKERCLI_INTEGRATION_REPOSITORY
ifdef DOCKER_SYSTEMD
DOCKER_BUILD_ARGS += --build-arg=SYSTEMD=true
endif
BUILD_OPTS := ${DOCKER_BUILD_ARGS} ${DOCKER_BUILD_OPTS}
BUILD_CMD := $(BUILDX) build
BAKE_CMD := $(BUILDX) bake
default: binary
.PHONY: all
all: build ## validate all checks, build linux binaries, run all tests,\ncross build non-linux binaries, and generate archives
all: build ## validate all checks, build linux binaries, run all tests\ncross build non-linux binaries and generate archives
$(DOCKER_RUN_DOCKER) bash -c 'hack/validate/default && hack/make.sh'
.PHONY: binary
binary: bundles ## build statically linked linux binaries
$(BAKE_CMD) binary
binary: build ## build the linux binaries
$(DOCKER_RUN_DOCKER) hack/make.sh binary
.PHONY: dynbinary
dynbinary: bundles ## build dynamically linked linux binaries
$(BAKE_CMD) dynbinary
dynbinary: build ## build the linux dynbinaries
$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary
.PHONY: cross
cross: bundles ## cross build the binaries
$(BAKE_CMD) binary-cross
build: bundles init-go-pkg-cache
$(warning The docker client CLI has moved to github.com/docker/cli. For a dev-test cycle involving the CLI, run:${\n} DOCKER_CLI_PATH=/host/path/to/cli/binary make shell ${\n} then change the cli and compile into a binary at the same location.${\n})
docker build ${BUILD_APT_MIRROR} ${DOCKER_BUILD_ARGS} -t "$(DOCKER_IMAGE)" -f "$(DOCKERFILE)" .
bundles:
mkdir bundles
.PHONY: clean
clean: clean-cache
clean: clean-pkg-cache-vol ## clean up cached resources
.PHONY: clean-cache
clean-cache: ## remove the docker volumes that are used for caching in the dev-container
docker volume rm -f docker-dev-cache docker-mod-cache
clean-pkg-cache-vol:
@- $(foreach mapping,$(PKGCACHE_MAP), \
$(shell docker volume rm $(PKGCACHE_VOLROOT)-$(shell echo $(mapping) | awk -F':/' '{ print $$1 }') > /dev/null 2>&1) \
)
cross: build ## cross build the binaries for darwin, freebsd and\nwindows
$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary binary cross
.PHONY: help
help: ## this help
@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z0-9_-]+:.*?## / {gsub("\\\\n",sprintf("\n%22c",""), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)
@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {sub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)
init-go-pkg-cache:
$(if $(PKGCACHE_DIR), mkdir -p $(shell echo $(PKGCACHE_MAP) | sed -E 's@([^: ]*):[^ ]*@$(PKGCACHE_DIR)/\1@g'))
.PHONY: install
install: ## install the linux binaries
KEEPBUNDLE=1 hack/make.sh install-binary
.PHONY: run
run: build ## run the docker daemon in a container
$(DOCKER_RUN_DOCKER) sh -c "KEEPBUNDLE=1 hack/make.sh install-binary run"
.PHONY: build
ifeq ($(BIND_DIR), .)
build: shell_target := --target=dev-base
else
build: shell_target := --target=dev
endif
build: bundles
$(BUILD_CMD) $(BUILD_OPTS) $(shell_target) --load -t "$(DOCKER_IMAGE)" .
.PHONY: shell
shell: build ## start a shell inside the build env
$(DOCKER_RUN_DOCKER) bash
.PHONY: test
test: build test-unit ## run the unit, integration and docker-py tests
$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary test-integration test-docker-py
$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary cross test-integration test-docker-py
.PHONY: test-docker-py
test-docker-py: build ## run the docker-py tests
$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary test-docker-py
.PHONY: test-integration-cli
test-integration-cli: test-integration ## (DEPRECATED) use test-integration
.PHONY: test-integration
ifneq ($(and $(TEST_SKIP_INTEGRATION),$(TEST_SKIP_INTEGRATION_CLI)),)
test-integration:
@echo Both integration suites skipped per environment variables
else
test-integration: build ## run the integration tests
$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary test-integration
endif
.PHONY: test-integration-flaky
test-integration-flaky: build ## run the stress test for all new integration tests
$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary test-integration-flaky
.PHONY: test-unit
test-unit: build ## run the unit tests
$(DOCKER_RUN_DOCKER) hack/test/unit
.PHONY: validate
validate: build ## validate DCO, Seccomp profile generation, gofmt,\n./pkg/ isolation, golint, tests, tomls, go vet and vendor
$(DOCKER_RUN_DOCKER) hack/validate/all
.PHONY: validate-generate-files
validate-generate-files:
$(BUILD_CMD) --target "validate" \
--output "type=cacheonly" \
--file "./hack/dockerfiles/generate-files.Dockerfile" .
.PHONY: validate-%
validate-%: build ## validate specific check
$(DOCKER_RUN_DOCKER) hack/validate/$*
.PHONY: win
win: bundles ## cross build the binary for windows
$(BAKE_CMD) --set *.platform=windows/amd64 binary
win: build ## cross build the binary for windows
$(DOCKER_RUN_DOCKER) hack/make.sh win
.PHONY: swagger-gen
swagger-gen:
@@ -266,17 +189,20 @@ swagger-docs: ## preview the API documentation
@docker run --rm -v $(PWD)/api/swagger.yaml:/usr/share/nginx/html/swagger.yaml \
-e 'REDOC_OPTIONS=hide-hostname="true" lazy-rendering' \
-p $(SWAGGER_DOCS_PORT):80 \
bfirsh/redoc:1.14.0
bfirsh/redoc:1.6.2
.PHONY: generate-files
generate-files:
$(eval $@_TMP_OUT := $(shell mktemp -d -t moby-output.XXXXXXXXXX))
@if [ -z "$($@_TMP_OUT)" ]; then \
echo "Temp dir is not set"; \
exit 1; \
fi
$(BUILD_CMD) --target "update" \
--output "type=local,dest=$($@_TMP_OUT)" \
--file "./hack/dockerfiles/generate-files.Dockerfile" .
cp -R "$($@_TMP_OUT)"/. .
rm -rf "$($@_TMP_OUT)"/*
build-integration-cli-on-swarm: build ## build images and binary for running integration-cli on Swarm in parallel
@echo "Building hack/integration-cli-on-swarm (if build fails, please refer to hack/integration-cli-on-swarm/README.md)"
go build -buildmode=pie -o ./hack/integration-cli-on-swarm/integration-cli-on-swarm ./hack/integration-cli-on-swarm/host
@echo "Building $(INTEGRATION_CLI_MASTER_IMAGE)"
docker build -t $(INTEGRATION_CLI_MASTER_IMAGE) hack/integration-cli-on-swarm/agent
# For worker, we don't use `docker build` so as to enable DOCKER_INCREMENTAL_BINARY and so on
@echo "Building $(INTEGRATION_CLI_WORKER_IMAGE) from $(DOCKER_IMAGE)"
$(eval tmp := integration-cli-worker-tmp)
# We mount pkgcache, but not bundle (bundle needs to be baked into the image)
# To avoid baking DOCKER_GRAPHDRIVER and so on into the image, we cannot use $(DOCKER_ENVS) here
docker run -t -d --name $(tmp) -e DOCKER_GITCOMMIT -e BUILDFLAGS -e DOCKER_INCREMENTAL_BINARY --privileged $(DOCKER_MOUNT_PKGCACHE) $(DOCKER_IMAGE) top
docker exec $(tmp) hack/make.sh build-integration-test-binary dynbinary
docker exec $(tmp) go build -buildmode=pie -o /worker github.com/docker/docker/hack/integration-cli-on-swarm/agent/worker
docker commit -c 'ENTRYPOINT ["/worker"]' $(tmp) $(INTEGRATION_CLI_WORKER_IMAGE)
docker rm -f $(tmp)

NOTICE (2 lines changed)

@@ -3,7 +3,7 @@ Copyright 2012-2017 Docker, Inc.
This product includes software developed at Docker, Inc. (https://www.docker.com).
This product contains software (https://github.com/creack/pty) developed
This product contains software (https://github.com/kr/pty) developed
by Keith Rarick, licensed under the MIT License.
The following is courtesy of our legal counsel:


@@ -14,7 +14,7 @@ Moby is an open project guided by strong principles, aiming to be modular, flexi
It is open to the community to help set its direction.
- Modular: the project includes lots of components that have well-defined functions and APIs that work together.
- Batteries included but swappable: Moby includes enough components to build fully featured container systems, but its modular architecture ensures that most of the components can be swapped by different implementations.
- Batteries included but swappable: Moby includes enough components to build fully featured container system, but its modular architecture ensures that most of the components can be swapped by different implementations.
- Usable security: Moby provides secure defaults without compromising usability.
- Developer focused: The APIs are intended to be functional and useful to build powerful tools.
They are not necessarily intended as end user tools but as components aimed at developers.
@@ -32,7 +32,7 @@ New projects can be added if they fit with the community goals. Docker is commit
However, other projects are also encouraged to use Moby as an upstream, and to reuse the components in diverse ways, and all these uses will be treated in the same way. External maintainers and contributors are welcomed.
The Moby project is not intended as a location for support or feature requests for Docker products, but as a place for contributors to work on open source code, fix bugs, and make the code more useful.
The releases are supported by the maintainers, community and users, on a best efforts basis only. For customers who want enterprise or commercial support, [Docker Desktop](https://www.docker.com/products/docker-desktop/) and [Mirantis Container Runtime](https://www.mirantis.com/software/mirantis-container-runtime/) are the appropriate products for these use cases.
The releases are supported by the maintainers, community and users, on a best efforts basis only, and are not intended for customers who want enterprise or commercial support; Docker EE is the appropriate product for these use cases.
-----


@@ -35,83 +35,34 @@ issue, in the Slack channel, or in person at the Moby Summits that happen every
## 1.1 Runtime improvements
Over time we have accumulated a lot of functionality in the container runtime
aspect of Moby while also growing in other areas. Much of the container runtime
pieces are now duplicated work available in other, lower level components such
as [containerd](https://containerd.io).
We introduced [`runC`](https://runc.io) as a standalone low-level tool for container
execution in 2015, the first stage in spinning out parts of the Engine into standalone tools.
Moby currently only utilizes containerd for basic runtime state management, e.g. starting
and stopping a container, which is what the pre-containerd 1.0 daemon provided.
Now that containerd is a full-fledged container runtime which supports full
container life-cycle management, we would like to start relying more on containerd
and removing the bits in Moby which are now duplicated. This will necessitate
a significant effort to refactor and even remove large parts of Moby's codebase.
As runC continued evolving, and the OCI specification along with it, we created
[`containerd`](https://github.com/containerd/containerd), a daemon to control and monitor `runC`.
In late 2016 this was relaunched as the `containerd` 1.0 track, aiming to provide a common runtime
for the whole spectrum of container systems, including Kubernetes, with wide community support.
This change meant that there was an increased scope for `containerd`, including image management
and storage drivers.
Tracking issues:
Moby will rely on a long-running `containerd` companion daemon for all container execution
related operations. This could open the door in the future for Engine restarts without interrupting
running containers. The switch over to containerd 1.0 is an important goal for the project, and
will result in a significant simplification of the functions implemented in this repository.
- [#38043](https://github.com/moby/moby/issues/38043) Proposal: containerd image integration
## 1.2 Image Builder
Work is ongoing to integrate [BuildKit](https://github.com/moby/buildkit) into
Moby and replace the "v0" build implementation. Buildkit offers better cache
management, parallelizable build steps, and better extensibility while also
keeping builds portable, a chief tenet of Moby's builder.
Upon completion of this effort, users will have a builder that performs better
while also being more extensible, enabling users to provide their own custom
syntax which can be either Dockerfile-like or something completely different.
See [buildpacks on buildkit](https://github.com/tonistiigi/buildkit-pack) as an
example of this extensibility.
New features for the builder and Dockerfile should be implemented first in the
BuildKit backend using an external Dockerfile implementation from the container
images. This allows everyone to test and evaluate the feature without upgrading
their daemon. New features should go to the experimental channel first, and can be
part of the `docker/dockerfile:experimental` image. From there they graduate to
`docker/dockerfile:latest` and binary releases. The Dockerfile frontend source
code is temporarily located at
[https://github.com/moby/buildkit/tree/master/frontend/dockerfile](https://github.com/moby/buildkit/tree/master/frontend/dockerfile)
with separate new features defined with go build tags.
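In practice a build opts into such an external frontend with a syntax directive on the first line of the Dockerfile, e.g. `# syntax=docker/dockerfile:experimental`, which tells BuildKit to load the Dockerfile implementation from that image instead of using the built-in one.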
Tracking issues:
- [#32925](https://github.com/moby/moby/issues/32925) discussion: builder future: buildkit
## 1.3 Rootless Mode
Running the daemon requires elevated privileges for many tasks. We would like to
support running the daemon as a normal, unprivileged user without requiring `suid`
binaries.
Tracking issues:
- [#37375](https://github.com/moby/moby/issues/37375) Proposal: allow running `dockerd` as an unprivileged user (aka rootless mode)
## 1.4 Testing
Moby has many tests, both unit and integration. Moby needs more tests which can
cover the full spectrum of functionality and edge cases out there.
Tests in the `integration-cli` folder should also be migrated into (both in
location and style) the `integration` folder. These newer tests are simpler to
run in isolation, simpler to read, simpler to write, and more fully exercise the
API. Meanwhile tests of the docker CLI should generally live in docker/cli.
Tracking issues:
- [#32866](https://github.com/moby/moby/issues/32866) Replace integration-cli suite with API test suite
## 1.5 Internal decoupling
## 1.2 Internal decoupling
A lot of work has been done in trying to decouple Moby internals. This process of creating
standalone projects with a well defined function that attract a dedicated community should continue.
As well as integrating `containerd` we would like to integrate [BuildKit](https://github.com/moby/buildkit)
as the next standalone component.
We see gRPC as the natural communication layer between decoupled components.
In addition to pushing out large components into other projects, much of the
internal code structure, and in particular the
["Daemon"](https://godoc.org/github.com/docker/docker/daemon#Daemon) object,
should be split into smaller, more manageable, and more testable components.
## 1.3 Custom assembly tooling
We have been prototyping the Moby [assembly tool](https://github.com/moby/tool) which was originally
developed for LinuxKit and intend to turn it into a more generic packaging and assembly mechanism
that can build not only the default version of Moby, as distribution packages or other useful forms,
but can also build very different container systems, themselves built of cooperating daemons built in
and running in containers. We intend to merge this functionality into this repo.


@@ -1,9 +0,0 @@
# Reporting security issues
The Moby maintainers take security seriously. If you discover a security issue, please bring it to their attention right away!
### Reporting a Vulnerability
Please **DO NOT** file a public issue, instead send your report privately to security@docker.com.
Security reports are greatly appreciated and we will publicly thank you for it, although we keep your name confidential if you request it. We also like to send gifts—if you're into schwag, make sure to let us know. We currently do not offer a paid security bounty program, but are not ruling it out in the future.


@@ -28,7 +28,7 @@ Most code changes will fall into one of the following categories.
### Writing tests for new features
New code should be covered by unit tests. If the code is difficult to test with
unit tests, then that is a good sign that it should be refactored to make it
a unit tests then that is a good sign that it should be refactored to make it
easier to reuse and maintain. Consider accepting unexported interfaces instead
of structs so that fakes can be provided for dependencies.
@@ -44,23 +44,16 @@ case. Error cases should be handled by unit tests.
Bugs fixes should include a unit test case which exercises the bug.
A bug fix may also include new assertions in existing integration tests for the
A bug fix may also include new assertions in an existing integration tests for the
API endpoint.
### Writing new integration tests
Note the `integration-cli` tests are deprecated; new tests will be rejected by
the CI.
Instead, implement new tests under `integration/`.
### Integration tests environment considerations
When adding new tests or modifying existing tests under `integration/`, testing
When adding new tests or modifying existing test under `integration/`, testing
environment should be properly considered. `skip.If` from
[gotest.tools/skip](https://godoc.org/gotest.tools/skip) can be used to make the
test run conditionally. Full testing environment conditions can be found at
[environment.go](https://github.com/moby/moby/blob/6b6eeed03b963a27085ea670f40cd5ff8a61f32e/testutil/environment/environment.go)
[environment.go](https://github.com/moby/moby/blob/cb37987ee11655ed6bbef663d245e55922354c68/internal/test/environment/environment.go)
Here is a quick example. If the test needs to interact with a docker daemon on
the same host, the following condition should be checked within the test code
@@ -74,8 +67,6 @@ If a remote daemon is detected, the test will be skipped.
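The example itself is elided in this diff. A minimal sketch of such a guard, assuming the suite exposes the remote-daemon condition as a boolean (the real helpers provide it via the package-level test environment):

```go
package integration_test // hypothetical package name

import (
	"os"
	"testing"

	"gotest.tools/skip"
)

// isRemoteDaemon is a stand-in for the suite's real environment check;
// DOCKER_REMOTE_DAEMON is the variable the e2e image sets for remote runs.
var isRemoteDaemon = os.Getenv("DOCKER_REMOTE_DAEMON") != ""

func TestSomethingLocalOnly(t *testing.T) {
	// The test touches the daemon's local filesystem, so it cannot run
	// against a remote daemon; skip conditionally instead of failing.
	skip.If(t, isRemoteDaemon, "cannot run against a remote daemon")
	// ... test body ...
}
```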
## Running tests
### Unit Tests
To run the unit test suite:
```
@@ -91,36 +82,8 @@ The following environment variables may be used to run a subset of tests:
* `TESTFLAGS` - flags passed to `go test`, to run tests which match a pattern
use `TESTFLAGS="-test.run TestNameOrPrefix"`
### Integration Tests
To run the integration test suite:
```
make test-integration
```
This make target runs both the "integration" suite and the "integration-cli"
suite.
You can specify which integration test dirs to build and run by specifying
the list of dirs in the TEST_INTEGRATION_DIR environment variable.
You can also explicitly skip either suite by setting (to any value) the
TEST_SKIP_INTEGRATION and/or TEST_SKIP_INTEGRATION_CLI environment variables.
Flags specific to each suite can be set in the TESTFLAGS_INTEGRATION and
TESTFLAGS_INTEGRATION_CLI environment variables.
If all you want is to specify a test filter to run, you can set the
`TEST_FILTER` environment variable. This ends up getting passed directly to `go
test -run` (or `go test -check-f`, depending on the test suite). It will also
automatically set the other environment variables mentioned above accordingly.
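For example, `make TEST_FILTER=TestNameOrPrefix test-integration` (with a hypothetical test name) runs only the matching tests in both suites, since the Makefile forwards `TEST_FILTER` into the dev container.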
### Go Version
You can change the version of Go used to build the code under test
by setting the `GO_VERSION` variable, for example:
```
make GO_VERSION=1.12.8 test
```


@@ -37,6 +37,6 @@ There is hopefully enough example material in the file for you to copy a similar
When you make edits to `swagger.yaml`, you may want to check the generated API documentation to ensure it renders correctly.
Run `make swagger-docs` and a preview will be running at `http://localhost:9000`. Some of the styling may be incorrect, but you'll be able to ensure that it is generating the correct documentation.
The production documentation is generated by vendoring `swagger.yaml` into [docker/docker.github.io](https://github.com/docker/docker.github.io).


@@ -2,17 +2,8 @@ package api // import "github.com/docker/docker/api"
// Common constants for daemon and client.
const (
// DefaultVersion of the current REST API.
DefaultVersion = "1.47"
// MinSupportedAPIVersion is the minimum API version that can be supported
// by the API server, specified as "major.minor". Note that the daemon
// may be configured with a different minimum API version, as returned
// in [github.com/docker/docker/api/types.Version.MinAPIVersion].
//
// API requests for API versions lower than the configured version produce
// an error.
MinSupportedAPIVersion = "1.24"
// DefaultVersion of Current REST API
DefaultVersion = "1.38"
// NoBaseImageSpecifier is the symbol used by the FROM
// command to specify that no base image is to be used.

api/common_unix.go (new file)

@@ -0,0 +1,6 @@
// +build !windows
package api // import "github.com/docker/docker/api"
// MinVersion represents Minimum REST API version supported
const MinVersion = "1.12"

api/common_windows.go (new file)

@@ -0,0 +1,8 @@
package api // import "github.com/docker/docker/api"
// MinVersion represents Minimum REST API version supported
// Technically the first daemon API version released on Windows is v1.25 in
// engine version 1.13. However, some clients are explicitly using downlevel
// APIs (e.g. docker-compose v2.1 file format) and that is just too restrictive.
// Hence also allowing 1.24 on Windows.
const MinVersion string = "1.24"


@@ -3,25 +3,23 @@ package build // import "github.com/docker/docker/api/server/backend/build"
import (
"context"
"fmt"
"strconv"
"github.com/distribution/reference"
"github.com/docker/distribution/reference"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/api/types/events"
"github.com/docker/docker/builder"
buildkit "github.com/docker/docker/builder/builder-next"
daemonevents "github.com/docker/docker/daemon/events"
"github.com/docker/docker/builder/fscache"
"github.com/docker/docker/image"
"github.com/docker/docker/pkg/stringid"
"github.com/pkg/errors"
"google.golang.org/grpc"
"golang.org/x/sync/errgroup"
)
// ImageComponent provides an interface for working with images
type ImageComponent interface {
SquashImage(from string, to string) (string, error)
TagImage(context.Context, image.ID, reference.Named) error
TagImageWithReference(image.ID, reference.Named) error
}
// Builder defines interface for running a build
@@ -32,21 +30,14 @@ type Builder interface {
// Backend provides build functionality to the API router
type Backend struct {
builder Builder
fsCache *fscache.FSCache
imageComponent ImageComponent
buildkit *buildkit.Builder
eventsService *daemonevents.Events
}
// NewBackend creates a new build backend from components
func NewBackend(components ImageComponent, builder Builder, buildkit *buildkit.Builder, es *daemonevents.Events) (*Backend, error) {
return &Backend{imageComponent: components, builder: builder, buildkit: buildkit, eventsService: es}, nil
}
// RegisterGRPC registers buildkit controller to the grpc server.
func (b *Backend) RegisterGRPC(s *grpc.Server) {
if b.buildkit != nil {
b.buildkit.RegisterGRPC(s)
}
func NewBackend(components ImageComponent, builder Builder, fsCache *fscache.FSCache, buildkit *buildkit.Builder) (*Backend, error) {
return &Backend{imageComponent: components, builder: builder, fsCache: fsCache, buildkit: buildkit}, nil
}
// Build builds an image from a Source
@@ -54,7 +45,7 @@ func (b *Backend) Build(ctx context.Context, config backend.BuildConfig) (string
options := config.Options
useBuildKit := options.Version == types.BuilderBuildKit
tags, err := sanitizeRepoAndTags(options.Tags)
tagger, err := NewTagger(b.imageComponent, config.ProgressWriter.StdoutFormatter, options.Tags)
if err != nil {
return "", err
}
@@ -76,7 +67,7 @@ func (b *Backend) Build(ctx context.Context, config backend.BuildConfig) (string
return "", nil
}
imageID := build.ImageID
var imageID = build.ImageID
if options.Squash {
if imageID, err = squashBuild(build, b.imageComponent); err != nil {
return "", err
@@ -88,26 +79,43 @@ func (b *Backend) Build(ctx context.Context, config backend.BuildConfig) (string
}
}
if imageID != "" && !useBuildKit {
if !useBuildKit {
stdout := config.ProgressWriter.StdoutFormatter
_, _ = fmt.Fprintf(stdout, "Successfully built %s\n", stringid.TruncateID(imageID))
err = tagImages(ctx, b.imageComponent, config.ProgressWriter.StdoutFormatter, image.ID(imageID), tags)
fmt.Fprintf(stdout, "Successfully built %s\n", stringid.TruncateID(imageID))
err = tagger.TagImages(image.ID(imageID))
}
return imageID, err
}
// PruneCache removes all cached build sources
func (b *Backend) PruneCache(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error) {
buildCacheSize, cacheIDs, err := b.buildkit.Prune(ctx, opts)
if err != nil {
return nil, errors.Wrap(err, "failed to prune build cache")
}
b.eventsService.Log(events.ActionPrune, events.BuilderEventType, events.Actor{
Attributes: map[string]string{
"reclaimed": strconv.FormatInt(buildCacheSize, 10),
},
func (b *Backend) PruneCache(ctx context.Context) (*types.BuildCachePruneReport, error) {
eg, ctx := errgroup.WithContext(ctx)
var fsCacheSize uint64
eg.Go(func() error {
var err error
fsCacheSize, err = b.fsCache.Prune(ctx)
if err != nil {
return errors.Wrap(err, "failed to prune fscache")
}
return nil
})
return &types.BuildCachePruneReport{SpaceReclaimed: uint64(buildCacheSize), CachesDeleted: cacheIDs}, nil
var buildCacheSize int64
eg.Go(func() error {
var err error
buildCacheSize, err = b.buildkit.Prune(ctx)
if err != nil {
return errors.Wrap(err, "failed to prune build cache")
}
return nil
})
if err := eg.Wait(); err != nil {
return nil, err
}
return &types.BuildCachePruneReport{SpaceReclaimed: fsCacheSize + uint64(buildCacheSize)}, nil
}
// Cancel cancels the build by ID
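The concurrent prune above is a standard errgroup pattern; here is a self-contained sketch of the same shape (illustrative stand-ins, not the real prune calls):

```
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

// pruneAll runs two independent prune operations concurrently and sums the
// space they reclaim; the first error cancels the shared context and is
// returned from Wait.
func pruneAll(ctx context.Context) (uint64, error) {
	eg, ctx := errgroup.WithContext(ctx)

	var fsReclaimed, buildReclaimed uint64
	eg.Go(func() error {
		var err error
		fsReclaimed, err = fakePrune(ctx, 100) // stand-in for fsCache.Prune(ctx)
		return err
	})
	eg.Go(func() error {
		var err error
		buildReclaimed, err = fakePrune(ctx, 200) // stand-in for buildkit.Prune(ctx)
		return err
	})
	if err := eg.Wait(); err != nil {
		return 0, err
	}
	return fsReclaimed + buildReclaimed, nil
}

func fakePrune(ctx context.Context, n uint64) (uint64, error) {
	return n, ctx.Err() // nil unless the sibling goroutine failed first
}

func main() {
	total, err := pruneAll(context.Background())
	fmt.Println(total, err) // 300 <nil>
}
```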


@@ -1,31 +1,55 @@
package build // import "github.com/docker/docker/api/server/backend/build"
import (
"context"
"fmt"
"io"
"github.com/distribution/reference"
"github.com/docker/distribution/reference"
"github.com/docker/docker/image"
"github.com/pkg/errors"
)
// tagImages creates image tags for the imageID.
func tagImages(ctx context.Context, ic ImageComponent, stdout io.Writer, imageID image.ID, repoAndTags []reference.Named) error {
for _, rt := range repoAndTags {
if err := ic.TagImage(ctx, imageID, rt); err != nil {
// Tagger is responsible for tagging an image created by a builder
type Tagger struct {
imageComponent ImageComponent
stdout io.Writer
repoAndTags []reference.Named
}
// NewTagger returns a new Tagger for tagging the images of a build.
// If any of the names are invalid tags an error is returned.
func NewTagger(backend ImageComponent, stdout io.Writer, names []string) (*Tagger, error) {
reposAndTags, err := sanitizeRepoAndTags(names)
if err != nil {
return nil, err
}
return &Tagger{
imageComponent: backend,
stdout: stdout,
repoAndTags: reposAndTags,
}, nil
}
// TagImages creates image tags for the imageID
func (bt *Tagger) TagImages(imageID image.ID) error {
for _, rt := range bt.repoAndTags {
if err := bt.imageComponent.TagImageWithReference(imageID, rt); err != nil {
return err
}
_, _ = fmt.Fprintln(stdout, "Successfully tagged", reference.FamiliarString(rt))
fmt.Fprintf(bt.stdout, "Successfully tagged %s\n", reference.FamiliarString(rt))
}
return nil
}
// sanitizeRepoAndTags parses the raw "t" parameter received from the client
// to a slice of repoAndTag. It removes duplicates, and validates each name
// to not contain a digest.
func sanitizeRepoAndTags(names []string) (repoAndTags []reference.Named, err error) {
uniqNames := map[string]struct{}{}
// to a slice of repoAndTag.
// It also validates each repoName and tag.
func sanitizeRepoAndTags(names []string) ([]reference.Named, error) {
var (
repoAndTags []reference.Named
// This map is used for deduplicating the "-t" parameter.
uniqNames = make(map[string]struct{})
)
for _, repo := range names {
if repo == "" {
continue
@@ -36,12 +60,14 @@ func sanitizeRepoAndTags(names []string) (repoAndTags []reference.Named, err err
return nil, err
}
if _, ok := ref.(reference.Digested); ok {
if _, isCanonical := ref.(reference.Canonical); isCanonical {
return nil, errors.New("build tag cannot contain a digest")
}
ref = reference.TagNameOnly(ref)
nameWithTag := ref.String()
if _, exists := uniqNames[nameWithTag]; !exists {
uniqNames[nameWithTag] = struct{}{}
repoAndTags = append(repoAndTags, ref)


@@ -1,143 +0,0 @@
package httpstatus // import "github.com/docker/docker/api/server/httpstatus"
import (
"context"
"fmt"
"net/http"
cerrdefs "github.com/containerd/errdefs"
"github.com/containerd/log"
"github.com/docker/distribution/registry/api/errcode"
"github.com/docker/docker/errdefs"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
type causer interface {
Cause() error
}
// FromError retrieves status code from error message.
func FromError(err error) int {
if err == nil {
log.G(context.TODO()).WithError(err).Error("unexpected HTTP error handling")
return http.StatusInternalServerError
}
// Stop right there
// Are you sure you should be adding a new error class here? Do one of the existing ones work?
// Note that the below functions are already checking the error causal chain for matches.
switch {
case errdefs.IsNotFound(err):
return http.StatusNotFound
case errdefs.IsInvalidParameter(err):
return http.StatusBadRequest
case errdefs.IsConflict(err):
return http.StatusConflict
case errdefs.IsUnauthorized(err):
return http.StatusUnauthorized
case errdefs.IsUnavailable(err):
return http.StatusServiceUnavailable
case errdefs.IsForbidden(err):
return http.StatusForbidden
case errdefs.IsNotModified(err):
return http.StatusNotModified
case errdefs.IsNotImplemented(err):
return http.StatusNotImplemented
case errdefs.IsSystem(err) || errdefs.IsUnknown(err) || errdefs.IsDataLoss(err) || errdefs.IsDeadline(err) || errdefs.IsCancelled(err):
return http.StatusInternalServerError
default:
if statusCode := statusCodeFromGRPCError(err); statusCode != http.StatusInternalServerError {
return statusCode
}
if statusCode := statusCodeFromContainerdError(err); statusCode != http.StatusInternalServerError {
return statusCode
}
if statusCode := statusCodeFromDistributionError(err); statusCode != http.StatusInternalServerError {
return statusCode
}
if e, ok := err.(causer); ok {
return FromError(e.Cause())
}
log.G(context.TODO()).WithFields(log.Fields{
"module": "api",
"error": err,
"error_type": fmt.Sprintf("%T", err),
}).Debug("FIXME: Got an API for which error does not match any expected type!!!")
return http.StatusInternalServerError
}
}
// statusCodeFromGRPCError returns status code according to gRPC error
func statusCodeFromGRPCError(err error) int {
switch status.Code(err) {
case codes.InvalidArgument: // code 3
return http.StatusBadRequest
case codes.NotFound: // code 5
return http.StatusNotFound
case codes.AlreadyExists: // code 6
return http.StatusConflict
case codes.PermissionDenied: // code 7
return http.StatusForbidden
case codes.FailedPrecondition: // code 9
return http.StatusBadRequest
case codes.Unauthenticated: // code 16
return http.StatusUnauthorized
case codes.OutOfRange: // code 11
return http.StatusBadRequest
case codes.Unimplemented: // code 12
return http.StatusNotImplemented
case codes.Unavailable: // code 14
return http.StatusServiceUnavailable
default:
// codes.Canceled(1)
// codes.Unknown(2)
// codes.DeadlineExceeded(4)
// codes.ResourceExhausted(8)
// codes.Aborted(10)
// codes.Internal(13)
// codes.DataLoss(15)
return http.StatusInternalServerError
}
}
// statusCodeFromDistributionError returns status code according to registry errcode
// code is loosely based on errcode.ServeJSON() in docker/distribution
func statusCodeFromDistributionError(err error) int {
switch errs := err.(type) {
case errcode.Errors:
if len(errs) < 1 {
return http.StatusInternalServerError
}
if _, ok := errs[0].(errcode.ErrorCoder); ok {
return statusCodeFromDistributionError(errs[0])
}
case errcode.ErrorCoder:
return errs.ErrorCode().Descriptor().HTTPStatusCode
}
return http.StatusInternalServerError
}
// statusCodeFromContainerdError returns status code for containerd errors when
// consumed directly (not through gRPC)
func statusCodeFromContainerdError(err error) int {
switch {
case cerrdefs.IsInvalidArgument(err):
return http.StatusBadRequest
case cerrdefs.IsNotFound(err):
return http.StatusNotFound
case cerrdefs.IsAlreadyExists(err):
return http.StatusConflict
case cerrdefs.IsFailedPrecondition(err):
return http.StatusPreconditionFailed
case cerrdefs.IsUnavailable(err):
return http.StatusServiceUnavailable
case cerrdefs.IsNotImplemented(err):
return http.StatusNotImplemented
default:
return http.StatusInternalServerError
}
}


@@ -12,4 +12,5 @@ import (
// container configuration.
type ContainerDecoder interface {
DecodeConfig(src io.Reader) (*container.Config, *container.HostConfig, *network.NetworkingConfig, error)
DecodeHostConfig(src io.Reader) (*container.HostConfig, error)
}


@@ -0,0 +1,131 @@
package httputils // import "github.com/docker/docker/api/server/httputils"
import (
"fmt"
"net/http"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/errdefs"
"github.com/gorilla/mux"
"github.com/sirupsen/logrus"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
)
type causer interface {
Cause() error
}
// GetHTTPErrorStatusCode retrieves status code from error message.
func GetHTTPErrorStatusCode(err error) int {
if err == nil {
logrus.WithFields(logrus.Fields{"error": err}).Error("unexpected HTTP error handling")
return http.StatusInternalServerError
}
var statusCode int
// Stop right there
// Are you sure you should be adding a new error class here? Do one of the existing ones work?
// Note that the below functions are already checking the error causal chain for matches.
switch {
case errdefs.IsNotFound(err):
statusCode = http.StatusNotFound
case errdefs.IsInvalidParameter(err):
statusCode = http.StatusBadRequest
case errdefs.IsConflict(err) || errdefs.IsAlreadyExists(err):
statusCode = http.StatusConflict
case errdefs.IsUnauthorized(err):
statusCode = http.StatusUnauthorized
case errdefs.IsUnavailable(err):
statusCode = http.StatusServiceUnavailable
case errdefs.IsForbidden(err):
statusCode = http.StatusForbidden
case errdefs.IsNotModified(err):
statusCode = http.StatusNotModified
case errdefs.IsNotImplemented(err):
statusCode = http.StatusNotImplemented
case errdefs.IsSystem(err) || errdefs.IsUnknown(err) || errdefs.IsDataLoss(err) || errdefs.IsDeadline(err) || errdefs.IsCancelled(err):
statusCode = http.StatusInternalServerError
default:
statusCode = statusCodeFromGRPCError(err)
if statusCode != http.StatusInternalServerError {
return statusCode
}
if e, ok := err.(causer); ok {
return GetHTTPErrorStatusCode(e.Cause())
}
logrus.WithFields(logrus.Fields{
"module": "api",
"error_type": fmt.Sprintf("%T", err),
}).Debugf("FIXME: Got an API for which error does not match any expected type!!!: %+v", err)
}
if statusCode == 0 {
statusCode = http.StatusInternalServerError
}
return statusCode
}
func apiVersionSupportsJSONErrors(version string) bool {
const firstAPIVersionWithJSONErrors = "1.23"
return version == "" || versions.GreaterThan(version, firstAPIVersionWithJSONErrors)
}
// MakeErrorHandler makes an HTTP handler that decodes a Docker error and
// returns it in the response.
func MakeErrorHandler(err error) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
statusCode := GetHTTPErrorStatusCode(err)
vars := mux.Vars(r)
if apiVersionSupportsJSONErrors(vars["version"]) {
response := &types.ErrorResponse{
Message: err.Error(),
}
WriteJSON(w, statusCode, response)
} else {
http.Error(w, grpc.ErrorDesc(err), statusCode)
}
}
}
// statusCodeFromGRPCError returns status code according to gRPC error
func statusCodeFromGRPCError(err error) int {
switch grpc.Code(err) {
case codes.InvalidArgument: // code 3
return http.StatusBadRequest
case codes.NotFound: // code 5
return http.StatusNotFound
case codes.AlreadyExists: // code 6
return http.StatusConflict
case codes.PermissionDenied: // code 7
return http.StatusForbidden
case codes.FailedPrecondition: // code 9
return http.StatusBadRequest
case codes.Unauthenticated: // code 16
return http.StatusUnauthorized
case codes.OutOfRange: // code 11
return http.StatusBadRequest
case codes.Unimplemented: // code 12
return http.StatusNotImplemented
case codes.Unavailable: // code 14
return http.StatusServiceUnavailable
default:
if e, ok := err.(causer); ok {
return statusCodeFromGRPCError(e.Cause())
}
// codes.Canceled(1)
// codes.Unknown(2)
// codes.DeadlineExceeded(4)
// codes.ResourceExhausted(8)
// codes.Aborted(10)
// codes.Internal(13)
// codes.DataLoss(15)
return http.StatusInternalServerError
}
}
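For illustration (a hypothetical caller, not from this diff): an error carrying a gRPC NotFound code falls through the errdefs checks above into statusCodeFromGRPCError and maps to HTTP 404.

```
package main

import (
	"fmt"

	"github.com/docker/docker/api/server/httputils"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	// An error produced by a gRPC backend...
	err := status.Error(codes.NotFound, "no such container")

	// ...maps to the corresponding HTTP status code.
	fmt.Println(httputils.GetHTTPErrorStatusCode(err)) // 404
}
```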


@@ -1,17 +1,9 @@
package httputils // import "github.com/docker/docker/api/server/httputils"
import (
"encoding/json"
"fmt"
"net/http"
"strconv"
"strings"
"github.com/distribution/reference"
"github.com/docker/docker/errdefs"
"github.com/pkg/errors"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
// BoolValue transforms a form value in different formats into a boolean type.
@@ -49,38 +41,6 @@ func Int64ValueOrDefault(r *http.Request, field string, def int64) (int64, error
return def, nil
}
// RepoTagReference parses form values "repo" and "tag" and returns a valid
// reference with repository and tag.
// If repo is empty, then a nil reference is returned.
// If no tag is given, then the default "latest" tag is set.
func RepoTagReference(repo, tag string) (reference.NamedTagged, error) {
if repo == "" {
return nil, nil
}
ref, err := reference.ParseNormalizedNamed(repo)
if err != nil {
return nil, err
}
if _, isDigested := ref.(reference.Digested); isDigested {
return nil, fmt.Errorf("cannot import digest reference")
}
if tag != "" {
return reference.WithTag(ref, tag)
}
withDefaultTag := reference.TagNameOnly(ref)
namedTagged, ok := withDefaultTag.(reference.NamedTagged)
if !ok {
return nil, fmt.Errorf("unexpected reference: %q", ref.String())
}
return namedTagged, nil
}
// ArchiveOptions stores archive information for different operations.
type ArchiveOptions struct {
Name string
@@ -114,24 +74,3 @@ func ArchiveFormValues(r *http.Request, vars map[string]string) (ArchiveOptions,
}
return ArchiveOptions{name, path}, nil
}
// DecodePlatform decodes the OCI platform JSON string into a Platform struct.
func DecodePlatform(platformJSON string) (*ocispec.Platform, error) {
var p ocispec.Platform
if err := json.Unmarshal([]byte(platformJSON), &p); err != nil {
return nil, errdefs.InvalidParameter(errors.Wrap(err, "failed to parse platform"))
}
hasAnyOptional := (p.Variant != "" || p.OSVersion != "" || len(p.OSFeatures) > 0)
if p.OS == "" && p.Architecture == "" && hasAnyOptional {
return nil, errdefs.InvalidParameter(errors.New("optional platform fields provided, but OS and Architecture are missing"))
}
if p.OS == "" || p.Architecture == "" {
return nil, errdefs.InvalidParameter(errors.New("both OS and Architecture must be provided"))
}
return &p, nil
}

View File

@@ -1,16 +1,9 @@
package httputils // import "github.com/docker/docker/api/server/httputils"
import (
"encoding/json"
"net/http"
"net/url"
"testing"
"github.com/containerd/platforms"
"github.com/docker/docker/errdefs"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"gotest.tools/v3/assert"
)
func TestBoolValue(t *testing.T) {
@@ -30,7 +23,7 @@ func TestBoolValue(t *testing.T) {
for c, e := range cases {
v := url.Values{}
v.Set("test", c)
r, _ := http.NewRequest(http.MethodPost, "", nil)
r, _ := http.NewRequest("POST", "", nil)
r.Form = v
a := BoolValue(r, "test")
@@ -41,14 +34,14 @@ func TestBoolValue(t *testing.T) {
}
func TestBoolValueOrDefault(t *testing.T) {
r, _ := http.NewRequest(http.MethodGet, "", nil)
r, _ := http.NewRequest("GET", "", nil)
if !BoolValueOrDefault(r, "queryparam", true) {
t.Fatal("Expected to get true default value, got false")
}
v := url.Values{}
v.Set("param", "")
r, _ = http.NewRequest(http.MethodGet, "", nil)
r, _ = http.NewRequest("GET", "", nil)
r.Form = v
if BoolValueOrDefault(r, "param", true) {
t.Fatal("Expected not to get true")
@@ -66,7 +59,7 @@ func TestInt64ValueOrZero(t *testing.T) {
for c, e := range cases {
v := url.Values{}
v.Set("test", c)
r, _ := http.NewRequest(http.MethodPost, "", nil)
r, _ := http.NewRequest("POST", "", nil)
r.Form = v
a := Int64ValueOrZero(r, "test")
@@ -86,7 +79,7 @@ func TestInt64ValueOrDefault(t *testing.T) {
for c, e := range cases {
v := url.Values{}
v.Set("test", c)
r, _ := http.NewRequest(http.MethodPost, "", nil)
r, _ := http.NewRequest("POST", "", nil)
r.Form = v
a, err := Int64ValueOrDefault(r, "test", -1)
@@ -102,7 +95,7 @@ func TestInt64ValueOrDefault(t *testing.T) {
func TestInt64ValueOrDefaultWithError(t *testing.T) {
v := url.Values{}
v.Set("test", "invalid")
r, _ := http.NewRequest(http.MethodPost, "", nil)
r, _ := http.NewRequest("POST", "", nil)
r.Form = v
_, err := Int64ValueOrDefault(r, "test", -1)
@@ -110,23 +103,3 @@ func TestInt64ValueOrDefaultWithError(t *testing.T) {
t.Fatal("Expected an error.")
}
}
func TestParsePlatformInvalid(t *testing.T) {
for _, tc := range []ocispec.Platform{
{
OSVersion: "1.2.3",
OSFeatures: []string{"a", "b"},
},
{OSVersion: "12.0"},
{OS: "linux"},
{Architecture: "amd64"},
} {
t.Run(platforms.Format(tc), func(t *testing.T) {
js, err := json.Marshal(tc)
assert.NilError(t, err)
_, err = DecodePlatform(string(js))
assert.Check(t, errdefs.IsInvalidParameter(err))
})
}
}

View File

@@ -2,7 +2,6 @@ package httputils // import "github.com/docker/docker/api/server/httputils"
import (
"context"
"encoding/json"
"io"
"mime"
"net/http"
@@ -10,10 +9,13 @@ import (
"github.com/docker/docker/errdefs"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
type contextKey string
// APIVersionKey is the client's requested API version.
type APIVersionKey struct{}
const APIVersionKey contextKey = "api-version"
// APIFunc is an adapter to allow the use of ordinary functions as Docker API endpoints.
// Any function that has the appropriate signature can be registered as an API endpoint (e.g. getVersion).
@@ -27,7 +29,7 @@ func HijackConnection(w http.ResponseWriter) (io.ReadCloser, io.Writer, error) {
return nil, nil, err
}
// Flush the options to make sure the client sets the raw mode
_, _ = conn.Write([]byte{})
conn.Write([]byte{})
return conn, conn, nil
}
@@ -37,9 +39,9 @@ func CloseStreams(streams ...interface{}) {
if tcpc, ok := stream.(interface {
CloseWrite() error
}); ok {
_ = tcpc.CloseWrite()
tcpc.CloseWrite()
} else if closer, ok := stream.(io.Closer); ok {
_ = closer.Close()
closer.Close()
}
}
}
@@ -49,50 +51,17 @@ func CheckForJSON(r *http.Request) error {
ct := r.Header.Get("Content-Type")
// No Content-Type header is ok as long as there's no Body
if ct == "" && (r.Body == nil || r.ContentLength == 0) {
return nil
if ct == "" {
if r.Body == nil || r.ContentLength == 0 {
return nil
}
}
// Otherwise it better be json
return matchesContentType(ct, "application/json")
}
// ReadJSON validates the request to have the correct content-type, and decodes
// the request's Body into out.
func ReadJSON(r *http.Request, out interface{}) error {
err := CheckForJSON(r)
if err != nil {
return err
}
if r.Body == nil || r.ContentLength == 0 {
// an empty body is not invalid, so don't return an error; see
// https://lists.w3.org/Archives/Public/ietf-http-wg/2010JulSep/0272.html
if matchesContentType(ct, "application/json") {
return nil
}
dec := json.NewDecoder(r.Body)
err = dec.Decode(out)
defer r.Body.Close()
if err != nil {
if err == io.EOF {
return errdefs.InvalidParameter(errors.New("invalid JSON: got EOF while reading request body"))
}
return errdefs.InvalidParameter(errors.Wrap(err, "invalid JSON"))
}
if dec.More() {
return errdefs.InvalidParameter(errors.New("unexpected content after JSON"))
}
return nil
}
// WriteJSON writes the value v to the http response stream as json with standard json encoding.
func WriteJSON(w http.ResponseWriter, code int, v interface{}) error {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(code)
enc := json.NewEncoder(w)
enc.SetEscapeHTML(false)
return enc.Encode(v)
return errdefs.InvalidParameter(errors.Errorf("Content-Type specified (%s) must be 'application/json'", ct))
}
// ParseForm ensures the request form is parsed even with invalid content types.
@@ -114,7 +83,7 @@ func VersionFromContext(ctx context.Context) string {
return ""
}
if val := ctx.Value(APIVersionKey{}); val != nil {
if val := ctx.Value(APIVersionKey); val != nil {
return val.(string)
}
@@ -122,13 +91,10 @@ func VersionFromContext(ctx context.Context) string {
}
// matchesContentType validates the content type against the expected one
func matchesContentType(contentType, expectedType string) error {
func matchesContentType(contentType, expectedType string) bool {
mimetype, _, err := mime.ParseMediaType(contentType)
if err != nil {
return errdefs.InvalidParameter(errors.Wrapf(err, "malformed Content-Type header (%s)", contentType))
logrus.Errorf("Error parsing media type: %s error: %v", contentType, err)
}
if mimetype != expectedType {
return errdefs.InvalidParameter(errors.Errorf("unsupported Content-Type header (%s): must be '%s'", contentType, expectedType))
}
return nil
return err == nil && mimetype == expectedType
}


@@ -1,130 +1,18 @@
package httputils // import "github.com/docker/docker/api/server/httputils"
import (
"net/http"
"strings"
"testing"
)
import "testing"
// matchesContentType
func TestJsonContentType(t *testing.T) {
err := matchesContentType("application/json", "application/json")
if err != nil {
t.Error(err)
if !matchesContentType("application/json", "application/json") {
t.Fail()
}
err = matchesContentType("application/json; charset=utf-8", "application/json")
if err != nil {
t.Error(err)
if !matchesContentType("application/json; charset=utf-8", "application/json") {
t.Fail()
}
expected := "unsupported Content-Type header (dockerapplication/json): must be 'application/json'"
err = matchesContentType("dockerapplication/json", "application/json")
if err == nil || err.Error() != expected {
t.Errorf(`expected "%s", got "%v"`, expected, err)
}
expected = "malformed Content-Type header (foo;;;bar): mime: invalid media parameter"
err = matchesContentType("foo;;;bar", "application/json")
if err == nil || err.Error() != expected {
t.Errorf(`expected "%s", got "%v"`, expected, err)
if matchesContentType("dockerapplication/json", "application/json") {
t.Fail()
}
}
func TestReadJSON(t *testing.T) {
t.Run("nil body", func(t *testing.T) {
req, err := http.NewRequest(http.MethodPost, "https://example.com/some/path", nil)
if err != nil {
t.Error(err)
}
foo := struct{}{}
err = ReadJSON(req, &foo)
if err != nil {
t.Error(err)
}
})
t.Run("empty body", func(t *testing.T) {
req, err := http.NewRequest(http.MethodPost, "https://example.com/some/path", strings.NewReader(""))
if err != nil {
t.Error(err)
}
foo := struct{ SomeField string }{}
err = ReadJSON(req, &foo)
if err != nil {
t.Error(err)
}
if foo.SomeField != "" {
t.Errorf("expected: '', got: %s", foo.SomeField)
}
})
t.Run("with valid request", func(t *testing.T) {
req, err := http.NewRequest(http.MethodPost, "https://example.com/some/path", strings.NewReader(`{"SomeField":"some value"}`))
if err != nil {
t.Error(err)
}
req.Header.Set("Content-Type", "application/json")
foo := struct{ SomeField string }{}
err = ReadJSON(req, &foo)
if err != nil {
t.Error(err)
}
if foo.SomeField != "some value" {
t.Errorf("expected: 'some value', got: %s", foo.SomeField)
}
})
t.Run("with whitespace", func(t *testing.T) {
req, err := http.NewRequest(http.MethodPost, "https://example.com/some/path", strings.NewReader(`
{"SomeField":"some value"}
`))
if err != nil {
t.Error(err)
}
req.Header.Set("Content-Type", "application/json")
foo := struct{ SomeField string }{}
err = ReadJSON(req, &foo)
if err != nil {
t.Error(err)
}
if foo.SomeField != "some value" {
t.Errorf("expected: 'some value', got: %s", foo.SomeField)
}
})
t.Run("with extra content", func(t *testing.T) {
req, err := http.NewRequest(http.MethodPost, "https://example.com/some/path", strings.NewReader(`{"SomeField":"some value"} and more content`))
if err != nil {
t.Error(err)
}
req.Header.Set("Content-Type", "application/json")
foo := struct{ SomeField string }{}
err = ReadJSON(req, &foo)
if err == nil {
t.Error("expected an error, got none")
}
expected := "unexpected content after JSON"
if err.Error() != expected {
t.Errorf("expected: '%s', got: %s", expected, err.Error())
}
})
t.Run("invalid JSON", func(t *testing.T) {
req, err := http.NewRequest(http.MethodPost, "https://example.com/some/path", strings.NewReader(`{invalid json`))
if err != nil {
t.Error(err)
}
req.Header.Set("Content-Type", "application/json")
foo := struct{ SomeField string }{}
err = ReadJSON(req, &foo)
if err == nil {
t.Error("expected an error, got none")
}
expected := "invalid JSON: invalid character 'i' looking for beginning of object key string"
if err.Error() != expected {
t.Errorf("expected: '%s', got: %s", expected, err.Error())
}
})
}


@@ -0,0 +1,15 @@
package httputils // import "github.com/docker/docker/api/server/httputils"
import (
"encoding/json"
"net/http"
)
// WriteJSON writes the value v to the http response stream as json with standard json encoding.
func WriteJSON(w http.ResponseWriter, code int, v interface{}) error {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(code)
enc := json.NewEncoder(w)
enc.SetEscapeHTML(false)
return enc.Encode(v)
}


@@ -4,12 +4,11 @@ import (
"context"
"fmt"
"io"
"net/http"
"net/url"
"sort"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/docker/docker/pkg/stdcopy"
@@ -17,11 +16,7 @@ import (
// WriteLogStream writes an encoded byte stream of log messages from the
// messages channel, multiplexing them with a stdcopy.Writer if mux is true
func WriteLogStream(_ context.Context, w http.ResponseWriter, msgs <-chan *backend.LogMessage, config *container.LogsOptions, mux bool) {
// See https://github.com/moby/moby/issues/47448
// Trigger headers to be written immediately.
w.WriteHeader(http.StatusOK)
func WriteLogStream(_ context.Context, w io.Writer, msgs <-chan *backend.LogMessage, config *types.ContainerLogsOptions, mux bool) {
wf := ioutils.NewWriteFlusher(w)
defer wf.Close()
@@ -56,10 +51,10 @@ func WriteLogStream(_ context.Context, w http.ResponseWriter, msgs <-chan *backe
logLine = append([]byte(msg.Timestamp.Format(jsonmessage.RFC3339NanoFixed)+" "), logLine...)
}
if msg.Source == "stdout" && config.ShowStdout {
_, _ = outStream.Write(logLine)
outStream.Write(logLine)
}
if msg.Source == "stderr" && config.ShowStderr {
_, _ = errStream.Write(logLine)
errStream.Write(logLine)
}
}
}
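When `mux` is true the stream uses stdcopy framing; a self-contained sketch (illustrative only) of writing a multiplexed stream and demultiplexing it on the read side:

```
package main

import (
	"bytes"
	"os"

	"github.com/docker/docker/pkg/stdcopy"
)

func main() {
	// Multiplex stdout and stderr records into a single stream, as the
	// logs endpoint does when mux is true.
	var stream bytes.Buffer
	outStream := stdcopy.NewStdWriter(&stream, stdcopy.Stdout)
	errStream := stdcopy.NewStdWriter(&stream, stdcopy.Stderr)
	_, _ = outStream.Write([]byte("a stdout line\n"))
	_, _ = errStream.Write([]byte("a stderr line\n"))

	// A client demultiplexes the stream back into two writers.
	if _, err := stdcopy.StdCopy(os.Stdout, os.Stderr, &stream); err != nil {
		panic(err)
	}
}
```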


@@ -1,9 +1,9 @@
package server // import "github.com/docker/docker/api/server"
import (
"github.com/containerd/log"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/server/middleware"
"github.com/sirupsen/logrus"
)
// handlerWithGlobalMiddlewares wraps the handler function for a request with
@@ -16,7 +16,7 @@ func (s *Server) handlerWithGlobalMiddlewares(handler httputils.APIFunc) httputi
next = m.WrapHandler(next)
}
if log.GetLevel() == log.DebugLevel {
if s.cfg.Logging && logrus.GetLevel() == logrus.DebugLevel {
next = middleware.DebugRequestMiddleware(next)
}


@@ -4,21 +4,16 @@ import (
"context"
"net/http"
"github.com/containerd/log"
"github.com/docker/docker/api/types/registry"
"github.com/sirupsen/logrus"
)
// CORSMiddleware injects CORS headers to each request
// when it's configured.
//
// Deprecated: CORS headers should not be set on the API. This feature will be removed in the next release.
type CORSMiddleware struct {
defaultHeaders string
}
// NewCORSMiddleware creates a new CORSMiddleware with default headers.
//
// Deprecated: CORS headers should not be set on the API. This feature will be removed in the next release.
func NewCORSMiddleware(d string) CORSMiddleware {
return CORSMiddleware{defaultHeaders: d}
}
@@ -33,9 +28,9 @@ func (c CORSMiddleware) WrapHandler(handler func(ctx context.Context, w http.Res
corsHeaders = "*"
}
log.G(ctx).Debugf("CORS header is enabled and set to: %s", corsHeaders)
logrus.Debugf("CORS header is enabled and set to: %s", corsHeaders)
w.Header().Add("Access-Control-Allow-Origin", corsHeaders)
w.Header().Add("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept, "+registry.AuthHeader)
w.Header().Add("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept, X-Registry-Auth")
w.Header().Add("Access-Control-Allow-Methods", "HEAD, GET, POST, DELETE, PUT, OPTIONS")
return handler(ctx, w, r, vars)
}


@@ -8,17 +8,17 @@ import (
"net/http"
"strings"
"github.com/containerd/log"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/pkg/ioutils"
"github.com/sirupsen/logrus"
)
// DebugRequestMiddleware dumps the request to logger
func DebugRequestMiddleware(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
log.G(ctx).Debugf("Calling %s %s", r.Method, r.RequestURI)
logrus.Debugf("Calling %s %s", r.Method, r.RequestURI)
if r.Method != http.MethodPost {
if r.Method != "POST" {
return handler(ctx, w, r, vars)
}
if err := httputils.CheckForJSON(r); err != nil {
@@ -41,12 +41,12 @@ func DebugRequestMiddleware(handler func(ctx context.Context, w http.ResponseWri
var postForm map[string]interface{}
if err := json.Unmarshal(b, &postForm); err == nil {
maskSecretKeys(postForm)
maskSecretKeys(postForm, r.RequestURI)
formStr, errMarshal := json.Marshal(postForm)
if errMarshal == nil {
log.G(ctx).Debugf("form data: %s", string(formStr))
logrus.Debugf("form data: %s", string(formStr))
} else {
log.G(ctx).Debugf("form data: %q", postForm)
logrus.Debugf("form data: %q", postForm)
}
}
@@ -54,37 +54,41 @@ func DebugRequestMiddleware(handler func(ctx context.Context, w http.ResponseWri
}
}
func maskSecretKeys(inp interface{}) {
func maskSecretKeys(inp interface{}, path string) {
// Remove any query string from the path
idx := strings.Index(path, "?")
if idx != -1 {
path = path[:idx]
}
// Remove trailing / characters
path = strings.TrimRight(path, "/")
if arr, ok := inp.([]interface{}); ok {
for _, f := range arr {
maskSecretKeys(f)
maskSecretKeys(f, path)
}
return
}
if form, ok := inp.(map[string]interface{}); ok {
scrub := []string{
// Note: The Data field contains the base64-encoded secret in 'secret'
// and 'config' create and update requests. Currently, no other POST
// API endpoints use a data field, so we scrub this field unconditionally.
// Change this handling to be conditional if a new endpoint is added
// in future where this field should not be scrubbed.
"data",
"jointoken",
"password",
"secret",
"signingcakey",
"unlockkey",
}
loop0:
for k, v := range form {
for _, m := range scrub {
for _, m := range []string{"password", "secret", "jointoken", "unlockkey", "signingcakey"} {
if strings.EqualFold(m, k) {
form[k] = "*****"
continue loop0
}
}
maskSecretKeys(v)
maskSecretKeys(v, path)
}
// Route-specific redactions
if strings.HasSuffix(path, "/secrets/create") {
for k := range form {
if k == "Data" {
form[k] = "*****"
}
}
}
}
}


@@ -3,31 +3,37 @@ package middleware // import "github.com/docker/docker/api/server/middleware"
import (
"testing"
"gotest.tools/v3/assert"
is "gotest.tools/v3/assert/cmp"
"gotest.tools/assert"
is "gotest.tools/assert/cmp"
)
func TestMaskSecretKeys(t *testing.T) {
tests := []struct {
doc string
path string
input map[string]interface{}
expected map[string]interface{}
}{
{
doc: "secret/config create and update requests",
path: "/v1.30/secrets/create",
input: map[string]interface{}{"Data": "foo", "Name": "name", "Labels": map[string]interface{}{}},
expected: map[string]interface{}{"Data": "*****", "Name": "name", "Labels": map[string]interface{}{}},
},
{
doc: "masking other fields (recursively)",
path: "/v1.30/secrets/create//",
input: map[string]interface{}{"Data": "foo", "Name": "name", "Labels": map[string]interface{}{}},
expected: map[string]interface{}{"Data": "*****", "Name": "name", "Labels": map[string]interface{}{}},
},
{
path: "/secrets/create?key=val",
input: map[string]interface{}{"Data": "foo", "Name": "name", "Labels": map[string]interface{}{}},
expected: map[string]interface{}{"Data": "*****", "Name": "name", "Labels": map[string]interface{}{}},
},
{
path: "/v1.30/some/other/path",
input: map[string]interface{}{
"password": "pass",
"secret": "secret",
"jointoken": "jointoken",
"unlockkey": "unlockkey",
"signingcakey": "signingcakey",
"password": "pass",
"other": map[string]interface{}{
"password": "pass",
"secret": "secret",
"jointoken": "jointoken",
"unlockkey": "unlockkey",
@@ -35,13 +41,8 @@ func TestMaskSecretKeys(t *testing.T) {
},
},
expected: map[string]interface{}{
"password": "*****",
"secret": "*****",
"jointoken": "*****",
"unlockkey": "*****",
"signingcakey": "*****",
"password": "*****",
"other": map[string]interface{}{
"password": "*****",
"secret": "*****",
"jointoken": "*****",
"unlockkey": "*****",
@@ -49,27 +50,10 @@ func TestMaskSecretKeys(t *testing.T) {
},
},
},
{
doc: "case insensitive field matching",
input: map[string]interface{}{
"PASSWORD": "pass",
"other": map[string]interface{}{
"PASSWORD": "pass",
},
},
expected: map[string]interface{}{
"PASSWORD": "*****",
"other": map[string]interface{}{
"PASSWORD": "*****",
},
},
},
}
for _, testcase := range tests {
t.Run(testcase.doc, func(t *testing.T) {
maskSecretKeys(testcase.input)
assert.Check(t, is.DeepEqual(testcase.expected, testcase.input))
})
maskSecretKeys(testcase.input, testcase.path)
assert.Check(t, is.DeepEqual(testcase.expected, testcase.input))
}
}


@@ -6,7 +6,6 @@ import (
"net/http"
"runtime"
"github.com/docker/docker/api"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/types/versions"
)
@@ -14,40 +13,19 @@ import (
// VersionMiddleware is a middleware that
// validates the client and server versions.
type VersionMiddleware struct {
serverVersion string
// defaultAPIVersion is the default API version provided by the API server,
// specified as "major.minor". It is usually configured to the latest API
// version [github.com/docker/docker/api.DefaultVersion].
//
// API requests for API versions greater than this version are rejected by
// the server and produce a [versionUnsupportedError].
defaultAPIVersion string
// minAPIVersion is the minimum API version provided by the API server,
// specified as "major.minor".
//
// API requests for API versions lower than this version are rejected by
// the server and produce a [versionUnsupportedError].
minAPIVersion string
serverVersion string
defaultVersion string
minVersion string
}
// NewVersionMiddleware creates a VersionMiddleware with the given versions.
func NewVersionMiddleware(serverVersion, defaultAPIVersion, minAPIVersion string) (*VersionMiddleware, error) {
if versions.LessThan(defaultAPIVersion, api.MinSupportedAPIVersion) || versions.GreaterThan(defaultAPIVersion, api.DefaultVersion) {
return nil, fmt.Errorf("invalid default API version (%s): must be between %s and %s", defaultAPIVersion, api.MinSupportedAPIVersion, api.DefaultVersion)
// NewVersionMiddleware creates a new VersionMiddleware
// with the default versions.
func NewVersionMiddleware(s, d, m string) VersionMiddleware {
return VersionMiddleware{
serverVersion: s,
defaultVersion: d,
minVersion: m,
}
if versions.LessThan(minAPIVersion, api.MinSupportedAPIVersion) || versions.GreaterThan(minAPIVersion, api.DefaultVersion) {
return nil, fmt.Errorf("invalid minimum API version (%s): must be between %s and %s", minAPIVersion, api.MinSupportedAPIVersion, api.DefaultVersion)
}
if versions.GreaterThan(minAPIVersion, defaultAPIVersion) {
return nil, fmt.Errorf("invalid API version: the minimum API version (%s) is higher than the default version (%s)", minAPIVersion, defaultAPIVersion)
}
return &VersionMiddleware{
serverVersion: serverVersion,
defaultAPIVersion: defaultAPIVersion,
minAPIVersion: minAPIVersion,
}, nil
}
type versionUnsupportedError struct {
@@ -67,20 +45,21 @@ func (e versionUnsupportedError) InvalidParameter() {}
func (v VersionMiddleware) WrapHandler(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
w.Header().Set("Server", fmt.Sprintf("Docker/%s (%s)", v.serverVersion, runtime.GOOS))
w.Header().Set("Api-Version", v.defaultAPIVersion)
w.Header().Set("Ostype", runtime.GOOS)
w.Header().Set("API-Version", v.defaultVersion)
w.Header().Set("OSType", runtime.GOOS)
apiVersion := vars["version"]
if apiVersion == "" {
apiVersion = v.defaultAPIVersion
apiVersion = v.defaultVersion
}
if versions.LessThan(apiVersion, v.minAPIVersion) {
return versionUnsupportedError{version: apiVersion, minVersion: v.minAPIVersion}
if versions.LessThan(apiVersion, v.minVersion) {
return versionUnsupportedError{version: apiVersion, minVersion: v.minVersion}
}
if versions.GreaterThan(apiVersion, v.defaultAPIVersion) {
return versionUnsupportedError{version: apiVersion, maxVersion: v.defaultAPIVersion}
if versions.GreaterThan(apiVersion, v.defaultVersion) {
return versionUnsupportedError{version: apiVersion, maxVersion: v.defaultVersion}
}
ctx = context.WithValue(ctx, httputils.APIVersionKey{}, apiVersion)
ctx = context.WithValue(ctx, httputils.APIVersionKey, apiVersion)
return handler(ctx, w, r, vars)
}
}


@@ -2,85 +2,30 @@ package middleware // import "github.com/docker/docker/api/server/middleware"
import (
"context"
"fmt"
"net/http"
"net/http/httptest"
"runtime"
"testing"
"github.com/docker/docker/api"
"github.com/docker/docker/api/server/httputils"
"gotest.tools/v3/assert"
is "gotest.tools/v3/assert/cmp"
"gotest.tools/assert"
is "gotest.tools/assert/cmp"
)
func TestNewVersionMiddlewareValidation(t *testing.T) {
tests := []struct {
doc, defaultVersion, minVersion, expectedErr string
}{
{
doc: "defaults",
defaultVersion: api.DefaultVersion,
minVersion: api.MinSupportedAPIVersion,
},
{
doc: "invalid default lower than min",
defaultVersion: api.MinSupportedAPIVersion,
minVersion: api.DefaultVersion,
expectedErr: fmt.Sprintf("invalid API version: the minimum API version (%s) is higher than the default version (%s)", api.DefaultVersion, api.MinSupportedAPIVersion),
},
{
doc: "invalid default too low",
defaultVersion: "0.1",
minVersion: api.MinSupportedAPIVersion,
expectedErr: fmt.Sprintf("invalid default API version (0.1): must be between %s and %s", api.MinSupportedAPIVersion, api.DefaultVersion),
},
{
doc: "invalid default too high",
defaultVersion: "9999.9999",
minVersion: api.DefaultVersion,
expectedErr: fmt.Sprintf("invalid default API version (9999.9999): must be between %s and %s", api.MinSupportedAPIVersion, api.DefaultVersion),
},
{
doc: "invalid minimum too low",
defaultVersion: api.MinSupportedAPIVersion,
minVersion: "0.1",
expectedErr: fmt.Sprintf("invalid minimum API version (0.1): must be between %s and %s", api.MinSupportedAPIVersion, api.DefaultVersion),
},
{
doc: "invalid minimum too high",
defaultVersion: api.DefaultVersion,
minVersion: "9999.9999",
expectedErr: fmt.Sprintf("invalid minimum API version (9999.9999): must be between %s and %s", api.MinSupportedAPIVersion, api.DefaultVersion),
},
}
for _, tc := range tests {
tc := tc
t.Run(tc.doc, func(t *testing.T) {
_, err := NewVersionMiddleware("1.2.3", tc.defaultVersion, tc.minVersion)
if tc.expectedErr == "" {
assert.Check(t, err)
} else {
assert.Check(t, is.Error(err, tc.expectedErr))
}
})
}
}
func TestVersionMiddlewareVersion(t *testing.T) {
expectedVersion := "<not set>"
defaultVersion := "1.10.0"
minVersion := "1.2.0"
expectedVersion := defaultVersion
handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
v := httputils.VersionFromContext(ctx)
assert.Check(t, is.Equal(expectedVersion, v))
return nil
}
m, err := NewVersionMiddleware("1.2.3", api.DefaultVersion, api.MinSupportedAPIVersion)
assert.NilError(t, err)
m := NewVersionMiddleware(defaultVersion, defaultVersion, minVersion)
h := m.WrapHandler(handler)
req, _ := http.NewRequest(http.MethodGet, "/containers/json", nil)
req, _ := http.NewRequest("GET", "/containers/json", nil)
resp := httptest.NewRecorder()
ctx := context.Background()
@@ -90,19 +35,19 @@ func TestVersionMiddlewareVersion(t *testing.T) {
errString string
}{
{
expectedVersion: api.DefaultVersion,
expectedVersion: "1.10.0",
},
{
reqVersion: api.MinSupportedAPIVersion,
expectedVersion: api.MinSupportedAPIVersion,
reqVersion: "1.9.0",
expectedVersion: "1.9.0",
},
{
reqVersion: "0.1",
errString: fmt.Sprintf("client version 0.1 is too old. Minimum supported API version is %s, please upgrade your client to a newer version", api.MinSupportedAPIVersion),
errString: "client version 0.1 is too old. Minimum supported API version is 1.2.0, please upgrade your client to a newer version",
},
{
reqVersion: "9999.9999",
errString: fmt.Sprintf("client version 9999.9999 is too new. Maximum supported API version is %s", api.DefaultVersion),
errString: "client version 9999.9999 is too new. Maximum supported API version is 1.10.0",
},
}
@@ -126,21 +71,22 @@ func TestVersionMiddlewareWithErrorsReturnsHeaders(t *testing.T) {
return nil
}
m, err := NewVersionMiddleware("1.2.3", api.DefaultVersion, api.MinSupportedAPIVersion)
assert.NilError(t, err)
defaultVersion := "1.10.0"
minVersion := "1.2.0"
m := NewVersionMiddleware(defaultVersion, defaultVersion, minVersion)
h := m.WrapHandler(handler)
req, _ := http.NewRequest(http.MethodGet, "/containers/json", nil)
req, _ := http.NewRequest("GET", "/containers/json", nil)
resp := httptest.NewRecorder()
ctx := context.Background()
vars := map[string]string{"version": "0.1"}
err = h(ctx, resp, req, vars)
err := h(ctx, resp, req, vars)
assert.Check(t, is.ErrorContains(err, ""))
hdr := resp.Result().Header
assert.Check(t, is.Contains(hdr.Get("Server"), "Docker/1.2.3"))
assert.Check(t, is.Contains(hdr.Get("Server"), "Docker/"+defaultVersion))
assert.Check(t, is.Contains(hdr.Get("Server"), runtime.GOOS))
assert.Check(t, is.Equal(hdr.Get("Api-Version"), api.DefaultVersion))
assert.Check(t, is.Equal(hdr.Get("Ostype"), runtime.GOOS))
assert.Check(t, is.Equal(hdr.Get("API-Version"), defaultVersion))
assert.Check(t, is.Equal(hdr.Get("OSType"), runtime.GOOS))
}


@@ -14,7 +14,8 @@ type Backend interface {
Build(context.Context, backend.BuildConfig) (string, error)
// Prune build cache
PruneCache(context.Context, types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error)
PruneCache(context.Context) (*types.BuildCachePruneReport, error)
Cancel(context.Context, string) error
}


@@ -1,11 +1,6 @@
package build // import "github.com/docker/docker/api/server/router/build"
import (
"runtime"
"github.com/docker/docker/api/server/router"
"github.com/docker/docker/api/types"
)
import "github.com/docker/docker/api/server/router"
// buildRouter is a router to talk with the build controller
type buildRouter struct {
@@ -16,10 +11,7 @@ type buildRouter struct {
// NewRouter initializes a new build router
func NewRouter(b Backend, d experimentalProvider) router.Router {
r := &buildRouter{
backend: b,
daemon: d,
}
r := &buildRouter{backend: b, daemon: d}
r.initRoutes()
return r
}
@@ -31,30 +23,8 @@ func (r *buildRouter) Routes() []router.Route {
func (r *buildRouter) initRoutes() {
r.routes = []router.Route{
router.NewPostRoute("/build", r.postBuild),
router.NewPostRoute("/build/prune", r.postPrune),
router.NewPostRoute("/build", r.postBuild, router.WithCancel),
router.NewPostRoute("/build/prune", r.postPrune, router.WithCancel),
router.NewPostRoute("/build/cancel", r.postCancel),
}
}
// BuilderVersion derives the default docker builder version from the config.
//
// The default on Linux is version "2" (BuildKit), but the daemon can be
// configured to recommend version "1" (classic Builder). Windows does not
// yet support BuildKit for native Windows images, and uses "1" (classic builder)
// as a default.
//
// This value is only a recommendation as advertised by the daemon, and it is
// up to the client to choose which builder to use.
func BuilderVersion(features map[string]bool) types.BuilderVersion {
// TODO(thaJeztah) move the default to daemon/config
if runtime.GOOS == "windows" {
return types.BuilderV1
}
bv := types.BuilderBuildKit
if v, ok := features["buildkit"]; ok && !v {
bv = types.BuilderV1
}
return bv
}


@@ -14,99 +14,89 @@ import (
"strings"
"sync"
"github.com/containerd/log"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/registry"
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/progress"
"github.com/docker/docker/pkg/streamformatter"
units "github.com/docker/go-units"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
type invalidParam struct {
error
type invalidIsolationError string
func (e invalidIsolationError) Error() string {
return fmt.Sprintf("Unsupported isolation: %q", string(e))
}
func (e invalidParam) InvalidParameter() {}
func (e invalidIsolationError) InvalidParameter() {}
func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBuildOptions, error) {
options := &types.ImageBuildOptions{
Version: types.BuilderV1, // Builder V1 is the default, but can be overridden
Dockerfile: r.FormValue("dockerfile"),
SuppressOutput: httputils.BoolValue(r, "q"),
NoCache: httputils.BoolValue(r, "nocache"),
ForceRemove: httputils.BoolValue(r, "forcerm"),
PullParent: httputils.BoolValue(r, "pull"),
MemorySwap: httputils.Int64ValueOrZero(r, "memswap"),
Memory: httputils.Int64ValueOrZero(r, "memory"),
CPUShares: httputils.Int64ValueOrZero(r, "cpushares"),
CPUPeriod: httputils.Int64ValueOrZero(r, "cpuperiod"),
CPUQuota: httputils.Int64ValueOrZero(r, "cpuquota"),
CPUSetCPUs: r.FormValue("cpusetcpus"),
CPUSetMems: r.FormValue("cpusetmems"),
CgroupParent: r.FormValue("cgroupparent"),
NetworkMode: r.FormValue("networkmode"),
Tags: r.Form["t"],
ExtraHosts: r.Form["extrahosts"],
SecurityOpt: r.Form["securityopt"],
Squash: httputils.BoolValue(r, "squash"),
Target: r.FormValue("target"),
RemoteContext: r.FormValue("remote"),
SessionID: r.FormValue("session"),
BuildID: r.FormValue("buildid"),
}
if runtime.GOOS != "windows" && options.SecurityOpt != nil {
// SecurityOpt only supports "credentials-spec" on Windows, and not used on other platforms.
return nil, invalidParam{errors.New("security options are not supported on " + runtime.GOOS)}
}
if httputils.BoolValue(r, "forcerm") {
version := httputils.VersionFromContext(ctx)
options := &types.ImageBuildOptions{}
if httputils.BoolValue(r, "forcerm") && versions.GreaterThanOrEqualTo(version, "1.12") {
options.Remove = true
} else if r.FormValue("rm") == "" {
} else if r.FormValue("rm") == "" && versions.GreaterThanOrEqualTo(version, "1.12") {
options.Remove = true
} else {
options.Remove = httputils.BoolValue(r, "rm")
}
version := httputils.VersionFromContext(ctx)
if httputils.BoolValue(r, "pull") && versions.GreaterThanOrEqualTo(version, "1.16") {
options.PullParent = true
}
options.Dockerfile = r.FormValue("dockerfile")
options.SuppressOutput = httputils.BoolValue(r, "q")
options.NoCache = httputils.BoolValue(r, "nocache")
options.ForceRemove = httputils.BoolValue(r, "forcerm")
options.MemorySwap = httputils.Int64ValueOrZero(r, "memswap")
options.Memory = httputils.Int64ValueOrZero(r, "memory")
options.CPUShares = httputils.Int64ValueOrZero(r, "cpushares")
options.CPUPeriod = httputils.Int64ValueOrZero(r, "cpuperiod")
options.CPUQuota = httputils.Int64ValueOrZero(r, "cpuquota")
options.CPUSetCPUs = r.FormValue("cpusetcpus")
options.CPUSetMems = r.FormValue("cpusetmems")
options.CgroupParent = r.FormValue("cgroupparent")
options.NetworkMode = r.FormValue("networkmode")
options.Tags = r.Form["t"]
options.ExtraHosts = r.Form["extrahosts"]
options.SecurityOpt = r.Form["securityopt"]
options.Squash = httputils.BoolValue(r, "squash")
options.Target = r.FormValue("target")
options.RemoteContext = r.FormValue("remote")
if versions.GreaterThanOrEqualTo(version, "1.32") {
options.Platform = r.FormValue("platform")
}
if versions.GreaterThanOrEqualTo(version, "1.40") {
outputsJSON := r.FormValue("outputs")
if outputsJSON != "" {
var outputs []types.ImageBuildOutput
if err := json.Unmarshal([]byte(outputsJSON), &outputs); err != nil {
return nil, invalidParam{errors.Wrap(err, "invalid outputs specified")}
}
options.Outputs = outputs
}
}
if s := r.Form.Get("shmsize"); s != "" {
shmSize, err := strconv.ParseInt(s, 10, 64)
if r.Form.Get("shmsize") != "" {
shmSize, err := strconv.ParseInt(r.Form.Get("shmsize"), 10, 64)
if err != nil {
return nil, err
}
options.ShmSize = shmSize
}
if i := r.FormValue("isolation"); i != "" {
options.Isolation = container.Isolation(i)
if !options.Isolation.IsValid() {
return nil, invalidParam{errors.Errorf("unsupported isolation: %q", i)}
if i := container.Isolation(r.FormValue("isolation")); i != "" {
if !container.Isolation.IsValid(i) {
return nil, invalidIsolationError(i)
}
options.Isolation = i
}
if ulimitsJSON := r.FormValue("ulimits"); ulimitsJSON != "" {
buildUlimits := []*container.Ulimit{}
if runtime.GOOS != "windows" && options.SecurityOpt != nil {
return nil, errdefs.InvalidParameter(errors.New("The daemon on this platform does not support setting security options on build"))
}
var buildUlimits = []*units.Ulimit{}
ulimitsJSON := r.FormValue("ulimits")
if ulimitsJSON != "" {
if err := json.Unmarshal([]byte(ulimitsJSON), &buildUlimits); err != nil {
return nil, invalidParam{errors.Wrap(err, "error reading ulimit settings")}
return nil, errors.Wrap(errdefs.InvalidParameter(err), "error reading ulimit settings")
}
options.Ulimits = buildUlimits
}
@@ -123,76 +113,55 @@ func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBui
// the fact they mentioned it, we need to pass that along to the builder
// so that it can print a warning about "foo" being unused if there is
// no "ARG foo" in the Dockerfile.
if buildArgsJSON := r.FormValue("buildargs"); buildArgsJSON != "" {
buildArgs := map[string]*string{}
buildArgsJSON := r.FormValue("buildargs")
if buildArgsJSON != "" {
var buildArgs = map[string]*string{}
if err := json.Unmarshal([]byte(buildArgsJSON), &buildArgs); err != nil {
return nil, invalidParam{errors.Wrap(err, "error reading build args")}
return nil, errors.Wrap(errdefs.InvalidParameter(err), "error reading build args")
}
options.BuildArgs = buildArgs
}
if labelsJSON := r.FormValue("labels"); labelsJSON != "" {
labels := map[string]string{}
labelsJSON := r.FormValue("labels")
if labelsJSON != "" {
var labels = map[string]string{}
if err := json.Unmarshal([]byte(labelsJSON), &labels); err != nil {
return nil, invalidParam{errors.Wrap(err, "error reading labels")}
return nil, errors.Wrap(errdefs.InvalidParameter(err), "error reading labels")
}
options.Labels = labels
}
if cacheFromJSON := r.FormValue("cachefrom"); cacheFromJSON != "" {
cacheFrom := []string{}
cacheFromJSON := r.FormValue("cachefrom")
if cacheFromJSON != "" {
var cacheFrom = []string{}
if err := json.Unmarshal([]byte(cacheFromJSON), &cacheFrom); err != nil {
return nil, invalidParam{errors.Wrap(err, "error reading cache-from")}
return nil, err
}
options.CacheFrom = cacheFrom
}
if bv := r.FormValue("version"); bv != "" {
v, err := parseVersion(bv)
if err != nil {
return nil, err
}
options.Version = v
options.SessionID = r.FormValue("session")
options.BuildID = r.FormValue("buildid")
builderVersion, err := parseVersion(r.FormValue("version"))
if err != nil {
return nil, err
}
options.Version = builderVersion
return options, nil
}
func parseVersion(s string) (types.BuilderVersion, error) {
-	switch types.BuilderVersion(s) {
-	case types.BuilderV1:
+	if s == "" || s == string(types.BuilderV1) {
		return types.BuilderV1, nil
-	case types.BuilderBuildKit:
-		return types.BuilderBuildKit, nil
-	default:
-		return "", invalidParam{errors.Errorf("invalid version %q", s)}
	}
+	if s == string(types.BuilderBuildKit) {
+		return types.BuilderBuildKit, nil
+	}
+	return "", errors.Errorf("invalid version %s", s)
}
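// Editor's note: an illustrative sketch of the mapping implemented by the
// 18.06 parseVersion above, assuming the stock constants
// types.BuilderV1 == "1" and types.BuilderBuildKit == "2":
//
//	v, _ := parseVersion("")    // types.BuilderV1 (classic builder is the default)
//	v, _ = parseVersion("1")    // types.BuilderV1
//	v, _ = parseVersion("2")    // types.BuilderBuildKit
//	_, err := parseVersion("3") // error: invalid version 3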
func (br *buildRouter) postPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
fltrs, err := filters.FromJSON(r.Form.Get("filters"))
if err != nil {
return err
}
-	ksfv := r.FormValue("keep-storage")
-	if ksfv == "" {
-		ksfv = "0"
-	}
-	ks, err := strconv.Atoi(ksfv)
-	if err != nil {
-		return invalidParam{errors.Wrapf(err, "keep-storage is in bytes and expects an integer, got %v", ksfv)}
-	}

-	opts := types.BuildCachePruneOptions{
-		All:         httputils.BoolValue(r, "all"),
-		Filters:     fltrs,
-		KeepStorage: int64(ks),
-	}

-	report, err := br.backend.PruneCache(ctx, opts)
+	report, err := br.backend.PruneCache(ctx)
if err != nil {
return err
}
@@ -204,7 +173,7 @@ func (br *buildRouter) postCancel(ctx context.Context, w http.ResponseWriter, r
id := r.FormValue("id")
if id == "" {
return invalidParam{errors.New("build ID not provided")}
return errors.Errorf("build ID not provided")
}
return br.backend.Cancel(ctx, id)
@@ -231,11 +200,12 @@ func (br *buildRouter) postBuild(ctx context.Context, w http.ResponseWriter, r *
}
output := ioutils.NewWriteFlusher(ww)
-	defer func() { _ = output.Close() }()
+	defer output.Close()

	errf := func(err error) error {
		if httputils.BoolValue(r, "q") && notVerboseBuffer.Len() > 0 {
-			_, _ = output.Write(notVerboseBuffer.Bytes())
+			output.Write(notVerboseBuffer.Bytes())
}
// Do not write the error in the http output if it's still empty.
@@ -245,7 +215,7 @@ func (br *buildRouter) postBuild(ctx context.Context, w http.ResponseWriter, r *
}
_, err = output.Write(streamformatter.FormatError(err))
if err != nil {
-		log.G(ctx).Warnf("could not write error response: %v", err)
+		logrus.Warnf("could not write error response: %v", err)
}
return nil
}
@@ -257,7 +227,11 @@ func (br *buildRouter) postBuild(ctx context.Context, w http.ResponseWriter, r *
buildOptions.AuthConfigs = getAuthConfigs(r.Header)
if buildOptions.Squash && !br.daemon.HasExperimental() {
return invalidParam{errors.New("squash is only supported with experimental mode")}
return errdefs.InvalidParameter(errors.New("squash is only supported with experimental mode"))
}
if buildOptions.Version == types.BuilderBuildKit && !br.daemon.HasExperimental() {
return errdefs.InvalidParameter(errors.New("buildkit is only supported with experimental mode"))
}
out := io.Writer(output)
@@ -286,13 +260,13 @@ func (br *buildRouter) postBuild(ctx context.Context, w http.ResponseWriter, r *
// Everything worked so if -q was provided the output from the daemon
// should be just the image ID and we'll print that to stdout.
if buildOptions.SuppressOutput {
-		_, _ = fmt.Fprintln(streamformatter.NewStdoutWriter(output), imgID)
+		fmt.Fprintln(streamformatter.NewStdoutWriter(output), imgID)
}
return nil
}
-func getAuthConfigs(header http.Header) map[string]registry.AuthConfig {
-	authConfigs := map[string]registry.AuthConfig{}
+func getAuthConfigs(header http.Header) map[string]types.AuthConfig {
+	authConfigs := map[string]types.AuthConfig{}
authConfigsEncoded := header.Get("X-Registry-Config")
if authConfigsEncoded == "" {
@@ -302,7 +276,7 @@ func getAuthConfigs(header http.Header) map[string]registry.AuthConfig {
authConfigsJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authConfigsEncoded))
// Pulling an image does not error when no auth is provided so to remain
// consistent with the existing api decode errors are ignored
-	_ = json.NewDecoder(authConfigsJSON).Decode(&authConfigs)
+	json.NewDecoder(authConfigsJSON).Decode(&authConfigs)
return authConfigs
}
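// Editor's note: a hedged sketch of how a client would populate the
// X-Registry-Config header consumed above — URL-safe base64 over a JSON map
// keyed by registry address (field names follow types.AuthConfig; the sample
// registry and credentials are illustrative only):
//
//	cfgs := map[string]types.AuthConfig{
//		"https://index.docker.io/v1/": {Username: "user", Password: "secret"},
//	}
//	buf, _ := json.Marshal(cfgs)
//	req.Header.Set("X-Registry-Config", base64.URLEncoding.EncodeToString(buf))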
@@ -339,12 +313,8 @@ type flusher interface {
Flush()
}
-type nopFlusher struct{}

-func (f *nopFlusher) Flush() {}

func wrapOutputBufferedUntilRequestRead(rc io.ReadCloser, out io.Writer) (io.ReadCloser, io.Writer) {
-	var fl flusher = &nopFlusher{}
+	var fl flusher = &ioutils.NopFlusher{}
if f, ok := out.(flusher); ok {
fl = f
}
@@ -428,7 +398,7 @@ func (w *wcf) notify() {
w.mu.Lock()
if !w.ready {
if w.buf.Len() > 0 {
-			_, _ = io.Copy(w.Writer, w.buf)
+			io.Copy(w.Writer, w.buf)
}
if w.flushed {
w.flusher.Flush()


@@ -1,10 +1,10 @@
package checkpoint // import "github.com/docker/docker/api/server/router/checkpoint"
import "github.com/docker/docker/api/types/checkpoint"
import "github.com/docker/docker/api/types"
// Backend for Checkpoint
type Backend interface {
CheckpointCreate(container string, config checkpoint.CreateOptions) error
CheckpointDelete(container string, config checkpoint.DeleteOptions) error
CheckpointList(container string, config checkpoint.ListOptions) ([]checkpoint.Summary, error)
CheckpointCreate(container string, config types.CheckpointCreateOptions) error
CheckpointDelete(container string, config types.CheckpointDeleteOptions) error
CheckpointList(container string, config types.CheckpointListOptions) ([]types.Checkpoint, error)
}


@@ -2,10 +2,11 @@ package checkpoint // import "github.com/docker/docker/api/server/router/checkpo
import (
"context"
"encoding/json"
"net/http"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/types/checkpoint"
"github.com/docker/docker/api/types"
)
func (s *checkpointRouter) postContainerCheckpoint(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
@@ -13,8 +14,10 @@ func (s *checkpointRouter) postContainerCheckpoint(ctx context.Context, w http.R
return err
}
-	var options checkpoint.CreateOptions
-	if err := httputils.ReadJSON(r, &options); err != nil {
+	var options types.CheckpointCreateOptions
+	decoder := json.NewDecoder(r.Body)
+	if err := decoder.Decode(&options); err != nil {
return err
}
@@ -32,9 +35,10 @@ func (s *checkpointRouter) getContainerCheckpoints(ctx context.Context, w http.R
return err
}
-	checkpoints, err := s.backend.CheckpointList(vars["name"], checkpoint.ListOptions{
+	checkpoints, err := s.backend.CheckpointList(vars["name"], types.CheckpointListOptions{
CheckpointDir: r.Form.Get("dir"),
})
if err != nil {
return err
}
@@ -47,10 +51,11 @@ func (s *checkpointRouter) deleteContainerCheckpoint(ctx context.Context, w http
return err
}
-	err := s.backend.CheckpointDelete(vars["name"], checkpoint.DeleteOptions{
+	err := s.backend.CheckpointDelete(vars["name"], types.CheckpointDeleteOptions{
CheckpointDir: r.Form.Get("dir"),
CheckpointID: vars["checkpoint"],
})
if err != nil {
return err
}


@@ -14,32 +14,33 @@ import (
// execBackend includes functions to implement to provide exec functionality.
type execBackend interface {
-	ContainerExecCreate(name string, options *container.ExecOptions) (string, error)
+	ContainerExecCreate(name string, config *types.ExecConfig) (string, error)
	ContainerExecInspect(id string) (*backend.ExecInspect, error)
	ContainerExecResize(name string, height, width int) error
-	ContainerExecStart(ctx context.Context, name string, options backend.ExecStartConfig) error
+	ContainerExecStart(ctx context.Context, name string, stdin io.Reader, stdout io.Writer, stderr io.Writer) error
	ExecExists(name string) (bool, error)
}

// copyBackend includes functions to implement to provide container copy functionality.
type copyBackend interface {
-	ContainerArchivePath(name string, path string) (content io.ReadCloser, stat *container.PathStat, err error)
-	ContainerExport(ctx context.Context, name string, out io.Writer) error
+	ContainerArchivePath(name string, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error)
+	ContainerCopy(name string, res string) (io.ReadCloser, error)
+	ContainerExport(name string, out io.Writer) error
	ContainerExtractToDir(name, path string, copyUIDGID, noOverwriteDirNonDir bool, content io.Reader) error
-	ContainerStatPath(name string, path string) (stat *container.PathStat, err error)
+	ContainerStatPath(name string, path string) (stat *types.ContainerPathStat, err error)
}

// stateBackend includes functions to implement to provide container state lifecycle functionality.
type stateBackend interface {
-	ContainerCreate(ctx context.Context, config backend.ContainerCreateConfig) (container.CreateResponse, error)
-	ContainerKill(name string, signal string) error
+	ContainerCreate(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error)
+	ContainerKill(name string, sig uint64) error
	ContainerPause(name string) error
	ContainerRename(oldName, newName string) error
	ContainerResize(name string, height, width int) error
-	ContainerRestart(ctx context.Context, name string, options container.StopOptions) error
-	ContainerRm(name string, config *backend.ContainerRmConfig) error
-	ContainerStart(ctx context.Context, name string, checkpoint string, checkpointDir string) error
-	ContainerStop(ctx context.Context, name string, options container.StopOptions) error
+	ContainerRestart(name string, seconds *int) error
+	ContainerRm(name string, config *types.ContainerRmConfig) error
+	ContainerStart(name string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error
+	ContainerStop(name string, seconds *int) error
	ContainerUnpause(name string) error
	ContainerUpdate(name string, hostConfig *container.HostConfig) (container.ContainerUpdateOKBody, error)
	ContainerWait(ctx context.Context, name string, condition containerpkg.WaitCondition) (<-chan containerpkg.StateStatus, error)
@@ -47,12 +48,13 @@ type stateBackend interface {
// monitorBackend includes functions to implement to provide containers monitoring functionality.
type monitorBackend interface {
-	ContainerChanges(ctx context.Context, name string) ([]archive.Change, error)
-	ContainerInspect(ctx context.Context, name string, size bool, version string) (interface{}, error)
-	ContainerLogs(ctx context.Context, name string, config *container.LogsOptions) (msgs <-chan *backend.LogMessage, tty bool, err error)
+	ContainerChanges(name string) ([]archive.Change, error)
+	ContainerInspect(name string, size bool, version string) (interface{}, error)
+	ContainerLogs(ctx context.Context, name string, config *types.ContainerLogsOptions) (msgs <-chan *backend.LogMessage, tty bool, err error)
	ContainerStats(ctx context.Context, name string, config *backend.ContainerStatsConfig) error
	ContainerTop(name string, psArgs string) (*container.ContainerTopOKBody, error)
-	Containers(ctx context.Context, config *container.ListOptions) ([]*types.Container, error)
+	Containers(config *types.ContainerListOptions) ([]*types.Container, error)
}
// attachBackend includes function to implement to provide container attaching functionality.
@@ -62,11 +64,11 @@ type attachBackend interface {
// systemBackend includes functions to implement to provide system wide containers functionality
type systemBackend interface {
-	ContainersPrune(ctx context.Context, pruneFilters filters.Args) (*container.PruneReport, error)
+	ContainersPrune(ctx context.Context, pruneFilters filters.Args) (*types.ContainersPruneReport, error)
}
type commitBackend interface {
-	CreateImageFromContainer(ctx context.Context, name string, config *backend.CreateImageConfig) (imageID string, err error)
+	CreateImageFromContainer(name string, config *backend.CreateImageConfig) (imageID string, err error)
}
// Backend is all the methods that need to be implemented to provide container specific functionality.


@@ -10,15 +10,13 @@ type containerRouter struct {
	backend Backend
	decoder httputils.ContainerDecoder
	routes  []router.Route
-	cgroup2 bool
}

// NewRouter initializes a new container router
-func NewRouter(b Backend, decoder httputils.ContainerDecoder, cgroup2 bool) router.Router {
+func NewRouter(b Backend, decoder httputils.ContainerDecoder) router.Router {
	r := &containerRouter{
		backend: b,
		decoder: decoder,
-		cgroup2: cgroup2,
	}
r.initRoutes()
return r
@@ -40,8 +38,8 @@ func (r *containerRouter) initRoutes() {
router.NewGetRoute("/containers/{name:.*}/changes", r.getContainersChanges),
router.NewGetRoute("/containers/{name:.*}/json", r.getContainersByName),
router.NewGetRoute("/containers/{name:.*}/top", r.getContainersTop),
router.NewGetRoute("/containers/{name:.*}/logs", r.getContainersLogs),
router.NewGetRoute("/containers/{name:.*}/stats", r.getContainersStats),
router.NewGetRoute("/containers/{name:.*}/logs", r.getContainersLogs, router.WithCancel),
router.NewGetRoute("/containers/{name:.*}/stats", r.getContainersStats, router.WithCancel),
router.NewGetRoute("/containers/{name:.*}/attach/ws", r.wsContainersAttach),
router.NewGetRoute("/exec/{id:.*}/json", r.getExecByID),
router.NewGetRoute("/containers/{name:.*}/archive", r.getContainersArchive),
@@ -53,15 +51,16 @@ func (r *containerRouter) initRoutes() {
router.NewPostRoute("/containers/{name:.*}/restart", r.postContainersRestart),
router.NewPostRoute("/containers/{name:.*}/start", r.postContainersStart),
router.NewPostRoute("/containers/{name:.*}/stop", r.postContainersStop),
router.NewPostRoute("/containers/{name:.*}/wait", r.postContainersWait),
router.NewPostRoute("/containers/{name:.*}/wait", r.postContainersWait, router.WithCancel),
router.NewPostRoute("/containers/{name:.*}/resize", r.postContainersResize),
router.NewPostRoute("/containers/{name:.*}/attach", r.postContainersAttach),
router.NewPostRoute("/containers/{name:.*}/copy", r.postContainersCopy), // Deprecated since 1.8, Errors out since 1.12
router.NewPostRoute("/containers/{name:.*}/exec", r.postContainerExecCreate),
router.NewPostRoute("/exec/{name:.*}/start", r.postContainerExecStart),
router.NewPostRoute("/exec/{name:.*}/resize", r.postContainerExecResize),
router.NewPostRoute("/containers/{name:.*}/rename", r.postContainerRename),
router.NewPostRoute("/containers/{name:.*}/update", r.postContainerUpdate),
router.NewPostRoute("/containers/prune", r.postContainersPrune),
router.NewPostRoute("/containers/prune", r.postContainersPrune, router.WithCancel),
router.NewPostRoute("/commit", r.postCommit),
// PUT
router.NewPutRoute("/containers/{name:.*}/archive", r.putContainersArchive),


@@ -6,30 +6,21 @@ import (
"fmt"
"io"
"net/http"
"runtime"
"strconv"
"strings"
"syscall"
"github.com/containerd/log"
"github.com/containerd/platforms"
"github.com/docker/docker/api/server/httpstatus"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/mount"
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/api/types/versions"
containerpkg "github.com/docker/docker/container"
networkSettings "github.com/docker/docker/daemon/network"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/libnetwork/netlabel"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/runconfig"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/docker/docker/pkg/signal"
"github.com/pkg/errors"
"go.opentelemetry.io/otel"
"github.com/sirupsen/logrus"
"golang.org/x/net/websocket"
)
@@ -42,31 +33,29 @@ func (s *containerRouter) postCommit(ctx context.Context, w http.ResponseWriter,
return err
}
-	// FIXME(thaJeztah): change this to unmarshal just [container.Config]:
-	// The commit endpoint accepts a [container.Config], but the decoder uses a
-	// [container.CreateRequest], which is a superset, and also contains
-	// [container.HostConfig] and [network.NetworkConfig]. Those structs
-	// are discarded here, but decoder.DecodeConfig also performs validation,
-	// so a request containing those additional fields would result in a
-	// validation error.
+	// TODO: remove pause arg, and always pause in backend
+	pause := httputils.BoolValue(r, "pause")
+	version := httputils.VersionFromContext(ctx)
+	if r.FormValue("pause") == "" && versions.GreaterThanOrEqualTo(version, "1.13") {
+		pause = true
+	}

	config, _, _, err := s.decoder.DecodeConfig(r.Body)
-	if err != nil && !errors.Is(err, io.EOF) { // Do not fail if body is empty.
+	if err != nil && err != io.EOF { // Do not fail if body is empty.
		return err
	}

-	ref, err := httputils.RepoTagReference(r.Form.Get("repo"), r.Form.Get("tag"))
-	if err != nil {
-		return errdefs.InvalidParameter(err)
-	}

-	imgID, err := s.backend.CreateImageFromContainer(ctx, r.Form.Get("container"), &backend.CreateImageConfig{
-		Pause: httputils.BoolValueOrDefault(r, "pause", true), // TODO(dnephin): remove pause arg, and always pause in backend
-		Tag:   ref,
+	commitCfg := &backend.CreateImageConfig{
+		Pause:   pause,
+		Repo:    r.Form.Get("repo"),
+		Tag:     r.Form.Get("tag"),
		Author:  r.Form.Get("author"),
		Comment: r.Form.Get("comment"),
		Config:  config,
		Changes: r.Form["changes"],
-	})
+	}

+	imgID, err := s.backend.CreateImageFromContainer(r.Form.Get("container"), commitCfg)
if err != nil {
return err
}
@@ -83,7 +72,7 @@ func (s *containerRouter) getContainersJSON(ctx context.Context, w http.Response
return err
}
config := &container.ListOptions{
config := &types.ContainerListOptions{
All: httputils.BoolValue(r, "all"),
Size: httputils.BoolValue(r, "size"),
Since: r.Form.Get("since"),
@@ -99,20 +88,11 @@ func (s *containerRouter) getContainersJSON(ctx context.Context, w http.Response
config.Limit = limit
}
-	containers, err := s.backend.Containers(ctx, config)
+	containers, err := s.backend.Containers(config)
if err != nil {
return err
}
-	version := httputils.VersionFromContext(ctx)
-	if versions.LessThan(version, "1.46") {
-		for _, c := range containers {
-			// Ignore HostConfig.Annotations because it was added in API v1.46.
-			c.HostConfig.Annotations = nil
-		}
-	}
return httputils.WriteJSON(w, http.StatusOK, containers)
}
@@ -125,25 +105,14 @@ func (s *containerRouter) getContainersStats(ctx context.Context, w http.Respons
if !stream {
w.Header().Set("Content-Type", "application/json")
}
-	var oneShot bool
-	if versions.GreaterThanOrEqualTo(httputils.VersionFromContext(ctx), "1.41") {
-		oneShot = httputils.BoolValueOrDefault(r, "one-shot", false)
-	}
+	config := &backend.ContainerStatsConfig{
+		Stream:    stream,
+		OutStream: w,
+		Version:   httputils.VersionFromContext(ctx),
	}

-	return s.backend.ContainerStats(ctx, vars["name"], &backend.ContainerStatsConfig{
-		Stream:  stream,
-		OneShot: oneShot,
-		OutStream: func() io.Writer {
-			// Assume that when this is called the request is OK.
-			w.WriteHeader(http.StatusOK)
-			if !stream {
-				return w
-			}
-			wf := ioutils.NewWriteFlusher(w)
-			wf.Flush()
-			return wf
-		},
-	})
+	return s.backend.ContainerStats(ctx, vars["name"], config)
}
func (s *containerRouter) getContainersLogs(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
@@ -162,7 +131,7 @@ func (s *containerRouter) getContainersLogs(ctx context.Context, w http.Response
}
containerName := vars["name"]
logsConfig := &container.LogsOptions{
logsConfig := &types.ContainerLogsOptions{
Follow: httputils.BoolValue(r, "follow"),
Timestamps: httputils.BoolValue(r, "timestamps"),
Since: r.Form.Get("since"),
@@ -178,12 +147,6 @@ func (s *containerRouter) getContainersLogs(ctx context.Context, w http.Response
return err
}
-	contentType := types.MediaTypeRawStream
-	if !tty && versions.GreaterThanOrEqualTo(httputils.VersionFromContext(ctx), "1.42") {
-		contentType = types.MediaTypeMultiplexedStream
-	}
-	w.Header().Set("Content-Type", contentType)
// if has a tty, we're not muxing streams. if it doesn't, we are. simple.
// this is the point of no return for writing a response. once we call
// WriteLogStream, the response has been started and errors will be
@@ -193,30 +156,51 @@ func (s *containerRouter) getContainersLogs(ctx context.Context, w http.Response
}
func (s *containerRouter) getContainersExport(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
-	return s.backend.ContainerExport(ctx, vars["name"], w)
+	return s.backend.ContainerExport(vars["name"], w)
}
-func (s *containerRouter) postContainersStart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
-	ctx, span := otel.Tracer("").Start(ctx, "containerRouter.postContainersStart")
-	defer span.End()
+type bodyOnStartError struct{}

+func (bodyOnStartError) Error() string {
+	return "starting container with non-empty request body was deprecated since API v1.22 and removed in v1.24"
+}

+func (bodyOnStartError) InvalidParameter() {}

+func (s *containerRouter) postContainersStart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	// If contentLength is -1, we can assume chunked encoding
	// or more technically that the length is unknown
	// https://golang.org/src/pkg/net/http/request.go#L139
	// net/http otherwise seems to swallow any headers related to chunked encoding
	// including r.TransferEncoding
	// allow a nil body for backwards compatibility
	//
+	version := httputils.VersionFromContext(ctx)
+	var hostConfig *container.HostConfig
	// A non-nil json object is at least 7 characters.
	if r.ContentLength > 7 || r.ContentLength == -1 {
-		return errdefs.InvalidParameter(errors.New("starting container with non-empty request body was deprecated since API v1.22 and removed in v1.24"))
+		if versions.GreaterThanOrEqualTo(version, "1.24") {
+			return bodyOnStartError{}
+		}

+		if err := httputils.CheckForJSON(r); err != nil {
+			return err
+		}

+		c, err := s.decoder.DecodeHostConfig(r.Body)
+		if err != nil {
+			return err
+		}
+		hostConfig = c
	}

	if err := httputils.ParseForm(r); err != nil {
		return err
	}

-	if err := s.backend.ContainerStart(ctx, vars["name"], r.Form.Get("checkpoint"), r.Form.Get("checkpoint-dir")); err != nil {
+	checkpoint := r.Form.Get("checkpoint")
+	checkpointDir := r.Form.Get("checkpoint-dir")
+	if err := s.backend.ContainerStart(vars["name"], hostConfig, checkpoint, checkpointDir); err != nil {
		return err
	}
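// Editor's note: an illustrative reading of the ContentLength heuristic used
// above (the threshold is the code's own; the sample bodies are not from the
// diff):
//
//	// r.ContentLength == -1 : chunked/unknown length -> try to decode a body
//	// r.ContentLength <= 7  : treated as a nil body ("", "null", "{}", ...);
//	//                         the smallest object carrying a field, {"a":1},
//	//                         is 7 bytes
//	// r.ContentLength >  7  : a legacy HostConfig body (rejected on API >= 1.24)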
@@ -229,37 +213,52 @@ func (s *containerRouter) postContainersStop(ctx context.Context, w http.Respons
return err
}
-	var (
-		options container.StopOptions
-		version = httputils.VersionFromContext(ctx)
-	)
-	if versions.GreaterThanOrEqualTo(version, "1.42") {
-		options.Signal = r.Form.Get("signal")
-	}
+	var seconds *int
	if tmpSeconds := r.Form.Get("t"); tmpSeconds != "" {
		valSeconds, err := strconv.Atoi(tmpSeconds)
		if err != nil {
			return err
		}
-		options.Timeout = &valSeconds
+		seconds = &valSeconds
	}

-	if err := s.backend.ContainerStop(ctx, vars["name"], options); err != nil {
+	if err := s.backend.ContainerStop(vars["name"], seconds); err != nil {
return err
}
w.WriteHeader(http.StatusNoContent)
return nil
}
-func (s *containerRouter) postContainersKill(_ context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *containerRouter) postContainersKill(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	if err := httputils.ParseForm(r); err != nil {
		return err
	}

+	var sig syscall.Signal
	name := vars["name"]

-	if err := s.backend.ContainerKill(name, r.Form.Get("signal")); err != nil {
-		return errors.Wrapf(err, "cannot kill container: %s", name)
+	// If we have a signal, look at it. Otherwise, do nothing
+	if sigStr := r.Form.Get("signal"); sigStr != "" {
+		var err error
+		if sig, err = signal.ParseSignal(sigStr); err != nil {
+			return errdefs.InvalidParameter(err)
+		}
+	}

+	if err := s.backend.ContainerKill(name, uint64(sig)); err != nil {
+		var isStopped bool
+		if errdefs.IsConflict(err) {
+			isStopped = true
+		}

+		// Return error that's not caused because the container is stopped.
+		// Return error if the container is not running and the api is >= 1.20
+		// to keep backwards compatibility.
+		version := httputils.VersionFromContext(ctx)
+		if versions.GreaterThanOrEqualTo(version, "1.20") || !isStopped {
+			return errors.Wrapf(err, "Cannot kill container: %s", name)
+		}
}
w.WriteHeader(http.StatusNoContent)
@@ -271,26 +270,21 @@ func (s *containerRouter) postContainersRestart(ctx context.Context, w http.Resp
return err
}
-	var (
-		options container.StopOptions
-		version = httputils.VersionFromContext(ctx)
-	)
-	if versions.GreaterThanOrEqualTo(version, "1.42") {
-		options.Signal = r.Form.Get("signal")
-	}
+	var seconds *int
	if tmpSeconds := r.Form.Get("t"); tmpSeconds != "" {
		valSeconds, err := strconv.Atoi(tmpSeconds)
		if err != nil {
			return err
		}
-		options.Timeout = &valSeconds
+		seconds = &valSeconds
	}

-	if err := s.backend.ContainerRestart(ctx, vars["name"], options); err != nil {
+	if err := s.backend.ContainerRestart(vars["name"], seconds); err != nil {
return err
}
w.WriteHeader(http.StatusNoContent)
return nil
}
@@ -335,21 +329,18 @@ func (s *containerRouter) postContainersWait(ctx context.Context, w http.Respons
if err := httputils.ParseForm(r); err != nil {
return err
}
if v := r.Form.Get("condition"); v != "" {
switch container.WaitCondition(v) {
case container.WaitConditionNotRunning:
waitCondition = containerpkg.WaitConditionNotRunning
case container.WaitConditionNextExit:
waitCondition = containerpkg.WaitConditionNextExit
case container.WaitConditionRemoved:
waitCondition = containerpkg.WaitConditionRemoved
legacyRemovalWaitPre134 = versions.LessThan(version, "1.34")
default:
return errdefs.InvalidParameter(errors.Errorf("invalid condition: %q", v))
}
switch container.WaitCondition(r.Form.Get("condition")) {
case container.WaitConditionNextExit:
waitCondition = containerpkg.WaitConditionNextExit
case container.WaitConditionRemoved:
waitCondition = containerpkg.WaitConditionRemoved
legacyRemovalWaitPre134 = versions.LessThan(version, "1.34")
}
}
// Note: the context should get canceled if the client closes the
// connection since this handler has been wrapped by the
// router.WithCancel() wrapper.
waitC, err := s.backend.ContainerWait(ctx, vars["name"], waitCondition)
if err != nil {
return err
@@ -376,19 +367,19 @@ func (s *containerRouter) postContainersWait(ctx context.Context, w http.Respons
return nil
}
-	var waitError *container.WaitExitError
+	var waitError *container.ContainerWaitOKBodyError
	if status.Err() != nil {
-		waitError = &container.WaitExitError{Message: status.Err().Error()}
+		waitError = &container.ContainerWaitOKBodyError{Message: status.Err().Error()}
	}

-	return json.NewEncoder(w).Encode(&container.WaitResponse{
+	return json.NewEncoder(w).Encode(&container.ContainerWaitOKBody{
		StatusCode: int64(status.ExitCode()),
		Error:      waitError,
	})
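// Editor's note: an illustrative request against this endpoint (condition
// names follow the api/types/container wait-condition constants; the id and
// exit code are made up):
//
//	POST /v1.30/containers/abc123/wait?condition=next-exit
//	// blocks until the container's next exit, then returns e.g.
//	//   {"StatusCode":0,"Error":null}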
}
func (s *containerRouter) getContainersChanges(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
-	changes, err := s.backend.ContainerChanges(ctx, vars["name"])
+	changes, err := s.backend.ContainerChanges(vars["name"])
if err != nil {
return err
}
@@ -427,26 +418,15 @@ func (s *containerRouter) postContainerUpdate(ctx context.Context, w http.Respon
if err := httputils.ParseForm(r); err != nil {
return err
}
-	var updateConfig container.UpdateConfig
-	if err := httputils.ReadJSON(r, &updateConfig); err != nil {
+	if err := httputils.CheckForJSON(r); err != nil {
		return err
	}

-	if versions.LessThan(httputils.VersionFromContext(ctx), "1.40") {
-		updateConfig.PidsLimit = nil
-	}
-	if versions.GreaterThanOrEqualTo(httputils.VersionFromContext(ctx), "1.42") {
-		// Ignore KernelMemory removed in API 1.42.
-		updateConfig.KernelMemory = 0
-	}
+	var updateConfig container.UpdateConfig
-	if updateConfig.PidsLimit != nil && *updateConfig.PidsLimit <= 0 {
-		// Both `0` and `-1` are accepted to set "unlimited" when updating.
-		// Historically, any negative value was accepted, so treat them as
-		// "unlimited" as well.
-		var unlimited int64
-		updateConfig.PidsLimit = &unlimited
-	}
+	decoder := json.NewDecoder(r.Body)
+	if err := decoder.Decode(&updateConfig); err != nil {
return err
}
hostConfig := &container.HostConfig{
@@ -475,411 +455,37 @@ func (s *containerRouter) postContainersCreate(ctx context.Context, w http.Respo
config, hostConfig, networkingConfig, err := s.decoder.DecodeConfig(r.Body)
if err != nil {
if errors.Is(err, io.EOF) {
return errdefs.InvalidParameter(errors.New("invalid JSON: got EOF while reading request body"))
}
return err
}
if config == nil {
return errdefs.InvalidParameter(runconfig.ErrEmptyConfig)
}
if hostConfig == nil {
hostConfig = &container.HostConfig{}
}
if networkingConfig == nil {
networkingConfig = &network.NetworkingConfig{}
}
if networkingConfig.EndpointsConfig == nil {
networkingConfig.EndpointsConfig = make(map[string]*network.EndpointSettings)
}
// The NetworkMode "default" is used as a way to express a container should
// be attached to the OS-dependant default network, in an OS-independent
// way. Doing this conversion as soon as possible ensures we have less
// NetworkMode to handle down the path (including in the
// backward-compatibility layer we have just below).
//
// Note that this is not the only place where this conversion has to be
// done (as there are various other places where containers get created).
if hostConfig.NetworkMode == "" || hostConfig.NetworkMode.IsDefault() {
hostConfig.NetworkMode = networkSettings.DefaultNetwork
if nw, ok := networkingConfig.EndpointsConfig[network.NetworkDefault]; ok {
networkingConfig.EndpointsConfig[hostConfig.NetworkMode.NetworkName()] = nw
delete(networkingConfig.EndpointsConfig, network.NetworkDefault)
}
}
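// Editor's note: an illustrative sketch of the normalization above, assuming
// networkSettings.DefaultNetwork resolves to "bridge" on Linux ("nat" on
// Windows):
//
//	hostConfig.NetworkMode = "default" // or ""
//	// after the block above:
//	//   hostConfig.NetworkMode == "bridge"
//	//   an EndpointsConfig entry keyed "default" (if any) is re-keyed
//	//   to EndpointsConfig["bridge"]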
version := httputils.VersionFromContext(ctx)
+	adjustCPUShares := versions.LessThan(version, "1.19")
// When using API 1.24 and under, the client is responsible for removing the container
if versions.LessThan(version, "1.25") {
if hostConfig != nil && versions.LessThan(version, "1.25") {
hostConfig.AutoRemove = false
}
if versions.LessThan(version, "1.40") {
// Ignore BindOptions.NonRecursive because it was added in API 1.40.
for _, m := range hostConfig.Mounts {
if bo := m.BindOptions; bo != nil {
bo.NonRecursive = false
}
}
// Ignore KernelMemoryTCP because it was added in API 1.40.
hostConfig.KernelMemoryTCP = 0
// Older clients (API < 1.40) expects the default to be shareable, make them happy
if hostConfig.IpcMode.IsEmpty() {
hostConfig.IpcMode = container.IPCModeShareable
}
}
if versions.LessThan(version, "1.41") {
// Older clients expect the default to be "host" on cgroup v1 hosts
if !s.cgroup2 && hostConfig.CgroupnsMode.IsEmpty() {
hostConfig.CgroupnsMode = container.CgroupnsModeHost
}
}
var platform *ocispec.Platform
if versions.GreaterThanOrEqualTo(version, "1.41") {
if v := r.Form.Get("platform"); v != "" {
p, err := platforms.Parse(v)
if err != nil {
return errdefs.InvalidParameter(err)
}
platform = &p
}
}
if versions.LessThan(version, "1.42") {
for _, m := range hostConfig.Mounts {
// Ignore BindOptions.CreateMountpoint because it was added in API 1.42.
if bo := m.BindOptions; bo != nil {
bo.CreateMountpoint = false
}
// These combinations are invalid, but weren't validated in API < 1.42.
// We reset them here, so that validation doesn't produce an error.
if o := m.VolumeOptions; o != nil && m.Type != mount.TypeVolume {
m.VolumeOptions = nil
}
if o := m.TmpfsOptions; o != nil && m.Type != mount.TypeTmpfs {
m.TmpfsOptions = nil
}
if bo := m.BindOptions; bo != nil {
// Ignore BindOptions.CreateMountpoint because it was added in API 1.42.
bo.CreateMountpoint = false
}
}
if runtime.GOOS == "linux" {
// ConsoleSize is not respected by Linux daemon before API 1.42
hostConfig.ConsoleSize = [2]uint{0, 0}
}
}
if versions.GreaterThanOrEqualTo(version, "1.42") {
// Ignore KernelMemory removed in API 1.42.
hostConfig.KernelMemory = 0
for _, m := range hostConfig.Mounts {
if o := m.VolumeOptions; o != nil && m.Type != mount.TypeVolume {
return errdefs.InvalidParameter(fmt.Errorf("VolumeOptions must not be specified on mount type %q", m.Type))
}
if o := m.BindOptions; o != nil && m.Type != mount.TypeBind {
return errdefs.InvalidParameter(fmt.Errorf("BindOptions must not be specified on mount type %q", m.Type))
}
if o := m.TmpfsOptions; o != nil && m.Type != mount.TypeTmpfs {
return errdefs.InvalidParameter(fmt.Errorf("TmpfsOptions must not be specified on mount type %q", m.Type))
}
}
}
if versions.LessThan(version, "1.43") {
// Ignore Annotations because it was added in API v1.43.
hostConfig.Annotations = nil
}
defaultReadOnlyNonRecursive := false
if versions.LessThan(version, "1.44") {
if config.Healthcheck != nil {
// StartInterval was added in API 1.44
config.Healthcheck.StartInterval = 0
}
// Set ReadOnlyNonRecursive to true because it was added in API 1.44
// Before that all read-only mounts were non-recursive.
// Keep that behavior for clients on older APIs.
defaultReadOnlyNonRecursive = true
for _, m := range hostConfig.Mounts {
if m.Type == mount.TypeBind {
if m.BindOptions != nil && m.BindOptions.ReadOnlyForceRecursive {
// NOTE: that technically this is a breaking change for older
// API versions, and we should ignore the new field.
// However, this option may be incorrectly set by a client with
// the expectation that the failing to apply recursive read-only
// is enforced, so we decided to produce an error instead,
// instead of silently ignoring.
return errdefs.InvalidParameter(errors.New("BindOptions.ReadOnlyForceRecursive needs API v1.44 or newer"))
}
}
}
// Creating a container connected to several networks is not supported until v1.44.
if len(networkingConfig.EndpointsConfig) > 1 {
l := make([]string, 0, len(networkingConfig.EndpointsConfig))
for k := range networkingConfig.EndpointsConfig {
l = append(l, k)
}
return errdefs.InvalidParameter(errors.Errorf("Container cannot be created with multiple network endpoints: %s", strings.Join(l, ", ")))
}
}
if versions.LessThan(version, "1.45") {
for _, m := range hostConfig.Mounts {
if m.VolumeOptions != nil && m.VolumeOptions.Subpath != "" {
return errdefs.InvalidParameter(errors.New("VolumeOptions.Subpath needs API v1.45 or newer"))
}
}
}
var warnings []string
if warn, err := handleMACAddressBC(config, hostConfig, networkingConfig, version); err != nil {
return err
} else if warn != "" {
warnings = append(warnings, warn)
}
if warn, err := handleSysctlBC(hostConfig, networkingConfig, version); err != nil {
return err
} else if warn != "" {
warnings = append(warnings, warn)
}
if hostConfig.PidsLimit != nil && *hostConfig.PidsLimit <= 0 {
// Don't set a limit if either no limit was specified, or "unlimited" was
// explicitly set.
// Both `0` and `-1` are accepted as "unlimited", and historically any
// negative value was accepted, so treat those as "unlimited" as well.
hostConfig.PidsLimit = nil
}
-	ccr, err := s.backend.ContainerCreate(ctx, backend.ContainerCreateConfig{
-		Name:                        name,
-		Config:                      config,
-		HostConfig:                  hostConfig,
-		NetworkingConfig:            networkingConfig,
-		Platform:                    platform,
-		DefaultReadOnlyNonRecursive: defaultReadOnlyNonRecursive,
+	ccr, err := s.backend.ContainerCreate(types.ContainerCreateConfig{
+		Name:             name,
+		Config:           config,
+		HostConfig:       hostConfig,
+		NetworkingConfig: networkingConfig,
+		AdjustCPUShares:  adjustCPUShares,
	})
if err != nil {
return err
}
ccr.Warnings = append(ccr.Warnings, warnings...)
return httputils.WriteJSON(w, http.StatusCreated, ccr)
}
// handleMACAddressBC takes care of backward-compatibility for the container-wide MAC address by mutating the
// networkingConfig to set the endpoint-specific MACAddress field introduced in API v1.44. It returns a warning message
// or an error if the container-wide field was specified for API >= v1.44.
func handleMACAddressBC(config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, version string) (string, error) {
deprecatedMacAddress := config.MacAddress //nolint:staticcheck // ignore SA1019: field is deprecated, but still used on API < v1.44.
// For older versions of the API, migrate the container-wide MAC address to EndpointsConfig.
if versions.LessThan(version, "1.44") {
if deprecatedMacAddress == "" {
// If a MAC address is supplied in EndpointsConfig, discard it because the old API
// would have ignored it.
for _, ep := range networkingConfig.EndpointsConfig {
ep.MacAddress = ""
}
return "", nil
}
if !hostConfig.NetworkMode.IsBridge() && !hostConfig.NetworkMode.IsUserDefined() {
return "", runconfig.ErrConflictContainerNetworkAndMac
}
epConfig, err := epConfigForNetMode(version, hostConfig.NetworkMode, networkingConfig)
if err != nil {
return "", err
}
epConfig.MacAddress = deprecatedMacAddress
return "", nil
}
// The container-wide MacAddress parameter is deprecated and should now be specified in EndpointsConfig.
if deprecatedMacAddress == "" {
return "", nil
}
var warning string
if hostConfig.NetworkMode.IsBridge() || hostConfig.NetworkMode.IsUserDefined() {
ep, err := epConfigForNetMode(version, hostConfig.NetworkMode, networkingConfig)
if err != nil {
return "", errors.Wrap(err, "unable to migrate container-wide MAC address to a specific network")
}
// ep is the endpoint that needs the container-wide MAC address; migrate the address
// to it, or bail out if there's a mismatch.
if ep.MacAddress == "" {
ep.MacAddress = deprecatedMacAddress
} else if ep.MacAddress != deprecatedMacAddress {
return "", errdefs.InvalidParameter(errors.New("the container-wide MAC address must match the endpoint-specific MAC address for the main network, or be left empty"))
}
}
warning = "The container-wide MacAddress field is now deprecated. It should be specified in EndpointsConfig instead."
config.MacAddress = "" //nolint:staticcheck // ignore SA1019: field is deprecated, but still used on API < v1.44.
return warning, nil
}
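// Editor's note: a sketch of the API >= 1.44 migration path above, mirroring
// the "new api migrate ctr-wide mac to new ep" test case further down:
//
//	cfg := &container.Config{MacAddress: "11:22:33:44:55:66"}
//	hostCfg := &container.HostConfig{NetworkMode: "aNetName"}
//	netCfg := &network.NetworkingConfig{EndpointsConfig: map[string]*network.EndpointSettings{}}
//	warning, err := handleMACAddressBC(cfg, hostCfg, netCfg, "1.44")
//	// err == nil; warning flags the deprecated container-wide field;
//	// netCfg.EndpointsConfig["aNetName"].MacAddress == "11:22:33:44:55:66",
//	// and cfg.MacAddress has been cleared.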
// handleSysctlBC migrates top level network endpoint-specific '--sysctl'
// settings to an DriverOpts for an endpoint. This is necessary because sysctls
// are applied during container task creation, but sysctls that name an interface
// (for example 'net.ipv6.conf.eth0.forwarding') cannot be applied until the
// interface has been created. So, these settings are removed from hostConfig.Sysctls
// and added to DriverOpts[netlabel.EndpointSysctls].
//
// Because interface names ('ethN') are allocated sequentially, and the order of
// network connections is not deterministic on container restart, only 'eth0'
// would work reliably in a top-level '--sysctl' option, and then only when
// there's a single initial network connection. So, settings for 'eth0' are
// migrated to the primary interface, identified by 'hostConfig.NetworkMode'.
// Settings for other interfaces are treated as errors.
//
// In the DriverOpts, because the interface name cannot be determined in advance, the
// interface name is replaced by "IFNAME". For example, 'net.ipv6.conf.eth0.forwarding'
// becomes 'net.ipv6.conf.IFNAME.forwarding'. The value in DriverOpts is a
// comma-separated list.
//
// A warning is generated when settings are migrated.
func handleSysctlBC(
hostConfig *container.HostConfig,
netConfig *network.NetworkingConfig,
version string,
) (string, error) {
if !hostConfig.NetworkMode.IsPrivate() {
return "", nil
}
var ep *network.EndpointSettings
var toDelete []string
var netIfSysctls []string
for k, v := range hostConfig.Sysctls {
// If the sysctl name matches "net.*.*.eth0.*" ...
if spl := strings.SplitN(k, ".", 5); len(spl) == 5 && spl[0] == "net" && strings.HasPrefix(spl[3], "eth") {
netIfSysctl := fmt.Sprintf("net.%s.%s.IFNAME.%s=%s", spl[1], spl[2], spl[4], v)
// Find the EndpointConfig to migrate settings to, if not already found.
if ep == nil {
/* TODO(robmry) - apply this to the API version used in 28.0.0
// Per-endpoint sysctls were introduced in API version 1.46. Migration is
// needed, but refuse to do it automatically for newer versions of the API.
if versions.GreaterThan(version, "1.??") {
return "", fmt.Errorf("interface specific sysctl setting %q must be supplied using driver option '%s'",
k, netlabel.EndpointSysctls)
}
*/
var err error
ep, err = epConfigForNetMode(version, hostConfig.NetworkMode, netConfig)
if err != nil {
return "", fmt.Errorf("unable to find a network for sysctl %s: %w", k, err)
}
}
// Only try to migrate settings for "eth0", anything else would always
// have behaved unpredictably.
if spl[3] != "eth0" {
return "", fmt.Errorf(`unable to determine network endpoint for sysctl %s, use driver option '%s' to set per-interface sysctls`,
k, netlabel.EndpointSysctls)
}
// Prepare the migration.
toDelete = append(toDelete, k)
netIfSysctls = append(netIfSysctls, netIfSysctl)
}
}
if ep == nil {
return "", nil
}
newDriverOpt := strings.Join(netIfSysctls, ",")
warning := fmt.Sprintf(`Migrated sysctl %q to DriverOpts{%q:%q}.`,
strings.Join(toDelete, ","),
netlabel.EndpointSysctls, newDriverOpt)
// Append existing per-endpoint sysctls to the migrated sysctls (give priority
// to per-endpoint settings).
if ep.DriverOpts == nil {
ep.DriverOpts = map[string]string{}
}
if oldDriverOpt, ok := ep.DriverOpts[netlabel.EndpointSysctls]; ok {
newDriverOpt += "," + oldDriverOpt
}
ep.DriverOpts[netlabel.EndpointSysctls] = newDriverOpt
// Delete migrated settings from the top-level sysctls.
for _, k := range toDelete {
delete(hostConfig.Sysctls, k)
}
return warning, nil
}
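// Editor's note: a sketch of the migration above, mirroring the
// "migrate to new ep" test case further down:
//
//	hostCfg := &container.HostConfig{
//		NetworkMode: "mynet",
//		Sysctls: map[string]string{
//			"net.ipv6.conf.all.disable_ipv6": "0",
//			"net.ipv6.conf.eth0.forwarding":  "1",
//		},
//	}
//	netCfg := &network.NetworkingConfig{}
//	warning, _ := handleSysctlBC(hostCfg, netCfg, "1.46")
//	// hostCfg.Sysctls keeps only the "all" key; the eth0 setting moves to
//	// netCfg.EndpointsConfig["mynet"].DriverOpts[netlabel.EndpointSysctls]
//	// as "net.ipv6.conf.IFNAME.forwarding=1", and warning records the move.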
// epConfigForNetMode finds, or creates, an entry in netConfig.EndpointsConfig
// corresponding to nwMode.
//
// nwMode.NetworkName() may be the network's name, its id, or its short-id.
//
// The corresponding endpoint in netConfig.EndpointsConfig may be keyed on a
// different one of name/id/short-id. If there's any ambiguity (there are
// endpoints but the names don't match), return an error and do not create a new
// endpoint, because it might be a duplicate.
func epConfigForNetMode(
version string,
nwMode container.NetworkMode,
netConfig *network.NetworkingConfig,
) (*network.EndpointSettings, error) {
nwName := nwMode.NetworkName()
// It's always safe to create an EndpointsConfig entry under nwName if there are
// no entries already (because there can't be an entry for this network nwName
// refers to under any other name/short-id/id).
if len(netConfig.EndpointsConfig) == 0 {
es := &network.EndpointSettings{}
netConfig.EndpointsConfig = map[string]*network.EndpointSettings{
nwName: es,
}
return es, nil
}
// There cannot be more than one entry in EndpointsConfig with API < 1.44.
if versions.LessThan(version, "1.44") {
// No need to check for a match between NetworkMode and the names/ids in EndpointsConfig,
// the old version of the API would pick this network anyway.
for _, ep := range netConfig.EndpointsConfig {
return ep, nil
}
}
// There is existing endpoint config - if it's not indexed by NetworkMode.Name(), we
// can't tell which network the container-wide settings are intended for. NetworkMode,
// the keys in EndpointsConfig and the NetworkID in EndpointsConfig may mix network
// name/id/short-id. It's not safe to create EndpointsConfig under the NetworkMode
// name to store the container-wide setting, because that may result in two sets
// of EndpointsConfig for the same network and one set will be discarded later. So,
// reject the request ...
ep, ok := netConfig.EndpointsConfig[nwName]
if !ok {
return nil, errdefs.InvalidParameter(
errors.New("HostConfig.NetworkMode must match the identity of a network in NetworkSettings.Networks"))
}
return ep, nil
}
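// Editor's note: two illustrative calls covering the cases above (values
// mirror the TestEpConfigForNetMode cases further down):
//
//	// No endpoints yet: an entry is created under the NetworkMode name.
//	ep, _ := epConfigForNetMode("1.44", "mynet", &network.NetworkingConfig{})
//
//	// API >= 1.44 with an endpoint keyed by a different identifier: rejected,
//	// since it may be the same network under another name/id/short-id.
//	_, err := epConfigForNetMode("1.44", "mynet", &network.NetworkingConfig{
//		EndpointsConfig: map[string]*network.EndpointSettings{"shortid": {}},
//	})
//	// err: "HostConfig.NetworkMode must match the identity of a network ..."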
func (s *containerRouter) deleteContainers(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
name := vars["name"]
config := &backend.ContainerRmConfig{
config := &types.ContainerRmConfig{
ForceRemove: httputils.BoolValue(r, "force"),
RemoveVolume: httputils.BoolValue(r, "v"),
RemoveLink: httputils.BoolValue(r, "link"),
@@ -926,8 +532,7 @@ func (s *containerRouter) postContainersAttach(ctx context.Context, w http.Respo
return errdefs.InvalidParameter(errors.Errorf("error attaching to container %s, hijack connection missing", containerName))
}
-	contentType := types.MediaTypeRawStream
-	setupStreams := func(multiplexed bool, cancel func()) (io.ReadCloser, io.Writer, io.Writer, error) {
+	setupStreams := func() (io.ReadCloser, io.Writer, io.Writer, error) {
conn, _, err := hijacker.Hijack()
if err != nil {
return nil, nil, nil, err
@@ -937,18 +542,11 @@ func (s *containerRouter) postContainersAttach(ctx context.Context, w http.Respo
conn.Write([]byte{})
if upgrade {
if multiplexed && versions.GreaterThanOrEqualTo(httputils.VersionFromContext(ctx), "1.42") {
contentType = types.MediaTypeMultiplexedStream
}
// FIXME(thaJeztah): we should not ignore errors here; see https://github.com/moby/moby/pull/48359#discussion_r1725562802
fmt.Fprintf(conn, "HTTP/1.1 101 UPGRADED\r\nContent-Type: %v\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n", contentType)
fmt.Fprintf(conn, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n")
} else {
// FIXME(thaJeztah): we should not ignore errors here; see https://github.com/moby/moby/pull/48359#discussion_r1725562802
fmt.Fprint(conn, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n")
fmt.Fprintf(conn, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n")
}
go notifyClosed(ctx, conn, cancel)
closer := func() error {
httputils.CloseStreams(conn)
return nil
@@ -968,16 +566,16 @@ func (s *containerRouter) postContainersAttach(ctx context.Context, w http.Respo
}
	if err = s.backend.ContainerAttach(containerName, attachConfig); err != nil {
-		log.G(ctx).WithError(err).Errorf("Handler for %s %s returned error", r.Method, r.URL.Path)
+		logrus.Errorf("Handler for %s %s returned error: %v", r.Method, r.URL.Path, err)
		// Remember to close stream if error happens
		conn, _, errHijack := hijacker.Hijack()
-		if errHijack != nil {
-			log.G(ctx).WithError(err).Errorf("Handler for %s %s: unable to close stream; error when hijacking connection", r.Method, r.URL.Path)
-		} else {
-			statusCode := httpstatus.FromError(err)
+		if errHijack == nil {
+			statusCode := httputils.GetHTTPErrorStatusCode(err)
			statusText := http.StatusText(statusCode)
-			fmt.Fprintf(conn, "HTTP/1.1 %d %s\r\nContent-Type: %s\r\n\r\n%s\r\n", statusCode, statusText, contentType, err.Error())
+			fmt.Fprintf(conn, "HTTP/1.1 %d %s\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n%s\r\n", statusCode, statusText, err.Error())
			httputils.CloseStreams(conn)
+		} else {
+			logrus.Errorf("Error Hijacking: %v", err)
		}
	}
	return nil
@@ -997,7 +595,7 @@ func (s *containerRouter) wsContainersAttach(ctx context.Context, w http.Respons
version := httputils.VersionFromContext(ctx)
-	setupStreams := func(multiplexed bool, cancel func()) (io.ReadCloser, io.Writer, io.Writer, error) {
+	setupStreams := func() (io.ReadCloser, io.Writer, io.Writer, error) {
wsChan := make(chan *websocket.Conn)
h := func(conn *websocket.Conn) {
wsChan <- conn
@@ -1016,27 +614,18 @@ func (s *containerRouter) wsContainersAttach(ctx context.Context, w http.Respons
if versions.GreaterThanOrEqualTo(version, "1.28") {
conn.PayloadType = websocket.BinaryFrame
}
// TODO: Close notifications
return conn, conn, conn, nil
}
-	useStdin, useStdout, useStderr := true, true, true
-	if versions.GreaterThanOrEqualTo(version, "1.42") {
-		useStdin = httputils.BoolValue(r, "stdin")
-		useStdout = httputils.BoolValue(r, "stdout")
-		useStderr = httputils.BoolValue(r, "stderr")
-	}

	attachConfig := &backend.ContainerAttachConfig{
		GetStreams: setupStreams,
-		UseStdin:   useStdin,
-		UseStdout:  useStdout,
-		UseStderr:  useStderr,
		Logs:       httputils.BoolValue(r, "logs"),
		Stream:     httputils.BoolValue(r, "stream"),
		DetachKeys: detachKeys,
-		MuxStreams: false, // never multiplex, as we rely on websocket to manage distinct streams
+		UseStdin:   true,
+		UseStdout:  true,
+		UseStderr:  true,
+		MuxStreams: false, // TODO: this should be true since it's a single stream for both stdout and stderr
}
err = s.backend.ContainerAttach(containerName, attachConfig)
@@ -1044,9 +633,9 @@ func (s *containerRouter) wsContainersAttach(ctx context.Context, w http.Respons
select {
case <-started:
if err != nil {
log.G(ctx).Errorf("Error attaching websocket: %s", err)
logrus.Errorf("Error attaching websocket: %s", err)
} else {
log.G(ctx).Debug("websocket connection was closed by client")
logrus.Debug("websocket connection was closed by client")
}
return nil
default:
@@ -1061,7 +650,7 @@ func (s *containerRouter) postContainersPrune(ctx context.Context, w http.Respon
pruneFilters, err := filters.FromJSON(r.Form.Get("filters"))
if err != nil {
-		return err
+		return errdefs.InvalidParameter(err)
}
pruneReport, err := s.backend.ContainersPrune(ctx, pruneFilters)


@@ -1,352 +0,0 @@
package container
import (
"strings"
"testing"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/libnetwork/netlabel"
"gotest.tools/v3/assert"
is "gotest.tools/v3/assert/cmp"
)
func TestHandleMACAddressBC(t *testing.T) {
testcases := []struct {
name string
apiVersion string
ctrWideMAC string
networkMode container.NetworkMode
epConfig map[string]*network.EndpointSettings
expEpWithCtrWideMAC string
expEpWithNoMAC string
expCtrWideMAC string
expWarning string
expError string
}{
{
name: "old api ctr-wide mac mix id and name",
apiVersion: "1.43",
ctrWideMAC: "11:22:33:44:55:66",
networkMode: "aNetId",
epConfig: map[string]*network.EndpointSettings{"aNetName": {}},
expEpWithCtrWideMAC: "aNetName",
expCtrWideMAC: "11:22:33:44:55:66",
},
{
name: "old api clear ep mac",
apiVersion: "1.43",
networkMode: "aNetId",
epConfig: map[string]*network.EndpointSettings{"aNetName": {MacAddress: "11:22:33:44:55:66"}},
expEpWithNoMAC: "aNetName",
},
{
name: "old api no-network ctr-wide mac",
apiVersion: "1.43",
networkMode: "none",
ctrWideMAC: "11:22:33:44:55:66",
expError: "conflicting options: mac-address and the network mode",
expCtrWideMAC: "11:22:33:44:55:66",
},
{
name: "old api create ep",
apiVersion: "1.43",
networkMode: "aNetId",
ctrWideMAC: "11:22:33:44:55:66",
epConfig: map[string]*network.EndpointSettings{},
expEpWithCtrWideMAC: "aNetId",
expCtrWideMAC: "11:22:33:44:55:66",
},
{
name: "old api migrate ctr-wide mac",
apiVersion: "1.43",
ctrWideMAC: "11:22:33:44:55:66",
networkMode: "aNetName",
epConfig: map[string]*network.EndpointSettings{"aNetName": {}},
expEpWithCtrWideMAC: "aNetName",
expCtrWideMAC: "11:22:33:44:55:66",
},
{
name: "new api no macs",
apiVersion: "1.44",
networkMode: "aNetId",
epConfig: map[string]*network.EndpointSettings{"aNetName": {}},
},
{
name: "new api ep specific mac",
apiVersion: "1.44",
networkMode: "aNetName",
epConfig: map[string]*network.EndpointSettings{"aNetName": {MacAddress: "11:22:33:44:55:66"}},
},
{
name: "new api migrate ctr-wide mac to new ep",
apiVersion: "1.44",
ctrWideMAC: "11:22:33:44:55:66",
networkMode: "aNetName",
epConfig: map[string]*network.EndpointSettings{},
expEpWithCtrWideMAC: "aNetName",
expWarning: "The container-wide MacAddress field is now deprecated",
expCtrWideMAC: "",
},
{
name: "new api migrate ctr-wide mac to existing ep",
apiVersion: "1.44",
ctrWideMAC: "11:22:33:44:55:66",
networkMode: "aNetName",
epConfig: map[string]*network.EndpointSettings{"aNetName": {}},
expEpWithCtrWideMAC: "aNetName",
expWarning: "The container-wide MacAddress field is now deprecated",
expCtrWideMAC: "",
},
{
name: "new api mode vs name mismatch",
apiVersion: "1.44",
ctrWideMAC: "11:22:33:44:55:66",
networkMode: "aNetId",
epConfig: map[string]*network.EndpointSettings{"aNetName": {}},
expError: "unable to migrate container-wide MAC address to a specific network: HostConfig.NetworkMode must match the identity of a network in NetworkSettings.Networks",
expCtrWideMAC: "11:22:33:44:55:66",
},
{
name: "new api mac mismatch",
apiVersion: "1.44",
ctrWideMAC: "11:22:33:44:55:66",
networkMode: "aNetName",
epConfig: map[string]*network.EndpointSettings{"aNetName": {MacAddress: "00:11:22:33:44:55"}},
expError: "the container-wide MAC address must match the endpoint-specific MAC address",
expCtrWideMAC: "11:22:33:44:55:66",
},
}
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
cfg := &container.Config{
MacAddress: tc.ctrWideMAC, //nolint:staticcheck // ignore SA1019: field is deprecated, but still used on API < v1.44.
}
hostCfg := &container.HostConfig{
NetworkMode: tc.networkMode,
}
epConfig := make(map[string]*network.EndpointSettings, len(tc.epConfig))
for k, v := range tc.epConfig {
v := *v
epConfig[k] = &v
}
netCfg := &network.NetworkingConfig{
EndpointsConfig: epConfig,
}
warning, err := handleMACAddressBC(cfg, hostCfg, netCfg, tc.apiVersion)
if tc.expError == "" {
assert.Check(t, err)
} else {
assert.Check(t, is.ErrorContains(err, tc.expError))
}
if tc.expWarning == "" {
assert.Check(t, is.Equal(warning, ""))
} else {
assert.Check(t, is.Contains(warning, tc.expWarning))
}
if tc.expEpWithCtrWideMAC != "" {
got := netCfg.EndpointsConfig[tc.expEpWithCtrWideMAC].MacAddress
assert.Check(t, is.Equal(got, tc.ctrWideMAC))
}
if tc.expEpWithNoMAC != "" {
got := netCfg.EndpointsConfig[tc.expEpWithNoMAC].MacAddress
assert.Check(t, is.Equal(got, ""))
}
gotCtrWideMAC := cfg.MacAddress //nolint:staticcheck // ignore SA1019: field is deprecated, but still used on API < v1.44.
assert.Check(t, is.Equal(gotCtrWideMAC, tc.expCtrWideMAC))
})
}
}
func TestEpConfigForNetMode(t *testing.T) {
testcases := []struct {
name string
apiVersion string
networkMode string
epConfig map[string]*network.EndpointSettings
expEpId string
expNumEps int
expError bool
}{
{
name: "old api no eps",
apiVersion: "1.43",
networkMode: "mynet",
expNumEps: 1,
},
{
name: "new api no eps",
apiVersion: "1.44",
networkMode: "mynet",
expNumEps: 1,
},
{
name: "old api with ep",
apiVersion: "1.43",
networkMode: "mynet",
epConfig: map[string]*network.EndpointSettings{
"anything": {EndpointID: "epone"},
},
expEpId: "epone",
expNumEps: 1,
},
{
name: "new api with matching ep",
apiVersion: "1.44",
networkMode: "mynet",
epConfig: map[string]*network.EndpointSettings{
"mynet": {EndpointID: "epone"},
},
expEpId: "epone",
expNumEps: 1,
},
{
name: "new api with mismatched ep",
apiVersion: "1.44",
networkMode: "mynet",
epConfig: map[string]*network.EndpointSettings{
"shortid": {EndpointID: "epone"},
},
expError: true,
},
}
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
netConfig := &network.NetworkingConfig{
EndpointsConfig: tc.epConfig,
}
ep, err := epConfigForNetMode(tc.apiVersion, container.NetworkMode(tc.networkMode), netConfig)
if tc.expError {
assert.Check(t, is.ErrorContains(err, "HostConfig.NetworkMode must match the identity of a network in NetworkSettings.Networks"))
} else {
assert.Assert(t, err)
assert.Check(t, is.Equal(ep.EndpointID, tc.expEpId))
assert.Check(t, is.Len(netConfig.EndpointsConfig, tc.expNumEps))
}
})
}
}
func TestHandleSysctlBC(t *testing.T) {
testcases := []struct {
name string
apiVersion string
networkMode string
sysctls map[string]string
epConfig map[string]*network.EndpointSettings
expEpSysctls []string
expSysctls map[string]string
expWarningContains []string
expError string
}{
{
name: "migrate to new ep",
apiVersion: "1.46",
networkMode: "mynet",
sysctls: map[string]string{
"net.ipv6.conf.all.disable_ipv6": "0",
"net.ipv6.conf.eth0.accept_ra": "2",
"net.ipv6.conf.eth0.forwarding": "1",
},
expSysctls: map[string]string{
"net.ipv6.conf.all.disable_ipv6": "0",
},
expEpSysctls: []string{"net.ipv6.conf.IFNAME.forwarding=1", "net.ipv6.conf.IFNAME.accept_ra=2"},
expWarningContains: []string{
"Migrated",
"net.ipv6.conf.eth0.accept_ra", "net.ipv6.conf.IFNAME.accept_ra=2",
"net.ipv6.conf.eth0.forwarding", "net.ipv6.conf.IFNAME.forwarding=1",
},
},
{
name: "migrate nothing",
apiVersion: "1.46",
networkMode: "mynet",
sysctls: map[string]string{
"net.ipv6.conf.all.disable_ipv6": "0",
},
expSysctls: map[string]string{
"net.ipv6.conf.all.disable_ipv6": "0",
},
},
/* TODO(robmry) - enable this test for the API version used in 28.0.0
{
name: "migration disabled for newer api",
apiVersion: "1.??",
networkMode: "mynet",
sysctls: map[string]string{
"net.ipv6.conf.eth0.accept_ra": "2",
},
expError: "must be supplied using driver option 'com.docker.network.endpoint.sysctls'",
},
*/
{
name: "only migrate eth0",
apiVersion: "1.46",
networkMode: "mynet",
sysctls: map[string]string{
"net.ipv6.conf.eth1.accept_ra": "2",
},
expError: "unable to determine network endpoint",
},
{
name: "net name mismatch",
apiVersion: "1.46",
networkMode: "mynet",
epConfig: map[string]*network.EndpointSettings{
"shortid": {EndpointID: "epone"},
},
sysctls: map[string]string{
"net.ipv6.conf.eth1.accept_ra": "2",
},
expError: "unable to find a network for sysctl",
},
}
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
hostCfg := &container.HostConfig{
NetworkMode: container.NetworkMode(tc.networkMode),
Sysctls: map[string]string{},
}
for k, v := range tc.sysctls {
hostCfg.Sysctls[k] = v
}
netCfg := &network.NetworkingConfig{
EndpointsConfig: tc.epConfig,
}
warnings, err := handleSysctlBC(hostCfg, netCfg, tc.apiVersion)
for _, s := range tc.expWarningContains {
assert.Check(t, is.Contains(warnings, s))
}
if tc.expError != "" {
assert.Check(t, is.ErrorContains(err, tc.expError))
} else {
assert.Check(t, err)
assert.Check(t, is.DeepEqual(hostCfg.Sysctls, tc.expSysctls))
ep := netCfg.EndpointsConfig[tc.networkMode]
if ep == nil {
assert.Check(t, is.Nil(tc.expEpSysctls))
} else {
got, ok := ep.DriverOpts[netlabel.EndpointSysctls]
assert.Check(t, ok)
// Check for expected ep-sysctls.
for _, want := range tc.expEpSysctls {
assert.Check(t, is.Contains(got, want))
}
// Check for unexpected ep-sysctls.
assert.Check(t, is.Len(got, len(strings.Join(tc.expEpSysctls, ","))))
}
}
})
}
}
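The expectations above encode the migration rule itself: an eth0-scoped IPv6 sysctl leaves HostConfig.Sysctls and reappears in the endpoint's DriverOpts under netlabel.EndpointSysctls, with "eth0" replaced by the literal IFNAME token; interface-independent keys stay put, and other interface names are rejected. A minimal standalone sketch of just that rewrite (migrateSysctl is hypothetical, not daemon code):

package main

import (
	"fmt"
	"strings"
)

// migrateSysctl rewrites an eth0-scoped IPv6 sysctl into the form stored in
// the endpoint's driver options; anything else is left for the caller to
// keep in HostConfig.Sysctls (or reject, for eth1 and friends).
func migrateSysctl(key, val string) (string, bool) {
	const prefix = "net.ipv6.conf.eth0."
	if !strings.HasPrefix(key, prefix) {
		return "", false
	}
	return "net.ipv6.conf.IFNAME." + strings.TrimPrefix(key, prefix) + "=" + val, true
}

func main() {
	got, ok := migrateSysctl("net.ipv6.conf.eth0.accept_ra", "2")
	fmt.Println(got, ok) // net.ipv6.conf.IFNAME.accept_ra=2 true
}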

View File

@@ -10,12 +10,53 @@ import (
"net/http"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/versions"
gddohttputil "github.com/golang/gddo/httputil"
)
// setContainerPathStatHeader encodes the stat to JSON, base64 encode, and place in a header.
func setContainerPathStatHeader(stat *container.PathStat, header http.Header) error {
type pathError struct{}
func (pathError) Error() string {
return "Path cannot be empty"
}
func (pathError) InvalidParameter() {}
// postContainersCopy is deprecated in favor of getContainersArchive.
func (s *containerRouter) postContainersCopy(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
// Deprecated since 1.8, Errors out since 1.12
version := httputils.VersionFromContext(ctx)
if versions.GreaterThanOrEqualTo(version, "1.24") {
w.WriteHeader(http.StatusNotFound)
return nil
}
if err := httputils.CheckForJSON(r); err != nil {
return err
}
cfg := types.CopyConfig{}
if err := json.NewDecoder(r.Body).Decode(&cfg); err != nil {
return err
}
if cfg.Resource == "" {
return pathError{}
}
data, err := s.backend.ContainerCopy(vars["name"], cfg.Resource)
if err != nil {
return err
}
defer data.Close()
w.Header().Set("Content-Type", "application/x-tar")
_, err = io.Copy(w, data)
return err
}
// // Encode the stat to JSON, base64 encode, and place in a header.
func setContainerPathStatHeader(stat *types.ContainerPathStat, header http.Header) error {
statJSON, err := json.Marshal(stat)
if err != nil {
return err

View File

@@ -2,19 +2,18 @@ package container // import "github.com/docker/docker/api/server/router/container"
import (
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"strconv"
"github.com/containerd/log"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/pkg/stdcopy"
"github.com/sirupsen/logrus"
)
func (s *containerRouter) getExecByID(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
@@ -38,9 +37,13 @@ func (s *containerRouter) postContainerExecCreate(ctx context.Context, w http.Re
if err := httputils.ParseForm(r); err != nil {
return err
}
if err := httputils.CheckForJSON(r); err != nil {
return err
}
name := vars["name"]
execConfig := &container.ExecOptions{}
if err := httputils.ReadJSON(r, execConfig); err != nil {
execConfig := &types.ExecConfig{}
if err := json.NewDecoder(r.Body).Decode(execConfig); err != nil {
return err
}
@@ -48,16 +51,10 @@ func (s *containerRouter) postContainerExecCreate(ctx context.Context, w http.Re
return execCommandError{}
}
version := httputils.VersionFromContext(ctx)
if versions.LessThan(version, "1.42") {
// Not supported by API versions before 1.42
execConfig.ConsoleSize = nil
}
// Register an instance of Exec in container.
id, err := s.backend.ContainerExecCreate(vars["name"], execConfig)
id, err := s.backend.ContainerExecCreate(name, execConfig)
if err != nil {
log.G(ctx).Errorf("Error setting up exec command in container %s: %v", vars["name"], err)
logrus.Errorf("Error setting up exec command in container %s: %v", name, err)
return err
}
@@ -72,14 +69,21 @@ func (s *containerRouter) postContainerExecStart(ctx context.Context, w http.Res
return err
}
version := httputils.VersionFromContext(ctx)
if versions.GreaterThan(version, "1.21") {
if err := httputils.CheckForJSON(r); err != nil {
return err
}
}
var (
execName = vars["name"]
stdin, inStream io.ReadCloser
stdout, stderr, outStream io.Writer
)
options := &container.ExecStartOptions{}
if err := httputils.ReadJSON(r, options); err != nil {
execStartCheck := &types.ExecStartCheck{}
if err := json.NewDecoder(r.Body).Decode(execStartCheck); err != nil {
return err
}
@@ -87,21 +91,7 @@ func (s *containerRouter) postContainerExecStart(ctx context.Context, w http.Res
return err
}
if options.ConsoleSize != nil {
version := httputils.VersionFromContext(ctx)
// Not supported before 1.42
if versions.LessThan(version, "1.42") {
options.ConsoleSize = nil
}
// No console without tty
if !options.Tty {
options.ConsoleSize = nil
}
}
if !options.Detach {
if !execStartCheck.Detach {
var err error
// Setting up the streaming http interface.
inStream, outStream, err = httputils.HijackConnection(w)
@@ -111,45 +101,33 @@ func (s *containerRouter) postContainerExecStart(ctx context.Context, w http.Res
defer httputils.CloseStreams(inStream, outStream)
if _, ok := r.Header["Upgrade"]; ok {
contentType := types.MediaTypeRawStream
if !options.Tty && versions.GreaterThanOrEqualTo(httputils.VersionFromContext(ctx), "1.42") {
contentType = types.MediaTypeMultiplexedStream
}
_, _ = fmt.Fprint(outStream, "HTTP/1.1 101 UPGRADED\r\nContent-Type: "+contentType+"\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n")
fmt.Fprint(outStream, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n")
} else {
_, _ = fmt.Fprint(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n")
fmt.Fprint(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n")
}
// copy headers that were removed as part of hijack
if err := w.Header().WriteSubset(outStream, nil); err != nil {
return err
}
_, _ = fmt.Fprint(outStream, "\r\n")
fmt.Fprint(outStream, "\r\n")
stdin = inStream
if options.Tty {
stdout = outStream
} else {
stdout = outStream
if !execStartCheck.Tty {
stderr = stdcopy.NewStdWriter(outStream, stdcopy.Stderr)
stdout = stdcopy.NewStdWriter(outStream, stdcopy.Stdout)
}
}
// Now run the user process in container.
//
// TODO: Maybe we should we pass ctx here if we're not detaching?
err := s.backend.ContainerExecStart(context.Background(), execName, backend.ExecStartConfig{
Stdin: stdin,
Stdout: stdout,
Stderr: stderr,
ConsoleSize: options.ConsoleSize,
})
if err != nil {
if options.Detach {
// Maybe we should we pass ctx here if we're not detaching?
if err := s.backend.ContainerExecStart(context.Background(), execName, stdin, stdout, stderr); err != nil {
if execStartCheck.Detach {
return err
}
_, _ = fmt.Fprintf(stdout, "%v\r\n", err)
log.G(ctx).Errorf("Error running exec %s in container: %v", execName, err)
stdout.Write([]byte(err.Error() + "\r\n"))
logrus.Errorf("Error running exec %s in container: %v", execName, err)
}
return nil
}
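The non-TTY branch above is what makes exec output demultiplexable: stdout and stderr are framed with stdcopy.NewStdWriter, and the matching client-side call is stdcopy.StdCopy. A sketch of that read side, with the dialed address standing in for a real hijacked exec-start connection (the host:port is illustrative):

package main

import (
	"net"
	"os"

	"github.com/docker/docker/pkg/stdcopy"
)

func main() {
	// conn stands in for the hijacked exec-start connection (assumption).
	conn, err := net.Dial("tcp", "docker-host:2375")
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	// StdCopy splits the multiplexed frames back into stdout and stderr.
	if _, err := stdcopy.StdCopy(os.Stdout, os.Stderr, conn); err != nil {
		panic(err)
	}
}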

View File

@@ -12,7 +12,7 @@ func (s *containerRouter) getContainersByName(ctx context.Context, w http.Respon
displaySize := httputils.BoolValue(r, "size")
version := httputils.VersionFromContext(ctx)
json, err := s.backend.ContainerInspect(ctx, vars["name"], displaySize, version)
json, err := s.backend.ContainerInspect(vars["name"], displaySize, version)
if err != nil {
return err
}

View File

@@ -1,54 +0,0 @@
package container
import (
"context"
"net"
"syscall"
"github.com/containerd/log"
"github.com/docker/docker/internal/unix_noeintr"
"golang.org/x/sys/unix"
)
func notifyClosed(ctx context.Context, conn net.Conn, notify func()) {
sc, ok := conn.(syscall.Conn)
if !ok {
log.G(ctx).Debug("notifyClosed: conn does not support close notifications")
return
}
rc, err := sc.SyscallConn()
if err != nil {
log.G(ctx).WithError(err).Warn("notifyClosed: failed get raw conn for close notifications")
return
}
epFd, err := unix_noeintr.EpollCreate()
if err != nil {
log.G(ctx).WithError(err).Warn("notifyClosed: failed to create epoll fd")
return
}
defer unix.Close(epFd)
err = rc.Control(func(fd uintptr) {
err := unix_noeintr.EpollCtl(epFd, unix.EPOLL_CTL_ADD, int(fd), &unix.EpollEvent{
Events: unix.EPOLLHUP,
Fd: int32(fd),
})
if err != nil {
log.G(ctx).WithError(err).Warn("notifyClosed: failed to register fd for close notifications")
return
}
events := make([]unix.EpollEvent, 1)
if _, err := unix_noeintr.EpollWait(epFd, events, -1); err != nil {
log.G(ctx).WithError(err).Warn("notifyClosed: failed to wait for close notifications")
return
}
notify()
})
if err != nil {
log.G(ctx).WithError(err).Warn("notifyClosed: failed to register for close notifications")
return
}
}
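For context, the helper above (removed in this diff) leans on epoll delivering EPOLLHUP when the peer closes the connection. The same idea in minimal standalone form, using plain x/sys/unix calls rather than moby's EINTR-retrying unix_noeintr wrappers (Linux-only sketch, not the removed code itself):

package sketch

import "golang.org/x/sys/unix"

// waitForHup blocks until the peer side of fd is closed (EPOLLHUP); unlike
// the real code, it does not retry calls interrupted by EINTR.
func waitForHup(fd int) error {
	epfd, err := unix.EpollCreate1(unix.EPOLL_CLOEXEC)
	if err != nil {
		return err
	}
	defer unix.Close(epfd)
	ev := unix.EpollEvent{Events: unix.EPOLLHUP, Fd: int32(fd)}
	if err := unix.EpollCtl(epfd, unix.EPOLL_CTL_ADD, fd, &ev); err != nil {
		return err
	}
	events := make([]unix.EpollEvent, 1)
	_, err = unix.EpollWait(epfd, events, -1) // block with no timeout
	return err
}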

View File

@@ -1,10 +0,0 @@
//go:build !linux
package container
import (
"context"
"net"
)
func notifyClosed(ctx context.Context, conn net.Conn, notify func()) {}

View File

@@ -3,13 +3,13 @@ package distribution // import "github.com/docker/docker/api/server/router/distribution"
import (
"context"
"github.com/distribution/reference"
"github.com/docker/distribution"
"github.com/docker/docker/api/types/registry"
"github.com/docker/distribution/reference"
"github.com/docker/docker/api/types"
)
// Backend is all the methods that need to be implemented
// to provide image specific functionality.
type Backend interface {
GetRepositories(context.Context, reference.Named, *registry.AuthConfig) ([]distribution.Repository, error)
GetRepository(context.Context, reference.Named, *types.AuthConfig) (distribution.Repository, bool, error)
}

View File

@@ -2,20 +2,19 @@ package distribution // import "github.com/docker/docker/api/server/router/distribution"
import (
"context"
"encoding/base64"
"encoding/json"
"net/http"
"os"
"strings"
"github.com/distribution/reference"
"github.com/docker/distribution"
"github.com/docker/distribution/manifest/manifestlist"
"github.com/docker/distribution/manifest/schema1"
"github.com/docker/distribution/manifest/schema2"
"github.com/docker/distribution/reference"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/types/registry"
distributionpkg "github.com/docker/docker/distribution"
"github.com/docker/docker/errdefs"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/docker/docker/api/types"
registrytypes "github.com/docker/docker/api/types/registry"
"github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
@@ -26,12 +25,26 @@ func (s *distributionRouter) getDistributionInfo(ctx context.Context, w http.Res
w.Header().Set("Content-Type", "application/json")
imgName := vars["name"]
var (
config = &types.AuthConfig{}
authEncoded = r.Header.Get("X-Registry-Auth")
distributionInspect registrytypes.DistributionInspect
)
// TODO why is reference.ParseAnyReference() / reference.ParseNormalizedNamed() not using the reference.ErrTagInvalidFormat (and so on) errors?
ref, err := reference.ParseAnyReference(imgName)
if authEncoded != "" {
authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
if err := json.NewDecoder(authJSON).Decode(&config); err != nil {
// for a search it is not an error if no auth was given
// to increase compatibility with the existing api it is defaulting to be empty
config = &types.AuthConfig{}
}
}
image := vars["name"]
ref, err := reference.ParseAnyReference(image)
if err != nil {
return errdefs.InvalidParameter(err)
return err
}
namedRef, ok := ref.(reference.Named)
if !ok {
@@ -39,58 +52,28 @@ func (s *distributionRouter) getDistributionInfo(ctx context.Context, w http.Res
// full image ID
return errors.Errorf("no manifest found for full image ID")
}
return errdefs.InvalidParameter(errors.Errorf("unknown image reference format: %s", imgName))
return errors.Errorf("unknown image reference format: %s", image)
}
// For a search it is not an error if no auth was given. Ignore invalid
// AuthConfig to increase compatibility with the existing API.
authConfig, _ := registry.DecodeAuthConfig(r.Header.Get(registry.AuthHeader))
repos, err := s.backend.GetRepositories(ctx, namedRef, authConfig)
distrepo, _, err := s.backend.GetRepository(ctx, namedRef, config)
if err != nil {
return err
}
blobsrvc := distrepo.Blobs(ctx)
// Fetch the manifest; if a mirror is configured, try the mirror first,
// but continue with upstream on failure.
//
// FIXME(thaJeztah): construct "repositories" on-demand;
// GetRepositories() will attempt to connect to all endpoints (registries),
// but we may only need the first one if it contains the manifest we're
// looking for, or if the configured mirror is a pull-through mirror.
//
// Logic for this could be implemented similar to "distribution.Pull()",
// which uses the "pullEndpoints" utility to iterate over the list
// of endpoints;
//
// - https://github.com/moby/moby/blob/12c7411b6b7314bef130cd59f1c7384a7db06d0b/distribution/pull.go#L17-L31
// - https://github.com/moby/moby/blob/12c7411b6b7314bef130cd59f1c7384a7db06d0b/distribution/pull.go#L76-L152
var lastErr error
for _, repo := range repos {
distributionInspect, err := s.fetchManifest(ctx, repo, namedRef)
if err != nil {
lastErr = err
continue
}
return httputils.WriteJSON(w, http.StatusOK, distributionInspect)
}
return lastErr
}
func (s *distributionRouter) fetchManifest(ctx context.Context, distrepo distribution.Repository, namedRef reference.Named) (registry.DistributionInspect, error) {
var distributionInspect registry.DistributionInspect
if canonicalRef, ok := namedRef.(reference.Canonical); !ok {
namedRef = reference.TagNameOnly(namedRef)
taggedRef, ok := namedRef.(reference.NamedTagged)
if !ok {
return registry.DistributionInspect{}, errdefs.InvalidParameter(errors.Errorf("image reference not tagged: %s", namedRef))
return errors.Errorf("image reference not tagged: %s", image)
}
descriptor, err := distrepo.Tags(ctx).Get(ctx, taggedRef.Tag())
if err != nil {
return registry.DistributionInspect{}, err
return err
}
distributionInspect.Descriptor = ocispec.Descriptor{
distributionInspect.Descriptor = v1.Descriptor{
MediaType: descriptor.MediaType,
Digest: descriptor.Digest,
Size: descriptor.Size,
@@ -105,26 +88,16 @@ func (s *distributionRouter) fetchManifest(ctx context.Context, distrepo distrib
// we have a digest, so we can retrieve the manifest
mnfstsrvc, err := distrepo.Manifests(ctx)
if err != nil {
return registry.DistributionInspect{}, err
return err
}
mnfst, err := mnfstsrvc.Get(ctx, distributionInspect.Descriptor.Digest)
if err != nil {
switch err {
case reference.ErrReferenceInvalidFormat,
reference.ErrTagInvalidFormat,
reference.ErrDigestInvalidFormat,
reference.ErrNameContainsUppercase,
reference.ErrNameEmpty,
reference.ErrNameTooLong,
reference.ErrNameNotCanonical:
return registry.DistributionInspect{}, errdefs.InvalidParameter(err)
}
return registry.DistributionInspect{}, err
return err
}
mediaType, payload, err := mnfst.Payload()
if err != nil {
return registry.DistributionInspect{}, err
return err
}
// update MediaType because registry might return something incorrect
distributionInspect.Descriptor.MediaType = mediaType
@@ -136,7 +109,7 @@ func (s *distributionRouter) fetchManifest(ctx context.Context, distrepo distrib
switch mnfstObj := mnfst.(type) {
case *manifestlist.DeserializedManifestList:
for _, m := range mnfstObj.Manifests {
distributionInspect.Platforms = append(distributionInspect.Platforms, ocispec.Platform{
distributionInspect.Platforms = append(distributionInspect.Platforms, v1.Platform{
Architecture: m.Platform.Architecture,
OS: m.Platform.OS,
OSVersion: m.Platform.OSVersion,
@@ -145,9 +118,8 @@ func (s *distributionRouter) fetchManifest(ctx context.Context, distrepo distrib
})
}
case *schema2.DeserializedManifest:
blobStore := distrepo.Blobs(ctx)
configJSON, err := blobStore.Get(ctx, mnfstObj.Config.Digest)
var platform ocispec.Platform
configJSON, err := blobsrvc.Get(ctx, mnfstObj.Config.Digest)
var platform v1.Platform
if err == nil {
err := json.Unmarshal(configJSON, &platform)
if err == nil && (platform.OS != "" || platform.Architecture != "") {
@@ -155,14 +127,12 @@ func (s *distributionRouter) fetchManifest(ctx context.Context, distrepo distrib
}
}
case *schema1.SignedManifest:
if os.Getenv("DOCKER_ENABLE_DEPRECATED_PULL_SCHEMA_1_IMAGE") == "" {
return registry.DistributionInspect{}, distributionpkg.DeprecatedSchema1ImageError(namedRef)
}
platform := ocispec.Platform{
platform := v1.Platform{
Architecture: mnfstObj.Architecture,
OS: "linux",
}
distributionInspect.Platforms = append(distributionInspect.Platforms, platform)
}
return distributionInspect, nil
return httputils.WriteJSON(w, http.StatusOK, distributionInspect)
}

View File

@@ -44,7 +44,7 @@ func experimentalHandler(ctx context.Context, w http.ResponseWriter, r *http.Req
return notImplementedError{}
}
// Handler returns the APIFunc to let the server wrap it in middlewares.
// Handler returns returns the APIFunc to let the server wrap it in middlewares.
func (r *experimentalRoute) Handler() httputils.APIFunc {
return r.handler
}

View File

@@ -1,8 +0,0 @@
package grpc // import "github.com/docker/docker/api/server/router/grpc"
import "google.golang.org/grpc"
// Backend abstracts a registerable GRPC service.
type Backend interface {
RegisterGRPC(*grpc.Server)
}

View File

@@ -1,79 +0,0 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
//go:build go1.22
package grpc // import "github.com/docker/docker/api/server/router/grpc"
import (
"context"
"fmt"
"os"
"strings"
"github.com/containerd/containerd/defaults"
"github.com/containerd/log"
"github.com/docker/docker/api/server/router"
"github.com/docker/docker/internal/otelutil"
"github.com/moby/buildkit/util/grpcerrors"
"github.com/moby/buildkit/util/stack"
"github.com/moby/buildkit/util/tracing"
"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
"golang.org/x/net/http2"
"google.golang.org/grpc"
)
type grpcRouter struct {
routes []router.Route
grpcServer *grpc.Server
h2Server *http2.Server
}
// NewRouter initializes a new grpc http router
func NewRouter(backends ...Backend) router.Router {
tp, _ := otelutil.NewTracerProvider(context.Background(), false)
opts := []grpc.ServerOption{
grpc.StatsHandler(tracing.ServerStatsHandler(otelgrpc.WithTracerProvider(tp))),
grpc.ChainUnaryInterceptor(unaryInterceptor, grpcerrors.UnaryServerInterceptor),
grpc.StreamInterceptor(grpcerrors.StreamServerInterceptor),
grpc.MaxRecvMsgSize(defaults.DefaultMaxRecvMsgSize),
grpc.MaxSendMsgSize(defaults.DefaultMaxSendMsgSize),
}
r := &grpcRouter{
h2Server: &http2.Server{},
grpcServer: grpc.NewServer(opts...),
}
for _, b := range backends {
b.RegisterGRPC(r.grpcServer)
}
r.initRoutes()
return r
}
// Routes returns the available routers to the session controller
func (gr *grpcRouter) Routes() []router.Route {
return gr.routes
}
func (gr *grpcRouter) initRoutes() {
gr.routes = []router.Route{
router.NewPostRoute("/grpc", gr.serveGRPC),
}
}
func unaryInterceptor(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) {
// This method is used by the clients to send their traces to buildkit so they can be included
// in the daemon trace and stored in the build history record. This method can not be traced because
// it would cause an infinite loop.
if strings.HasSuffix(info.FullMethod, "opentelemetry.proto.collector.trace.v1.TraceService/Export") {
return handler(ctx, req)
}
resp, err = handler(ctx, req)
if err != nil {
log.G(ctx).WithError(err).Error(info.FullMethod)
if log.GetLevel() >= log.DebugLevel {
fmt.Fprintf(os.Stderr, "%+v", stack.Formatter(grpcerrors.FromGRPC(err)))
}
}
return resp, err
}

View File

@@ -1,45 +0,0 @@
package grpc // import "github.com/docker/docker/api/server/router/grpc"
import (
"context"
"net/http"
"github.com/pkg/errors"
"golang.org/x/net/http2"
)
func (gr *grpcRouter) serveGRPC(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
h, ok := w.(http.Hijacker)
if !ok {
return errors.New("handler does not support hijack")
}
proto := r.Header.Get("Upgrade")
if proto == "" {
return errors.New("no upgrade proto in request")
}
if proto != "h2c" {
return errors.Errorf("protocol %s not supported", proto)
}
conn, _, err := h.Hijack()
if err != nil {
return err
}
resp := &http.Response{
StatusCode: http.StatusSwitchingProtocols,
ProtoMajor: 1,
ProtoMinor: 1,
Header: http.Header{},
}
resp.Header.Set("Connection", "Upgrade")
resp.Header.Set("Upgrade", proto)
// set raw mode
conn.Write([]byte{})
resp.Write(conn)
// https://godoc.org/golang.org/x/net/http2#Server.ServeConn
// TODO: is it a problem that conn has already been written to?
gr.h2Server.ServeConn(conn, &http2.ServeConnOpts{Handler: gr.grpcServer})
return nil
}
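A hypothetical client side for the handler above: issue the upgrade request over a raw TCP connection, and once the 101 response arrives the same connection carries cleartext HTTP/2 (h2c) to the embedded gRPC server. The /grpc path comes from the routes above; "docker-host:2375" is a placeholder, and a real caller would hand conn to an h2c-capable gRPC client afterwards.

package main

import (
	"bufio"
	"fmt"
	"net"
	"net/http"
)

func main() {
	conn, err := net.Dial("tcp", "docker-host:2375")
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	req, err := http.NewRequest(http.MethodPost, "http://docker-host/grpc", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Connection", "Upgrade")
	req.Header.Set("Upgrade", "h2c")
	if err := req.Write(conn); err != nil {
		panic(err)
	}

	resp, err := http.ReadResponse(bufio.NewReader(conn), req)
	if err != nil {
		panic(err)
	}
	// Expect 101 Switching Protocols; conn now speaks h2c.
	fmt.Println(resp.StatusCode)
}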

View File

@@ -4,13 +4,11 @@ import (
"context"
"io"
"github.com/distribution/reference"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/image"
"github.com/docker/docker/api/types/registry"
dockerimage "github.com/docker/docker/image"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
specs "github.com/opencontainers/image-spec/specs-go/v1"
)
// Backend is all the methods that need to be implemented
@@ -22,25 +20,22 @@ type Backend interface {
}
type imageBackend interface {
ImageDelete(ctx context.Context, imageRef string, force, prune bool) ([]image.DeleteResponse, error)
ImageHistory(ctx context.Context, imageName string) ([]*image.HistoryResponseItem, error)
Images(ctx context.Context, opts image.ListOptions) ([]*image.Summary, error)
GetImage(ctx context.Context, refOrID string, options backend.GetImageOpts) (*dockerimage.Image, error)
TagImage(ctx context.Context, id dockerimage.ID, newRef reference.Named) error
ImagesPrune(ctx context.Context, pruneFilters filters.Args) (*image.PruneReport, error)
ImageDelete(imageRef string, force, prune bool) ([]types.ImageDeleteResponseItem, error)
ImageHistory(imageName string) ([]*image.HistoryResponseItem, error)
Images(imageFilters filters.Args, all bool, withExtraAttrs bool) ([]*types.ImageSummary, error)
LookupImage(name string) (*types.ImageInspect, error)
TagImage(imageName, repository, tag string) (string, error)
ImagesPrune(ctx context.Context, pruneFilters filters.Args) (*types.ImagesPruneReport, error)
}
type importExportBackend interface {
LoadImage(ctx context.Context, inTar io.ReadCloser, outStream io.Writer, quiet bool) error
ImportImage(ctx context.Context, ref reference.Named, platform *ocispec.Platform, msg string, layerReader io.Reader, changes []string) (dockerimage.ID, error)
ExportImage(ctx context.Context, names []string, outStream io.Writer) error
LoadImage(inTar io.ReadCloser, outStream io.Writer, quiet bool) error
ImportImage(src string, repository, platform string, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error
ExportImage(names []string, outStream io.Writer) error
}
type registryBackend interface {
PullImage(ctx context.Context, ref reference.Named, platform *ocispec.Platform, metaHeaders map[string][]string, authConfig *registry.AuthConfig, outStream io.Writer) error
PushImage(ctx context.Context, ref reference.Named, platform *ocispec.Platform, metaHeaders map[string][]string, authConfig *registry.AuthConfig, outStream io.Writer) error
}
type Searcher interface {
Search(ctx context.Context, searchFilters filters.Args, term string, limit int, authConfig *registry.AuthConfig, headers map[string][]string) ([]registry.SearchResult, error)
PullImage(ctx context.Context, image, tag string, platform *specs.Platform, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error
PushImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error
SearchRegistryForImages(ctx context.Context, filtersArgs string, term string, limit int, authConfig *types.AuthConfig, metaHeaders map[string][]string) (*registry.SearchResults, error)
}

View File

@@ -2,56 +2,43 @@ package image // import "github.com/docker/docker/api/server/router/image"
import (
"github.com/docker/docker/api/server/router"
"github.com/docker/docker/image"
"github.com/docker/docker/layer"
"github.com/docker/docker/reference"
)
// imageRouter is a router to talk with the image controller
type imageRouter struct {
backend Backend
searcher Searcher
referenceBackend reference.Store
imageStore image.Store
layerStore layer.Store
routes []router.Route
backend Backend
routes []router.Route
}
// NewRouter initializes a new image router
func NewRouter(backend Backend, searcher Searcher, referenceBackend reference.Store, imageStore image.Store, layerStore layer.Store) router.Router {
ir := &imageRouter{
backend: backend,
searcher: searcher,
referenceBackend: referenceBackend,
imageStore: imageStore,
layerStore: layerStore,
}
ir.initRoutes()
return ir
func NewRouter(backend Backend) router.Router {
r := &imageRouter{backend: backend}
r.initRoutes()
return r
}
// Routes returns the available routes to the image controller
func (ir *imageRouter) Routes() []router.Route {
return ir.routes
func (r *imageRouter) Routes() []router.Route {
return r.routes
}
// initRoutes initializes the routes in the image router
func (ir *imageRouter) initRoutes() {
ir.routes = []router.Route{
func (r *imageRouter) initRoutes() {
r.routes = []router.Route{
// GET
router.NewGetRoute("/images/json", ir.getImagesJSON),
router.NewGetRoute("/images/search", ir.getImagesSearch),
router.NewGetRoute("/images/get", ir.getImagesGet),
router.NewGetRoute("/images/{name:.*}/get", ir.getImagesGet),
router.NewGetRoute("/images/{name:.*}/history", ir.getImagesHistory),
router.NewGetRoute("/images/{name:.*}/json", ir.getImagesByName),
router.NewGetRoute("/images/json", r.getImagesJSON),
router.NewGetRoute("/images/search", r.getImagesSearch),
router.NewGetRoute("/images/get", r.getImagesGet),
router.NewGetRoute("/images/{name:.*}/get", r.getImagesGet),
router.NewGetRoute("/images/{name:.*}/history", r.getImagesHistory),
router.NewGetRoute("/images/{name:.*}/json", r.getImagesByName),
// POST
router.NewPostRoute("/images/load", ir.postImagesLoad),
router.NewPostRoute("/images/create", ir.postImagesCreate),
router.NewPostRoute("/images/{name:.*}/push", ir.postImagesPush),
router.NewPostRoute("/images/{name:.*}/tag", ir.postImagesTag),
router.NewPostRoute("/images/prune", ir.postImagesPrune),
router.NewPostRoute("/images/load", r.postImagesLoad),
router.NewPostRoute("/images/create", r.postImagesCreate, router.WithCancel),
router.NewPostRoute("/images/{name:.*}/push", r.postImagesPush, router.WithCancel),
router.NewPostRoute("/images/{name:.*}/tag", r.postImagesTag),
router.NewPostRoute("/images/prune", r.postImagesPrune, router.WithCancel),
// DELETE
router.NewDeleteRoute("/images/{name:.*}", ir.deleteImages),
router.NewDeleteRoute("/images/{name:.*}", r.deleteImages),
}
}

View File

@@ -2,50 +2,41 @@ package image // import "github.com/docker/docker/api/server/router/image"
import (
"context"
"fmt"
"io"
"encoding/base64"
"encoding/json"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"github.com/containerd/platforms"
"github.com/distribution/reference"
"github.com/docker/docker/api"
"github.com/containerd/containerd/platforms"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/api/types/filters"
imagetypes "github.com/docker/docker/api/types/image"
"github.com/docker/docker/api/types/registry"
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/builder/remotecontext"
"github.com/docker/docker/dockerversion"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/image"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/progress"
"github.com/docker/docker/pkg/streamformatter"
"github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/docker/docker/pkg/system"
"github.com/docker/docker/registry"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
// Creates an image from Pull or from Import
func (ir *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
func (s *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
var (
img = r.Form.Get("fromImage")
repo = r.Form.Get("repo")
tag = r.Form.Get("tag")
comment = r.Form.Get("message")
progressErr error
output = ioutils.NewWriteFlusher(w)
platform *ocispec.Platform
image = r.Form.Get("fromImage")
repo = r.Form.Get("repo")
tag = r.Form.Get("tag")
message = r.Form.Get("message")
err error
output = ioutils.NewWriteFlusher(w)
platform *specs.Platform
)
defer output.Close()
@@ -53,109 +44,62 @@ func (ir *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWrit
version := httputils.VersionFromContext(ctx)
if versions.GreaterThanOrEqualTo(version, "1.32") {
if p := r.FormValue("platform"); p != "" {
sp, err := platforms.Parse(p)
apiPlatform := r.FormValue("platform")
if apiPlatform != "" {
sp, err := platforms.Parse(apiPlatform)
if err != nil {
return errdefs.InvalidParameter(err)
return err
}
if err := system.ValidatePlatform(sp); err != nil {
return err
}
platform = &sp
}
}
if img != "" { // pull
metaHeaders := map[string][]string{}
for k, v := range r.Header {
if strings.HasPrefix(k, "X-Meta-") {
metaHeaders[k] = v
}
}
// Special case: "pull -a" may send an image name with a
// trailing :. This is ugly, but let's not break API
// compatibility.
imgName := strings.TrimSuffix(img, ":")
ref, err := reference.ParseNormalizedNamed(imgName)
if err != nil {
return errdefs.InvalidParameter(err)
}
// TODO(thaJeztah) this could use a WithTagOrDigest() utility
if tag != "" {
// The "tag" could actually be a digest.
var dgst digest.Digest
dgst, err = digest.Parse(tag)
if err == nil {
ref, err = reference.WithDigest(reference.TrimNamed(ref), dgst)
} else {
ref, err = reference.WithTag(ref, tag)
}
if err != nil {
return errdefs.InvalidParameter(err)
}
}
if err := validateRepoName(ref); err != nil {
return errdefs.Forbidden(err)
}
// For a pull it is not an error if no auth was given. Ignore invalid
// AuthConfig to increase compatibility with the existing API.
authConfig, _ := registry.DecodeAuthConfig(r.Header.Get(registry.AuthHeader))
progressErr = ir.backend.PullImage(ctx, ref, platform, metaHeaders, authConfig, output)
} else { // import
src := r.Form.Get("fromSrc")
tagRef, err := httputils.RepoTagReference(repo, tag)
if err != nil {
return errdefs.InvalidParameter(err)
}
if len(comment) == 0 {
comment = "Imported from " + src
}
var layerReader io.ReadCloser
defer r.Body.Close()
if src == "-" {
layerReader = r.Body
} else {
if len(strings.Split(src, "://")) == 1 {
src = "http://" + src
}
u, err := url.Parse(src)
if err != nil {
return errdefs.InvalidParameter(err)
if err == nil {
if image != "" { //pull
metaHeaders := map[string][]string{}
for k, v := range r.Header {
if strings.HasPrefix(k, "X-Meta-") {
metaHeaders[k] = v
}
}
resp, err := remotecontext.GetWithStatusError(u.String())
if err != nil {
return err
authEncoded := r.Header.Get("X-Registry-Auth")
authConfig := &types.AuthConfig{}
if authEncoded != "" {
authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil {
// for a pull it is not an error if no auth was given
// to increase compatibility with the existing api it is defaulting to be empty
authConfig = &types.AuthConfig{}
}
}
output.Write(streamformatter.FormatStatus("", "Downloading from %s", u))
progressOutput := streamformatter.NewJSONProgressOutput(output, true)
layerReader = progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Importing")
defer layerReader.Close()
}
var id image.ID
id, progressErr = ir.backend.ImportImage(ctx, tagRef, platform, comment, layerReader, r.Form["changes"])
if progressErr == nil {
_, _ = output.Write(streamformatter.FormatStatus("", "%v", id.String()))
err = s.backend.PullImage(ctx, image, tag, platform, metaHeaders, authConfig, output)
} else { //import
src := r.Form.Get("fromSrc")
// 'err' MUST NOT be defined within this block, we need any error
// generated from the download to be available to the output
// stream processing below
os := ""
if platform != nil {
os = platform.OS
}
err = s.backend.ImportImage(src, repo, os, tag, message, r.Body, output, r.Form["changes"])
}
}
if progressErr != nil {
if err != nil {
if !output.Flushed() {
return progressErr
return err
}
_, _ = output.Write(streamformatter.FormatError(progressErr))
output.Write(streamformatter.FormatError(err))
}
return nil
}
func (ir *imageRouter) postImagesPush(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
func (s *imageRouter) postImagesPush(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
metaHeaders := map[string][]string{}
for k, v := range r.Header {
if strings.HasPrefix(k, "X-Meta-") {
@@ -165,73 +109,41 @@ func (ir *imageRouter) postImagesPush(ctx context.Context, w http.ResponseWriter
if err := httputils.ParseForm(r); err != nil {
return err
}
authConfig := &types.AuthConfig{}
var authConfig *registry.AuthConfig
if authEncoded := r.Header.Get(registry.AuthHeader); authEncoded != "" {
// the new format is to handle the authConfig as a header. Ignore invalid
// AuthConfig to increase compatibility with the existing API.
authConfig, _ = registry.DecodeAuthConfig(authEncoded)
authEncoded := r.Header.Get("X-Registry-Auth")
if authEncoded != "" {
// the new format is to handle the authConfig as a header
authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil {
// to increase compatibility to existing api it is defaulting to be empty
authConfig = &types.AuthConfig{}
}
} else {
// the old format is supported for compatibility if there was no authConfig header
var err error
authConfig, err = registry.DecodeAuthConfigBody(r.Body)
if err != nil {
return errors.Wrap(err, "bad parameters and missing X-Registry-Auth")
if err := json.NewDecoder(r.Body).Decode(authConfig); err != nil {
return errors.Wrap(errdefs.InvalidParameter(err), "Bad parameters and missing X-Registry-Auth")
}
}
image := vars["name"]
tag := r.Form.Get("tag")
output := ioutils.NewWriteFlusher(w)
defer output.Close()
w.Header().Set("Content-Type", "application/json")
img := vars["name"]
tag := r.Form.Get("tag")
var ref reference.Named
// Tag is empty only in case PushOptions.All is true.
if tag != "" {
r, err := httputils.RepoTagReference(img, tag)
if err != nil {
return errdefs.InvalidParameter(err)
}
ref = r
} else {
r, err := reference.ParseNormalizedNamed(img)
if err != nil {
return errdefs.InvalidParameter(err)
}
ref = r
}
var platform *ocispec.Platform
// Platform is optional, and only supported in API version 1.46 and later.
// However the PushOptions struct previously was an alias for the PullOptions struct
// which also contained a Platform field.
// This means that older clients may be sending a platform field, even
// though it wasn't really supported by the server.
// Don't break these clients and just ignore the platform field on older APIs.
if versions.GreaterThanOrEqualTo(httputils.VersionFromContext(ctx), "1.46") {
if formPlatform := r.Form.Get("platform"); formPlatform != "" {
p, err := httputils.DecodePlatform(formPlatform)
if err != nil {
return err
}
platform = p
}
}
if err := ir.backend.PushImage(ctx, ref, platform, metaHeaders, authConfig, output); err != nil {
if err := s.backend.PushImage(ctx, image, tag, metaHeaders, authConfig, output); err != nil {
if !output.Flushed() {
return err
}
_, _ = output.Write(streamformatter.FormatError(err))
output.Write(streamformatter.FormatError(err))
}
return nil
}
func (ir *imageRouter) getImagesGet(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
func (s *imageRouter) getImagesGet(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
@@ -247,16 +159,16 @@ func (ir *imageRouter) getImagesGet(ctx context.Context, w http.ResponseWriter,
names = r.Form["names"]
}
if err := ir.backend.ExportImage(ctx, names, output); err != nil {
if err := s.backend.ExportImage(names, output); err != nil {
if !output.Flushed() {
return err
}
_, _ = output.Write(streamformatter.FormatError(err))
output.Write(streamformatter.FormatError(err))
}
return nil
}
func (ir *imageRouter) postImagesLoad(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
func (s *imageRouter) postImagesLoad(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
@@ -266,8 +178,8 @@ func (ir *imageRouter) postImagesLoad(ctx context.Context, w http.ResponseWriter
output := ioutils.NewWriteFlusher(w)
defer output.Close()
if err := ir.backend.LoadImage(ctx, r.Body, output, quiet); err != nil {
_, _ = output.Write(streamformatter.FormatError(err))
if err := s.backend.LoadImage(r.Body, output, quiet); err != nil {
output.Write(streamformatter.FormatError(err))
}
return nil
}
@@ -280,7 +192,7 @@ func (missingImageError) Error() string {
func (missingImageError) InvalidParameter() {}
func (ir *imageRouter) deleteImages(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
func (s *imageRouter) deleteImages(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
@@ -294,7 +206,7 @@ func (ir *imageRouter) deleteImages(ctx context.Context, w http.ResponseWriter,
force := httputils.BoolValue(r, "force")
prune := !httputils.BoolValue(r, "noprune")
list, err := ir.backend.ImageDelete(ctx, name, force, prune)
list, err := s.backend.ImageDelete(name, force, prune)
if err != nil {
return err
}
@@ -302,103 +214,16 @@ func (ir *imageRouter) deleteImages(ctx context.Context, w http.ResponseWriter,
return httputils.WriteJSON(w, http.StatusOK, list)
}
func (ir *imageRouter) getImagesByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
img, err := ir.backend.GetImage(ctx, vars["name"], backend.GetImageOpts{Details: true})
func (s *imageRouter) getImagesByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
imageInspect, err := s.backend.LookupImage(vars["name"])
if err != nil {
return err
}
imageInspect, err := ir.toImageInspect(img)
if err != nil {
return err
}
version := httputils.VersionFromContext(ctx)
if versions.LessThan(version, "1.44") {
imageInspect.VirtualSize = imageInspect.Size //nolint:staticcheck // ignore SA1019: field is deprecated, but still set on API < v1.44.
if imageInspect.Created == "" {
// backwards compatibility for Created not existing returning "0001-01-01T00:00:00Z"
// https://github.com/moby/moby/issues/47368
imageInspect.Created = time.Time{}.Format(time.RFC3339Nano)
}
}
if versions.GreaterThanOrEqualTo(version, "1.45") {
imageInspect.Container = "" //nolint:staticcheck // ignore SA1019: field is deprecated, but still set on API < v1.45.
imageInspect.ContainerConfig = nil //nolint:staticcheck // ignore SA1019: field is deprecated, but still set on API < v1.45.
}
return httputils.WriteJSON(w, http.StatusOK, imageInspect)
}
func (ir *imageRouter) toImageInspect(img *image.Image) (*types.ImageInspect, error) {
var repoTags, repoDigests []string
for _, ref := range img.Details.References {
switch ref.(type) {
case reference.NamedTagged:
repoTags = append(repoTags, reference.FamiliarString(ref))
case reference.Canonical:
repoDigests = append(repoDigests, reference.FamiliarString(ref))
}
}
comment := img.Comment
if len(comment) == 0 && len(img.History) > 0 {
comment = img.History[len(img.History)-1].Comment
}
// Make sure we output empty arrays instead of nil.
if repoTags == nil {
repoTags = []string{}
}
if repoDigests == nil {
repoDigests = []string{}
}
var created string
if img.Created != nil {
created = img.Created.Format(time.RFC3339Nano)
}
return &types.ImageInspect{
ID: img.ID().String(),
RepoTags: repoTags,
RepoDigests: repoDigests,
Parent: img.Parent.String(),
Comment: comment,
Created: created,
Container: img.Container, //nolint:staticcheck // ignore SA1019: field is deprecated, but still set on API < v1.45.
ContainerConfig: &img.ContainerConfig, //nolint:staticcheck // ignore SA1019: field is deprecated, but still set on API < v1.45.
DockerVersion: img.DockerVersion,
Author: img.Author,
Config: img.Config,
Architecture: img.Architecture,
Variant: img.Variant,
Os: img.OperatingSystem(),
OsVersion: img.OSVersion,
Size: img.Details.Size,
GraphDriver: types.GraphDriverData{
Name: img.Details.Driver,
Data: img.Details.Metadata,
},
RootFS: rootFSToAPIType(img.RootFS),
Metadata: imagetypes.Metadata{
LastTagTime: img.Details.LastUpdated,
},
}, nil
}
func rootFSToAPIType(rootfs *image.RootFS) types.RootFS {
var layers []string
for _, l := range rootfs.DiffIDs {
layers = append(layers, l.String())
}
return types.RootFS{
Type: rootfs.Type,
Layers: layers,
}
}
func (ir *imageRouter) getImagesJSON(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
func (s *imageRouter) getImagesJSON(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
@@ -408,62 +233,23 @@ func (ir *imageRouter) getImagesJSON(ctx context.Context, w http.ResponseWriter,
return err
}
version := httputils.VersionFromContext(ctx)
if versions.LessThan(version, "1.41") {
// NOTE: filter is a shell glob string applied to repository names.
filterParam := r.Form.Get("filter")
if filterParam != "" {
imageFilters.Add("reference", filterParam)
}
filterParam := r.Form.Get("filter")
// FIXME(vdemeester) This has been deprecated in 1.13, and is target for removal for v17.12
if filterParam != "" {
imageFilters.Add("reference", filterParam)
}
var sharedSize bool
if versions.GreaterThanOrEqualTo(version, "1.42") {
// NOTE: Support for the "shared-size" parameter was added in API 1.42.
sharedSize = httputils.BoolValue(r, "shared-size")
}
var manifests bool
if versions.GreaterThanOrEqualTo(version, "1.47") {
manifests = httputils.BoolValue(r, "manifests")
}
images, err := ir.backend.Images(ctx, imagetypes.ListOptions{
All: httputils.BoolValue(r, "all"),
Filters: imageFilters,
SharedSize: sharedSize,
Manifests: manifests,
})
images, err := s.backend.Images(imageFilters, httputils.BoolValue(r, "all"), false)
if err != nil {
return err
}
useNone := versions.LessThan(version, "1.43")
withVirtualSize := versions.LessThan(version, "1.44")
for _, img := range images {
if useNone {
if len(img.RepoTags) == 0 && len(img.RepoDigests) == 0 {
img.RepoTags = append(img.RepoTags, "<none>:<none>")
img.RepoDigests = append(img.RepoDigests, "<none>@<none>")
}
} else {
if img.RepoTags == nil {
img.RepoTags = []string{}
}
if img.RepoDigests == nil {
img.RepoDigests = []string{}
}
}
if withVirtualSize {
img.VirtualSize = img.Size //nolint:staticcheck // ignore SA1019: field is deprecated, but still set on API < v1.44.
}
}
return httputils.WriteJSON(w, http.StatusOK, images)
}
func (ir *imageRouter) getImagesHistory(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
history, err := ir.backend.ImageHistory(ctx, vars["name"])
func (s *imageRouter) getImagesHistory(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
name := vars["name"]
history, err := s.backend.ImageHistory(name)
if err != nil {
return err
}
@@ -471,71 +257,56 @@ func (ir *imageRouter) getImagesHistory(ctx context.Context, w http.ResponseWrit
return httputils.WriteJSON(w, http.StatusOK, history)
}
func (ir *imageRouter) postImagesTag(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
func (s *imageRouter) postImagesTag(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
ref, err := httputils.RepoTagReference(r.Form.Get("repo"), r.Form.Get("tag"))
if ref == nil || err != nil {
return errdefs.InvalidParameter(err)
}
refName := reference.FamiliarName(ref)
if refName == string(digest.Canonical) {
return errdefs.InvalidParameter(errors.New("refusing to create an ambiguous tag using digest algorithm as name"))
}
img, err := ir.backend.GetImage(ctx, vars["name"], backend.GetImageOpts{})
if err != nil {
return errdefs.NotFound(err)
}
if err := ir.backend.TagImage(ctx, img.ID(), ref); err != nil {
if _, err := s.backend.TagImage(vars["name"], r.Form.Get("repo"), r.Form.Get("tag")); err != nil {
return err
}
w.WriteHeader(http.StatusCreated)
return nil
}
func (ir *imageRouter) getImagesSearch(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
func (s *imageRouter) getImagesSearch(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
var (
config *types.AuthConfig
authEncoded = r.Header.Get("X-Registry-Auth")
headers = map[string][]string{}
)
var limit int
if r.Form.Get("limit") != "" {
var err error
limit, err = strconv.Atoi(r.Form.Get("limit"))
if err != nil || limit < 0 {
return errdefs.InvalidParameter(errors.Wrap(err, "invalid limit specified"))
if authEncoded != "" {
authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
if err := json.NewDecoder(authJSON).Decode(&config); err != nil {
// for a search it is not an error if no auth was given
// to increase compatibility with the existing api it is defaulting to be empty
config = &types.AuthConfig{}
}
}
searchFilters, err := filters.FromJSON(r.Form.Get("filters"))
if err != nil {
return err
}
// For a search it is not an error if no auth was given. Ignore invalid
// AuthConfig to increase compatibility with the existing API.
authConfig, _ := registry.DecodeAuthConfig(r.Header.Get(registry.AuthHeader))
headers := http.Header{}
for k, v := range r.Header {
k = http.CanonicalHeaderKey(k)
if strings.HasPrefix(k, "X-Meta-") {
headers[k] = v
}
}
headers.Set("User-Agent", dockerversion.DockerUserAgent(ctx))
res, err := ir.searcher.Search(ctx, searchFilters, r.Form.Get("term"), limit, authConfig, headers)
limit := registry.DefaultSearchLimit
if r.Form.Get("limit") != "" {
limitValue, err := strconv.Atoi(r.Form.Get("limit"))
if err != nil {
return err
}
limit = limitValue
}
query, err := s.backend.SearchRegistryForImages(ctx, r.Form.Get("filters"), r.Form.Get("term"), limit, config, headers)
if err != nil {
return err
}
return httputils.WriteJSON(w, http.StatusOK, res)
return httputils.WriteJSON(w, http.StatusOK, query.Results)
}
func (ir *imageRouter) postImagesPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
func (s *imageRouter) postImagesPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
@@ -545,18 +316,9 @@ func (ir *imageRouter) postImagesPrune(ctx context.Context, w http.ResponseWrite
return err
}
pruneReport, err := ir.backend.ImagesPrune(ctx, pruneFilters)
pruneReport, err := s.backend.ImagesPrune(ctx, pruneFilters)
if err != nil {
return err
}
return httputils.WriteJSON(w, http.StatusOK, pruneReport)
}
// validateRepoName validates the name of a repository.
func validateRepoName(name reference.Named) error {
familiarName := reference.FamiliarName(name)
if familiarName == api.NoBaseImageSpecifier {
return fmt.Errorf("'%s' is a reserved name", familiarName)
}
return nil
}

View File

@@ -1,6 +1,7 @@
package router // import "github.com/docker/docker/api/server/router"
import (
"context"
"net/http"
"github.com/docker/docker/api/server/httputils"
@@ -44,30 +45,60 @@ func NewRoute(method, path string, handler httputils.APIFunc, opts ...RouteWrapp
// NewGetRoute initializes a new route with the http method GET.
func NewGetRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route {
return NewRoute(http.MethodGet, path, handler, opts...)
return NewRoute("GET", path, handler, opts...)
}
// NewPostRoute initializes a new route with the http method POST.
func NewPostRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route {
return NewRoute(http.MethodPost, path, handler, opts...)
return NewRoute("POST", path, handler, opts...)
}
// NewPutRoute initializes a new route with the http method PUT.
func NewPutRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route {
return NewRoute(http.MethodPut, path, handler, opts...)
return NewRoute("PUT", path, handler, opts...)
}
// NewDeleteRoute initializes a new route with the http method DELETE.
func NewDeleteRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route {
return NewRoute(http.MethodDelete, path, handler, opts...)
return NewRoute("DELETE", path, handler, opts...)
}
// NewOptionsRoute initializes a new route with the http method OPTIONS.
func NewOptionsRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route {
return NewRoute(http.MethodOptions, path, handler, opts...)
return NewRoute("OPTIONS", path, handler, opts...)
}
// NewHeadRoute initializes a new route with the http method HEAD.
func NewHeadRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route {
return NewRoute(http.MethodHead, path, handler, opts...)
return NewRoute("HEAD", path, handler, opts...)
}
func cancellableHandler(h httputils.APIFunc) httputils.APIFunc {
return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if notifier, ok := w.(http.CloseNotifier); ok {
notify := notifier.CloseNotify()
notifyCtx, cancel := context.WithCancel(ctx)
finished := make(chan struct{})
defer close(finished)
ctx = notifyCtx
go func() {
select {
case <-notify:
cancel()
case <-finished:
}
}()
}
return h(ctx, w, r, vars)
}
}
// WithCancel makes new route which embeds http.CloseNotifier feature to
// context.Context of handler.
func WithCancel(r Route) Route {
return localRoute{
method: r.Method(),
path: r.Path(),
handler: cancellableHandler(r.Handler()),
}
}
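For comparison, a sketch of the same behaviour on modern Go, where the server cancels r.Context() itself when the client disconnects; this is why http.CloseNotifier was later deprecated, and none of the following is part of the diff:

package main

import (
	"fmt"
	"net/http"
	"time"
)

func slowHandler(w http.ResponseWriter, r *http.Request) {
	select {
	case <-r.Context().Done():
		// Client went away; abandon the work.
		fmt.Println("request cancelled:", r.Context().Err())
	case <-time.After(5 * time.Second):
		fmt.Fprintln(w, "done")
	}
}

func main() {
	http.HandleFunc("/slow", slowHandler)
	_ = http.ListenAndServe(":8080", nil)
}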

View File

@@ -3,28 +3,30 @@ package network // import "github.com/docker/docker/api/server/router/network"
import (
"context"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/network"
"github.com/docker/libnetwork"
)
// Backend is all the methods that need to be implemented
// to provide network specific functionality.
type Backend interface {
GetNetworks(filters.Args, backend.NetworkListConfig) ([]network.Inspect, error)
CreateNetwork(nc network.CreateRequest) (*network.CreateResponse, error)
ConnectContainerToNetwork(ctx context.Context, containerName, networkName string, endpointConfig *network.EndpointSettings) error
FindNetwork(idName string) (libnetwork.Network, error)
GetNetworks() []libnetwork.Network
CreateNetwork(nc types.NetworkCreateRequest) (*types.NetworkCreateResponse, error)
ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error
DisconnectContainerFromNetwork(containerName string, networkName string, force bool) error
DeleteNetwork(networkID string) error
NetworksPrune(ctx context.Context, pruneFilters filters.Args) (*network.PruneReport, error)
NetworksPrune(ctx context.Context, pruneFilters filters.Args) (*types.NetworksPruneReport, error)
}
// ClusterBackend is all the methods that need to be implemented
// to provide cluster network specific functionality.
type ClusterBackend interface {
GetNetworks(filters.Args) ([]network.Inspect, error)
GetNetwork(name string) (network.Inspect, error)
GetNetworksByName(name string) ([]network.Inspect, error)
CreateNetwork(nc network.CreateRequest) (string, error)
GetNetworks() ([]types.NetworkResource, error)
GetNetwork(name string) (types.NetworkResource, error)
GetNetworksByName(name string) ([]types.NetworkResource, error)
CreateNetwork(nc types.NetworkCreateRequest) (string, error)
RemoveNetwork(name string) error
}

View File

@@ -0,0 +1,93 @@
package network // import "github.com/docker/docker/api/server/router/network"
import (
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/runconfig"
)
func filterNetworkByType(nws []types.NetworkResource, netType string) ([]types.NetworkResource, error) {
retNws := []types.NetworkResource{}
switch netType {
case "builtin":
for _, nw := range nws {
if runconfig.IsPreDefinedNetwork(nw.Name) {
retNws = append(retNws, nw)
}
}
case "custom":
for _, nw := range nws {
if !runconfig.IsPreDefinedNetwork(nw.Name) {
retNws = append(retNws, nw)
}
}
default:
return nil, invalidFilter(netType)
}
return retNws, nil
}
type invalidFilter string
func (e invalidFilter) Error() string {
return "Invalid filter: 'type'='" + string(e) + "'"
}
func (e invalidFilter) InvalidParameter() {}
// filterNetworks filters network list according to user specified filter
// and returns user chosen networks
func filterNetworks(nws []types.NetworkResource, filter filters.Args) ([]types.NetworkResource, error) {
// if filter is empty, return original network list
if filter.Len() == 0 {
return nws, nil
}
displayNet := []types.NetworkResource{}
for _, nw := range nws {
if filter.Contains("driver") {
if !filter.ExactMatch("driver", nw.Driver) {
continue
}
}
if filter.Contains("name") {
if !filter.Match("name", nw.Name) {
continue
}
}
if filter.Contains("id") {
if !filter.Match("id", nw.ID) {
continue
}
}
if filter.Contains("label") {
if !filter.MatchKVList("label", nw.Labels) {
continue
}
}
if filter.Contains("scope") {
if !filter.ExactMatch("scope", nw.Scope) {
continue
}
}
displayNet = append(displayNet, nw)
}
if filter.Contains("type") {
typeNet := []types.NetworkResource{}
errFilter := filter.WalkValues("type", func(fval string) error {
passList, err := filterNetworkByType(displayNet, fval)
if err != nil {
return err
}
typeNet = append(typeNet, passList...)
return nil
})
if errFilter != nil {
return nil, errFilter
}
displayNet = typeNet
}
return displayNet, nil
}

View File

@@ -0,0 +1,149 @@
// +build !windows
package network // import "github.com/docker/docker/api/server/router/network"
import (
"strings"
"testing"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
)
func TestFilterNetworks(t *testing.T) {
networks := []types.NetworkResource{
{
Name: "host",
Driver: "host",
Scope: "local",
},
{
Name: "bridge",
Driver: "bridge",
Scope: "local",
},
{
Name: "none",
Driver: "null",
Scope: "local",
},
{
Name: "myoverlay",
Driver: "overlay",
Scope: "swarm",
},
{
Name: "mydrivernet",
Driver: "mydriver",
Scope: "local",
},
{
Name: "mykvnet",
Driver: "mykvdriver",
Scope: "global",
},
}
bridgeDriverFilters := filters.NewArgs()
bridgeDriverFilters.Add("driver", "bridge")
overlayDriverFilters := filters.NewArgs()
overlayDriverFilters.Add("driver", "overlay")
nonameDriverFilters := filters.NewArgs()
nonameDriverFilters.Add("driver", "noname")
customDriverFilters := filters.NewArgs()
customDriverFilters.Add("type", "custom")
builtinDriverFilters := filters.NewArgs()
builtinDriverFilters.Add("type", "builtin")
invalidDriverFilters := filters.NewArgs()
invalidDriverFilters.Add("type", "invalid")
localScopeFilters := filters.NewArgs()
localScopeFilters.Add("scope", "local")
swarmScopeFilters := filters.NewArgs()
swarmScopeFilters.Add("scope", "swarm")
globalScopeFilters := filters.NewArgs()
globalScopeFilters.Add("scope", "global")
testCases := []struct {
filter filters.Args
resultCount int
err string
}{
{
filter: bridgeDriverFilters,
resultCount: 1,
err: "",
},
{
filter: overlayDriverFilters,
resultCount: 1,
err: "",
},
{
filter: nonameDriverFilters,
resultCount: 0,
err: "",
},
{
filter: customDriverFilters,
resultCount: 3,
err: "",
},
{
filter: builtinDriverFilters,
resultCount: 3,
err: "",
},
{
filter: invalidDriverFilters,
resultCount: 0,
err: "Invalid filter: 'type'='invalid'",
},
{
filter: localScopeFilters,
resultCount: 4,
err: "",
},
{
filter: swarmScopeFilters,
resultCount: 1,
err: "",
},
{
filter: globalScopeFilters,
resultCount: 1,
err: "",
},
}
for _, testCase := range testCases {
result, err := filterNetworks(networks, testCase.filter)
if testCase.err != "" {
if err == nil {
t.Fatalf("expect error '%s', got no error", testCase.err)
} else if !strings.Contains(err.Error(), testCase.err) {
t.Fatalf("expect error '%s', got '%s'", testCase.err, err)
}
} else {
if err != nil {
t.Fatalf("expect no error, got error '%s'", err)
}
// Make sure result is not nil
if result == nil {
t.Fatal("filterNetworks should not return nil")
}
if len(result) != testCase.resultCount {
t.Fatalf("expect '%d' networks, got '%d' networks", testCase.resultCount, len(result))
}
}
}
}


@@ -36,7 +36,7 @@ func (r *networkRouter) initRoutes() {
router.NewPostRoute("/networks/create", r.postNetworkCreate),
router.NewPostRoute("/networks/{id:.*}/connect", r.postNetworkConnect),
router.NewPostRoute("/networks/{id:.*}/disconnect", r.postNetworkDisconnect),
router.NewPostRoute("/networks/prune", r.postNetworksPrune),
router.NewPostRoute("/networks/prune", r.postNetworksPrune, router.WithCancel),
// DELETE
router.NewDeleteRoute("/networks/{id:.*}", r.deleteNetwork),
}
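Wrapping the prune route with router.WithCancel ties the operation's lifetime to the client connection. The same effect can be sketched with nothing but the standard library, since net/http cancels r.Context() when the client disconnects (pruneHandler and the 10-second stand-in are illustrative only, not the daemon's implementation):

package main

import (
	"fmt"
	"net/http"
	"time"
)

// pruneHandler aborts its (simulated) work as soon as the client goes
// away, which is what cancellation buys a long-running prune call.
func pruneHandler(w http.ResponseWriter, r *http.Request) {
	select {
	case <-time.After(10 * time.Second): // stand-in for the real prune work
		fmt.Fprintln(w, "pruned")
	case <-r.Context().Done():
		return // client disconnected; stop working
	}
}

func main() {
	http.HandleFunc("/networks/prune", pruneHandler)
	http.ListenAndServe(":8080", nil)
}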


@@ -2,66 +2,83 @@ package network // import "github.com/docker/docker/api/server/router/network"
import (
"context"
"encoding/json"
"net/http"
"strconv"
"strings"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/libnetwork"
"github.com/docker/docker/libnetwork/scope"
"github.com/docker/libnetwork"
netconst "github.com/docker/libnetwork/datastore"
"github.com/docker/libnetwork/networkdb"
"github.com/pkg/errors"
)
var (
// acceptedNetworkFilters is a list of acceptable filters
acceptedNetworkFilters = map[string]bool{
"driver": true,
"type": true,
"name": true,
"id": true,
"label": true,
"scope": true,
}
)
func (n *networkRouter) getNetworksList(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
filter, err := filters.FromJSON(r.Form.Get("filters"))
filter := r.Form.Get("filters")
netFilters, err := filters.FromJSON(filter)
if err != nil {
return err
}
if err := network.ValidateFilters(filter); err != nil {
if err := netFilters.Validate(acceptedNetworkFilters); err != nil {
return err
}
var list []network.Summary
nr, err := n.cluster.GetNetworks(filter)
if err == nil {
list = nr
list := []types.NetworkResource{}
if nr, err := n.cluster.GetNetworks(); err == nil {
list = append(list, nr...)
}
// Combine the network list returned by the Docker daemon if it is not
// already returned by the cluster manager
localNetworks, err := n.backend.GetNetworks(filter, backend.NetworkListConfig{Detailed: versions.LessThan(httputils.VersionFromContext(ctx), "1.28")})
SKIP:
for _, nw := range n.backend.GetNetworks() {
for _, nl := range list {
if nl.ID == nw.ID() {
continue SKIP
}
}
var nr *types.NetworkResource
// Versions < 1.28 fetch all the containers attached to a network
// in a network list API call. It is a heavyweight operation when
// run across all the networks. Starting with API version 1.28, this
// detailed info is available via the network-specific GET API
// (equivalent to inspect)
if versions.LessThan(httputils.VersionFromContext(ctx), "1.28") {
nr = n.buildDetailedNetworkResources(nw, false)
} else {
nr = n.buildNetworkResource(nw)
}
list = append(list, *nr)
}
list, err = filterNetworks(list, netFilters)
if err != nil {
return err
}
var idx map[string]bool
if len(list) > 0 {
idx = make(map[string]bool, len(list))
for _, n := range list {
idx[n.ID] = true
}
}
for _, n := range localNetworks {
if idx[n.ID] {
continue
}
list = append(list, n)
}
if list == nil {
list = []network.Summary{}
}
return httputils.WriteJSON(w, http.StatusOK, list)
}
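The listing above merges two sources: cluster-scoped networks from the swarm manager and local networks from the daemon, appending a local entry only when its ID was not already reported by the cluster. A self-contained sketch of that merge (networkSummary and mergeByID are illustrative names):

package main

import "fmt"

type networkSummary struct{ ID, Name string }

// mergeByID mirrors the list endpoint above: cluster entries win, and
// local entries are appended only when their ID is new.
func mergeByID(cluster, local []networkSummary) []networkSummary {
	seen := make(map[string]bool, len(cluster))
	out := append([]networkSummary{}, cluster...)
	for _, n := range cluster {
		seen[n.ID] = true
	}
	for _, n := range local {
		if !seen[n.ID] {
			out = append(out, n)
		}
	}
	return out
}

func main() {
	cluster := []networkSummary{{ID: "a1", Name: "ingress"}}
	local := []networkSummary{{ID: "a1", Name: "ingress"}, {ID: "b2", Name: "bridge"}}
	fmt.Println(mergeByID(cluster, local)) // [{a1 ingress} {b2 bridge}]
}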
@@ -75,13 +92,17 @@ func (e invalidRequestError) Error() string {
func (e invalidRequestError) InvalidParameter() {}
type ambiguousResultsError string
type ambigousResultsError string
func (e ambiguousResultsError) Error() string {
func (e ambigousResultsError) Error() string {
return "network " + string(e) + " is ambiguous"
}
func (ambiguousResultsError) InvalidParameter() {}
func (ambigousResultsError) InvalidParameter() {}
func nameConflict(name string) error {
return errdefs.Conflict(libnetwork.NetworkNameError(name))
}
func (n *networkRouter) getNetwork(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
@@ -98,7 +119,14 @@ func (n *networkRouter) getNetwork(ctx context.Context, w http.ResponseWriter, r
return errors.Wrapf(invalidRequestError{err}, "invalid value for verbose: %s", v)
}
}
networkScope := r.URL.Query().Get("scope")
scope := r.URL.Query().Get("scope")
isMatchingScope := func(scope, term string) bool {
if term != "" {
return scope == term
}
return true
}
// In case multiple networks have duplicate names, return an error.
// TODO (yongtang): should we wrap with version here for backward compatibility?
@@ -108,29 +136,23 @@ func (n *networkRouter) getNetwork(ctx context.Context, w http.ResponseWriter, r
// For full name and partial ID, save the result first, and process later
// in case multiple records were found based on the same term
listByFullName := map[string]network.Inspect{}
listByPartialID := map[string]network.Inspect{}
listByFullName := map[string]types.NetworkResource{}
listByPartialID := map[string]types.NetworkResource{}
// TODO(@cpuguy83): All this logic for figuring out which network to return does not belong here
// Instead there should be a backend function to just get one network.
filter := filters.NewArgs(filters.Arg("idOrName", term))
if networkScope != "" {
filter.Add("scope", networkScope)
}
networks, _ := n.backend.GetNetworks(filter, backend.NetworkListConfig{Detailed: true, Verbose: verbose})
for _, nw := range networks {
if nw.ID == term {
return httputils.WriteJSON(w, http.StatusOK, nw)
nw := n.backend.GetNetworks()
for _, network := range nw {
if network.ID() == term && isMatchingScope(network.Info().Scope(), scope) {
return httputils.WriteJSON(w, http.StatusOK, *n.buildDetailedNetworkResources(network, verbose))
}
if nw.Name == term {
if network.Name() == term && isMatchingScope(network.Info().Scope(), scope) {
// No need to check the ID collision here as we are still in
// local scope and the network ID is unique in this scope.
listByFullName[nw.ID] = nw
listByFullName[network.ID()] = *n.buildDetailedNetworkResources(network, verbose)
}
if strings.HasPrefix(nw.ID, term) {
if strings.HasPrefix(network.ID(), term) && isMatchingScope(network.Info().Scope(), scope) {
// No need to check the ID collision here as we are still in
// local scope and the network ID is unique in this scope.
listByPartialID[nw.ID] = nw
listByPartialID[network.ID()] = *n.buildDetailedNetworkResources(network, verbose)
}
}
@@ -140,7 +162,7 @@ func (n *networkRouter) getNetwork(ctx context.Context, w http.ResponseWriter, r
// or if the get network was passed with a network name and scope as swarm
// return the network. Skipped using isMatchingScope because it is true if the scope
// is not set, which would be the case if the client API is v1.30
if strings.HasPrefix(nwk.ID, term) || networkScope == scope.Swarm {
if strings.HasPrefix(nwk.ID, term) || (netconst.SwarmScope == scope) {
// If we have a previous match from the backend, return it; we need the verbose form when enabled
// ex: overlay/partial_ID or name/swarm_scope
if nwv, ok := listByPartialID[nwk.ID]; ok {
@@ -152,25 +174,25 @@ func (n *networkRouter) getNetwork(ctx context.Context, w http.ResponseWriter, r
}
}
networks, _ = n.cluster.GetNetworks(filter)
for _, nw := range networks {
if nw.ID == term {
return httputils.WriteJSON(w, http.StatusOK, nw)
nr, _ := n.cluster.GetNetworks()
for _, network := range nr {
if network.ID == term && isMatchingScope(network.Scope, scope) {
return httputils.WriteJSON(w, http.StatusOK, network)
}
if nw.Name == term {
if network.Name == term && isMatchingScope(network.Scope, scope) {
// Check the ID collision as we are in swarm scope here, and
// the map (of the listByFullName) may have already had a
// network with the same ID (from local scope previously)
if _, ok := listByFullName[nw.ID]; !ok {
listByFullName[nw.ID] = nw
if _, ok := listByFullName[network.ID]; !ok {
listByFullName[network.ID] = network
}
}
if strings.HasPrefix(nw.ID, term) {
if strings.HasPrefix(network.ID, term) && isMatchingScope(network.Scope, scope) {
// Check the ID collision as we are in swarm scope here, and
// the map (of the listByPartialID) may have already had a
// network with the same ID (from local scope previously)
if _, ok := listByPartialID[nw.ID]; !ok {
listByPartialID[nw.ID] = nw
if _, ok := listByPartialID[network.ID]; !ok {
listByPartialID[network.ID] = network
}
}
}
@@ -182,7 +204,7 @@ func (n *networkRouter) getNetwork(ctx context.Context, w http.ResponseWriter, r
}
}
if len(listByFullName) > 1 {
return errors.Wrapf(ambiguousResultsError(term), "%d matches found based on name", len(listByFullName))
return errors.Wrapf(ambigousResultsError(term), "%d matches found based on name", len(listByFullName))
}
// Find based on partial ID, returns true only if no duplicates
@@ -192,32 +214,43 @@ func (n *networkRouter) getNetwork(ctx context.Context, w http.ResponseWriter, r
}
}
if len(listByPartialID) > 1 {
return errors.Wrapf(ambiguousResultsError(term), "%d matches found based on ID prefix", len(listByPartialID))
return errors.Wrapf(ambigousResultsError(term), "%d matches found based on ID prefix", len(listByPartialID))
}
return libnetwork.ErrNoSuchNetwork(term)
}
func (n *networkRouter) postNetworkCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
var create types.NetworkCreateRequest
if err := httputils.ParseForm(r); err != nil {
return err
}
var create network.CreateRequest
if err := httputils.ReadJSON(r, &create); err != nil {
if err := httputils.CheckForJSON(r); err != nil {
return err
}
if err := json.NewDecoder(r.Body).Decode(&create); err != nil {
return err
}
if nws, err := n.cluster.GetNetworksByName(create.Name); err == nil && len(nws) > 0 {
return libnetwork.NetworkNameError(create.Name)
return nameConflict(create.Name)
}
// For a Swarm-scoped network, this call to backend.CreateNetwork is used to
// validate the configuration. The network will not be created but, if the
// configuration is valid, ManagerRedirectError will be returned and handled
// below.
nw, err := n.backend.CreateNetwork(create)
if err != nil {
var warning string
if _, ok := err.(libnetwork.NetworkNameError); ok {
// check if the user defined CheckDuplicate; if set to true, return an error,
// otherwise prepare a warning message
if create.CheckDuplicate {
return nameConflict(create.Name)
}
warning = libnetwork.NetworkNameError(create.Name).Error()
}
if _, ok := err.(libnetwork.ManagerRedirectError); !ok {
return err
}
@@ -225,8 +258,9 @@ func (n *networkRouter) postNetworkCreate(ctx context.Context, w http.ResponseWr
if err != nil {
return err
}
nw = &network.CreateResponse{
ID: id,
nw = &types.NetworkCreateResponse{
ID: id,
Warning: warning,
}
}
@@ -234,12 +268,16 @@ func (n *networkRouter) postNetworkCreate(ctx context.Context, w http.ResponseWr
}
func (n *networkRouter) postNetworkConnect(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
var connect types.NetworkConnect
if err := httputils.ParseForm(r); err != nil {
return err
}
var connect network.ConnectOptions
if err := httputils.ReadJSON(r, &connect); err != nil {
if err := httputils.CheckForJSON(r); err != nil {
return err
}
if err := json.NewDecoder(r.Body).Decode(&connect); err != nil {
return err
}
@@ -247,16 +285,20 @@ func (n *networkRouter) postNetworkConnect(ctx context.Context, w http.ResponseW
// The reason is that, in the case of an attachable network in swarm scope, the actual local network
// may not be available at the time. At the same time, the daemon's `ConnectContainerToNetwork`
// does the ambiguity check anyway. Therefore, passing the name to the daemon is enough.
return n.backend.ConnectContainerToNetwork(ctx, connect.Container, vars["id"], connect.EndpointConfig)
return n.backend.ConnectContainerToNetwork(connect.Container, vars["id"], connect.EndpointConfig)
}
func (n *networkRouter) postNetworkDisconnect(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
var disconnect types.NetworkDisconnect
if err := httputils.ParseForm(r); err != nil {
return err
}
var disconnect network.DisconnectOptions
if err := httputils.ReadJSON(r, &disconnect); err != nil {
if err := httputils.CheckForJSON(r); err != nil {
return err
}
if err := json.NewDecoder(r.Body).Decode(&disconnect); err != nil {
return err
}
@@ -285,6 +327,182 @@ func (n *networkRouter) deleteNetwork(ctx context.Context, w http.ResponseWriter
return nil
}
func (n *networkRouter) buildNetworkResource(nw libnetwork.Network) *types.NetworkResource {
r := &types.NetworkResource{}
if nw == nil {
return r
}
info := nw.Info()
r.Name = nw.Name()
r.ID = nw.ID()
r.Created = info.Created()
r.Scope = info.Scope()
r.Driver = nw.Type()
r.EnableIPv6 = info.IPv6Enabled()
r.Internal = info.Internal()
r.Attachable = info.Attachable()
r.Ingress = info.Ingress()
r.Options = info.DriverOptions()
r.Containers = make(map[string]types.EndpointResource)
buildIpamResources(r, info)
r.Labels = info.Labels()
r.ConfigOnly = info.ConfigOnly()
if cn := info.ConfigFrom(); cn != "" {
r.ConfigFrom = network.ConfigReference{Network: cn}
}
peers := info.Peers()
if len(peers) != 0 {
r.Peers = buildPeerInfoResources(peers)
}
return r
}
func (n *networkRouter) buildDetailedNetworkResources(nw libnetwork.Network, verbose bool) *types.NetworkResource {
if nw == nil {
return &types.NetworkResource{}
}
r := n.buildNetworkResource(nw)
epl := nw.Endpoints()
for _, e := range epl {
ei := e.Info()
if ei == nil {
continue
}
sb := ei.Sandbox()
tmpID := e.ID()
key := "ep-" + tmpID
if sb != nil {
key = sb.ContainerID()
}
r.Containers[key] = buildEndpointResource(tmpID, e.Name(), ei)
}
if !verbose {
return r
}
services := nw.Info().Services()
r.Services = make(map[string]network.ServiceInfo)
for name, service := range services {
tasks := []network.Task{}
for _, t := range service.Tasks {
tasks = append(tasks, network.Task{
Name: t.Name,
EndpointID: t.EndpointID,
EndpointIP: t.EndpointIP,
Info: t.Info,
})
}
r.Services[name] = network.ServiceInfo{
VIP: service.VIP,
Ports: service.Ports,
Tasks: tasks,
LocalLBIndex: service.LocalLBIndex,
}
}
return r
}
func buildPeerInfoResources(peers []networkdb.PeerInfo) []network.PeerInfo {
peerInfo := make([]network.PeerInfo, 0, len(peers))
for _, peer := range peers {
peerInfo = append(peerInfo, network.PeerInfo{
Name: peer.Name,
IP: peer.IP,
})
}
return peerInfo
}
func buildIpamResources(r *types.NetworkResource, nwInfo libnetwork.NetworkInfo) {
id, opts, ipv4conf, ipv6conf := nwInfo.IpamConfig()
ipv4Info, ipv6Info := nwInfo.IpamInfo()
r.IPAM.Driver = id
r.IPAM.Options = opts
r.IPAM.Config = []network.IPAMConfig{}
for _, ip4 := range ipv4conf {
if ip4.PreferredPool == "" {
continue
}
iData := network.IPAMConfig{}
iData.Subnet = ip4.PreferredPool
iData.IPRange = ip4.SubPool
iData.Gateway = ip4.Gateway
iData.AuxAddress = ip4.AuxAddresses
r.IPAM.Config = append(r.IPAM.Config, iData)
}
if len(r.IPAM.Config) == 0 {
for _, ip4Info := range ipv4Info {
iData := network.IPAMConfig{}
iData.Subnet = ip4Info.IPAMData.Pool.String()
if ip4Info.IPAMData.Gateway != nil {
iData.Gateway = ip4Info.IPAMData.Gateway.IP.String()
}
r.IPAM.Config = append(r.IPAM.Config, iData)
}
}
hasIpv6Conf := false
for _, ip6 := range ipv6conf {
if ip6.PreferredPool == "" {
continue
}
hasIpv6Conf = true
iData := network.IPAMConfig{}
iData.Subnet = ip6.PreferredPool
iData.IPRange = ip6.SubPool
iData.Gateway = ip6.Gateway
iData.AuxAddress = ip6.AuxAddresses
r.IPAM.Config = append(r.IPAM.Config, iData)
}
if !hasIpv6Conf {
for _, ip6Info := range ipv6Info {
if ip6Info.IPAMData.Pool == nil {
continue
}
iData := network.IPAMConfig{}
iData.Subnet = ip6Info.IPAMData.Pool.String()
iData.Gateway = ip6Info.IPAMData.Gateway.String()
r.IPAM.Config = append(r.IPAM.Config, iData)
}
}
}
func buildEndpointResource(id string, name string, info libnetwork.EndpointInfo) types.EndpointResource {
er := types.EndpointResource{}
er.EndpointID = id
er.Name = name
ei := info
if ei == nil {
return er
}
if iface := ei.Iface(); iface != nil {
if mac := iface.MacAddress(); mac != nil {
er.MacAddress = mac.String()
}
if ip := iface.Address(); ip != nil && len(ip.IP) > 0 {
er.IPv4Address = ip.String()
}
if ipv6 := iface.AddressIPv6(); ipv6 != nil && len(ipv6.IP) > 0 {
er.IPv6Address = ipv6.String()
}
}
return er
}
func (n *networkRouter) postNetworksPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
@@ -310,47 +528,47 @@ func (n *networkRouter) postNetworksPrune(ctx context.Context, w http.ResponseWr
// For full name and partial ID, save the result first, and process later
// in case multiple records were found based on the same term
// TODO (yongtang): should we wrap with version here for backward compatibility?
func (n *networkRouter) findUniqueNetwork(term string) (network.Inspect, error) {
listByFullName := map[string]network.Inspect{}
listByPartialID := map[string]network.Inspect{}
func (n *networkRouter) findUniqueNetwork(term string) (types.NetworkResource, error) {
listByFullName := map[string]types.NetworkResource{}
listByPartialID := map[string]types.NetworkResource{}
nw := n.backend.GetNetworks()
for _, network := range nw {
if network.ID() == term {
return *n.buildDetailedNetworkResources(network, false), nil
filter := filters.NewArgs(filters.Arg("idOrName", term))
networks, _ := n.backend.GetNetworks(filter, backend.NetworkListConfig{Detailed: true})
for _, nw := range networks {
if nw.ID == term {
return nw, nil
}
if nw.Name == term && !nw.Ingress {
if network.Name() == term && !network.Info().Ingress() {
// No need to check the ID collision here as we are still in
// local scope and the network ID is unique in this scope.
listByFullName[nw.ID] = nw
listByFullName[network.ID()] = *n.buildDetailedNetworkResources(network, false)
}
if strings.HasPrefix(nw.ID, term) {
if strings.HasPrefix(network.ID(), term) {
// No need to check the ID collision here as we are still in
// local scope and the network ID is unique in this scope.
listByPartialID[nw.ID] = nw
listByPartialID[network.ID()] = *n.buildDetailedNetworkResources(network, false)
}
}
networks, _ = n.cluster.GetNetworks(filter)
for _, nw := range networks {
if nw.ID == term {
return nw, nil
nr, _ := n.cluster.GetNetworks()
for _, network := range nr {
if network.ID == term {
return network, nil
}
if nw.Name == term {
if network.Name == term {
// Check the ID collision as we are in swarm scope here, and
// the map (of the listByFullName) may have already had a
// network with the same ID (from local scope previously)
if _, ok := listByFullName[nw.ID]; !ok {
listByFullName[nw.ID] = nw
if _, ok := listByFullName[network.ID]; !ok {
listByFullName[network.ID] = network
}
}
if strings.HasPrefix(nw.ID, term) {
if strings.HasPrefix(network.ID, term) {
// Check the ID collision as we are in swarm scope here, and
// the map (of the listByPartialID) may have already had a
// network with the same ID (from local scope previously)
if _, ok := listByPartialID[nw.ID]; !ok {
listByPartialID[nw.ID] = nw
if _, ok := listByPartialID[network.ID]; !ok {
listByPartialID[network.ID] = network
}
}
}
@@ -362,7 +580,7 @@ func (n *networkRouter) findUniqueNetwork(term string) (network.Inspect, error)
}
}
if len(listByFullName) > 1 {
return network.Inspect{}, errdefs.InvalidParameter(errors.Errorf("network %s is ambiguous (%d matches found based on name)", term, len(listByFullName)))
return types.NetworkResource{}, errdefs.InvalidParameter(errors.Errorf("network %s is ambiguous (%d matches found based on name)", term, len(listByFullName)))
}
// Find based on partial ID, returns true only if no duplicates
@@ -372,8 +590,8 @@ func (n *networkRouter) findUniqueNetwork(term string) (network.Inspect, error)
}
}
if len(listByPartialID) > 1 {
return network.Inspect{}, errdefs.InvalidParameter(errors.Errorf("network %s is ambiguous (%d matches found based on ID prefix)", term, len(listByPartialID)))
return types.NetworkResource{}, errdefs.InvalidParameter(errors.Errorf("network %s is ambiguous (%d matches found based on ID prefix)", term, len(listByPartialID)))
}
return network.Inspect{}, errdefs.NotFound(libnetwork.ErrNoSuchNetwork(term))
return types.NetworkResource{}, errdefs.NotFound(libnetwork.ErrNoSuchNetwork(term))
}
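findUniqueNetwork resolves a term with a fixed precedence: an exact ID match returns immediately, then a unique full-name match, then a unique ID-prefix match; multiple candidates at a level produce the ambiguity errors above. A standalone sketch of that precedence (resolve and the id-to-name map are illustrative):

package main

import (
	"errors"
	"fmt"
	"strings"
)

// resolve mirrors the precedence used above: exact ID, then unique
// name, then unique ID prefix; two or more candidates is ambiguous.
func resolve(term string, networks map[string]string /* id -> name */) (string, error) {
	var byName, byPrefix []string
	for id, name := range networks {
		if id == term {
			return id, nil
		}
		if name == term {
			byName = append(byName, id)
		}
		if strings.HasPrefix(id, term) {
			byPrefix = append(byPrefix, id)
		}
	}
	switch {
	case len(byName) == 1:
		return byName[0], nil
	case len(byName) > 1:
		return "", fmt.Errorf("network %s is ambiguous (%d matches found based on name)", term, len(byName))
	case len(byPrefix) == 1:
		return byPrefix[0], nil
	case len(byPrefix) > 1:
		return "", fmt.Errorf("network %s is ambiguous (%d matches found based on ID prefix)", term, len(byPrefix))
	}
	return "", errors.New("no such network: " + term)
}

func main() {
	nets := map[string]string{"abc123": "web", "abd456": "db"}
	fmt.Println(resolve("abc123", nets)) // exact ID wins
	fmt.Println(resolve("web", nets))    // unique name
	fmt.Println(resolve("ab", nets))     // ambiguous prefix -> error
}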


@@ -5,25 +5,23 @@ import (
"io"
"net/http"
"github.com/distribution/reference"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
"github.com/docker/distribution/reference"
enginetypes "github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/registry"
"github.com/docker/docker/plugin"
)
// Backend for Plugin
type Backend interface {
Disable(name string, config *backend.PluginDisableConfig) error
Enable(name string, config *backend.PluginEnableConfig) error
List(filters.Args) ([]types.Plugin, error)
Inspect(name string) (*types.Plugin, error)
Remove(name string, config *backend.PluginRmConfig) error
Disable(name string, config *enginetypes.PluginDisableConfig) error
Enable(name string, config *enginetypes.PluginEnableConfig) error
List(filters.Args) ([]enginetypes.Plugin, error)
Inspect(name string) (*enginetypes.Plugin, error)
Remove(name string, config *enginetypes.PluginRmConfig) error
Set(name string, args []string) error
Privileges(ctx context.Context, ref reference.Named, metaHeaders http.Header, authConfig *registry.AuthConfig) (types.PluginPrivileges, error)
Pull(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *registry.AuthConfig, privileges types.PluginPrivileges, outStream io.Writer, opts ...plugin.CreateOpt) error
Push(ctx context.Context, name string, metaHeaders http.Header, authConfig *registry.AuthConfig, outStream io.Writer) error
Upgrade(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *registry.AuthConfig, privileges types.PluginPrivileges, outStream io.Writer) error
CreateFromContext(ctx context.Context, tarCtx io.ReadCloser, options *types.PluginCreateOptions) error
Privileges(ctx context.Context, ref reference.Named, metaHeaders http.Header, authConfig *enginetypes.AuthConfig) (enginetypes.PluginPrivileges, error)
Pull(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer, opts ...plugin.CreateOpt) error
Push(ctx context.Context, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, outStream io.Writer) error
Upgrade(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer) error
CreateFromContext(ctx context.Context, tarCtx io.ReadCloser, options *enginetypes.PluginCreateOptions) error
}


@@ -28,11 +28,11 @@ func (r *pluginRouter) initRoutes() {
router.NewGetRoute("/plugins/{name:.*}/json", r.inspectPlugin),
router.NewGetRoute("/plugins/privileges", r.getPrivileges),
router.NewDeleteRoute("/plugins/{name:.*}", r.removePlugin),
router.NewPostRoute("/plugins/{name:.*}/enable", r.enablePlugin),
router.NewPostRoute("/plugins/{name:.*}/enable", r.enablePlugin), // PATCH?
router.NewPostRoute("/plugins/{name:.*}/disable", r.disablePlugin),
router.NewPostRoute("/plugins/pull", r.pullPlugin),
router.NewPostRoute("/plugins/{name:.*}/push", r.pushPlugin),
router.NewPostRoute("/plugins/{name:.*}/upgrade", r.upgradePlugin),
router.NewPostRoute("/plugins/pull", r.pullPlugin, router.WithCancel),
router.NewPostRoute("/plugins/{name:.*}/push", r.pushPlugin, router.WithCancel),
router.NewPostRoute("/plugins/{name:.*}/upgrade", r.upgradePlugin, router.WithCancel),
router.NewPostRoute("/plugins/{name:.*}/set", r.setPlugin),
router.NewPostRoute("/plugins/create", r.createPlugin),
}


@@ -2,22 +2,23 @@ package plugin // import "github.com/docker/docker/api/server/router/plugin"
import (
"context"
"encoding/base64"
"encoding/json"
"net/http"
"strconv"
"strings"
"github.com/distribution/reference"
"github.com/docker/distribution/reference"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/registry"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/streamformatter"
"github.com/pkg/errors"
)
func parseHeaders(headers http.Header) (map[string][]string, *registry.AuthConfig) {
func parseHeaders(headers http.Header) (map[string][]string, *types.AuthConfig) {
metaHeaders := map[string][]string{}
for k, v := range headers {
if strings.HasPrefix(k, "X-Meta-") {
@@ -25,8 +26,16 @@ func parseHeaders(headers http.Header) (map[string][]string, *registry.AuthConfi
}
}
// Ignore invalid AuthConfig to increase compatibility with the existing API.
authConfig, _ := registry.DecodeAuthConfig(headers.Get(registry.AuthHeader))
// Get X-Registry-Auth
authEncoded := headers.Get("X-Registry-Auth")
authConfig := &types.AuthConfig{}
if authEncoded != "" {
authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil {
authConfig = &types.AuthConfig{}
}
}
return metaHeaders, authConfig
}
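parseHeaders decodes the X-Registry-Auth header as URL-safe base64 wrapped around a JSON document, falling back to an empty AuthConfig on bad input. For reference, the client-side encoding is the mirror image (authConfig here is a trimmed stand-in for the API's AuthConfig type):

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

// authConfig is a trimmed stand-in for the Docker API's AuthConfig.
type authConfig struct {
	Username string `json:"username"`
	Password string `json:"password"`
}

func main() {
	buf, err := json.Marshal(authConfig{Username: "jane", Password: "secret"})
	if err != nil {
		panic(err)
	}
	// The header value is URL-safe base64 of the JSON document.
	fmt.Println("X-Registry-Auth:", base64.URLEncoding.EncodeToString(buf))
}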
@@ -85,8 +94,12 @@ func (pr *pluginRouter) upgradePlugin(ctx context.Context, w http.ResponseWriter
}
var privileges types.PluginPrivileges
if err := httputils.ReadJSON(r, &privileges); err != nil {
return err
dec := json.NewDecoder(r.Body)
if err := dec.Decode(&privileges); err != nil {
return errors.Wrap(err, "failed to parse privileges")
}
if dec.More() {
return errors.New("invalid privileges")
}
metaHeaders, authConfig := parseHeaders(r.Header)
@@ -108,7 +121,7 @@ func (pr *pluginRouter) upgradePlugin(ctx context.Context, w http.ResponseWriter
if !output.Flushed() {
return err
}
_, _ = output.Write(streamformatter.FormatError(err))
output.Write(streamformatter.FormatError(err))
}
return nil
@@ -120,8 +133,12 @@ func (pr *pluginRouter) pullPlugin(ctx context.Context, w http.ResponseWriter, r
}
var privileges types.PluginPrivileges
if err := httputils.ReadJSON(r, &privileges); err != nil {
return err
dec := json.NewDecoder(r.Body)
if err := dec.Decode(&privileges); err != nil {
return errors.Wrap(err, "failed to parse privileges")
}
if dec.More() {
return errors.New("invalid privileges")
}
metaHeaders, authConfig := parseHeaders(r.Header)
@@ -143,7 +160,7 @@ func (pr *pluginRouter) pullPlugin(ctx context.Context, w http.ResponseWriter, r
if !output.Flushed() {
return err
}
_, _ = output.Write(streamformatter.FormatError(err))
output.Write(streamformatter.FormatError(err))
}
return nil
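Both pullPlugin and upgradePlugin pair Decode with dec.More() so a request body carrying anything after the privileges array is rejected instead of silently ignored. A standalone sketch of that strict-decode pattern (decodeStrict is an illustrative helper):

package main

import (
	"encoding/json"
	"errors"
	"fmt"
	"strings"
)

// decodeStrict decodes exactly one JSON value and rejects trailing
// content, mirroring the Decode + More() checks above.
func decodeStrict(body string, v interface{}) error {
	dec := json.NewDecoder(strings.NewReader(body))
	if err := dec.Decode(v); err != nil {
		return err
	}
	if dec.More() {
		return errors.New("unexpected trailing JSON")
	}
	return nil
}

func main() {
	var privileges []map[string]interface{}
	fmt.Println(decodeStrict(`[]`, &privileges))         // <nil>
	fmt.Println(decodeStrict(`[] {"x":1}`, &privileges)) // error
}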
@@ -187,13 +204,12 @@ func (pr *pluginRouter) createPlugin(ctx context.Context, w http.ResponseWriter,
}
options := &types.PluginCreateOptions{
RepoName: r.FormValue("name"),
}
RepoName: r.FormValue("name")}
if err := pr.backend.CreateFromContext(ctx, r.Body, options); err != nil {
return err
}
// TODO: send progress bar
//TODO: send progress bar
w.WriteHeader(http.StatusNoContent)
return nil
}
@@ -208,7 +224,7 @@ func (pr *pluginRouter) enablePlugin(ctx context.Context, w http.ResponseWriter,
if err != nil {
return err
}
config := &backend.PluginEnableConfig{Timeout: timeout}
config := &types.PluginEnableConfig{Timeout: timeout}
return pr.backend.Enable(name, config)
}
@@ -219,7 +235,7 @@ func (pr *pluginRouter) disablePlugin(ctx context.Context, w http.ResponseWriter
}
name := vars["name"]
config := &backend.PluginDisableConfig{
config := &types.PluginDisableConfig{
ForceDisable: httputils.BoolValue(r, "force"),
}
@@ -232,7 +248,7 @@ func (pr *pluginRouter) removePlugin(ctx context.Context, w http.ResponseWriter,
}
name := vars["name"]
config := &backend.PluginRmConfig{
config := &types.PluginRmConfig{
ForceRemove: httputils.BoolValue(r, "force"),
}
return pr.backend.Remove(name, config)
@@ -252,14 +268,14 @@ func (pr *pluginRouter) pushPlugin(ctx context.Context, w http.ResponseWriter, r
if !output.Flushed() {
return err
}
_, _ = output.Write(streamformatter.FormatError(err))
output.Write(streamformatter.FormatError(err))
}
return nil
}
func (pr *pluginRouter) setPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
var args []string
if err := httputils.ReadJSON(r, &args); err != nil {
if err := json.NewDecoder(r.Body).Decode(&args); err != nil {
return err
}
if err := pr.backend.Set(vars["name"], args); err != nil {


@@ -24,6 +24,6 @@ func (r *sessionRouter) Routes() []router.Route {
func (r *sessionRouter) initRoutes() {
r.routes = []router.Route{
router.NewPostRoute("/session", r.startSession),
router.Experimental(router.NewPostRoute("/session", r.startSession)),
}
}
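router.Experimental marks the /session route so it is only served when the daemon runs in experimental mode. The general shape of such gating, sketched here with a plain feature flag rather than the daemon's Route type:

package main

import "net/http"

// experimental gates a handler behind a boolean flag; the daemon's
// router.Experimental performs the equivalent on its Route type.
func experimental(enabled bool, h http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		if !enabled {
			http.Error(w, "feature is experimental", http.StatusNotImplemented)
			return
		}
		h(w, r)
	}
}

func main() {
	startSession := func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) }
	http.HandleFunc("/session", experimental(false, startSession))
	http.ListenAndServe(":8080", nil)
}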


@@ -3,41 +3,46 @@ package swarm // import "github.com/docker/docker/api/server/router/swarm"
import (
"context"
"github.com/docker/docker/api/types"
basictypes "github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/swarm"
types "github.com/docker/docker/api/types/swarm"
)
// Backend abstracts a swarm manager.
type Backend interface {
Init(req swarm.InitRequest) (string, error)
Join(req swarm.JoinRequest) error
Leave(ctx context.Context, force bool) error
Inspect() (swarm.Swarm, error)
Update(uint64, swarm.Spec, swarm.UpdateFlags) error
Init(req types.InitRequest) (string, error)
Join(req types.JoinRequest) error
Leave(force bool) error
Inspect() (types.Swarm, error)
Update(uint64, types.Spec, types.UpdateFlags) error
GetUnlockKey() (string, error)
UnlockSwarm(req swarm.UnlockRequest) error
GetServices(types.ServiceListOptions) ([]swarm.Service, error)
GetService(idOrName string, insertDefaults bool) (swarm.Service, error)
CreateService(swarm.ServiceSpec, string, bool) (*swarm.ServiceCreateResponse, error)
UpdateService(string, uint64, swarm.ServiceSpec, types.ServiceUpdateOptions, bool) (*swarm.ServiceUpdateResponse, error)
UnlockSwarm(req types.UnlockRequest) error
GetServices(basictypes.ServiceListOptions) ([]types.Service, error)
GetService(idOrName string, insertDefaults bool) (types.Service, error)
CreateService(types.ServiceSpec, string, bool) (*basictypes.ServiceCreateResponse, error)
UpdateService(string, uint64, types.ServiceSpec, basictypes.ServiceUpdateOptions, bool) (*basictypes.ServiceUpdateResponse, error)
RemoveService(string) error
ServiceLogs(context.Context, *backend.LogSelector, *container.LogsOptions) (<-chan *backend.LogMessage, error)
GetNodes(types.NodeListOptions) ([]swarm.Node, error)
GetNode(string) (swarm.Node, error)
UpdateNode(string, uint64, swarm.NodeSpec) error
ServiceLogs(context.Context, *backend.LogSelector, *basictypes.ContainerLogsOptions) (<-chan *backend.LogMessage, error)
GetNodes(basictypes.NodeListOptions) ([]types.Node, error)
GetNode(string) (types.Node, error)
UpdateNode(string, uint64, types.NodeSpec) error
RemoveNode(string, bool) error
GetTasks(types.TaskListOptions) ([]swarm.Task, error)
GetTask(string) (swarm.Task, error)
GetSecrets(opts types.SecretListOptions) ([]swarm.Secret, error)
CreateSecret(s swarm.SecretSpec) (string, error)
GetTasks(basictypes.TaskListOptions) ([]types.Task, error)
GetTask(string) (types.Task, error)
GetSecrets(opts basictypes.SecretListOptions) ([]types.Secret, error)
CreateSecret(s types.SecretSpec) (string, error)
RemoveSecret(idOrName string) error
GetSecret(id string) (swarm.Secret, error)
UpdateSecret(idOrName string, version uint64, spec swarm.SecretSpec) error
GetConfigs(opts types.ConfigListOptions) ([]swarm.Config, error)
CreateConfig(s swarm.ConfigSpec) (string, error)
GetSecret(id string) (types.Secret, error)
UpdateSecret(idOrName string, version uint64, spec types.SecretSpec) error
GetConfigs(opts basictypes.ConfigListOptions) ([]types.Config, error)
CreateConfig(s types.ConfigSpec) (string, error)
RemoveConfig(id string) error
GetConfig(id string) (swarm.Config, error)
UpdateConfig(idOrName string, version uint64, spec swarm.ConfigSpec) error
GetConfig(id string) (types.Config, error)
UpdateConfig(idOrName string, version uint64, spec types.ConfigSpec) error
}


@@ -37,7 +37,7 @@ func (sr *swarmRouter) initRoutes() {
router.NewPostRoute("/services/create", sr.createService),
router.NewPostRoute("/services/{id}/update", sr.updateService),
router.NewDeleteRoute("/services/{id}", sr.removeService),
router.NewGetRoute("/services/{id}/logs", sr.getServiceLogs),
router.NewGetRoute("/services/{id}/logs", sr.getServiceLogs, router.WithCancel),
router.NewGetRoute("/nodes", sr.getNodes),
router.NewGetRoute("/nodes/{id}", sr.getNode),
@@ -46,7 +46,7 @@ func (sr *swarmRouter) initRoutes() {
router.NewGetRoute("/tasks", sr.getTasks),
router.NewGetRoute("/tasks/{id}", sr.getTask),
router.NewGetRoute("/tasks/{id}/logs", sr.getTaskLogs),
router.NewGetRoute("/tasks/{id}/logs", sr.getTaskLogs, router.WithCancel),
router.NewGetRoute("/secrets", sr.getSecrets),
router.NewPostRoute("/secrets/create", sr.createSecret),


@@ -2,41 +2,30 @@ package swarm // import "github.com/docker/docker/api/server/router/swarm"
import (
"context"
"encoding/json"
"fmt"
"net/http"
"strconv"
"github.com/containerd/log"
"github.com/docker/docker/api/server/httputils"
basictypes "github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/registry"
types "github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/errdefs"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
func (sr *swarmRouter) initCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
var req types.InitRequest
if err := httputils.ReadJSON(r, &req); err != nil {
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
return err
}
version := httputils.VersionFromContext(ctx)
// DefaultAddrPool and SubnetSize were added in API 1.39. Ignore on older API versions.
if versions.LessThan(version, "1.39") {
req.DefaultAddrPool = nil
req.SubnetSize = 0
}
// DataPathPort was added in API 1.40. Ignore this option on older API versions.
if versions.LessThan(version, "1.40") {
req.DataPathPort = 0
}
nodeID, err := sr.backend.Init(req)
if err != nil {
log.G(ctx).WithContext(ctx).WithError(err).Debug("Error initializing swarm")
logrus.Errorf("Error initializing swarm: %v", err)
return err
}
return httputils.WriteJSON(w, http.StatusOK, nodeID)
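initCluster zeroes out fields newer than the negotiated API version, so an older client never has its request reinterpreted. The check itself is a segment-wise version comparison; a small sketch against the same versions helper (assuming the moby module is on the import path):

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/versions"
)

func main() {
	clientVersion := "1.38"
	// true: a 1.38 client never sent DefaultAddrPool/SubnetSize (added in 1.39)
	fmt.Println(versions.LessThan(clientVersion, "1.39"))
	// true: DataPathPort was added in 1.40
	fmt.Println(versions.LessThan(clientVersion, "1.40"))
}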
@@ -44,7 +33,7 @@ func (sr *swarmRouter) initCluster(ctx context.Context, w http.ResponseWriter, r
func (sr *swarmRouter) joinCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
var req types.JoinRequest
if err := httputils.ReadJSON(r, &req); err != nil {
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
return err
}
return sr.backend.Join(req)
@@ -56,13 +45,13 @@ func (sr *swarmRouter) leaveCluster(ctx context.Context, w http.ResponseWriter,
}
force := httputils.BoolValue(r, "force")
return sr.backend.Leave(ctx, force)
return sr.backend.Leave(force)
}
func (sr *swarmRouter) inspectCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
swarm, err := sr.backend.Inspect()
if err != nil {
log.G(ctx).WithContext(ctx).WithError(err).Debug("Error getting swarm")
logrus.Errorf("Error getting swarm: %v", err)
return err
}
@@ -71,7 +60,7 @@ func (sr *swarmRouter) inspectCluster(ctx context.Context, w http.ResponseWriter
func (sr *swarmRouter) updateCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
var swarm types.Spec
if err := httputils.ReadJSON(r, &swarm); err != nil {
if err := json.NewDecoder(r.Body).Decode(&swarm); err != nil {
return err
}
@@ -114,7 +103,7 @@ func (sr *swarmRouter) updateCluster(ctx context.Context, w http.ResponseWriter,
}
if err := sr.backend.Update(version, swarm, flags); err != nil {
log.G(ctx).WithContext(ctx).WithError(err).Debug("Error configuring swarm")
logrus.Errorf("Error configuring swarm: %v", err)
return err
}
return nil
@@ -122,12 +111,12 @@ func (sr *swarmRouter) updateCluster(ctx context.Context, w http.ResponseWriter,
func (sr *swarmRouter) unlockCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
var req types.UnlockRequest
if err := httputils.ReadJSON(r, &req); err != nil {
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
return err
}
if err := sr.backend.UnlockSwarm(req); err != nil {
log.G(ctx).WithContext(ctx).WithError(err).Debug("Error unlocking swarm")
logrus.Errorf("Error unlocking swarm: %v", err)
return err
}
return nil
@@ -136,7 +125,7 @@ func (sr *swarmRouter) unlockCluster(ctx context.Context, w http.ResponseWriter,
func (sr *swarmRouter) getUnlockKey(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
unlockKey, err := sr.backend.GetUnlockKey()
if err != nil {
log.G(ctx).WithContext(ctx).WithError(err).Debug("Error retrieving swarm unlock key")
logrus.WithError(err).Errorf("Error retrieving swarm unlock key")
return err
}
@@ -151,24 +140,12 @@ func (sr *swarmRouter) getServices(ctx context.Context, w http.ResponseWriter, r
}
filter, err := filters.FromJSON(r.Form.Get("filters"))
if err != nil {
return err
return errdefs.InvalidParameter(err)
}
// the status query parameter is only supported in API versions >= 1.41. If
// the client is using an older version, ignore the parameter.
cliVersion := httputils.VersionFromContext(ctx)
var status bool
if value := r.URL.Query().Get("status"); value != "" && !versions.LessThan(cliVersion, "1.41") {
var err error
status, err = strconv.ParseBool(value)
if err != nil {
return errors.Wrapf(errdefs.InvalidParameter(err), "invalid value for status: %s", value)
}
}
services, err := sr.backend.GetServices(basictypes.ServiceListOptions{Filters: filter, Status: status})
services, err := sr.backend.GetServices(basictypes.ServiceListOptions{Filters: filter})
if err != nil {
log.G(ctx).WithContext(ctx).WithError(err).Debug("Error getting services")
logrus.Errorf("Error getting services: %v", err)
return err
}
@@ -177,27 +154,18 @@ func (sr *swarmRouter) getServices(ctx context.Context, w http.ResponseWriter, r
func (sr *swarmRouter) getService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
var insertDefaults bool
if value := r.URL.Query().Get("insertDefaults"); value != "" {
var err error
insertDefaults, err = strconv.ParseBool(value)
if err != nil {
err := fmt.Errorf("invalid value for insertDefaults: %s", value)
return errors.Wrapf(errdefs.InvalidParameter(err), "invalid value for insertDefaults: %s", value)
}
}
// you may note that there is no code here to handle the "status" query
// parameter, as in getServices. The Status field is not supported when
// retrieving an individual service because the Backend API changes
// required to accommodate it would be too disruptive, and because that
// field is so rarely needed as part of an individual service inspection.
service, err := sr.backend.GetService(vars["id"], insertDefaults)
if err != nil {
log.G(ctx).WithContext(ctx).WithFields(log.Fields{
"error": err,
"service-id": vars["id"],
}).Debug("Error getting service")
logrus.Errorf("Error getting service %s: %v", vars["id"], err)
return err
}
@@ -206,30 +174,21 @@ func (sr *swarmRouter) getService(ctx context.Context, w http.ResponseWriter, r
func (sr *swarmRouter) createService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
var service types.ServiceSpec
if err := httputils.ReadJSON(r, &service); err != nil {
if err := json.NewDecoder(r.Body).Decode(&service); err != nil {
return err
}
// TODO(thaJeztah): remove logentries check and migration code in release v26.0.0.
if service.TaskTemplate.LogDriver != nil && service.TaskTemplate.LogDriver.Name == "logentries" {
return errdefs.InvalidParameter(errors.New("the logentries logging driver has been deprecated and removed"))
}
// Get returns "" if the header does not exist
encodedAuth := r.Header.Get(registry.AuthHeader)
encodedAuth := r.Header.Get("X-Registry-Auth")
cliVersion := r.Header.Get("version")
queryRegistry := false
if v := httputils.VersionFromContext(ctx); v != "" {
if versions.LessThan(v, "1.30") {
queryRegistry = true
}
adjustForAPIVersion(v, &service)
if cliVersion != "" && versions.LessThan(cliVersion, "1.30") {
queryRegistry = true
}
resp, err := sr.backend.CreateService(service, encodedAuth, queryRegistry)
if err != nil {
log.G(ctx).WithFields(log.Fields{
"error": err,
"service-name": service.Name,
}).Debug("Error creating service")
logrus.Errorf("Error creating service %s: %v", service.Name, err)
return err
}
@@ -238,13 +197,9 @@ func (sr *swarmRouter) createService(ctx context.Context, w http.ResponseWriter,
func (sr *swarmRouter) updateService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
var service types.ServiceSpec
if err := httputils.ReadJSON(r, &service); err != nil {
if err := json.NewDecoder(r.Body).Decode(&service); err != nil {
return err
}
// TODO(thaJeztah): remove logentries check and migration code in release v26.0.0.
if service.TaskTemplate.LogDriver != nil && service.TaskTemplate.LogDriver.Name == "logentries" {
return errdefs.InvalidParameter(errors.New("the logentries logging driver has been deprecated and removed"))
}
rawVersion := r.URL.Query().Get("version")
version, err := strconv.ParseUint(rawVersion, 10, 64)
@@ -256,23 +211,18 @@ func (sr *swarmRouter) updateService(ctx context.Context, w http.ResponseWriter,
var flags basictypes.ServiceUpdateOptions
// Get returns "" if the header does not exist
flags.EncodedRegistryAuth = r.Header.Get(registry.AuthHeader)
flags.EncodedRegistryAuth = r.Header.Get("X-Registry-Auth")
flags.RegistryAuthFrom = r.URL.Query().Get("registryAuthFrom")
flags.Rollback = r.URL.Query().Get("rollback")
cliVersion := r.Header.Get("version")
queryRegistry := false
if v := httputils.VersionFromContext(ctx); v != "" {
if versions.LessThan(v, "1.30") {
queryRegistry = true
}
adjustForAPIVersion(v, &service)
if cliVersion != "" && versions.LessThan(cliVersion, "1.30") {
queryRegistry = true
}
resp, err := sr.backend.UpdateService(vars["id"], version, service, flags, queryRegistry)
if err != nil {
log.G(ctx).WithContext(ctx).WithFields(log.Fields{
"error": err,
"service-id": vars["id"],
}).Debug("Error updating service")
logrus.Errorf("Error updating service %s: %v", vars["id"], err)
return err
}
return httputils.WriteJSON(w, http.StatusOK, resp)
@@ -280,10 +230,7 @@ func (sr *swarmRouter) updateService(ctx context.Context, w http.ResponseWriter,
func (sr *swarmRouter) removeService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := sr.backend.RemoveService(vars["id"]); err != nil {
log.G(ctx).WithContext(ctx).WithFields(log.Fields{
"error": err,
"service-id": vars["id"],
}).Debug("Error removing service")
logrus.Errorf("Error removing service %s: %v", vars["id"], err)
return err
}
return nil
@@ -324,7 +271,7 @@ func (sr *swarmRouter) getNodes(ctx context.Context, w http.ResponseWriter, r *h
nodes, err := sr.backend.GetNodes(basictypes.NodeListOptions{Filters: filter})
if err != nil {
log.G(ctx).WithContext(ctx).WithError(err).Debug("Error getting nodes")
logrus.Errorf("Error getting nodes: %v", err)
return err
}
@@ -334,10 +281,7 @@ func (sr *swarmRouter) getNodes(ctx context.Context, w http.ResponseWriter, r *h
func (sr *swarmRouter) getNode(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
node, err := sr.backend.GetNode(vars["id"])
if err != nil {
log.G(ctx).WithContext(ctx).WithFields(log.Fields{
"error": err,
"node-id": vars["id"],
}).Debug("Error getting node")
logrus.Errorf("Error getting node %s: %v", vars["id"], err)
return err
}
@@ -346,7 +290,7 @@ func (sr *swarmRouter) getNode(ctx context.Context, w http.ResponseWriter, r *ht
func (sr *swarmRouter) updateNode(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
var node types.NodeSpec
if err := httputils.ReadJSON(r, &node); err != nil {
if err := json.NewDecoder(r.Body).Decode(&node); err != nil {
return err
}
@@ -358,10 +302,7 @@ func (sr *swarmRouter) updateNode(ctx context.Context, w http.ResponseWriter, r
}
if err := sr.backend.UpdateNode(vars["id"], version, node); err != nil {
log.G(ctx).WithContext(ctx).WithFields(log.Fields{
"error": err,
"node-id": vars["id"],
}).Debug("Error updating node")
logrus.Errorf("Error updating node %s: %v", vars["id"], err)
return err
}
return nil
@@ -375,10 +316,7 @@ func (sr *swarmRouter) removeNode(ctx context.Context, w http.ResponseWriter, r
force := httputils.BoolValue(r, "force")
if err := sr.backend.RemoveNode(vars["id"], force); err != nil {
log.G(ctx).WithContext(ctx).WithFields(log.Fields{
"error": err,
"node-id": vars["id"],
}).Debug("Error removing node")
logrus.Errorf("Error removing node %s: %v", vars["id"], err)
return err
}
return nil
@@ -395,7 +333,7 @@ func (sr *swarmRouter) getTasks(ctx context.Context, w http.ResponseWriter, r *h
tasks, err := sr.backend.GetTasks(basictypes.TaskListOptions{Filters: filter})
if err != nil {
log.G(ctx).WithContext(ctx).WithError(err).Debug("Error getting tasks")
logrus.Errorf("Error getting tasks: %v", err)
return err
}
@@ -405,10 +343,7 @@ func (sr *swarmRouter) getTasks(ctx context.Context, w http.ResponseWriter, r *h
func (sr *swarmRouter) getTask(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
task, err := sr.backend.GetTask(vars["id"])
if err != nil {
log.G(ctx).WithContext(ctx).WithFields(log.Fields{
"error": err,
"task-id": vars["id"],
}).Debug("Error getting task")
logrus.Errorf("Error getting task %s: %v", vars["id"], err)
return err
}
@@ -434,7 +369,7 @@ func (sr *swarmRouter) getSecrets(ctx context.Context, w http.ResponseWriter, r
func (sr *swarmRouter) createSecret(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
var secret types.SecretSpec
if err := httputils.ReadJSON(r, &secret); err != nil {
if err := json.NewDecoder(r.Body).Decode(&secret); err != nil {
return err
}
version := httputils.VersionFromContext(ctx)
@@ -472,8 +407,8 @@ func (sr *swarmRouter) getSecret(ctx context.Context, w http.ResponseWriter, r *
func (sr *swarmRouter) updateSecret(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
var secret types.SecretSpec
if err := httputils.ReadJSON(r, &secret); err != nil {
return err
if err := json.NewDecoder(r.Body).Decode(&secret); err != nil {
return errdefs.InvalidParameter(err)
}
rawVersion := r.URL.Query().Get("version")
@@ -505,7 +440,7 @@ func (sr *swarmRouter) getConfigs(ctx context.Context, w http.ResponseWriter, r
func (sr *swarmRouter) createConfig(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
var config types.ConfigSpec
if err := httputils.ReadJSON(r, &config); err != nil {
if err := json.NewDecoder(r.Body).Decode(&config); err != nil {
return err
}
@@ -544,8 +479,8 @@ func (sr *swarmRouter) getConfig(ctx context.Context, w http.ResponseWriter, r *
func (sr *swarmRouter) updateConfig(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
var config types.ConfigSpec
if err := httputils.ReadJSON(r, &config); err != nil {
return err
if err := json.NewDecoder(r.Body).Decode(&config); err != nil {
return errdefs.InvalidParameter(err)
}
rawVersion := r.URL.Query().Get("version")

Some files were not shown because too many files have changed in this diff.