Mirror of https://github.com/moby/moby.git (synced 2026-01-15 09:51:27 +00:00)

Compare commits: 365 commits
| SHA1 |
|---|
| bb2eab21c6 |
| cfc4677f62 |
| 978690e4f9 |
| 44eb640a1b |
| 69ef0358c3 |
| fc72ed9760 |
| da6bb8c408 |
| a889a17a63 |
| 6f581c1808 |
| de3143c6b9 |
| f547f2f3c0 |
| 0c4b6b1742 |
| f088bcadd5 |
| 5770145433 |
| d15fe0d782 |
| 98040b95a7 |
| 546005804c |
| a8184baf3b |
| e571db3846 |
| a913b5ad7e |
| 73a98393c6 |
| acb8204a7f |
| 313f105443 |
| 2b1ba3ea6b |
| f493b770a9 |
| 36430f7970 |
| fb24b99a2b |
| 0c65191c49 |
| bf78bf3458 |
| dcf06b3f5f |
| aba1d597bc |
| e37ac41afb |
| 963e3ec65c |
| 526203dd7f |
| f528e2ab96 |
| 3989be2f7b |
| d406a5fd22 |
| 394f6c14ad |
| 77a01aaec7 |
| df2427022a |
| 9e4c508b55 |
| cb358e8a19 |
| 2bc33b4c26 |
| 3768c71d9e |
| 7b9e86f789 |
| 214e6363b3 |
| 5052c38846 |
| d15be0c54d |
| 27982c186e |
| 9d990cbae8 |
| 3508cfb149 |
| 67633130c6 |
| 2e13f771f3 |
| bbf3f33dc8 |
| 2dc7a1dc25 |
| fc657692c7 |
| e75fa6684c |
| abe6b3dc9b |
| 297f224a92 |
| f48f4dde24 |
| cd8873dd3d |
| 2dce69e001 |
| 5c4dc48995 |
| 10fa0d5321 |
| 356f483038 |
| c0edbfd621 |
| e46e43470b |
| d4e2341f93 |
| e32bfd347c |
| 65c7f3bac3 |
| 5f35b157a3 |
| 76e132ed56 |
| 6f7ee1c942 |
| f476deac40 |
| 11973d0c0a |
| 251610397c |
| 83f90039ef |
| 2fd846d40f |
| f9ab209417 |
| bfca3185ee |
| 7f45eb041c |
| b76a60dee6 |
| 4acfbaba1e |
| e749a31322 |
| 7370bbc034 |
| 38152f4d5b |
| 21feb1808d |
| 5e15ce3a4a |
| 92b96ac2ed |
| e0b105623e |
| 9d86e1d204 |
| 3a946f5291 |
| cf1e138ab1 |
| 7175841ebd |
| f3e180b704 |
| afdc9a804a |
| e24277883f |
| 07e84005ac |
| 39d3d3db56 |
| 4b79d9078a |
| 1e0f2186a9 |
| 4404c36460 |
| 75634f9a1e |
| ad11d3f232 |
| cbaf1808cb |
| 03015fe6de |
| fa3804f8ba |
| 4c1a3f096c |
| 09a2f7a667 |
| 02e02e512f |
| 24de1f7adc |
| c4685540e4 |
| 5aac513617 |
| 80dc5186ec |
| f9cb47a052 |
| 5202b5c781 |
| 28c34259c7 |
| 67ea873f61 |
| f72c96c5c4 |
| 1bbb6f2454 |
| c0be73f88d |
| 727c4fdee3 |
| b4c4be1f22 |
| 7106874e39 |
| 4bef6f5510 |
| f056df579a |
| c062238ea4 |
| 20ff8a2380 |
| ca99cab891 |
| 5829b244ec |
| 3bc8fccc1b |
| 4a96094bf5 |
| 00b44caa69 |
| 1fcb1dd728 |
| aaa8f96cc9 |
| 671bf589e2 |
| e1b240d6bd |
| 18a54ed59c |
| 0c66bc948a |
| a12d359c1a |
| 2d12e69c9f |
| 33ab36d6b3 |
| fa10084a82 |
| 43ce8f7d24 |
| 87d9d96ab0 |
| a5ecbf4d22 |
| 99aa9bb766 |
| 6442025060 |
| ac6624773e |
| 4669418731 |
| ff07aadeb0 |
| cde4767cbd |
| 1fe550cfc7 |
| edef49eeac |
| dbcd0e7aee |
| 0a87dc9f71 |
| ed3c4e8d8e |
| 3956644474 |
| 262ad3bb2f |
| 44d42c2b16 |
| 14eb977c15 |
| f8e5145e96 |
| 24888a10f6 |
| 3a1896db63 |
| 47319e065d |
| b9b6e68903 |
| c6c4d07830 |
| 9136c32327 |
| 7cb488934b |
| aea1aa0daa |
| 79caa2f955 |
| 87552f2e67 |
| aad639c1fa |
| bee5153c5b |
| 31a938c73c |
| 9d44956d8c |
| 08d01be870 |
| 3660ee30e3 |
| 3424a7c2e3 |
| 36fda30565 |
| 541fda8e90 |
| a8b1fec072 |
| 70c3d7783f |
| fd0904805c |
| 3977a3c6e8 |
| 43cfc50bbb |
| f6ebfaea19 |
| daa8708601 |
| 7114360901 |
| fc6192786a |
| 3d6a13f072 |
| 5ebe35cc09 |
| 5dfec22079 |
| cee2490d84 |
| 3ce520ec80 |
| 7772535e79 |
| bebad9e22e |
| b31d51cac6 |
| 1d7fb64a6e |
| ae65811be2 |
| 0e873d5cd8 |
| 2bc36de638 |
| aca9143c13 |
| e143eed8bc |
| 7d621608dd |
| 997ec12ec8 |
| 4a8f744255 |
| 49a2f5c55c |
| 07efcaf3b2 |
| 6b04087d5f |
| d752acd960 |
| 7f94f2b393 |
| 970c938b56 |
| d41ebd79f7 |
| d0fadc859d |
| 40b28dc7e1 |
| 44c5f7721a |
| a13cd44a13 |
| 2e89072681 |
| 7b5de59256 |
| 00b1722fb4 |
| 8fdaad4018 |
| fefe6290e5 |
| f925f295f4 |
| cc770330f8 |
| e42f7db450 |
| 9a57be4ac4 |
| 95831246a2 |
| 8af2e62556 |
| 6236ebaed5 |
| 50d3438b26 |
| 366d551cd2 |
| 393027d1b1 |
| 21d818be87 |
| 6d65028804 |
| c0e1c67c78 |
| b9b8ddc160 |
| d96d56ff09 |
| cc7b8cc980 |
| 8ca74127d9 |
| fc2942d4e0 |
| 874954d8bd |
| 0bfb1bded3 |
| 4765040aa3 |
| de0300b1c6 |
| 4807ef2af0 |
| c853881610 |
| 2450c5a46b |
| a490e68553 |
| 5d2b3687b0 |
| d7e36c99fb |
| 1249d36bdd |
| 287d1656de |
| 39976cd2bf |
| 85f1b6ff8f |
| 1650fa8889 |
| e9e7491f2b |
| 2609d4e252 |
| 188c5d4a7c |
| ff4ec67b90 |
| fee68df273 |
| b5a0d7a188 |
| f7cf9fbe48 |
| ee87eaf9ad |
| 09a0b0a84a |
| 8e6ed32610 |
| dfd2f917dc |
| 4f1dd92056 |
| f10c50958c |
| 40515da6d6 |
| 3b9370fcf8 |
| 51bf7da729 |
| f1bd611d41 |
| e9f7c05ae1 |
| 72156dd7a4 |
| 554a933944 |
| 8d43d7fa6b |
| a61b411ceb |
| d2590dc3cd |
| 274316f89e |
| e3e3a31989 |
| 704e7a2d71 |
| 87b7e40a34 |
| 901fb577cb |
| fc8b388eac |
| 9aeda305fd |
| 48e314fbe2 |
| 29c636bf80 |
| 64b0b54fc8 |
| e8d00f02aa |
| 7b086898ee |
| 292d352ee4 |
| 2293a20972 |
| 76fa56b62d |
| e5958a8f08 |
| 2dc3e510d4 |
| e7f4963e73 |
| 629397f70e |
| 1e6029e81e |
| 2a33c73574 |
| 4bf8eec265 |
| dfcb3e17ae |
| 8e9684c029 |
| 2c17e9a333 |
| d1d9fd50c2 |
| 8912c1fade |
| 332de3f1e3 |
| 2160f0041d |
| 3254fa3b50 |
| b73c27ef6b |
| ec89e7cde1 |
| 15f9cb5c4d |
| ebfc35f887 |
| f47d5ced16 |
| 6c78a1166e |
| 8ae63006f1 |
| aeb600bc4a |
| e0d8418ddc |
| e6a5f44e61 |
| 7130076488 |
| 0133759476 |
| 86839c826f |
| f93e0ef4d6 |
| 572457e265 |
| 49377cdd63 |
| 910d5c44fc |
| 0e3d20cb20 |
| a96b75191e |
| a285cd4d88 |
| 4f057d8bb6 |
| 1240460547 |
| d9a6b805b3 |
| e88c28941f |
| 9c4984db6b |
| af7c8ff045 |
| 6de52a29a8 |
| ad0ee82f0d |
| 85b9568d0e |
| 826003ecae |
| e2bd8edb0d |
| 44fde1bdb7 |
| d8f20bfdc1 |
| 6ab3b50a3f |
| 6d41219bae |
| dcbd68a1d4 |
| 112fb22152 |
| a60b458179 |
| a9081299dd |
| 48a144954e |
| c4c8a80958 |
| 1b928c1bd5 |
| e34ab5200d |
| 863ca3f185 |
| edcc51cbee |
| 6408132d74 |
| d64dd71200 |
| e0ba440909 |
| 269e10a725 |
| 149b7e7f03 |
| c51efa8617 |
| 52791b1c14 |
| cdbca4061b |
| c52e221207 |
| e417e8dfc2 |
| 6905fe7488 |

.github/CODEOWNERS (vendored): 3 changes

@@ -5,7 +5,8 @@
builder/** @tonistiigi
contrib/mkimage/** @tianon
daemon/graphdriver/devmapper/** @rhvgoyal
daemon/graphdriver/devmapper/** @rhvgoyal
daemon/graphdriver/overlay/** @dmcgowan
daemon/graphdriver/overlay2/** @dmcgowan
daemon/graphdriver/windows/** @johnstep
daemon/logger/awslogs/** @samuelkarp

.github/ISSUE_TEMPLATE.md (vendored, new file): 70 changes

@@ -0,0 +1,70 @@
<!--
If you are reporting a new issue, make sure that we do not have any duplicates
already open. You can ensure this by searching the issue list for this
repository. If there is a duplicate, please close your issue and add a comment
to the existing issue instead.

If you suspect your issue is a bug, please edit your issue description to
include the BUG REPORT INFORMATION shown below. If you fail to provide this
information within 7 days, we cannot debug your issue and will close it. We
will, however, reopen it if you later provide the information.

For more information about reporting issues, see
https://github.com/moby/moby/blob/master/CONTRIBUTING.md#reporting-other-issues

---------------------------------------------------
GENERAL SUPPORT INFORMATION
---------------------------------------------------

The GitHub issue tracker is for bug reports and feature requests.
General support for **docker** can be found at the following locations:

- Docker Support Forums - https://forums.docker.com
- Slack - community.docker.com #general channel
- Post a question on StackOverflow, using the Docker tag

General support for **moby** can be found at the following locations:

- Moby Project Forums - https://forums.mobyproject.org
- Slack - community.docker.com #moby-project channel
- Post a question on StackOverflow, using the Moby tag

---------------------------------------------------
BUG REPORT INFORMATION
---------------------------------------------------
Use the commands below to provide key information from your environment:
You do NOT have to include this information if this is a FEATURE REQUEST
-->

**Description**

<!--
Briefly describe the problem you are having in a few paragraphs.
-->

**Steps to reproduce the issue:**
1.
2.
3.

**Describe the results you received:**

**Describe the results you expected:**

**Additional information you deem important (e.g. issue happens only occasionally):**

**Output of `docker version`:**

```
(paste your output here)
```

**Output of `docker info`:**

```
(paste your output here)
```

**Additional environment details (AWS, VirtualBox, physical, etc.):**

.github/ISSUE_TEMPLATE/bug_report.yml (vendored): 146 changes

@@ -1,146 +0,0 @@
name: Bug report
description: Create a report to help us improve
labels:
  - kind/bug
  - status/0-triage
body:
  - type: markdown
    attributes:
      value: |
        Thank you for taking the time to report a bug!
        If this is a security issue please report it to the [Docker Security team](mailto:security@docker.com).
  - type: textarea
    id: description
    attributes:
      label: Description
      description: Please give a clear and concise description of the bug
    validations:
      required: true
  - type: textarea
    id: repro
    attributes:
      label: Reproduce
      description: Steps to reproduce the bug
      placeholder: |
        1. docker run ...
        2. docker kill ...
        3. docker rm ...
    validations:
      required: true
  - type: textarea
    id: expected
    attributes:
      label: Expected behavior
      description: What is the expected behavior?
      placeholder: |
        E.g. "`docker rm` should remove the container and cleanup all associated data"
  - type: textarea
    id: version
    attributes:
      label: docker version
      description: Output of `docker version`
      render: bash
      placeholder: |
        Client:
          Version: 20.10.17
          API version: 1.41
          Go version: go1.17.11
          Git commit: 100c70180fde3601def79a59cc3e996aa553c9b9
          Built: Mon Jun 6 21:36:39 UTC 2022
          OS/Arch: linux/amd64
          Context: default
          Experimental: true

        Server:
          Engine:
            Version: 20.10.17
            API version: 1.41 (minimum version 1.12)
            Go version: go1.17.11
            Git commit: a89b84221c8560e7a3dee2a653353429e7628424
            Built: Mon Jun 6 22:32:38 2022
            OS/Arch: linux/amd64
            Experimental: true
          containerd:
            Version: 1.6.6
            GitCommit: 10c12954828e7c7c9b6e0ea9b0c02b01407d3ae1
          runc:
            Version: 1.1.2
            GitCommit: a916309fff0f838eb94e928713dbc3c0d0ac7aa4
          docker-init:
            Version: 0.19.0
            GitCommit:
    validations:
      required: true
  - type: textarea
    id: info
    attributes:
      label: docker info
      description: Output of `docker info`
      render: bash
      placeholder: |
        Client:
          Context: default
          Debug Mode: false
          Plugins:
            buildx: Docker Buildx (Docker Inc., 0.8.2)
            compose: Docker Compose (Docker Inc., 2.6.0)

        Server:
          Containers: 4
            Running: 2
            Paused: 0
            Stopped: 2
          Images: 80
          Server Version: 20.10.17
          Storage Driver: overlay2
            Backing Filesystem: xfs
            Supports d_type: true
            Native Overlay Diff: false
            userxattr: false
          Logging Driver: local
          Cgroup Driver: cgroupfs
          Cgroup Version: 1
          Plugins:
            Volume: local
            Network: bridge host ipvlan macvlan null overlay
            Log: awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog
          Swarm: inactive
          Runtimes: runc io.containerd.runc.v2 io.containerd.runtime.v1.linux
          Default Runtime: runc
          Init Binary: docker-init
          containerd version: 10c12954828e7c7c9b6e0ea9b0c02b01407d3ae1
          runc version: a916309fff0f838eb94e928713dbc3c0d0ac7aa4
          init version:
          Security Options:
            apparmor
            seccomp
              Profile: default
          Kernel Version: 5.13.0-1031-azure
          Operating System: Ubuntu 20.04.4 LTS
          OSType: linux
          Architecture: x86_64
          CPUs: 4
          Total Memory: 15.63GiB
          Name: dev
          ID: UC44:2RFL:7NQ5:GGFW:34O5:DYRE:CLOH:VLGZ:64AZ:GFXC:PY6H:SAHY
          Docker Root Dir: /var/lib/docker
          Debug Mode: true
            File Descriptors: 46
            Goroutines: 134
            System Time: 2022-07-06T18:07:54.812439392Z
            EventsListeners: 0
          Registry: https://index.docker.io/v1/
          Labels:
          Experimental: true
          Insecure Registries:
            127.0.0.0/8
          Live Restore Enabled: true
    validations:
      required: true
  - type: textarea
    id: additional
    attributes:
      label: Additional Info
      description: Additional info you want to provide such as logs, system info, environment, etc.
    validations:
      required: false

.github/ISSUE_TEMPLATE/config.yml (vendored): 8 changes

@@ -1,8 +0,0 @@
blank_issues_enabled: false
contact_links:
  - name: Security and Vulnerabilities
    url: https://github.com/moby/moby/blob/master/SECURITY.md
    about: Please report any security issues or vulnerabilities responsibly to the Docker security team. Please do not use the public issue tracker.
  - name: Questions and Discussions
    url: https://github.com/moby/moby/discussions/new
    about: Use Github Discussions to ask questions and/or open discussion topics.

.github/ISSUE_TEMPLATE/feature_request.yml (vendored): 13 changes

@@ -1,13 +0,0 @@
name: Feature request
description: Missing functionality? Come tell us about it!
labels:
  - kind/feature
  - status/0-triage
body:
  - type: textarea
    id: description
    attributes:
      label: Description
      description: What is the feature you want to see?
    validations:
      required: true

.github/actions/setup-runner/action.yml (vendored): 2 changes

@@ -13,7 +13,7 @@ runs:
        shell: bash
    - run: |
        if [ ! -e /etc/docker/daemon.json ]; then
          echo '{}' | sudo tee /etc/docker/daemon.json >/dev/null
          echo '{}' | tee /etc/docker/daemon.json >/dev/null
        fi
        DOCKERD_CONFIG=$(jq '.+{"experimental":true,"live-restore":true,"ipv6":true,"fixed-cidr-v6":"2001:db8:1::/64"}' /etc/docker/daemon.json)
        sudo tee /etc/docker/daemon.json <<<"$DOCKERD_CONFIG" >/dev/null
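
For context, the step above merges daemon flags into any existing `/etc/docker/daemon.json` rather than overwriting it. A minimal sketch of the same merge outside CI, assuming only that `jq` is installed and that the daemon config may be absent:

```bash
#!/usr/bin/env bash
# Sketch: merge extra daemon settings into /etc/docker/daemon.json without
# clobbering keys that are already present. CONFIG is the standard Docker
# daemon config path; the settings shown are examples.
set -euo pipefail

CONFIG=/etc/docker/daemon.json

# Start from an empty JSON object if no config exists yet.
[ -e "$CONFIG" ] || echo '{}' | sudo tee "$CONFIG" >/dev/null

# ".+{...}" is a shallow merge: keys on the right win, all others survive.
merged=$(jq '. + {"experimental": true, "live-restore": true}' "$CONFIG")
sudo tee "$CONFIG" <<<"$merged" >/dev/null

# The daemon only picks up the new settings after a restart.
sudo systemctl restart docker
```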

.github/workflows/.windows.yml (vendored): 17 changes

@@ -15,8 +15,8 @@ on:
        default: false

env:
  GO_VERSION: "1.20.13"
  GOTESTLIST_VERSION: v0.3.1
  GO_VERSION: 1.19.3
  GOTESTLIST_VERSION: v0.2.0
  TESTSTAT_VERSION: v0.1.3
  WINDOWS_BASE_IMAGE: mcr.microsoft.com/windows/servercore
  WINDOWS_BASE_TAG_2019: ltsc2019

@@ -228,11 +228,10 @@ jobs:
        id: tests
        working-directory: ./integration-cli
        run: |
          # This step creates a matrix for integration-cli tests. Tests suites
          # are distributed in integration-test job through a matrix. There is
          # also an override being added to the matrix like "./..." to run
          # "Test integration" step exclusively.
          matrix="$(gotestlist -d ${{ env.ITG_CLI_MATRIX_SIZE }} -o "./..." ./...)"
          # Distribute integration-cli tests for the matrix in integration-test job.
          # Also prepend ./... to the matrix. This is a special case to run "Test integration" step exclusively.
          matrix="$(gotestlist -d ${{ env.ITG_CLI_MATRIX_SIZE }} ./...)"
          matrix="$(echo "$matrix" | jq -c '. |= ["./..."] + .')"
          echo "matrix=$matrix" >> $GITHUB_OUTPUT
      -
        name: Show matrix

@@ -455,9 +454,9 @@ jobs:
        run: |
          Get-WinEvent -ea SilentlyContinue `
            -FilterHashtable @{ProviderName= "docker"; LogName = "application"} |
            Select-Object -Property TimeCreated, @{N='Detailed Message'; E={$_.Message}} |
            Sort-Object @{Expression="TimeCreated";Descending=$false} |
            ForEach-Object {"$($_.TimeCreated.ToUniversalTime().ToString("o")) [$($_.LevelDisplayName)] $($_.Message)"} |
            Tee-Object -file ".\bundles\daemon.log"
            Select-Object -ExpandProperty 'Detailed Message' | Tee-Object -file ".\bundles\daemon.log"
      -
        name: Upload reports
        if: always()
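
The matrix manipulation in the step above is plain JSON array surgery. A minimal sketch of the same prepend, assuming `gotestlist` emitted a JSON array of suite names (the names below are stand-ins, not real suites):

```bash
#!/usr/bin/env bash
# Sketch: prepend a catch-all "./..." entry to a JSON array of test suites,
# as the workflow step above does with the gotestlist output.
set -euo pipefail

matrix='["DockerSuite","DockerSwarmSuite"]'

# '. |= ["./..."] + .' rewrites the array as ["./..."] concatenated with itself.
matrix="$(echo "$matrix" | jq -c '. |= ["./..."] + .')"

echo "$matrix"   # -> ["./...","DockerSuite","DockerSwarmSuite"]
```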

.github/workflows/bin-image.yml (vendored): 183 changes

@@ -1,183 +0,0 @@
name: bin-image

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

on:
  workflow_dispatch:
  push:
    branches:
      - 'master'
      - '[0-9]+.[0-9]+'
    tags:
      - 'v*'
  pull_request:

env:
  MOBYBIN_REPO_SLUG: moby/moby-bin
  DOCKER_GITCOMMIT: ${{ github.sha }}
  VERSION: ${{ github.ref }}
  PLATFORM: Moby Engine - Nightly
  PRODUCT: moby-bin
  PACKAGER_NAME: The Moby Project

jobs:
  validate-dco:
    if: ${{ !startsWith(github.ref, 'refs/tags/v') }}
    uses: ./.github/workflows/.dco.yml

  prepare:
    runs-on: ubuntu-20.04
    outputs:
      platforms: ${{ steps.platforms.outputs.matrix }}
    steps:
      -
        name: Checkout
        uses: actions/checkout@v3
      -
        name: Docker meta
        id: meta
        uses: docker/metadata-action@v4
        with:
          images: |
            ${{ env.MOBYBIN_REPO_SLUG }}
          ### versioning strategy
          ## push semver tag v23.0.0
          # moby/moby-bin:23.0.0
          # moby/moby-bin:latest
          ## push semver prelease tag v23.0.0-beta.1
          # moby/moby-bin:23.0.0-beta.1
          ## push on master
          # moby/moby-bin:master
          ## push on 23.0 branch
          # moby/moby-bin:23.0
          ## any push
          # moby/moby-bin:sha-ad132f5
          tags: |
            type=semver,pattern={{version}}
            type=ref,event=branch
            type=ref,event=pr
            type=sha
      -
        name: Rename meta bake definition file
        run: |
          mv "${{ steps.meta.outputs.bake-file }}" "/tmp/bake-meta.json"
      -
        name: Upload meta bake definition
        uses: actions/upload-artifact@v3
        with:
          name: bake-meta
          path: /tmp/bake-meta.json
          if-no-files-found: error
          retention-days: 1
      -
        name: Create platforms matrix
        id: platforms
        run: |
          echo "matrix=$(docker buildx bake bin-image-cross --print | jq -cr '.target."bin-image-cross".platforms')" >>${GITHUB_OUTPUT}

  build:
    runs-on: ubuntu-20.04
    needs:
      - validate-dco
      - prepare
    if: always() && !contains(needs.*.result, 'failure') && !contains(needs.*.result, 'cancelled')
    strategy:
      fail-fast: false
      matrix:
        platform: ${{ fromJson(needs.prepare.outputs.platforms) }}
    steps:
      -
        name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 0
      -
        name: Download meta bake definition
        uses: actions/download-artifact@v3
        with:
          name: bake-meta
          path: /tmp
      -
        name: Set up QEMU
        uses: docker/setup-qemu-action@v2
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      -
        name: Login to Docker Hub
        if: github.event_name != 'pull_request' && github.repository == 'moby/moby'
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKERHUB_MOBYBIN_USERNAME }}
          password: ${{ secrets.DOCKERHUB_MOBYBIN_TOKEN }}
      -
        name: Build
        id: bake
        uses: docker/bake-action@v3
        with:
          files: |
            ./docker-bake.hcl
            /tmp/bake-meta.json
          targets: bin-image
          set: |
            *.platform=${{ matrix.platform }}
            *.output=type=image,name=${{ env.MOBYBIN_REPO_SLUG }},push-by-digest=true,name-canonical=true,push=${{ github.event_name != 'pull_request' && github.repository == 'moby/moby' }}
            *.tags=
      -
        name: Export digest
        if: github.event_name != 'pull_request' && github.repository == 'moby/moby'
        run: |
          mkdir -p /tmp/digests
          digest="${{ fromJSON(steps.bake.outputs.metadata)['bin-image']['containerimage.digest'] }}"
          touch "/tmp/digests/${digest#sha256:}"
      -
        name: Upload digest
        if: github.event_name != 'pull_request' && github.repository == 'moby/moby'
        uses: actions/upload-artifact@v3
        with:
          name: digests
          path: /tmp/digests/*
          if-no-files-found: error
          retention-days: 1

  merge:
    runs-on: ubuntu-20.04
    needs:
      - build
    if: always() && !contains(needs.*.result, 'failure') && !contains(needs.*.result, 'cancelled') && github.event_name != 'pull_request' && github.repository == 'moby/moby'
    steps:
      -
        name: Download meta bake definition
        uses: actions/download-artifact@v3
        with:
          name: bake-meta
          path: /tmp
      -
        name: Download digests
        uses: actions/download-artifact@v3
        with:
          name: digests
          path: /tmp/digests
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      -
        name: Login to Docker Hub
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKERHUB_MOBYBIN_USERNAME }}
          password: ${{ secrets.DOCKERHUB_MOBYBIN_TOKEN }}
      -
        name: Create manifest list and push
        working-directory: /tmp/digests
        run: |
          set -x
          docker buildx imagetools create $(jq -cr '.target."docker-metadata-action".tags | map("-t " + .) | join(" ")' /tmp/bake-meta.json) \
            $(printf '${{ env.MOBYBIN_REPO_SLUG }}@sha256:%s ' *)
      -
        name: Inspect image
        run: |
          set -x
          docker buildx imagetools inspect ${{ env.MOBYBIN_REPO_SLUG }}:$(jq -cr '.target."docker-metadata-action".args.DOCKER_META_VERSION' /tmp/bake-meta.json)
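
The `merge` job above stitches per-platform images, addressed only by digest, into one multi-arch tag. A minimal sketch of that idea in isolation; the repo name and digests below are placeholders, not real published images:

```bash
#!/usr/bin/env bash
# Sketch: combine platform-specific images (pushed by digest) into a single
# manifest list under one tag, then verify it. Digests are hypothetical.
set -euo pipefail

REPO=moby/moby-bin
AMD64_DIGEST=sha256:1111111111111111111111111111111111111111111111111111111111111111
ARM64_DIGEST=sha256:2222222222222222222222222222222222222222222222222222222222222222

# Each digest names one platform-specific image; imagetools create writes a
# manifest list that points at all of them under the requested tag.
docker buildx imagetools create -t "$REPO:master" \
  "$REPO@$AMD64_DIGEST" \
  "$REPO@$ARM64_DIGEST"

# Confirm both platforms show up in the pushed manifest list.
docker buildx imagetools inspect "$REPO:master"
```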

.github/workflows/buildkit.yml (vendored): 33 changes

@@ -9,13 +9,11 @@ on:
  push:
    branches:
      - 'master'
      - '[0-9]+.[0-9]+'
      - '[0-9]+.[0-9]{2}'
  pull_request:

env:
  ALPINE_VERSION: "3.18"
  GO_VERSION: "1.20.12"
  DESTDIR: ./build
  BUNDLES_OUTPUT: ./bundles

jobs:
  validate-dco:

@@ -42,7 +40,7 @@ jobs:
        uses: actions/upload-artifact@v3
        with:
          name: binary
          path: ${{ env.DESTDIR }}
          path: ${{ env.BUNDLES_OUTPUT }}
          if-no-files-found: error
          retention-days: 1

@@ -54,9 +52,6 @@ jobs:
    strategy:
      fail-fast: false
      matrix:
        worker:
          - dockerd
          - dockerd-containerd
        pkg:
          - client
          - cmd/buildctl

@@ -66,14 +61,6 @@ jobs:
        typ:
          - integration
    steps:
      -
        name: Prepare
        run: |
          disabledFeatures="cache_backend_azblob,cache_backend_s3"
          if [ "${{ matrix.worker }}" = "dockerd" ]; then
            disabledFeatures="${disabledFeatures},merge_diff"
          fi
          echo "BUILDKIT_TEST_DISABLE_FEATURES=${disabledFeatures}" >> $GITHUB_ENV
      -
        name: Checkout
        uses: actions/checkout@v3

@@ -82,13 +69,16 @@ jobs:
      -
        name: BuildKit ref
        run: |
          echo "$(./hack/buildkit-ref)" >> $GITHUB_ENV
          ./hack/go-mod-prepare.sh
          # FIXME(thaJeztah) temporarily overriding version to use for tests; remove with the next release of buildkit
          # echo "BUILDKIT_REF=$(./hack/buildkit-ref)" >> $GITHUB_ENV
          echo "BUILDKIT_REF=4febae4f874bd8ef52dec30e988c8fe0bc96b3b9" >> $GITHUB_ENV
        working-directory: moby
      -
        name: Checkout BuildKit ${{ env.BUILDKIT_REF }}
        uses: actions/checkout@v3
        with:
          repository: ${{ env.BUILDKIT_REPO }}
          repository: "moby/buildkit"
          ref: ${{ env.BUILDKIT_REF }}
          path: buildkit
      -

@@ -106,7 +96,7 @@ jobs:
      -
        name: Update daemon.json
        run: |
          sudo rm -f /etc/docker/daemon.json
          sudo rm /etc/docker/daemon.json
          sudo service docker restart
          docker version
          docker info

@@ -117,8 +107,7 @@ jobs:
        env:
          CONTEXT: "."
          TEST_DOCKERD: "1"
          TEST_DOCKERD_BINARY: "./build/moby/dockerd"
          TEST_DOCKERD_BINARY: "./build/moby/binary-daemon/dockerd"
          TESTPKGS: "./${{ matrix.pkg }}"
          # Skip buildkit tests checking the digest (see https://github.com/moby/buildkit/pull/3736)
          TESTFLAGS: "-v --parallel=1 --timeout=30m --run=/^Test([^R]|.[^e]|..[^p]|...[^r]|....[^o]|.....[^S])/worker=${{ matrix.worker }}$"
          TESTFLAGS: "-v --parallel=1 --timeout=30m --run=//worker=dockerd$"
        working-directory: buildkit
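
The TESTFLAGS regex above is worth unpacking: `go test -run` has no negative lookahead, so excluding tests whose names start with `ReproS` is done by enumerating, position by position, every way a name can differ from that prefix. A small sketch of the same pattern in isolation (the test names are illustrative, not real BuildKit tests):

```bash
#!/usr/bin/env bash
# Sketch: demonstrate which names the exclusion regex from the workflow above
# accepts. A name is skipped only if every alternative fails, i.e. only if it
# starts with "TestReproS".
pattern='^Test([^R]|.[^e]|..[^p]|...[^r]|....[^o]|.....[^S])'

for name in TestBuild TestReproSourceDateEpoch TestRemove; do
  if [[ "$name" =~ $pattern ]]; then
    echo "run:  $name"
  else
    echo "skip: $name"
  fi
done
# -> run:  TestBuild / skip: TestReproSourceDateEpoch / run:  TestRemove
```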

.github/workflows/ci.yml (vendored): 63 changes

@@ -10,10 +10,12 @@ on:
    branches:
      - 'master'
      - '[0-9]+.[0-9]+'
    tags:
      - 'v*'
  pull_request:

env:
  DESTDIR: ./build
  BUNDLES_OUTPUT: ./bundles

jobs:
  validate-dco:

@@ -43,53 +45,32 @@ jobs:
        uses: docker/bake-action@v2
        with:
          targets: ${{ matrix.target }}
      -
        name: List artifacts
        run: |
          tree -nh ${{ env.DESTDIR }}
      -
        name: Check artifacts
        run: |
          find ${{ env.DESTDIR }} -type f -exec file -e ascii -- {} +
      -
        name: Upload artifacts
        uses: actions/upload-artifact@v3
        with:
          name: ${{ matrix.target }}
          path: ${{ env.DESTDIR }}
          path: ${{ env.BUNDLES_OUTPUT }}
          if-no-files-found: error
          retention-days: 7

  prepare-cross:
    runs-on: ubuntu-latest
    needs:
      - validate-dco
    outputs:
      matrix: ${{ steps.platforms.outputs.matrix }}
    steps:
      -
        name: Checkout
        uses: actions/checkout@v3
      -
        name: Create matrix
        id: platforms
        run: |
          matrix="$(docker buildx bake binary-cross --print | jq -cr '.target."binary-cross".platforms')"
          echo "matrix=$matrix" >> $GITHUB_OUTPUT
      -
        name: Show matrix
        run: |
          echo ${{ steps.platforms.outputs.matrix }}

  cross:
    runs-on: ubuntu-20.04
    needs:
      - validate-dco
      - prepare-cross
    strategy:
      fail-fast: false
      matrix:
        platform: ${{ fromJson(needs.prepare-cross.outputs.matrix) }}
        platform:
          - linux/amd64
          - linux/arm/v5
          - linux/arm/v6
          - linux/arm/v7
          - linux/arm64
          - linux/ppc64le
          - linux/s390x
          - windows/amd64
          - windows/arm64
    steps:
      -
        name: Checkout

@@ -108,22 +89,14 @@ jobs:
        name: Build
        uses: docker/bake-action@v2
        with:
          targets: all
          set: |
            *.platform=${{ matrix.platform }}
      -
        name: List artifacts
        run: |
          tree -nh ${{ env.DESTDIR }}
      -
        name: Check artifacts
        run: |
          find ${{ env.DESTDIR }} -type f -exec file -e ascii -- {} +
          targets: cross
        env:
          DOCKER_CROSSPLATFORMS: ${{ matrix.platform }}
      -
        name: Upload artifacts
        uses: actions/upload-artifact@v3
        with:
          name: cross-${{ env.PLATFORM_PAIR }}
          path: ${{ env.DESTDIR }}
          path: ${{ env.BUNDLES_OUTPUT }}
          if-no-files-found: error
          retention-days: 7
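
The `prepare-cross` job above derives its platform matrix from the bake definition instead of hard-coding it. A minimal sketch of that extraction, assuming a checkout with a `docker-bake.hcl` that defines the `binary-cross` target; the output shown is illustrative:

```bash
#!/usr/bin/env bash
# Sketch: how "Create matrix" turns a bake target's platform list into a JSON
# array suitable for a GitHub Actions matrix.
set -euo pipefail

# --print renders the resolved bake file as JSON without building anything;
# jq then plucks the platforms array for the target.
docker buildx bake binary-cross --print \
  | jq -cr '.target."binary-cross".platforms'
# e.g. ["linux/amd64","linux/arm64","windows/amd64"]
```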

.github/workflows/test.yml (vendored): 72 changes

@@ -10,11 +10,13 @@ on:
    branches:
      - 'master'
      - '[0-9]+.[0-9]+'
    tags:
      - 'v*'
  pull_request:

env:
  GO_VERSION: "1.20.13"
  GOTESTLIST_VERSION: v0.3.1
  GO_VERSION: 1.19.3
  GOTESTLIST_VERSION: v0.2.0
  TESTSTAT_VERSION: v0.1.3
  ITG_CLI_MATRIX_SIZE: 6
  DOCKER_EXPERIMENTAL: 1

@@ -80,7 +82,6 @@ jobs:

  validate:
    runs-on: ubuntu-20.04
    timeout-minutes: 120
    needs:
      - validate-prepare
      - build-dev

@@ -164,7 +165,6 @@ jobs:

  unit-report:
    runs-on: ubuntu-20.04
    timeout-minutes: 10
    if: always()
    needs:
      - unit

@@ -353,7 +353,6 @@ jobs:

  integration-report:
    runs-on: ubuntu-20.04
    timeout-minutes: 10
    if: always()
    needs:
      - integration

@@ -402,12 +401,10 @@ jobs:
        id: tests
        working-directory: ./integration-cli
        run: |
          # This step creates a matrix for integration-cli tests. Tests suites
          # are distributed in integration-cli job through a matrix. There is
          # also overrides being added to the matrix like "./..." to run
          # "Test integration" step exclusively and specific tests suites that
          # take a long time to run.
          matrix="$(gotestlist -d ${{ env.ITG_CLI_MATRIX_SIZE }} -o "./..." -o "DockerSwarmSuite" -o "DockerNetworkSuite|DockerExternalVolumeSuite" ./...)"
          # Distribute integration-cli tests for the matrix in integration-test job.
          # Also prepend ./... to the matrix. This is a special case to run "Test integration" step exclusively.
          matrix="$(gotestlist -d ${{ env.ITG_CLI_MATRIX_SIZE }} ./...)"
          matrix="$(echo "$matrix" | jq -c '. |= ["./..."] + .')"
          echo "matrix=$matrix" >> $GITHUB_OUTPUT
      -
        name: Show matrix

@@ -482,7 +479,6 @@ jobs:

  integration-cli-report:
    runs-on: ubuntu-20.04
    timeout-minutes: 10
    if: always()
    needs:
      - integration-cli

@@ -506,55 +502,3 @@ jobs:
        name: Create summary
        run: |
          teststat -markdown $(find /tmp/reports -type f -name '*.json' -print0 | xargs -0) >> $GITHUB_STEP_SUMMARY

  prepare-smoke:
    runs-on: ubuntu-20.04
    needs:
      - validate-dco
    outputs:
      matrix: ${{ steps.platforms.outputs.matrix }}
    steps:
      -
        name: Checkout
        uses: actions/checkout@v3
      -
        name: Create matrix
        id: platforms
        run: |
          matrix="$(docker buildx bake binary-smoketest --print | jq -cr '.target."binary-smoketest".platforms')"
          echo "matrix=$matrix" >> $GITHUB_OUTPUT
      -
        name: Show matrix
        run: |
          echo ${{ steps.platforms.outputs.matrix }}

  smoke:
    runs-on: ubuntu-20.04
    needs:
      - prepare-smoke
    strategy:
      fail-fast: false
      matrix:
        platform: ${{ fromJson(needs.prepare-smoke.outputs.matrix) }}
    steps:
      -
        name: Checkout
        uses: actions/checkout@v3
      -
        name: Prepare
        run: |
          platform=${{ matrix.platform }}
          echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
      -
        name: Set up QEMU
        uses: docker/setup-qemu-action@v2
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      -
        name: Test
        uses: docker/bake-action@v2
        with:
          targets: binary-smoketest
          set: |
            *.platform=${{ matrix.platform }}
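
The `Prepare` step in the smoke job above flattens a platform string into an artifact-safe name with bash pattern substitution. The same idiom in isolation:

```bash
#!/usr/bin/env bash
# Sketch: turn "linux/arm/v7" into "linux-arm-v7" for use in file or
# artifact names, exactly as the PLATFORM_PAIR step above does.
platform="linux/arm/v7"

# ${var//pattern/replacement} replaces every "/" with "-".
echo "PLATFORM_PAIR=${platform//\//-}"   # -> PLATFORM_PAIR=linux-arm-v7
```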

.gitignore (vendored): 2 changes

@@ -13,6 +13,8 @@ thumbs.db
.bashrc
.editorconfig

# top-level go.mod is not meant to be checked in
/go.mod
# build artifacts
bundles/
cli/winresources/*/*.syso

AUTHORS: 18 changes

@@ -29,7 +29,6 @@ Adam Pointer <adam.pointer@skybettingandgaming.com>
Adam Singer <financeCoding@gmail.com>
Adam Walz <adam@adamwalz.net>
Adam Williams <awilliams@mirantis.com>
AdamKorcz <adam@adalogics.com>
Addam Hardy <addam.hardy@gmail.com>
Aditi Rajagopal <arajagopal@us.ibm.com>
Aditya <aditya@netroy.in>

@@ -82,7 +81,6 @@ Alex Goodman <wagoodman@gmail.com>
Alex Nordlund <alexander.nordlund@nasdaq.com>
Alex Olshansky <i@creagenics.com>
Alex Samorukov <samm@os2.kiev.ua>
Alex Stockinger <alex@atomicjar.com>
Alex Warhawk <ax.warhawk@gmail.com>
Alexander Artemenko <svetlyak.40wt@gmail.com>
Alexander Boyd <alex@opengroove.org>

@@ -200,7 +198,6 @@ Anusha Ragunathan <anusha.ragunathan@docker.com>
Anyu Wang <wanganyu@outlook.com>
apocas <petermdias@gmail.com>
Arash Deshmeh <adeshmeh@ca.ibm.com>
arcosx <arcosx@outlook.com>
ArikaChen <eaglesora@gmail.com>
Arko Dasgupta <arko@tetrate.io>
Arnaud Lefebvre <a.lefebvre@outlook.fr>

@@ -244,7 +241,6 @@ Benjamin Atkin <ben@benatkin.com>
Benjamin Baker <Benjamin.baker@utexas.edu>
Benjamin Boudreau <boudreau.benjamin@gmail.com>
Benjamin Böhmke <benjamin@boehmke.net>
Benjamin Wang <wachao@vmware.com>
Benjamin Yolken <yolken@stripe.com>
Benny Ng <benny.tpng@gmail.com>
Benoit Chesneau <bchesneau@gmail.com>

@@ -638,7 +634,6 @@ Eng Zer Jun <engzerjun@gmail.com>
Enguerran <engcolson@gmail.com>
Eohyung Lee <liquidnuker@gmail.com>
epeterso <epeterson@breakpoint-labs.com>
er0k <er0k@er0k.net>
Eric Barch <barch@tomesoftware.com>
Eric Curtin <ericcurtin17@gmail.com>
Eric G. Noriega <enoriega@vizuri.com>

@@ -759,7 +754,6 @@ Félix Baylac-Jacqué <baylac.felix@gmail.com>
Félix Cantournet <felix.cantournet@cloudwatt.com>
Gabe Rosenhouse <gabe@missionst.com>
Gabor Nagy <mail@aigeruth.hu>
Gabriel Adrian Samfira <gsamfira@cloudbasesolutions.com>
Gabriel Goller <gabrielgoller123@gmail.com>
Gabriel L. Somlo <gsomlo@gmail.com>
Gabriel Linder <linder.gabriel@gmail.com>

@@ -861,7 +855,6 @@ Hongbin Lu <hongbin034@gmail.com>
Hongxu Jia <hongxu.jia@windriver.com>
Honza Pokorny <me@honza.ca>
Hsing-Hui Hsu <hsinghui@amazon.com>
Hsing-Yu (David) Chen <davidhsingyuchen@gmail.com>
hsinko <21551195@zju.edu.cn>
Hu Keping <hukeping@huawei.com>
Hu Tao <hutao@cn.fujitsu.com>

@@ -894,7 +887,6 @@ Igor Dolzhikov <bluesriverz@gmail.com>
Igor Karpovich <i.karpovich@currencysolutions.com>
Iliana Weller <iweller@amazon.com>
Ilkka Laukkanen <ilkka@ilkka.io>
Illia Antypenko <ilya@antipenko.pp.ua>
Illo Abdulrahim <abdulrahim.illo@nokia.com>
Ilya Dmitrichenko <errordeveloper@gmail.com>
Ilya Gusev <mail@igusev.ru>

@@ -946,7 +938,6 @@ Jamie Hannaford <jamie@limetree.org>
Jamshid Afshar <jafshar@yahoo.com>
Jan Breig <git@pygos.space>
Jan Chren <dev.rindeal@gmail.com>
Jan Garcia <github-public@n-garcia.com>
Jan Götte <jaseg@jaseg.net>
Jan Keromnes <janx@linux.com>
Jan Koprowski <jan.koprowski@gmail.com>

@@ -1215,7 +1206,6 @@ Kimbro Staken <kstaken@kstaken.com>
Kir Kolyshkin <kolyshkin@gmail.com>
Kiran Gangadharan <kiran.daredevil@gmail.com>
Kirill SIbirev <l0kix2@gmail.com>
Kirk Easterson <kirk.easterson@gmail.com>
knappe <tyler.knappe@gmail.com>
Kohei Tsuruta <coheyxyz@gmail.com>
Koichi Shiraishi <k@zchee.io>

@@ -1250,12 +1240,10 @@ Lars Kellogg-Stedman <lars@redhat.com>
Lars R. Damerow <lars@pixar.com>
Lars-Magnus Skog <ralphtheninja@riseup.net>
Laszlo Meszaros <lacienator@gmail.com>
Laura Brehm <laurabrehm@hey.com>
Laura Frank <ljfrank@gmail.com>
Laurent Bernaille <laurent.bernaille@datadoghq.com>
Laurent Erignoux <lerignoux@gmail.com>
Laurie Voss <github@seldo.com>
Leandro Motta Barros <lmb@stackedboxes.org>
Leandro Siqueira <leandro.siqueira@gmail.com>
Lee Calcote <leecalcote@gmail.com>
Lee Chao <932819864@qq.com>

@@ -1575,7 +1563,6 @@ Nick Neisen <nwneisen@gmail.com>
Nick Parker <nikaios@gmail.com>
Nick Payne <nick@kurai.co.uk>
Nick Russo <nicholasjamesrusso@gmail.com>
Nick Santos <nick.santos@docker.com>
Nick Stenning <nick.stenning@digital.cabinet-office.gov.uk>
Nick Stinemates <nick@stinemates.org>
Nick Wood <nwood@microsoft.com>

@@ -1597,7 +1584,6 @@ NikolaMandic <mn080202@gmail.com>
Nikolas Garofil <nikolas.garofil@uantwerpen.be>
Nikolay Edigaryev <edigaryev@gmail.com>
Nikolay Milovanov <nmil@itransformers.net>
ningmingxiao <ning.mingxiao@zte.com.cn>
Nirmal Mehta <nirmalkmehta@gmail.com>
Nishant Totla <nishanttotla@gmail.com>
NIWA Hideyuki <niwa.niwa@nifty.ne.jp>

@@ -1629,7 +1615,6 @@ Omri Shiv <Omri.Shiv@teradata.com>
Onur Filiz <onur.filiz@microsoft.com>
Oriol Francès <oriolfa@gmail.com>
Oscar Bonilla <6f6231@gmail.com>
oscar.chen <2972789494@qq.com>
Oskar Niburski <oskarniburski@gmail.com>
Otto Kekäläinen <otto@seravo.fi>
Ouyang Liduo <oyld0210@163.com>

@@ -1837,7 +1822,6 @@ Rory Hunter <roryhunter2@gmail.com>
Rory McCune <raesene@gmail.com>
Ross Boucher <rboucher@gmail.com>
Rovanion Luckey <rovanion.luckey@gmail.com>
Roy Reznik <roy@wiz.io>
Royce Remer <royceremer@gmail.com>
Rozhnov Alexandr <nox73@ya.ru>
Rudolph Gottesheim <r.gottesheim@loot.at>

@@ -2287,7 +2271,6 @@ Xiaoyu Zhang <zhang.xiaoyu33@zte.com.cn>
xichengliudui <1693291525@qq.com>
xiekeyang <xiekeyang@huawei.com>
Ximo Guanter Gonzálbez <joaquin.guantergonzalbez@telefonica.com>
xin.li <xin.li@daocloud.io>
Xinbo Weng <xihuanbo_0521@zju.edu.cn>
Xinfeng Liu <xinfeng.liu@gmail.com>
Xinzi Zhou <imdreamrunner@gmail.com>

@@ -2299,7 +2282,6 @@ Yahya <ya7yaz@gmail.com>
yalpul <yalpul@gmail.com>
YAMADA Tsuyoshi <tyamada@minimum2scp.org>
Yamasaki Masahide <masahide.y@gmail.com>
Yamazaki Masashi <masi19bw@gmail.com>
Yan Feng <yanfeng2@huawei.com>
Yan Zhu <yanzhu@alauda.io>
Yang Bai <hamo.by@gmail.com>

CONTRIBUTING.md

@@ -72,7 +72,7 @@ anybody starts working on it.
We are always thrilled to receive pull requests. We do our best to process them
quickly. If your pull request is not accepted on the first try,
don't get discouraged! Our contributor's guide explains [the review process we
use for simple changes](https://docs.docker.com/contribute/overview/).
use for simple changes](https://docs.docker.com/opensource/workflow/make-a-contribution/).

### Design and cleanup proposals

@@ -309,6 +309,36 @@ Don't forget: being a maintainer is a time investment. Make sure you
will have time to make yourself available. You don't have to be a
maintainer to make a difference on the project!

### Manage issues and pull requests using the Derek bot

If you want to help label, assign, close or reopen issues or pull requests
without commit rights, ask a maintainer to add your Github handle to the
`.DEREK.yml` file. [Derek](https://github.com/alexellis/derek) is a bot that extends
Github's user permissions to help non-committers to manage issues and pull requests simply by commenting.

For example:

* Labels

```
Derek add label: kind/question
Derek remove label: status/claimed
```

* Assign work

```
Derek assign: username
Derek unassign: me
```

* Manage issues and PRs

```
Derek close
Derek reopen
```

## Moby community guidelines

We want to keep the Moby community awesome, growing and collaborative. We need

@@ -422,6 +452,6 @@ The rules:
guidelines. Since you've read all the rules, you now know that.

If you are having trouble getting into the mood of idiomatic Go, we recommend
reading through [Effective Go](https://go.dev/doc/effective_go). The
[Go Blog](https://go.dev/blog/) is also a great resource. Drinking the
reading through [Effective Go](https://golang.org/doc/effective_go.html). The
[Go Blog](https://blog.golang.org) is also a great resource. Drinking the
kool-aid is a lot easier than going thirsty.

Dockerfile: 687 changes

@@ -1,41 +1,19 @@
# syntax=docker/dockerfile:1

ARG GO_VERSION=1.20.13
ARG BASE_DEBIAN_DISTRO="bullseye"
ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}"
ARG XX_VERSION=1.2.1

ARG CROSS="false"
ARG SYSTEMD="false"
ARG GO_VERSION=1.19.3
ARG DEBIAN_FRONTEND=noninteractive
ARG VPNKIT_VERSION=0.5.0

ARG DOCKERCLI_REPOSITORY="https://github.com/docker/cli.git"
ARG DOCKERCLI_VERSION=v24.0.2
# cli version used for integration-cli tests
ARG DOCKERCLI_INTEGRATION_REPOSITORY="https://github.com/docker/cli.git"
ARG DOCKERCLI_INTEGRATION_VERSION=v17.06.2-ce
ARG BUILDX_VERSION=0.11.2
ARG BASE_DEBIAN_DISTRO="bullseye"
ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}"

ARG SYSTEMD="false"
ARG DEBIAN_FRONTEND=noninteractive
ARG DOCKER_STATIC=1

# cross compilation helper
FROM --platform=$BUILDPLATFORM tonistiigi/xx:${XX_VERSION} AS xx

# dummy stage to make sure the image is built for deps that don't support some
# architectures
FROM --platform=$BUILDPLATFORM busybox AS build-dummy
RUN mkdir -p /build
FROM scratch AS binary-dummy
COPY --from=build-dummy /build /build

# base
FROM --platform=$BUILDPLATFORM ${GOLANG_IMAGE} AS base
COPY --from=xx / /
FROM ${GOLANG_IMAGE} AS base
RUN echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
ARG APT_MIRROR
RUN test -n "$APT_MIRROR" && sed -ri "s#(httpredir|deb|security).debian.org#${APT_MIRROR}#g" /etc/apt/sources.list || true
ARG DEBIAN_FRONTEND
RUN apt-get update && apt-get install --no-install-recommends -y file
RUN sed -ri "s/(httpredir|deb).debian.org/${APT_MIRROR:-deb.debian.org}/g" /etc/apt/sources.list \
 && sed -ri "s/(security).debian.org/${APT_MIRROR:-security.debian.org}/g" /etc/apt/sources.list
ENV GO111MODULE=off

FROM base AS criu

@@ -48,67 +26,53 @@ RUN --mount=type=cache,sharing=locked,id=moby-criu-aptlib,target=/var/lib/apt \
 && apt-get install -y --no-install-recommends criu \
 && install -D /usr/sbin/criu /build/criu

# registry
FROM base AS registry-src
WORKDIR /usr/src/registry
RUN git init . && git remote add origin "https://github.com/distribution/distribution.git"

FROM base AS registry
WORKDIR /go/src/github.com/docker/distribution

# REGISTRY_VERSION specifies the version of the registry to build and install
# from the https://github.com/docker/distribution repository. This version of
# the registry is used to test both schema 1 and schema 2 manifests. Generally,
# the version specified here should match a current release.
ARG REGISTRY_VERSION=v2.3.0

# REGISTRY_VERSION_SCHEMA1 specifies the version of the registry to build and
# install from the https://github.com/docker/distribution repository. This is
# an older (pre v2.3.0) version of the registry that only supports schema1
# manifests. This version of the registry is not working on arm64, so installation
# is skipped on that architecture.
ARG REGISTRY_VERSION_SCHEMA1=v2.1.0
ARG TARGETPLATFORM
RUN --mount=from=registry-src,src=/usr/src/registry,rw \
    --mount=type=cache,target=/root/.cache/go-build,id=registry-build-$TARGETPLATFORM \
RUN --mount=type=cache,target=/root/.cache/go-build \
    --mount=type=cache,target=/go/pkg/mod \
    --mount=type=tmpfs,target=/go/src <<EOT
  set -ex
  git fetch -q --depth 1 origin "${REGISTRY_VERSION}" +refs/tags/*:refs/tags/*
  git checkout -q FETCH_HEAD
  export GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"
  CGO_ENABLED=0 xx-go build -o /build/registry-v2 -v ./cmd/registry
  xx-verify /build/registry-v2
  case $TARGETPLATFORM in
    linux/amd64|linux/arm/v7|linux/ppc64le|linux/s390x)
      git fetch -q --depth 1 origin "${REGISTRY_VERSION_SCHEMA1}" +refs/tags/*:refs/tags/*
      git checkout -q FETCH_HEAD
      CGO_ENABLED=0 xx-go build -o /build/registry-v2-schema1 -v ./cmd/registry
      xx-verify /build/registry-v2-schema1
      ;;
  esac
EOT

# go-swagger
FROM base AS swagger-src
WORKDIR /usr/src/swagger
# Currently uses a fork from https://github.com/kolyshkin/go-swagger/tree/golang-1.13-fix
# TODO: move to under moby/ or fix upstream go-swagger to work for us.
RUN git init . && git remote add origin "https://github.com/kolyshkin/go-swagger.git"
# GO_SWAGGER_COMMIT specifies the version of the go-swagger binary to build and
# install. Go-swagger is used in CI for validating swagger.yaml in hack/validate/swagger-gen
ARG GO_SWAGGER_COMMIT=c56166c036004ba7a3a321e5951ba472b9ae298c
RUN git fetch -q --depth 1 origin "${GO_SWAGGER_COMMIT}" && git checkout -q FETCH_HEAD
    --mount=type=tmpfs,target=/go/src/ \
    set -x \
    && git clone https://github.com/docker/distribution.git . \
    && git checkout -q "$REGISTRY_VERSION" \
    && GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \
       go build -buildmode=pie -o /build/registry-v2 github.com/docker/distribution/cmd/registry \
    && case $(dpkg --print-architecture) in \
         amd64|armhf|ppc64*|s390x) \
           git checkout -q "$REGISTRY_VERSION_SCHEMA1"; \
           GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \
           go build -buildmode=pie -o /build/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \
           ;; \
       esac

FROM base AS swagger
WORKDIR /go/src/github.com/go-swagger/go-swagger
ARG TARGETPLATFORM
RUN --mount=from=swagger-src,src=/usr/src/swagger,rw \
    --mount=type=cache,target=/root/.cache/go-build,id=swagger-build-$TARGETPLATFORM \
WORKDIR $GOPATH/src/github.com/go-swagger/go-swagger

# GO_SWAGGER_COMMIT specifies the version of the go-swagger binary to build and
# install. Go-swagger is used in CI for validating swagger.yaml in hack/validate/swagger-gen
#
# Currently uses a fork from https://github.com/kolyshkin/go-swagger/tree/golang-1.13-fix,
# TODO: move to under moby/ or fix upstream go-swagger to work for us.
ENV GO_SWAGGER_COMMIT c56166c036004ba7a3a321e5951ba472b9ae298c
RUN --mount=type=cache,target=/root/.cache/go-build \
    --mount=type=cache,target=/go/pkg/mod \
    --mount=type=tmpfs,target=/go/src/ <<EOT
  set -e
  xx-go build -o /build/swagger ./cmd/swagger
  xx-verify /build/swagger
EOT
    --mount=type=tmpfs,target=/go/src/ \
    set -x \
    && git clone https://github.com/kolyshkin/go-swagger.git . \
    && git checkout -q "$GO_SWAGGER_COMMIT" \
    && go build -o /build/swagger github.com/go-swagger/go-swagger/cmd/swagger

# frozen-images
# See also frozenImages in "testutil/environment/protect.go" (which needs to

@@ -132,38 +96,80 @@ RUN /download-frozen-image-v2.sh /build \
  hello-world:latest@sha256:d58e752213a51785838f9eed2b7a498ffa1cb3aa7f946dda11af39286c3db9a9 \
  arm32v7/hello-world:latest@sha256:50b8560ad574c779908da71f7ce370c0a2471c098d44d1c8f6b513c5a55eeeb1

# delve
FROM base AS delve-src
WORKDIR /usr/src/delve
RUN git init . && git remote add origin "https://github.com/go-delve/delve.git"
FROM base AS cross-false

FROM --platform=linux/amd64 base AS cross-true
ARG DEBIAN_FRONTEND
RUN dpkg --add-architecture arm64
RUN dpkg --add-architecture armel
RUN dpkg --add-architecture armhf
RUN dpkg --add-architecture ppc64el
RUN dpkg --add-architecture s390x
RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \
    --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \
    apt-get update && apt-get install -y --no-install-recommends \
      crossbuild-essential-arm64 \
      crossbuild-essential-armel \
      crossbuild-essential-armhf \
      crossbuild-essential-ppc64el \
      crossbuild-essential-s390x

FROM cross-${CROSS} AS dev-base

FROM dev-base AS runtime-dev-cross-false
ARG DEBIAN_FRONTEND
RUN --mount=type=cache,sharing=locked,id=moby-cross-false-aptlib,target=/var/lib/apt \
    --mount=type=cache,sharing=locked,id=moby-cross-false-aptcache,target=/var/cache/apt \
    apt-get update && apt-get install -y --no-install-recommends \
      binutils-mingw-w64 \
      g++-mingw-w64-x86-64 \
      libapparmor-dev \
      libbtrfs-dev \
      libdevmapper-dev \
      libseccomp-dev \
      libsystemd-dev \
      libudev-dev

FROM --platform=linux/amd64 runtime-dev-cross-false AS runtime-dev-cross-true
ARG DEBIAN_FRONTEND
# These crossbuild packages rely on gcc-<arch>, but this doesn't want to install
# on non-amd64 systems, so other architectures cannot crossbuild amd64.
RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \
    --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \
    apt-get update && apt-get install -y --no-install-recommends \
      libapparmor-dev:arm64 \
      libapparmor-dev:armel \
      libapparmor-dev:armhf \
      libapparmor-dev:ppc64el \
      libapparmor-dev:s390x \
      libseccomp-dev:arm64 \
      libseccomp-dev:armel \
      libseccomp-dev:armhf \
      libseccomp-dev:ppc64el \
      libseccomp-dev:s390x

FROM runtime-dev-cross-${CROSS} AS runtime-dev

FROM base AS delve
# DELVE_VERSION specifies the version of the Delve debugger binary
# from the https://github.com/go-delve/delve repository.
# It can be used to run Docker with a possibility of
# attaching debugger to it.
ARG DELVE_VERSION=v1.20.1
RUN git fetch -q --depth 1 origin "${DELVE_VERSION}" +refs/tags/*:refs/tags/* && git checkout -q FETCH_HEAD

FROM base AS delve-build
WORKDIR /usr/src/delve
ARG TARGETPLATFORM
RUN --mount=from=delve-src,src=/usr/src/delve,rw \
    --mount=type=cache,target=/root/.cache/go-build,id=delve-build-$TARGETPLATFORM \
    --mount=type=cache,target=/go/pkg/mod <<EOT
  set -e
  GO111MODULE=on xx-go build -o /build/dlv ./cmd/dlv
  xx-verify /build/dlv
EOT

# delve is currently only supported on linux/amd64 and linux/arm64;
#
ARG DELVE_VERSION=v1.8.1
# Delve on Linux is currently only supported on amd64 and arm64;
# https://github.com/go-delve/delve/blob/v1.8.1/pkg/proc/native/support_sentinel.go#L1-L6
FROM binary-dummy AS delve-windows
FROM binary-dummy AS delve-linux-arm
FROM binary-dummy AS delve-linux-ppc64le
FROM binary-dummy AS delve-linux-s390x
FROM delve-build AS delve-linux-amd64
FROM delve-build AS delve-linux-arm64
FROM delve-linux-${TARGETARCH} AS delve-linux
FROM delve-${TARGETOS} AS delve
RUN --mount=type=cache,target=/root/.cache/go-build \
    --mount=type=cache,target=/go/pkg/mod \
    case $(dpkg --print-architecture) in \
      amd64|arm64) \
        GOBIN=/build/ GO111MODULE=on go install "github.com/go-delve/delve/cmd/dlv@${DELVE_VERSION}" \
        && /build/dlv --help \
        ;; \
      *) \
        mkdir -p /build/ \
        ;; \
    esac

FROM base AS tomll
# GOTOML_VERSION specifies the version of the tomll binary to build and install

@@ -180,55 +186,27 @@ RUN --mount=type=cache,target=/root/.cache/go-build \

FROM base AS gowinres
# GOWINRES_VERSION defines go-winres tool version
ARG GOWINRES_VERSION=v0.3.1
ARG GOWINRES_VERSION=v0.3.0
RUN --mount=type=cache,target=/root/.cache/go-build \
    --mount=type=cache,target=/go/pkg/mod \
    GOBIN=/build/ GO111MODULE=on go install "github.com/tc-hib/go-winres@${GOWINRES_VERSION}" \
 && /build/go-winres --help

# containerd
FROM base AS containerd-src
WORKDIR /usr/src/containerd
RUN git init . && git remote add origin "https://github.com/containerd/containerd.git"
# CONTAINERD_VERSION is used to build containerd binaries, and used for the
# integration tests. The distributed docker .deb and .rpm packages depend on a
# separate (containerd.io) package, which may be a different version as is
# specified here. The containerd golang package is also pinned in vendor.mod.
# When updating the binary version you may also need to update the vendor
# version to pick up bug fixes or new APIs, however, usually the Go packages
# are built from a commit from the master branch.
ARG CONTAINERD_VERSION=v1.7.13
RUN git fetch -q --depth 1 origin "${CONTAINERD_VERSION}" +refs/tags/*:refs/tags/* && git checkout -q FETCH_HEAD

FROM base AS containerd-build
WORKDIR /go/src/github.com/containerd/containerd
FROM dev-base AS containerd
ARG DEBIAN_FRONTEND
ARG TARGETPLATFORM
RUN --mount=type=cache,sharing=locked,id=moby-containerd-aptlib,target=/var/lib/apt \
    --mount=type=cache,sharing=locked,id=moby-containerd-aptcache,target=/var/cache/apt \
    apt-get update && xx-apt-get install -y --no-install-recommends \
      gcc libbtrfs-dev libsecret-1-dev
ARG DOCKER_STATIC
RUN --mount=from=containerd-src,src=/usr/src/containerd,rw \
    --mount=type=cache,target=/root/.cache/go-build,id=containerd-build-$TARGETPLATFORM <<EOT
  set -e
  export CC=$(xx-info)-gcc
  export CGO_ENABLED=$([ "$DOCKER_STATIC" = "1" ] && echo "0" || echo "1")
  xx-go --wrap
  make $([ "$DOCKER_STATIC" = "1" ] && echo "STATIC=1") binaries
  xx-verify $([ "$DOCKER_STATIC" = "1" ] && echo "--static") bin/containerd
  xx-verify $([ "$DOCKER_STATIC" = "1" ] && echo "--static") bin/containerd-shim-runc-v2
  xx-verify $([ "$DOCKER_STATIC" = "1" ] && echo "--static") bin/ctr
  mkdir /build
  mv bin/containerd bin/containerd-shim-runc-v2 bin/ctr /build
EOT

FROM containerd-build AS containerd-linux
FROM binary-dummy AS containerd-windows
FROM containerd-${TARGETOS} AS containerd
    apt-get update && apt-get install -y --no-install-recommends \
      libbtrfs-dev
ARG CONTAINERD_VERSION
COPY /hack/dockerfile/install/install.sh /hack/dockerfile/install/containerd.installer /
RUN --mount=type=cache,target=/root/.cache/go-build \
    --mount=type=cache,target=/go/pkg/mod \
    PREFIX=/build /install.sh containerd

FROM base AS golangci_lint
ARG GOLANGCI_LINT_VERSION=v1.55.2
# FIXME: when updating golangci-lint, remove the temporary "nolint" in https://github.com/moby/moby/blob/7860686a8df15eea9def9e6189c6f9eca031bb6f/libnetwork/networkdb/cluster.go#L246
ARG GOLANGCI_LINT_VERSION=v1.49.0
RUN --mount=type=cache,target=/root/.cache/go-build \
    --mount=type=cache,target=/go/pkg/mod \
    GOBIN=/build/ GO111MODULE=on go install "github.com/golangci/golangci-lint/cmd/golangci-lint@${GOLANGCI_LINT_VERSION}" \
@@ -242,138 +220,52 @@ RUN --mount=type=cache,target=/root/.cache/go-build \
|
||||
&& /build/gotestsum --version
|
||||
|
||||
FROM base AS shfmt
|
||||
ARG SHFMT_VERSION=v3.6.0
|
||||
ARG SHFMT_VERSION=v3.0.2
|
||||
RUN --mount=type=cache,target=/root/.cache/go-build \
|
||||
--mount=type=cache,target=/go/pkg/mod \
|
||||
GOBIN=/build/ GO111MODULE=on go install "mvdan.cc/sh/v3/cmd/shfmt@${SHFMT_VERSION}" \
|
||||
&& /build/shfmt --version
|
||||
|
||||
FROM base AS dockercli
|
||||
WORKDIR /go/src/github.com/docker/cli
|
||||
ARG DOCKERCLI_REPOSITORY
|
||||
FROM dev-base AS dockercli
|
||||
ARG DOCKERCLI_CHANNEL
|
||||
ARG DOCKERCLI_VERSION
|
||||
ARG TARGETPLATFORM
|
||||
RUN --mount=source=hack/dockerfile/cli.sh,target=/download-or-build-cli.sh \
|
||||
--mount=type=cache,id=dockercli-git-$TARGETPLATFORM,sharing=locked,target=./.git \
|
||||
--mount=type=cache,target=/root/.cache/go-build,id=dockercli-build-$TARGETPLATFORM \
|
||||
rm -f ./.git/*.lock \
|
||||
&& /download-or-build-cli.sh ${DOCKERCLI_VERSION} ${DOCKERCLI_REPOSITORY} /build \
|
||||
&& /build/docker --version
|
||||
|
||||
FROM base AS dockercli-integration
|
||||
WORKDIR /go/src/github.com/docker/cli
|
||||
ARG DOCKERCLI_INTEGRATION_REPOSITORY
|
||||
ARG DOCKERCLI_INTEGRATION_VERSION
|
||||
ARG TARGETPLATFORM
|
||||
RUN --mount=source=hack/dockerfile/cli.sh,target=/download-or-build-cli.sh \
|
||||
--mount=type=cache,id=dockercli-git-$TARGETPLATFORM,sharing=locked,target=./.git \
|
||||
--mount=type=cache,target=/root/.cache/go-build,id=dockercli-build-$TARGETPLATFORM \
|
||||
rm -f ./.git/*.lock \
|
||||
&& /download-or-build-cli.sh ${DOCKERCLI_INTEGRATION_VERSION} ${DOCKERCLI_INTEGRATION_REPOSITORY} /build \
|
||||
&& /build/docker --version
|
||||
|
||||
# runc
|
||||
FROM base AS runc-src
|
||||
WORKDIR /usr/src/runc
|
||||
RUN git init . && git remote add origin "https://github.com/opencontainers/runc.git"
|
||||
# RUNC_VERSION should match the version that is used by the containerd version
|
||||
# that is used. If you need to update runc, open a pull request in the containerd
|
||||
# project first, and update both after that is merged. When updating RUNC_VERSION,
|
||||
# consider updating runc in vendor.mod accordingly.
|
||||
ARG RUNC_VERSION=v1.1.12
|
||||
RUN git fetch -q --depth 1 origin "${RUNC_VERSION}" +refs/tags/*:refs/tags/* && git checkout -q FETCH_HEAD
|
||||
|
||||
FROM base AS runc-build
|
||||
WORKDIR /go/src/github.com/opencontainers/runc
|
||||
ARG DEBIAN_FRONTEND
|
||||
ARG TARGETPLATFORM
|
||||
RUN --mount=type=cache,sharing=locked,id=moby-runc-aptlib,target=/var/lib/apt \
|
||||
--mount=type=cache,sharing=locked,id=moby-runc-aptcache,target=/var/cache/apt \
|
||||
apt-get update && xx-apt-get install -y --no-install-recommends \
|
||||
dpkg-dev gcc libc6-dev libseccomp-dev
|
||||
ARG DOCKER_STATIC
|
||||
RUN --mount=from=runc-src,src=/usr/src/runc,rw \
|
||||
--mount=type=cache,target=/root/.cache/go-build,id=runc-build-$TARGETPLATFORM <<EOT
|
||||
set -e
|
||||
xx-go --wrap
|
||||
CGO_ENABLED=1 make "$([ "$DOCKER_STATIC" = "1" ] && echo "static" || echo "runc")"
|
||||
xx-verify $([ "$DOCKER_STATIC" = "1" ] && echo "--static") runc
|
||||
mkdir /build
|
||||
mv runc /build/
|
||||
EOT
|
||||
|
||||
FROM runc-build AS runc-linux
|
||||
FROM binary-dummy AS runc-windows
|
||||
FROM runc-${TARGETOS} AS runc
|
||||
|
||||
# tini
FROM base AS tini-src
WORKDIR /usr/src/tini
RUN git init . && git remote add origin "https://github.com/krallin/tini.git"
# TINI_VERSION specifies the version of tini (docker-init) to build. This
# binary is used when starting containers with the `--init` option.
ARG TINI_VERSION=v0.19.0
RUN git fetch -q --depth 1 origin "${TINI_VERSION}" +refs/tags/*:refs/tags/* && git checkout -q FETCH_HEAD

FROM base AS tini-build
WORKDIR /go/src/github.com/krallin/tini
ARG DEBIAN_FRONTEND
RUN --mount=type=cache,sharing=locked,id=moby-tini-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-tini-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends cmake
ARG TARGETPLATFORM
RUN --mount=type=cache,sharing=locked,id=moby-tini-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-tini-aptcache,target=/var/cache/apt \
xx-apt-get install -y --no-install-recommends \
gcc libc6-dev
RUN --mount=from=tini-src,src=/usr/src/tini,rw \
--mount=type=cache,target=/root/.cache/go-build,id=tini-build-$TARGETPLATFORM <<EOT
set -e
CC=$(xx-info)-gcc cmake .
make tini-static
xx-verify --static tini-static
mkdir /build
mv tini-static /build/docker-init
EOT

FROM tini-build AS tini-linux
FROM binary-dummy AS tini-windows
FROM tini-${TARGETOS} AS tini

# rootlesskit
FROM base AS rootlesskit-src
WORKDIR /usr/src/rootlesskit
RUN git init . && git remote add origin "https://github.com/rootless-containers/rootlesskit.git"
# When updating, also update vendor.mod and hack/dockerfile/install/rootlesskit.installer accordingly.
ARG ROOTLESSKIT_VERSION=v1.1.1
RUN git fetch -q --depth 1 origin "${ROOTLESSKIT_VERSION}" +refs/tags/*:refs/tags/* && git checkout -q FETCH_HEAD

FROM base AS rootlesskit-build
WORKDIR /go/src/github.com/rootless-containers/rootlesskit
ARG DEBIAN_FRONTEND
ARG TARGETPLATFORM
RUN --mount=type=cache,sharing=locked,id=moby-rootlesskit-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-rootlesskit-aptcache,target=/var/cache/apt \
apt-get update && xx-apt-get install -y --no-install-recommends \
gcc libc6-dev
ENV GO111MODULE=on
ARG DOCKER_STATIC
RUN --mount=from=rootlesskit-src,src=/usr/src/rootlesskit,rw \
COPY /hack/dockerfile/install/install.sh /hack/dockerfile/install/dockercli.installer /
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=cache,target=/root/.cache/go-build,id=rootlesskit-build-$TARGETPLATFORM <<EOT
set -e
export CGO_ENABLED=$([ "$DOCKER_STATIC" = "1" ] && echo "0" || echo "1")
xx-go build -o /build/rootlesskit -ldflags="$([ "$DOCKER_STATIC" != "1" ] && echo "-linkmode=external")" ./cmd/rootlesskit
xx-verify $([ "$DOCKER_STATIC" = "1" ] && echo "--static") /build/rootlesskit
xx-go build -o /build/rootlesskit-docker-proxy -ldflags="$([ "$DOCKER_STATIC" != "1" ] && echo "-linkmode=external")" ./cmd/rootlesskit-docker-proxy
xx-verify $([ "$DOCKER_STATIC" = "1" ] && echo "--static") /build/rootlesskit-docker-proxy
EOT
COPY --link ./contrib/dockerd-rootless.sh /build/
COPY --link ./contrib/dockerd-rootless-setuptool.sh /build/
PREFIX=/build /install.sh dockercli

FROM rootlesskit-build AS rootlesskit-linux
FROM binary-dummy AS rootlesskit-windows
FROM rootlesskit-${TARGETOS} AS rootlesskit
FROM runtime-dev AS runc
ARG RUNC_VERSION
ARG RUNC_BUILDTAGS
COPY /hack/dockerfile/install/install.sh /hack/dockerfile/install/runc.installer /
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
PREFIX=/build /install.sh runc

FROM dev-base AS tini
ARG DEBIAN_FRONTEND
ARG TINI_VERSION
RUN --mount=type=cache,sharing=locked,id=moby-tini-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-tini-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
cmake \
vim-common
COPY /hack/dockerfile/install/install.sh /hack/dockerfile/install/tini.installer /
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
PREFIX=/build /install.sh tini

FROM dev-base AS rootlesskit
ARG ROOTLESSKIT_VERSION
ARG PREFIX=/build
COPY /hack/dockerfile/install/install.sh /hack/dockerfile/install/rootlesskit.installer /
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
/install.sh rootlesskit \
&& "${PREFIX}"/rootlesskit --version \
&& "${PREFIX}"/rootlesskit-docker-proxy --help
COPY ./contrib/dockerd-rootless.sh /build
COPY ./contrib/dockerd-rootless-setuptool.sh /build

FROM base AS crun
ARG CRUN_VERSION=1.4.5
@@ -413,79 +305,8 @@ FROM djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-linux-arm64
FROM vpnkit-linux-${TARGETARCH} AS vpnkit-linux
FROM vpnkit-${TARGETOS} AS vpnkit

# containerutility
FROM base AS containerutil-src
WORKDIR /usr/src/containerutil
RUN git init . && git remote add origin "https://github.com/docker-archive/windows-container-utility.git"
ARG CONTAINERUTILITY_VERSION=aa1ba87e99b68e0113bd27ec26c60b88f9d4ccd9
RUN git fetch -q --depth 1 origin "${CONTAINERUTILITY_VERSION}" +refs/tags/*:refs/tags/* && git checkout -q FETCH_HEAD

FROM base AS containerutil-build
WORKDIR /usr/src/containerutil
ARG TARGETPLATFORM
RUN xx-apt-get install -y --no-install-recommends gcc g++ libc6-dev
RUN --mount=from=containerutil-src,src=/usr/src/containerutil,rw \
--mount=type=cache,target=/root/.cache/go-build,id=containerutil-build-$TARGETPLATFORM <<EOT
set -e
CC="$(xx-info)-gcc" CXX="$(xx-info)-g++" make
xx-verify --static containerutility.exe
mkdir /build
mv containerutility.exe /build/
EOT

FROM binary-dummy AS containerutil-linux
FROM containerutil-build AS containerutil-windows-amd64
FROM containerutil-windows-${TARGETARCH} AS containerutil-windows
FROM containerutil-${TARGETOS} AS containerutil
FROM docker/buildx-bin:${BUILDX_VERSION} as buildx

FROM base AS dev-systemd-false
COPY --link --from=frozen-images /build/ /docker-frozen-images
COPY --link --from=swagger /build/ /usr/local/bin/
COPY --link --from=delve /build/ /usr/local/bin/
COPY --link --from=tomll /build/ /usr/local/bin/
COPY --link --from=gowinres /build/ /usr/local/bin/
COPY --link --from=tini /build/ /usr/local/bin/
COPY --link --from=registry /build/ /usr/local/bin/

# Skip the CRIU stage for now, as the opensuse package repository is sometimes
# unstable, and we're currently not using it in CI.
#
# FIXME(thaJeztah): re-enable this stage when https://github.com/moby/moby/issues/38963 is resolved (see https://github.com/moby/moby/pull/38984)
# COPY --link --from=criu /build/ /usr/local/bin/
COPY --link --from=gotestsum /build/ /usr/local/bin/
COPY --link --from=golangci_lint /build/ /usr/local/bin/
COPY --link --from=shfmt /build/ /usr/local/bin/
COPY --link --from=runc /build/ /usr/local/bin/
COPY --link --from=containerd /build/ /usr/local/bin/
COPY --link --from=rootlesskit /build/ /usr/local/bin/
COPY --link --from=vpnkit / /usr/local/bin/
COPY --link --from=containerutil /build/ /usr/local/bin/
COPY --link --from=crun /build/ /usr/local/bin/
COPY --link hack/dockerfile/etc/docker/ /etc/docker/
COPY --link --from=buildx /buildx /usr/local/libexec/docker/cli-plugins/docker-buildx

ENV PATH=/usr/local/cli:$PATH
ENV TEST_CLIENT_BINARY=/usr/local/cli-integration/docker
ENV CONTAINERD_ADDRESS=/run/docker/containerd/containerd.sock
ENV CONTAINERD_NAMESPACE=moby
WORKDIR /go/src/github.com/docker/docker
VOLUME /var/lib/docker
VOLUME /home/unprivilegeduser/.local/share/docker
# Wrap all commands in the "docker-in-docker" script to allow nested containers
ENTRYPOINT ["hack/dind"]

FROM dev-systemd-false AS dev-systemd-true
RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
dbus \
dbus-user-session \
systemd \
systemd-sysv
ENTRYPOINT ["hack/dind-systemd"]

FROM dev-systemd-${SYSTEMD} AS dev-base
# TODO: Some of this is only really needed for testing, it would be nice to split this up
FROM runtime-dev AS dev-systemd-false
ARG DEBIAN_FRONTEND
RUN groupadd -r docker
RUN useradd --create-home --gid docker unprivilegeduser \
@@ -497,9 +318,6 @@ RUN ln -sfv /go/src/github.com/docker/docker/.bashrc ~/.bashrc
RUN echo "source /usr/share/bash-completion/bash_completion" >> /etc/bash.bashrc
RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker
RUN ldconfig
# Set dev environment as safe git directory to prevent "dubious ownership" errors
# when bind-mounting the source into the dev-container. See https://github.com/moby/moby/pull/44930
RUN git config --global --add safe.directory $GOPATH/src/github.com/docker/docker
# This should only install packages that are specifically needed for the dev environment and nothing else
# Do you really need to add another package here? Can it be done in a different build stage?
RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \
@@ -524,7 +342,6 @@ RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \
python3-setuptools \
python3-wheel \
sudo \
systemd-journal-remote \
thin-provisioning-tools \
uidmap \
vim \
@@ -533,122 +350,116 @@ RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \
xz-utils \
zip \
zstd

# Switch to use iptables instead of nftables (to match the CI hosts)
# TODO use some kind of runtime auto-detection instead if/when nftables is supported (https://github.com/moby/moby/issues/26824)
RUN update-alternatives --set iptables /usr/sbin/iptables-legacy || true \
&& update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy || true \
&& update-alternatives --set arptables /usr/sbin/arptables-legacy || true

ARG YAMLLINT_VERSION=1.27.1
RUN pip3 install yamllint==${YAMLLINT_VERSION}

COPY --from=dockercli /build/ /usr/local/cli
COPY --from=frozen-images /build/ /docker-frozen-images
COPY --from=swagger /build/ /usr/local/bin/
COPY --from=delve /build/ /usr/local/bin/
COPY --from=tomll /build/ /usr/local/bin/
COPY --from=gowinres /build/ /usr/local/bin/
COPY --from=tini /build/ /usr/local/bin/
COPY --from=registry /build/ /usr/local/bin/
COPY --from=criu /build/ /usr/local/bin/
COPY --from=gotestsum /build/ /usr/local/bin/
COPY --from=golangci_lint /build/ /usr/local/bin/
COPY --from=shfmt /build/ /usr/local/bin/
COPY --from=runc /build/ /usr/local/bin/
COPY --from=containerd /build/ /usr/local/bin/
COPY --from=rootlesskit /build/ /usr/local/bin/
COPY --from=vpnkit / /usr/local/bin/
COPY --from=crun /build/ /usr/local/bin/
COPY hack/dockerfile/etc/docker/ /etc/docker/
ENV PATH=/usr/local/cli:$PATH
ARG DOCKER_BUILDTAGS
ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}"
WORKDIR /go/src/github.com/docker/docker
VOLUME /var/lib/docker
VOLUME /home/unprivilegeduser/.local/share/docker
# Wrap all commands in the "docker-in-docker" script to allow nested containers
ENTRYPOINT ["hack/dind"]

FROM dev-systemd-false AS dev-systemd-true
RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \
apt-get update && apt-get install --no-install-recommends -y \
gcc \
pkg-config \
dpkg-dev \
libapparmor-dev \
libdevmapper-dev \
libseccomp-dev \
libsecret-1-dev \
libsystemd-dev \
libudev-dev
COPY --link --from=dockercli /build/ /usr/local/cli
COPY --link --from=dockercli-integration /build/ /usr/local/cli-integration
apt-get update && apt-get install -y --no-install-recommends \
dbus \
dbus-user-session \
systemd \
systemd-sysv
RUN mkdir -p hack \
&& curl -o hack/dind-systemd https://raw.githubusercontent.com/AkihiroSuda/containerized-systemd/b70bac0daeea120456764248164c21684ade7d0d/docker-entrypoint.sh \
&& chmod +x hack/dind-systemd
ENTRYPOINT ["hack/dind-systemd"]

FROM base AS build
COPY --from=gowinres /build/ /usr/local/bin/
WORKDIR /go/src/github.com/docker/docker
ENV GO111MODULE=off
ENV CGO_ENABLED=1
ARG DEBIAN_FRONTEND
RUN --mount=type=cache,sharing=locked,id=moby-build-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-build-aptcache,target=/var/cache/apt \
apt-get update && apt-get install --no-install-recommends -y \
clang \
lld \
llvm
ARG TARGETPLATFORM
RUN --mount=type=cache,sharing=locked,id=moby-build-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-build-aptcache,target=/var/cache/apt \
xx-apt-get install --no-install-recommends -y \
dpkg-dev \
gcc \
libapparmor-dev \
libc6-dev \
libdevmapper-dev \
libseccomp-dev \
libsecret-1-dev \
libsystemd-dev \
libudev-dev
ARG DOCKER_BUILDTAGS
ARG DOCKER_DEBUG
FROM dev-systemd-${SYSTEMD} AS dev

FROM runtime-dev AS binary-base
ARG DOCKER_GITCOMMIT=HEAD
ARG DOCKER_LDFLAGS
ARG DOCKER_STATIC
ENV DOCKER_GITCOMMIT=${DOCKER_GITCOMMIT}
ARG VERSION
ENV VERSION=${VERSION}
ARG PLATFORM
ENV PLATFORM=${PLATFORM}
ARG PRODUCT
ENV PRODUCT=${PRODUCT}
ARG DEFAULT_PRODUCT_LICENSE
ENV DEFAULT_PRODUCT_LICENSE=${DEFAULT_PRODUCT_LICENSE}
ARG PACKAGER_NAME
# PREFIX overrides DEST dir in make.sh script otherwise it fails because of
# read only mount in current work dir
ENV PREFIX=/tmp
RUN <<EOT
# in bullseye arm64 target does not link with lld so configure it to use ld instead
if [ "$(xx-info arch)" = "arm64" ]; then
XX_CC_PREFER_LINKER=ld xx-clang --setup-target-triple
fi
EOT
RUN --mount=type=bind,target=.,rw \
ENV PACKAGER_NAME=${PACKAGER_NAME}
ARG DOCKER_BUILDTAGS
ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}"
ENV PREFIX=/build
# TODO: This is here because hack/make.sh binary copies these extras binaries
# from $PATH into the bundles dir.
# It would be nice to handle this in a different way.
COPY --from=tini /build/ /usr/local/bin/
COPY --from=runc /build/ /usr/local/bin/
COPY --from=containerd /build/ /usr/local/bin/
COPY --from=rootlesskit /build/ /usr/local/bin/
COPY --from=vpnkit / /usr/local/bin/
COPY --from=gowinres /build/ /usr/local/bin/
WORKDIR /go/src/github.com/docker/docker

FROM binary-base AS build-binary
RUN --mount=type=cache,target=/root/.cache \
--mount=type=bind,target=.,ro \
--mount=type=tmpfs,target=cli/winresources/dockerd \
--mount=type=tmpfs,target=cli/winresources/docker-proxy \
--mount=type=cache,target=/root/.cache/go-build,id=moby-build-$TARGETPLATFORM <<EOT
set -e
target=$([ "$DOCKER_STATIC" = "1" ] && echo "binary" || echo "dynbinary")
xx-go --wrap
PKG_CONFIG=$(xx-go env PKG_CONFIG) ./hack/make.sh $target
xx-verify $([ "$DOCKER_STATIC" = "1" ] && echo "--static") /tmp/bundles/${target}-daemon/dockerd$([ "$(xx-info os)" = "windows" ] && echo ".exe")
xx-verify $([ "$DOCKER_STATIC" = "1" ] && echo "--static") /tmp/bundles/${target}-daemon/docker-proxy$([ "$(xx-info os)" = "windows" ] && echo ".exe")
mkdir /build
mv /tmp/bundles/${target}-daemon/* /build/
EOT
hack/make.sh binary

FROM binary-base AS build-dynbinary
RUN --mount=type=cache,target=/root/.cache \
--mount=type=bind,target=.,ro \
--mount=type=tmpfs,target=cli/winresources/dockerd \
--mount=type=tmpfs,target=cli/winresources/docker-proxy \
hack/make.sh dynbinary

FROM binary-base AS build-cross
ARG DOCKER_CROSSPLATFORMS
RUN --mount=type=cache,target=/root/.cache \
--mount=type=bind,target=.,ro \
--mount=type=tmpfs,target=cli/winresources/dockerd \
--mount=type=tmpfs,target=cli/winresources/docker-proxy \
hack/make.sh cross

# usage:
# > docker buildx bake binary
# > DOCKER_STATIC=0 docker buildx bake binary
# or
# > make binary
# > make dynbinary
FROM scratch AS binary
COPY --from=build /build/ /
COPY --from=build-binary /build/bundles/ /

# usage:
# > docker buildx bake all
FROM scratch AS all
COPY --link --from=tini /build/ /
COPY --link --from=runc /build/ /
COPY --link --from=containerd /build/ /
COPY --link --from=rootlesskit /build/ /
COPY --link --from=containerutil /build/ /
COPY --link --from=vpnkit / /
COPY --link --from=build /build /
FROM scratch AS dynbinary
COPY --from=build-dynbinary /build/bundles/ /

# smoke tests
# usage:
# > docker buildx bake binary-smoketest
FROM --platform=$TARGETPLATFORM base AS smoketest
WORKDIR /usr/local/bin
COPY --from=build /build .
RUN <<EOT
set -ex
file dockerd
dockerd --version
file docker-proxy
docker-proxy --version
EOT
FROM scratch AS cross
COPY --from=build-cross /build/bundles/ /

# usage:
# > make shell
# > SYSTEMD=true make shell
FROM dev-base AS dev
COPY --link . .
FROM dev AS final
COPY . /go/src/github.com/docker/docker

Dockerfile.e2e (new file, 85 lines)
@@ -0,0 +1,85 @@
ARG GO_VERSION=1.19.3

FROM golang:${GO_VERSION}-alpine AS base
ENV GO111MODULE=off
RUN apk --no-cache add \
bash \
btrfs-progs-dev \
build-base \
curl \
lvm2-dev \
jq

RUN mkdir -p /build/
RUN mkdir -p /go/src/github.com/docker/docker/
WORKDIR /go/src/github.com/docker/docker/

FROM base AS frozen-images
# Get useful and necessary Hub images so we can "docker load" locally instead of pulling
COPY contrib/download-frozen-image-v2.sh /
RUN /download-frozen-image-v2.sh /build \
busybox:latest@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209 \
debian:bullseye-slim@sha256:dacf278785a4daa9de07596ec739dbc07131e189942772210709c5c0777e8437 \
hello-world:latest@sha256:d58e752213a51785838f9eed2b7a498ffa1cb3aa7f946dda11af39286c3db9a9 \
arm32v7/hello-world:latest@sha256:50b8560ad574c779908da71f7ce370c0a2471c098d44d1c8f6b513c5a55eeeb1
# See also frozenImages in "testutil/environment/protect.go" (which needs to be updated when adding images to this list)
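For context (not part of the diff): the frozenImages list referenced above lives in testutil/environment/protect.go. A sketch of its assumed shape, mirroring the images downloaded in this stage:

// testutil/environment/protect.go (shape assumed, not verbatim)
var frozenImages = []string{
	"busybox:latest",
	"debian:bullseye-slim",
	"hello-world:latest",
	"arm32v7/hello-world:latest",
}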

FROM base AS dockercli
COPY hack/dockerfile/install/install.sh ./install.sh
COPY hack/dockerfile/install/dockercli.installer ./
RUN PREFIX=/build ./install.sh dockercli

# TestDockerCLIBuildSuite dependency
FROM base AS contrib
COPY contrib/syscall-test /build/syscall-test
COPY contrib/httpserver/Dockerfile /build/httpserver/Dockerfile
COPY contrib/httpserver contrib/httpserver
RUN CGO_ENABLED=0 go build -buildmode=pie -o /build/httpserver/httpserver github.com/docker/docker/contrib/httpserver

# Build the integration tests and copy the resulting binaries to /build/tests
FROM base AS builder

# Set tag and add sources
COPY . .
# Copy test sources so that tests that use assert can print errors
RUN mkdir -p /build${PWD} && find integration integration-cli -name \*_test.go -exec cp --parents '{}' /build${PWD} \;
# Build and install test binaries
ARG DOCKER_GITCOMMIT=undefined
RUN hack/make.sh build-integration-test-binary
RUN mkdir -p /build/tests && find . -name test.main -exec cp --parents '{}' /build/tests \;

## Generate testing image
FROM alpine:3.10 as runner

ENV DOCKER_REMOTE_DAEMON=1
ENV DOCKER_INTEGRATION_DAEMON_DEST=/
ENTRYPOINT ["/scripts/run.sh"]

# Add an unprivileged user to be used for tests which need it
RUN addgroup docker && adduser -D -G docker unprivilegeduser -s /bin/ash

# GNU tar is used for generating the emptyfs image
RUN apk --no-cache add \
bash \
ca-certificates \
g++ \
git \
inetutils-ping \
iptables \
libcap2-bin \
pigz \
tar \
xz

COPY hack/test/e2e-run.sh /scripts/run.sh
COPY hack/make/.ensure-emptyfs /scripts/ensure-emptyfs.sh

COPY integration/testdata /tests/integration/testdata
COPY integration/build/testdata /tests/integration/build/testdata
COPY integration-cli/fixtures /tests/integration-cli/fixtures

COPY --from=frozen-images /build/ /docker-frozen-images
COPY --from=dockercli /build/ /usr/bin/
COPY --from=contrib /build/ /tests/contrib/
COPY --from=builder /build/ /
@@ -5,7 +5,7 @@

# This represents the bare minimum required to build and test Docker.

ARG GO_VERSION=1.20.13
ARG GO_VERSION=1.19.3

ARG BASE_DEBIAN_DISTRO="bullseye"
ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}"
@@ -13,9 +13,9 @@ ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}"
FROM ${GOLANG_IMAGE}
ENV GO111MODULE=off

# allow replacing debian mirror
ARG APT_MIRROR
RUN test -n "$APT_MIRROR" && sed -ri "s#(httpredir|deb|security).debian.org#${APT_MIRROR}#g" /etc/apt/sources.list || true
# allow replacing httpredir or deb mirror
ARG APT_MIRROR=deb.debian.org
RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list

# Compile and runtime deps
# https://github.com/docker/docker/blob/master/project/PACKAGERS.md#build-dependencies
@@ -24,8 +24,10 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
build-essential \
curl \
cmake \
gcc \
git \
libapparmor-dev \
libbtrfs-dev \
libdevmapper-dev \
libseccomp-dev \
ca-certificates \

@@ -165,10 +165,10 @@ FROM microsoft/windowsservercore
# Use PowerShell as the default shell
SHELL ["powershell", "-Command", "$ErrorActionPreference = 'Stop'; $ProgressPreference = 'SilentlyContinue';"]

ARG GO_VERSION=1.20.13
ARG GO_VERSION=1.19.3
ARG GOTESTSUM_VERSION=v1.8.2
ARG GOWINRES_VERSION=v0.3.1
ARG CONTAINERD_VERSION=v1.7.13
ARG GOWINRES_VERSION=v0.3.0
ARG CONTAINERD_VERSION=v1.6.10

# Environment variable notes:
# - GO_VERSION must be consistent with 'Dockerfile' used by Linux.
@@ -224,7 +224,7 @@ RUN `
`
Write-Host INFO: Downloading go...; `
$dlGoVersion=$Env:GO_VERSION -replace '\.0$',''; `
Download-File "https://go.dev/dl/go${dlGoVersion}.windows-amd64.zip" C:\go.zip; `
Download-File "https://golang.org/dl/go${dlGoVersion}.windows-amd64.zip" C:\go.zip; `
`
Write-Host INFO: Downloading compiler 1 of 3...; `
Download-File https://raw.githubusercontent.com/moby/docker-tdmgcc/master/gcc.zip C:\gcc.zip; `

Jenkinsfile (vendored, 414 lines changed)
@@ -9,12 +9,15 @@ pipeline {
}
parameters {
booleanParam(name: 'arm64', defaultValue: true, description: 'ARM (arm64) Build/Test')
booleanParam(name: 's390x', defaultValue: false, description: 'IBM Z (s390x) Build/Test')
booleanParam(name: 'ppc64le', defaultValue: false, description: 'PowerPC (ppc64le) Build/Test')
booleanParam(name: 'dco', defaultValue: true, description: 'Run the DCO check')
}
environment {
DOCKER_BUILDKIT = '1'
DOCKER_EXPERIMENTAL = '1'
DOCKER_GRAPHDRIVER = 'overlay2'
APT_MIRROR = 'cdn-fastly.deb.debian.org'
CHECK_CONFIG_COMMIT = '33a3680e08d1007e72c3b3f1454f823d8e9948ee'
TESTDEBUG = '0'
TIMEOUT = '120m'
@@ -49,15 +52,16 @@ pipeline {
}
stage('Build') {
parallel {
stage('arm64') {
stage('s390x') {
when {
beforeAgent true
expression { params.arm64 }
}
agent { label 'arm64 && ubuntu-2004' }
environment {
TEST_SKIP_INTEGRATION_CLI = '1'
// Skip this stage on PRs unless the checkbox is selected
anyOf {
not { changeRequest() }
expression { params.s390x }
}
}
agent { label 's390x-ubuntu-2004' }

stages {
stage("Print info") {
@@ -73,7 +77,402 @@ pipeline {
}
stage("Build dev image") {
steps {
sh 'docker build --force-rm -t docker:${GIT_COMMIT} .'
sh '''
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Unit tests") {
steps {
sh '''
sudo modprobe ip6table_filter
'''
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/test/unit
'''
}
post {
always {
junit testResults: 'bundles/junit-report*.xml', allowEmptyResults: true
}
}
}
stage("Integration tests") {
environment { TEST_SKIP_INTEGRATION_CLI = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TESTDEBUG \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}

post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''

sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''

catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=s390x-integration
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''

archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('s390x integration-cli') {
when {
beforeAgent true
// Skip this stage on PRs unless the checkbox is selected
anyOf {
not { changeRequest() }
expression { params.s390x }
}
}
agent { label 's390x-ubuntu-2004' }

stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Integration-cli tests") {
environment { TEST_SKIP_INTEGRATION = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TEST_SKIP_INTEGRATION \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}

post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''

sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''

catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=s390x-integration-cli
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''

archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('ppc64le') {
when {
beforeAgent true
// Skip this stage on PRs unless the checkbox is selected
anyOf {
not { changeRequest() }
expression { params.ppc64le }
}
}
agent { label 'ppc64le-ubuntu-1604' }

stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
docker buildx build --load --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Unit tests") {
steps {
sh '''
sudo modprobe ip6table_filter
'''
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/test/unit
'''
}
post {
always {
junit testResults: 'bundles/junit-report*.xml', allowEmptyResults: true
}
}
}
stage("Integration tests") {
environment { TEST_SKIP_INTEGRATION_CLI = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TESTDEBUG \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}

post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''

sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''

catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=ppc64le-integration
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''

archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('ppc64le integration-cli') {
when {
beforeAgent true
// Skip this stage on PRs unless the checkbox is selected
anyOf {
not { changeRequest() }
expression { params.ppc64le }
}
}
agent { label 'ppc64le-ubuntu-1604' }

stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
docker buildx build --load --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Integration-cli tests") {
environment { TEST_SKIP_INTEGRATION = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TEST_SKIP_INTEGRATION \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}

post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''

sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''

catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=ppc64le-integration-cli
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''

archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('arm64') {
when {
beforeAgent true
expression { params.arm64 }
}
agent { label 'arm64 && ubuntu-2004' }
environment {
TEST_SKIP_INTEGRATION_CLI = '1'
}

stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh 'docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .'
}
}
stage("Unit tests") {
@@ -111,7 +510,6 @@ pipeline {
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TESTDEBUG \
-e TEST_INTEGRATION_USE_SNAPSHOTTER \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \

MAINTAINERS (48 lines changed)
@@ -27,15 +27,12 @@
"akihirosuda",
"anusha",
"coolljt0725",
"corhere",
"cpuguy83",
"estesp",
"johnstep",
"justincormack",
"kolyshkin",
"mhbauer",
"neersighted",
"rumpl",
"runcom",
"samuelkarp",
"stevvooe",
@@ -46,7 +43,6 @@
"unclejack",
"vdemeester",
"vieux",
"vvoland",
"yongtang"
]

@@ -63,18 +59,17 @@
# - close an issue or pull request when it's inappropriate or off-topic

people = [
"akerouanton",
"alexellis",
"andrewhsu",
"bsousaa",
"crazy-max",
"corhere",
"fntlnz",
"gianarb",
"laurazard",
"ndeloof",
"neersighted",
"olljanat",
"programmerq",
"ripcurld",
"sam-thibault",
"rumpl",
"samwhited",
"thajeztah"
]
@@ -282,11 +277,6 @@
Email = "aaron.lehmann@docker.com"
GitHub = "aaronlehmann"

[people.akerouanton]
Name = "Albin Kerouanton"
Email = "albinker@gmail.com"
GitHub = "akerouanton"

[people.alexellis]
Name = "Alex Ellis"
Email = "alexellis2@gmail.com"
@@ -316,11 +306,6 @@
Name = "Anusha Ragunathan"
Email = "anusha@docker.com"
GitHub = "anusha-ragunathan"

[people.bsousaa]
Name = "Bruno de Sousa"
Email = "bruno.sousa@docker.com"
GitHub = "bsousaa"

[people.calavera]
Name = "David Calavera"
@@ -342,11 +327,6 @@
Email = "cpuguy83@gmail.com"
GitHub = "cpuguy83"

[people.crazy-max]
Name = "Kevin Alvarez"
Email = "contact@crazymax.dev"
GitHub = "crazy-max"

[people.crosbymichael]
Name = "Michael Crosby"
Email = "crosbymichael@gmail.com"
@@ -417,11 +397,6 @@
Email = "kolyshkin@gmail.com"
GitHub = "kolyshkin"

[people.laurazard]
Name = "Laura Brehm"
Email = "laura.brehm@docker.com"
GitHub = "laurazard"

[people.lk4d4]
Name = "Alexander Morozov"
Email = "lk4d4@docker.com"
@@ -457,6 +432,11 @@
Email = "mrjana@docker.com"
GitHub = "mrjana"

[people.ndeloof]
Name = "Nicolas De Loof"
Email = "nicolas.deloof@gmail.com"
GitHub = "ndeloof"

[people.neersighted]
Name = "Bjorn Neergaard"
Email = "bneergaard@mirantis.com"
@@ -487,11 +467,6 @@
Email = "runcom@redhat.com"
GitHub = "runcom"

[people.sam-thibault]
Name = "Sam Thibault"
Email = "sam.thibault@docker.com"
GitHub = "sam-thibault"

[people.samuelkarp]
Name = "Samuel Karp"
Email = "me@samuelkarp.com"
@@ -561,11 +536,6 @@
Name = "Vishnu Kannan"
Email = "vishnuk@google.com"
GitHub = "vishh"

[people.vvoland]
Name = "Paweł Gronowski"
Email = "pawel.gronowski@docker.com"
GitHub = "vvoland"

[people.yongtang]
Name = "Yong Tang"

Makefile (55 lines changed)
@@ -4,10 +4,14 @@ DOCKER ?= docker
BUILDX ?= $(DOCKER) buildx

# set the graph driver as the current graphdriver if not set
DOCKER_GRAPHDRIVER := $(if $(DOCKER_GRAPHDRIVER),$(DOCKER_GRAPHDRIVER),$(shell docker info -f {{ .Driver }} 2>&1))
DOCKER_GRAPHDRIVER := $(if $(DOCKER_GRAPHDRIVER),$(DOCKER_GRAPHDRIVER),$(shell docker info 2>&1 | grep "Storage Driver" | sed 's/.*: //'))
export DOCKER_GRAPHDRIVER

DOCKER_GITCOMMIT := $(shell git rev-parse HEAD)
# get OS/Arch of docker engine
DOCKER_OSARCH := $(shell bash -c 'source hack/make/.detect-daemon-osarch && echo $${DOCKER_ENGINE_OSARCH}')
DOCKERFILE := $(shell bash -c 'source hack/make/.detect-daemon-osarch && echo $${DOCKERFILE}')

DOCKER_GITCOMMIT := $(shell git rev-parse --short HEAD || echo unsupported)
export DOCKER_GITCOMMIT

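For context (not part of the diff): the updated Makefile line asks the daemon directly via `docker info -f {{ .Driver }}` instead of scraping text output with grep/sed. A minimal Go sketch of the same query, assuming the standard github.com/docker/docker/client package:

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	// Equivalent of `docker info -f '{{ .Driver }}'`: ask the daemon for
	// its system info and print the active storage driver, e.g. "overlay2".
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	info, err := cli.Info(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println(info.Driver)
}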
# allow overriding the repository and branch that validation scripts are running
@@ -27,6 +31,7 @@ export VALIDATE_ORIGIN_BRANCH
# make DOCKER_LDFLAGS="-X github.com/docker/docker/daemon/graphdriver.priority=overlay2,devicemapper" dynbinary
#
DOCKER_ENVS := \
-e DOCKER_CROSSPLATFORMS \
-e BUILD_APT_MIRROR \
-e BUILDFLAGS \
-e KEEPBUNDLE \
@@ -37,10 +42,6 @@ DOCKER_ENVS := \
-e DOCKER_BUILDKIT \
-e DOCKER_BASH_COMPLETION_PATH \
-e DOCKER_CLI_PATH \
-e DOCKERCLI_VERSION \
-e DOCKERCLI_REPOSITORY \
-e DOCKERCLI_INTEGRATION_VERSION \
-e DOCKERCLI_INTEGRATION_REPOSITORY \
-e DOCKER_DEBUG \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT \
@@ -57,8 +58,6 @@ DOCKER_ENVS := \
-e GITHUB_ACTIONS \
-e TEST_FORCE_VALIDATE \
-e TEST_INTEGRATION_DIR \
-e TEST_INTEGRATION_USE_SNAPSHOTTER \
-e TEST_INTEGRATION_FAIL_FAST \
-e TEST_SKIP_INTEGRATION \
-e TEST_SKIP_INTEGRATION_CLI \
-e TESTCOVERAGE \
@@ -137,18 +136,23 @@ endif
DOCKER_RUN_DOCKER := $(DOCKER_FLAGS) "$(DOCKER_IMAGE)"

DOCKER_BUILD_ARGS += --build-arg=GO_VERSION
DOCKER_BUILD_ARGS += --build-arg=APT_MIRROR
DOCKER_BUILD_ARGS += --build-arg=DOCKERCLI_VERSION
DOCKER_BUILD_ARGS += --build-arg=DOCKERCLI_REPOSITORY
DOCKER_BUILD_ARGS += --build-arg=DOCKERCLI_INTEGRATION_VERSION
DOCKER_BUILD_ARGS += --build-arg=DOCKERCLI_INTEGRATION_REPOSITORY
ifdef DOCKER_SYSTEMD
DOCKER_BUILD_ARGS += --build-arg=SYSTEMD=true
endif

BUILD_OPTS := ${BUILD_APT_MIRROR} ${DOCKER_BUILD_ARGS} ${DOCKER_BUILD_OPTS}
BUILD_OPTS := ${BUILD_APT_MIRROR} ${DOCKER_BUILD_ARGS} ${DOCKER_BUILD_OPTS} -f "$(DOCKERFILE)"
BUILD_CMD := $(BUILDX) build
BAKE_CMD := $(BUILDX) bake

# This is used for the legacy "build" target and anything still depending on it
BUILD_CROSS =
ifdef DOCKER_CROSS
BUILD_CROSS = --build-arg CROSS=$(DOCKER_CROSS)
endif
ifdef DOCKER_CROSSPLATFORMS
BUILD_CROSS = --build-arg CROSS=true
endif

VERSION_AUTOGEN_ARGS = --build-arg VERSION --build-arg DOCKER_GITCOMMIT --build-arg PRODUCT --build-arg PLATFORM --build-arg DEFAULT_PRODUCT_LICENSE --build-arg PACKAGER_NAME

default: binary

@@ -156,13 +160,14 @@ all: build ## validate all checks, build linux binaries, run all tests, cross b
$(DOCKER_RUN_DOCKER) bash -c 'hack/validate/default && hack/make.sh'

binary: bundles ## build statically linked linux binaries
$(BAKE_CMD) binary
$(BUILD_CMD) $(BUILD_OPTS) --output=bundles/ --target=$@ $(VERSION_AUTOGEN_ARGS) .

dynbinary: bundles ## build dynamically linked linux binaries
$(BAKE_CMD) dynbinary
$(BUILD_CMD) $(BUILD_OPTS) --output=bundles/ --target=$@ $(VERSION_AUTOGEN_ARGS) .

cross: bundles ## cross build the binaries
$(BAKE_CMD) binary-cross
cross: BUILD_OPTS += --build-arg CROSS=true --build-arg DOCKER_CROSSPLATFORMS
cross: bundles ## cross build the binaries for darwin, freebsd and windows
$(BUILD_CMD) $(BUILD_OPTS) --output=bundles/ --target=$@ $(VERSION_AUTOGEN_ARGS) .

bundles:
mkdir bundles
@@ -185,18 +190,18 @@ run: build ## run the docker daemon in a container

.PHONY: build
ifeq ($(BIND_DIR), .)
build: shell_target := --target=dev-base
else
build: shell_target := --target=dev
else
build: shell_target := --target=final
endif
build: bundles
$(BUILD_CMD) $(BUILD_OPTS) $(shell_target) --load -t "$(DOCKER_IMAGE)" .
$(BUILD_CMD) $(BUILD_OPTS) $(shell_target) --load $(BUILD_CROSS) -t "$(DOCKER_IMAGE)" .

shell: build ## start a shell inside the build env
$(DOCKER_RUN_DOCKER) bash

test: build test-unit ## run the unit, integration and docker-py tests
$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary test-integration test-docker-py
$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary cross test-integration test-docker-py

test-docker-py: build ## run the docker-py tests
$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary test-docker-py
@@ -223,8 +228,8 @@ validate: build ## validate DCO, Seccomp profile generation, gofmt, ./pkg/ isol
validate-%: build ## validate specific check
$(DOCKER_RUN_DOCKER) hack/validate/$*

win: bundles ## cross build the binary for windows
$(BAKE_CMD) --set *.platform=windows/amd64 binary
win: build ## cross build the binary for windows
$(DOCKER_RUN_DOCKER) DOCKER_CROSSPLATFORMS=windows/amd64 hack/make.sh cross

.PHONY: swagger-gen
swagger-gen:

@@ -14,7 +14,7 @@ Moby is an open project guided by strong principles, aiming to be modular, flexi
It is open to the community to help set its direction.

- Modular: the project includes lots of components that have well-defined functions and APIs that work together.
- Batteries included but swappable: Moby includes enough components to build fully featured container systems, but its modular architecture ensures that most of the components can be swapped by different implementations.
- Batteries included but swappable: Moby includes enough components to build fully featured container system, but its modular architecture ensures that most of the components can be swapped by different implementations.
- Usable security: Moby provides secure defaults without compromising usability.
- Developer focused: The APIs are intended to be functional and useful to build powerful tools.
They are not necessarily intended as end user tools but as components aimed at developers.

@@ -3,7 +3,7 @@ package api // import "github.com/docker/docker/api"
// Common constants for daemon and client.
const (
// DefaultVersion of Current REST API
DefaultVersion = "1.43"
DefaultVersion = "1.42"

// NoBaseImageSpecifier is the symbol used by the FROM
// command to specify that no base image is to be used.

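For context (not part of the diff): DefaultVersion is what the daemon and client fall back to, but callers can pin an explicit API version instead. A minimal sketch, assuming the standard github.com/docker/docker/client package:

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	// Pin the client to a fixed REST API version rather than relying on
	// DefaultVersion or automatic negotiation.
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithVersion("1.43"))
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ping, err := cli.Ping(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println("daemon API version:", ping.APIVersion)
}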
@@ -21,7 +21,7 @@ import (
// ImageComponent provides an interface for working with images
type ImageComponent interface {
SquashImage(from string, to string) (string, error)
TagImage(context.Context, image.ID, reference.Named) error
TagImageWithReference(image.ID, reference.Named) error
}

// Builder defines interface for running a build
@@ -54,7 +54,7 @@ func (b *Backend) Build(ctx context.Context, config backend.BuildConfig) (string
options := config.Options
useBuildKit := options.Version == types.BuilderBuildKit

tags, err := sanitizeRepoAndTags(options.Tags)
tagger, err := NewTagger(b.imageComponent, config.ProgressWriter.StdoutFormatter, options.Tags)
if err != nil {
return "", err
}
@@ -92,8 +92,8 @@ func (b *Backend) Build(ctx context.Context, config backend.BuildConfig) (string
stdout := config.ProgressWriter.StdoutFormatter
fmt.Fprintf(stdout, "Successfully built %s\n", stringid.TruncateID(imageID))
}
if imageID != "" && !useBuildKit {
err = tagImages(ctx, b.imageComponent, config.ProgressWriter.StdoutFormatter, image.ID(imageID), tags)
if imageID != "" {
err = tagger.TagImages(image.ID(imageID))
}
return imageID, err
}

@@ -1,7 +1,6 @@
package build // import "github.com/docker/docker/api/server/backend/build"

import (
"context"
"fmt"
"io"

@@ -10,22 +9,47 @@ import (
"github.com/pkg/errors"
)

// tagImages creates image tags for the imageID.
func tagImages(ctx context.Context, ic ImageComponent, stdout io.Writer, imageID image.ID, repoAndTags []reference.Named) error {
for _, rt := range repoAndTags {
if err := ic.TagImage(ctx, imageID, rt); err != nil {
// Tagger is responsible for tagging an image created by a builder
type Tagger struct {
imageComponent ImageComponent
stdout io.Writer
repoAndTags []reference.Named
}

// NewTagger returns a new Tagger for tagging the images of a build.
// If any of the names are invalid tags an error is returned.
func NewTagger(backend ImageComponent, stdout io.Writer, names []string) (*Tagger, error) {
reposAndTags, err := sanitizeRepoAndTags(names)
if err != nil {
return nil, err
}
return &Tagger{
imageComponent: backend,
stdout: stdout,
repoAndTags: reposAndTags,
}, nil
}

// TagImages creates image tags for the imageID
func (bt *Tagger) TagImages(imageID image.ID) error {
for _, rt := range bt.repoAndTags {
if err := bt.imageComponent.TagImageWithReference(imageID, rt); err != nil {
return err
}
_, _ = fmt.Fprintln(stdout, "Successfully tagged", reference.FamiliarString(rt))
fmt.Fprintf(bt.stdout, "Successfully tagged %s\n", reference.FamiliarString(rt))
}
return nil
}

// sanitizeRepoAndTags parses the raw "t" parameter received from the client
|
||||
// to a slice of repoAndTag. It removes duplicates, and validates each name
|
||||
// to not contain a digest.
|
||||
func sanitizeRepoAndTags(names []string) (repoAndTags []reference.Named, err error) {
|
||||
uniqNames := map[string]struct{}{}
|
||||
// to a slice of repoAndTag.
|
||||
// It also validates each repoName and tag.
|
||||
func sanitizeRepoAndTags(names []string) ([]reference.Named, error) {
|
||||
var (
|
||||
repoAndTags []reference.Named
|
||||
// This map is used for deduplicating the "-t" parameter.
|
||||
uniqNames = make(map[string]struct{})
|
||||
)
|
||||
for _, repo := range names {
|
||||
if repo == "" {
|
||||
continue
|
||||
@@ -36,12 +60,14 @@ func sanitizeRepoAndTags(names []string) (repoAndTags []reference.Named, err err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if _, ok := ref.(reference.Digested); ok {
|
||||
if _, isCanonical := ref.(reference.Canonical); isCanonical {
|
||||
return nil, errors.New("build tag cannot contain a digest")
|
||||
}
|
||||
|
||||
ref = reference.TagNameOnly(ref)
|
||||
|
||||
nameWithTag := ref.String()
|
||||
|
||||
if _, exists := uniqNames[nameWithTag]; !exists {
|
||||
uniqNames[nameWithTag] = struct{}{}
|
||||
repoAndTags = append(repoAndTags, ref)
|
||||
|
||||
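As context for the Tagger hunk above: the sanitize step rests entirely on the reference library (normalize, reject digests, default the tag, deduplicate `-t` values). A minimal standalone sketch of that logic — the helper name `sanitize` and the sample image names are invented for illustration, this is not the moby code itself:

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

// sanitize mirrors the hunk above in spirit: normalize each name,
// reject digested references, apply the default tag, and deduplicate.
func sanitize(names []string) ([]reference.Named, error) {
	var out []reference.Named
	seen := map[string]struct{}{}
	for _, name := range names {
		if name == "" {
			continue
		}
		ref, err := reference.ParseNormalizedNamed(name)
		if err != nil {
			return nil, err
		}
		if _, ok := ref.(reference.Canonical); ok {
			return nil, fmt.Errorf("build tag cannot contain a digest")
		}
		ref = reference.TagNameOnly(ref) // adds the default ":latest" when no tag was given
		key := ref.String()
		if _, dup := seen[key]; !dup {
			seen[key] = struct{}{}
			out = append(out, ref)
		}
	}
	return out, nil
}

func main() {
	refs, err := sanitize([]string{"myimage", "myimage:latest", "docker.io/library/myimage"})
	if err != nil {
		panic(err)
	}
	for _, r := range refs {
		fmt.Println(r.String()) // all three inputs collapse to docker.io/library/myimage:latest
	}
}
```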
@@ -4,7 +4,7 @@ import (
"fmt"
"net/http"

cerrdefs "github.com/containerd/containerd/errdefs"
containerderrors "github.com/containerd/containerd/errdefs"
"github.com/docker/distribution/registry/api/errcode"
"github.com/docker/docker/errdefs"
"github.com/sirupsen/logrus"
@@ -132,17 +132,17 @@ func statusCodeFromDistributionError(err error) int {
// consumed directly (not through gRPC)
func statusCodeFromContainerdError(err error) int {
switch {
case cerrdefs.IsInvalidArgument(err):
case containerderrors.IsInvalidArgument(err):
return http.StatusBadRequest
case cerrdefs.IsNotFound(err):
case containerderrors.IsNotFound(err):
return http.StatusNotFound
case cerrdefs.IsAlreadyExists(err):
case containerderrors.IsAlreadyExists(err):
return http.StatusConflict
case cerrdefs.IsFailedPrecondition(err):
case containerderrors.IsFailedPrecondition(err):
return http.StatusPreconditionFailed
case cerrdefs.IsUnavailable(err):
case containerderrors.IsUnavailable(err):
return http.StatusServiceUnavailable
case cerrdefs.IsNotImplemented(err):
case containerderrors.IsNotImplemented(err):
return http.StatusNotImplemented
default:
return http.StatusInternalServerError

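The hunk above only renames the import alias; the mapping itself is a plain classification switch over containerd's errdefs predicates. A minimal sketch of the same idea, assuming only the public errdefs helpers and pkg/errors (the `toStatus` name is made up):

```go
package main

import (
	"fmt"
	"net/http"

	cerrdefs "github.com/containerd/containerd/errdefs"
	"github.com/pkg/errors"
)

// toStatus classifies a containerd error and picks an HTTP status code,
// a two-case cut-down of the switch in the hunk above.
func toStatus(err error) int {
	switch {
	case cerrdefs.IsNotFound(err):
		return http.StatusNotFound
	case cerrdefs.IsInvalidArgument(err):
		return http.StatusBadRequest
	default:
		return http.StatusInternalServerError
	}
}

func main() {
	err := errors.Wrap(cerrdefs.ErrNotFound, "no such container")
	fmt.Println(toStatus(err)) // 404 — IsNotFound matches through the wrapping
}
```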
@@ -1,12 +1,9 @@
package httputils // import "github.com/docker/docker/api/server/httputils"

import (
"fmt"
"net/http"
"strconv"
"strings"

"github.com/docker/distribution/reference"
)

// BoolValue transforms a form value in different formats into a boolean type.
@@ -44,38 +41,6 @@ func Int64ValueOrDefault(r *http.Request, field string, def int64) (int64, error
return def, nil
}

// RepoTagReference parses form values "repo" and "tag" and returns a valid
// reference with repository and tag.
// If repo is empty, then a nil reference is returned.
// If no tag is given, then the default "latest" tag is set.
func RepoTagReference(repo, tag string) (reference.NamedTagged, error) {
if repo == "" {
return nil, nil
}

ref, err := reference.ParseNormalizedNamed(repo)
if err != nil {
return nil, err
}

if _, isDigested := ref.(reference.Digested); isDigested {
return nil, fmt.Errorf("cannot import digest reference")
}

if tag != "" {
return reference.WithTag(ref, tag)
}

withDefaultTag := reference.TagNameOnly(ref)

namedTagged, ok := withDefaultTag.(reference.NamedTagged)
if !ok {
return nil, fmt.Errorf("unexpected reference: %q", ref.String())
}

return namedTagged, nil
}

// ArchiveOptions stores archive information for different operations.
type ArchiveOptions struct {
Name string

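RepoTagReference above is built from three reference-library calls: ParseNormalizedNamed, WithTag, and TagNameOnly. A small usage sketch of those calls on their own (the image names are invented):

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	// Parse a repo and attach an explicit tag, giving a NamedTagged reference.
	ref, err := reference.ParseNormalizedNamed("myrepo/myimage")
	if err != nil {
		panic(err)
	}
	tagged, err := reference.WithTag(ref, "v1")
	if err != nil {
		panic(err)
	}
	fmt.Println(tagged.String()) // docker.io/myrepo/myimage:v1

	// With no tag given, TagNameOnly applies the default "latest".
	withDefault := reference.TagNameOnly(ref)
	if nt, ok := withDefault.(reference.NamedTagged); ok {
		fmt.Println(nt.Tag()) // latest
	}
}
```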
@@ -33,7 +33,7 @@ func TestJsonContentType(t *testing.T) {

func TestReadJSON(t *testing.T) {
t.Run("nil body", func(t *testing.T) {
req, err := http.NewRequest(http.MethodPost, "https://example.com/some/path", nil)
req, err := http.NewRequest("POST", "https://example.com/some/path", nil)
if err != nil {
t.Error(err)
}
@@ -45,7 +45,7 @@ func TestReadJSON(t *testing.T) {
})

t.Run("empty body", func(t *testing.T) {
req, err := http.NewRequest(http.MethodPost, "https://example.com/some/path", strings.NewReader(""))
req, err := http.NewRequest("POST", "https://example.com/some/path", strings.NewReader(""))
if err != nil {
t.Error(err)
}
@@ -60,7 +60,7 @@ func TestReadJSON(t *testing.T) {
})

t.Run("with valid request", func(t *testing.T) {
req, err := http.NewRequest(http.MethodPost, "https://example.com/some/path", strings.NewReader(`{"SomeField":"some value"}`))
req, err := http.NewRequest("POST", "https://example.com/some/path", strings.NewReader(`{"SomeField":"some value"}`))
if err != nil {
t.Error(err)
}
@@ -75,7 +75,7 @@ func TestReadJSON(t *testing.T) {
}
})
t.Run("with whitespace", func(t *testing.T) {
req, err := http.NewRequest(http.MethodPost, "https://example.com/some/path", strings.NewReader(`
req, err := http.NewRequest("POST", "https://example.com/some/path", strings.NewReader(`

{"SomeField":"some value"}

@@ -95,7 +95,7 @@ func TestReadJSON(t *testing.T) {
})

t.Run("with extra content", func(t *testing.T) {
req, err := http.NewRequest(http.MethodPost, "https://example.com/some/path", strings.NewReader(`{"SomeField":"some value"} and more content`))
req, err := http.NewRequest("POST", "https://example.com/some/path", strings.NewReader(`{"SomeField":"some value"} and more content`))
if err != nil {
t.Error(err)
}
@@ -112,7 +112,7 @@ func TestReadJSON(t *testing.T) {
})

t.Run("invalid JSON", func(t *testing.T) {
req, err := http.NewRequest(http.MethodPost, "https://example.com/some/path", strings.NewReader(`{invalid json`))
req, err := http.NewRequest("POST", "https://example.com/some/path", strings.NewReader(`{invalid json`))
if err != nil {
t.Error(err)
}

@@ -4,7 +4,6 @@ import (
"context"
"net/http"

"github.com/docker/docker/api/types/registry"
"github.com/sirupsen/logrus"
)

@@ -31,7 +30,7 @@ func (c CORSMiddleware) WrapHandler(handler func(ctx context.Context, w http.Res

logrus.Debugf("CORS header is enabled and set to: %s", corsHeaders)
w.Header().Add("Access-Control-Allow-Origin", corsHeaders)
w.Header().Add("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept, "+registry.AuthHeader)
w.Header().Add("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept, X-Registry-Auth")
w.Header().Add("Access-Control-Allow-Methods", "HEAD, GET, POST, DELETE, PUT, OPTIONS")
return handler(ctx, w, r, vars)
}

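The change above only swaps a hardcoded header name for the registry.AuthHeader constant; the surrounding shape is ordinary wrap-a-handler middleware. A minimal standard-library sketch of the same three headers (the `cors` helper, route, and port are invented):

```go
package main

import (
	"log"
	"net/http"
)

// cors wraps a handler and attaches the same CORS headers the middleware
// in the hunk above sets before delegating to the wrapped handler.
func cors(allowedOrigins string, next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Add("Access-Control-Allow-Origin", allowedOrigins)
		w.Header().Add("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept, X-Registry-Auth")
		w.Header().Add("Access-Control-Allow-Methods", "HEAD, GET, POST, DELETE, PUT, OPTIONS")
		next.ServeHTTP(w, r)
	})
}

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/_ping", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("OK"))
	})
	log.Fatal(http.ListenAndServe(":8080", cors("*", mux)))
}
```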
@@ -19,8 +19,8 @@ import (
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/registry"
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/progress"
"github.com/docker/docker/pkg/streamformatter"
@@ -29,11 +29,13 @@ import (
"github.com/sirupsen/logrus"
)

type invalidParam struct {
error
type invalidIsolationError string

func (e invalidIsolationError) Error() string {
return fmt.Sprintf("Unsupported isolation: %q", string(e))
}

func (e invalidParam) InvalidParameter() {}
func (e invalidIsolationError) InvalidParameter() {}

func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBuildOptions, error) {
options := &types.ImageBuildOptions{
@@ -62,8 +64,7 @@ func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBui
}

if runtime.GOOS != "windows" && options.SecurityOpt != nil {
// SecurityOpt only supports "credentials-spec" on Windows, and not used on other platforms.
return nil, invalidParam{errors.New("security options are not supported on " + runtime.GOOS)}
return nil, errdefs.InvalidParameter(errors.New("The daemon on this platform does not support setting security options on build"))
}

version := httputils.VersionFromContext(ctx)
@@ -85,7 +86,7 @@ func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBui
if outputsJSON != "" {
var outputs []types.ImageBuildOutput
if err := json.Unmarshal([]byte(outputsJSON), &outputs); err != nil {
return nil, invalidParam{errors.Wrap(err, "invalid outputs specified")}
return nil, err
}
options.Outputs = outputs
}
@@ -102,14 +103,14 @@ func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBui
if i := r.FormValue("isolation"); i != "" {
options.Isolation = container.Isolation(i)
if !options.Isolation.IsValid() {
return nil, invalidParam{errors.Errorf("unsupported isolation: %q", i)}
return nil, invalidIsolationError(options.Isolation)
}
}

if ulimitsJSON := r.FormValue("ulimits"); ulimitsJSON != "" {
var buildUlimits = []*units.Ulimit{}
if err := json.Unmarshal([]byte(ulimitsJSON), &buildUlimits); err != nil {
return nil, invalidParam{errors.Wrap(err, "error reading ulimit settings")}
return nil, errors.Wrap(errdefs.InvalidParameter(err), "error reading ulimit settings")
}
options.Ulimits = buildUlimits
}
@@ -129,7 +130,7 @@ func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBui
if buildArgsJSON := r.FormValue("buildargs"); buildArgsJSON != "" {
var buildArgs = map[string]*string{}
if err := json.Unmarshal([]byte(buildArgsJSON), &buildArgs); err != nil {
return nil, invalidParam{errors.Wrap(err, "error reading build args")}
return nil, errors.Wrap(errdefs.InvalidParameter(err), "error reading build args")
}
options.BuildArgs = buildArgs
}
@@ -137,7 +138,7 @@ func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBui
if labelsJSON := r.FormValue("labels"); labelsJSON != "" {
var labels = map[string]string{}
if err := json.Unmarshal([]byte(labelsJSON), &labels); err != nil {
return nil, invalidParam{errors.Wrap(err, "error reading labels")}
return nil, errors.Wrap(errdefs.InvalidParameter(err), "error reading labels")
}
options.Labels = labels
}
@@ -145,7 +146,7 @@ func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBui
if cacheFromJSON := r.FormValue("cachefrom"); cacheFromJSON != "" {
var cacheFrom = []string{}
if err := json.Unmarshal([]byte(cacheFromJSON), &cacheFrom); err != nil {
return nil, invalidParam{errors.Wrap(err, "error reading cache-from")}
return nil, err
}
options.CacheFrom = cacheFrom
}
@@ -168,7 +169,7 @@ func parseVersion(s string) (types.BuilderVersion, error) {
case types.BuilderBuildKit:
return types.BuilderBuildKit, nil
default:
return "", invalidParam{errors.Errorf("invalid version %q", s)}
return "", errors.Errorf("invalid version %q", s)
}
}

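The invalidParam / invalidIsolationError types traded back and forth above are marker-interface errors: a bare InvalidParameter() method is the signal that later maps the error to HTTP 400. A self-contained sketch of the pattern — the type and helper names are reused here purely for illustration:

```go
package main

import (
	"errors"
	"fmt"
)

// invalidParam wraps any error and marks it as a client error via the
// InvalidParameter marker method, as in the hunks above.
type invalidParam struct{ error }

func (e invalidParam) InvalidParameter() {}

// isInvalidParameter looks for the marker interface anywhere in the chain.
func isInvalidParameter(err error) bool {
	var m interface{ InvalidParameter() }
	return errors.As(err, &m)
}

func main() {
	err := invalidParam{errors.New(`unsupported isolation: "bogus"`)}
	wrapped := fmt.Errorf("build failed: %w", err)
	fmt.Println(isInvalidParameter(wrapped)) // true -> would map to HTTP 400
}
```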
@@ -178,7 +179,7 @@ func (br *buildRouter) postPrune(ctx context.Context, w http.ResponseWriter, r *
}
fltrs, err := filters.FromJSON(r.Form.Get("filters"))
if err != nil {
return err
return errors.Wrap(err, "could not parse filters")
}
ksfv := r.FormValue("keep-storage")
if ksfv == "" {
@@ -186,7 +187,7 @@ func (br *buildRouter) postPrune(ctx context.Context, w http.ResponseWriter, r *
}
ks, err := strconv.Atoi(ksfv)
if err != nil {
return invalidParam{errors.Wrapf(err, "keep-storage is in bytes and expects an integer, got %v", ksfv)}
return errors.Wrapf(err, "keep-storage is in bytes and expects an integer, got %v", ksfv)
}

opts := types.BuildCachePruneOptions{
@@ -207,7 +208,7 @@ func (br *buildRouter) postCancel(ctx context.Context, w http.ResponseWriter, r

id := r.FormValue("id")
if id == "" {
return invalidParam{errors.New("build ID not provided")}
return errors.Errorf("build ID not provided")
}

return br.backend.Cancel(ctx, id)
@@ -260,7 +261,7 @@ func (br *buildRouter) postBuild(ctx context.Context, w http.ResponseWriter, r *
buildOptions.AuthConfigs = getAuthConfigs(r.Header)

if buildOptions.Squash && !br.daemon.HasExperimental() {
return invalidParam{errors.New("squash is only supported with experimental mode")}
return errdefs.InvalidParameter(errors.New("squash is only supported with experimental mode"))
}

out := io.Writer(output)
@@ -294,8 +295,8 @@ func (br *buildRouter) postBuild(ctx context.Context, w http.ResponseWriter, r *
return nil
}

func getAuthConfigs(header http.Header) map[string]registry.AuthConfig {
authConfigs := map[string]registry.AuthConfig{}
func getAuthConfigs(header http.Header) map[string]types.AuthConfig {
authConfigs := map[string]types.AuthConfig{}
authConfigsEncoded := header.Get("X-Registry-Config")

if authConfigsEncoded == "" {

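getAuthConfigs above decodes the X-Registry-Config header, which carries a base64url-encoded JSON map of registry address to credentials. A standard-library sketch of that round trip — the trimmed-down authConfig struct here is an assumption; the real AuthConfig type has more fields:

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"strings"
)

// authConfig is a stand-in for the daemon's AuthConfig type.
type authConfig struct {
	Username string `json:"username"`
	Auth     string `json:"auth"`
}

func main() {
	// What a client would put into X-Registry-Config.
	payload := `{"https://index.docker.io/v1/":{"username":"jane"}}`
	encoded := base64.URLEncoding.EncodeToString([]byte(payload))

	// What the handler does with it: base64url-decode, then JSON-decode.
	configs := map[string]authConfig{}
	dec := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encoded)))
	if err := dec.Decode(&configs); err != nil {
		// malformed auth is deliberately ignored for API compatibility
		configs = map[string]authConfig{}
	}
	fmt.Printf("%+v\n", configs)
}
```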
@@ -25,21 +25,21 @@ type execBackend interface {
type copyBackend interface {
ContainerArchivePath(name string, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error)
ContainerCopy(name string, res string) (io.ReadCloser, error)
ContainerExport(ctx context.Context, name string, out io.Writer) error
ContainerExport(name string, out io.Writer) error
ContainerExtractToDir(name, path string, copyUIDGID, noOverwriteDirNonDir bool, content io.Reader) error
ContainerStatPath(name string, path string) (stat *types.ContainerPathStat, err error)
}

// stateBackend includes functions to implement to provide container state lifecycle functionality.
type stateBackend interface {
ContainerCreate(ctx context.Context, config types.ContainerCreateConfig) (container.CreateResponse, error)
ContainerCreate(config types.ContainerCreateConfig) (container.CreateResponse, error)
ContainerKill(name string, signal string) error
ContainerPause(name string) error
ContainerRename(oldName, newName string) error
ContainerResize(name string, height, width int) error
ContainerRestart(ctx context.Context, name string, options container.StopOptions) error
ContainerRm(name string, config *types.ContainerRmConfig) error
ContainerStart(ctx context.Context, name string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error
ContainerStart(name string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error
ContainerStop(ctx context.Context, name string, options container.StopOptions) error
ContainerUnpause(name string) error
ContainerUpdate(name string, hostConfig *container.HostConfig) (container.ContainerUpdateOKBody, error)
@@ -48,13 +48,13 @@ type stateBackend interface {

// monitorBackend includes functions to implement to provide containers monitoring functionality.
type monitorBackend interface {
ContainerChanges(ctx context.Context, name string) ([]archive.Change, error)
ContainerInspect(ctx context.Context, name string, size bool, version string) (interface{}, error)
ContainerChanges(name string) ([]archive.Change, error)
ContainerInspect(name string, size bool, version string) (interface{}, error)
ContainerLogs(ctx context.Context, name string, config *types.ContainerLogsOptions) (msgs <-chan *backend.LogMessage, tty bool, err error)
ContainerStats(ctx context.Context, name string, config *backend.ContainerStatsConfig) error
ContainerTop(name string, psArgs string) (*container.ContainerTopOKBody, error)

Containers(ctx context.Context, config *types.ContainerListOptions) ([]*types.Container, error)
Containers(config *types.ContainerListOptions) ([]*types.Container, error)
}

// attachBackend includes function to implement to provide container attaching functionality.
@@ -68,7 +68,7 @@ type systemBackend interface {
}

type commitBackend interface {
CreateImageFromContainer(ctx context.Context, name string, config *backend.CreateImageConfig) (imageID string, err error)
CreateImageFromContainer(name string, config *backend.CreateImageConfig) (imageID string, err error)
}

// Backend is all the methods that need to be implemented to provide container specific functionality.

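The signature churn in these backend interfaces is almost entirely the presence or absence of a leading context.Context. The practical payoff of threading the context through is cancellation and deadlines. A minimal sketch with an invented `containerStop` stand-in (not a moby API):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// containerStop takes a context as its first argument, matching the
// shape of the interface methods above, and honors cancellation.
func containerStop(ctx context.Context, name string) error {
	select {
	case <-time.After(50 * time.Millisecond): // pretend the stop takes a while
		return nil
	case <-ctx.Done():
		return ctx.Err() // caller cancelled or the deadline passed
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()
	fmt.Println(containerStop(ctx, "web")) // context deadline exceeded
}
```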
@@ -21,7 +21,7 @@ import (
containerpkg "github.com/docker/docker/container"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/pkg/ioutils"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/net/websocket"
@@ -44,23 +44,21 @@ func (s *containerRouter) postCommit(ctx context.Context, w http.ResponseWriter,
}

config, _, _, err := s.decoder.DecodeConfig(r.Body)
if err != nil && !errors.Is(err, io.EOF) { // Do not fail if body is empty.
if err != nil && err != io.EOF { // Do not fail if body is empty.
return err
}

ref, err := httputils.RepoTagReference(r.Form.Get("repo"), r.Form.Get("tag"))
if err != nil {
return errdefs.InvalidParameter(err)
}

imgID, err := s.backend.CreateImageFromContainer(ctx, r.Form.Get("container"), &backend.CreateImageConfig{
commitCfg := &backend.CreateImageConfig{
Pause: pause,
Tag: ref,
Repo: r.Form.Get("repo"),
Tag: r.Form.Get("tag"),
Author: r.Form.Get("author"),
Comment: r.Form.Get("comment"),
Config: config,
Changes: r.Form["changes"],
})
}

imgID, err := s.backend.CreateImageFromContainer(r.Form.Get("container"), commitCfg)
if err != nil {
return err
}
@@ -93,7 +91,7 @@ func (s *containerRouter) getContainersJSON(ctx context.Context, w http.Response
config.Limit = limit
}

containers, err := s.backend.Containers(ctx, config)
containers, err := s.backend.Containers(config)
if err != nil {
return err
}
@@ -172,7 +170,7 @@ func (s *containerRouter) getContainersLogs(ctx context.Context, w http.Response
}

func (s *containerRouter) getContainersExport(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
return s.backend.ContainerExport(ctx, vars["name"], w)
return s.backend.ContainerExport(vars["name"], w)
}

type bodyOnStartError struct{}
@@ -216,7 +214,7 @@ func (s *containerRouter) postContainersStart(ctx context.Context, w http.Respon

checkpoint := r.Form.Get("checkpoint")
checkpointDir := r.Form.Get("checkpoint-dir")
if err := s.backend.ContainerStart(ctx, vars["name"], hostConfig, checkpoint, checkpointDir); err != nil {
if err := s.backend.ContainerStart(vars["name"], hostConfig, checkpoint, checkpointDir); err != nil {
return err
}

@@ -399,7 +397,7 @@ func (s *containerRouter) postContainersWait(ctx context.Context, w http.Respons
}

func (s *containerRouter) getContainersChanges(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
changes, err := s.backend.ContainerChanges(ctx, vars["name"])
changes, err := s.backend.ContainerChanges(vars["name"])
if err != nil {
return err
}
@@ -486,9 +484,6 @@ func (s *containerRouter) postContainersCreate(ctx context.Context, w http.Respo

config, hostConfig, networkingConfig, err := s.decoder.DecodeConfig(r.Body)
if err != nil {
if errors.Is(err, io.EOF) {
return errdefs.InvalidParameter(errors.New("invalid JSON: got EOF while reading request body"))
}
return err
}
version := httputils.VersionFromContext(ctx)
@@ -564,12 +559,7 @@ func (s *containerRouter) postContainersCreate(ctx context.Context, w http.Respo
hostConfig.ConsoleSize = [2]uint{0, 0}
}

if hostConfig != nil && versions.LessThan(version, "1.43") {
// Ignore Annotations because it was added in API v1.43.
hostConfig.Annotations = nil
}

var platform *ocispec.Platform
var platform *specs.Platform
if versions.GreaterThanOrEqualTo(version, "1.41") {
if v := r.Form.Get("platform"); v != "" {
p, err := platforms.Parse(v)
@@ -588,7 +578,7 @@ func (s *containerRouter) postContainersCreate(ctx context.Context, w http.Respo
hostConfig.PidsLimit = nil
}

ccr, err := s.backend.ContainerCreate(ctx, types.ContainerCreateConfig{
ccr, err := s.backend.ContainerCreate(types.ContainerCreateConfig{
Name: name,
Config: config,
HostConfig: hostConfig,

@@ -12,7 +12,7 @@ func (s *containerRouter) getContainersByName(ctx context.Context, w http.Respon
displaySize := httputils.BoolValue(r, "size")

version := httputils.VersionFromContext(ctx)
json, err := s.backend.ContainerInspect(ctx, vars["name"], displaySize, version)
json, err := s.backend.ContainerInspect(vars["name"], displaySize, version)
if err != nil {
return err
}

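The postCommit hunk above trades `err != io.EOF` for `errors.Is(err, io.EOF)`. The difference only shows up once the EOF gets wrapped somewhere on the way up, which is exactly what %w wrapping does:

```go
package main

import (
	"errors"
	"fmt"
	"io"
)

func main() {
	// A wrapped EOF no longer compares equal with ==, but errors.Is
	// still matches it through the wrapping chain.
	wrapped := fmt.Errorf("decode config: %w", io.EOF)
	fmt.Println(wrapped == io.EOF)          // false
	fmt.Println(errors.Is(wrapped, io.EOF)) // true
}
```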
@@ -5,11 +5,11 @@ import (

"github.com/docker/distribution"
"github.com/docker/distribution/reference"
"github.com/docker/docker/api/types/registry"
"github.com/docker/docker/api/types"
)

// Backend is all the methods that need to be implemented
// to provide image specific functionality.
type Backend interface {
GetRepository(context.Context, reference.Named, *registry.AuthConfig) (distribution.Repository, error)
GetRepository(context.Context, reference.Named, *types.AuthConfig) (distribution.Repository, error)
}

@@ -2,17 +2,20 @@ package distribution // import "github.com/docker/docker/api/server/router/distr

import (
"context"
"encoding/base64"
"encoding/json"
"net/http"
"strings"

"github.com/docker/distribution/manifest/manifestlist"
"github.com/docker/distribution/manifest/schema1"
"github.com/docker/distribution/manifest/schema2"
"github.com/docker/distribution/reference"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/types/registry"
"github.com/docker/docker/api/types"
registrytypes "github.com/docker/docker/api/types/registry"
"github.com/docker/docker/errdefs"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)

@@ -23,6 +26,21 @@ func (s *distributionRouter) getDistributionInfo(ctx context.Context, w http.Res

w.Header().Set("Content-Type", "application/json")

var (
config = &types.AuthConfig{}
authEncoded = r.Header.Get("X-Registry-Auth")
distributionInspect registrytypes.DistributionInspect
)

if authEncoded != "" {
authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
if err := json.NewDecoder(authJSON).Decode(&config); err != nil {
// for a search it is not an error if no auth was given
// to increase compatibility with the existing api it is defaulting to be empty
config = &types.AuthConfig{}
}
}

image := vars["name"]

// TODO why is reference.ParseAnyReference() / reference.ParseNormalizedNamed() not using the reference.ErrTagInvalidFormat (and so on) errors?
@@ -39,16 +57,12 @@ func (s *distributionRouter) getDistributionInfo(ctx context.Context, w http.Res
return errdefs.InvalidParameter(errors.Errorf("unknown image reference format: %s", image))
}

// For a search it is not an error if no auth was given. Ignore invalid
// AuthConfig to increase compatibility with the existing API.
authConfig, _ := registry.DecodeAuthConfig(r.Header.Get(registry.AuthHeader))
distrepo, err := s.backend.GetRepository(ctx, namedRef, authConfig)
distrepo, err := s.backend.GetRepository(ctx, namedRef, config)
if err != nil {
return err
}
blobsrvc := distrepo.Blobs(ctx)

var distributionInspect registry.DistributionInspect
if canonicalRef, ok := namedRef.(reference.Canonical); !ok {
namedRef = reference.TagNameOnly(namedRef)

@@ -61,7 +75,7 @@ func (s *distributionRouter) getDistributionInfo(ctx context.Context, w http.Res
if err != nil {
return err
}
distributionInspect.Descriptor = ocispec.Descriptor{
distributionInspect.Descriptor = v1.Descriptor{
MediaType: descriptor.MediaType,
Digest: descriptor.Digest,
Size: descriptor.Size,
@@ -107,7 +121,7 @@ func (s *distributionRouter) getDistributionInfo(ctx context.Context, w http.Res
switch mnfstObj := mnfst.(type) {
case *manifestlist.DeserializedManifestList:
for _, m := range mnfstObj.Manifests {
distributionInspect.Platforms = append(distributionInspect.Platforms, ocispec.Platform{
distributionInspect.Platforms = append(distributionInspect.Platforms, v1.Platform{
Architecture: m.Platform.Architecture,
OS: m.Platform.OS,
OSVersion: m.Platform.OSVersion,
@@ -117,7 +131,7 @@ func (s *distributionRouter) getDistributionInfo(ctx context.Context, w http.Res
}
case *schema2.DeserializedManifest:
configJSON, err := blobsrvc.Get(ctx, mnfstObj.Config.Digest)
var platform ocispec.Platform
var platform v1.Platform
if err == nil {
err := json.Unmarshal(configJSON, &platform)
if err == nil && (platform.OS != "" || platform.Architecture != "") {
@@ -125,7 +139,7 @@ func (s *distributionRouter) getDistributionInfo(ctx context.Context, w http.Res
}
}
case *schema1.SignedManifest:
platform := ocispec.Platform{
platform := v1.Platform{
Architecture: mnfstObj.Architecture,
OS: "linux",
}

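The ocispec / v1 churn above is purely an import-alias rename; both names point at the OCI image-spec Platform type that these hunks fill in field by field. For reference, containerd's platforms helper parses the wire format into that same struct — a small sketch:

```go
package main

import (
	"fmt"

	"github.com/containerd/containerd/platforms"
)

func main() {
	// Parse a platform specifier into an OCI (ocispec/v1) Platform struct.
	p, err := platforms.Parse("linux/amd64")
	if err != nil {
		panic(err)
	}
	fmt.Println(p.OS, p.Architecture)  // linux amd64
	fmt.Println(platforms.Format(p))   // linux/amd64
}
```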
@@ -4,13 +4,12 @@ import (
"context"
"io"

"github.com/docker/distribution/reference"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/image"
"github.com/docker/docker/api/types/registry"
dockerimage "github.com/docker/docker/image"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
specs "github.com/opencontainers/image-spec/specs-go/v1"
)

// Backend is all the methods that need to be implemented
@@ -22,25 +21,22 @@ type Backend interface {
}

type imageBackend interface {
ImageDelete(ctx context.Context, imageRef string, force, prune bool) ([]types.ImageDeleteResponseItem, error)
ImageHistory(ctx context.Context, imageName string) ([]*image.HistoryResponseItem, error)
ImageDelete(imageRef string, force, prune bool) ([]types.ImageDeleteResponseItem, error)
ImageHistory(imageName string) ([]*image.HistoryResponseItem, error)
Images(ctx context.Context, opts types.ImageListOptions) ([]*types.ImageSummary, error)
GetImage(ctx context.Context, refOrID string, options image.GetImageOpts) (*dockerimage.Image, error)
TagImage(ctx context.Context, id dockerimage.ID, newRef reference.Named) error
GetImage(refOrID string, platform *specs.Platform) (retImg *dockerimage.Image, retErr error)
TagImage(imageName, repository, tag string) (string, error)
ImagesPrune(ctx context.Context, pruneFilters filters.Args) (*types.ImagesPruneReport, error)
}

type importExportBackend interface {
LoadImage(ctx context.Context, inTar io.ReadCloser, outStream io.Writer, quiet bool) error
ImportImage(ctx context.Context, ref reference.Named, platform *ocispec.Platform, msg string, layerReader io.Reader, changes []string) (dockerimage.ID, error)
ExportImage(ctx context.Context, names []string, outStream io.Writer) error
LoadImage(inTar io.ReadCloser, outStream io.Writer, quiet bool) error
ImportImage(src string, repository string, platform *specs.Platform, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error
ExportImage(names []string, outStream io.Writer) error
}

type registryBackend interface {
PullImage(ctx context.Context, image, tag string, platform *ocispec.Platform, metaHeaders map[string][]string, authConfig *registry.AuthConfig, outStream io.Writer) error
PushImage(ctx context.Context, ref reference.Named, metaHeaders map[string][]string, authConfig *registry.AuthConfig, outStream io.Writer) error
}

type Searcher interface {
Search(ctx context.Context, searchFilters filters.Args, term string, limit int, authConfig *registry.AuthConfig, headers map[string][]string) ([]registry.SearchResult, error)
PullImage(ctx context.Context, image, tag string, platform *specs.Platform, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error
PushImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error
SearchRegistryForImages(ctx context.Context, searchFilters filters.Args, term string, limit int, authConfig *types.AuthConfig, metaHeaders map[string][]string) (*registry.SearchResults, error)
}

@@ -10,7 +10,6 @@ import (
// imageRouter is a router to talk with the image controller
type imageRouter struct {
backend Backend
searcher Searcher
referenceBackend reference.Store
imageStore image.Store
layerStore layer.Store
@@ -18,40 +17,39 @@ type imageRouter struct {
}

// NewRouter initializes a new image router
func NewRouter(backend Backend, searcher Searcher, referenceBackend reference.Store, imageStore image.Store, layerStore layer.Store) router.Router {
ir := &imageRouter{
func NewRouter(backend Backend, referenceBackend reference.Store, imageStore image.Store, layerStore layer.Store) router.Router {
r := &imageRouter{
backend: backend,
searcher: searcher,
referenceBackend: referenceBackend,
imageStore: imageStore,
layerStore: layerStore,
}
ir.initRoutes()
return ir
r.initRoutes()
return r
}

// Routes returns the available routes to the image controller
func (ir *imageRouter) Routes() []router.Route {
return ir.routes
func (r *imageRouter) Routes() []router.Route {
return r.routes
}

// initRoutes initializes the routes in the image router
func (ir *imageRouter) initRoutes() {
ir.routes = []router.Route{
func (r *imageRouter) initRoutes() {
r.routes = []router.Route{
// GET
router.NewGetRoute("/images/json", ir.getImagesJSON),
router.NewGetRoute("/images/search", ir.getImagesSearch),
router.NewGetRoute("/images/get", ir.getImagesGet),
router.NewGetRoute("/images/{name:.*}/get", ir.getImagesGet),
router.NewGetRoute("/images/{name:.*}/history", ir.getImagesHistory),
router.NewGetRoute("/images/{name:.*}/json", ir.getImagesByName),
router.NewGetRoute("/images/json", r.getImagesJSON),
router.NewGetRoute("/images/search", r.getImagesSearch),
router.NewGetRoute("/images/get", r.getImagesGet),
router.NewGetRoute("/images/{name:.*}/get", r.getImagesGet),
router.NewGetRoute("/images/{name:.*}/history", r.getImagesHistory),
router.NewGetRoute("/images/{name:.*}/json", r.getImagesByName),
// POST
router.NewPostRoute("/images/load", ir.postImagesLoad),
router.NewPostRoute("/images/create", ir.postImagesCreate),
router.NewPostRoute("/images/{name:.*}/push", ir.postImagesPush),
router.NewPostRoute("/images/{name:.*}/tag", ir.postImagesTag),
router.NewPostRoute("/images/prune", ir.postImagesPrune),
router.NewPostRoute("/images/load", r.postImagesLoad),
router.NewPostRoute("/images/create", r.postImagesCreate),
router.NewPostRoute("/images/{name:.*}/push", r.postImagesPush),
router.NewPostRoute("/images/{name:.*}/tag", r.postImagesTag),
router.NewPostRoute("/images/prune", r.postImagesPrune),
// DELETE
router.NewDeleteRoute("/images/{name:.*}", ir.deleteImages),
router.NewDeleteRoute("/images/{name:.*}", r.deleteImages),
}
}

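initRoutes above is essentially a route table: method+path pairs bound to handler methods on the router value. A toy standard-library equivalent of the same shape — the paths, handlers, and port here are invented, not moby's router package:

```go
package main

import (
	"fmt"
	"log"
	"net/http"
)

// route pairs a method and path with a handler, like the table above.
type route struct {
	method, path string
	handler      http.HandlerFunc
}

func main() {
	routes := []route{
		{http.MethodGet, "/images/json", func(w http.ResponseWriter, r *http.Request) { fmt.Fprintln(w, "[]") }},
		{http.MethodPost, "/images/load", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) }},
	}
	mux := http.NewServeMux()
	for _, rt := range routes {
		rt := rt // capture per-iteration value for the closure
		mux.HandleFunc(rt.path, func(w http.ResponseWriter, r *http.Request) {
			if r.Method != rt.method {
				http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
				return
			}
			rt.handler(w, r)
		})
	}
	log.Fatal(http.ListenAndServe(":8080", mux))
}
```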
@@ -2,9 +2,9 @@ package image // import "github.com/docker/docker/api/server/router/image"

import (
"context"
"io"
"encoding/base64"
"encoding/json"
"net/http"
"net/url"
"strconv"
"strings"
"time"
@@ -14,34 +14,31 @@ import (
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
opts "github.com/docker/docker/api/types/image"
"github.com/docker/docker/api/types/registry"
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/builder/remotecontext"
"github.com/docker/docker/dockerversion"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/image"
"github.com/docker/docker/layer"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/progress"
"github.com/docker/docker/pkg/streamformatter"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)

// Creates an image from Pull or from Import
func (ir *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
func (s *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {

if err := httputils.ParseForm(r); err != nil {
return err
}

var (
img = r.Form.Get("fromImage")
image = r.Form.Get("fromImage")
repo = r.Form.Get("repo")
tag = r.Form.Get("tag")
comment = r.Form.Get("message")
message = r.Form.Get("message")
progressErr error
output = ioutils.NewWriteFlusher(w)
platform *ocispec.Platform
platform *specs.Platform
)
defer output.Close()

@@ -58,7 +55,7 @@ func (ir *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWrit
}
}

if img != "" { // pull
if image != "" { // pull
metaHeaders := map[string][]string{}
for k, v := range r.Header {
if strings.HasPrefix(k, "X-Meta-") {
@@ -66,51 +63,20 @@ func (ir *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWrit
}
}

// For a pull it is not an error if no auth was given. Ignore invalid
// AuthConfig to increase compatibility with the existing API.
authConfig, _ := registry.DecodeAuthConfig(r.Header.Get(registry.AuthHeader))
progressErr = ir.backend.PullImage(ctx, img, tag, platform, metaHeaders, authConfig, output)
authEncoded := r.Header.Get("X-Registry-Auth")
authConfig := &types.AuthConfig{}
if authEncoded != "" {
authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil {
// for a pull it is not an error if no auth was given
// to increase compatibility with the existing api it is defaulting to be empty
authConfig = &types.AuthConfig{}
}
}
progressErr = s.backend.PullImage(ctx, image, tag, platform, metaHeaders, authConfig, output)
} else { // import
src := r.Form.Get("fromSrc")

tagRef, err := httputils.RepoTagReference(repo, tag)
if err != nil {
return errdefs.InvalidParameter(err)
}

if len(comment) == 0 {
comment = "Imported from " + src
}

var layerReader io.ReadCloser
defer r.Body.Close()
if src == "-" {
layerReader = r.Body
} else {
if len(strings.Split(src, "://")) == 1 {
src = "http://" + src
}
u, err := url.Parse(src)
if err != nil {
return errdefs.InvalidParameter(err)
}

resp, err := remotecontext.GetWithStatusError(u.String())
if err != nil {
return err
}
output.Write(streamformatter.FormatStatus("", "Downloading from %s", u))
progressOutput := streamformatter.NewJSONProgressOutput(output, true)
layerReader = progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Importing")
defer layerReader.Close()
}

var id image.ID
id, progressErr = ir.backend.ImportImage(ctx, tagRef, platform, comment, layerReader, r.Form["changes"])

if progressErr == nil {
output.Write(streamformatter.FormatStatus("", id.String()))
}
progressErr = s.backend.ImportImage(src, repo, platform, tag, message, r.Body, output, r.Form["changes"])
}
if progressErr != nil {
if !output.Flushed() {
@@ -122,7 +88,7 @@ func (ir *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWrit
return nil
}

func (ir *imageRouter) postImagesPush(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
func (s *imageRouter) postImagesPush(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
metaHeaders := map[string][]string{}
for k, v := range r.Header {
if strings.HasPrefix(k, "X-Meta-") {
@@ -132,47 +98,32 @@ func (ir *imageRouter) postImagesPush(ctx context.Context, w http.ResponseWriter
if err := httputils.ParseForm(r); err != nil {
return err
}
authConfig := &types.AuthConfig{}

var authConfig *registry.AuthConfig
if authEncoded := r.Header.Get(registry.AuthHeader); authEncoded != "" {
// the new format is to handle the authConfig as a header. Ignore invalid
// AuthConfig to increase compatibility with the existing API.
authConfig, _ = registry.DecodeAuthConfig(authEncoded)
authEncoded := r.Header.Get("X-Registry-Auth")
if authEncoded != "" {
// the new format is to handle the authConfig as a header
authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil {
// to increase compatibility to existing api it is defaulting to be empty
authConfig = &types.AuthConfig{}
}
} else {
// the old format is supported for compatibility if there was no authConfig header
var err error
authConfig, err = registry.DecodeAuthConfigBody(r.Body)
if err != nil {
return errors.Wrap(err, "bad parameters and missing X-Registry-Auth")
if err := json.NewDecoder(r.Body).Decode(authConfig); err != nil {
return errors.Wrap(errdefs.InvalidParameter(err), "Bad parameters and missing X-Registry-Auth")
}
}

image := vars["name"]
tag := r.Form.Get("tag")

output := ioutils.NewWriteFlusher(w)
defer output.Close()

w.Header().Set("Content-Type", "application/json")

img := vars["name"]
tag := r.Form.Get("tag")

var ref reference.Named

// Tag is empty only in case ImagePushOptions.All is true.
if tag != "" {
r, err := httputils.RepoTagReference(img, tag)
if err != nil {
return errdefs.InvalidParameter(err)
}
ref = r
} else {
r, err := reference.ParseNormalizedNamed(img)
if err != nil {
return errdefs.InvalidParameter(err)
}
ref = r
}

if err := ir.backend.PushImage(ctx, ref, metaHeaders, authConfig, output); err != nil {
if err := s.backend.PushImage(ctx, image, tag, metaHeaders, authConfig, output); err != nil {
if !output.Flushed() {
return err
}
@@ -181,7 +132,7 @@ func (ir *imageRouter) postImagesPush(ctx context.Context, w http.ResponseWriter
return nil
}

func (ir *imageRouter) getImagesGet(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
func (s *imageRouter) getImagesGet(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
@@ -197,7 +148,7 @@ func (ir *imageRouter) getImagesGet(ctx context.Context, w http.ResponseWriter,
names = r.Form["names"]
}

if err := ir.backend.ExportImage(ctx, names, output); err != nil {
if err := s.backend.ExportImage(names, output); err != nil {
if !output.Flushed() {
return err
}
@@ -206,7 +157,7 @@ func (ir *imageRouter) getImagesGet(ctx context.Context, w http.ResponseWriter,
return nil
}

func (ir *imageRouter) postImagesLoad(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
func (s *imageRouter) postImagesLoad(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
@@ -216,7 +167,7 @@ func (ir *imageRouter) postImagesLoad(ctx context.Context, w http.ResponseWriter

output := ioutils.NewWriteFlusher(w)
defer output.Close()
if err := ir.backend.LoadImage(ctx, r.Body, output, quiet); err != nil {
if err := s.backend.LoadImage(r.Body, output, quiet); err != nil {
_, _ = output.Write(streamformatter.FormatError(err))
}
return nil
@@ -230,7 +181,7 @@ func (missingImageError) Error() string {

func (missingImageError) InvalidParameter() {}

func (ir *imageRouter) deleteImages(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
func (s *imageRouter) deleteImages(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
@@ -244,7 +195,7 @@ func (ir *imageRouter) deleteImages(ctx context.Context, w http.ResponseWriter,
force := httputils.BoolValue(r, "force")
prune := !httputils.BoolValue(r, "noprune")

list, err := ir.backend.ImageDelete(ctx, name, force, prune)
list, err := s.backend.ImageDelete(name, force, prune)
if err != nil {
return err
}
@@ -252,13 +203,13 @@ func (ir *imageRouter) deleteImages(ctx context.Context, w http.ResponseWriter,
return httputils.WriteJSON(w, http.StatusOK, list)
}

func (ir *imageRouter) getImagesByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
img, err := ir.backend.GetImage(ctx, vars["name"], opts.GetImageOpts{Details: true})
func (s *imageRouter) getImagesByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
image, err := s.backend.GetImage(vars["name"], nil)
if err != nil {
return err
}

imageInspect, err := ir.toImageInspect(img)
imageInspect, err := s.toImageInspect(image)
if err != nil {
return err
}
@@ -266,9 +217,11 @@ func (ir *imageRouter) getImagesByName(ctx context.Context, w http.ResponseWrite
return httputils.WriteJSON(w, http.StatusOK, imageInspect)
}

func (ir *imageRouter) toImageInspect(img *image.Image) (*types.ImageInspect, error) {
var repoTags, repoDigests []string
for _, ref := range img.Details.References {
func (s *imageRouter) toImageInspect(img *image.Image) (*types.ImageInspect, error) {
refs := s.referenceBackend.References(img.ID().Digest())
repoTags := []string{}
repoDigests := []string{}
for _, ref := range refs {
switch ref.(type) {
case reference.NamedTagged:
repoTags = append(repoTags, reference.FamiliarString(ref))
@@ -277,17 +230,30 @@ func (ir *imageRouter) toImageInspect(img *image.Image) (*types.ImageInspect, er
}
}

var size int64
var layerMetadata map[string]string
layerID := img.RootFS.ChainID()
if layerID != "" {
l, err := s.layerStore.Get(layerID)
if err != nil {
return nil, err
}
defer layer.ReleaseAndLog(s.layerStore, l)
size = l.Size()
layerMetadata, err = l.Metadata()
if err != nil {
return nil, err
}
}

comment := img.Comment
if len(comment) == 0 && len(img.History) > 0 {
comment = img.History[len(img.History)-1].Comment
}

// Make sure we output empty arrays instead of nil.
if repoTags == nil {
repoTags = []string{}
}
if repoDigests == nil {
repoDigests = []string{}
lastUpdated, err := s.imageStore.GetLastUpdated(img.ID())
if err != nil {
return nil, err
}

return &types.ImageInspect{
@@ -306,15 +272,15 @@ func (ir *imageRouter) toImageInspect(img *image.Image) (*types.ImageInspect, er
Variant: img.Variant,
Os: img.OperatingSystem(),
OsVersion: img.OSVersion,
Size: img.Details.Size,
VirtualSize: img.Details.Size, //nolint:staticcheck // ignore SA1019: field is deprecated, but still set on API < v1.44.
Size: size,
VirtualSize: size, // TODO: field unused, deprecate
GraphDriver: types.GraphDriverData{
Name: img.Details.Driver,
Data: img.Details.Metadata,
Name: s.layerStore.DriverName(),
Data: layerMetadata,
},
RootFS: rootFSToAPIType(img.RootFS),
Metadata: types.ImageMetadata{
LastTagTime: img.Details.LastUpdated,
LastTagTime: lastUpdated,
},
}, nil
}
@@ -330,7 +296,7 @@ func rootFSToAPIType(rootfs *image.RootFS) types.RootFS {
}
}

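toImageInspect above goes out of its way to return empty slices rather than nil, and getImagesJSON just below does the same for RepoTags and RepoDigests. The reason is JSON encoding:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// A nil slice marshals to JSON null; an empty slice marshals to [].
	// API clients expecting an array break on null, hence the handlers'
	// explicit []string{} replacements.
	var repoTags []string
	out, _ := json.Marshal(repoTags)
	fmt.Println(string(out)) // null

	repoTags = []string{}
	out, _ = json.Marshal(repoTags)
	fmt.Println(string(out)) // []
}
```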
func (ir *imageRouter) getImagesJSON(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
func (s *imageRouter) getImagesJSON(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
@@ -355,7 +321,7 @@ func (ir *imageRouter) getImagesJSON(ctx context.Context, w http.ResponseWriter,
sharedSize = httputils.BoolValue(r, "shared-size")
}

images, err := ir.backend.Images(ctx, types.ImageListOptions{
images, err := s.backend.Images(ctx, types.ImageListOptions{
All: httputils.BoolValue(r, "all"),
Filters: imageFilters,
SharedSize: sharedSize,
@@ -364,28 +330,12 @@ func (ir *imageRouter) getImagesJSON(ctx context.Context, w http.ResponseWriter,
return err
}

useNone := versions.LessThan(version, "1.43")
for _, img := range images {
if useNone {
if len(img.RepoTags) == 0 && len(img.RepoDigests) == 0 {
img.RepoTags = append(img.RepoTags, "<none>:<none>")
img.RepoDigests = append(img.RepoDigests, "<none>@<none>")
}
} else {
if img.RepoTags == nil {
img.RepoTags = []string{}
}
if img.RepoDigests == nil {
img.RepoDigests = []string{}
}
}
}

return httputils.WriteJSON(w, http.StatusOK, images)
}

func (ir *imageRouter) getImagesHistory(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
history, err := ir.backend.ImageHistory(ctx, vars["name"])
func (s *imageRouter) getImagesHistory(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
name := vars["name"]
history, err := s.backend.ImageHistory(name)
if err != nil {
return err
}
@@ -393,32 +343,40 @@ func (ir *imageRouter) getImagesHistory(ctx context.Context, w http.ResponseWrit
return httputils.WriteJSON(w, http.StatusOK, history)
}

func (ir *imageRouter) postImagesTag(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
func (s *imageRouter) postImagesTag(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}

ref, err := httputils.RepoTagReference(r.Form.Get("repo"), r.Form.Get("tag"))
if ref == nil || err != nil {
return errdefs.InvalidParameter(err)
}

img, err := ir.backend.GetImage(ctx, vars["name"], opts.GetImageOpts{})
if err != nil {
return errdefs.NotFound(err)
}

if err := ir.backend.TagImage(ctx, img.ID(), ref); err != nil {
if _, err := s.backend.TagImage(vars["name"], r.Form.Get("repo"), r.Form.Get("tag")); err != nil {
return err
}
w.WriteHeader(http.StatusCreated)
return nil
}

func (ir *imageRouter) getImagesSearch(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
func (s *imageRouter) getImagesSearch(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
var (
config *types.AuthConfig
authEncoded = r.Header.Get("X-Registry-Auth")
headers = map[string][]string{}
)

if authEncoded != "" {
authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
if err := json.NewDecoder(authJSON).Decode(&config); err != nil {
// for a search it is not an error if no auth was given
// to increase compatibility with the existing api it is defaulting to be empty
config = &types.AuthConfig{}
}
}
for k, v := range r.Header {
if strings.HasPrefix(k, "X-Meta-") {
headers[k] = v
}
}

var limit int
if r.Form.Get("limit") != "" {
@@ -433,26 +391,14 @@ func (ir *imageRouter) getImagesSearch(ctx context.Context, w http.ResponseWrite
return err
}

// For a search it is not an error if no auth was given. Ignore invalid
// AuthConfig to increase compatibility with the existing API.
authConfig, _ := registry.DecodeAuthConfig(r.Header.Get(registry.AuthHeader))

var headers = http.Header{}
for k, v := range r.Header {
k = http.CanonicalHeaderKey(k)
if strings.HasPrefix(k, "X-Meta-") {
headers[k] = v
}
}
headers.Set("User-Agent", dockerversion.DockerUserAgent(ctx))
res, err := ir.searcher.Search(ctx, searchFilters, r.Form.Get("term"), limit, authConfig, headers)
query, err := s.backend.SearchRegistryForImages(ctx, searchFilters, r.Form.Get("term"), limit, config, headers)
if err != nil {
return err
}
return httputils.WriteJSON(w, http.StatusOK, res)
return httputils.WriteJSON(w, http.StatusOK, query.Results)
}

func (ir *imageRouter) postImagesPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
func (s *imageRouter) postImagesPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
@@ -462,7 +408,7 @@ func (ir *imageRouter) postImagesPrune(ctx context.Context, w http.ResponseWrite
return err
}

pruneReport, err := ir.backend.ImagesPrune(ctx, pruneFilters)
pruneReport, err := s.backend.ImagesPrune(ctx, pruneFilters)
if err != nil {
return err
}

@@ -6,23 +6,22 @@ import (
|
||||
"net/http"
|
||||
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/docker/api/types"
|
||||
enginetypes "github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/docker/docker/api/types/registry"
|
||||
"github.com/docker/docker/plugin"
|
||||
)
|
||||
|
||||
// Backend for Plugin
|
||||
type Backend interface {
|
||||
Disable(name string, config *types.PluginDisableConfig) error
|
||||
Enable(name string, config *types.PluginEnableConfig) error
|
||||
List(filters.Args) ([]types.Plugin, error)
|
||||
Inspect(name string) (*types.Plugin, error)
|
||||
Remove(name string, config *types.PluginRmConfig) error
|
||||
Disable(name string, config *enginetypes.PluginDisableConfig) error
|
||||
Enable(name string, config *enginetypes.PluginEnableConfig) error
|
||||
List(filters.Args) ([]enginetypes.Plugin, error)
|
||||
Inspect(name string) (*enginetypes.Plugin, error)
|
||||
Remove(name string, config *enginetypes.PluginRmConfig) error
|
||||
Set(name string, args []string) error
|
||||
Privileges(ctx context.Context, ref reference.Named, metaHeaders http.Header, authConfig *registry.AuthConfig) (types.PluginPrivileges, error)
|
||||
Pull(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *registry.AuthConfig, privileges types.PluginPrivileges, outStream io.Writer, opts ...plugin.CreateOpt) error
|
||||
Push(ctx context.Context, name string, metaHeaders http.Header, authConfig *registry.AuthConfig, outStream io.Writer) error
|
||||
Upgrade(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *registry.AuthConfig, privileges types.PluginPrivileges, outStream io.Writer) error
|
||||
CreateFromContext(ctx context.Context, tarCtx io.ReadCloser, options *types.PluginCreateOptions) error
|
||||
Privileges(ctx context.Context, ref reference.Named, metaHeaders http.Header, authConfig *enginetypes.AuthConfig) (enginetypes.PluginPrivileges, error)
|
||||
Pull(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer, opts ...plugin.CreateOpt) error
|
||||
Push(ctx context.Context, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, outStream io.Writer) error
|
||||
Upgrade(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer) error
|
||||
CreateFromContext(ctx context.Context, tarCtx io.ReadCloser, options *enginetypes.PluginCreateOptions) error
|
||||
}
|
||||
|
||||
@@ -2,6 +2,8 @@ package plugin // import "github.com/docker/docker/api/server/router/plugin"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -10,13 +12,13 @@ import (
|
||||
"github.com/docker/docker/api/server/httputils"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/docker/docker/api/types/registry"
|
||||
"github.com/docker/docker/pkg/ioutils"
|
||||
"github.com/docker/docker/pkg/streamformatter"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func parseHeaders(headers http.Header) (map[string][]string, *registry.AuthConfig) {
|
||||
func parseHeaders(headers http.Header) (map[string][]string, *types.AuthConfig) {
|
||||
|
||||
metaHeaders := map[string][]string{}
|
||||
for k, v := range headers {
|
||||
if strings.HasPrefix(k, "X-Meta-") {
|
||||
@@ -24,8 +26,16 @@ func parseHeaders(headers http.Header) (map[string][]string, *registry.AuthConfi
|
||||
}
|
||||
}
|
||||
|
||||
// Ignore invalid AuthConfig to increase compatibility with the existing API.
|
||||
authConfig, _ := registry.DecodeAuthConfig(headers.Get(registry.AuthHeader))
|
||||
// Get X-Registry-Auth
|
||||
authEncoded := headers.Get("X-Registry-Auth")
|
||||
authConfig := &types.AuthConfig{}
|
||||
if authEncoded != "" {
|
||||
authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
|
||||
if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil {
|
||||
authConfig = &types.AuthConfig{}
|
||||
}
|
||||
}
|
||||
|
||||
return metaHeaders, authConfig
|
||||
}
|
||||
|
||||
|
||||
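Note: the removed branch of parseHeaders above decoded the X-Registry-Auth header by hand, while the new code delegates to registry.DecodeAuthConfig. A minimal standalone sketch of that legacy decoding, with a trimmed stand-in for types.AuthConfig and a made-up payload:

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"strings"
)

// AuthConfig is a trimmed stand-in for the real types.AuthConfig.
type AuthConfig struct {
	Username string `json:"username,omitempty"`
	Password string `json:"password,omitempty"`
}

// decodeAuth mirrors the removed branch: the header carries URL-safe base64
// wrapping a JSON document, and invalid payloads silently fall back to an
// empty config instead of failing the request.
func decodeAuth(encoded string) *AuthConfig {
	authConfig := &AuthConfig{}
	if encoded == "" {
		return authConfig
	}
	authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(encoded))
	if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil {
		authConfig = &AuthConfig{}
	}
	return authConfig
}

func main() {
	payload := base64.URLEncoding.EncodeToString([]byte(`{"username":"jane","password":"s3cret"}`))
	fmt.Printf("%+v\n", decodeAuth(payload))
}
```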
@@ -12,7 +12,7 @@ import (
type Backend interface {
Init(req types.InitRequest) (string, error)
Join(req types.JoinRequest) error
Leave(ctx context.Context, force bool) error
Leave(force bool) error
Inspect() (types.Swarm, error)
Update(uint64, types.Spec, types.UpdateFlags) error
GetUnlockKey() (string, error)

@@ -10,7 +10,6 @@ import (
basictypes "github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/registry"
types "github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/errdefs"
@@ -36,7 +35,7 @@ func (sr *swarmRouter) initCluster(ctx context.Context, w http.ResponseWriter, r
}
nodeID, err := sr.backend.Init(req)
if err != nil {
logrus.WithContext(ctx).WithError(err).Debug("Error initializing swarm")
logrus.Errorf("Error initializing swarm: %v", err)
return err
}
return httputils.WriteJSON(w, http.StatusOK, nodeID)
@@ -56,13 +55,13 @@ func (sr *swarmRouter) leaveCluster(ctx context.Context, w http.ResponseWriter,
}

force := httputils.BoolValue(r, "force")
return sr.backend.Leave(ctx, force)
return sr.backend.Leave(force)
}

func (sr *swarmRouter) inspectCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
swarm, err := sr.backend.Inspect()
if err != nil {
logrus.WithContext(ctx).WithError(err).Debug("Error getting swarm")
logrus.Errorf("Error getting swarm: %v", err)
return err
}

@@ -114,7 +113,7 @@ func (sr *swarmRouter) updateCluster(ctx context.Context, w http.ResponseWriter,
}

if err := sr.backend.Update(version, swarm, flags); err != nil {
logrus.WithContext(ctx).WithError(err).Debug("Error configuring swarm")
logrus.Errorf("Error configuring swarm: %v", err)
return err
}
return nil
@@ -127,7 +126,7 @@ func (sr *swarmRouter) unlockCluster(ctx context.Context, w http.ResponseWriter,
}

if err := sr.backend.UnlockSwarm(req); err != nil {
logrus.WithContext(ctx).WithError(err).Debug("Error unlocking swarm")
logrus.Errorf("Error unlocking swarm: %v", err)
return err
}
return nil
@@ -136,7 +135,7 @@ func (sr *swarmRouter) unlockCluster(ctx context.Context, w http.ResponseWriter,
func (sr *swarmRouter) getUnlockKey(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
unlockKey, err := sr.backend.GetUnlockKey()
if err != nil {
logrus.WithContext(ctx).WithError(err).Debug("Error retrieving swarm unlock key")
logrus.WithError(err).Errorf("Error retrieving swarm unlock key")
return err
}

@@ -168,7 +167,7 @@ func (sr *swarmRouter) getServices(ctx context.Context, w http.ResponseWriter, r

services, err := sr.backend.GetServices(basictypes.ServiceListOptions{Filters: filter, Status: status})
if err != nil {
logrus.WithContext(ctx).WithError(err).Debug("Error getting services")
logrus.Errorf("Error getting services: %v", err)
return err
}

@@ -194,10 +193,7 @@ func (sr *swarmRouter) getService(ctx context.Context, w http.ResponseWriter, r

service, err := sr.backend.GetService(vars["id"], insertDefaults)
if err != nil {
logrus.WithContext(ctx).WithFields(logrus.Fields{
"error": err,
"service-id": vars["id"],
}).Debug("Error getting service")
logrus.Errorf("Error getting service %s: %v", vars["id"], err)
return err
}

@@ -211,7 +207,7 @@ func (sr *swarmRouter) createService(ctx context.Context, w http.ResponseWriter,
}

// Get returns "" if the header does not exist
encodedAuth := r.Header.Get(registry.AuthHeader)
encodedAuth := r.Header.Get("X-Registry-Auth")
queryRegistry := false
if v := httputils.VersionFromContext(ctx); v != "" {
if versions.LessThan(v, "1.30") {
@@ -221,10 +217,7 @@ func (sr *swarmRouter) createService(ctx context.Context, w http.ResponseWriter,
}
resp, err := sr.backend.CreateService(service, encodedAuth, queryRegistry)
if err != nil {
logrus.WithContext(ctx).WithFields(logrus.Fields{
"error": err,
"service-name": service.Name,
}).Debug("Error creating service")
logrus.Errorf("Error creating service %s: %v", service.Name, err)
return err
}

@@ -247,7 +240,7 @@ func (sr *swarmRouter) updateService(ctx context.Context, w http.ResponseWriter,
var flags basictypes.ServiceUpdateOptions

// Get returns "" if the header does not exist
flags.EncodedRegistryAuth = r.Header.Get(registry.AuthHeader)
flags.EncodedRegistryAuth = r.Header.Get("X-Registry-Auth")
flags.RegistryAuthFrom = r.URL.Query().Get("registryAuthFrom")
flags.Rollback = r.URL.Query().Get("rollback")
queryRegistry := false
@@ -260,10 +253,7 @@ func (sr *swarmRouter) updateService(ctx context.Context, w http.ResponseWriter,

resp, err := sr.backend.UpdateService(vars["id"], version, service, flags, queryRegistry)
if err != nil {
logrus.WithContext(ctx).WithFields(logrus.Fields{
"error": err,
"service-id": vars["id"],
}).Debug("Error updating service")
logrus.Errorf("Error updating service %s: %v", vars["id"], err)
return err
}
return httputils.WriteJSON(w, http.StatusOK, resp)
@@ -271,10 +261,7 @@ func (sr *swarmRouter) updateService(ctx context.Context, w http.ResponseWriter,

func (sr *swarmRouter) removeService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := sr.backend.RemoveService(vars["id"]); err != nil {
logrus.WithContext(ctx).WithFields(logrus.Fields{
"error": err,
"service-id": vars["id"],
}).Debug("Error removing service")
logrus.Errorf("Error removing service %s: %v", vars["id"], err)
return err
}
return nil
@@ -315,7 +302,7 @@ func (sr *swarmRouter) getNodes(ctx context.Context, w http.ResponseWriter, r *h

nodes, err := sr.backend.GetNodes(basictypes.NodeListOptions{Filters: filter})
if err != nil {
logrus.WithContext(ctx).WithError(err).Debug("Error getting nodes")
logrus.Errorf("Error getting nodes: %v", err)
return err
}

@@ -325,10 +312,7 @@ func (sr *swarmRouter) getNodes(ctx context.Context, w http.ResponseWriter, r *h
func (sr *swarmRouter) getNode(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
node, err := sr.backend.GetNode(vars["id"])
if err != nil {
logrus.WithContext(ctx).WithFields(logrus.Fields{
"error": err,
"node-id": vars["id"],
}).Debug("Error getting node")
logrus.Errorf("Error getting node %s: %v", vars["id"], err)
return err
}

@@ -349,10 +333,7 @@ func (sr *swarmRouter) updateNode(ctx context.Context, w http.ResponseWriter, r
}

if err := sr.backend.UpdateNode(vars["id"], version, node); err != nil {
logrus.WithContext(ctx).WithFields(logrus.Fields{
"error": err,
"node-id": vars["id"],
}).Debug("Error updating node")
logrus.Errorf("Error updating node %s: %v", vars["id"], err)
return err
}
return nil
@@ -366,10 +347,7 @@ func (sr *swarmRouter) removeNode(ctx context.Context, w http.ResponseWriter, r
force := httputils.BoolValue(r, "force")

if err := sr.backend.RemoveNode(vars["id"], force); err != nil {
logrus.WithContext(ctx).WithFields(logrus.Fields{
"error": err,
"node-id": vars["id"],
}).Debug("Error removing node")
logrus.Errorf("Error removing node %s: %v", vars["id"], err)
return err
}
return nil
@@ -386,7 +364,7 @@ func (sr *swarmRouter) getTasks(ctx context.Context, w http.ResponseWriter, r *h

tasks, err := sr.backend.GetTasks(basictypes.TaskListOptions{Filters: filter})
if err != nil {
logrus.WithContext(ctx).WithError(err).Debug("Error getting tasks")
logrus.Errorf("Error getting tasks: %v", err)
return err
}

@@ -396,10 +374,7 @@ func (sr *swarmRouter) getTasks(ctx context.Context, w http.ResponseWriter, r *h
func (sr *swarmRouter) getTask(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
task, err := sr.backend.GetTask(vars["id"])
if err != nil {
logrus.WithContext(ctx).WithFields(logrus.Fields{
"error": err,
"task-id": vars["id"],
}).Debug("Error getting task")
logrus.Errorf("Error getting task %s: %v", vars["id"], err)
return err
}
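Throughout the swarm router hunks above, printf-style logrus.Errorf calls are replaced with context-aware structured logging at debug level. A small self-contained sketch of the two styles, using placeholder values:

```go
package main

import (
	"context"
	"errors"

	"github.com/sirupsen/logrus"
)

func main() {
	ctx := context.Background()
	err := errors.New("node unavailable") // placeholder error
	id := "abc123"                        // placeholder node ID

	// Old style: formatted message, logged at error level.
	logrus.Errorf("Error getting node %s: %v", id, err)

	// New style: structured fields tied to the request context, at debug level.
	logrus.WithContext(ctx).WithFields(logrus.Fields{
		"error":   err,
		"node-id": id,
	}).Debug("Error getting node")
}
```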
@@ -7,7 +7,6 @@ import (
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/events"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/registry"
"github.com/docker/docker/api/types/swarm"
)

@@ -31,7 +30,7 @@ type Backend interface {
SystemDiskUsage(ctx context.Context, opts DiskUsageOptions) (*types.DiskUsage, error)
SubscribeToEvents(since, until time.Time, ef filters.Args) ([]events.Message, chan interface{})
UnsubscribeFromEvents(chan interface{})
AuthenticateToRegistry(ctx context.Context, authConfig *registry.AuthConfig) (string, string, error)
AuthenticateToRegistry(ctx context.Context, authConfig *types.AuthConfig) (string, string, error)
}

// ClusterBackend is all the methods that need to be implemented

@@ -1,6 +1,3 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
//go:build go1.19

package system // import "github.com/docker/docker/api/server/router/system"

import (

@@ -116,7 +116,7 @@ func (s *systemRouter) getDiskUsage(ctx context.Context, w http.ResponseWriter,
var getContainers, getImages, getVolumes, getBuildCache bool
typeStrs, ok := r.Form["type"]
if versions.LessThan(version, "1.42") || !ok {
getContainers, getImages, getVolumes, getBuildCache = true, true, true, s.builder != nil
getContainers, getImages, getVolumes, getBuildCache = true, true, true, true
} else {
for _, typ := range typeStrs {
switch types.DiskUsageObject(typ) {
@@ -290,7 +290,7 @@ func (s *systemRouter) getEvents(ctx context.Context, w http.ResponseWriter, r *
}

func (s *systemRouter) postAuth(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
var config *registry.AuthConfig
var config *types.AuthConfig
err := json.NewDecoder(r.Body).Decode(&config)
r.Body.Close()
if err != nil {

@@ -159,29 +159,25 @@ func (v *volumeRouter) deleteVolumes(ctx context.Context, w http.ResponseWriter,
}
force := httputils.BoolValue(r, "force")

// First we try deleting local volume. The volume may not be found as a
// local volume, but could be a cluster volume, so we ignore "not found"
// errors at this stage. Note that no "not found" error is produced if
// "force" is enabled.
err := v.backend.Remove(ctx, vars["name"], opts.WithPurgeOnError(force))
if err != nil && !errdefs.IsNotFound(err) {
return err
}
version := httputils.VersionFromContext(ctx)

// If no volume was found, the volume may be a cluster volume. If force
// is enabled, the volume backend won't return an error for non-existing
// volumes, so we don't know if removal succeeded (or not volume existed).
// In that case we always try to delete cluster volumes as well.
if errdefs.IsNotFound(err) || force {
version := httputils.VersionFromContext(ctx)
err := v.backend.Remove(ctx, vars["name"], opts.WithPurgeOnError(force))
// when a removal is forced, if the volume does not exist, no error will be
// returned. this means that to ensure forcing works on swarm volumes as
// well, we should always also force remove against the cluster.
if err != nil || force {
if versions.GreaterThanOrEqualTo(version, clusterVolumesVersion) && v.cluster.IsManager() {
err = v.cluster.RemoveVolume(vars["name"], force)
if errdefs.IsNotFound(err) || force {
err := v.cluster.RemoveVolume(vars["name"], force)
if err != nil {
return err
}
}
} else {
return err
}
}

if err != nil {
return err
}
w.WriteHeader(http.StatusNoContent)
return nil
}
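The comments in the deleteVolumes hunk describe a two-step removal: try the local backend, tolerate "not found", then fall through to the cluster. A condensed sketch of that control flow under stated assumptions: localRemove, clusterRemove, and isNotFound are hypothetical stand-ins for the volume backend, the cluster client, and errdefs.IsNotFound, and the API-version gate is omitted:

```go
package volumes

import "errors"

var errNotFound = errors.New("volume not found")

func isNotFound(err error) bool { return errors.Is(err, errNotFound) }

// removeVolume sketches the flow: local removal first, then the cluster as a
// fallback when the volume was not found locally or removal was forced.
func removeVolume(name string, force, isManager bool,
	localRemove, clusterRemove func(name string, force bool) error) error {
	// Try the local backend first; "not found" is tolerated because the
	// volume may be a cluster volume instead.
	err := localRemove(name, force)
	if err != nil && !isNotFound(err) {
		return err
	}
	// With force enabled the backend reports no error for missing volumes,
	// so the cluster path must be attempted in that case as well.
	if (isNotFound(err) || force) && isManager {
		return clusterRemove(name, force)
	}
	return err
}
```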
@@ -2,7 +2,11 @@ package server // import "github.com/docker/docker/api/server"

import (
"context"
"crypto/tls"
"net"
"net/http"
"strings"
"time"

"github.com/docker/docker/api/server/httpstatus"
"github.com/docker/docker/api/server/httputils"
@@ -18,17 +22,104 @@ import (
// when a request is about to be served.
const versionMatcher = "/v{version:[0-9.]+}"

// Config provides the configuration for the API server
type Config struct {
CorsHeaders string
Version string
SocketGroup string
TLSConfig *tls.Config
// Hosts is a list of addresses for the API to listen on.
Hosts []string
}

// Server contains instance details for the server
type Server struct {
cfg *Config
servers []*HTTPServer
routers []router.Router
middlewares []middleware.Middleware
}

// New returns a new instance of the server based on the specified configuration.
// It allocates resources which will be needed for ServeAPI(ports, unix-sockets).
func New(cfg *Config) *Server {
return &Server{
cfg: cfg,
}
}

// UseMiddleware appends a new middleware to the request chain.
// This needs to be called before the API routes are configured.
func (s *Server) UseMiddleware(m middleware.Middleware) {
s.middlewares = append(s.middlewares, m)
}

// Accept sets a listener the server accepts connections into.
func (s *Server) Accept(addr string, listeners ...net.Listener) {
for _, listener := range listeners {
httpServer := &HTTPServer{
srv: &http.Server{
Addr: addr,
ReadHeaderTimeout: 5 * time.Minute, // "G112: Potential Slowloris Attack (gosec)"; not a real concern for our use, so setting a long timeout.
},
l: listener,
}
s.servers = append(s.servers, httpServer)
}
}

// Close closes servers and thus stop receiving requests
func (s *Server) Close() {
for _, srv := range s.servers {
if err := srv.Close(); err != nil {
logrus.Error(err)
}
}
}

// serveAPI loops through all initialized servers and spawns goroutine
// with Serve method for each. It sets createMux() as Handler also.
func (s *Server) serveAPI() error {
var chErrors = make(chan error, len(s.servers))
for _, srv := range s.servers {
srv.srv.Handler = s.createMux()
go func(srv *HTTPServer) {
var err error
logrus.Infof("API listen on %s", srv.l.Addr())
if err = srv.Serve(); err != nil && strings.Contains(err.Error(), "use of closed network connection") {
err = nil
}
chErrors <- err
}(srv)
}

for range s.servers {
err := <-chErrors
if err != nil {
return err
}
}
return nil
}

// HTTPServer contains an instance of http server and the listener.
// srv *http.Server, contains configuration to create an http server and a mux router with all api end points.
// l net.Listener, is a TCP or Socket listener that dispatches incoming request to the router.
type HTTPServer struct {
srv *http.Server
l net.Listener
}

// Serve starts listening for inbound requests.
func (s *HTTPServer) Serve() error {
return s.srv.Serve(s.l)
}

// Close closes the HTTPServer from listening for the inbound requests.
func (s *HTTPServer) Close() error {
return s.l.Close()
}

func (s *Server) makeHTTPHandler(handler httputils.APIFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
// Define the context that we'll pass around to share info
@@ -60,6 +151,12 @@ func (s *Server) makeHTTPHandler(handler httputils.APIFunc) http.HandlerFunc {
}
}

// InitRouter initializes the list of routers for the server.
// This method also enables the Go profiler.
func (s *Server) InitRouter(routers ...router.Router) {
s.routers = append(s.routers, routers...)
}

type pageNotFoundError struct{}

func (pageNotFoundError) Error() string {
@@ -68,12 +165,12 @@ func (pageNotFoundError) Error() string {

func (pageNotFoundError) NotFound() {}

// CreateMux returns a new mux with all the routers registered.
func (s *Server) CreateMux(routers ...router.Router) *mux.Router {
// createMux initializes the main router the server uses.
func (s *Server) createMux() *mux.Router {
m := mux.NewRouter()

logrus.Debug("Registering routers")
for _, apiRouter := range routers {
for _, apiRouter := range s.routers {
for _, r := range apiRouter.Routes() {
f := s.makeHTTPHandler(r.Handler())

@@ -84,6 +181,7 @@ func (s *Server) CreateMux(routers ...router.Router) *mux.Router {
}

debugRouter := debug.NewRouter()
s.routers = append(s.routers, debugRouter)
for _, r := range debugRouter.Routes() {
f := s.makeHTTPHandler(r.Handler())
m.Path("/debug" + r.Path()).Handler(f)
@@ -96,3 +194,15 @@ func (s *Server) CreateMux(routers ...router.Router) *mux.Router {

return m
}

// Wait blocks the server goroutine until it exits.
// It sends an error message if there is any error during
// the API execution.
func (s *Server) Wait(waitChan chan error) {
if err := s.serveAPI(); err != nil {
logrus.Errorf("ServeAPI error: %v", err)
waitChan <- err
return
}
waitChan <- nil
}

@@ -13,7 +13,12 @@ import (
)

func TestMiddlewares(t *testing.T) {
srv := &Server{}
cfg := &Config{
Version: "0.1omega2",
}
srv := &Server{
cfg: cfg,
}

srv.UseMiddleware(middleware.NewVersionMiddleware("0.1omega2", api.DefaultVersion, api.MinVersion))
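The restored server.Config/Server API above composes as New, Accept, InitRouter, then Wait. A hypothetical wiring-up using only the methods shown in this hunk; the address is a placeholder, and the example assumes the github.com/docker/docker/api/server package is importable in-tree:

```go
package main

import (
	"net"

	"github.com/docker/docker/api/server"
	"github.com/sirupsen/logrus"
)

func main() {
	srv := server.New(&server.Config{Version: "1.42"})

	l, err := net.Listen("tcp", "127.0.0.1:2375") // placeholder address
	if err != nil {
		logrus.Fatal(err)
	}
	srv.Accept("127.0.0.1:2375", l)
	srv.InitRouter() // real callers pass one or more router.Router values

	// Wait blocks until serveAPI returns; run it in a goroutine and collect
	// the result over a channel, as the daemon does.
	waitChan := make(chan error)
	go srv.Wait(waitChan)
	if err := <-waitChan; err != nil {
		logrus.Fatal(err)
	}
}
```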
159
api/swagger.yaml
@@ -19,10 +19,10 @@ produces:
consumes:
- "application/json"
- "text/plain"
basePath: "/v1.43"
basePath: "/v1.42"
info:
title: "Docker Engine API"
version: "1.43"
version: "1.42"
x-logo:
url: "https://docs.docker.com/assets/images/logo-docker-main.png"
description: |
@@ -55,8 +55,8 @@ info:
the URL is not supported by the daemon, a HTTP `400 Bad Request` error message
is returned.

If you omit the version-prefix, the current version of the API (v1.43) is used.
For example, calling `/info` is the same as calling `/v1.43/info`. Using the
If you omit the version-prefix, the current version of the API (v1.42) is used.
For example, calling `/info` is the same as calling `/v1.42/info`. Using the
API without a version-prefix is deprecated and will be removed in a future release.

Engine releases in the near future should support this version of the API,
@@ -976,13 +976,6 @@ definitions:
items:
type: "integer"
minimum: 0
Annotations:
type: "object"
description: |
Arbitrary non-identifying metadata attached to container and
provided to the runtime when the container is started.
additionalProperties:
type: "string"

# Applicable to UNIX platforms
CapAdd:
@@ -1129,7 +1122,6 @@ definitions:
remapping option is enabled.
ShmSize:
type: "integer"
format: "int64"
description: |
Size of `/dev/shm` in bytes. If omitted, the system uses 64MB.
minimum: 0
@@ -1618,34 +1610,6 @@ definitions:
"WorkDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/work"
}

FilesystemChange:
description: |
Change in the container's filesystem.
type: "object"
required: [Path, Kind]
properties:
Path:
description: |
Path to file or directory that has changed.
type: "string"
x-nullable: false
Kind:
$ref: "#/definitions/ChangeType"

ChangeType:
description: |
Kind of change

Can be one of:

- `0`: Modified ("C")
- `1`: Added ("A")
- `2`: Deleted ("D")
type: "integer"
format: "uint8"
enum: [0, 1, 2]
x-nullable: false

ImageInspect:
description: |
Information about an image in the local image cache.
@@ -1782,14 +1746,15 @@ definitions:
Total size of the image including all layers it is composed of.

In versions of Docker before v1.10, this field was calculated from
the image itself and all of its parent images. Images are now stored
self-contained, and no longer use a parent-chain, making this field
an equivalent of the Size field.
the image itself and all of its parent images. Docker v1.10 and up
store images self-contained, and no longer use a parent-chain, making
this field an equivalent of the Size field.

> **Deprecated**: this field is kept for backward compatibility, but
> will be removed in API v1.44.
This field is kept for backward compatibility, but may be removed in
a future version of the API.
type: "integer"
format: "int64"
x-nullable: false
example: 1239828
GraphDriver:
$ref: "#/definitions/GraphDriverData"
@@ -1837,6 +1802,7 @@ definitions:
- Created
- Size
- SharedSize
- VirtualSize
- Labels
- Containers
properties:
@@ -1922,17 +1888,19 @@ definitions:
x-nullable: false
example: 1239828
VirtualSize:
description: |-
description: |
Total size of the image including all layers it is composed of.

In versions of Docker before v1.10, this field was calculated from
the image itself and all of its parent images. Images are now stored
self-contained, and no longer use a parent-chain, making this field
an equivalent of the Size field.
the image itself and all of its parent images. Docker v1.10 and up
store images self-contained, and no longer use a parent-chain, making
this field an equivalent of the Size field.

Deprecated: this field is kept for backward compatibility, and will be removed in API v1.44.
This field is kept for backward compatibility, but may be removed in
a future version of the API.
type: "integer"
format: "int64"
x-nullable: false
example: 172064416
Labels:
description: "User-defined key/value metadata."
@@ -2375,8 +2343,6 @@ definitions:
type: "string"
error:
type: "string"
errorDetail:
$ref: "#/definitions/ErrorDetail"
status:
type: "string"
progress:
@@ -3011,6 +2977,8 @@ definitions:
Name: "journald"
- Type: "Log"
Name: "json-file"
- Type: "Log"
Name: "logentries"
- Type: "Log"
Name: "splunk"
- Type: "Log"
@@ -4682,8 +4650,7 @@ definitions:
example: false
OOMKilled:
description: |
Whether a process within this container has been killed because it ran
out of memory since the container was last started.
Whether this container has been killed because it ran out of memory.
type: "boolean"
example: false
Dead:
@@ -5066,7 +5033,7 @@ definitions:
Go runtime (`GOOS`).

Currently returned values are "linux" and "windows". A full list of
possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment).
possible values can be found in the [Go documentation](https://golang.org/doc/install/source#environment).
type: "string"
example: "linux"
Architecture:
@@ -5074,7 +5041,7 @@ definitions:
Hardware architecture of the host, as returned by the Go runtime
(`GOARCH`).

A full list of possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment).
A full list of possible values can be found in the [Go documentation](https://golang.org/doc/install/source#environment).
type: "string"
example: "x86_64"
NCPU:
@@ -5160,8 +5127,42 @@ definitions:
ServerVersion:
description: |
Version string of the daemon.

> **Note**: the [standalone Swarm API](https://docs.docker.com/swarm/swarm-api/)
> returns the Swarm version instead of the daemon version, for example
> `swarm/1.2.8`.
type: "string"
example: "24.0.2"
example: "17.06.0-ce"
ClusterStore:
description: |
URL of the distributed storage backend.

The storage backend is used for multihost networking (to store
network and endpoint information) and by the node discovery mechanism.

<p><br /></p>

> **Deprecated**: This field is only propagated when using standalone Swarm
> mode, and overlay networking using an external k/v store. Overlay
> networks with Swarm mode enabled use the built-in raft store, and
> this field will be empty.
type: "string"
example: "consul://consul.corp.example.com:8600/some/path"
ClusterAdvertise:
description: |
The network endpoint that the Engine advertises for the purpose of
node discovery. ClusterAdvertise is a `host:port` combination on which
the daemon is reachable by other hosts.

<p><br /></p>

> **Deprecated**: This field is only propagated when using standalone Swarm
> mode, and overlay networking using an external k/v store. Overlay
> networks with Swarm mode enabled use the built-in raft store, and
> this field will be empty.
type: "string"
example: "node5.corp.example.com:8000"
Runtimes:
description: |
List of [OCI compliant](https://github.com/opencontainers/runtime-spec)
@@ -5239,8 +5240,7 @@ definitions:
SecurityOptions:
description: |
List of security features that are enabled on the daemon, such as
apparmor, seccomp, SELinux, user-namespaces (userns), rootless and
no-new-privileges.
apparmor, seccomp, SELinux, user-namespaces (userns), and rootless.

Additional configuration options for each security feature may
be present, and are included as a comma-separated list of key/value
@@ -5332,7 +5332,7 @@ definitions:
type: "array"
items:
type: "string"
example: ["awslogs", "fluentd", "gcplogs", "gelf", "journald", "json-file", "splunk", "syslog"]
example: ["awslogs", "fluentd", "gcplogs", "gelf", "journald", "json-file", "logentries", "splunk", "syslog"]

RegistryServiceConfig:
@@ -6873,9 +6873,9 @@ paths:
Returns which files in a container's filesystem have been added, deleted,
or modified. The `Kind` of modification can be one of:

- `0`: Modified ("C")
- `1`: Added ("A")
- `2`: Deleted ("D")
- `0`: Modified
- `1`: Added
- `2`: Deleted
operationId: "ContainerChanges"
produces: ["application/json"]
responses:
@@ -6884,7 +6884,22 @@ paths:
schema:
type: "array"
items:
$ref: "#/definitions/FilesystemChange"
type: "object"
x-go-name: "ContainerChangeResponseItem"
title: "ContainerChangeResponseItem"
description: "change item in response to ContainerChanges operation"
required: [Path, Kind]
properties:
Path:
description: "Path to file that has changed"
type: "string"
x-nullable: false
Kind:
description: "Kind of change"
type: "integer"
format: "uint8"
enum: [0, 1, 2]
x-nullable: false
examples:
application/json:
- Path: "/dev"
@@ -8211,7 +8226,7 @@ paths:

Available filters:

- `until=<timestamp>` remove cache older than `<timestamp>`. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon's local time.
- `until=<duration>`: duration relative to daemon's time, during which build cache was not used, in Go's duration format (e.g., '24h')
- `id=<id>`
- `parent=<id>`
- `type=<string>`
@@ -8710,10 +8725,6 @@ paths:
IdentityToken: "9cbaf023786cd7..."
204:
description: "No error"
401:
description: "Auth error"
schema:
$ref: "#/definitions/ErrorResponse"
500:
description: "Server error"
schema:
@@ -9894,9 +9905,7 @@ paths:
Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30"
Warning: ""
403:
description: |
Forbidden operation. This happens when trying to create a network named after a pre-defined network,
or when trying to create an overlay network on a daemon which is not part of a Swarm cluster.
description: "operation not supported for pre-defined networks"
schema:
$ref: "#/definitions/ErrorResponse"
404:
@@ -10359,12 +10368,6 @@ paths:
default if omitted.
required: true
type: "string"
- name: "force"
in: "query"
description: |
Force disable a plugin even if still in use.
required: false
type: "boolean"
tags: ["Plugin"]
/plugins/{name}/upgrade:
post:

@@ -1,7 +1,22 @@
package types // import "github.com/docker/docker/api/types"
import "github.com/docker/docker/api/types/registry"

// AuthConfig contains authorization information for connecting to a Registry.
//
// Deprecated: use github.com/docker/docker/api/types/registry.AuthConfig
type AuthConfig = registry.AuthConfig
// AuthConfig contains authorization information for connecting to a Registry
type AuthConfig struct {
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
Auth string `json:"auth,omitempty"`

// Email is an optional value associated with the username.
// This field is deprecated and will be removed in a later
// version of docker.
Email string `json:"email,omitempty"`

ServerAddress string `json:"serveraddress,omitempty"`

// IdentityToken is used to authenticate the user and get
// an access token for the registry.
IdentityToken string `json:"identitytoken,omitempty"`

// RegistryToken is a bearer token to be sent to a registry
RegistryToken string `json:"registrytoken,omitempty"`
}
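The left-hand side of the auth.go hunk replaces the struct with a type alias, which is what keeps this change non-breaking: an alias and its target are the same type, so existing callers compile unchanged. A minimal illustration with stand-in types (RegistryAuthConfig plays the role of registry.AuthConfig):

```go
package main

import "fmt"

// RegistryAuthConfig stands in for registry.AuthConfig.
type RegistryAuthConfig struct {
	Username string
	Password string
}

// AuthConfig mirrors the deprecated alias in api/types.
type AuthConfig = RegistryAuthConfig

func takesNew(c RegistryAuthConfig) { fmt.Println(c.Username) }

func main() {
	old := AuthConfig{Username: "jane"}
	takesNew(old) // no conversion needed: alias and target are identical types
}
```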
@@ -5,7 +5,6 @@ import (
"io"
"time"

"github.com/docker/distribution/reference"
"github.com/docker/docker/api/types/container"
)

@@ -103,7 +102,8 @@ type ExecProcessConfig struct {
// CreateImageConfig is the configuration for creating an image from a
// container.
type CreateImageConfig struct {
Tag reference.NamedTagged
Repo string
Tag string
Pause bool
Author string
Comment string

@@ -4,9 +4,8 @@ import (
"io"

"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/registry"
"github.com/docker/docker/pkg/streamformatter"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
specs "github.com/opencontainers/image-spec/specs-go/v1"
)

// PullOption defines different modes for accessing images
@@ -40,7 +39,7 @@ type BuildConfig struct {
// GetImageAndLayerOptions are the options supported by GetImageAndReleasableLayer
type GetImageAndLayerOptions struct {
PullOption PullOption
AuthConfig map[string]registry.AuthConfig
AuthConfig map[string]types.AuthConfig
Output io.Writer
Platform *ocispec.Platform
Platform *specs.Platform
}

@@ -7,7 +7,6 @@ import (

"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/registry"
units "github.com/docker/go-units"
)

@@ -181,7 +180,7 @@ type ImageBuildOptions struct {
// at all (nil). See the parsing of buildArgs in
// api/server/router/build/build_routes.go for even more info.
BuildArgs map[string]*string
AuthConfigs map[string]registry.AuthConfig
AuthConfigs map[string]AuthConfig
Context io.Reader
Labels map[string]string
// squash the resulting image's layers to the parent

@@ -3,7 +3,7 @@ package types // import "github.com/docker/docker/api/types"
import (
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/network"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
specs "github.com/opencontainers/image-spec/specs-go/v1"
)

// configs holds structs used for internal communication between the
@@ -16,7 +16,7 @@ type ContainerCreateConfig struct {
Config *container.Config
HostConfig *container.HostConfig
NetworkingConfig *network.NetworkingConfig
Platform *ocispec.Platform
Platform *specs.Platform
AdjustCPUShares bool
}

@@ -1,6 +0,0 @@
package container

// ContainerChangeResponseItem change item in response to ContainerChanges operation
//
// Deprecated: use [FilesystemChange].
type ContainerChangeResponseItem = FilesystemChange
@@ -1,15 +0,0 @@
package container

// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command

// ChangeType Kind of change
//
// Can be one of:
//
// - `0`: Modified ("C")
// - `1`: Added ("A")
// - `2`: Deleted ("D")
//
// swagger:model ChangeType
type ChangeType uint8
@@ -1,23 +0,0 @@
package container

const (
// ChangeModify represents the modify operation.
ChangeModify ChangeType = 0
// ChangeAdd represents the add operation.
ChangeAdd ChangeType = 1
// ChangeDelete represents the delete operation.
ChangeDelete ChangeType = 2
)

func (ct ChangeType) String() string {
switch ct {
case ChangeModify:
return "C"
case ChangeAdd:
return "A"
case ChangeDelete:
return "D"
default:
return ""
}
}
20
api/types/container/container_changes.go
Normal file
@@ -0,0 +1,20 @@
package container // import "github.com/docker/docker/api/types/container"

// ----------------------------------------------------------------------------
// Code generated by `swagger generate operation`. DO NOT EDIT.
//
// See hack/generate-swagger-api.sh
// ----------------------------------------------------------------------------

// ContainerChangeResponseItem change item in response to ContainerChanges operation
// swagger:model ContainerChangeResponseItem
type ContainerChangeResponseItem struct {

// Kind of change
// Required: true
Kind uint8 `json:"Kind"`

// Path to file that has changed
// Required: true
Path string `json:"Path"`
}
16
api/types/container/deprecated.go
Normal file
@@ -0,0 +1,16 @@
package container // import "github.com/docker/docker/api/types/container"

// ContainerCreateCreatedBody OK response to ContainerCreate operation
//
// Deprecated: use CreateResponse
type ContainerCreateCreatedBody = CreateResponse

// ContainerWaitOKBody OK response to ContainerWait operation
//
// Deprecated: use WaitResponse
type ContainerWaitOKBody = WaitResponse

// ContainerWaitOKBodyError container waiting error, if any
//
// Deprecated: use WaitExitError
type ContainerWaitOKBodyError = WaitExitError
@@ -1,19 +0,0 @@
package container

// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command

// FilesystemChange Change in the container's filesystem.
//
// swagger:model FilesystemChange
type FilesystemChange struct {

// kind
// Required: true
Kind ChangeType `json:"Kind"`

// Path to file or directory that has changed.
//
// Required: true
Path string `json:"Path"`
}
@@ -101,8 +101,7 @@ func (n IpcMode) IsShareable() bool {

// IsContainer indicates whether the container uses another container's ipc namespace.
func (n IpcMode) IsContainer() bool {
_, ok := containerID(string(n))
return ok
return strings.HasPrefix(string(n), string(IPCModeContainer)+":")
}

// IsNone indicates whether container IpcMode is set to "none".
@@ -117,14 +116,15 @@ func (n IpcMode) IsEmpty() bool {

// Valid indicates whether the ipc mode is valid.
func (n IpcMode) Valid() bool {
// TODO(thaJeztah): align with PidMode, and consider container-mode without a container name/ID to be invalid.
return n.IsEmpty() || n.IsNone() || n.IsPrivate() || n.IsHost() || n.IsShareable() || n.IsContainer()
}

// Container returns the name of the container ipc stack is going to be used.
func (n IpcMode) Container() (idOrName string) {
idOrName, _ = containerID(string(n))
return idOrName
func (n IpcMode) Container() string {
if n.IsContainer() {
return strings.TrimPrefix(string(n), string(IPCModeContainer)+":")
}
return ""
}

// NetworkMode represents the container network stack.
@@ -147,14 +147,17 @@ func (n NetworkMode) IsPrivate() bool {

// IsContainer indicates whether container uses a container network stack.
func (n NetworkMode) IsContainer() bool {
_, ok := containerID(string(n))
return ok
parts := strings.SplitN(string(n), ":", 2)
return len(parts) > 1 && parts[0] == "container"
}

// ConnectedContainer is the id of the container which network this container is connected to.
func (n NetworkMode) ConnectedContainer() (idOrName string) {
idOrName, _ = containerID(string(n))
return idOrName
func (n NetworkMode) ConnectedContainer() string {
parts := strings.SplitN(string(n), ":", 2)
if len(parts) > 1 {
return parts[1]
}
return ""
}

// UserDefined indicates user-created network
@@ -175,12 +178,18 @@ func (n UsernsMode) IsHost() bool {

// IsPrivate indicates whether the container uses the a private userns.
func (n UsernsMode) IsPrivate() bool {
return !n.IsHost()
return !(n.IsHost())
}

// Valid indicates whether the userns is valid.
func (n UsernsMode) Valid() bool {
return n == "" || n.IsHost()
parts := strings.Split(string(n), ":")
switch mode := parts[0]; mode {
case "", "host":
default:
return false
}
return true
}

// CgroupSpec represents the cgroup to use for the container.
@@ -188,20 +197,22 @@ type CgroupSpec string

// IsContainer indicates whether the container is using another container cgroup
func (c CgroupSpec) IsContainer() bool {
_, ok := containerID(string(c))
return ok
parts := strings.SplitN(string(c), ":", 2)
return len(parts) > 1 && parts[0] == "container"
}

// Valid indicates whether the cgroup spec is valid.
func (c CgroupSpec) Valid() bool {
// TODO(thaJeztah): align with PidMode, and consider container-mode without a container name/ID to be invalid.
return c == "" || c.IsContainer()
return c.IsContainer() || c == ""
}

// Container returns the ID or name of the container whose cgroup will be used.
func (c CgroupSpec) Container() (idOrName string) {
idOrName, _ = containerID(string(c))
return idOrName
// Container returns the name of the container whose cgroup will be used.
func (c CgroupSpec) Container() string {
parts := strings.SplitN(string(c), ":", 2)
if len(parts) > 1 {
return parts[1]
}
return ""
}

// UTSMode represents the UTS namespace of the container.
@@ -209,7 +220,7 @@ type UTSMode string

// IsPrivate indicates whether the container uses its private UTS namespace.
func (n UTSMode) IsPrivate() bool {
return !n.IsHost()
return !(n.IsHost())
}

// IsHost indicates whether the container uses the host's UTS namespace.
@@ -219,7 +230,13 @@ func (n UTSMode) IsHost() bool {

// Valid indicates whether the UTS namespace is valid.
func (n UTSMode) Valid() bool {
return n == "" || n.IsHost()
parts := strings.Split(string(n), ":")
switch mode := parts[0]; mode {
case "", "host":
default:
return false
}
return true
}

// PidMode represents the pid namespace of the container.
@@ -237,19 +254,32 @@ func (n PidMode) IsHost() bool {

// IsContainer indicates whether the container uses a container's pid namespace.
func (n PidMode) IsContainer() bool {
_, ok := containerID(string(n))
return ok
parts := strings.SplitN(string(n), ":", 2)
return len(parts) > 1 && parts[0] == "container"
}

// Valid indicates whether the pid namespace is valid.
func (n PidMode) Valid() bool {
return n == "" || n.IsHost() || validContainer(string(n))
parts := strings.Split(string(n), ":")
switch mode := parts[0]; mode {
case "", "host":
case "container":
if len(parts) != 2 || parts[1] == "" {
return false
}
default:
return false
}
return true
}

// Container returns the name of the container whose pid namespace is going to be used.
func (n PidMode) Container() (idOrName string) {
idOrName, _ = containerID(string(n))
return idOrName
func (n PidMode) Container() string {
parts := strings.SplitN(string(n), ":", 2)
if len(parts) > 1 {
return parts[1]
}
return ""
}

// DeviceRequest represents a request for devices from a device driver.
@@ -378,17 +408,16 @@ type UpdateConfig struct {
// Portable information *should* appear in Config.
type HostConfig struct {
// Applicable to all platforms
Binds []string // List of volume bindings for this container
ContainerIDFile string // File (path) where the containerId is written
LogConfig LogConfig // Configuration of the logs for this container
NetworkMode NetworkMode // Network mode to use for the container
PortBindings nat.PortMap // Port mapping between the exposed port (container) and the host
RestartPolicy RestartPolicy // Restart policy to be used for the container
AutoRemove bool // Automatically remove container when it exits
VolumeDriver string // Name of the volume driver used to mount volumes
VolumesFrom []string // List of volumes to take from other container
ConsoleSize [2]uint // Initial console size (height,width)
Annotations map[string]string `json:",omitempty"` // Arbitrary non-identifying metadata attached to container and provided to the runtime
Binds []string // List of volume bindings for this container
ContainerIDFile string // File (path) where the containerId is written
LogConfig LogConfig // Configuration of the logs for this container
NetworkMode NetworkMode // Network mode to use for the container
PortBindings nat.PortMap // Port mapping between the exposed port (container) and the host
RestartPolicy RestartPolicy // Restart policy to be used for the container
AutoRemove bool // Automatically remove container when it exits
VolumeDriver string // Name of the volume driver used to mount volumes
VolumesFrom []string // List of volumes to take from other container
ConsoleSize [2]uint // Initial console size (height,width)

// Applicable to UNIX platforms
CapAdd strslice.StrSlice // List of kernel capabilities to add to the container
@@ -434,23 +463,3 @@ type HostConfig struct {
// Run a custom init inside the container, if null, use the daemon's configured settings
Init *bool `json:",omitempty"`
}

// containerID splits "container:<ID|name>" values. It returns the container
// ID or name, and whether an ID/name was found. It returns an empty string and
// a "false" if the value does not have a "container:" prefix. Further validation
// of the returned, including checking if the value is empty, should be handled
// by the caller.
func containerID(val string) (idOrName string, ok bool) {
k, v, hasSep := strings.Cut(val, ":")
if !hasSep || k != "container" {
return "", false
}
return v, true
}

// validContainer checks if the given value is a "container:" mode with
// a non-empty name/ID.
func validContainer(val string) bool {
id, ok := containerID(val)
return ok && id != ""
}
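The new containerID/validContainer helpers centralize the "container:<ID|name>" parsing that the right-hand sides above did with strings.SplitN. A runnable sketch, copied in shape from the hunk, with a small demonstration of representative inputs:

```go
package main

import (
	"fmt"
	"strings"
)

// containerID splits "container:<ID|name>" values, as in the hunk above.
func containerID(val string) (idOrName string, ok bool) {
	k, v, hasSep := strings.Cut(val, ":")
	if !hasSep || k != "container" {
		return "", false
	}
	return v, true
}

// validContainer additionally requires a non-empty name/ID.
func validContainer(val string) bool {
	id, ok := containerID(val)
	return ok && id != ""
}

func main() {
	for _, v := range []string{"container:web", "container:", "host", ""} {
		id, ok := containerID(v)
		fmt.Printf("%-15q -> id=%q ok=%v valid=%v\n", v, id, ok, validContainer(v))
	}
}
```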
@@ -1,232 +0,0 @@
//go:build !windows
// +build !windows

package container

import (
"testing"

"gotest.tools/v3/assert"
is "gotest.tools/v3/assert/cmp"
)

func TestCgroupnsMode(t *testing.T) {
modes := map[CgroupnsMode]struct{ valid, private, host, empty bool }{
"": {valid: true, empty: true},
":": {valid: false},
"something": {valid: false},
"something:": {valid: false},
"something:weird": {valid: false},
":weird": {valid: false},
"host": {valid: true, host: true},
"host:": {valid: false},
"host:name": {valid: false},
"private": {valid: true, private: true},
"private:name": {valid: false, private: false},
}
for mode, expected := range modes {
t.Run("mode="+string(mode), func(t *testing.T) {
assert.Check(t, is.Equal(mode.IsPrivate(), expected.private))
assert.Check(t, is.Equal(mode.IsHost(), expected.host))
assert.Check(t, is.Equal(mode.IsEmpty(), expected.empty))
assert.Check(t, is.Equal(mode.Valid(), expected.valid))
})
}
}

func TestCgroupSpec(t *testing.T) {
modes := map[CgroupSpec]struct {
valid bool
private bool
host bool
container bool
shareable bool
ctrName string
}{
"": {valid: true},
":": {valid: false},
"something": {valid: false},
"something:": {valid: false},
"something:weird": {valid: false},
":weird": {valid: false},
"container": {valid: false},
"container:": {valid: true, container: true, ctrName: ""},
"container:name": {valid: true, container: true, ctrName: "name"},
"container:name1:name2": {valid: true, container: true, ctrName: "name1:name2"},
}

for mode, expected := range modes {
t.Run("mode="+string(mode), func(t *testing.T) {
assert.Check(t, is.Equal(mode.Valid(), expected.valid))
assert.Check(t, is.Equal(mode.IsContainer(), expected.container))
assert.Check(t, is.Equal(mode.Container(), expected.ctrName))
})
}
}

// TODO Windows: This will need addressing for a Windows daemon.
func TestNetworkMode(t *testing.T) {
// TODO(thaJeztah): we should consider the cases with a colon (":") in the network name to be invalid.
modes := map[NetworkMode]struct {
private, bridge, host, container, none, isDefault bool
name, ctrName string
}{
"": {private: true, name: ""},
":": {private: true, name: ":"},
"something": {private: true, name: "something"},
"something:": {private: true, name: "something:"},
"something:weird": {private: true, name: "something:weird"},
":weird": {private: true, name: ":weird"},
"bridge": {private: true, bridge: true, name: "bridge"},
"host": {private: false, host: true, name: "host"},
"none": {private: true, none: true, name: "none"},
"default": {private: true, isDefault: true, name: "default"},
"container": {private: true, container: false, name: "container", ctrName: ""},
"container:": {private: false, container: true, name: "container", ctrName: ""},
"container:name": {private: false, container: true, name: "container", ctrName: "name"},
"container:name1:name2": {private: false, container: true, name: "container", ctrName: "name1:name2"},
}
for mode, expected := range modes {
t.Run("mode="+string(mode), func(t *testing.T) {
assert.Check(t, is.Equal(mode.IsPrivate(), expected.private))
assert.Check(t, is.Equal(mode.IsBridge(), expected.bridge))
assert.Check(t, is.Equal(mode.IsHost(), expected.host))
assert.Check(t, is.Equal(mode.IsContainer(), expected.container))
assert.Check(t, is.Equal(mode.IsNone(), expected.none))
assert.Check(t, is.Equal(mode.IsDefault(), expected.isDefault))
assert.Check(t, is.Equal(mode.NetworkName(), expected.name))
assert.Check(t, is.Equal(mode.ConnectedContainer(), expected.ctrName))
})
}
}

func TestIpcMode(t *testing.T) {
ipcModes := map[IpcMode]struct {
valid bool
private bool
host bool
container bool
shareable bool
ctrName string
}{
"": {valid: true},
":": {valid: false},
"something": {valid: false},
"something:": {valid: false},
"something:weird": {valid: false},
":weird": {valid: false},
"private": {valid: true, private: true},
"host": {valid: true, host: true},
"host:": {valid: false},
"host:name": {valid: false},
"container": {valid: false},
"container:": {valid: true, container: true, ctrName: ""},
"container:name": {valid: true, container: true, ctrName: "name"},
"container:name1:name2": {valid: true, container: true, ctrName: "name1:name2"},
"shareable": {valid: true, shareable: true},
}

for mode, expected := range ipcModes {
t.Run("mode="+string(mode), func(t *testing.T) {
assert.Check(t, is.Equal(mode.Valid(), expected.valid))
assert.Check(t, is.Equal(mode.IsPrivate(), expected.private))
assert.Check(t, is.Equal(mode.IsHost(), expected.host))
assert.Check(t, is.Equal(mode.IsContainer(), expected.container))
assert.Check(t, is.Equal(mode.IsShareable(), expected.shareable))
assert.Check(t, is.Equal(mode.Container(), expected.ctrName))
})
}
}

func TestUTSMode(t *testing.T) {
modes := map[UTSMode]struct{ valid, private, host bool }{
"": {valid: true, private: true},
":": {valid: false, private: true},
"something": {valid: false, private: true},
"something:": {valid: false, private: true},
"something:weird": {valid: false, private: true},
":weird": {valid: false, private: true},
"host": {valid: true, private: false, host: true},
"host:": {valid: false, private: true},
"host:name": {valid: false, private: true},
}
for mode, expected := range modes {
t.Run("mode="+string(mode), func(t *testing.T) {
assert.Check(t, is.Equal(mode.IsPrivate(), expected.private))
assert.Check(t, is.Equal(mode.IsHost(), expected.host))
assert.Check(t, is.Equal(mode.Valid(), expected.valid))
})

}
}

func TestUsernsMode(t *testing.T) {
modes := map[UsernsMode]struct{ valid, private, host bool }{
"": {valid: true, private: true},
":": {valid: false, private: true},
"something": {valid: false, private: true},
"something:": {valid: false, private: true},
"something:weird": {valid: false, private: true},
":weird": {valid: false, private: true},
"host": {valid: true, private: false, host: true},
"host:": {valid: false, private: true},
"host:name": {valid: false, private: true},
}
for mode, expected := range modes {
t.Run("mode="+string(mode), func(t *testing.T) {
assert.Check(t, is.Equal(mode.Valid(), expected.valid))
assert.Check(t, is.Equal(mode.IsPrivate(), expected.private))
assert.Check(t, is.Equal(mode.IsHost(), expected.host))
})
}
}

func TestPidMode(t *testing.T) {
modes := map[PidMode]struct {
valid bool
private bool
host bool
container bool
ctrName string
}{
"": {valid: true, private: true},
":": {valid: false, private: true},
"something": {valid: false, private: true},
"something:": {valid: false, private: true},
"something:weird": {valid: false, private: true},
":weird": {valid: false, private: true},
"host": {valid: true, private: false, host: true},
"host:": {valid: false, private: true},
"host:name": {valid: false, private: true},
"container": {valid: false, private: true},
"container:": {valid: false, private: false, container: true, ctrName: ""},
"container:name": {valid: true, private: false, container: true, ctrName: "name"},
"container:name1:name2": {valid: true, private: false, container: true, ctrName: "name1:name2"},
}
for mode, expected := range modes {
t.Run("mode="+string(mode), func(t *testing.T) {
assert.Check(t, is.Equal(mode.Valid(), expected.valid))
assert.Check(t, is.Equal(mode.IsPrivate(), expected.private))
||||
assert.Check(t, is.Equal(mode.IsHost(), expected.host))
|
||||
assert.Check(t, is.Equal(mode.IsContainer(), expected.container))
|
||||
assert.Check(t, is.Equal(mode.Container(), expected.ctrName))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestRestartPolicy(t *testing.T) {
|
||||
policies := map[RestartPolicy]struct{ none, always, onFailure bool }{
|
||||
{Name: "", MaximumRetryCount: 0}: {none: true, always: false, onFailure: false},
|
||||
{Name: "something", MaximumRetryCount: 0}: {none: false, always: false, onFailure: false},
|
||||
{Name: "no", MaximumRetryCount: 0}: {none: true, always: false, onFailure: false},
|
||||
{Name: "always", MaximumRetryCount: 0}: {none: false, always: true, onFailure: false},
|
||||
{Name: "on-failure", MaximumRetryCount: 0}: {none: false, always: false, onFailure: true},
|
||||
}
|
||||
for policy, expected := range policies {
|
||||
t.Run("policy="+policy.Name, func(t *testing.T) {
|
||||
assert.Check(t, is.Equal(policy.IsNone(), expected.none))
|
||||
assert.Check(t, is.Equal(policy.IsAlways(), expected.always))
|
||||
assert.Check(t, is.Equal(policy.IsOnFailure(), expected.onFailure))
|
||||
})
|
||||
}
|
||||
}
|
||||
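For context (not part of the diff): these mode types are thin wrappers over strings of the form "kind[:value]", and the accessors above imply a split on the first colon. A hedged sketch of that decomposition (the helper name is made up):

package main

import (
	"fmt"
	"strings"
)

// splitMode illustrates how a mode such as "container:name1:name2" can be
// decomposed: everything after the first colon is the connected container.
func splitMode(mode string) (kind, value string) {
	kind, value, _ = strings.Cut(mode, ":")
	return kind, value
}

func main() {
	kind, value := splitMode("container:name1:name2")
	fmt.Println(kind)  // container
	fmt.Println(value) // name1:name2, matching the ctrName cases above
}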
api/types/deprecated.go (new file, 14 lines)
@@ -0,0 +1,14 @@
package types // import "github.com/docker/docker/api/types"

import "github.com/docker/docker/api/types/volume"

// Volume volume
//
// Deprecated: use github.com/docker/docker/api/types/volume.Volume
type Volume = volume.Volume

// VolumeUsageData Usage details about the volume. This information is used by the
// `GET /system/df` endpoint, and omitted in other endpoints.
//
// Deprecated: use github.com/docker/docker/api/types/volume.UsageData
type VolumeUsageData = volume.UsageData
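For context (not part of the diff): because these are type aliases rather than distinct defined types, the old and new names are interchangeable at compile time. A minimal sketch:

package main

import (
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/volume"
)

func main() {
	v := volume.Volume{Name: "data"}
	// Legal with no conversion: types.Volume is an alias, not a new type.
	var deprecated types.Volume = v
	_ = deprecated
}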
@@ -1,37 +0,0 @@
package filters

import "fmt"

// invalidFilter indicates that the provided filter or its value is invalid
type invalidFilter struct {
	Filter string
	Value  []string
}

func (e invalidFilter) Error() string {
	msg := "invalid filter"
	if e.Filter != "" {
		msg += " '" + e.Filter
		if e.Value != nil {
			msg = fmt.Sprintf("%s=%s", msg, e.Value)
		}
		msg += "'"
	}
	return msg
}

// InvalidParameter marks this error as ErrInvalidParameter
func (e invalidFilter) InvalidParameter() {}

// unreachableCode is an error indicating that the code path was not expected to be reached.
type unreachableCode struct {
	Filter string
	Value  []string
}

// System marks this error as ErrSystem
func (e unreachableCode) System() {}

func (e unreachableCode) Error() string {
	return fmt.Sprintf("unreachable code reached for filter: %q with values: %s", e.Filter, e.Value)
}
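For context (not part of the diff): InvalidParameter and System are marker methods; Moby's errdefs helpers identify an error's class by its method set rather than its concrete type. A hedged sketch of how such a marker is detected:

package main

import (
	"errors"
	"fmt"
)

// invalidParameter mirrors the marker-interface convention used above.
type invalidParameter interface {
	InvalidParameter()
}

type badFilter struct{ name string }

func (e badFilter) Error() string   { return "invalid filter '" + e.name + "'" }
func (badFilter) InvalidParameter() {}

func main() {
	err := fmt.Errorf("validating: %w", badFilter{name: "dangling"})
	var ip invalidParameter
	// errors.As unwraps the chain, so wrapped marker errors still match.
	fmt.Println(errors.As(err, &ip)) // true
}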
@@ -1,5 +1,4 @@
package filters // import "github.com/docker/docker/api/types/filters"
import "fmt"

func ExampleArgs_MatchKVList() {
	args := NewArgs(
@@ -7,29 +6,19 @@ func ExampleArgs_MatchKVList() {
		Arg("label", "state=running"))

	// returns true because there are no values for bogus
	b := args.MatchKVList("bogus", nil)
	fmt.Println(b)
	args.MatchKVList("bogus", nil)

	// returns false because there are no sources
	b = args.MatchKVList("label", nil)
	fmt.Println(b)
	args.MatchKVList("label", nil)

	// returns true because all sources are matched
	b = args.MatchKVList("label", map[string]string{
	args.MatchKVList("label", map[string]string{
		"image": "foo",
		"state": "running",
	})
	fmt.Println(b)

	// returns false because the values do not match
	b = args.MatchKVList("label", map[string]string{
	args.MatchKVList("label", map[string]string{
		"image": "other",
	})
	fmt.Println(b)

	// Output:
	// true
	// false
	// true
	// false
}
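For context (not part of the diff): `go test` only verifies an Example function when it prints and declares expected output, which is what the fmt.Println and // Output: lines above toggle. A minimal sketch of the pattern (the example name is hypothetical):

package filters

import "fmt"

// ExampleArgs_Get is checked by `go test`: stdout is compared against
// the // Output: comment below.
func ExampleArgs_Get() {
	args := NewArgs(Arg("label", "state=running"))
	fmt.Println(args.Get("label"))
	// Output: [state=running]
}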
@@ -10,6 +10,7 @@ import (
	"strings"

	"github.com/docker/docker/api/types/versions"
	"github.com/pkg/errors"
)

// Args stores a mapping of keys to a set of multiple values.
@@ -49,7 +50,7 @@ func (args Args) Keys() []string {
// MarshalJSON returns a JSON byte representation of the Args
func (args Args) MarshalJSON() ([]byte, error) {
	if len(args.fields) == 0 {
		return []byte("{}"), nil
		return []byte{}, nil
	}
	return json.Marshal(args.fields)
}
@@ -98,7 +99,7 @@ func FromJSON(p string) (Args, error) {
	// Fallback to parsing arguments in the legacy slice format
	deprecated := map[string][]string{}
	if legacyErr := json.Unmarshal(raw, &deprecated); legacyErr != nil {
		return args, &invalidFilter{}
		return args, invalidFilter{errors.Wrap(err, "invalid filter")}
	}

	args.fields = deprecatedArgs(deprecated)
@@ -107,6 +108,9 @@ func FromJSON(p string) (Args, error) {

// UnmarshalJSON populates the Args from JSON encode bytes
func (args Args) UnmarshalJSON(raw []byte) error {
	if len(raw) == 0 {
		return nil
	}
	return json.Unmarshal(raw, &args.fields)
}

@@ -162,13 +166,13 @@ func (args Args) MatchKVList(key string, sources map[string]string) bool {
	}

	for value := range fieldValues {
		testK, testV, hasValue := strings.Cut(value, "=")
		testKV := strings.SplitN(value, "=", 2)

		v, ok := sources[testK]
		v, ok := sources[testKV[0]]
		if !ok {
			return false
		}
		if hasValue && testV != v {
		if len(testKV) == 2 && testKV[1] != v {
			return false
		}
	}
@@ -195,38 +199,6 @@ func (args Args) Match(field, source string) bool {
	return false
}

// GetBoolOrDefault returns a boolean value of the key if the key is present
// and is interpretable as a boolean value. Otherwise the default value is returned.
// Error is not nil only if the filter values are not valid boolean or are conflicting.
func (args Args) GetBoolOrDefault(key string, defaultValue bool) (bool, error) {
	fieldValues, ok := args.fields[key]

	if !ok {
		return defaultValue, nil
	}

	if len(fieldValues) == 0 {
		return defaultValue, &invalidFilter{key, nil}
	}

	isFalse := fieldValues["0"] || fieldValues["false"]
	isTrue := fieldValues["1"] || fieldValues["true"]

	conflicting := isFalse && isTrue
	invalid := !isFalse && !isTrue

	if conflicting || invalid {
		return defaultValue, &invalidFilter{key, args.Get(key)}
	} else if isFalse {
		return false, nil
	} else if isTrue {
		return true, nil
	}

	// This code shouldn't be reached.
	return defaultValue, &unreachableCode{Filter: key, Value: args.Get(key)}
}

// ExactMatch returns true if the source matches exactly one of the values.
func (args Args) ExactMatch(key, source string) bool {
	fieldValues, ok := args.fields[key]
@@ -277,12 +249,20 @@ func (args Args) Contains(field string) bool {
	return ok
}

type invalidFilter struct{ error }

func (e invalidFilter) Error() string {
	return e.error.Error()
}

func (invalidFilter) InvalidParameter() {}

// Validate compares the set of accepted keys against the keys in the mapping.
// An error is returned if any mapping keys are not in the accepted set.
func (args Args) Validate(accepted map[string]bool) error {
	for name := range args.fields {
		if !accepted[name] {
			return &invalidFilter{name, nil}
			return invalidFilter{errors.New("invalid filter '" + name + "'")}
		}
	}
	return nil
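For context (not part of the diff): a short usage sketch of GetBoolOrDefault as defined above (the filter values are made up):

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/filters"
)

func main() {
	args := filters.NewArgs(filters.Arg("dangling", "true"))
	dangling, err := args.GetBoolOrDefault("dangling", false)
	fmt.Println(dangling, err) // true <nil>

	// Missing key: the default is returned with a nil error.
	all, err := args.GetBoolOrDefault("all", true)
	fmt.Println(all, err) // true <nil>

	// Conflicting values ("true" and "false") yield the default plus
	// an InvalidParameter error.
	args.Add("dangling", "false")
	_, err = args.GetBoolOrDefault("dangling", false)
	fmt.Println(err != nil) // true
}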
@@ -1,36 +1,13 @@
package filters // import "github.com/docker/docker/api/types/filters"

import (
	"encoding/json"
	"errors"
	"fmt"
	"sort"
	"testing"

	"gotest.tools/v3/assert"
	is "gotest.tools/v3/assert/cmp"
)

func TestMarshalJSON(t *testing.T) {
	fields := map[string]map[string]bool{
		"created":    {"today": true},
		"image.name": {"ubuntu*": true, "*untu": true},
	}
	a := Args{fields: fields}

	_, err := a.MarshalJSON()
	if err != nil {
		t.Errorf("failed to marshal the filters: %s", err)
	}
}

func TestMarshalJSONWithEmpty(t *testing.T) {
	_, err := json.Marshal(NewArgs())
	if err != nil {
		t.Errorf("failed to marshal the filters: %s", err)
	}
}

func TestToJSON(t *testing.T) {
	fields := map[string]map[string]bool{
		"created": {"today": true},
@@ -96,19 +73,15 @@ func TestFromJSON(t *testing.T) {
		if err == nil {
			t.Fatalf("Expected an error with %v, got nothing", invalid)
		}
		var invalidFilterError *invalidFilter
		var invalidFilterError invalidFilter
		if !errors.As(err, &invalidFilterError) {
			t.Fatalf("Expected an invalidFilter error, got %T", err)
		}
		wrappedErr := fmt.Errorf("something went wrong: %w", err)
		if !errors.Is(wrappedErr, err) {
			t.Errorf("Expected a wrapped error to be detected as invalidFilter")
		}
	}

	for expectedArgs, matchers := range valid {
		for _, jsonString := range matchers {
			args, err := FromJSON(jsonString)
		for _, json := range matchers {
			args, err := FromJSON(json)
			if err != nil {
				t.Fatal(err)
			}
@@ -363,13 +336,9 @@ func TestValidate(t *testing.T) {
	if err == nil {
		t.Fatal("Expected to return an error, got nil")
	}
	var invalidFilterError *invalidFilter
	var invalidFilterError invalidFilter
	if !errors.As(err, &invalidFilterError) {
		t.Errorf("Expected an invalidFilter error, got %T", err)
	}
	wrappedErr := fmt.Errorf("something went wrong: %w", err)
	if !errors.Is(wrappedErr, err) {
		t.Errorf("Expected a wrapped error to be detected as invalidFilter")
		t.Fatalf("Expected an invalidFilter error, got %T", err)
	}
}
@@ -428,121 +397,3 @@ func TestClone(t *testing.T) {
	f2.Add("baz", "qux")
	assert.Check(t, is.Len(f.Get("baz"), 0))
}

func TestGetBoolOrDefault(t *testing.T) {
	for _, tc := range []struct {
		name          string
		args          map[string][]string
		defValue      bool
		expectedErr   error
		expectedValue bool
	}{
		{
			name: "single true",
			args: map[string][]string{
				"dangling": {"true"},
			},
			defValue:      false,
			expectedErr:   nil,
			expectedValue: true,
		},
		{
			name: "single false",
			args: map[string][]string{
				"dangling": {"false"},
			},
			defValue:      true,
			expectedErr:   nil,
			expectedValue: false,
		},
		{
			name: "single bad value",
			args: map[string][]string{
				"dangling": {"potato"},
			},
			defValue:      true,
			expectedErr:   &invalidFilter{Filter: "dangling", Value: []string{"potato"}},
			expectedValue: true,
		},
		{
			name: "two bad values",
			args: map[string][]string{
				"dangling": {"banana", "potato"},
			},
			defValue:      true,
			expectedErr:   &invalidFilter{Filter: "dangling", Value: []string{"banana", "potato"}},
			expectedValue: true,
		},
		{
			name: "two conflicting values",
			args: map[string][]string{
				"dangling": {"false", "true"},
			},
			defValue:      false,
			expectedErr:   &invalidFilter{Filter: "dangling", Value: []string{"false", "true"}},
			expectedValue: false,
		},
		{
			name: "multiple conflicting values",
			args: map[string][]string{
				"dangling": {"false", "true", "1"},
			},
			defValue:      true,
			expectedErr:   &invalidFilter{Filter: "dangling", Value: []string{"false", "true", "1"}},
			expectedValue: true,
		},
		{
			name: "1 means true",
			args: map[string][]string{
				"dangling": {"1"},
			},
			defValue:      false,
			expectedErr:   nil,
			expectedValue: true,
		},
		{
			name: "0 means false",
			args: map[string][]string{
				"dangling": {"0"},
			},
			defValue:      true,
			expectedErr:   nil,
			expectedValue: false,
		},
	} {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			a := NewArgs()

			for key, values := range tc.args {
				for _, value := range values {
					a.Add(key, value)
				}
			}

			value, err := a.GetBoolOrDefault("dangling", tc.defValue)

			if tc.expectedErr == nil {
				assert.Check(t, is.Nil(err))
			} else {
				assert.Check(t, is.ErrorType(err, tc.expectedErr))

				// Check if error is the same.
				expected := tc.expectedErr.(*invalidFilter)
				actual := err.(*invalidFilter)

				assert.Check(t, is.Equal(expected.Filter, actual.Filter))

				sort.Strings(expected.Value)
				sort.Strings(actual.Value)
				assert.Check(t, is.DeepEqual(expected.Value, actual.Value))

				wrappedErr := fmt.Errorf("something went wrong: %w", err)
				assert.Check(t, errors.Is(wrappedErr, err), "Expected a wrapped error to be detected as invalidFilter")
			}

			assert.Check(t, is.Equal(tc.expectedValue, value))
		})
	}
}
@@ -1,9 +0,0 @@
package image

import ocispec "github.com/opencontainers/image-spec/specs-go/v1"

// GetImageOpts holds parameters to inspect an image.
type GetImageOpts struct {
	Platform *ocispec.Platform
	Details  bool
}
@@ -85,10 +85,13 @@ type ImageSummary struct {
	// Total size of the image including all layers it is composed of.
	//
	// In versions of Docker before v1.10, this field was calculated from
	// the image itself and all of its parent images. Images are now stored
	// self-contained, and no longer use a parent-chain, making this field
	// an equivalent of the Size field.
	// the image itself and all of its parent images. Docker v1.10 and up
	// store images self-contained, and no longer use a parent-chain, making
	// this field an equivalent of the Size field.
	//
	// Deprecated: this field is kept for backward compatibility, and will be removed in API v1.44.
	VirtualSize int64 `json:"VirtualSize,omitempty"`
	// This field is kept for backward compatibility, but may be removed in
	// a future version of the API.
	//
	// Required: true
	VirtualSize int64 `json:"VirtualSize"`
}
@@ -1,99 +0,0 @@
package registry // import "github.com/docker/docker/api/types/registry"
import (
	"encoding/base64"
	"encoding/json"
	"io"
	"strings"

	"github.com/pkg/errors"
)

// AuthHeader is the name of the header used to send encoded registry
// authorization credentials for registry operations (push/pull).
const AuthHeader = "X-Registry-Auth"

// AuthConfig contains authorization information for connecting to a Registry.
type AuthConfig struct {
	Username string `json:"username,omitempty"`
	Password string `json:"password,omitempty"`
	Auth     string `json:"auth,omitempty"`

	// Email is an optional value associated with the username.
	// This field is deprecated and will be removed in a later
	// version of docker.
	Email string `json:"email,omitempty"`

	ServerAddress string `json:"serveraddress,omitempty"`

	// IdentityToken is used to authenticate the user and get
	// an access token for the registry.
	IdentityToken string `json:"identitytoken,omitempty"`

	// RegistryToken is a bearer token to be sent to a registry
	RegistryToken string `json:"registrytoken,omitempty"`
}

// EncodeAuthConfig serializes the auth configuration as a base64url encoded
// (RFC4648, section 5) JSON string for sending through the X-Registry-Auth header.
//
// For details on base64url encoding, see:
// - RFC4648, section 5: https://tools.ietf.org/html/rfc4648#section-5
func EncodeAuthConfig(authConfig AuthConfig) (string, error) {
	buf, err := json.Marshal(authConfig)
	if err != nil {
		return "", errInvalidParameter{err}
	}
	return base64.URLEncoding.EncodeToString(buf), nil
}

// DecodeAuthConfig decodes base64url encoded (RFC4648, section 5) JSON
// authentication information as sent through the X-Registry-Auth header.
//
// This function always returns an AuthConfig, even if an error occurs. It is up
// to the caller to decide if authentication is required, and if the error can
// be ignored.
//
// For details on base64url encoding, see:
// - RFC4648, section 5: https://tools.ietf.org/html/rfc4648#section-5
func DecodeAuthConfig(authEncoded string) (*AuthConfig, error) {
	if authEncoded == "" {
		return &AuthConfig{}, nil
	}

	authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
	return decodeAuthConfigFromReader(authJSON)
}

// DecodeAuthConfigBody decodes authentication information as sent as JSON in the
// body of a request. This function is to provide backward compatibility with old
// clients and API versions. Current clients and API versions expect authentication
// to be provided through the X-Registry-Auth header.
//
// Like DecodeAuthConfig, this function always returns an AuthConfig, even if an
// error occurs. It is up to the caller to decide if authentication is required,
// and if the error can be ignored.
func DecodeAuthConfigBody(rdr io.ReadCloser) (*AuthConfig, error) {
	return decodeAuthConfigFromReader(rdr)
}

func decodeAuthConfigFromReader(rdr io.Reader) (*AuthConfig, error) {
	authConfig := &AuthConfig{}
	if err := json.NewDecoder(rdr).Decode(authConfig); err != nil {
		// always return an (empty) AuthConfig to increase compatibility with
		// the existing API.
		return &AuthConfig{}, invalid(err)
	}
	return authConfig, nil
}

func invalid(err error) error {
	return errInvalidParameter{errors.Wrap(err, "invalid X-Registry-Auth header")}
}

type errInvalidParameter struct{ error }

func (errInvalidParameter) InvalidParameter() {}

func (e errInvalidParameter) Cause() error { return e.error }

func (e errInvalidParameter) Unwrap() error { return e.error }
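For context (not part of the diff): a round-trip sketch using the encode/decode functions above, assuming the package lives at the import path shown in the file header (credentials are fake):

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/registry"
)

func main() {
	auth := registry.AuthConfig{
		Username:      "testuser",
		Password:      "testpassword",
		ServerAddress: "example.com",
	}

	// Encode to the base64url payload carried in the X-Registry-Auth header.
	encoded, err := registry.EncodeAuthConfig(auth)
	if err != nil {
		panic(err)
	}

	// Decode it back; the two configs should be identical.
	decoded, err := registry.DecodeAuthConfig(encoded)
	if err != nil {
		panic(err)
	}
	fmt.Println(*decoded == auth) // true
}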
@@ -1,59 +0,0 @@
package registry // import "github.com/docker/docker/api/types/registry"
import (
	"io"
	"strings"
	"testing"

	"gotest.tools/v3/assert"
)

const (
	unencoded        = `{"username":"testuser","password":"testpassword","serveraddress":"example.com"}`
	encoded          = `eyJ1c2VybmFtZSI6InRlc3R1c2VyIiwicGFzc3dvcmQiOiJ0ZXN0cGFzc3dvcmQiLCJzZXJ2ZXJhZGRyZXNzIjoiZXhhbXBsZS5jb20ifQ==`
	encodedNoPadding = `eyJ1c2VybmFtZSI6InRlc3R1c2VyIiwicGFzc3dvcmQiOiJ0ZXN0cGFzc3dvcmQiLCJzZXJ2ZXJhZGRyZXNzIjoiZXhhbXBsZS5jb20ifQ`
)

var expected = AuthConfig{
	Username:      "testuser",
	Password:      "testpassword",
	ServerAddress: "example.com",
}

func TestDecodeAuthConfig(t *testing.T) {
	t.Run("valid", func(t *testing.T) {
		token, err := DecodeAuthConfig(encoded)
		assert.NilError(t, err)
		assert.Equal(t, *token, expected)
	})

	t.Run("empty", func(t *testing.T) {
		token, err := DecodeAuthConfig("")
		assert.NilError(t, err)
		assert.Equal(t, *token, AuthConfig{})
	})

	// We currently only support base64url encoding with padding, so
	// un-padded should produce an error.
	//
	// RFC4648, section 5: https://tools.ietf.org/html/rfc4648#section-5
	// RFC4648, section 3.2: https://tools.ietf.org/html/rfc4648#section-3.2
	t.Run("invalid encoding", func(t *testing.T) {
		token, err := DecodeAuthConfig(encodedNoPadding)

		assert.ErrorType(t, err, errInvalidParameter{})
		assert.ErrorContains(t, err, "invalid X-Registry-Auth header: unexpected EOF")
		assert.Equal(t, *token, AuthConfig{})
	})
}

func TestDecodeAuthConfigBody(t *testing.T) {
	token, err := DecodeAuthConfigBody(io.NopCloser(strings.NewReader(unencoded)))
	assert.NilError(t, err)
	assert.Equal(t, *token, expected)
}

func TestEncodeAuthConfig(t *testing.T) {
	token, err := EncodeAuthConfig(expected)
	assert.NilError(t, err)
	assert.Equal(t, token, encoded)
}
@@ -4,7 +4,7 @@ import (
	"encoding/json"
	"net"

	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// ServiceConfig stores daemon registry services configuration.
@@ -113,8 +113,8 @@ type SearchResults struct {
type DistributionInspect struct {
	// Descriptor contains information about the manifest, including
	// the content addressable digest
	Descriptor ocispec.Descriptor
	Descriptor v1.Descriptor
	// Platforms contains the list of platforms supported by the image,
	// obtained by parsing the manifest
	Platforms []ocispec.Platform
	Platforms []v1.Platform
}
@@ -95,37 +95,37 @@ func GetTimestamp(value string, reference time.Time) (string, error) {
	return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())), nil
}

// ParseTimestamps returns seconds and nanoseconds from a timestamp that has
// the format ("%d.%09d", time.Unix(), int64(time.Nanosecond())).
// If the incoming nanosecond portion is longer than 9 digits it is truncated.
// The expectation is that the seconds and nanoseconds will be used to create a
// time variable. For example:
// ParseTimestamps returns seconds and nanoseconds from a timestamp that has the
// format "%d.%09d", time.Unix(), int64(time.Nanosecond()))
// if the incoming nanosecond portion is longer or shorter than 9 digits it is
// converted to nanoseconds. The expectation is that the seconds and
// seconds will be used to create a time variable. For example:
//
//	seconds, nanoseconds, _ := ParseTimestamp("1136073600.000000001",0)
//	since := time.Unix(seconds, nanoseconds)
//	seconds, nanoseconds, err := ParseTimestamp("1136073600.000000001",0)
//	if err == nil since := time.Unix(seconds, nanoseconds)
//
// returns seconds as defaultSeconds if value == ""
func ParseTimestamps(value string, defaultSeconds int64) (seconds int64, nanoseconds int64, err error) {
// returns seconds as def(aultSeconds) if value == ""
func ParseTimestamps(value string, def int64) (int64, int64, error) {
	if value == "" {
		return defaultSeconds, 0, nil
		return def, 0, nil
	}
	return parseTimestamp(value)
}

func parseTimestamp(value string) (sec int64, nsec int64, err error) {
	s, n, ok := strings.Cut(value, ".")
	sec, err = strconv.ParseInt(s, 10, 64)
func parseTimestamp(value string) (int64, int64, error) {
	sa := strings.SplitN(value, ".", 2)
	s, err := strconv.ParseInt(sa[0], 10, 64)
	if err != nil {
		return sec, 0, err
		return s, 0, err
	}
	if !ok {
		return sec, 0, nil
	if len(sa) != 2 {
		return s, 0, nil
	}
	nsec, err = strconv.ParseInt(n, 10, 64)
	n, err := strconv.ParseInt(sa[1], 10, 64)
	if err != nil {
		return sec, nsec, err
		return s, n, err
	}
	// should already be in nanoseconds but just in case convert n to nanoseconds
	nsec = int64(float64(nsec) * math.Pow(float64(10), float64(9-len(n))))
	return sec, nsec, nil
	n = int64(float64(n) * math.Pow(float64(10), float64(9-len(sa[1]))))
	return s, n, nil
}
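For context (not part of the diff): the padding arithmetic above scales a short fractional part to nanoseconds by multiplying a d-digit fraction by 10^(9-d). A standalone sketch of just that step:

package main

import (
	"fmt"
	"math"
	"strconv"
	"strings"
)

// fractionToNanos mirrors the conversion in parseTimestamp above:
// "00000001" has 8 digits, so it is scaled by 10^(9-8) to 10ns.
func fractionToNanos(frac string) (int64, error) {
	n, err := strconv.ParseInt(frac, 10, 64)
	if err != nil {
		return 0, err
	}
	return int64(float64(n) * math.Pow(10, float64(9-len(frac)))), nil
}

func main() {
	_, frac, _ := strings.Cut("1136073600.00000001", ".")
	ns, _ := fractionToNanos(frac)
	fmt.Println(ns) // 10, matching the test table entry below
}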
@@ -74,8 +74,6 @@ func TestParseTimestamps(t *testing.T) {
		{"1136073600", 0, 1136073600, 0, false},
		{"1136073600.000000001", 0, 1136073600, 1, false},
		{"1136073600.0000000010", 0, 1136073600, 1, false},
		{"1136073600.0000000001", 0, 1136073600, 0, false},
		{"1136073600.0000000009", 0, 1136073600, 0, false},
		{"1136073600.00000001", 0, 1136073600, 10, false},
		{"foo.bar", 0, 0, 0, true},
		{"1136073600.bar", 0, 1136073600, 0, true},
@@ -123,8 +123,9 @@ type ImageInspect struct {
	// store images self-contained, and no longer use a parent-chain, making
	// this field an equivalent of the Size field.
	//
	// Deprecated: Unused in API 1.43 and up, but kept for backward compatibility with older API versions.
	VirtualSize int64 `json:"VirtualSize,omitempty"`
	// This field is kept for backward compatibility, but may be removed in
	// a future version of the API.
	VirtualSize int64 // TODO(thaJeztah): deprecate this field

	// GraphDriver holds information about the storage driver used to store the
	// container's and image's filesystem.
@@ -296,6 +297,8 @@ type Info struct {
	Labels             []string
	ExperimentalBuild  bool
	ServerVersion      string
	ClusterStore       string `json:",omitempty"` // Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
	ClusterAdvertise   string `json:",omitempty"` // Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
	Runtimes           map[string]Runtime
	DefaultRuntime     string
	Swarm              swarm.Info
@@ -347,19 +350,20 @@ func DecodeSecurityOptions(opts []string) ([]SecurityOpt, error) {
			continue
		}
		secopt := SecurityOpt{}
		for _, s := range strings.Split(opt, ",") {
			k, v, ok := strings.Cut(s, "=")
			if !ok {
		split := strings.Split(opt, ",")
		for _, s := range split {
			kv := strings.SplitN(s, "=", 2)
			if len(kv) != 2 {
				return nil, fmt.Errorf("invalid security option %q", s)
			}
			if k == "" || v == "" {
			if kv[0] == "" || kv[1] == "" {
				return nil, errors.New("invalid empty security option")
			}
			if k == "name" {
				secopt.Name = v
			if kv[0] == "name" {
				secopt.Name = kv[1]
				continue
			}
			secopt.Options = append(secopt.Options, KeyValue{Key: k, Value: v})
			secopt.Options = append(secopt.Options, KeyValue{Key: kv[0], Value: kv[1]})
		}
		so = append(so, secopt)
	}
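For context (not part of the diff): both variants in the hunk above split "key=value" pairs; strings.Cut (Go 1.18+) just makes the missing-separator case explicit where SplitN signals it via slice length. A small comparison sketch:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// strings.Cut returns the halves plus an explicit "found" flag...
	k, v, ok := strings.Cut("name=seccomp", "=")
	fmt.Println(k, v, ok) // name seccomp true

	// ...while SplitN signals a missing separator via slice length.
	kv := strings.SplitN("no-separator", "=", 2)
	fmt.Println(len(kv) == 2) // false

	// For "name=seccomp,profile=default" the decoder walks each
	// comma-separated element and cuts it on the first "=".
	for _, s := range strings.Split("name=seccomp,profile=default", ",") {
		key, val, _ := strings.Cut(s, "=")
		fmt.Println(key, "=>", val)
	}
}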
@@ -652,18 +656,12 @@ type Checkpoint struct {

// Runtime describes an OCI runtime
type Runtime struct {
	// "Legacy" runtime configuration for runc-compatible runtimes.

	Path string   `json:"path,omitempty"`
	Path string   `json:"path"`
	Args []string `json:"runtimeArgs,omitempty"`

	// Shimv2 runtime configuration. Mutually exclusive with the legacy config above.

	Type    string                 `json:"runtimeType,omitempty"`
	Options map[string]interface{} `json:"options,omitempty"`

	// This is exposed here only for internal use
	ShimConfig *ShimConfig `json:"-"`
	// It is not currently supported to specify custom shim configs
	Shim *ShimConfig `json:"-"`
}

// ShimConfig is used by runtime to configure containerd shims
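For context (not part of the diff): the comment above says the legacy (path/args) and shimv2 (type/options) forms are mutually exclusive; a sketch of the two shapes as struct literals (runtime names and paths are illustrative):

package main

import "github.com/docker/docker/api/types"

func main() {
	// Legacy runc-compatible form: a binary path plus arguments.
	legacy := types.Runtime{
		Path: "/usr/local/bin/my-runc",
		Args: []string{"--debug"},
	}

	// Shimv2 form: a containerd runtime type plus free-form options.
	shimv2 := types.Runtime{
		Type: "io.containerd.runc.v2",
		Options: map[string]interface{}{
			"BinaryName": "/usr/local/bin/my-runc",
		},
	}
	_, _ = legacy, shimv2
}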
@@ -16,11 +16,11 @@ func compare(v1, v2 string) int {
		otherTab = strings.Split(v2, ".")
	)

	maxVer := len(currTab)
	if len(otherTab) > maxVer {
		maxVer = len(otherTab)
	max := len(currTab)
	if len(otherTab) > max {
		max = len(otherTab)
	}
	for i := 0; i < maxVer; i++ {
	for i := 0; i < max; i++ {
		var currInt, otherInt int

		if len(currTab) > i {
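For context (not part of the diff): the rename (max to maxVer) is cosmetic; the comparison itself is numeric per dotted segment, so "1.10" sorts after "1.9". A hedged re-implementation of the idea:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// compareVersions mirrors the segment-wise numeric comparison sketched
// above; missing segments are treated as zero.
func compareVersions(v1, v2 string) int {
	a, b := strings.Split(v1, "."), strings.Split(v2, ".")
	n := len(a)
	if len(b) > n {
		n = len(b)
	}
	for i := 0; i < n; i++ {
		var x, y int
		if i < len(a) {
			x, _ = strconv.Atoi(a[i])
		}
		if i < len(b) {
			y, _ = strconv.Atoi(b[i])
		}
		if x != y {
			if x < y {
				return -1
			}
			return 1
		}
	}
	return 0
}

func main() {
	fmt.Println(compareVersions("1.10", "1.9"))    // 1: numeric, not lexical
	fmt.Println(compareVersions("1.22", "1.22.0")) // 0
}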
api/types/volume/deprecated.go (new file, 11 lines)
@@ -0,0 +1,11 @@
package volume // import "github.com/docker/docker/api/types/volume"

// VolumeCreateBody Volume configuration
//
// Deprecated: use CreateOptions
type VolumeCreateBody = CreateOptions

// VolumeListOKBody Volume list response
//
// Deprecated: use ListResponse
type VolumeListOKBody = ListResponse
@@ -1,6 +1,3 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
//go:build go1.19

package containerimage

import (
@@ -13,7 +10,7 @@ import (
	"time"

	"github.com/containerd/containerd/content"
	cerrdefs "github.com/containerd/containerd/errdefs"
	containerderrors "github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/gc"
	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/leases"
@@ -715,7 +712,7 @@ func showProgress(ctx context.Context, ongoing *jobs, cs content.Store, pw progr
		if !j.done {
			info, err := cs.Info(context.TODO(), j.Digest)
			if err != nil {
				if cerrdefs.IsNotFound(err) {
				if containerderrors.IsNotFound(err) {
					// _ = pw.Write(j.Digest.String(), progress.Status{
					// 	Action: "waiting",
					// })
@@ -18,7 +18,7 @@ import (
	"github.com/moby/buildkit/solver"
	"github.com/moby/buildkit/worker"
	"github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
)

@@ -26,9 +26,9 @@ import (
func ResolveCacheImporterFunc(sm *session.Manager, resolverFunc docker.RegistryHosts, cs content.Store, rs reference.Store, is imagestore.Store) remotecache.ResolveCacheImporterFunc {
	upstream := registryremotecache.ResolveCacheImporterFunc(sm, cs, resolverFunc)

	return func(ctx context.Context, group session.Group, attrs map[string]string) (remotecache.Importer, ocispec.Descriptor, error) {
	return func(ctx context.Context, group session.Group, attrs map[string]string) (remotecache.Importer, specs.Descriptor, error) {
		if dt, err := tryImportLocal(rs, is, attrs["ref"]); err == nil {
			return newLocalImporter(dt), ocispec.Descriptor{}, nil
			return newLocalImporter(dt), specs.Descriptor{}, nil
		}
		return upstream(ctx, group, attrs)
	}
@@ -59,7 +59,7 @@ type localImporter struct {
	dt []byte
}

func (li *localImporter) Resolve(ctx context.Context, _ ocispec.Descriptor, id string, w worker.Worker) (solver.CacheManager, error) {
func (li *localImporter) Resolve(ctx context.Context, _ specs.Descriptor, id string, w worker.Worker) (solver.CacheManager, error) {
	cc := v1.NewCacheChains()
	if err := li.importInlineCache(ctx, li.dt, cc); err != nil {
		return nil, err
@@ -96,7 +96,7 @@ func (li *localImporter) importInlineCache(ctx context.Context, dt []byte, cc so
	layers := v1.DescriptorProvider{}
	for i, diffID := range img.Rootfs.DiffIDs {
		dgst := digest.Digest(diffID.String())
		desc := ocispec.Descriptor{
		desc := specs.Descriptor{
			Digest:    dgst,
			Size:      -1,
			MediaType: images.MediaTypeDockerSchema2Layer,
@@ -157,6 +157,6 @@ func parseCreatedLayerInfo(img image) ([]string, []string, error) {
type emptyProvider struct {
}

func (p *emptyProvider) ReaderAt(ctx context.Context, dec ocispec.Descriptor) (content.ReaderAt, error) {
func (p *emptyProvider) ReaderAt(ctx context.Context, dec specs.Descriptor) (content.ReaderAt, error) {
	return nil, errors.Errorf("ReaderAt not implemented for empty provider")
}
@@ -6,7 +6,7 @@ import (
	"path/filepath"

	"github.com/docker/docker/layer"
	"github.com/docker/docker/pkg/longpath"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/pkg/errors"
	bolt "go.etcd.io/bbolt"
	"golang.org/x/sync/errgroup"
@@ -55,7 +55,7 @@ func (s *snapshotter) EnsureLayer(ctx context.Context, key string) ([]layer.Diff
		})
	}

	tmpDir, err := longpath.MkdirTemp("", "docker-tarsplit")
	tmpDir, err := ioutils.TempDir("", "docker-tarsplit")
	if err != nil {
		return nil, err
	}
@@ -7,7 +7,7 @@ import (
	"strings"
	"sync"

	cerrdefs "github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/leases"
	"github.com/containerd/containerd/mount"
	"github.com/containerd/containerd/snapshots"
@@ -204,7 +204,7 @@ func (s *snapshotter) getGraphDriverID(key string) (string, bool) {
	if err := s.db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte(key))
		if b == nil {
			return errors.Wrapf(cerrdefs.ErrNotFound, "key %s", key)
			return errors.Wrapf(errdefs.ErrNotFound, "key %s", key)
		}
		v := b.Get(keyCommitted)
		if v != nil {
@@ -248,7 +248,7 @@ func (s *snapshotter) Stat(ctx context.Context, key string) (snapshots.Info, err
	if err := s.db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte(id))
		if b == nil && l == nil {
			return errors.Wrapf(cerrdefs.ErrNotFound, "snapshot %s", id)
			return errors.Wrapf(errdefs.ErrNotFound, "snapshot %s", id)
		}
		inf.Name = key
		if b != nil {
@@ -291,7 +291,7 @@ func (s *snapshotter) Mounts(ctx context.Context, key string) (snapshot.Mountabl
			return nil, nil, err
		}
		return []mount.Mount{{
			Source:  rootfs,
			Source:  rootfs.Path(),
			Type:    "bind",
			Options: []string{"rbind"},
		}}, func() error {
@@ -312,7 +312,7 @@ func (s *snapshotter) Mounts(ctx context.Context, key string) (snapshot.Mountabl
			return nil, nil, err
		}
		return []mount.Mount{{
			Source:  rootfs,
			Source:  rootfs.Path(),
			Type:    "bind",
			Options: []string{"rbind"},
		}}, func() error {
@@ -14,15 +14,10 @@ import (
	"github.com/containerd/containerd/remotes/docker"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/backend"
	timetypes "github.com/docker/docker/api/types/time"
	"github.com/docker/docker/builder"
	"github.com/docker/docker/builder/builder-next/exporter"
	"github.com/docker/docker/builder/builder-next/exporter/mobyexporter"
	"github.com/docker/docker/builder/builder-next/exporter/overrides"
	"github.com/docker/docker/daemon/config"
	"github.com/docker/docker/daemon/images"
	"github.com/docker/docker/libnetwork"
	"github.com/docker/docker/opts"
	"github.com/docker/docker/pkg/idtools"
	"github.com/docker/docker/pkg/streamformatter"
	"github.com/docker/go-units"
@@ -55,12 +50,6 @@ func (e errConflictFilter) Error() string {

func (errConflictFilter) InvalidParameter() {}

type errInvalidFilterValue struct {
	error
}

func (errInvalidFilterValue) InvalidParameter() {}

var cacheFields = map[string]bool{
	"id":     true,
	"parent": true,
@@ -78,10 +67,8 @@ var cacheFields = map[string]bool{
type Opt struct {
	SessionManager      *session.Manager
	Root                string
	EngineID            string
	Dist                images.DistributionServices
	ImageTagger         mobyexporter.ImageTagger
	NetworkController   *libnetwork.Controller
	NetworkController   libnetwork.NetworkController
	DefaultCgroupParent string
	RegistryHosts       docker.RegistryHosts
	BuilderConfig       config.BuilderConfig
@@ -89,37 +76,29 @@ type Opt struct {
	IdentityMapping     idtools.IdentityMapping
	DNSConfig           config.DNSConfig
	ApparmorProfile     string
	UseSnapshotter      bool
	Snapshotter         string
	ContainerdAddress   string
	ContainerdNamespace string
}

// Builder can build using BuildKit backend
type Builder struct {
	controller     *control.Controller
	dnsconfig      config.DNSConfig
	reqBodyHandler *reqBodyHandler

	mu             sync.Mutex
	jobs           map[string]*buildJob
	useSnapshotter bool
	mu   sync.Mutex
	jobs map[string]*buildJob
}

// New creates a new builder
func New(ctx context.Context, opt Opt) (*Builder, error) {
func New(opt Opt) (*Builder, error) {
	reqHandler := newReqBodyHandler(tracing.DefaultTransport)

	c, err := newController(ctx, reqHandler, opt)
	c, err := newController(reqHandler, opt)
	if err != nil {
		return nil, err
	}
	b := &Builder{
		controller:     c,
		dnsconfig:      opt.DNSConfig,
		reqBodyHandler: reqHandler,
		jobs:           map[string]*buildJob{},
		useSnapshotter: opt.UseSnapshotter,
	}
	return b, nil
}
@@ -220,11 +199,8 @@ func (b *Builder) Prune(ctx context.Context, opts types.BuildCachePruneOptions)

// Build executes a build request
func (b *Builder) Build(ctx context.Context, opt backend.BuildConfig) (*builder.Result, error) {
	if len(opt.Options.Outputs) > 1 {
		return nil, errors.Errorf("multiple outputs not supported")
	}

	var rc = opt.Source

	if buildID := opt.Options.BuildID; buildID != "" {
		b.mu.Lock()

@@ -335,7 +311,7 @@ func (b *Builder) Build(ctx context.Context, opt backend.BuildConfig) (*builder.
		return nil, errors.Errorf("network mode %q not supported by buildkit", opt.Options.NetworkMode)
	}

	extraHosts, err := toBuildkitExtraHosts(opt.Options.ExtraHosts, b.dnsconfig.HostGatewayIP)
	extraHosts, err := toBuildkitExtraHosts(opt.Options.ExtraHosts)
	if err != nil {
		return nil, err
	}
@@ -354,8 +330,11 @@ func (b *Builder) Build(ctx context.Context, opt backend.BuildConfig) (*builder.

	exporterName := ""
	exporterAttrs := map[string]string{}
	if len(opt.Options.Outputs) == 0 {
		exporterName = exporter.Moby

	if len(opt.Options.Outputs) > 1 {
		return nil, errors.Errorf("multiple outputs not supported")
	} else if len(opt.Options.Outputs) == 0 {
		exporterName = "moby"
	} else {
		// cacheonly is a special type for triggering skipping all exporters
		if opt.Options.Outputs[0].Type != "cacheonly" {
@@ -364,18 +343,14 @@ func (b *Builder) Build(ctx context.Context, opt backend.BuildConfig) (*builder.
		}
	}

	if (exporterName == client.ExporterImage || exporterName == exporter.Moby) && len(opt.Options.Tags) > 0 {
		nameAttr, err := overrides.SanitizeRepoAndTags(opt.Options.Tags)
		if err != nil {
			return nil, err
	if exporterName == "moby" {
		if len(opt.Options.Tags) > 0 {
			exporterAttrs["name"] = strings.Join(opt.Options.Tags, ",")
		}
		if exporterAttrs == nil {
			exporterAttrs = make(map[string]string)
		}
		exporterAttrs["name"] = strings.Join(nameAttr, ",")
	}

	cache := controlapi.CacheOptions{}

	if inlineCache := opt.Options.BuildArgs["BUILDKIT_INLINE_CACHE"]; inlineCache != nil {
		if b, err := strconv.ParseBool(*inlineCache); err == nil && b {
			cache.Exports = append(cache.Exports, &controlapi.CacheOptionsEntry{
@@ -407,7 +382,7 @@ func (b *Builder) Build(ctx context.Context, opt backend.BuildConfig) (*builder.
		if err != nil {
			return err
		}
		if exporterName != exporter.Moby && exporterName != client.ExporterImage {
		if exporterName != "moby" {
			return nil
		}
		id, ok := resp.ExporterResponse["containerimage.digest"]
@@ -576,28 +551,18 @@ func (j *buildJob) SetUpload(ctx context.Context, rc io.ReadCloser) error {
}

// toBuildkitExtraHosts converts hosts from docker key:value format to buildkit's csv format
func toBuildkitExtraHosts(inp []string, hostGatewayIP net.IP) (string, error) {
func toBuildkitExtraHosts(inp []string) (string, error) {
	if len(inp) == 0 {
		return "", nil
	}
	hosts := make([]string, 0, len(inp))
	for _, h := range inp {
		host, ip, ok := strings.Cut(h, ":")
		if !ok || host == "" || ip == "" {
		parts := strings.Split(h, ":")

		if len(parts) != 2 || parts[0] == "" || net.ParseIP(parts[1]) == nil {
			return "", errors.Errorf("invalid host %s", h)
		}
		// If the IP Address is a "host-gateway", replace this value with the
		// IP address stored in the daemon level HostGatewayIP config variable.
		if ip == opts.HostGatewayName {
			gateway := hostGatewayIP.String()
			if gateway == "" {
				return "", fmt.Errorf("unable to derive the IP value for host-gateway")
			}
			ip = gateway
		} else if net.ParseIP(ip) == nil {
			return "", fmt.Errorf("invalid host %s", h)
		}
		hosts = append(hosts, host+"="+ip)
		hosts = append(hosts, parts[0]+"="+parts[1])
	}
	return strings.Join(hosts, ","), nil
}
@@ -632,20 +597,11 @@ func toBuildkitPruneInfo(opts types.BuildCachePruneOptions) (client.PruneInfo, e
	case 0:
		// nothing to do
	case 1:
		ts, err := timetypes.GetTimestamp(untilValues[0], time.Now())
		var err error
		until, err = time.ParseDuration(untilValues[0])
		if err != nil {
			return client.PruneInfo{}, errInvalidFilterValue{
				errors.Wrapf(err, "%q filter expects a duration (e.g., '24h') or a timestamp", filterKey),
			}
			return client.PruneInfo{}, errors.Wrapf(err, "%q filter expects a duration (e.g., '24h')", filterKey)
		}
		seconds, nanoseconds, err := timetypes.ParseTimestamps(ts, 0)
		if err != nil {
			return client.PruneInfo{}, errInvalidFilterValue{
				errors.Wrapf(err, "failed to parse timestamp %q", ts),
			}
		}

		until = time.Since(time.Unix(seconds, nanoseconds))
	default:
		return client.PruneInfo{}, errMultipleFilterValues{}
	}
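For context (not part of the diff): a sketch of what the extra-hosts conversion above produces, including the host-gateway substitution (the gateway address and host names are made up):

package main

import (
	"fmt"
	"net"
	"strings"
)

// toExtraHosts is a simplified stand-in for toBuildkitExtraHosts above:
// "host:ip" entries become BuildKit's "host=ip" CSV, and the literal
// "host-gateway" is replaced by the daemon's gateway IP.
func toExtraHosts(inp []string, hostGatewayIP net.IP) (string, error) {
	hosts := make([]string, 0, len(inp))
	for _, h := range inp {
		host, ip, ok := strings.Cut(h, ":")
		if !ok || host == "" || ip == "" {
			return "", fmt.Errorf("invalid host %s", h)
		}
		if ip == "host-gateway" {
			ip = hostGatewayIP.String()
		} else if net.ParseIP(ip) == nil {
			return "", fmt.Errorf("invalid host %s", h)
		}
		hosts = append(hosts, host+"="+ip)
	}
	return strings.Join(hosts, ","), nil
}

func main() {
	out, _ := toExtraHosts(
		[]string{"registry.local:10.0.0.5", "gw:host-gateway"},
		net.ParseIP("172.17.0.1"),
	)
	fmt.Println(out) // registry.local=10.0.0.5,gw=172.17.0.1
}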
@@ -5,9 +5,7 @@ import (
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
ctd "github.com/containerd/containerd"
|
||||
"github.com/containerd/containerd/content/local"
|
||||
ctdmetadata "github.com/containerd/containerd/metadata"
|
||||
"github.com/containerd/containerd/snapshots"
|
||||
@@ -16,22 +14,18 @@ import (
|
||||
"github.com/docker/docker/builder/builder-next/adapters/containerimage"
|
||||
"github.com/docker/docker/builder/builder-next/adapters/localinlinecache"
|
||||
"github.com/docker/docker/builder/builder-next/adapters/snapshot"
|
||||
"github.com/docker/docker/builder/builder-next/exporter/mobyexporter"
|
||||
containerimageexp "github.com/docker/docker/builder/builder-next/exporter"
|
||||
"github.com/docker/docker/builder/builder-next/imagerefchecker"
|
||||
mobyworker "github.com/docker/docker/builder/builder-next/worker"
|
||||
wlabel "github.com/docker/docker/builder/builder-next/worker/label"
|
||||
"github.com/docker/docker/daemon/config"
|
||||
"github.com/docker/docker/daemon/graphdriver"
|
||||
units "github.com/docker/go-units"
|
||||
"github.com/moby/buildkit/cache"
|
||||
"github.com/moby/buildkit/cache/metadata"
|
||||
"github.com/moby/buildkit/cache/remotecache"
|
||||
"github.com/moby/buildkit/cache/remotecache/gha"
|
||||
inlineremotecache "github.com/moby/buildkit/cache/remotecache/inline"
|
||||
localremotecache "github.com/moby/buildkit/cache/remotecache/local"
|
||||
registryremotecache "github.com/moby/buildkit/cache/remotecache/registry"
|
||||
"github.com/moby/buildkit/client"
|
||||
bkconfig "github.com/moby/buildkit/cmd/buildkitd/config"
|
||||
"github.com/moby/buildkit/control"
|
||||
"github.com/moby/buildkit/frontend"
|
||||
dockerfile "github.com/moby/buildkit/frontend/dockerfile/builder"
|
||||
@@ -42,126 +36,12 @@ import (
|
||||
"github.com/moby/buildkit/util/archutil"
|
||||
"github.com/moby/buildkit/util/entitlements"
|
||||
"github.com/moby/buildkit/util/leaseutil"
|
||||
"github.com/moby/buildkit/util/network/netproviders"
|
||||
"github.com/moby/buildkit/worker"
|
||||
"github.com/moby/buildkit/worker/containerd"
|
||||
"github.com/moby/buildkit/worker/label"
|
||||
"github.com/pkg/errors"
|
||||
"go.etcd.io/bbolt"
|
||||
bolt "go.etcd.io/bbolt"
|
||||
|
||||
"github.com/moby/buildkit/solver/pb"
|
||||
"github.com/moby/buildkit/util/apicaps"
|
||||
)
|
||||
|
||||
func newController(ctx context.Context, rt http.RoundTripper, opt Opt) (*control.Controller, error) {
|
||||
if opt.UseSnapshotter {
|
||||
return newSnapshotterController(ctx, rt, opt)
|
||||
}
|
||||
return newGraphDriverController(ctx, rt, opt)
|
||||
}
|
||||
|
||||
func newSnapshotterController(ctx context.Context, rt http.RoundTripper, opt Opt) (*control.Controller, error) {
|
||||
if err := os.MkdirAll(opt.Root, 0o711); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
historyDB, historyConf, err := openHistoryDB(opt.Root, opt.BuilderConfig.History)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cacheStorage, err := bboltcachestorage.NewStore(filepath.Join(opt.Root, "cache.db"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
nc := netproviders.Opt{
|
||||
Mode: "host",
|
||||
}
|
||||
dns := getDNSConfig(opt.DNSConfig)
|
||||
|
||||
wo, err := containerd.NewWorkerOpt(opt.Root, opt.ContainerdAddress, opt.Snapshotter, opt.ContainerdNamespace,
|
||||
opt.Rootless, map[string]string{
|
||||
label.Snapshotter: opt.Snapshotter,
|
||||
}, dns, nc, opt.ApparmorProfile, false, nil, "", ctd.WithTimeout(60*time.Second))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
policy, err := getGCPolicy(opt.BuilderConfig, opt.Root)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
wo.GCPolicy = policy
|
||||
wo.RegistryHosts = opt.RegistryHosts
|
||||
wo.Labels = getLabels(opt, wo.Labels)
|
||||
|
||||
exec, err := newExecutor(opt.Root, opt.DefaultCgroupParent, opt.NetworkController, dns, opt.Rootless, opt.IdentityMapping, opt.ApparmorProfile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
wo.Executor = exec
|
||||
|
||||
w, err := mobyworker.NewContainerdWorker(ctx, wo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
wc := &worker.Controller{}
|
||||
|
||||
err = wc.Add(w)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
frontends := map[string]frontend.Frontend{
|
||||
"dockerfile.v0": forwarder.NewGatewayForwarder(wc, dockerfile.Build),
|
||||
"gateway.v0": gateway.NewGatewayFrontend(wc),
|
||||
}
|
||||
|
||||
return control.NewController(control.Opt{
|
||||
SessionManager: opt.SessionManager,
|
||||
WorkerController: wc,
|
||||
Frontends: frontends,
|
||||
CacheKeyStorage: cacheStorage,
|
||||
ResolveCacheImporterFuncs: map[string]remotecache.ResolveCacheImporterFunc{
|
||||
"gha": gha.ResolveCacheImporterFunc(),
|
||||
"local": localremotecache.ResolveCacheImporterFunc(opt.SessionManager),
|
||||
"registry": registryremotecache.ResolveCacheImporterFunc(opt.SessionManager, wo.ContentStore, opt.RegistryHosts),
|
||||
},
|
||||
ResolveCacheExporterFuncs: map[string]remotecache.ResolveCacheExporterFunc{
|
||||
"gha": gha.ResolveCacheExporterFunc(),
|
||||
"inline": inlineremotecache.ResolveCacheExporterFunc(),
|
||||
"local": localremotecache.ResolveCacheExporterFunc(opt.SessionManager),
|
||||
"registry": registryremotecache.ResolveCacheExporterFunc(opt.SessionManager, opt.RegistryHosts),
|
||||
},
|
||||
Entitlements: getEntitlements(opt.BuilderConfig),
|
||||
HistoryDB: historyDB,
|
||||
HistoryConfig: historyConf,
|
||||
LeaseManager: wo.LeaseManager,
|
||||
ContentStore: wo.ContentStore,
|
||||
})
|
||||
}
|
||||
|
||||
func openHistoryDB(root string, cfg *config.BuilderHistoryConfig) (*bolt.DB, *bkconfig.HistoryConfig, error) {
|
||||
db, err := bbolt.Open(filepath.Join(root, "history.db"), 0o600, nil)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
var conf *bkconfig.HistoryConfig
|
||||
if cfg != nil {
|
||||
conf = &bkconfig.HistoryConfig{
|
||||
            MaxAge:     cfg.MaxAge,
            MaxEntries: cfg.MaxEntries,
        }
    }

    return db, conf, nil
}

func newGraphDriverController(ctx context.Context, rt http.RoundTripper, opt Opt) (*control.Controller, error) {
func newController(rt http.RoundTripper, opt Opt) (*control.Controller, error) {
    if err := os.MkdirAll(opt.Root, 0711); err != nil {
        return nil, err
    }
@@ -169,18 +49,6 @@ func newGraphDriverController(ctx context.Context, rt http.RoundTripper, opt Opt
    dist := opt.Dist
    root := opt.Root

    pb.Caps.Init(apicaps.Cap{
        ID:                pb.CapMergeOp,
        Enabled:           false,
        DisabledReasonMsg: "only enabled with containerd image store backend",
    })

    pb.Caps.Init(apicaps.Cap{
        ID:                pb.CapDiffOp,
        Enabled:           false,
        DisabledReasonMsg: "only enabled with containerd image store backend",
    })

    var driver graphdriver.Driver
    if ls, ok := dist.LayerStore.(interface {
        Driver() graphdriver.Driver
@@ -270,15 +138,15 @@ func newGraphDriverController(ctx context.Context, rt http.RoundTripper, opt Opt
        return nil, err
    }

    differ, ok := snapshotter.(mobyexporter.Differ)
    differ, ok := snapshotter.(containerimageexp.Differ)
    if !ok {
        return nil, errors.Errorf("snapshotter doesn't support differ")
    }

    exp, err := mobyexporter.New(mobyexporter.Opt{
        ImageStore:  dist.ImageStore,
        Differ:      differ,
        ImageTagger: opt.ImageTagger,
    exp, err := containerimageexp.New(containerimageexp.Opt{
        ImageStore:     dist.ImageStore,
        ReferenceStore: dist.ReferenceStore,
        Differ:         differ,
    })
    if err != nil {
        return nil, err
@@ -289,11 +157,6 @@ func newGraphDriverController(ctx context.Context, rt http.RoundTripper, opt Opt
        return nil, err
    }

    historyDB, historyConf, err := openHistoryDB(opt.Root, opt.BuilderConfig.History)
    if err != nil {
        return nil, err
    }

    gcPolicy, err := getGCPolicy(opt.BuilderConfig, root)
    if err != nil {
        return nil, errors.Wrap(err, "could not get builder GC policy")
@@ -304,16 +167,16 @@ func newGraphDriverController(ctx context.Context, rt http.RoundTripper, opt Opt
        return nil, errors.Errorf("snapshotter doesn't support differ")
    }

    leases, err := lm.List(ctx, `labels."buildkit/lease.temporary"`)
    leases, err := lm.List(context.TODO(), "labels.\"buildkit/lease.temporary\"")
    if err != nil {
        return nil, err
    }
    for _, l := range leases {
        lm.Delete(ctx, l)
        lm.Delete(context.TODO(), l)
    }

    wopt := mobyworker.Opt{
        ID:           opt.EngineID,
        ID:           "moby",
        ContentStore: store,
        CacheManager: cm,
        GCPolicy:     gcPolicy,
@@ -326,8 +189,6 @@ func newGraphDriverController(ctx context.Context, rt http.RoundTripper, opt Opt
        Transport:    rt,
        Layers:       layers,
        Platforms:    archutil.SupportedPlatforms(true),
        LeaseManager: lm,
        Labels:       getLabels(opt, nil),
    }

    wc := &worker.Controller{}
@@ -354,11 +215,7 @@ func newGraphDriverController(ctx context.Context, rt http.RoundTripper, opt Opt
        ResolveCacheExporterFuncs: map[string]remotecache.ResolveCacheExporterFunc{
            "inline": inlineremotecache.ResolveCacheExporterFunc(),
        },
        Entitlements:  getEntitlements(opt.BuilderConfig),
        LeaseManager:  lm,
        ContentStore:  store,
        HistoryDB:     historyDB,
        HistoryConfig: historyConf,
        Entitlements:  getEntitlements(opt.BuilderConfig),
    })
}

@@ -414,11 +271,3 @@ func getEntitlements(conf config.BuilderConfig) []string {
    }
    return ents
}

func getLabels(opt Opt, labels map[string]string) map[string]string {
    if labels == nil {
        labels = make(map[string]string)
    }
    labels[wlabel.HostGatewayIP] = opt.DNSConfig.HostGatewayIP.String()
    return labels
}
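For context: the hunk above lists and deletes every lease labelled buildkit/lease.temporary at startup, so leftovers from an interrupted build do not pin content. A minimal sketch of that cleanup step, assuming containerd's leases.Manager interface; the helper name is illustrative and not part of the diff:

package buildkit

import (
    "context"

    "github.com/containerd/containerd/leases"
)

// cleanupTemporaryLeases drops any lease a previous daemon run left behind,
// using the same label filter as the code above.
func cleanupTemporaryLeases(ctx context.Context, lm leases.Manager) error {
    stale, err := lm.List(ctx, `labels."buildkit/lease.temporary"`)
    if err != nil {
        return err
    }
    for _, l := range stale {
        // Best-effort delete, mirroring the loop above, which discards
        // the per-lease error.
        _ = lm.Delete(ctx, l)
    }
    return nil
}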
@@ -4,7 +4,6 @@
package buildkit

import (
    "context"
    "os"
    "path/filepath"
    "strconv"
@@ -26,10 +25,10 @@ import (

const networkName = "bridge"

func newExecutor(root, cgroupParent string, net *libnetwork.Controller, dnsConfig *oci.DNSConfig, rootless bool, idmap idtools.IdentityMapping, apparmorProfile string) (executor.Executor, error) {
func newExecutor(root, cgroupParent string, net libnetwork.NetworkController, dnsConfig *oci.DNSConfig, rootless bool, idmap idtools.IdentityMapping, apparmorProfile string) (executor.Executor, error) {
    netRoot := filepath.Join(root, "net")
    networkProviders := map[pb.NetMode]network.Provider{
        pb.NetMode_UNSET: &bridgeProvider{Controller: net, Root: netRoot},
        pb.NetMode_UNSET: &bridgeProvider{NetworkController: net, Root: netRoot},
        pb.NetMode_HOST:  network.NewHostProvider(),
        pb.NetMode_NONE:  network.NewNoneProvider(),
    }
@@ -65,11 +64,11 @@ func newExecutor(root, cgroupParent string, net *libnetwork.Controller, dnsConfi
}

type bridgeProvider struct {
    *libnetwork.Controller
    libnetwork.NetworkController
    Root string
}

func (p *bridgeProvider) New(ctx context.Context, hostname string) (network.Namespace, error) {
func (p *bridgeProvider) New() (network.Namespace, error) {
    n, err := p.NetworkByName(networkName)
    if err != nil {
        return nil, err
@@ -77,26 +76,22 @@ func (p *bridgeProvider) New(ctx context.Context, hostname string) (network.Name

    iface := &lnInterface{ready: make(chan struct{}), provider: p}
    iface.Once.Do(func() {
        go iface.init(p.Controller, n)
        go iface.init(p.NetworkController, n)
    })

    return iface, nil
}

func (p *bridgeProvider) Close() error {
    return nil
}

type lnInterface struct {
    ep  *libnetwork.Endpoint
    sbx *libnetwork.Sandbox
    ep  libnetwork.Endpoint
    sbx libnetwork.Sandbox
    sync.Once
    err      error
    ready    chan struct{}
    provider *bridgeProvider
}

func (iface *lnInterface) init(c *libnetwork.Controller, n libnetwork.Network) {
func (iface *lnInterface) init(c libnetwork.NetworkController, n libnetwork.Network) {
    defer close(iface.ready)
    id := identity.NewID()

@@ -128,12 +123,12 @@ func (iface *lnInterface) Set(s *specs.Spec) error {
        logrus.WithError(iface.err).Error("failed to set networking spec")
        return iface.err
    }
    shortNetCtlrID := stringid.TruncateID(iface.provider.Controller.ID())
    shortNetCtlrID := stringid.TruncateID(iface.provider.NetworkController.ID())
    // attach netns to bridge within the container namespace, using reexec in a prestart hook
    s.Hooks = &specs.Hooks{
        Prestart: []specs.Hook{{
            Path: filepath.Join("/proc", strconv.Itoa(os.Getpid()), "exe"),
            Args: []string{"libnetwork-setkey", "-exec-root=" + iface.provider.Config().ExecRoot, iface.sbx.ContainerID(), shortNetCtlrID},
            Args: []string{"libnetwork-setkey", "-exec-root=" + iface.provider.Config().Daemon.ExecRoot, iface.sbx.ContainerID(), shortNetCtlrID},
        }},
    }
    return nil

@@ -11,7 +11,7 @@ import (
    "github.com/moby/buildkit/executor/oci"
)

func newExecutor(_, _ string, _ *libnetwork.Controller, _ *oci.DNSConfig, _ bool, _ idtools.IdentityMapping, _ string) (executor.Executor, error) {
func newExecutor(_, _ string, _ libnetwork.NetworkController, _ *oci.DNSConfig, _ bool, _ idtools.IdentityMapping, _ string) (executor.Executor, error) {
    return &winExecutor{}, nil
}
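The lnInterface type above pairs sync.Once with a ready channel so the endpoint is initialized exactly once, in the background, while callers block until it is done. A self-contained sketch of that pattern with a toy resource (the names here are illustrative, not from the diff):

package buildkit

import "sync"

// lazyResource is initialized at most once, asynchronously; readers wait on
// the ready channel and then check err, exactly like lnInterface above.
type lazyResource struct {
    sync.Once
    err   error
    ready chan struct{}
    value string
}

func newLazyResource() *lazyResource {
    r := &lazyResource{ready: make(chan struct{})}
    r.Once.Do(func() {
        go r.init()
    })
    return r
}

func (r *lazyResource) init() {
    defer close(r.ready) // unblock all waiters, success or failure
    r.value = "initialized"
}

// Get blocks until init has finished and surfaces its error.
func (r *lazyResource) Get() (string, error) {
    <-r.ready
    if r.err != nil {
        return "", r.err
    }
    return r.value, nil
}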
@@ -1,22 +1,27 @@
package mobyexporter
package containerimage

import (
    "context"
    "encoding/json"
    "fmt"
    "strconv"
    "strings"

    distref "github.com/docker/distribution/reference"
    "github.com/docker/docker/image"
    "github.com/docker/docker/layer"
    "github.com/docker/docker/reference"
    "github.com/moby/buildkit/exporter"
    "github.com/moby/buildkit/exporter/containerimage/exptypes"
    "github.com/moby/buildkit/util/compression"
    "github.com/opencontainers/go-digest"
    "github.com/pkg/errors"
)

const (
    keyImageName = "name"
    keyImageName      = "name"
    keyBuildInfo      = "buildinfo"
    keyBuildInfoAttrs = "buildinfo-attrs"
)

// Differ can make a moby layer from a snapshot
@@ -24,15 +29,11 @@ type Differ interface {
    EnsureLayer(ctx context.Context, key string) ([]layer.DiffID, error)
}

type ImageTagger interface {
    TagImage(ctx context.Context, imageID image.ID, newTag distref.Named) error
}

// Opt defines a struct for creating new exporter
type Opt struct {
    ImageStore  image.Store
    Differ      Differ
    ImageTagger ImageTagger
    ImageStore     image.Store
    ReferenceStore reference.Store
    Differ         Differ
}

type imageExporter struct {
@@ -48,6 +49,7 @@ func New(opt Opt) (exporter.Exporter, error) {
func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exporter.ExporterInstance, error) {
    i := &imageExporterInstance{
        imageExporter: e,
        buildInfo:     true,
    }
    for k, v := range opt {
        switch k {
@@ -59,6 +61,26 @@ func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exp
            }
            i.targetNames = append(i.targetNames, ref)
        }
    case keyBuildInfo:
        if v == "" {
            i.buildInfo = true
            continue
        }
        b, err := strconv.ParseBool(v)
        if err != nil {
            return nil, errors.Wrapf(err, "non-bool value specified for %s", k)
        }
        i.buildInfo = b
    case keyBuildInfoAttrs:
        if v == "" {
            i.buildInfoAttrs = false
            continue
        }
        b, err := strconv.ParseBool(v)
        if err != nil {
            return nil, errors.Wrapf(err, "non-bool value specified for %s", k)
        }
        i.buildInfoAttrs = b
    default:
        if i.meta == nil {
            i.meta = make(map[string][]byte)
@@ -71,26 +93,32 @@ func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exp

type imageExporterInstance struct {
    *imageExporter
    targetNames []distref.Named
    meta        map[string][]byte
    targetNames    []distref.Named
    meta           map[string][]byte
    buildInfo      bool
    buildInfoAttrs bool
}

func (e *imageExporterInstance) Name() string {
    return "exporting to image"
}

func (e *imageExporterInstance) Config() *exporter.Config {
    return exporter.NewConfig()
func (e *imageExporterInstance) Config() exporter.Config {
    return exporter.Config{
        Compression: compression.Config{
            Type: compression.Default,
        },
    }
}

func (e *imageExporterInstance) Export(ctx context.Context, inp *exporter.Source, sessionID string) (map[string]string, exporter.DescriptorReference, error) {
func (e *imageExporterInstance) Export(ctx context.Context, inp exporter.Source, sessionID string) (map[string]string, error) {
    if len(inp.Refs) > 1 {
        return nil, nil, fmt.Errorf("exporting multiple references to image store is currently unsupported")
        return nil, fmt.Errorf("exporting multiple references to image store is currently unsupported")
    }

    ref := inp.Ref
    if ref != nil && len(inp.Refs) == 1 {
        return nil, nil, fmt.Errorf("invalid exporter input: Ref and Refs are mutually exclusive")
        return nil, fmt.Errorf("invalid exporter input: Ref and Refs are mutually exclusive")
    }

    // only one loop
@@ -99,22 +127,29 @@ func (e *imageExporterInstance) Export(ctx context.Context, inp *exporter.Source
    }

    var config []byte
    var buildInfo []byte
    switch len(inp.Refs) {
    case 0:
        config = inp.Metadata[exptypes.ExporterImageConfigKey]
        if v, ok := inp.Metadata[exptypes.ExporterBuildInfo]; ok {
            buildInfo = v
        }
    case 1:
        platformsBytes, ok := inp.Metadata[exptypes.ExporterPlatformsKey]
        if !ok {
            return nil, nil, fmt.Errorf("cannot export image, missing platforms mapping")
            return nil, fmt.Errorf("cannot export image, missing platforms mapping")
        }
        var p exptypes.Platforms
        if err := json.Unmarshal(platformsBytes, &p); err != nil {
            return nil, nil, errors.Wrapf(err, "failed to parse platforms passed to exporter")
            return nil, errors.Wrapf(err, "failed to parse platforms passed to exporter")
        }
        if len(p.Platforms) != len(inp.Refs) {
            return nil, nil, errors.Errorf("number of platforms does not match references %d %d", len(p.Platforms), len(inp.Refs))
            return nil, errors.Errorf("number of platforms does not match references %d %d", len(p.Platforms), len(inp.Refs))
        }
        config = inp.Metadata[fmt.Sprintf("%s/%s", exptypes.ExporterImageConfigKey, p.Platforms[0].ID)]
        if v, ok := inp.Metadata[fmt.Sprintf("%s/%s", exptypes.ExporterBuildInfo, p.Platforms[0].ID)]; ok {
            buildInfo = v
        }
    }

    var diffs []digest.Digest
@@ -122,16 +157,16 @@ func (e *imageExporterInstance) Export(ctx context.Context, inp *exporter.Source
        layersDone := oneOffProgress(ctx, "exporting layers")

        if err := ref.Finalize(ctx); err != nil {
            return nil, nil, layersDone(err)
            return nil, layersDone(err)
        }

        if err := ref.Extract(ctx, nil); err != nil {
            return nil, nil, err
            return nil, err
        }

        diffIDs, err := e.opt.Differ.EnsureLayer(ctx, ref.ID())
        if err != nil {
            return nil, nil, layersDone(err)
            return nil, layersDone(err)
        }

        diffs = make([]digest.Digest, len(diffIDs))
@@ -146,20 +181,20 @@ func (e *imageExporterInstance) Export(ctx context.Context, inp *exporter.Source
        var err error
        config, err = emptyImageConfig()
        if err != nil {
            return nil, nil, err
            return nil, err
        }
    }

    history, err := parseHistoryFromConfig(config)
    if err != nil {
        return nil, nil, err
        return nil, err
    }

    diffs, history = normalizeLayersAndHistory(diffs, history, ref)

    config, err = patchImageConfig(config, diffs, history, inp.Metadata[exptypes.ExporterInlineCache])
    config, err = patchImageConfig(config, diffs, history, inp.Metadata[exptypes.ExporterInlineCache], buildInfo)
    if err != nil {
        return nil, nil, err
        return nil, err
    }

    configDigest := digest.FromBytes(config)
@@ -167,15 +202,15 @@ func (e *imageExporterInstance) Export(ctx context.Context, inp *exporter.Source
    configDone := oneOffProgress(ctx, fmt.Sprintf("writing image %s", configDigest))
    id, err := e.opt.ImageStore.Create(config)
    if err != nil {
        return nil, nil, configDone(err)
        return nil, configDone(err)
    }
    _ = configDone(nil)

    if e.opt.ImageTagger != nil {
    if e.opt.ReferenceStore != nil {
        for _, targetName := range e.targetNames {
            tagDone := oneOffProgress(ctx, "naming to "+targetName.String())
            if err := e.opt.ImageTagger.TagImage(ctx, image.ID(digest.Digest(id)), targetName); err != nil {
                return nil, nil, tagDone(err)
            if err := e.opt.ReferenceStore.AddTag(targetName, digest.Digest(id), true); err != nil {
                return nil, tagDone(err)
            }
            _ = tagDone(nil)
        }
@@ -184,5 +219,5 @@ func (e *imageExporterInstance) Export(ctx context.Context, inp *exporter.Source
    return map[string]string{
        exptypes.ExporterImageConfigDigestKey: configDigest.String(),
        exptypes.ExporterImageDigestKey:       id.String(),
    }, nil, nil
    }, nil
}
@@ -1,3 +0,0 @@
package exporter

const Moby = "moby"
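Resolve above treats an empty attribute value as "use the default" and otherwise requires a parseable bool. A small helper capturing that convention (the helper name is hypothetical; the parsing behaviour is the same strconv.ParseBool call used above):

package containerimage

import (
    "strconv"

    "github.com/pkg/errors"
)

// parseBoolAttr mirrors the keyBuildInfo/keyBuildInfoAttrs handling above:
// an empty value selects the default, anything else must be a valid bool.
func parseBoolAttr(key, value string, def bool) (bool, error) {
    if value == "" {
        return def, nil
    }
    b, err := strconv.ParseBool(value)
    if err != nil {
        return false, errors.Wrapf(err, "non-bool value specified for %s", key)
    }
    return b, nil
}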
@@ -1,34 +0,0 @@
package overrides

import (
    "errors"

    "github.com/docker/distribution/reference"
)

// SanitizeRepoAndTags parses the raw names to a slice of repoAndTag.
// It removes duplicates and validates each repoName and tag to not contain a digest.
func SanitizeRepoAndTags(names []string) (repoAndTags []string, err error) {
    uniqNames := map[string]struct{}{}
    for _, repo := range names {
        if repo == "" {
            continue
        }

        ref, err := reference.ParseNormalizedNamed(repo)
        if err != nil {
            return nil, err
        }

        if _, ok := ref.(reference.Digested); ok {
            return nil, errors.New("build tag cannot contain a digest")
        }

        nameWithTag := reference.TagNameOnly(ref).String()
        if _, exists := uniqNames[nameWithTag]; !exists {
            uniqNames[nameWithTag] = struct{}{}
            repoAndTags = append(repoAndTags, nameWithTag)
        }
    }
    return repoAndTags, nil
}
@@ -1,44 +0,0 @@
package overrides

import (
    "context"
    "strings"

    "github.com/moby/buildkit/exporter"
)

// TODO(vvoland): Use buildkit consts once they're public
// https://github.com/moby/buildkit/pull/3694
const (
    keyImageName      = "name"
    keyUnpack         = "unpack"
    keyDanglingPrefix = "dangling-name-prefix"
)

// Wraps the containerimage exporter's Resolve method to apply moby-specific
// overrides to the exporter attributes.
type imageExporterMobyWrapper struct {
    exp exporter.Exporter
}

func NewExporterWrapper(exp exporter.Exporter) (exporter.Exporter, error) {
    return &imageExporterMobyWrapper{exp: exp}, nil
}

// Resolve applies moby specific attributes to the request.
func (e *imageExporterMobyWrapper) Resolve(ctx context.Context, exporterAttrs map[string]string) (exporter.ExporterInstance, error) {
    if exporterAttrs == nil {
        exporterAttrs = make(map[string]string)
    }
    reposAndTags, err := SanitizeRepoAndTags(strings.Split(exporterAttrs[keyImageName], ","))
    if err != nil {
        return nil, err
    }
    exporterAttrs[keyImageName] = strings.Join(reposAndTags, ",")
    exporterAttrs[keyUnpack] = "true"
    if _, has := exporterAttrs[keyDanglingPrefix]; !has {
        exporterAttrs[keyDanglingPrefix] = "moby-dangling"
    }

    return e.exp.Resolve(ctx, exporterAttrs)
}
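As a quick illustration of what SanitizeRepoAndTags does with duplicates and default tags; the expected output below follows from reference.TagNameOnly's normalization rules and is an assumption, not output captured from this branch:

package main

import (
    "fmt"

    "github.com/docker/docker/builder/builder-next/exporter/overrides"
)

func main() {
    // "ubuntu" and "ubuntu:latest" normalize to the same tagged name, so
    // only one entry survives; empty names are skipped entirely.
    tags, err := overrides.SanitizeRepoAndTags([]string{"ubuntu", "ubuntu:latest", ""})
    fmt.Println(tags, err)
    // Expected, per reference.TagNameOnly: [docker.io/library/ubuntu:latest] <nil>
}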
@@ -1,4 +1,4 @@
package mobyexporter
package containerimage

import (
    "context"
@@ -7,6 +7,7 @@ import (

    "github.com/containerd/containerd/platforms"
    "github.com/moby/buildkit/cache"
    binfotypes "github.com/moby/buildkit/util/buildinfo/types"
    "github.com/moby/buildkit/util/progress"
    "github.com/moby/buildkit/util/system"
    "github.com/opencontainers/go-digest"
@@ -15,6 +16,10 @@ import (
    "github.com/sirupsen/logrus"
)

// const (
//     emptyGZLayer = digest.Digest("sha256:4f4fb700ef54461cfa02571ae0db9a0dc1e0cdb5577484a6d75e68dc38e8acc1")
// )

func emptyImageConfig() ([]byte, error) {
    pl := platforms.Normalize(platforms.DefaultSpec())
    img := ocispec.Image{}
@@ -38,7 +43,7 @@ func parseHistoryFromConfig(dt []byte) ([]ocispec.History, error) {
    return config.History, nil
}

func patchImageConfig(dt []byte, dps []digest.Digest, history []ocispec.History, cache []byte) ([]byte, error) {
func patchImageConfig(dt []byte, dps []digest.Digest, history []ocispec.History, cache []byte, buildInfo []byte) ([]byte, error) {
    m := map[string]json.RawMessage{}
    if err := json.Unmarshal(dt, &m); err != nil {
        return nil, errors.Wrap(err, "failed to parse image config for patch")
@@ -82,6 +87,16 @@ func patchImageConfig(dt []byte, dps []digest.Digest, history []ocispec.History,
        m["moby.buildkit.cache.v0"] = dt
    }

    if buildInfo != nil {
        dt, err := json.Marshal(buildInfo)
        if err != nil {
            return nil, err
        }
        m[binfotypes.ImageConfigField] = dt
    } else {
        delete(m, binfotypes.ImageConfigField)
    }

    dt, err = json.Marshal(m)
    return dt, errors.Wrap(err, "failed to marshal config after patch")
}
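patchImageConfig edits the image config without a full schema by round-tripping it through map[string]json.RawMessage, so fields it does not touch survive byte-for-byte. A stdlib-only sketch of that technique:

package main

import (
    "encoding/json"
    "fmt"
)

func main() {
    orig := []byte(`{"architecture":"amd64","os":"linux","rootfs":{"type":"layers"}}`)

    // Unmarshal into raw messages: unknown fields are preserved verbatim.
    m := map[string]json.RawMessage{}
    if err := json.Unmarshal(orig, &m); err != nil {
        panic(err)
    }

    // Patch one field; everything else is carried through untouched.
    m["moby.buildkit.cache.v0"] = json.RawMessage(`"example"`)

    patched, err := json.Marshal(m)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(patched))
}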
@@ -1,40 +0,0 @@
package worker

import (
    "context"

    mobyexporter "github.com/docker/docker/builder/builder-next/exporter"
    "github.com/docker/docker/builder/builder-next/exporter/overrides"
    "github.com/moby/buildkit/client"
    "github.com/moby/buildkit/exporter"
    "github.com/moby/buildkit/session"
    "github.com/moby/buildkit/worker/base"
)

// ContainerdWorker is a local worker instance with dedicated snapshotter, cache, and so on.
type ContainerdWorker struct {
    *base.Worker
}

// NewContainerdWorker instantiates a local worker.
func NewContainerdWorker(ctx context.Context, wo base.WorkerOpt) (*ContainerdWorker, error) {
    bw, err := base.NewWorker(ctx, wo)
    if err != nil {
        return nil, err
    }
    return &ContainerdWorker{Worker: bw}, nil
}

// Exporter returns exporter by name
func (w *ContainerdWorker) Exporter(name string, sm *session.Manager) (exporter.Exporter, error) {
    switch name {
    case mobyexporter.Moby:
        exp, err := w.Worker.Exporter(client.ExporterImage, sm)
        if err != nil {
            return nil, err
        }
        return overrides.NewExporterWrapper(exp)
    default:
        return w.Worker.Exporter(name, sm)
    }
}
@@ -2,7 +2,6 @@ package worker

import (
    "math"
    "time"

    "github.com/moby/buildkit/client"
)
@@ -31,12 +30,12 @@ func DefaultGCPolicy(p string, defaultKeepBytes int64) []client.PruneInfo {
        // if build cache uses more than 512MB delete the most easily reproducible data after it has not been used for 2 days
        {
            Filter:       []string{"type==source.local,type==exec.cachemount,type==source.git.checkout"},
            KeepDuration: 48 * time.Hour,
            KeepDuration: 48 * 3600, // 48h
            KeepBytes:    tempCacheKeepBytes,
        },
        // remove any data not used for 60 days
        {
            KeepDuration: 60 * 24 * time.Hour,
            KeepDuration: 60 * 24 * 3600, // 60d
            KeepBytes:    keep,
        },
        // keep the unshared build cache under cap

@@ -1,9 +0,0 @@
package label

// Pre-defined label keys similar to BuildKit ones
// https://github.com/moby/buildkit/blob/v0.11.6/worker/label/label.go#L3-L16
const (
    prefix = "org.mobyproject.buildkit.worker.moby."

    HostGatewayIP = prefix + "host-gateway-ip"
)
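The two sides of the GC hunk encode the same retention windows; one side types KeepDuration as a time.Duration, the other as raw seconds with an explanatory comment. A tiny check of that equivalence:

package main

import (
    "fmt"
    "time"
)

func main() {
    // 48 * 3600 seconds is the same window as 48 * time.Hour, and
    // 60 * 24 * 3600 seconds the same as 60 days.
    fmt.Println(48*time.Hour == time.Duration(48*3600)*time.Second)       // true
    fmt.Println(60*24*time.Hour == time.Duration(60*24*3600)*time.Second) // true
}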
@@ -9,18 +9,15 @@ import (

    "github.com/containerd/containerd/content"
    "github.com/containerd/containerd/images"
    "github.com/containerd/containerd/leases"
    "github.com/containerd/containerd/platforms"
    "github.com/containerd/containerd/rootfs"
    "github.com/docker/docker/builder/builder-next/adapters/containerimage"
    mobyexporter "github.com/docker/docker/builder/builder-next/exporter"
    distmetadata "github.com/docker/docker/distribution/metadata"
    "github.com/docker/docker/distribution/xfer"
    "github.com/docker/docker/image"
    "github.com/docker/docker/layer"
    pkgprogress "github.com/docker/docker/pkg/progress"
    "github.com/moby/buildkit/cache"
    cacheconfig "github.com/moby/buildkit/cache/config"
    "github.com/moby/buildkit/client"
    "github.com/moby/buildkit/client/llb"
    "github.com/moby/buildkit/executor"
@@ -39,9 +36,9 @@ import (
    "github.com/moby/buildkit/source/http"
    "github.com/moby/buildkit/source/local"
    "github.com/moby/buildkit/util/archutil"
    "github.com/moby/buildkit/util/compression"
    "github.com/moby/buildkit/util/contentutil"
    "github.com/moby/buildkit/util/progress"
    "github.com/moby/buildkit/version"
    "github.com/opencontainers/go-digest"
    ocispec "github.com/opencontainers/image-spec/specs-go/v1"
    "github.com/pkg/errors"
@@ -49,10 +46,6 @@
    "golang.org/x/sync/semaphore"
)

func init() {
    version.Version = "v0.11.7+435cb77e369c"
}

const labelCreatedAt = "buildkit/createdat"

// LayerAccess provides access to a moby layer from a snapshot
@@ -70,7 +63,6 @@ type Opt struct {
    Snapshotter       snapshot.Snapshotter
    ContentStore      content.Store
    CacheManager      cache.Manager
    LeaseManager      leases.Manager
    ImageSource       *containerimage.Source
    DownloadManager   *xfer.LayerDownloadManager
    V2MetadataService distmetadata.V2MetadataService
@@ -87,10 +79,6 @@ type Worker struct {
    SourceManager *source.Manager
}

var _ interface {
    GetRemotes(context.Context, cache.ImmutableRef, bool, cacheconfig.RefConfig, bool, session.Group) ([]*solver.Remote, error)
} = &Worker{}

// NewWorker instantiates a local worker
func NewWorker(opt Opt) (*Worker, error) {
    sm, err := source.NewManager()
@@ -169,42 +157,17 @@ func (w *Worker) GCPolicy() []client.PruneInfo {
    return w.Opt.GCPolicy
}

// BuildkitVersion returns BuildKit version
func (w *Worker) BuildkitVersion() client.BuildkitVersion {
    return client.BuildkitVersion{
        Package:  version.Package,
        Version:  version.Version + "-moby",
        Revision: version.Revision,
    }
}

// Close closes the worker and releases all resources
func (w *Worker) Close() error {
    return nil
}

// ContentStore returns content store
func (w *Worker) ContentStore() content.Store {
    return w.Opt.ContentStore
}

// LeaseManager returns leases.Manager for the worker
func (w *Worker) LeaseManager() leases.Manager {
    return w.Opt.LeaseManager
}

// LoadRef loads a reference by ID
func (w *Worker) LoadRef(ctx context.Context, id string, hidden bool) (cache.ImmutableRef, error) {
    var opts []cache.RefOption
    if hidden {
        opts = append(opts, cache.NoUpdateLastUsed)
    }
    if id == "" {
        // results can have nil refs if they are optimized out to be equal to scratch,
        // i.e. Diff(A,A) == scratch
        return nil, nil
    }

    return w.CacheManager().Get(ctx, id, nil, opts...)
}

@@ -249,7 +212,7 @@ func (w *Worker) Prune(ctx context.Context, ch chan client.UsageInfo, info ...cl
// Exporter returns exporter by name
func (w *Worker) Exporter(name string, sm *session.Manager) (exporter.Exporter, error) {
    switch name {
    case mobyexporter.Moby:
    case "moby":
        return w.Opt.Exporter, nil
    case client.ExporterLocal:
        return localexporter.New(localexporter.Opt{
@@ -264,11 +227,8 @@ func (w *Worker) Exporter(name string, sm *session.Manager) (exporter.Exporter,
    }
}

// GetRemotes returns the remote snapshot references given a local reference
func (w *Worker) GetRemotes(ctx context.Context, ref cache.ImmutableRef, createIfNeeded bool, _ cacheconfig.RefConfig, all bool, s session.Group) ([]*solver.Remote, error) {
    if ref == nil {
        return nil, nil
    }
// GetRemote returns a remote snapshot reference for a local one
func (w *Worker) GetRemote(ctx context.Context, ref cache.ImmutableRef, createIfNeeded bool, _ compression.Type, s session.Group) (*solver.Remote, error) {
    var diffIDs []layer.DiffID
    var err error
    if !createIfNeeded {
@@ -298,10 +258,10 @@
        }
    }

    return []*solver.Remote{{
    return &solver.Remote{
        Descriptors: descriptors,
        Provider:    &emptyProvider{},
    }}, nil
    }, nil
}

// PruneCacheMounts removes the current cache snapshots for specified IDs
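The `var _ interface{ ... } = &Worker{}` line above is a compile-time assertion: the package stops building the moment *Worker's method set drifts away from the anonymous interface. The idiom in isolation, with toy stand-in types:

package worker

// Remote stands in for solver.Remote; the point is the assertion pattern,
// not the real signature.
type Remote struct{}

type Worker struct{}

func (w *Worker) GetRemotes() ([]*Remote, error) { return nil, nil }

// Compile-time check: if GetRemotes changes shape, this declaration fails
// to compile, catching the drift immediately.
var _ interface {
    GetRemotes() ([]*Remote, error)
} = &Worker{}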
@@ -14,8 +14,7 @@ import (
    containerpkg "github.com/docker/docker/container"
    "github.com/docker/docker/image"
    "github.com/docker/docker/layer"
    "github.com/opencontainers/go-digest"
    ocispec "github.com/opencontainers/image-spec/specs-go/v1"
    "github.com/docker/docker/pkg/containerfs"
)

const (
@@ -27,7 +26,7 @@ const (
// instructions in the builder.
type Source interface {
    // Root returns root path for accessing source
    Root() string
    Root() containerfs.ContainerFS
    // Close allows to signal that the filesystem tree won't be used anymore.
    // For Context implementations using a temporary directory, it is recommended to
    // delete the temporary directory in Close().
@@ -43,11 +42,11 @@ type Backend interface {

    // CommitBuildStep creates a new Docker image from the config generated by
    // a build step.
    CommitBuildStep(context.Context, backend.CommitConfig) (image.ID, error)
    CommitBuildStep(backend.CommitConfig) (image.ID, error)
    // ContainerCreateWorkdir creates the workdir
    ContainerCreateWorkdir(containerID string) error

    CreateImage(ctx context.Context, config []byte, parent string, contentStoreDigest digest.Digest) (Image, error)
    CreateImage(config []byte, parent string) (Image, error)

    ImageCacheBuilder
}
@@ -62,13 +61,13 @@ type ExecBackend interface {
    // ContainerAttachRaw attaches to container.
    ContainerAttachRaw(cID string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool, attached chan struct{}) error
    // ContainerCreateIgnoreImagesArgsEscaped creates a new Docker container and returns potential warnings
    ContainerCreateIgnoreImagesArgsEscaped(ctx context.Context, config types.ContainerCreateConfig) (container.CreateResponse, error)
    ContainerCreateIgnoreImagesArgsEscaped(config types.ContainerCreateConfig) (container.CreateResponse, error)
    // ContainerRm removes a container specified by `id`.
    ContainerRm(name string, config *types.ContainerRmConfig) error
    // ContainerKill stops the container execution abruptly.
    ContainerKill(containerID string, sig string) error
    // ContainerStart starts a new container
    ContainerStart(ctx context.Context, containerID string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error
    ContainerStart(containerID string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error
    // ContainerWait stops processing until the given container is stopped.
    ContainerWait(ctx context.Context, name string, condition containerpkg.WaitCondition) (<-chan containerpkg.StateStatus, error)
}
@@ -82,7 +81,7 @@ type Result struct {
// ImageCacheBuilder represents a generator for stateful image cache.
type ImageCacheBuilder interface {
    // MakeImageCache creates a stateful image cache.
    MakeImageCache(ctx context.Context, cacheFrom []string) (ImageCache, error)
    MakeImageCache(cacheFrom []string) ImageCache
}

// ImageCache abstracts an image cache.
@@ -90,7 +89,7 @@ type ImageCacheBuilder interface {
type ImageCache interface {
    // GetCache returns a reference to a cached image whose parent equals `parent`
    // and runconfig equals `cfg`. A cache miss is expected to return an empty ID and a nil error.
    GetCache(parentID string, cfg *container.Config, platform ocispec.Platform) (imageID string, err error)
    GetCache(parentID string, cfg *container.Config) (imageID string, err error)
}

// Image represents a Docker image used by the builder.
@@ -106,12 +105,11 @@ type ROLayer interface {
    Release() error
    NewRWLayer() (RWLayer, error)
    DiffID() layer.DiffID
    ContentStoreDigest() digest.Digest
}

// RWLayer is active layer that can be read/modified
type RWLayer interface {
    Release() error
    Root() string
    Root() containerfs.ContainerFS
    Commit() (ROLayer, error)
}
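The Source interface above pairs Root with Close so temporary build contexts can clean themselves up, as its doc comment recommends. A toy implementation over a throwaway directory; tempSource is illustrative and uses the string-rooted variant of the interface shown above:

package builder

import "os"

// Source is trimmed to the two methods discussed above.
type Source interface {
    Root() string
    Close() error
}

// tempSource serves a build context from a temporary directory and removes
// it in Close.
type tempSource struct{ dir string }

func newTempSource() (Source, error) {
    dir, err := os.MkdirTemp("", "build-context")
    if err != nil {
        return nil, err
    }
    return &tempSource{dir: dir}, nil
}

func (s *tempSource) Root() string { return s.dir }
func (s *tempSource) Close() error { return os.RemoveAll(s.dir) }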
@@ -21,7 +21,7 @@ import (
    "github.com/moby/buildkit/frontend/dockerfile/instructions"
    "github.com/moby/buildkit/frontend/dockerfile/parser"
    "github.com/moby/buildkit/frontend/dockerfile/shell"
    ocispec "github.com/opencontainers/image-spec/specs-go/v1"
    specs "github.com/opencontainers/image-spec/specs-go/v1"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
    "golang.org/x/sync/syncmap"
@@ -95,7 +95,7 @@ func (bm *BuildManager) Build(ctx context.Context, config backend.BuildConfig) (
    if err != nil {
        return nil, err
    }
    return b.build(ctx, source, dockerfile)
    return b.build(source, dockerfile)
}

// builderOptions are the dependencies required by the builder
@@ -117,7 +117,8 @@ type Builder struct {
    Aux    *streamformatter.AuxFormatter
    Output io.Writer

    docker builder.Backend
    docker    builder.Backend
    clientCtx context.Context

    idMapping     idtools.IdentityMapping
    disableCommit bool
@@ -125,22 +126,18 @@ type Builder struct {
    pathCache        pathCache
    containerManager *containerManager
    imageProber      ImageProber
    platform         *ocispec.Platform
    platform         *specs.Platform
}

// newBuilder creates a new Dockerfile builder from an optional dockerfile and a Options.
func newBuilder(ctx context.Context, options builderOptions) (*Builder, error) {
func newBuilder(clientCtx context.Context, options builderOptions) (*Builder, error) {
    config := options.Options
    if config == nil {
        config = new(types.ImageBuildOptions)
    }

    imageProber, err := newImageProber(ctx, options.Backend, config.CacheFrom, config.NoCache)
    if err != nil {
        return nil, err
    }

    b := &Builder{
        clientCtx: clientCtx,
        options:   config,
        Stdout:    options.ProgressWriter.StdoutFormatter,
        Stderr:    options.ProgressWriter.StderrFormatter,
@@ -148,9 +145,9 @@ func newBuilder(ctx context.Context, options builderOptions) (*Builder, error) {
        Output:           options.ProgressWriter.Output,
        docker:           options.Backend,
        idMapping:        options.IDMapping,
        imageSources:     newImageSources(options),
        imageSources:     newImageSources(clientCtx, options),
        pathCache:        options.PathCache,
        imageProber:      imageProber,
        imageProber:      newImageProber(options.Backend, config.CacheFrom, config.NoCache),
        containerManager: newContainerManager(options.Backend),
    }

@@ -184,7 +181,7 @@ func buildLabelOptions(labels map[string]string, stages []instructions.Stage) {

// Build runs the Dockerfile builder by parsing the Dockerfile and executing
// the instructions from the file.
func (b *Builder) build(ctx context.Context, source builder.Source, dockerfile *parser.Result) (*builder.Result, error) {
func (b *Builder) build(source builder.Source, dockerfile *parser.Result) (*builder.Result, error) {
    defer b.imageSources.Unmount()

    stages, metaArgs, err := instructions.Parse(dockerfile.AST)
@@ -208,7 +205,7 @@ func (b *Builder) build(ctx context.Context, source builder.Source, dockerfile *
    buildLabelOptions(b.options.Labels, stages)

    dockerfile.PrintWarnings(b.Stderr)
    dispatchState, err := b.dispatchDockerfileWithCancellation(ctx, stages, metaArgs, dockerfile.EscapeToken, source)
    dispatchState, err := b.dispatchDockerfileWithCancellation(stages, metaArgs, dockerfile.EscapeToken, source)
    if err != nil {
        return nil, err
    }
@@ -247,7 +244,7 @@ func printCommand(out io.Writer, currentCommandIndex int, totalCommands int, cmd
    return currentCommandIndex + 1
}

func (b *Builder) dispatchDockerfileWithCancellation(ctx context.Context, parseResult []instructions.Stage, metaArgs []instructions.ArgCommand, escapeToken rune, source builder.Source) (*dispatchState, error) {
func (b *Builder) dispatchDockerfileWithCancellation(parseResult []instructions.Stage, metaArgs []instructions.ArgCommand, escapeToken rune, source builder.Source) (*dispatchState, error) {
    dispatchRequest := dispatchRequest{}
    buildArgs := NewBuildArgs(b.options.BuildArgs)
    totalCommands := len(metaArgs) + len(parseResult)
@@ -275,14 +272,14 @@ func (b *Builder) dispatchDockerfileWithCancellation(ctx context.Context, parseR
        dispatchRequest = newDispatchRequest(b, escapeToken, source, buildArgs, stagesResults)

        currentCommandIndex = printCommand(b.Stdout, currentCommandIndex, totalCommands, stage.SourceCode)
        if err := initializeStage(ctx, dispatchRequest, &stage); err != nil {
        if err := initializeStage(dispatchRequest, &stage); err != nil {
            return nil, err
        }
        dispatchRequest.state.updateRunConfig()
        fmt.Fprintf(b.Stdout, " ---> %s\n", stringid.TruncateID(dispatchRequest.state.imageID))
        for _, cmd := range stage.Commands {
            select {
            case <-ctx.Done():
            case <-b.clientCtx.Done():
                logrus.Debug("Builder: build cancelled!")
                fmt.Fprint(b.Stdout, "Build cancelled\n")
                buildsFailed.WithValues(metricsBuildCanceled).Inc()
@@ -293,7 +290,7 @@ func (b *Builder) dispatchDockerfileWithCancellation(ctx context.Context, parseR

            currentCommandIndex = printCommand(b.Stdout, currentCommandIndex, totalCommands, cmd)

            if err := dispatch(ctx, dispatchRequest, cmd); err != nil {
            if err := dispatch(dispatchRequest, cmd); err != nil {
                return nil, err
            }
            dispatchRequest.state.updateRunConfig()
@@ -320,7 +317,7 @@ func (b *Builder) dispatchDockerfileWithCancellation(ctx context.Context, parseR
// coming from the query parameter of the same name.
//
// TODO: Remove?
func BuildFromConfig(ctx context.Context, config *container.Config, changes []string, os string) (*container.Config, error) {
func BuildFromConfig(config *container.Config, changes []string, os string) (*container.Config, error) {
    if len(changes) == 0 {
        return config, nil
    }
@@ -330,7 +327,7 @@ func BuildFromConfig(ctx context.Context, config *container.Config, changes []st
        return nil, errdefs.InvalidParameter(err)
    }

    b, err := newBuilder(ctx, builderOptions{
    b, err := newBuilder(context.Background(), builderOptions{
        Options: &types.ImageBuildOptions{NoCache: true},
    })
    if err != nil {
@@ -363,7 +360,7 @@ func BuildFromConfig(ctx context.Context, config *container.Config, changes []st
    dispatchRequest.state.imageID = config.Image
    dispatchRequest.state.operatingSystem = os
    for _, cmd := range commands {
        err := dispatch(ctx, dispatchRequest, cmd)
        err := dispatch(dispatchRequest, cmd)
        if err != nil {
            return nil, errdefs.InvalidParameter(err)
        }
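Both sides of the dispatch loop above poll a context (or the builder's clientCtx) between commands, so a cancelled build stops at an instruction boundary rather than mid-command. The shape of that loop, reduced to its essentials:

package dockerfile

import (
    "context"
    "fmt"
)

// dispatchAll runs commands in order, checking for cancellation before each
// one, like dispatchDockerfileWithCancellation above.
func dispatchAll(ctx context.Context, cmds []func() error) error {
    for i, cmd := range cmds {
        select {
        case <-ctx.Done():
            // Stop cleanly at an instruction boundary.
            return fmt.Errorf("build cancelled at step %d: %w", i+1, ctx.Err())
        default:
        }
        if err := cmd(); err != nil {
            return err
        }
    }
    return nil
}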
@@ -28,8 +28,8 @@ func newContainerManager(docker builder.ExecBackend) *containerManager {
}

// Create a container
func (c *containerManager) Create(ctx context.Context, runConfig *container.Config, hostConfig *container.HostConfig) (container.CreateResponse, error) {
    container, err := c.backend.ContainerCreateIgnoreImagesArgsEscaped(ctx, types.ContainerCreateConfig{
func (c *containerManager) Create(runConfig *container.Config, hostConfig *container.HostConfig) (container.CreateResponse, error) {
    container, err := c.backend.ContainerCreateIgnoreImagesArgsEscaped(types.ContainerCreateConfig{
        Config:     runConfig,
        HostConfig: hostConfig,
    })
@@ -69,7 +69,7 @@ func (c *containerManager) Run(ctx context.Context, cID string, stdout, stderr i
        }
    }()

    if err := c.backend.ContainerStart(ctx, cID, nil, "", ""); err != nil {
    if err := c.backend.ContainerStart(cID, nil, "", ""); err != nil {
        close(finished)
        logCancellationError(cancelErrCh, "error from ContainerStart: "+err.Error())
        return err
@@ -1,6 +1,7 @@
package dockerfile // import "github.com/docker/docker/builder/dockerfile"

import (
    "archive/tar"
    "fmt"
    "io"
    "mime"
@@ -8,6 +9,7 @@ import (
    "net/url"
    "os"
    "path/filepath"
    "runtime"
    "sort"
    "strings"
    "time"
@@ -18,12 +20,12 @@ import (
    "github.com/docker/docker/pkg/archive"
    "github.com/docker/docker/pkg/containerfs"
    "github.com/docker/docker/pkg/idtools"
    "github.com/docker/docker/pkg/longpath"
    "github.com/docker/docker/pkg/ioutils"
    "github.com/docker/docker/pkg/progress"
    "github.com/docker/docker/pkg/streamformatter"
    "github.com/docker/docker/pkg/system"
    "github.com/moby/buildkit/frontend/dockerfile/instructions"
    ocispec "github.com/opencontainers/image-spec/specs-go/v1"
    specs "github.com/opencontainers/image-spec/specs-go/v1"
    "github.com/pkg/errors"
)

@@ -37,14 +39,14 @@ type pathCache interface {
// copyInfo is a data object which stores the metadata about each source file in
// a copyInstruction
type copyInfo struct {
    root         string
    root         containerfs.ContainerFS
    path         string
    hash         string
    noDecompress bool
}

func (c copyInfo) fullPath() (string, error) {
    return containerfs.ResolveScopedPath(c.root, c.path)
    return c.root.ResolveScopedPath(c.path, true)
}

func newCopyInfoFromSource(source builder.Source, path string, hash string) copyInfo {
@@ -73,7 +75,7 @@ type copier struct {
    source    builder.Source
    pathCache pathCache
    download  sourceDownloader
    platform  ocispec.Platform
    platform  *specs.Platform
    // for cleanup. TODO: having copier.cleanup() is error prone and hard to
    // follow. Code calling performCopy should manage the lifecycle of its params.
    // Copier should take override source as input, not imageMount.
@@ -82,7 +84,19 @@ type copier struct {
}

func copierFromDispatchRequest(req dispatchRequest, download sourceDownloader, imageSource *imageMount) copier {
    platform := req.builder.getPlatform(req.state)
    platform := req.builder.platform
    if platform == nil {
        // May be nil if not explicitly set in API/dockerfile
        platform = &specs.Platform{}
    }
    if platform.OS == "" {
        // Default to the dispatch requests operating system if not explicit in API/dockerfile
        platform.OS = req.state.operatingSystem
    }
    if platform.OS == "" {
        // This is a failsafe just in case. Shouldn't be hit.
        platform.OS = runtime.GOOS
    }

    return copier{
        source: req.source,
@@ -145,7 +159,7 @@ func (o *copier) getCopyInfoForSourcePath(orig, dest string) ([]copyInfo, error)
        }
        path = unnamedFilename
    }
    o.tmpPaths = append(o.tmpPaths, remote.Root())
    o.tmpPaths = append(o.tmpPaths, remote.Root().Path())

    hash, err := remote.Hash(path)
    ci := newCopyInfoFromSource(remote, path, hash)
@@ -188,7 +202,7 @@ func (o *copier) calcCopyInfo(origPath string, allowWildcards bool) ([]copyInfo,

        o.source, err = remotecontext.NewLazySource(rwLayer.Root())
        if err != nil {
            return nil, errors.Wrapf(err, "failed to create context for copy from %s", rwLayer.Root())
            return nil, errors.Wrapf(err, "failed to create context for copy from %s", rwLayer.Root().Path())
        }
    }

@@ -245,7 +259,7 @@ func (o *copier) storeInPathCache(im *imageMount, path string, hash string) {
func (o *copier) copyWithWildcards(origPath string) ([]copyInfo, error) {
    root := o.source.Root()
    var copyInfos []copyInfo
    if err := filepath.WalkDir(root, func(path string, _ os.DirEntry, err error) error {
    if err := root.Walk(root.Path(), func(path string, info os.FileInfo, err error) error {
        if err != nil {
            return err
        }
@@ -257,7 +271,7 @@ func (o *copier) copyWithWildcards(origPath string) ([]copyInfo, error) {
        if rel == "." {
            return nil
        }
        if match, _ := filepath.Match(origPath, rel); !match {
        if match, _ := root.Match(origPath, rel); !match {
            return nil
        }

@@ -303,7 +317,7 @@ func walkSource(source builder.Source, origPath string) ([]string, error) {
    }
    // Must be a dir
    var subfiles []string
    err = filepath.WalkDir(fp, func(path string, _ os.DirEntry, err error) error {
    err = source.Root().Walk(fp, func(path string, info os.FileInfo, err error) error {
        if err != nil {
            return err
        }
@@ -377,7 +391,7 @@ func downloadSource(output io.Writer, stdout io.Writer, srcURL string) (remote b
    filename := getFilenameForDownload(u.Path, resp)

    // Prepare file in a tmp dir
    tmpDir, err := longpath.MkdirTemp("", "docker-remote")
    tmpDir, err := ioutils.TempDir("", "docker-remote")
    if err != nil {
        return
    }
@@ -428,14 +442,19 @@ func downloadSource(output io.Writer, stdout io.Writer, srcURL string) (remote b
        return
    }

    lc, err := remotecontext.NewLazySource(tmpDir)
    lc, err := remotecontext.NewLazySource(containerfs.NewLocalContainerFS(tmpDir))
    return lc, filename, err
}

type copyFileOptions struct {
    decompress bool
    identity   *idtools.Identity
    archiver   *archive.Archiver
    archiver   Archiver
}

type copyEndpoint struct {
    driver containerfs.Driver
    path   string
}

func performCopyForInfo(dest copyInfo, source copyInfo, options copyFileOptions) error {
@@ -451,77 +470,96 @@ func performCopyForInfo(dest copyInfo, source copyInfo, options copyFileOptions)

    archiver := options.archiver

    src, err := os.Stat(srcPath)
    srcEndpoint := &copyEndpoint{driver: source.root, path: srcPath}
    destEndpoint := &copyEndpoint{driver: dest.root, path: destPath}

    src, err := source.root.Stat(srcPath)
    if err != nil {
        return errors.Wrapf(err, "source path not found")
    }
    if src.IsDir() {
        return copyDirectory(archiver, srcPath, destPath, options.identity)
        return copyDirectory(archiver, srcEndpoint, destEndpoint, options.identity)
    }
    if options.decompress && archive.IsArchivePath(srcPath) && !source.noDecompress {
    if options.decompress && isArchivePath(source.root, srcPath) && !source.noDecompress {
        return archiver.UntarPath(srcPath, destPath)
    }

    destExistsAsDir, err := isExistingDirectory(destPath)
    destExistsAsDir, err := isExistingDirectory(destEndpoint)
    if err != nil {
        return err
    }
    // dest.path must be used because destPath has already been cleaned of any
    // trailing slash
    if endsInSlash(dest.path) || destExistsAsDir {
    if endsInSlash(dest.root, dest.path) || destExistsAsDir {
        // source.path must be used to get the correct filename when the source
        // is a symlink
        destPath = filepath.Join(destPath, filepath.Base(source.path))
        destPath = dest.root.Join(destPath, source.root.Base(source.path))
        destEndpoint = &copyEndpoint{driver: dest.root, path: destPath}
    }
    return copyFile(archiver, srcPath, destPath, options.identity)
    return copyFile(archiver, srcEndpoint, destEndpoint, options.identity)
}

func copyDirectory(archiver *archive.Archiver, source, dest string, identity *idtools.Identity) error {
func isArchivePath(driver containerfs.ContainerFS, path string) bool {
    file, err := driver.Open(path)
    if err != nil {
        return false
    }
    defer file.Close()
    rdr, err := archive.DecompressStream(file)
    if err != nil {
        return false
    }
    r := tar.NewReader(rdr)
    _, err = r.Next()
    return err == nil
}

func copyDirectory(archiver Archiver, source, dest *copyEndpoint, identity *idtools.Identity) error {
    destExists, err := isExistingDirectory(dest)
    if err != nil {
        return errors.Wrapf(err, "failed to query destination path")
    }

    if err := archiver.CopyWithTar(source, dest); err != nil {
    if err := archiver.CopyWithTar(source.path, dest.path); err != nil {
        return errors.Wrapf(err, "failed to copy directory")
    }
    if identity != nil {
        return fixPermissions(source, dest, *identity, !destExists)
        return fixPermissions(source.path, dest.path, *identity, !destExists)
    }
    return nil
}

func copyFile(archiver *archive.Archiver, source, dest string, identity *idtools.Identity) error {
func copyFile(archiver Archiver, source, dest *copyEndpoint, identity *idtools.Identity) error {
    if identity == nil {
        // Use system.MkdirAll here, which is a custom version of os.MkdirAll
        // modified for use on Windows to handle volume GUID paths. These paths
        // are of the form \\?\Volume{<GUID>}\<path>. An example would be:
        // \\?\Volume{dae8d3ac-b9a1-11e9-88eb-e8554b2ba1db}\bin\busybox.exe
        if err := system.MkdirAll(filepath.Dir(dest), 0755); err != nil {
        if err := system.MkdirAll(filepath.Dir(dest.path), 0755); err != nil {
            return err
        }
    } else {
        if err := idtools.MkdirAllAndChownNew(filepath.Dir(dest), 0755, *identity); err != nil {
        if err := idtools.MkdirAllAndChownNew(filepath.Dir(dest.path), 0755, *identity); err != nil {
            return errors.Wrapf(err, "failed to create new directory")
        }
    }

    if err := archiver.CopyFileWithTar(source, dest); err != nil {
    if err := archiver.CopyFileWithTar(source.path, dest.path); err != nil {
        return errors.Wrapf(err, "failed to copy file")
    }
    if identity != nil {
        return fixPermissions(source, dest, *identity, false)
        return fixPermissions(source.path, dest.path, *identity, false)
    }
    return nil
}

func endsInSlash(path string) bool {
    return strings.HasSuffix(path, string(filepath.Separator))
func endsInSlash(driver containerfs.Driver, path string) bool {
    return strings.HasSuffix(path, string(driver.Separator()))
}

// isExistingDirectory returns true if the path exists and is a directory
func isExistingDirectory(path string) (bool, error) {
    destStat, err := os.Stat(path)
func isExistingDirectory(point *copyEndpoint) (bool, error) {
    destStat, err := point.driver.Stat(point.path)
    switch {
    case errors.Is(err, os.ErrNotExist):
        return false, nil
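isArchivePath above decides "is this a tar archive?" by decompressing and attempting to read a single tar header. A stdlib-only demonstration of the same probe against an in-memory tarball:

package main

import (
    "archive/tar"
    "bytes"
    "fmt"
    "io"
)

// looksLikeTar reports whether r starts with a readable tar header, the same
// test isArchivePath applies after DecompressStream.
func looksLikeTar(r io.Reader) bool {
    _, err := tar.NewReader(r).Next()
    return err == nil
}

func main() {
    // Build a one-file tar archive in memory.
    var buf bytes.Buffer
    tw := tar.NewWriter(&buf)
    _ = tw.WriteHeader(&tar.Header{Name: "hello.txt", Size: 2, Mode: 0644})
    _, _ = tw.Write([]byte("hi"))
    _ = tw.Close()

    fmt.Println(looksLikeTar(&buf))                          // true
    fmt.Println(looksLikeTar(bytes.NewReader([]byte("no")))) // false
}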
@@ -4,6 +4,7 @@ import (
    "net/http"
    "testing"

    "github.com/docker/docker/pkg/containerfs"
    "gotest.tools/v3/assert"
    is "gotest.tools/v3/assert/cmp"
    "gotest.tools/v3/fs"
@@ -38,7 +39,7 @@ func TestIsExistingDirectory(t *testing.T) {
    }

    for _, testcase := range testcases {
        result, err := isExistingDirectory(testcase.path)
        result, err := isExistingDirectory(&copyEndpoint{driver: containerfs.NewLocalDriver(), path: testcase.path})
        if !assert.Check(t, err) {
            continue
        }
@@ -53,31 +54,31 @@ func TestGetFilenameForDownload(t *testing.T) {
        expected    string
    }{
        {
            path:     "https://www.example.com/",
            path:     "http://www.example.com/",
            expected: "",
        },
        {
            path:     "https://www.example.com/xyz",
            path:     "http://www.example.com/xyz",
            expected: "xyz",
        },
        {
            path:     "https://www.example.com/xyz.html",
            path:     "http://www.example.com/xyz.html",
            expected: "xyz.html",
        },
        {
            path:     "https://www.example.com/xyz/",
            path:     "http://www.example.com/xyz/",
            expected: "",
        },
        {
            path:     "https://www.example.com/xyz/uvw",
            path:     "http://www.example.com/xyz/uvw",
            expected: "uvw",
        },
        {
            path:     "https://www.example.com/xyz/uvw.html",
            path:     "http://www.example.com/xyz/uvw.html",
            expected: "uvw.html",
        },
        {
            path:     "https://www.example.com/xyz/uvw/",
            path:     "http://www.example.com/xyz/uvw/",
            expected: "",
        },
        {
@@ -114,23 +115,23 @@ func TestGetFilenameForDownload(t *testing.T) {
            expected: "xyz.html",
        },
        {
            disposition: `attachment; filename="xyz"`,
            disposition: "attachment; filename=\"xyz\"",
            expected:    "xyz",
        },
        {
            disposition: `attachment; filename="xyz.html"`,
            disposition: "attachment; filename=\"xyz.html\"",
            expected:    "xyz.html",
        },
        {
            disposition: `attachment; filename="/xyz.html"`,
            disposition: "attachment; filename=\"/xyz.html\"",
            expected:    "xyz.html",
        },
        {
            disposition: `attachment; filename="/xyz/uvw"`,
            disposition: "attachment; filename=\"/xyz/uvw\"",
            expected:    "uvw",
        },
        {
            disposition: `attachment; filename="Naïve file.txt"`,
            disposition: "attachment; filename=\"Naïve file.txt\"",
            expected:    "Naïve file.txt",
        },
    }
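The test table above exercises filename extraction from URLs and Content-Disposition headers. The disposition side can be reproduced with mime.ParseMediaType plus a base-name cleanup; this is a sketch of the technique the tests imply, not the actual getFilenameForDownload implementation:

package main

import (
    "fmt"
    "mime"
    "path"
)

// filenameFromDisposition extracts a safe base filename from a
// Content-Disposition header, matching the expectations in the test table.
func filenameFromDisposition(disposition string) string {
    _, params, err := mime.ParseMediaType(disposition)
    if err != nil {
        return ""
    }
    name := params["filename"]
    if name == "" {
        return ""
    }
    // Strip any path component, so "/xyz/uvw" yields "uvw".
    return path.Base(name)
}

func main() {
    fmt.Println(filenameFromDisposition(`attachment; filename="xyz.html"`)) // xyz.html
    fmt.Println(filenameFromDisposition(`attachment; filename="/xyz/uvw"`)) // uvw
}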
@@ -9,6 +9,7 @@ import (
    "path/filepath"
    "strings"

    "github.com/docker/docker/pkg/containerfs"
    "github.com/docker/docker/pkg/idtools"
)

@@ -18,7 +19,8 @@ func fixPermissions(source, destination string, identity idtools.Identity, overr
        err error
    )
    if !overrideSkip {
        skipChownRoot, err = isExistingDirectory(destination)
        destEndpoint := &copyEndpoint{driver: containerfs.NewLocalDriver(), path: destination}
        skipChownRoot, err = isExistingDirectory(destEndpoint)
        if err != nil {
            return err
        }
@@ -26,7 +28,7 @@ func fixPermissions(source, destination string, identity idtools.Identity, overr

    // We Walk on the source rather than on the destination because we don't
    // want to change permissions on things we haven't created or modified.
    return filepath.WalkDir(source, func(fullpath string, _ os.DirEntry, _ error) error {
    return filepath.Walk(source, func(fullpath string, _ os.FileInfo, _ error) error {
        // Do not alter the walk root iff. it existed before, as it doesn't fall under
        // the domain of "things we should chown".
        if skipChownRoot && source == fullpath {
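fixPermissions walks the source tree (not the destination) and optionally leaves the pre-existing root untouched. The walk-and-skip shape, using the newer filepath.WalkDir form from the left-hand side of the hunk; os.Chown stands in for the real permission fix and the helper name is illustrative:

package dockerfile

import (
    "io/fs"
    "os"
    "path/filepath"
)

// chownTree applies uid/gid to everything under root, optionally leaving the
// root itself alone when it existed before the copy, like fixPermissions.
func chownTree(root string, uid, gid int, skipRoot bool) error {
    return filepath.WalkDir(root, func(fullpath string, _ fs.DirEntry, err error) error {
        if err != nil {
            return err
        }
        if skipRoot && fullpath == root {
            // The root predates this copy; it is not ours to chown.
            return nil
        }
        return os.Chown(fullpath, uid, gid)
    })
}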
Some files were not shown because too many files have changed in this diff.