mirror of
https://github.com/moby/moby.git
synced 2026-01-17 10:51:29 +00:00
Compare commits
301 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
ed20165a37 | ||
|
|
52cef4bbee | ||
|
|
278cb7aed5 | ||
|
|
613a32482f | ||
|
|
9ae801cfd1 | ||
|
|
df1d66e6ba | ||
|
|
0873c3b57d | ||
|
|
83e7de55aa | ||
|
|
fb471aab26 | ||
|
|
70303ded8e | ||
|
|
16639f549e | ||
|
|
34fa29e8d4 | ||
|
|
16d0807e7e | ||
|
|
75d217961e | ||
|
|
c243ffaa06 | ||
|
|
e6b49956d8 | ||
|
|
4593b400f9 | ||
|
|
b81a2581ff | ||
|
|
9ccba2faf1 | ||
|
|
55b938cb98 | ||
|
|
46fa5a43ff | ||
|
|
75c408cb6c | ||
|
|
23b7bdf785 | ||
|
|
10dd4a25ba | ||
|
|
eee3f67571 | ||
|
|
78207d5380 | ||
|
|
34a8dcae17 | ||
|
|
5c2e0c6f9b | ||
|
|
61e218a502 | ||
|
|
3dd11dd0b5 | ||
|
|
0d0b1e77c0 | ||
|
|
94d56428a6 | ||
|
|
52ec660936 | ||
|
|
30549cdb4b | ||
|
|
62af652a5a | ||
|
|
63966dec02 | ||
|
|
997037964e | ||
|
|
7a82829520 | ||
|
|
678121ef05 | ||
|
|
970fb3c1df | ||
|
|
04ca2e6b92 | ||
|
|
8dbb761fad | ||
|
|
0a5ea9d310 | ||
|
|
c94dca16fa | ||
|
|
cbbff33086 | ||
|
|
949708a745 | ||
|
|
b81c762493 | ||
|
|
002bf3806e | ||
|
|
337f0fc80d | ||
|
|
c6a4351edd | ||
|
|
b40359b551 | ||
|
|
1d0821eee2 | ||
|
|
37536cdfa4 | ||
|
|
3040f3fdbb | ||
|
|
ea508a8574 | ||
|
|
d974674126 | ||
|
|
31cb280682 | ||
|
|
de941c990e | ||
|
|
5da534d8db | ||
|
|
24144dbdc9 | ||
|
|
da9289fb54 | ||
|
|
0b274cf18f | ||
|
|
55fc016efc | ||
|
|
5bed812503 | ||
|
|
3e266baca4 | ||
|
|
191b03834a | ||
|
|
63e03b155f | ||
|
|
130c0746c1 | ||
|
|
624ff4fef4 | ||
|
|
19fa3ab213 | ||
|
|
ab68b5dd9a | ||
|
|
9ad75d26fc | ||
|
|
c3a7556f73 | ||
|
|
89e812a1e6 | ||
|
|
e092ff3f74 | ||
|
|
2b33fe3512 | ||
|
|
93c18c73a3 | ||
|
|
cad2cd71b7 | ||
|
|
96e086dc33 | ||
|
|
cddce2dfa7 | ||
|
|
65f964aa6b | ||
|
|
8fca769bd5 | ||
|
|
ef5dd6e46d | ||
|
|
8533594ad6 | ||
|
|
32802bc7d9 | ||
|
|
4bed01298c | ||
|
|
56ca630f27 | ||
|
|
a02539b3e8 | ||
|
|
0dc7bdc325 | ||
|
|
b61ee6e4af | ||
|
|
56784591bf | ||
|
|
6eeb9ec3d6 | ||
|
|
dd7ef76474 | ||
|
|
0375566412 | ||
|
|
3678438dd8 | ||
|
|
1cc7b3881d | ||
|
|
03b1b078f9 | ||
|
|
5067389c36 | ||
|
|
6d98ef8c69 | ||
|
|
d5088c1488 | ||
|
|
df3689f8d0 | ||
|
|
3fd0be03f0 | ||
|
|
37d9901e0f | ||
|
|
29fe4e58c6 | ||
|
|
685565ad18 | ||
|
|
305b2416ea | ||
|
|
f5b64c3ffe | ||
|
|
9f9dab03c1 | ||
|
|
c7139be62b | ||
|
|
b0ef7422b0 | ||
|
|
1fbed3ffc9 | ||
|
|
dd85af0e12 | ||
|
|
3bbf7b0d4d | ||
|
|
bc9183ba0e | ||
|
|
47517880ec | ||
|
|
7b0cf8b16d | ||
|
|
47a7f762d3 | ||
|
|
8ba31dccd1 | ||
|
|
80376f9e13 | ||
|
|
ee64eae903 | ||
|
|
ff0a0e364b | ||
|
|
791aa3c338 | ||
|
|
6e9aba883c | ||
|
|
2f1984c6df | ||
|
|
640193b2bb | ||
|
|
97ca6434e0 | ||
|
|
c364e5d1ba | ||
|
|
3bf3a1ae65 | ||
|
|
439ed140ee | ||
|
|
a50d77700e | ||
|
|
6b7330dcd4 | ||
|
|
8ecf5409e9 | ||
|
|
6efcd74c6b | ||
|
|
eaa83640fa | ||
|
|
cbdf487768 | ||
|
|
b0f01be33f | ||
|
|
80e2871d21 | ||
|
|
4ef8f6d323 | ||
|
|
56ff8ccc91 | ||
|
|
e01625bc70 | ||
|
|
fa8dd90ceb | ||
|
|
509a793378 | ||
|
|
705d9623b7 | ||
|
|
c687381870 | ||
|
|
1eadbf1bd0 | ||
|
|
685f13f3fd | ||
|
|
638cf86cbe | ||
|
|
d27a919cd2 | ||
|
|
a69cd8239f | ||
|
|
8a2f96096a | ||
|
|
b07f53d0a4 | ||
|
|
e61e107040 | ||
|
|
023166b530 | ||
|
|
884c9e268f | ||
|
|
99678a93ed | ||
|
|
99cd23cefd | ||
|
|
4d3dfd24ec | ||
|
|
21ae66c664 | ||
|
|
da6dddcd04 | ||
|
|
d1b0475d89 | ||
|
|
42757e8794 | ||
|
|
3452f743ab | ||
|
|
b9cd7b59b6 | ||
|
|
8f4b96f19e | ||
|
|
186afe3ce3 | ||
|
|
a0063c534a | ||
|
|
9b97965f22 | ||
|
|
e3f83e7aa7 | ||
|
|
44023afb7d | ||
|
|
29ff2800c3 | ||
|
|
d44a48835f | ||
|
|
275bf7ec03 | ||
|
|
de45ce73eb | ||
|
|
ceb773e1ff | ||
|
|
60013ba69b | ||
|
|
96df6d4d0b | ||
|
|
a33a82b42f | ||
|
|
367870a4d5 | ||
|
|
175013d0cb | ||
|
|
a6905fa2e5 | ||
|
|
510e79ebe9 | ||
|
|
a8d1b4a1ab | ||
|
|
88374fa982 | ||
|
|
049a1090c3 | ||
|
|
020bb75219 | ||
|
|
a24b9087ce | ||
|
|
48786ba842 | ||
|
|
dde48c6715 | ||
|
|
e7c02a0508 | ||
|
|
31722d3f5a | ||
|
|
a81278befe | ||
|
|
cad766f6c7 | ||
|
|
f0f7020b5d | ||
|
|
65ba452bb0 | ||
|
|
76d936ae76 | ||
|
|
d1eae89590 | ||
|
|
7d1414ec3e | ||
|
|
5fbc0a16e2 | ||
|
|
0678d71038 | ||
|
|
746dce1994 | ||
|
|
36f0fe6524 | ||
|
|
737d57bad6 | ||
|
|
287240a965 | ||
|
|
ca602fa7c6 | ||
|
|
36324c3bbd | ||
|
|
21c33eb7e3 | ||
|
|
feb373a216 | ||
|
|
d7080a7a2e | ||
|
|
b915ec1e7b | ||
|
|
2de4afdee5 | ||
|
|
26a35ddcd1 | ||
|
|
d575af39ac | ||
|
|
57b59f876e | ||
|
|
3e057d527d | ||
|
|
9781cceb09 | ||
|
|
d0f4f42bd4 | ||
|
|
d59fb97c5b | ||
|
|
e1e47d090d | ||
|
|
a62d9b9c21 | ||
|
|
a004854097 | ||
|
|
5925508b31 | ||
|
|
5051fe047c | ||
|
|
57a9697161 | ||
|
|
936432326a | ||
|
|
9eeb2b5ef0 | ||
|
|
eaa3e69d14 | ||
|
|
80a35e0bd4 | ||
|
|
cdeef06801 | ||
|
|
181a64a5aa | ||
|
|
63eecadf82 | ||
|
|
2b216674da | ||
|
|
868d87b08e | ||
|
|
a7e03f69be | ||
|
|
96daf37c83 | ||
|
|
3dec835d84 | ||
|
|
4da607559f | ||
|
|
8dd7bd9981 | ||
|
|
7cc3681ad6 | ||
|
|
e205cd89cd | ||
|
|
c56df1abf3 | ||
|
|
d8185417d9 | ||
|
|
7cb78b6259 | ||
|
|
79ac8f95af | ||
|
|
1c346f16a3 | ||
|
|
d347049802 | ||
|
|
939aa52465 | ||
|
|
29c50668b3 | ||
|
|
55c5381584 | ||
|
|
750e0ace06 | ||
|
|
29498693dd | ||
|
|
56e92239a6 | ||
|
|
11319732ab | ||
|
|
853816ae79 | ||
|
|
8f61032ec4 | ||
|
|
bff7e300e6 | ||
|
|
ff44133643 | ||
|
|
9fdccf6a47 | ||
|
|
3f4657f6db | ||
|
|
dcc05fcf3e | ||
|
|
03ce4080a4 | ||
|
|
61828453db | ||
|
|
d371b283c3 | ||
|
|
4784740273 | ||
|
|
31b0688de7 | ||
|
|
6896305b57 | ||
|
|
931c4c1023 | ||
|
|
6cc14f5854 | ||
|
|
1910607215 | ||
|
|
dfa1031015 | ||
|
|
790388a8c5 | ||
|
|
ea09008423 | ||
|
|
8d8904f02b | ||
|
|
2a7513a972 | ||
|
|
c47f2a4a1a | ||
|
|
526a72fd77 | ||
|
|
f76879dd64 | ||
|
|
e7a837120d | ||
|
|
04c51495da | ||
|
|
02baf07d77 | ||
|
|
6d0823af0a | ||
|
|
8493fb18ae | ||
|
|
e8b9a752d3 | ||
|
|
14bb71d508 | ||
|
|
2e95499142 | ||
|
|
8d428458a2 | ||
|
|
5f60a56544 | ||
|
|
a3b4e92d66 | ||
|
|
cedf201aef | ||
|
|
545bc6b4d8 | ||
|
|
620d9d3c75 | ||
|
|
e1b045c25e | ||
|
|
11e2802015 | ||
|
|
cb8d67505d | ||
|
|
7d3405b4ba | ||
|
|
d36c7de19e | ||
|
|
6605a26c75 | ||
|
|
ce9cabf0f0 | ||
|
|
dc6d1ac663 | ||
|
|
1fdd24579c | ||
|
|
3afbf83cc5 | ||
|
|
61a234d562 |
@@ -3,7 +3,6 @@ curators:
|
||||
- alexellis
|
||||
- andrewhsu
|
||||
- anonymuse
|
||||
- arkodg
|
||||
- chanwit
|
||||
- ehazlett
|
||||
- fntlnz
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
.git
|
||||
.go-pkg-cache
|
||||
.gopath
|
||||
bundles
|
||||
.gopath
|
||||
vendor/pkg
|
||||
.go-pkg-cache
|
||||
.git
|
||||
|
||||
|
||||
4
.github/CODEOWNERS
vendored
4
.github/CODEOWNERS
vendored
@@ -6,10 +6,10 @@
|
||||
builder/** @tonistiigi
|
||||
contrib/mkimage/** @tianon
|
||||
daemon/graphdriver/devmapper/** @rhvgoyal
|
||||
daemon/graphdriver/lcow/** @johnstep
|
||||
daemon/graphdriver/lcow/** @johnstep @jhowardmsft
|
||||
daemon/graphdriver/overlay/** @dmcgowan
|
||||
daemon/graphdriver/overlay2/** @dmcgowan
|
||||
daemon/graphdriver/windows/** @johnstep
|
||||
daemon/graphdriver/windows/** @johnstep @jhowardmsft
|
||||
daemon/logger/awslogs/** @samuelkarp
|
||||
hack/** @tianon
|
||||
plugin/** @cpuguy83
|
||||
|
||||
2
.gitignore
vendored
2
.gitignore
vendored
@@ -17,6 +17,8 @@ autogen/
|
||||
bundles/
|
||||
cmd/dockerd/dockerd
|
||||
contrib/builder/rpm/*/changelog
|
||||
dockerversion/version_autogen.go
|
||||
dockerversion/version_autogen_unix.go
|
||||
vendor/pkg/
|
||||
go-test-report.json
|
||||
profile.out
|
||||
|
||||
84
.mailmap
84
.mailmap
@@ -10,8 +10,6 @@
|
||||
<mr.wrfly@gmail.com> <wrfly@users.noreply.github.com>
|
||||
Aaron L. Xu <liker.xu@foxmail.com>
|
||||
Abhinandan Prativadi <abhi@docker.com>
|
||||
Adam Dobrawy <naczelnik@jawnosc.tk>
|
||||
Adam Dobrawy <naczelnik@jawnosc.tk> <ad-m@users.noreply.github.com>
|
||||
Adrien Gallouët <adrien@gallouet.fr> <angt@users.noreply.github.com>
|
||||
Ahmed Kamal <email.ahmedkamal@googlemail.com>
|
||||
Ahmet Alp Balkan <ahmetb@microsoft.com> <ahmetalpbalkan@gmail.com>
|
||||
@@ -20,8 +18,8 @@ AJ Bowen <aj@soulshake.net> <aj@gandi.net>
|
||||
AJ Bowen <aj@soulshake.net> <amy@gandi.net>
|
||||
Akihiro Matsushima <amatsusbit@gmail.com> <amatsus@users.noreply.github.com>
|
||||
Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
|
||||
Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp> <suda.akihiro@lab.ntt.co.jp>
|
||||
Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp> <suda.kyoto@gmail.com>
|
||||
Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp> <suda.akihiro@lab.ntt.co.jp>
|
||||
Aleksa Sarai <asarai@suse.de>
|
||||
Aleksa Sarai <asarai@suse.de> <asarai@suse.com>
|
||||
Aleksa Sarai <asarai@suse.de> <cyphar@cyphar.com>
|
||||
@@ -37,7 +35,6 @@ Alexandre Beslic <alexandre.beslic@gmail.com> <abronan@docker.com>
|
||||
Alicia Lauerman <alicia@eta.im> <allydevour@me.com>
|
||||
Allen Sun <allensun.shl@alibaba-inc.com> <allen.sun@daocloud.io>
|
||||
Allen Sun <allensun.shl@alibaba-inc.com> <shlallen1990@gmail.com>
|
||||
Andrea Denisse Gómez <crypto.andrea@protonmail.ch>
|
||||
Andrew Weiss <andrew.weiss@docker.com> <andrew.weiss@microsoft.com>
|
||||
Andrew Weiss <andrew.weiss@docker.com> <andrew.weiss@outlook.com>
|
||||
Andrey Kolomentsev <andrey.kolomentsev@docker.com>
|
||||
@@ -54,8 +51,6 @@ Antonio Murdaca <antonio.murdaca@gmail.com> <runcom@users.noreply.github.com>
|
||||
Anuj Bahuguna <anujbahuguna.dev@gmail.com>
|
||||
Anuj Bahuguna <anujbahuguna.dev@gmail.com> <abahuguna@fiberlink.com>
|
||||
Anusha Ragunathan <anusha.ragunathan@docker.com> <anusha@docker.com>
|
||||
Arko Dasgupta <arko.dasgupta@docker.com>
|
||||
Arko Dasgupta <arko.dasgupta@docker.com> <arkodg@users.noreply.github.com>
|
||||
Arnaud Porterie <arnaud.porterie@docker.com>
|
||||
Arnaud Porterie <arnaud.porterie@docker.com> <icecrime@gmail.com>
|
||||
Arthur Gautier <baloo@gandi.net> <superbaloo+registrations.github@superbaloo.net>
|
||||
@@ -63,26 +58,23 @@ Avi Miller <avi.miller@oracle.com> <avi.miller@gmail.com>
|
||||
Ben Bonnefoy <frenchben@docker.com>
|
||||
Ben Golub <ben.golub@dotcloud.com>
|
||||
Ben Toews <mastahyeti@gmail.com> <mastahyeti@users.noreply.github.com>
|
||||
Benny Ng <benny.tpng@gmail.com>
|
||||
Benoit Chesneau <bchesneau@gmail.com>
|
||||
Bevisy Zhang <binbin36520@gmail.com>
|
||||
Bhiraj Butala <abhiraj.butala@gmail.com>
|
||||
Bhumika Bayani <bhumikabayani@gmail.com>
|
||||
Bilal Amarni <bilal.amarni@gmail.com> <bamarni@users.noreply.github.com>
|
||||
Bill Wang <ozbillwang@gmail.com> <SydOps@users.noreply.github.com>
|
||||
Bily Zhang <xcoder@tenxcloud.com>
|
||||
Bill Wang <ozbillwang@gmail.com> <SydOps@users.noreply.github.com>
|
||||
Bin Liu <liubin0329@gmail.com>
|
||||
Bin Liu <liubin0329@gmail.com> <liubin0329@users.noreply.github.com>
|
||||
Bingshen Wang <bingshen.wbs@alibaba-inc.com>
|
||||
Boaz Shuster <ripcurld.github@gmail.com>
|
||||
Boqin Qin <bobbqqin@gmail.com>
|
||||
Brandon Philips <brandon.philips@coreos.com> <brandon@ifup.co>
|
||||
Brandon Philips <brandon.philips@coreos.com> <brandon@ifup.org>
|
||||
Brent Salisbury <brent.salisbury@docker.com> <brent@docker.com>
|
||||
Brian Goff <cpuguy83@gmail.com>
|
||||
Brian Goff <cpuguy83@gmail.com> <bgoff@cpuguy83-mbp.home>
|
||||
Brian Goff <cpuguy83@gmail.com> <bgoff@cpuguy83-mbp.local>
|
||||
Carlos de Paula <me@carlosedp.com>
|
||||
Chander Govindarajan <chandergovind@gmail.com>
|
||||
Chao Wang <wangchao.fnst@cn.fujitsu.com> <chaowang@localhost.localdomain>
|
||||
Charles Hooper <charles.hooper@dotcloud.com> <chooper@plumata.com>
|
||||
@@ -94,14 +86,10 @@ Chen Qiu <cheney-90@hotmail.com> <21321229@zju.edu.cn>
|
||||
Chengfei Shang <cfshang@alauda.io>
|
||||
Chris Dias <cdias@microsoft.com>
|
||||
Chris McKinnel <chris.mckinnel@tangentlabs.co.uk>
|
||||
Chris Price <cprice@mirantis.com>
|
||||
Chris Price <cprice@mirantis.com> <chris.price@docker.com>
|
||||
Christopher Biscardi <biscarch@sketcht.com>
|
||||
Christopher Latham <sudosurootdev@gmail.com>
|
||||
Christy Norman <christy@linux.vnet.ibm.com>
|
||||
Chun Chen <ramichen@tencent.com> <chenchun.feed@gmail.com>
|
||||
Corbin Coleman <corbin.coleman@docker.com>
|
||||
Cristian Ariza <dev@cristianrz.com>
|
||||
Cristian Staretu <cristian.staretu@gmail.com>
|
||||
Cristian Staretu <cristian.staretu@gmail.com> <unclejack@users.noreply.github.com>
|
||||
Cristian Staretu <cristian.staretu@gmail.com> <unclejacksons@gmail.com>
|
||||
@@ -141,12 +129,9 @@ Diego Siqueira <dieg0@live.com>
|
||||
Diogo Monica <diogo@docker.com> <diogo.monica@gmail.com>
|
||||
Dmitry Sharshakov <d3dx12.xx@gmail.com>
|
||||
Dmitry Sharshakov <d3dx12.xx@gmail.com> <sh7dm@outlook.com>
|
||||
Dominic Yin <yindongchao@inspur.com>
|
||||
Dominik Honnef <dominik@honnef.co> <dominikh@fork-bomb.org>
|
||||
Doug Davis <dug@us.ibm.com> <duglin@users.noreply.github.com>
|
||||
Doug Tangren <d.tangren@gmail.com>
|
||||
Drew Erny <derny@mirantis.com>
|
||||
Drew Erny <derny@mirantis.com> <drew.erny@docker.com>
|
||||
Elan Ruusamäe <glen@pld-linux.org>
|
||||
Elan Ruusamäe <glen@pld-linux.org> <glen@delfi.ee>
|
||||
Elango Sivanandam <elango.siva@docker.com>
|
||||
@@ -183,9 +168,6 @@ Giampaolo Mancini <giampaolo@trampolineup.com>
|
||||
Giovan Isa Musthofa <giovanism@outlook.co.id>
|
||||
Gopikannan Venugopalsamy <gopikannan.venugopalsamy@gmail.com>
|
||||
Gou Rao <gou@portworx.com> <gourao@users.noreply.github.com>
|
||||
Grant Millar <rid@cylo.io>
|
||||
Grant Millar <rid@cylo.io> <grant@cylo.io>
|
||||
Grant Millar <rid@cylo.io> <grant@seednet.eu>
|
||||
Greg Stephens <greg@udon.org>
|
||||
Guillaume J. Charmes <guillaume.charmes@docker.com> <charmes.guillaume@gmail.com>
|
||||
Guillaume J. Charmes <guillaume.charmes@docker.com> <guillaume.charmes@dotcloud.com>
|
||||
@@ -200,7 +182,6 @@ Hakan Özler <hakan.ozler@kodcu.com>
|
||||
Hao Shu Wei <haosw@cn.ibm.com>
|
||||
Hao Shu Wei <haosw@cn.ibm.com> <haoshuwei1989@163.com>
|
||||
Harald Albers <github@albersweb.de> <albers@users.noreply.github.com>
|
||||
Harald Niesche <harald@niesche.de>
|
||||
Harold Cooper <hrldcpr@gmail.com>
|
||||
Harry Zhang <harryz@hyper.sh> <harryzhang@zju.edu.cn>
|
||||
Harry Zhang <harryz@hyper.sh> <resouer@163.com>
|
||||
@@ -216,8 +197,6 @@ Hu Keping <hukeping@huawei.com>
|
||||
Huu Nguyen <huu@prismskylabs.com> <whoshuu@gmail.com>
|
||||
Hyzhou Zhy <hyzhou.zhy@alibaba-inc.com>
|
||||
Hyzhou Zhy <hyzhou.zhy@alibaba-inc.com> <1187766782@qq.com>
|
||||
Ian Campbell <ian.campbell@docker.com>
|
||||
Ian Campbell <ian.campbell@docker.com> <ijc@docker.com>
|
||||
Ilya Khlopotov <ilya.khlopotov@gmail.com>
|
||||
Iskander Sharipov <quasilyte@gmail.com>
|
||||
Ivan Markin <sw@nogoegst.net> <twim@riseup.net>
|
||||
@@ -225,8 +204,6 @@ Jack Laxson <jackjrabbit@gmail.com>
|
||||
Jacob Atzen <jacob@jacobatzen.dk> <jatzen@gmail.com>
|
||||
Jacob Tomlinson <jacob@tom.linson.uk> <jacobtomlinson@users.noreply.github.com>
|
||||
Jaivish Kothari <janonymous.codevulture@gmail.com>
|
||||
James Nesbitt <jnesbitt@mirantis.com>
|
||||
James Nesbitt <jnesbitt@mirantis.com> <james.nesbitt@wunderkraut.com>
|
||||
Jamie Hannaford <jamie@limetree.org> <jamie.hannaford@rackspace.com>
|
||||
Jean Rouge <rougej+github@gmail.com> <jer329@cornell.edu>
|
||||
Jean-Baptiste Barth <jeanbaptiste.barth@gmail.com>
|
||||
@@ -235,16 +212,15 @@ Jean-Tiare Le Bigot <jt@yadutaf.fr> <admin@jtlebi.fr>
|
||||
Jeff Anderson <jeff@docker.com> <jefferya@programmerq.net>
|
||||
Jeff Nickoloff <jeff.nickoloff@gmail.com> <jeff@allingeek.com>
|
||||
Jeroen Franse <jeroenfranse@gmail.com>
|
||||
Jessica Frazelle <jess@oxide.computer>
|
||||
Jessica Frazelle <jess@oxide.computer> <acidburn@docker.com>
|
||||
Jessica Frazelle <jess@oxide.computer> <acidburn@google.com>
|
||||
Jessica Frazelle <jess@oxide.computer> <acidburn@microsoft.com>
|
||||
Jessica Frazelle <jess@oxide.computer> <jess@docker.com>
|
||||
Jessica Frazelle <jess@oxide.computer> <jess@mesosphere.com>
|
||||
Jessica Frazelle <jess@oxide.computer> <jessfraz@google.com>
|
||||
Jessica Frazelle <jess@oxide.computer> <jfrazelle@users.noreply.github.com>
|
||||
Jessica Frazelle <jess@oxide.computer> <me@jessfraz.com>
|
||||
Jessica Frazelle <jess@oxide.computer> <princess@docker.com>
|
||||
Jessica Frazelle <acidburn@microsoft.com>
|
||||
Jessica Frazelle <acidburn@microsoft.com> <acidburn@docker.com>
|
||||
Jessica Frazelle <acidburn@microsoft.com> <acidburn@google.com>
|
||||
Jessica Frazelle <acidburn@microsoft.com> <jess@docker.com>
|
||||
Jessica Frazelle <acidburn@microsoft.com> <jess@mesosphere.com>
|
||||
Jessica Frazelle <acidburn@microsoft.com> <jessfraz@google.com>
|
||||
Jessica Frazelle <acidburn@microsoft.com> <jfrazelle@users.noreply.github.com>
|
||||
Jessica Frazelle <acidburn@microsoft.com> <me@jessfraz.com>
|
||||
Jessica Frazelle <acidburn@microsoft.com> <princess@docker.com>
|
||||
Jian Liao <jliao@alauda.io>
|
||||
Jiang Jinyang <jjyruby@gmail.com>
|
||||
Jiang Jinyang <jjyruby@gmail.com> <jiangjinyang@outlook.com>
|
||||
@@ -256,16 +232,15 @@ Joffrey F <joffrey@docker.com> <f.joffrey@gmail.com>
|
||||
Joffrey F <joffrey@docker.com> <joffrey@dotcloud.com>
|
||||
Johan Euphrosine <proppy@google.com> <proppy@aminche.com>
|
||||
John Harris <john@johnharris.io>
|
||||
John Howard <github@lowenna.com>
|
||||
John Howard <github@lowenna.com> <jhoward@microsoft.com>
|
||||
John Howard <github@lowenna.com> <jhoward@ntdev.microsoft.com>
|
||||
John Howard <github@lowenna.com> <jhowardmsft@users.noreply.github.com>
|
||||
John Howard <github@lowenna.com> <John.Howard@microsoft.com>
|
||||
John Howard <github@lowenna.com> <john.howard@microsoft.com>
|
||||
John Howard (VM) <John.Howard@microsoft.com>
|
||||
John Howard (VM) <John.Howard@microsoft.com> <jhoward@microsoft.com>
|
||||
John Howard (VM) <John.Howard@microsoft.com> <jhoward@ntdev.microsoft.com>
|
||||
John Howard (VM) <John.Howard@microsoft.com> <jhowardmsft@users.noreply.github.com>
|
||||
John Howard (VM) <John.Howard@microsoft.com> <john.howard@microsoft.com>
|
||||
John Stephens <johnstep@docker.com> <johnstep@users.noreply.github.com>
|
||||
Jon Surrell <jon.surrell@gmail.com> <jon.surrell@automattic.com>
|
||||
Jonathan Choy <jonathan.j.choy@gmail.com>
|
||||
Jonathan Choy <jonathan.j.choy@gmail.com> <oni@tetsujinlabs.com>
|
||||
Jon Surrell <jon.surrell@gmail.com> <jon.surrell@automattic.com>
|
||||
Jordan Arentsen <blissdev@gmail.com>
|
||||
Jordan Jennings <jjn2009@gmail.com> <jjn2009@users.noreply.github.com>
|
||||
Jorit Kleine-Möllhoff <joppich@bricknet.de> <joppich@users.noreply.github.com>
|
||||
@@ -363,6 +338,7 @@ Maxwell <csuhp007@gmail.com> <csuhqg@foxmail.com>
|
||||
Michael Crosby <michael@docker.com> <crosby.michael@gmail.com>
|
||||
Michael Crosby <michael@docker.com> <crosbymichael@gmail.com>
|
||||
Michael Crosby <michael@docker.com> <michael@crosbymichael.com>
|
||||
Michał Gryko <github@odkurzacz.org>
|
||||
Michael Hudson-Doyle <michael.hudson@canonical.com> <michael.hudson@linaro.org>
|
||||
Michael Huettermann <michael@huettermann.net>
|
||||
Michael Käufl <docker@c.michael-kaeufl.de> <michael-k@users.noreply.github.com>
|
||||
@@ -370,7 +346,6 @@ Michael Nussbaum <michael.nussbaum@getbraintree.com>
|
||||
Michael Nussbaum <michael.nussbaum@getbraintree.com> <code@getbraintree.com>
|
||||
Michael Spetsiotis <michael_spets@hotmail.com>
|
||||
Michal Minář <miminar@redhat.com>
|
||||
Michał Gryko <github@odkurzacz.org>
|
||||
Michiel de Jong <michiel@unhosted.org>
|
||||
Mickaël Fortunato <morsi.morsicus@gmail.com>
|
||||
Miguel Angel Alvarez Cabrerizo <doncicuto@gmail.com> <30386061+doncicuto@users.noreply.github.com>
|
||||
@@ -413,7 +388,6 @@ Peter Waller <p@pwaller.net> <peter@scraperwiki.com>
|
||||
Phil Estes <estesp@linux.vnet.ibm.com> <estesp@gmail.com>
|
||||
Philip Alexander Etling <paetling@gmail.com>
|
||||
Philipp Gillé <philipp.gille@gmail.com> <philippgille@users.noreply.github.com>
|
||||
Prasanna Gautam <prasannagautam@gmail.com>
|
||||
Qiang Huang <h.huangqiang@huawei.com>
|
||||
Qiang Huang <h.huangqiang@huawei.com> <qhuang@10.0.2.15>
|
||||
Ray Tsang <rayt@google.com> <saturnism@users.noreply.github.com>
|
||||
@@ -421,7 +395,6 @@ Renaud Gaubert <rgaubert@nvidia.com> <renaud.gaubert@gmail.com>
|
||||
Robert Terhaar <rterhaar@atlanticdynamic.com> <robbyt@users.noreply.github.com>
|
||||
Roberto G. Hashioka <roberto.hashioka@docker.com> <roberto_hashioka@hotmail.com>
|
||||
Roberto Muñoz Fernández <robertomf@gmail.com> <roberto.munoz.fernandez.contractor@bbva.com>
|
||||
Robin Thoni <robin@rthoni.com>
|
||||
Roman Dudin <katrmr@gmail.com> <decadent@users.noreply.github.com>
|
||||
Rong Zhang <rongzhang@alauda.io>
|
||||
Rongxiang Song <tinysong1226@gmail.com>
|
||||
@@ -442,7 +415,6 @@ Shengbo Song <thomassong@tencent.com>
|
||||
Shengbo Song <thomassong@tencent.com> <mymneo@163.com>
|
||||
Shih-Yuan Lee <fourdollars@gmail.com>
|
||||
Shishir Mahajan <shishir.mahajan@redhat.com> <smahajan@redhat.com>
|
||||
Shu-Wai Chow <shu-wai.chow@seattlechildrens.org>
|
||||
Shukui Yang <yangshukui@huawei.com>
|
||||
Shuwei Hao <haosw@cn.ibm.com>
|
||||
Shuwei Hao <haosw@cn.ibm.com> <haoshuwei24@gmail.com>
|
||||
@@ -461,12 +433,9 @@ Stefan Berger <stefanb@linux.vnet.ibm.com>
|
||||
Stefan Berger <stefanb@linux.vnet.ibm.com> <stefanb@us.ibm.com>
|
||||
Stefan J. Wernli <swernli@microsoft.com> <swernli@ntdev.microsoft.com>
|
||||
Stefan S. <tronicum@user.github.com>
|
||||
Stefan Scherer <stefan.scherer@docker.com>
|
||||
Stefan Scherer <stefan.scherer@docker.com> <scherer_stefan@icloud.com>
|
||||
Stephan Spindler <shutefan@gmail.com> <shutefan@users.noreply.github.com>
|
||||
Stephen Day <stevvooe@gmail.com>
|
||||
Stephen Day <stevvooe@gmail.com> <stephen.day@docker.com>
|
||||
Stephen Day <stevvooe@gmail.com> <stevvooe@users.noreply.github.com>
|
||||
Stephen Day <stephen.day@docker.com>
|
||||
Stephen Day <stephen.day@docker.com> <stevvooe@users.noreply.github.com>
|
||||
Steve Desmond <steve@vtsv.ca> <stevedesmond-ca@users.noreply.github.com>
|
||||
Sun Gengze <690388648@qq.com>
|
||||
Sun Jianbo <wonderflow.sun@gmail.com>
|
||||
@@ -500,7 +469,6 @@ Toli Kuznets <toli@docker.com>
|
||||
Tom Barlow <tomwbarlow@gmail.com>
|
||||
Tom Sweeney <tsweeney@redhat.com>
|
||||
Tõnis Tiigi <tonistiigi@gmail.com>
|
||||
Trace Andreason <tandreason@gmail.com>
|
||||
Trishna Guha <trishnaguha17@gmail.com>
|
||||
Tristan Carel <tristan@cogniteev.com>
|
||||
Tristan Carel <tristan@cogniteev.com> <tristan.carel@gmail.com>
|
||||
@@ -514,19 +482,14 @@ Victor Vieux <victor.vieux@docker.com> <victor@docker.com>
|
||||
Victor Vieux <victor.vieux@docker.com> <victor@dotcloud.com>
|
||||
Victor Vieux <victor.vieux@docker.com> <victorvieux@gmail.com>
|
||||
Victor Vieux <victor.vieux@docker.com> <vieux@docker.com>
|
||||
Vikram bir Singh <vsingh@mirantis.com>
|
||||
Vikram bir Singh <vsingh@mirantis.com> <vikrambir.singh@docker.com>
|
||||
Viktor Vojnovski <viktor.vojnovski@amadeus.com> <vojnovski@gmail.com>
|
||||
Vincent Batts <vbatts@redhat.com> <vbatts@hashbangbash.com>
|
||||
Vincent Bernat <Vincent.Bernat@exoscale.ch> <bernat@luffy.cx>
|
||||
Vincent Bernat <Vincent.Bernat@exoscale.ch> <vincent@bernat.im>
|
||||
Vincent Boulineau <vincent.boulineau@datadoghq.com>
|
||||
Vincent Demeester <vincent.demeester@docker.com> <vincent+github@demeester.fr>
|
||||
Vincent Demeester <vincent.demeester@docker.com> <vincent@demeester.fr>
|
||||
Vincent Demeester <vincent.demeester@docker.com> <vincent@sbr.pm>
|
||||
Vishnu Kannan <vishnuk@google.com>
|
||||
Vitaly Ostrosablin <vostrosablin@virtuozzo.com>
|
||||
Vitaly Ostrosablin <vostrosablin@virtuozzo.com> <tmp6154@yandex.ru>
|
||||
Vladimir Rutsky <altsysrq@gmail.com> <iamironbob@gmail.com>
|
||||
Walter Stanish <walter@pratyeka.org>
|
||||
Wang Chao <chao.wang@ucloud.cn>
|
||||
@@ -542,13 +505,11 @@ Wei Wu <wuwei4455@gmail.com> cizixs <cizixs@163.com>
|
||||
Wenjun Tang <tangwj2@lenovo.com> <dodia@163.com>
|
||||
Wewang Xiaorenfine <wang.xiaoren@zte.com.cn>
|
||||
Will Weaver <monkey@buildingbananas.com>
|
||||
Wing-Kam Wong <wingkwong.code@gmail.com>
|
||||
Xian Chaobo <xianchaobo@huawei.com>
|
||||
Xian Chaobo <xianchaobo@huawei.com> <jimmyxian2004@yahoo.com.cn>
|
||||
Xianglin Gao <xlgao@zju.edu.cn>
|
||||
Xianlu Bird <xianlubird@gmail.com>
|
||||
Xiao YongBiao <xyb4638@gmail.com>
|
||||
Xiaodong Liu <liuxiaodong@loongson.cn>
|
||||
Xiaodong Zhang <a4012017@sina.com>
|
||||
Xiaoyu Zhang <zhang.xiaoyu33@zte.com.cn>
|
||||
Xuecong Liao <satorulogic@gmail.com>
|
||||
@@ -572,10 +533,7 @@ Zachary Jaffee <zjaffee@us.ibm.com> <zij@case.edu>
|
||||
Zachary Jaffee <zjaffee@us.ibm.com> <zjaffee@apache.org>
|
||||
ZhangHang <stevezhang2014@gmail.com>
|
||||
Zhenkun Bi <bi.zhenkun@zte.com.cn>
|
||||
Zhou Hao <zhouhao@cn.fujitsu.com>
|
||||
Zhoulin Xie <zhoulin.xie@daocloud.io>
|
||||
Zhou Hao <zhouhao@cn.fujitsu.com>
|
||||
Zhu Kunjia <zhu.kunjia@zte.com.cn>
|
||||
Ziheng Liu <lzhfromustc@gmail.com>
|
||||
Zou Yu <zouyu7@huawei.com>
|
||||
Zuhayr Elahi <zuhayr.elahi@docker.com>
|
||||
Zuhayr Elahi <zuhayr.elahi@docker.com> <elahi.zuhayr@gmail.com>
|
||||
|
||||
117
AUTHORS
117
AUTHORS
@@ -4,7 +4,6 @@
|
||||
Aanand Prasad <aanand.prasad@gmail.com>
|
||||
Aaron Davidson <aaron@databricks.com>
|
||||
Aaron Feng <aaron.feng@gmail.com>
|
||||
Aaron Hnatiw <aaron@griddio.com>
|
||||
Aaron Huslage <huslage@gmail.com>
|
||||
Aaron L. Xu <liker.xu@foxmail.com>
|
||||
Aaron Lehmann <aaron.lehmann@docker.com>
|
||||
@@ -18,7 +17,6 @@ Abhishek Chanda <abhishek.becs@gmail.com>
|
||||
Abhishek Sharma <abhishek@asharma.me>
|
||||
Abin Shahab <ashahab@altiscale.com>
|
||||
Adam Avilla <aavilla@yp.com>
|
||||
Adam Dobrawy <naczelnik@jawnosc.tk>
|
||||
Adam Eijdenberg <adam.eijdenberg@gmail.com>
|
||||
Adam Kunk <adam.kunk@tiaa-cref.org>
|
||||
Adam Miller <admiller@redhat.com>
|
||||
@@ -45,7 +43,6 @@ AJ Bowen <aj@soulshake.net>
|
||||
Ajey Charantimath <ajey.charantimath@gmail.com>
|
||||
ajneu <ajneu@users.noreply.github.com>
|
||||
Akash Gupta <akagup@microsoft.com>
|
||||
Akhil Mohan <akhil.mohan@mayadata.io>
|
||||
Akihiro Matsushima <amatsusbit@gmail.com>
|
||||
Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
|
||||
Akim Demaille <akim.demaille@docker.com>
|
||||
@@ -53,12 +50,10 @@ Akira Koyasu <mail@akirakoyasu.net>
|
||||
Akshay Karle <akshay.a.karle@gmail.com>
|
||||
Al Tobey <al@ooyala.com>
|
||||
alambike <alambike@gmail.com>
|
||||
Alan Hoyle <alan@alanhoyle.com>
|
||||
Alan Scherger <flyinprogrammer@gmail.com>
|
||||
Alan Thompson <cloojure@gmail.com>
|
||||
Albert Callarisa <shark234@gmail.com>
|
||||
Albert Zhang <zhgwenming@gmail.com>
|
||||
Albin Kerouanton <albin@akerouanton.name>
|
||||
Alejandro González Hevia <alejandrgh11@gmail.com>
|
||||
Aleksa Sarai <asarai@suse.de>
|
||||
Aleksandrs Fadins <aleks@s-ko.net>
|
||||
@@ -112,13 +107,11 @@ Amy Lindburg <amy.lindburg@docker.com>
|
||||
Anand Patil <anand.prabhakar.patil@gmail.com>
|
||||
AnandkumarPatel <anandkumarpatel@gmail.com>
|
||||
Anatoly Borodin <anatoly.borodin@gmail.com>
|
||||
Anca Iordache <anca.iordache@docker.com>
|
||||
Anchal Agrawal <aagrawa4@illinois.edu>
|
||||
Anda Xu <anda.xu@docker.com>
|
||||
Anders Janmyr <anders@janmyr.com>
|
||||
Andre Dublin <81dublin@gmail.com>
|
||||
Andre Granovsky <robotciti@live.com>
|
||||
Andrea Denisse Gómez <crypto.andrea@protonmail.ch>
|
||||
Andrea Luzzardi <aluzzardi@gmail.com>
|
||||
Andrea Turli <andrea.turli@gmail.com>
|
||||
Andreas Elvers <andreas@work.de>
|
||||
@@ -183,10 +176,8 @@ Anusha Ragunathan <anusha.ragunathan@docker.com>
|
||||
apocas <petermdias@gmail.com>
|
||||
Arash Deshmeh <adeshmeh@ca.ibm.com>
|
||||
ArikaChen <eaglesora@gmail.com>
|
||||
Arko Dasgupta <arko.dasgupta@docker.com>
|
||||
Arnaud Lefebvre <a.lefebvre@outlook.fr>
|
||||
Arnaud Porterie <arnaud.porterie@docker.com>
|
||||
Arnaud Rebillout <arnaud.rebillout@collabora.com>
|
||||
Arthur Barr <arthur.barr@uk.ibm.com>
|
||||
Arthur Gautier <baloo@gandi.net>
|
||||
Artur Meyster <arthurfbi@yahoo.com>
|
||||
@@ -219,12 +210,10 @@ Benjamin Atkin <ben@benatkin.com>
|
||||
Benjamin Baker <Benjamin.baker@utexas.edu>
|
||||
Benjamin Boudreau <boudreau.benjamin@gmail.com>
|
||||
Benjamin Yolken <yolken@stripe.com>
|
||||
Benny Ng <benny.tpng@gmail.com>
|
||||
Benoit Chesneau <bchesneau@gmail.com>
|
||||
Bernerd Schaefer <bj.schaefer@gmail.com>
|
||||
Bernhard M. Wiedemann <bwiedemann@suse.de>
|
||||
Bert Goethals <bert@bertg.be>
|
||||
Bertrand Roussel <broussel@sierrawireless.com>
|
||||
Bevisy Zhang <binbin36520@gmail.com>
|
||||
Bharath Thiruveedula <bharath_ves@hotmail.com>
|
||||
Bhiraj Butala <abhiraj.butala@gmail.com>
|
||||
@@ -237,7 +226,6 @@ Bingshen Wang <bingshen.wbs@alibaba-inc.com>
|
||||
Blake Geno <blakegeno@gmail.com>
|
||||
Boaz Shuster <ripcurld.github@gmail.com>
|
||||
bobby abbott <ttobbaybbob@gmail.com>
|
||||
Boqin Qin <bobbqqin@gmail.com>
|
||||
Boris Pruessmann <boris@pruessmann.org>
|
||||
Boshi Lian <farmer1992@gmail.com>
|
||||
Bouke Haarsma <bouke@webatoom.nl>
|
||||
@@ -291,7 +279,6 @@ Carl Loa Odin <carlodin@gmail.com>
|
||||
Carl X. Su <bcbcarl@gmail.com>
|
||||
Carlo Mion <mion00@gmail.com>
|
||||
Carlos Alexandro Becker <caarlos0@gmail.com>
|
||||
Carlos de Paula <me@carlosedp.com>
|
||||
Carlos Sanchez <carlos@apache.org>
|
||||
Carol Fager-Higgins <carol.fager-higgins@docker.com>
|
||||
Cary <caryhartline@users.noreply.github.com>
|
||||
@@ -341,7 +328,6 @@ Chris Gibson <chris@chrisg.io>
|
||||
Chris Khoo <chris.khoo@gmail.com>
|
||||
Chris McKinnel <chris.mckinnel@tangentlabs.co.uk>
|
||||
Chris McKinnel <chrismckinnel@gmail.com>
|
||||
Chris Price <cprice@mirantis.com>
|
||||
Chris Seto <chriskseto@gmail.com>
|
||||
Chris Snow <chsnow123@gmail.com>
|
||||
Chris St. Pierre <chris.a.st.pierre@gmail.com>
|
||||
@@ -368,7 +354,7 @@ Christopher Currie <codemonkey+github@gmail.com>
|
||||
Christopher Jones <tophj@linux.vnet.ibm.com>
|
||||
Christopher Latham <sudosurootdev@gmail.com>
|
||||
Christopher Rigor <crigor@gmail.com>
|
||||
Christy Norman <christy@linux.vnet.ibm.com>
|
||||
Christy Perez <christy@linux.vnet.ibm.com>
|
||||
Chun Chen <ramichen@tencent.com>
|
||||
Ciro S. Costa <ciro.costa@usp.br>
|
||||
Clayton Coleman <ccoleman@redhat.com>
|
||||
@@ -388,10 +374,8 @@ Corey Farrell <git@cfware.com>
|
||||
Cory Forsyth <cory.forsyth@gmail.com>
|
||||
cressie176 <github@stephen-cresswell.net>
|
||||
CrimsonGlory <CrimsonGlory@users.noreply.github.com>
|
||||
Cristian Ariza <dev@cristianrz.com>
|
||||
Cristian Staretu <cristian.staretu@gmail.com>
|
||||
cristiano balducci <cristiano.balducci@gmail.com>
|
||||
Cristina Yenyxe Gonzalez Garcia <cristina.yenyxe@gmail.com>
|
||||
Cruceru Calin-Cristian <crucerucalincristian@gmail.com>
|
||||
CUI Wei <ghostplant@qq.com>
|
||||
Cyprian Gracz <cyprian.gracz@micro-jumbo.eu>
|
||||
@@ -418,14 +402,12 @@ Dan Williams <me@deedubs.com>
|
||||
Dani Hodovic <dani.hodovic@gmail.com>
|
||||
Dani Louca <dani.louca@docker.com>
|
||||
Daniel Antlinger <d.antlinger@gmx.at>
|
||||
Daniel Black <daniel@linux.ibm.com>
|
||||
Daniel Dao <dqminh@cloudflare.com>
|
||||
Daniel Exner <dex@dragonslave.de>
|
||||
Daniel Farrell <dfarrell@redhat.com>
|
||||
Daniel Garcia <daniel@danielgarcia.info>
|
||||
Daniel Gasienica <daniel@gasienica.ch>
|
||||
Daniel Grunwell <mwgrunny@gmail.com>
|
||||
Daniel Helfand <helfand.4@gmail.com>
|
||||
Daniel Hiltgen <daniel.hiltgen@docker.com>
|
||||
Daniel J Walsh <dwalsh@redhat.com>
|
||||
Daniel Menet <membership@sontags.ch>
|
||||
@@ -435,14 +417,12 @@ Daniel Norberg <dano@spotify.com>
|
||||
Daniel Nordberg <dnordberg@gmail.com>
|
||||
Daniel Robinson <gottagetmac@gmail.com>
|
||||
Daniel S <dan.streby@gmail.com>
|
||||
Daniel Sweet <danieljsweet@icloud.com>
|
||||
Daniel Von Fange <daniel@leancoder.com>
|
||||
Daniel Watkins <daniel@daniel-watkins.co.uk>
|
||||
Daniel X Moore <yahivin@gmail.com>
|
||||
Daniel YC Lin <dlin.tw@gmail.com>
|
||||
Daniel Zhang <jmzwcn@gmail.com>
|
||||
Danny Berger <dpb587@gmail.com>
|
||||
Danny Milosavljevic <dannym@scratchpost.org>
|
||||
Danny Yates <danny@codeaholics.org>
|
||||
Danyal Khaliq <danyal.khaliq@tenpearls.com>
|
||||
Darren Coxall <darren@darrencoxall.com>
|
||||
@@ -507,7 +487,6 @@ Derek McGowan <derek@mcgstyle.net>
|
||||
Deric Crago <deric.crago@gmail.com>
|
||||
Deshi Xiao <dxiao@redhat.com>
|
||||
devmeyster <arthurfbi@yahoo.com>
|
||||
Devon Estes <devon.estes@klarna.com>
|
||||
Devvyn Murphy <devvyn@devvyn.com>
|
||||
Dharmit Shah <shahdharmit@gmail.com>
|
||||
Dhawal Yogesh Bhanushali <dbhanushali@vmware.com>
|
||||
@@ -537,8 +516,6 @@ Dmitry Smirnov <onlyjob@member.fsf.org>
|
||||
Dmitry V. Krivenok <krivenok.dmitry@gmail.com>
|
||||
Dmitry Vorobev <dimahabr@gmail.com>
|
||||
Dolph Mathews <dolph.mathews@gmail.com>
|
||||
Dominic Tubach <dominic.tubach@to.com>
|
||||
Dominic Yin <yindongchao@inspur.com>
|
||||
Dominik Dingel <dingel@linux.vnet.ibm.com>
|
||||
Dominik Finkbeiner <finkes93@gmail.com>
|
||||
Dominik Honnef <dominik@honnef.co>
|
||||
@@ -557,7 +534,7 @@ Douglas Curtis <dougcurtis1@gmail.com>
|
||||
Dr Nic Williams <drnicwilliams@gmail.com>
|
||||
dragon788 <dragon788@users.noreply.github.com>
|
||||
Dražen Lučanin <kermit666@gmail.com>
|
||||
Drew Erny <derny@mirantis.com>
|
||||
Drew Erny <drew.erny@docker.com>
|
||||
Drew Hubl <drew.hubl@gmail.com>
|
||||
Dustin Sallings <dustin@spy.net>
|
||||
Ed Costello <epc@epcostello.com>
|
||||
@@ -607,7 +584,6 @@ Erik Weathers <erikdw@gmail.com>
|
||||
Erno Hopearuoho <erno.hopearuoho@gmail.com>
|
||||
Erwin van der Koogh <info@erronis.nl>
|
||||
Ethan Bell <ebgamer29@gmail.com>
|
||||
Ethan Mosbaugh <ethan@replicated.com>
|
||||
Euan Kemp <euan.kemp@coreos.com>
|
||||
Eugen Krizo <eugen.krizo@gmail.com>
|
||||
Eugene Yakubovich <eugene.yakubovich@coreos.com>
|
||||
@@ -619,7 +595,6 @@ Evan Phoenix <evan@fallingsnow.net>
|
||||
Evan Wies <evan@neomantra.net>
|
||||
Evelyn Xu <evelynhsu21@gmail.com>
|
||||
Everett Toews <everett.toews@rackspace.com>
|
||||
Evgeniy Makhrov <e.makhrov@corp.badoo.com>
|
||||
Evgeny Shmarnev <shmarnev@gmail.com>
|
||||
Evgeny Vereshchagin <evvers@ya.ru>
|
||||
Ewa Czechowska <ewa@ai-traders.com>
|
||||
@@ -645,7 +620,6 @@ Fareed Dudhia <fareeddudhia@googlemail.com>
|
||||
Fathi Boudra <fathi.boudra@linaro.org>
|
||||
Federico Gimenez <fgimenez@coit.es>
|
||||
Felipe Oliveira <felipeweb.programador@gmail.com>
|
||||
Felipe Ruhland <felipe.ruhland@gmail.com>
|
||||
Felix Abecassis <fabecassis@nvidia.com>
|
||||
Felix Geisendörfer <felix@debuggable.com>
|
||||
Felix Hupfeld <felix@quobyte.com>
|
||||
@@ -666,7 +640,6 @@ Florian <FWirtz@users.noreply.github.com>
|
||||
Florian Klein <florian.klein@free.fr>
|
||||
Florian Maier <marsmensch@users.noreply.github.com>
|
||||
Florian Noeding <noeding@adobe.com>
|
||||
Florian Schmaus <flo@geekplace.eu>
|
||||
Florian Weingarten <flo@hackvalue.de>
|
||||
Florin Asavoaie <florin.asavoaie@gmail.com>
|
||||
Florin Patan <florinpatan@gmail.com>
|
||||
@@ -681,7 +654,6 @@ Frank Groeneveld <frank@ivaldi.nl>
|
||||
Frank Herrmann <fgh@4gh.tv>
|
||||
Frank Macreery <frank@macreery.com>
|
||||
Frank Rosquin <frank.rosquin+github@gmail.com>
|
||||
frankyang <yyb196@gmail.com>
|
||||
Fred Lifton <fred.lifton@docker.com>
|
||||
Frederick F. Kautz IV <fkautz@redhat.com>
|
||||
Frederik Loeffert <frederik@zitrusmedia.de>
|
||||
@@ -703,7 +675,7 @@ Gareth Rushgrove <gareth@morethanseven.net>
|
||||
Garrett Barboza <garrett@garrettbarboza.com>
|
||||
Gary Schaetz <gary@schaetzkc.com>
|
||||
Gaurav <gaurav.gosec@gmail.com>
|
||||
Gaurav Singh <gaurav1086@gmail.com>
|
||||
gautam, prasanna <prasannagautam@gmail.com>
|
||||
Gaël PORTAY <gael.portay@savoirfairelinux.com>
|
||||
Genki Takiuchi <genki@s21g.com>
|
||||
GennadySpb <lipenkov@gmail.com>
|
||||
@@ -729,12 +701,11 @@ Gleb M Borisov <borisov.gleb@gmail.com>
|
||||
Glyn Normington <gnormington@gopivotal.com>
|
||||
GoBella <caili_welcome@163.com>
|
||||
Goffert van Gool <goffert@phusion.nl>
|
||||
Goldwyn Rodrigues <rgoldwyn@suse.com>
|
||||
Gopikannan Venugopalsamy <gopikannan.venugopalsamy@gmail.com>
|
||||
Gosuke Miyashita <gosukenator@gmail.com>
|
||||
Gou Rao <gou@portworx.com>
|
||||
Govinda Fichtner <govinda.fichtner@googlemail.com>
|
||||
Grant Millar <rid@cylo.io>
|
||||
Grant Millar <grant@cylo.io>
|
||||
Grant Reaber <grant.reaber@gmail.com>
|
||||
Graydon Hoare <graydon@pobox.com>
|
||||
Greg Fausak <greg@tacodata.com>
|
||||
@@ -753,17 +724,14 @@ Guruprasad <lgp171188@gmail.com>
|
||||
Gustav Sinder <gustav.sinder@gmail.com>
|
||||
gwx296173 <gaojing3@huawei.com>
|
||||
Günter Zöchbauer <guenter@gzoechbauer.com>
|
||||
Haichao Yang <yang.haichao@zte.com.cn>
|
||||
haikuoliu <haikuo@amazon.com>
|
||||
Hakan Özler <hakan.ozler@kodcu.com>
|
||||
Hamish Hutchings <moredhel@aoeu.me>
|
||||
Hannes Ljungberg <hannes@5monkeys.se>
|
||||
Hans Kristian Flaatten <hans@starefossen.com>
|
||||
Hans Rødtang <hansrodtang@gmail.com>
|
||||
Hao Shu Wei <haosw@cn.ibm.com>
|
||||
Hao Zhang <21521210@zju.edu.cn>
|
||||
Harald Albers <github@albersweb.de>
|
||||
Harald Niesche <harald@niesche.de>
|
||||
Harley Laue <losinggeneration@gmail.com>
|
||||
Harold Cooper <hrldcpr@gmail.com>
|
||||
Harrison Turton <harrisonturton@gmail.com>
|
||||
@@ -783,13 +751,9 @@ Hobofan <goisser94@gmail.com>
|
||||
Hollie Teal <hollie@docker.com>
|
||||
Hong Xu <hong@topbug.net>
|
||||
Hongbin Lu <hongbin034@gmail.com>
|
||||
Hongxu Jia <hongxu.jia@windriver.com>
|
||||
Honza Pokorny <me@honza.ca>
|
||||
Hsing-Hui Hsu <hsinghui@amazon.com>
|
||||
hsinko <21551195@zju.edu.cn>
|
||||
Hu Keping <hukeping@huawei.com>
|
||||
Hu Tao <hutao@cn.fujitsu.com>
|
||||
HuanHuan Ye <logindaveye@gmail.com>
|
||||
Huanzhong Zhang <zhanghuanzhong90@gmail.com>
|
||||
Huayi Zhang <irachex@gmail.com>
|
||||
Hugo Duncan <hugo@hugoduncan.org>
|
||||
@@ -826,7 +790,6 @@ Ingo Gottwald <in.gottwald@gmail.com>
|
||||
Innovimax <innovimax@gmail.com>
|
||||
Isaac Dupree <antispam@idupree.com>
|
||||
Isabel Jimenez <contact.isabeljimenez@gmail.com>
|
||||
Isaiah Grace <irgkenya4@gmail.com>
|
||||
Isao Jonas <isao.jonas@gmail.com>
|
||||
Iskander Sharipov <quasilyte@gmail.com>
|
||||
Ivan Babrou <ibobrik@gmail.com>
|
||||
@@ -842,7 +805,6 @@ Jacob Edelman <edelman.jd@gmail.com>
|
||||
Jacob Tomlinson <jacob@tom.linson.uk>
|
||||
Jacob Vallejo <jakeev@amazon.com>
|
||||
Jacob Wen <jian.w.wen@oracle.com>
|
||||
Jaime Cepeda <jcepedavillamayor@gmail.com>
|
||||
Jaivish Kothari <janonymous.codevulture@gmail.com>
|
||||
Jake Champlin <jake.champlin.27@gmail.com>
|
||||
Jake Moshenko <jake@devtable.com>
|
||||
@@ -857,13 +819,12 @@ James Kyburz <james.kyburz@gmail.com>
|
||||
James Kyle <james@jameskyle.org>
|
||||
James Lal <james@lightsofapollo.com>
|
||||
James Mills <prologic@shortcircuit.net.au>
|
||||
James Nesbitt <jnesbitt@mirantis.com>
|
||||
James Nesbitt <james.nesbitt@wunderkraut.com>
|
||||
James Nugent <james@jen20.com>
|
||||
James Turnbull <james@lovedthanlost.net>
|
||||
James Watkins-Harvey <jwatkins@progi-media.com>
|
||||
Jamie Hannaford <jamie@limetree.org>
|
||||
Jamshid Afshar <jafshar@yahoo.com>
|
||||
Jan Chren <dev.rindeal@gmail.com>
|
||||
Jan Keromnes <janx@linux.com>
|
||||
Jan Koprowski <jan.koprowski@gmail.com>
|
||||
Jan Pazdziora <jpazdziora@redhat.com>
|
||||
@@ -878,7 +839,6 @@ Jared Hocutt <jaredh@netapp.com>
|
||||
Jaroslaw Zabiello <hipertracker@gmail.com>
|
||||
jaseg <jaseg@jaseg.net>
|
||||
Jasmine Hegman <jasmine@jhegman.com>
|
||||
Jason A. Donenfeld <Jason@zx2c4.com>
|
||||
Jason Divock <jdivock@gmail.com>
|
||||
Jason Giedymin <jasong@apache.org>
|
||||
Jason Green <Jason.Green@AverInformatics.Com>
|
||||
@@ -926,7 +886,7 @@ Jeroen Franse <jeroenfranse@gmail.com>
|
||||
Jeroen Jacobs <github@jeroenj.be>
|
||||
Jesse Dearing <jesse.dearing@gmail.com>
|
||||
Jesse Dubay <jesse@thefortytwo.net>
|
||||
Jessica Frazelle <jess@oxide.computer>
|
||||
Jessica Frazelle <acidburn@microsoft.com>
|
||||
Jezeniel Zapanta <jpzapanta22@gmail.com>
|
||||
Jhon Honce <jhonce@redhat.com>
|
||||
Ji.Zhilong <zhilongji@gmail.com>
|
||||
@@ -934,11 +894,9 @@ Jian Liao <jliao@alauda.io>
|
||||
Jian Zhang <zhangjian.fnst@cn.fujitsu.com>
|
||||
Jiang Jinyang <jjyruby@gmail.com>
|
||||
Jie Luo <luo612@zju.edu.cn>
|
||||
Jie Ma <jienius@outlook.com>
|
||||
Jihyun Hwang <jhhwang@telcoware.com>
|
||||
Jilles Oldenbeuving <ojilles@gmail.com>
|
||||
Jim Alateras <jima@comware.com.au>
|
||||
Jim Ehrismann <jim.ehrismann@docker.com>
|
||||
Jim Galasyn <jim.galasyn@docker.com>
|
||||
Jim Minter <jminter@redhat.com>
|
||||
Jim Perrin <jperrin@centos.org>
|
||||
@@ -976,7 +934,7 @@ John Feminella <jxf@jxf.me>
|
||||
John Gardiner Myers <jgmyers@proofpoint.com>
|
||||
John Gossman <johngos@microsoft.com>
|
||||
John Harris <john@johnharris.io>
|
||||
John Howard <github@lowenna.com>
|
||||
John Howard (VM) <John.Howard@microsoft.com>
|
||||
John Laswell <john.n.laswell@gmail.com>
|
||||
John Maguire <jmaguire@duosecurity.com>
|
||||
John Mulhausen <john@docker.com>
|
||||
@@ -990,8 +948,6 @@ John Willis <john.willis@docker.com>
|
||||
Jon Johnson <jonjohnson@google.com>
|
||||
Jon Surrell <jon.surrell@gmail.com>
|
||||
Jon Wedaman <jweede@gmail.com>
|
||||
Jonas Dohse <jonas@dohse.ch>
|
||||
Jonas Heinrich <Jonas@JonasHeinrich.com>
|
||||
Jonas Pfenniger <jonas@pfenniger.name>
|
||||
Jonathan A. Schweder <jonathanschweder@gmail.com>
|
||||
Jonathan A. Sternberg <jonathansternberg@gmail.com>
|
||||
@@ -1041,13 +997,10 @@ Julien Dubois <julien.dubois@gmail.com>
|
||||
Julien Kassar <github@kassisol.com>
|
||||
Julien Maitrehenry <julien.maitrehenry@me.com>
|
||||
Julien Pervillé <julien.perville@perfect-memory.com>
|
||||
Julien Pivotto <roidelapluie@inuits.eu>
|
||||
Julio Guerra <julio@sqreen.com>
|
||||
Julio Montes <imc.coder@gmail.com>
|
||||
Jun-Ru Chang <jrjang@gmail.com>
|
||||
Jussi Nummelin <jussi.nummelin@gmail.com>
|
||||
Justas Brazauskas <brazauskasjustas@gmail.com>
|
||||
Justen Martin <jmart@the-coder.com>
|
||||
Justin Cormack <justin.cormack@docker.com>
|
||||
Justin Force <justin.force@gmail.com>
|
||||
Justin Menga <justin.menga@gmail.com>
|
||||
@@ -1056,7 +1009,6 @@ Justin Simonelis <justin.p.simonelis@gmail.com>
|
||||
Justin Terry <juterry@microsoft.com>
|
||||
Justyn Temme <justyntemme@gmail.com>
|
||||
Jyrki Puttonen <jyrkiput@gmail.com>
|
||||
Jérémy Leherpeur <amenophis@leherpeur.net>
|
||||
Jérôme Petazzoni <jerome.petazzoni@docker.com>
|
||||
Jörg Thalheim <joerg@higgsboson.tk>
|
||||
K. Heller <pestophagous@gmail.com>
|
||||
@@ -1094,7 +1046,6 @@ Ken Reese <krrgithub@gmail.com>
|
||||
Kenfe-Mickaël Laventure <mickael.laventure@gmail.com>
|
||||
Kenjiro Nakayama <nakayamakenjiro@gmail.com>
|
||||
Kent Johnson <kentoj@gmail.com>
|
||||
Kenta Tada <Kenta.Tada@sony.com>
|
||||
Kevin "qwazerty" Houdebert <kevin.houdebert@gmail.com>
|
||||
Kevin Burke <kev@inburke.com>
|
||||
Kevin Clark <kevin.clark@gmail.com>
|
||||
@@ -1105,7 +1056,6 @@ Kevin Kern <kaiwentan@harmonycloud.cn>
|
||||
Kevin Menard <kevin@nirvdrum.com>
|
||||
Kevin Meredith <kevin.m.meredith@gmail.com>
|
||||
Kevin P. Kucharczyk <kevinkucharczyk@gmail.com>
|
||||
Kevin Parsons <kevpar@microsoft.com>
|
||||
Kevin Richardson <kevin@kevinrichardson.co>
|
||||
Kevin Shi <kshi@andrew.cmu.edu>
|
||||
Kevin Wallace <kevin@pentabarf.net>
|
||||
@@ -1196,7 +1146,6 @@ longliqiang88 <394564827@qq.com>
|
||||
Lorenz Leutgeb <lorenz.leutgeb@gmail.com>
|
||||
Lorenzo Fontana <fontanalorenz@gmail.com>
|
||||
Lotus Fenn <fenn.lotus@gmail.com>
|
||||
Louis Delossantos <ldelossa.ld@gmail.com>
|
||||
Louis Opter <kalessin@kalessin.fr>
|
||||
Luca Favatella <luca.favatella@erlang-solutions.com>
|
||||
Luca Marturana <lucamarturana@gmail.com>
|
||||
@@ -1205,11 +1154,9 @@ Luca-Bogdan Grigorescu <Luca-Bogdan Grigorescu>
|
||||
Lucas Chan <lucas-github@lucaschan.com>
|
||||
Lucas Chi <lucas@teacherspayteachers.com>
|
||||
Lucas Molas <lmolas@fundacionsadosky.org.ar>
|
||||
Lucas Silvestre <lukas.silvestre@gmail.com>
|
||||
Luciano Mores <leslau@gmail.com>
|
||||
Luis Martínez de Bartolomé Izquierdo <lmartinez@biicode.com>
|
||||
Luiz Svoboda <luizek@gmail.com>
|
||||
Lukas Heeren <lukas-heeren@hotmail.com>
|
||||
Lukas Waslowski <cr7pt0gr4ph7@gmail.com>
|
||||
lukaspustina <lukas.pustina@centerdevice.com>
|
||||
Lukasz Zajaczkowski <Lukasz.Zajaczkowski@ts.fujitsu.com>
|
||||
@@ -1309,7 +1256,6 @@ Matthieu Hauglustaine <matt.hauglustaine@gmail.com>
|
||||
Mattias Jernberg <nostrad@gmail.com>
|
||||
Mauricio Garavaglia <mauricio@medallia.com>
|
||||
mauriyouth <mauriyouth@gmail.com>
|
||||
Max Harmathy <max.harmathy@web.de>
|
||||
Max Shytikov <mshytikov@gmail.com>
|
||||
Maxim Fedchyshyn <sevmax@gmail.com>
|
||||
Maxim Ivanov <ivanov.maxim@gmail.com>
|
||||
@@ -1350,7 +1296,6 @@ Michael Stapelberg <michael+gh@stapelberg.de>
|
||||
Michael Steinert <mike.steinert@gmail.com>
|
||||
Michael Thies <michaelthies78@gmail.com>
|
||||
Michael West <mwest@mdsol.com>
|
||||
Michael Zhao <michael.zhao@arm.com>
|
||||
Michal Fojtik <mfojtik@redhat.com>
|
||||
Michal Gebauer <mishak@mishak.net>
|
||||
Michal Jemala <michal.jemala@gmail.com>
|
||||
@@ -1367,7 +1312,6 @@ Miguel Morales <mimoralea@gmail.com>
|
||||
Mihai Borobocea <MihaiBorob@gmail.com>
|
||||
Mihuleacc Sergiu <mihuleac.sergiu@gmail.com>
|
||||
Mike Brown <brownwm@us.ibm.com>
|
||||
Mike Bush <mpbush@gmail.com>
|
||||
Mike Casas <mkcsas0@gmail.com>
|
||||
Mike Chelen <michael.chelen@gmail.com>
|
||||
Mike Danese <mikedanese@google.com>
|
||||
@@ -1436,7 +1380,6 @@ Neyazul Haque <nuhaque@gmail.com>
|
||||
Nghia Tran <nghia@google.com>
|
||||
Niall O'Higgins <niallo@unworkable.org>
|
||||
Nicholas E. Rabenau <nerab@gmx.at>
|
||||
Nick Adcock <nick.adcock@docker.com>
|
||||
Nick DeCoursin <n.decoursin@foodpanda.com>
|
||||
Nick Irvine <nfirvine@nfirvine.com>
|
||||
Nick Neisen <nwneisen@gmail.com>
|
||||
@@ -1460,7 +1403,6 @@ Nik Nyby <nikolas@gnu.org>
|
||||
Nikhil Chawla <chawlanikhil24@gmail.com>
|
||||
NikolaMandic <mn080202@gmail.com>
|
||||
Nikolas Garofil <nikolas.garofil@uantwerpen.be>
|
||||
Nikolay Edigaryev <edigaryev@gmail.com>
|
||||
Nikolay Milovanov <nmil@itransformers.net>
|
||||
Nirmal Mehta <nirmalkmehta@gmail.com>
|
||||
Nishant Totla <nishanttotla@gmail.com>
|
||||
@@ -1476,7 +1418,6 @@ Nuutti Kotivuori <naked@iki.fi>
|
||||
nzwsch <hi@nzwsch.com>
|
||||
O.S. Tezer <ostezer@gmail.com>
|
||||
objectified <objectified@gmail.com>
|
||||
Odin Ugedal <odin@ugedal.com>
|
||||
Oguz Bilgic <fisyonet@gmail.com>
|
||||
Oh Jinkyun <tintypemolly@gmail.com>
|
||||
Ohad Schneider <ohadschn@users.noreply.github.com>
|
||||
@@ -1487,7 +1428,6 @@ Oliver Reason <oli@overrateddev.co>
|
||||
Olivier Gambier <dmp42@users.noreply.github.com>
|
||||
Olle Jonsson <olle.jonsson@gmail.com>
|
||||
Olli Janatuinen <olli.janatuinen@gmail.com>
|
||||
Olly Pomeroy <oppomeroy@gmail.com>
|
||||
Omri Shiv <Omri.Shiv@teradata.com>
|
||||
Oriol Francès <oriolfa@gmail.com>
|
||||
Oskar Niburski <oskarniburski@gmail.com>
|
||||
@@ -1497,7 +1437,6 @@ Ovidio Mallo <ovidio.mallo@gmail.com>
|
||||
Panagiotis Moustafellos <pmoust@elastic.co>
|
||||
Paolo G. Giarrusso <p.giarrusso@gmail.com>
|
||||
Pascal <pascalgn@users.noreply.github.com>
|
||||
Pascal Bach <pascal.bach@siemens.com>
|
||||
Pascal Borreli <pascal@borreli.com>
|
||||
Pascal Hartig <phartig@rdrei.net>
|
||||
Patrick Böänziger <patrick.baenziger@bsi-software.com>
|
||||
@@ -1522,7 +1461,6 @@ Paul Nasrat <pnasrat@gmail.com>
|
||||
Paul Weaver <pauweave@cisco.com>
|
||||
Paulo Ribeiro <paigr.io@gmail.com>
|
||||
Pavel Lobashov <ShockwaveNN@gmail.com>
|
||||
Pavel Matěja <pavel@verotel.cz>
|
||||
Pavel Pletenev <cpp.create@gmail.com>
|
||||
Pavel Pospisil <pospispa@gmail.com>
|
||||
Pavel Sutyrin <pavel.sutyrin@gmail.com>
|
||||
@@ -1634,7 +1572,6 @@ Riku Voipio <riku.voipio@linaro.org>
|
||||
Riley Guerin <rileytg.dev@gmail.com>
|
||||
Ritesh H Shukla <sritesh@vmware.com>
|
||||
Riyaz Faizullabhoy <riyaz.faizullabhoy@docker.com>
|
||||
Rob Gulewich <rgulewich@netflix.com>
|
||||
Rob Vesse <rvesse@dotnetrdf.org>
|
||||
Robert Bachmann <rb@robertbachmann.at>
|
||||
Robert Bittle <guywithnose@gmail.com>
|
||||
@@ -1643,13 +1580,11 @@ Robert Schneider <mail@shakeme.info>
|
||||
Robert Stern <lexandro2000@gmail.com>
|
||||
Robert Terhaar <rterhaar@atlanticdynamic.com>
|
||||
Robert Wallis <smilingrob@gmail.com>
|
||||
Robert Wang <robert@arctic.tw>
|
||||
Roberto G. Hashioka <roberto.hashioka@docker.com>
|
||||
Roberto Muñoz Fernández <robertomf@gmail.com>
|
||||
Robin Naundorf <r.naundorf@fh-muenster.de>
|
||||
Robin Schneider <ypid@riseup.net>
|
||||
Robin Speekenbrink <robin@kingsquare.nl>
|
||||
Robin Thoni <robin@rthoni.com>
|
||||
robpc <rpcann@gmail.com>
|
||||
Rodolfo Carvalho <rhcarvalho@gmail.com>
|
||||
Rodrigo Vaz <rodrigo.vaz@gmail.com>
|
||||
@@ -1664,7 +1599,6 @@ Roland Kammerer <roland.kammerer@linbit.com>
|
||||
Roland Moriz <rmoriz@users.noreply.github.com>
|
||||
Roma Sokolov <sokolov.r.v@gmail.com>
|
||||
Roman Dudin <katrmr@gmail.com>
|
||||
Roman Mazur <roman@balena.io>
|
||||
Roman Strashkin <roman.strashkin@gmail.com>
|
||||
Ron Smits <ron.smits@gmail.com>
|
||||
Ron Williams <ron.a.williams@gmail.com>
|
||||
@@ -1684,7 +1618,6 @@ Rozhnov Alexandr <nox73@ya.ru>
|
||||
Rudolph Gottesheim <r.gottesheim@loot.at>
|
||||
Rui Cao <ruicao@alauda.io>
|
||||
Rui Lopes <rgl@ruilopes.com>
|
||||
Ruilin Li <liruilin4@huawei.com>
|
||||
Runshen Zhu <runshen.zhu@gmail.com>
|
||||
Russ Magee <rmagee@gmail.com>
|
||||
Ryan Abrams <rdabrams@gmail.com>
|
||||
@@ -1723,7 +1656,6 @@ Sam J Sharpe <sam.sharpe@digital.cabinet-office.gov.uk>
|
||||
Sam Neirinck <sam@samneirinck.com>
|
||||
Sam Reis <sreis@atlassian.com>
|
||||
Sam Rijs <srijs@airpost.net>
|
||||
Sam Whited <sam@samwhited.com>
|
||||
Sambuddha Basu <sambuddhabasu1@gmail.com>
|
||||
Sami Wagiaalla <swagiaal@redhat.com>
|
||||
Samuel Andaya <samuel@andaya.net>
|
||||
@@ -1738,7 +1670,6 @@ sapphiredev <se.imas.kr@gmail.com>
|
||||
Sargun Dhillon <sargun@netflix.com>
|
||||
Sascha Andres <sascha.andres@outlook.com>
|
||||
Sascha Grunert <sgrunert@suse.com>
|
||||
SataQiu <qiushida@beyondcent.com>
|
||||
Satnam Singh <satnam@raintown.org>
|
||||
Satoshi Amemiya <satoshi_amemiya@voyagegroup.com>
|
||||
Satoshi Tagomori <tagomoris@gmail.com>
|
||||
@@ -1787,7 +1718,6 @@ Shijun Qin <qinshijun16@mails.ucas.ac.cn>
|
||||
Shishir Mahajan <shishir.mahajan@redhat.com>
|
||||
Shoubhik Bose <sbose78@gmail.com>
|
||||
Shourya Sarcar <shourya.sarcar@gmail.com>
|
||||
Shu-Wai Chow <shu-wai.chow@seattlechildrens.org>
|
||||
shuai-z <zs.broccoli@gmail.com>
|
||||
Shukui Yang <yangshukui@huawei.com>
|
||||
Shuwei Hao <haosw@cn.ibm.com>
|
||||
@@ -1798,7 +1728,6 @@ Silas Sewell <silas@sewell.org>
|
||||
Silvan Jegen <s.jegen@gmail.com>
|
||||
Simão Reis <smnrsti@gmail.com>
|
||||
Simei He <hesimei@zju.edu.cn>
|
||||
Simon Barendse <simon.barendse@gmail.com>
|
||||
Simon Eskildsen <sirup@sirupsen.com>
|
||||
Simon Ferquel <simon.ferquel@docker.com>
|
||||
Simon Leinen <simon.leinen@gmail.com>
|
||||
@@ -1807,7 +1736,6 @@ Simon Taranto <simon.taranto@gmail.com>
|
||||
Simon Vikstrom <pullreq@devsn.se>
|
||||
Sindhu S <sindhus@live.in>
|
||||
Sjoerd Langkemper <sjoerd-github@linuxonly.nl>
|
||||
skanehira <sho19921005@gmail.com>
|
||||
Solganik Alexander <solganik@gmail.com>
|
||||
Solomon Hykes <solomon@docker.com>
|
||||
Song Gao <song@gao.io>
|
||||
@@ -1819,21 +1747,18 @@ Sridatta Thatipamala <sthatipamala@gmail.com>
|
||||
Sridhar Ratnakumar <sridharr@activestate.com>
|
||||
Srini Brahmaroutu <srbrahma@us.ibm.com>
|
||||
Srinivasan Srivatsan <srinivasan.srivatsan@hpe.com>
|
||||
Staf Wagemakers <staf@wagemakers.be>
|
||||
Stanislav Bondarenko <stanislav.bondarenko@gmail.com>
|
||||
Stanislav Levin <slev@altlinux.org>
|
||||
Steeve Morin <steeve.morin@gmail.com>
|
||||
Stefan Berger <stefanb@linux.vnet.ibm.com>
|
||||
Stefan J. Wernli <swernli@microsoft.com>
|
||||
Stefan Praszalowicz <stefan@greplin.com>
|
||||
Stefan S. <tronicum@user.github.com>
|
||||
Stefan Scherer <stefan.scherer@docker.com>
|
||||
Stefan Scherer <scherer_stefan@icloud.com>
|
||||
Stefan Staudenmeyer <doerte@instana.com>
|
||||
Stefan Weil <sw@weilnetz.de>
|
||||
Stephan Spindler <shutefan@gmail.com>
|
||||
Stephen Benjamin <stephen@redhat.com>
|
||||
Stephen Crosby <stevecrozz@gmail.com>
|
||||
Stephen Day <stevvooe@gmail.com>
|
||||
Stephen Day <stephen.day@docker.com>
|
||||
Stephen Drake <stephen@xenolith.net>
|
||||
Stephen Rust <srust@blockbridge.com>
|
||||
Steve Desmond <steve@vtsv.ca>
|
||||
@@ -1848,12 +1773,10 @@ Steven Iveson <sjiveson@outlook.com>
|
||||
Steven Merrill <steven.merrill@gmail.com>
|
||||
Steven Richards <steven@axiomzen.co>
|
||||
Steven Taylor <steven.taylor@me.com>
|
||||
Stig Larsson <stig@larsson.dev>
|
||||
Subhajit Ghosh <isubuz.g@gmail.com>
|
||||
Sujith Haridasan <sujith.h@gmail.com>
|
||||
Sun Gengze <690388648@qq.com>
|
||||
Sun Jianbo <wonderflow.sun@gmail.com>
|
||||
Sune Keller <sune.keller@gmail.com>
|
||||
Sunny Gogoi <indiasuny000@gmail.com>
|
||||
Suryakumar Sudar <surya.trunks@gmail.com>
|
||||
Sven Dowideit <SvenDowideit@home.org.au>
|
||||
@@ -1904,8 +1827,6 @@ Tianyi Wang <capkurmagati@gmail.com>
|
||||
Tibor Vass <teabee89@gmail.com>
|
||||
Tiffany Jernigan <tiffany.f.j@gmail.com>
|
||||
Tiffany Low <tiffany@box.com>
|
||||
Till Wegmüller <toasterson@gmail.com>
|
||||
Tim <elatllat@gmail.com>
|
||||
Tim Bart <tim@fewagainstmany.com>
|
||||
Tim Bosse <taim@bosboot.org>
|
||||
Tim Dettrick <t.dettrick@uq.edu.au>
|
||||
@@ -1957,7 +1878,7 @@ Tony Miller <mcfiredrill@gmail.com>
toogley <toogley@mailbox.org>
Torstein Husebø <torstein@huseboe.net>
Tõnis Tiigi <tonistiigi@gmail.com>
Trace Andreason <tandreason@gmail.com>
tpng <benny.tpng@gmail.com>
tracylihui <793912329@qq.com>
Trapier Marshall <trapier.marshall@docker.com>
Travis Cline <travis.cline@gmail.com>
@@ -1980,7 +1901,6 @@ Utz Bacher <utz.bacher@de.ibm.com>
vagrant <vagrant@ubuntu-14.04-amd64-vbox>
Vaidas Jablonskis <jablonskis@gmail.com>
vanderliang <lansheng@meili-inc.com>
Velko Ivanov <vivanov@deeperplane.com>
Veres Lajos <vlajos@gmail.com>
Victor Algaze <valgaze@gmail.com>
Victor Coisne <victor.coisne@dotcloud.com>
@@ -1992,13 +1912,11 @@ Victor Palma <palma.victor@gmail.com>
Victor Vieux <victor.vieux@docker.com>
Victoria Bialas <victoria.bialas@docker.com>
Vijaya Kumar K <vijayak@caviumnetworks.com>
Vikram bir Singh <vsingh@mirantis.com>
Viktor Stanchev <me@viktorstanchev.com>
Viktor Vojnovski <viktor.vojnovski@amadeus.com>
VinayRaghavanKS <raghavan.vinay@gmail.com>
Vincent Batts <vbatts@redhat.com>
Vincent Bernat <Vincent.Bernat@exoscale.ch>
Vincent Boulineau <vincent.boulineau@datadoghq.com>
Vincent Demeester <vincent.demeester@docker.com>
Vincent Giersch <vincent.giersch@ovh.net>
Vincent Mayers <vincent.mayers@inbloom.org>
@@ -2029,8 +1947,6 @@ Wang Long <long.wanglong@huawei.com>
Wang Ping <present.wp@icloud.com>
Wang Xing <hzwangxing@corp.netease.com>
Wang Yuexiao <wang.yuexiao@zte.com.cn>
Wang Yumu <37442693@qq.com>
wanghuaiqing <wanghuaiqing@loongson.cn>
Ward Vandewege <ward@jhvc.com>
WarheadsSE <max@warheads.net>
Wassim Dhif <wassimdhif@gmail.com>
@@ -2047,14 +1963,12 @@ Wen Cheng Ma <wenchma@cn.ibm.com>
Wendel Fleming <wfleming@usc.edu>
Wenjun Tang <tangwj2@lenovo.com>
Wenkai Yin <yinw@vmware.com>
wenlxie <wenlxie@ebay.com>
Wentao Zhang <zhangwentao234@huawei.com>
Wenxuan Zhao <viz@linux.com>
Wenyu You <21551128@zju.edu.cn>
Wenzhi Liang <wenzhi.liang@gmail.com>
Wes Morgan <cap10morgan@gmail.com>
Wewang Xiaorenfine <wang.xiaoren@zte.com.cn>
Wiktor Kwapisiewicz <wiktor@metacode.biz>
Will Dietz <w@wdtz.org>
Will Rouesnel <w.rouesnel@gmail.com>
Will Weaver <monkey@buildingbananas.com>
@@ -2065,8 +1979,6 @@ William Hubbs <w.d.hubbs@gmail.com>
William Martin <wmartin@pivotal.io>
William Riancho <wr.wllm@gmail.com>
William Thurston <thurstw@amazon.com>
Wilson Júnior <wilsonpjunior@gmail.com>
Wing-Kam Wong <wingkwong.code@gmail.com>
WiseTrem <shepelyov.g@gmail.com>
Wolfgang Powisch <powo@powo.priv.at>
Wonjun Kim <wonjun.kim@navercorp.com>
@@ -2076,7 +1988,6 @@ Xianglin Gao <xlgao@zju.edu.cn>
Xianlu Bird <xianlubird@gmail.com>
Xiao YongBiao <xyb4638@gmail.com>
XiaoBing Jiang <s7v7nislands@gmail.com>
Xiaodong Liu <liuxiaodong@loongson.cn>
Xiaodong Zhang <a4012017@sina.com>
Xiaoxi He <xxhe@alauda.io>
Xiaoxu Chen <chenxiaoxu14@otcaix.iscas.ac.cn>
@@ -2085,7 +1996,6 @@ xichengliudui <1693291525@qq.com>
xiekeyang <xiekeyang@huawei.com>
Ximo Guanter Gonzálbez <joaquin.guantergonzalbez@telefonica.com>
Xinbo Weng <xihuanbo_0521@zju.edu.cn>
Xinfeng Liu <xinfeng.liu@gmail.com>
Xinzi Zhou <imdreamrunner@gmail.com>
Xiuming Chen <cc@cxm.cc>
Xuecong Liao <satorulogic@gmail.com>
@@ -2100,7 +2010,6 @@ Yang Pengfei <yangpengfei4@huawei.com>
yangchenliang <yangchenliang@huawei.com>
Yanqiang Miao <miao.yanqiang@zte.com.cn>
Yao Zaiyong <yaozaiyong@hotmail.com>
Yash Murty <yashmurty@gmail.com>
Yassine Tijani <yasstij11@gmail.com>
Yasunori Mahata <nori@mahata.net>
Yazhong Liu <yorkiefixer@gmail.com>
@@ -2115,7 +2024,6 @@ Yongxin Li <yxli@alauda.io>
Yongzhi Pan <panyongzhi@gmail.com>
Yosef Fertel <yfertel@gmail.com>
You-Sheng Yang (楊有勝) <vicamo@gmail.com>
youcai <omegacoleman@gmail.com>
Youcef YEKHLEF <yyekhlef@gmail.com>
Yu Changchun <yuchangchun1@huawei.com>
Yu Chengxia <yuchengxia@huawei.com>
@@ -2147,13 +2055,11 @@ Zhenan Ye <21551168@zju.edu.cn>
zhenghenghuo <zhenghenghuo@zju.edu.cn>
Zhenhai Gao <gaozh1988@live.com>
Zhenkun Bi <bi.zhenkun@zte.com.cn>
zhipengzuo <zuozhipeng@baidu.com>
Zhou Hao <zhouhao@cn.fujitsu.com>
Zhoulin Xie <zhoulin.xie@daocloud.io>
Zhu Guihua <zhugh.fnst@cn.fujitsu.com>
Zhu Kunjia <zhu.kunjia@zte.com.cn>
Zhuoyun Wei <wzyboy@wzyboy.org>
Ziheng Liu <lzhfromustc@gmail.com>
Zilin Du <zilin.du@gmail.com>
zimbatm <zimbatm@zimbatm.com>
Ziming Dong <bnudzm@foxmail.com>
@@ -2162,13 +2068,12 @@ zmarouf <zeid.marouf@gmail.com>
Zoltan Tombol <zoltan.tombol@gmail.com>
Zou Yu <zouyu7@huawei.com>
zqh <zqhxuyuan@gmail.com>
Zuhayr Elahi <zuhayr.elahi@docker.com>
Zuhayr Elahi <elahi.zuhayr@gmail.com>
Zunayed Ali <zunayed@gmail.com>
Álex González <agonzalezro@gmail.com>
Álvaro Lázaro <alvaro.lazaro.g@gmail.com>
Átila Camurça Alves <camurca.home@gmail.com>
尹吉峰 <jifeng.yin@gmail.com>
屈骏 <qujun@tiduyun.com>
徐俊杰 <paco.xu@daocloud.io>
慕陶 <jihui.xjh@alibaba-inc.com>
搏通 <yufeng.pyf@alibaba-inc.com>
@@ -27,10 +27,10 @@ issue, please bring it to their attention right away!

Please **DO NOT** file a public issue, instead send your report privately to
[security@docker.com](mailto:security@docker.com).

Security reports are greatly appreciated and we will publicly thank you for it,
although we keep your name confidential if you request it. We also like to send
gifts—if you're into schwag, make sure to let us know. We currently do not
offer a paid security bounty program, but are not ruling it out in the future.
Security reports are greatly appreciated and we will publicly thank you for it.
We also like to send gifts—if you're into schwag, make sure to let
us know. We currently do not offer a paid security bounty program, but are not
ruling it out in the future.


## Reporting other issues

534 Dockerfile
@@ -1,408 +1,274 @@
# syntax=docker/dockerfile:1
# This file describes the standard way to build Docker, using docker
#
# Usage:
#
# # Use make to build a development environment image and run it in a container.
# # This is slow the first time.
# make BIND_DIR=. shell
#
# The following commands are executed inside the running container.

# # Make a dockerd binary.
# # hack/make.sh binary
#
# # Install dockerd to /usr/local/bin
# # make install
#
# # Run unit tests
# # hack/test/unit
#
# # Run tests e.g. integration, py
# # hack/make.sh binary test-integration test-docker-py
#
# Note: AppArmor used to mess with privileged mode, but this is no longer
# the case. Therefore, you don't have to disable it anymore.
#

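Taken together, the usage comments above describe a workflow along these lines. This is a minimal sketch built only from those comments, assuming a checkout of the repository root and a working Docker install on the host:

```bash
# Build the development image and start a shell in it (slow on the first run).
make BIND_DIR=. shell

# Inside the running dev container:
hack/make.sh binary        # build a dockerd binary
make install               # install dockerd to /usr/local/bin
hack/test/unit             # run unit tests
hack/make.sh binary test-integration test-docker-py   # integration / docker-py tests
```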
ARG CROSS="false"
ARG SYSTEMD="false"
ARG GO_VERSION=1.20.10
ARG DEBIAN_FRONTEND=noninteractive
ARG VPNKIT_VERSION=0.5.0
ARG DOCKER_BUILDTAGS="apparmor seccomp"
ARG GO_VERSION=1.12.8

ARG BASE_DEBIAN_DISTRO="bullseye"
ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}"

FROM ${GOLANG_IMAGE} AS base
RUN echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
FROM golang:${GO_VERSION}-stretch AS base
ARG APT_MIRROR
RUN test -n "$APT_MIRROR" && sed -ri "s/(httpredir|deb|security).debian.org/${APT_MIRROR}/g" /etc/apt/sources.list || true
ENV GO111MODULE=off
RUN sed -ri "s/(httpredir|deb).debian.org/${APT_MIRROR:-deb.debian.org}/g" /etc/apt/sources.list \
 && sed -ri "s/(security).debian.org/${APT_MIRROR:-security.debian.org}/g" /etc/apt/sources.list

FROM base AS criu
ARG DEBIAN_FRONTEND
# Install dependency packages specific to criu
RUN --mount=type=cache,sharing=locked,id=moby-criu-aptlib,target=/var/lib/apt \
    --mount=type=cache,sharing=locked,id=moby-criu-aptcache,target=/var/cache/apt \
        apt-get update && apt-get install -y --no-install-recommends \
            libcap-dev \
            libnet-dev \
            libnl-3-dev \
            libprotobuf-c-dev \
            libprotobuf-dev \
            protobuf-c-compiler \
            protobuf-compiler \
            python3-protobuf

# Install CRIU for checkpoint/restore support
ARG CRIU_VERSION=3.14
RUN mkdir -p /usr/src/criu \
 && curl -sSL https://github.com/checkpoint-restore/criu/archive/v${CRIU_VERSION}.tar.gz | tar -C /usr/src/criu/ -xz --strip-components=1 \
 && cd /usr/src/criu \
 && make \
 && make PREFIX=/build/ install-criu
ENV CRIU_VERSION 3.11
# Install dependency packages specific to criu
RUN apt-get update && apt-get install -y \
    libnet-dev \
    libprotobuf-c0-dev \
    libprotobuf-dev \
    libnl-3-dev \
    libcap-dev \
    protobuf-compiler \
    protobuf-c-compiler \
    python-protobuf \
 && mkdir -p /usr/src/criu \
 && curl -sSL https://github.com/checkpoint-restore/criu/archive/v${CRIU_VERSION}.tar.gz | tar -C /usr/src/criu/ -xz --strip-components=1 \
 && cd /usr/src/criu \
 && make \
 && make PREFIX=/build/ install-criu

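The newer side of this stage relies on BuildKit cache mounts (`RUN --mount=type=cache,...`) so apt's package lists and downloaded packages persist across rebuilds instead of being re-fetched; `sharing=locked` serializes concurrent builds touching the same cache, and the `Keep-Downloaded-Packages` setting in the base stage keeps apt from deleting cached .debs. A minimal standalone sketch of the same pattern, assuming BuildKit is enabled (the image name and package are arbitrary):

```bash
# Hypothetical demo of the apt cache-mount pattern used above.
DOCKER_BUILDKIT=1 docker build -t cache-mount-demo - <<'EOF'
# syntax=docker/dockerfile:1
FROM debian:bullseye-slim
RUN --mount=type=cache,sharing=locked,id=demo-aptlib,target=/var/lib/apt \
    --mount=type=cache,sharing=locked,id=demo-aptcache,target=/var/cache/apt \
    apt-get update && apt-get install -y --no-install-recommends curl
EOF
```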
FROM base AS registry
WORKDIR /go/src/github.com/docker/distribution
# Install two versions of the registry. The first one is a recent version that
# supports both schema 1 and 2 manifests. The second one is an older version that
# only supports schema1 manifests. This allows integration-cli tests to cover
# push/pull with both schema1 and schema2 manifests.
# The old version of the registry is not working on arm64, so installation is
# skipped on that architecture.
# Install two versions of the registry. The first is an older version that
# only supports schema1 manifests. The second is a newer version that supports
# both. This allows integration-cli tests to cover push/pull with both schema1
# and schema2 manifests.
ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd
ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827
RUN --mount=type=cache,target=/root/.cache/go-build \
    --mount=type=cache,target=/go/pkg/mod \
    --mount=type=tmpfs,target=/go/src/ \
        set -x \
        && git clone https://github.com/docker/distribution.git . \
        && git checkout -q "$REGISTRY_COMMIT" \
        && GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \
           go build -buildmode=pie -o /build/registry-v2 github.com/docker/distribution/cmd/registry \
        && case $(dpkg --print-architecture) in \
               amd64|armhf|ppc64*|s390x) \
                   git checkout -q "$REGISTRY_COMMIT_SCHEMA1"; \
                   GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \
                   go build -buildmode=pie -o /build/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \
                   ;; \
           esac
RUN set -x \
 && export GOPATH="$(mktemp -d)" \
 && git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \
 && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \
 && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \
    go build -buildmode=pie -o /build/registry-v2 github.com/docker/distribution/cmd/registry \
 && case $(dpkg --print-architecture) in \
        amd64|ppc64*|s390x) \
            (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1"); \
            GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \
            go build -buildmode=pie -o /build/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \
            ;; \
    esac \
 && rm -rf "$GOPATH"

FROM base AS swagger
WORKDIR $GOPATH/src/github.com/go-swagger/go-swagger
# Install go-swagger for validating swagger.yaml
# This is https://github.com/kolyshkin/go-swagger/tree/golang-1.13-fix
# TODO: move to under moby/ or fix upstream go-swagger to work for us.
ENV GO_SWAGGER_COMMIT c56166c036004ba7a3a321e5951ba472b9ae298c
RUN --mount=type=cache,target=/root/.cache/go-build \
    --mount=type=cache,target=/go/pkg/mod \
    --mount=type=tmpfs,target=/go/src/ \
        set -x \
        && git clone https://github.com/kolyshkin/go-swagger.git . \
        && git checkout -q "$GO_SWAGGER_COMMIT" \
        && go build -o /build/swagger github.com/go-swagger/go-swagger/cmd/swagger
ENV GO_SWAGGER_COMMIT c28258affb0b6251755d92489ef685af8d4ff3eb
RUN set -x \
 && export GOPATH="$(mktemp -d)" \
 && git clone https://github.com/go-swagger/go-swagger.git "$GOPATH/src/github.com/go-swagger/go-swagger" \
 && (cd "$GOPATH/src/github.com/go-swagger/go-swagger" && git checkout -q "$GO_SWAGGER_COMMIT") \
 && go build -o /build/swagger github.com/go-swagger/go-swagger/cmd/swagger \
 && rm -rf "$GOPATH"

FROM debian:${BASE_DEBIAN_DISTRO} AS frozen-images
ARG DEBIAN_FRONTEND
RUN --mount=type=cache,sharing=locked,id=moby-frozen-images-aptlib,target=/var/lib/apt \
    --mount=type=cache,sharing=locked,id=moby-frozen-images-aptcache,target=/var/cache/apt \
        apt-get update && apt-get install -y --no-install-recommends \
            ca-certificates \
            curl \
            jq
FROM base AS frozen-images
RUN apt-get update && apt-get install -y jq ca-certificates --no-install-recommends
# Get useful and necessary Hub images so we can "docker load" locally instead of pulling
COPY contrib/download-frozen-image-v2.sh /
ARG TARGETARCH
RUN /download-frozen-image-v2.sh /build \
    busybox:latest@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209 \
    busybox:glibc@sha256:1f81263701cddf6402afe9f33fca0266d9fff379e59b1748f33d3072da71ee85 \
    debian:bullseye-slim@sha256:dacf278785a4daa9de07596ec739dbc07131e189942772210709c5c0777e8437 \
    hello-world:latest@sha256:d58e752213a51785838f9eed2b7a498ffa1cb3aa7f946dda11af39286c3db9a9 \
    arm32v7/hello-world:latest@sha256:50b8560ad574c779908da71f7ce370c0a2471c098d44d1c8f6b513c5a55eeeb1
# See also frozenImages in "testutil/environment/protect.go" (which needs to be updated when adding images to this list)
    buildpack-deps:jessie@sha256:dd86dced7c9cd2a724e779730f0a53f93b7ef42228d4344b25ce9a42a1486251 \
    busybox:latest@sha256:bbc3a03235220b170ba48a157dd097dd1379299370e1ed99ce976df0355d24f0 \
    busybox:glibc@sha256:0b55a30394294ab23b9afd58fab94e61a923f5834fba7ddbae7f8e0c11ba85e6 \
    debian:jessie@sha256:287a20c5f73087ab406e6b364833e3fb7b3ae63ca0eb3486555dc27ed32c6e60 \
    hello-world:latest@sha256:be0cd392e45be79ffeffa6b05338b98ebb16c87b255f48e297ec7f98e123905c
# See also ensureFrozenImagesLinux() in "integration-cli/fixtures_linux_daemon_test.go" (which needs to be updated when adding images to this list)
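The frozen-images stage pins test images by digest and fetches them with `contrib/download-frozen-image-v2.sh`, so the integration tests can `docker load` them instead of pulling from the Hub. A rough sketch of using the script outside the build, assuming a checkout of the repository and `curl`/`jq` on the host (the `./frozen` directory name is arbitrary):

```bash
# Download a pinned image into ./frozen, then load it into the local daemon.
./contrib/download-frozen-image-v2.sh ./frozen \
    busybox:latest@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209
tar -cC ./frozen . | docker load
```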
FROM base AS cross-false

FROM --platform=linux/amd64 base AS cross-true
ARG DEBIAN_FRONTEND
FROM base AS cross-true
RUN dpkg --add-architecture armhf
RUN dpkg --add-architecture arm64
RUN dpkg --add-architecture armel
RUN dpkg --add-architecture armhf
RUN dpkg --add-architecture ppc64el
RUN dpkg --add-architecture s390x
RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \
    --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \
        apt-get update && apt-get install -y --no-install-recommends \
            crossbuild-essential-arm64 \
            crossbuild-essential-armel \
            crossbuild-essential-armhf \
            crossbuild-essential-ppc64el \
            crossbuild-essential-s390x
RUN if [ "$(go env GOHOSTARCH)" = "amd64" ]; then \
        apt-get update \
        && apt-get install -y --no-install-recommends \
            crossbuild-essential-armhf \
            crossbuild-essential-arm64 \
            crossbuild-essential-armel; \
    fi

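The cross-true stage registers foreign dpkg architectures and installs the matching `crossbuild-essential-<arch>` toolchains, which is what lets cgo-enabled Go builds target other Linux architectures. A sketch of the same pattern on a Debian host; the compiler name comes from Debian's crossbuild packages, and the `./cmd/dockerd` path is illustrative:

```bash
# Register arm64 as a foreign architecture and install its cross toolchain.
sudo dpkg --add-architecture arm64
sudo apt-get update
sudo apt-get install -y crossbuild-essential-arm64

# Cross-compile a cgo-enabled Go binary for linux/arm64.
CC=aarch64-linux-gnu-gcc CGO_ENABLED=1 GOOS=linux GOARCH=arm64 \
    go build -o dockerd-arm64 ./cmd/dockerd
```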
FROM cross-${CROSS} as dev-base

FROM dev-base AS runtime-dev-cross-false
ARG DEBIAN_FRONTEND
RUN --mount=type=cache,sharing=locked,id=moby-cross-false-aptlib,target=/var/lib/apt \
    --mount=type=cache,sharing=locked,id=moby-cross-false-aptcache,target=/var/cache/apt \
        apt-get update && apt-get install -y --no-install-recommends \
            binutils-mingw-w64 \
            g++-mingw-w64-x86-64 \
            libapparmor-dev \
            libbtrfs-dev \
            libdevmapper-dev \
            libseccomp-dev \
            libsystemd-dev \
            libudev-dev
RUN apt-get update && apt-get install -y \
    libapparmor-dev \
    libseccomp-dev

FROM --platform=linux/amd64 runtime-dev-cross-false AS runtime-dev-cross-true
ARG DEBIAN_FRONTEND
FROM cross-true AS runtime-dev-cross-true
# These crossbuild packages rely on gcc-<arch>, but this doesn't want to install
# on non-amd64 systems, so other architectures cannot crossbuild amd64.
RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \
    --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \
        apt-get update && apt-get install -y --no-install-recommends \
            libapparmor-dev:arm64 \
            libapparmor-dev:armel \
            libapparmor-dev:armhf \
            libapparmor-dev:ppc64el \
            libapparmor-dev:s390x \
            libseccomp-dev:arm64 \
            libseccomp-dev:armel \
            libseccomp-dev:armhf \
            libseccomp-dev:ppc64el \
            libseccomp-dev:s390x
# on non-amd64 systems.
# Additionally, the crossbuild-amd64 is currently only on debian:buster, so
# other architectures cannot crossbuild amd64.
RUN if [ "$(go env GOHOSTARCH)" = "amd64" ]; then \
        apt-get update \
        && apt-get install -y \
            libseccomp-dev:armhf \
            libseccomp-dev:arm64 \
            libseccomp-dev:armel \
            libapparmor-dev:armhf \
            libapparmor-dev:arm64 \
            libapparmor-dev:armel \
            # install this arch's seccomp here due to compat issues with the v0 builder
            # This is as opposed to inheriting from runtime-dev-cross-false
            libapparmor-dev \
            libseccomp-dev; \
    fi

FROM runtime-dev-cross-${CROSS} AS runtime-dev

FROM base AS tomlv
ARG TOMLV_COMMIT
RUN --mount=type=cache,target=/root/.cache/go-build \
    --mount=type=cache,target=/go/pkg/mod \
    --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \
        PREFIX=/build /tmp/install/install.sh tomlv
ENV INSTALL_BINARY_NAME=tomlv
COPY hack/dockerfile/install/install.sh ./install.sh
COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
RUN PREFIX=/build ./install.sh $INSTALL_BINARY_NAME

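Throughout the diff, the per-tool stages move from the COPY-based pattern (set `INSTALL_BINARY_NAME`, copy `install.sh` plus the matching `.installer` file into the stage, then run it) to a bind-mount pattern that mounts `hack/dockerfile/install` read-only and passes the tool name as an argument, avoiding extra image layers. In both styles the tool name is a positional argument to the same script; a sketch of how either invocation looks:

```bash
# Newer style: hack/dockerfile/install is bind-mounted into the build.
PREFIX=/build /tmp/install/install.sh tomlv

# Older style: install.sh and tomlv.installer are COPY'd into the stage first.
PREFIX=/build ./install.sh tomlv
```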
FROM base AS vndr
ARG VNDR_VERSION
RUN --mount=type=cache,target=/root/.cache/go-build \
    --mount=type=cache,target=/go/pkg/mod \
    --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \
        PREFIX=/build /tmp/install/install.sh vndr
ENV INSTALL_BINARY_NAME=vndr
COPY hack/dockerfile/install/install.sh ./install.sh
COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
RUN PREFIX=/build ./install.sh $INSTALL_BINARY_NAME

FROM dev-base AS containerd
ARG DEBIAN_FRONTEND
RUN --mount=type=cache,sharing=locked,id=moby-containerd-aptlib,target=/var/lib/apt \
    --mount=type=cache,sharing=locked,id=moby-containerd-aptcache,target=/var/cache/apt \
        apt-get update && apt-get install -y --no-install-recommends \
            libbtrfs-dev
ARG CONTAINERD_VERSION
RUN --mount=type=cache,target=/root/.cache/go-build \
    --mount=type=cache,target=/go/pkg/mod \
    --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \
        PREFIX=/build /tmp/install/install.sh containerd
RUN apt-get update && apt-get install -y btrfs-tools
ENV INSTALL_BINARY_NAME=containerd
COPY hack/dockerfile/install/install.sh ./install.sh
COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
RUN PREFIX=/build ./install.sh $INSTALL_BINARY_NAME

FROM dev-base AS proxy
ARG LIBNETWORK_COMMIT
RUN --mount=type=cache,target=/root/.cache/go-build \
    --mount=type=cache,target=/go/pkg/mod \
    --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \
        PREFIX=/build /tmp/install/install.sh proxy
ENV INSTALL_BINARY_NAME=proxy
COPY hack/dockerfile/install/install.sh ./install.sh
COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
RUN PREFIX=/build ./install.sh $INSTALL_BINARY_NAME

FROM base AS golangci_lint
ARG GOLANGCI_LINT_VERSION
RUN --mount=type=cache,target=/root/.cache/go-build \
    --mount=type=cache,target=/go/pkg/mod \
    --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \
        PREFIX=/build /tmp/install/install.sh golangci_lint
FROM base AS gometalinter
ENV INSTALL_BINARY_NAME=gometalinter
COPY hack/dockerfile/install/install.sh ./install.sh
COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
RUN PREFIX=/build ./install.sh $INSTALL_BINARY_NAME

FROM base AS gotestsum
ARG GOTESTSUM_VERSION
RUN --mount=type=cache,target=/root/.cache/go-build \
    --mount=type=cache,target=/go/pkg/mod \
    --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \
        PREFIX=/build /tmp/install/install.sh gotestsum

FROM base AS shfmt
ARG SHFMT_VERSION
RUN --mount=type=cache,target=/root/.cache/go-build \
    --mount=type=cache,target=/go/pkg/mod \
    --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \
        PREFIX=/build /tmp/install/install.sh shfmt
ENV INSTALL_BINARY_NAME=gotestsum
COPY hack/dockerfile/install/install.sh ./install.sh
COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
RUN PREFIX=/build ./install.sh $INSTALL_BINARY_NAME

FROM dev-base AS dockercli
ARG DOCKERCLI_CHANNEL
ARG DOCKERCLI_VERSION
RUN --mount=type=cache,target=/root/.cache/go-build \
    --mount=type=cache,target=/go/pkg/mod \
    --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \
        PREFIX=/build /tmp/install/install.sh dockercli
ENV INSTALL_BINARY_NAME=dockercli
COPY hack/dockerfile/install/install.sh ./install.sh
COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
RUN PREFIX=/build ./install.sh $INSTALL_BINARY_NAME

FROM runtime-dev AS runc
ARG RUNC_VERSION
ARG RUNC_BUILDTAGS
RUN --mount=type=cache,target=/root/.cache/go-build \
    --mount=type=cache,target=/go/pkg/mod \
    --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \
        PREFIX=/build /tmp/install/install.sh runc
ENV INSTALL_BINARY_NAME=runc
COPY hack/dockerfile/install/install.sh ./install.sh
COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
RUN PREFIX=/build ./install.sh $INSTALL_BINARY_NAME

FROM dev-base AS tini
ARG DEBIAN_FRONTEND
ARG TINI_VERSION
RUN --mount=type=cache,sharing=locked,id=moby-tini-aptlib,target=/var/lib/apt \
    --mount=type=cache,sharing=locked,id=moby-tini-aptcache,target=/var/cache/apt \
        apt-get update && apt-get install -y --no-install-recommends \
            cmake \
            vim-common
RUN --mount=type=cache,target=/root/.cache/go-build \
    --mount=type=cache,target=/go/pkg/mod \
    --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \
        PREFIX=/build /tmp/install/install.sh tini
RUN apt-get update && apt-get install -y cmake vim-common
COPY hack/dockerfile/install/install.sh ./install.sh
ENV INSTALL_BINARY_NAME=tini
COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
RUN PREFIX=/build ./install.sh $INSTALL_BINARY_NAME

FROM dev-base AS rootlesskit
ARG ROOTLESSKIT_VERSION
RUN --mount=type=cache,target=/root/.cache/go-build \
    --mount=type=cache,target=/go/pkg/mod \
    --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \
        PREFIX=/build /tmp/install/install.sh rootlesskit
ENV INSTALL_BINARY_NAME=rootlesskit
COPY hack/dockerfile/install/install.sh ./install.sh
COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
RUN PREFIX=/build/ ./install.sh $INSTALL_BINARY_NAME
COPY ./contrib/dockerd-rootless.sh /build
COPY ./contrib/dockerd-rootless-setuptool.sh /build

FROM --platform=amd64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-amd64

FROM --platform=arm64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-arm64

FROM scratch AS vpnkit
COPY --from=vpnkit-amd64 /vpnkit /build/vpnkit.x86_64
COPY --from=vpnkit-arm64 /vpnkit /build/vpnkit.aarch64

# TODO: Some of this is only really needed for testing, it would be nice to split this up
FROM runtime-dev AS dev-systemd-false
ARG DEBIAN_FRONTEND
FROM runtime-dev AS dev
RUN groupadd -r docker
RUN useradd --create-home --gid docker unprivilegeduser \
 && mkdir -p /home/unprivilegeduser/.local/share/docker \
 && chown -R unprivilegeduser /home/unprivilegeduser
RUN useradd --create-home --gid docker unprivilegeduser
# Let us use a .bashrc file
RUN ln -sfv /go/src/github.com/docker/docker/.bashrc ~/.bashrc
# Activate bash completion and include Docker's completion if mounted with DOCKER_BASH_COMPLETION_PATH
RUN echo "source /usr/share/bash-completion/bash_completion" >> /etc/bash.bashrc
RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker
RUN ldconfig
# Set dev environment as safe git directory to prevent "dubious ownership" errors
# when bind-mounting the source into the dev-container. See https://github.com/moby/moby/pull/44930
RUN git config --global --add safe.directory $GOPATH/src/github.com/docker/docker
# This should only install packages that are specifically needed for the dev environment and nothing else
# Do you really need to add another package here? Can it be done in a different build stage?
RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \
    --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \
        apt-get update && apt-get install -y --no-install-recommends \
            apparmor \
            bash-completion \
            bzip2 \
            inetutils-ping \
            iproute2 \
            iptables \
            jq \
            libcap2-bin \
            libnet1 \
            libnl-3-200 \
            libprotobuf-c1 \
            net-tools \
            patch \
            pigz \
            python3-pip \
            python3-setuptools \
            python3-wheel \
            sudo \
            thin-provisioning-tools \
            uidmap \
            vim \
            vim-common \
            xfsprogs \
            xz-utils \
            zip
RUN apt-get update && apt-get install -y \
    apparmor \
    aufs-tools \
    bash-completion \
    btrfs-tools \
    iptables \
    jq \
    libcap2-bin \
    libdevmapper-dev \
    libudev-dev \
    libsystemd-dev \
    binutils-mingw-w64 \
    g++-mingw-w64-x86-64 \
    net-tools \
    pigz \
    python3-pip \
    python3-setuptools \
    thin-provisioning-tools \
    vim \
    vim-common \
    xfsprogs \
    zip \
    bzip2 \
    xz-utils \
    libprotobuf-c1 \
    libnet1 \
    libnl-3-200 \
    --no-install-recommends

RUN pip3 install yamllint==1.16.0

# Switch to use iptables instead of nftables (to match the CI hosts)
# TODO use some kind of runtime auto-detection instead if/when nftables is supported (https://github.com/moby/moby/issues/26824)
RUN update-alternatives --set iptables /usr/sbin/iptables-legacy || true \
 && update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy || true \
 && update-alternatives --set arptables /usr/sbin/arptables-legacy || true

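The update-alternatives calls pin the container to the legacy iptables backend so rules created inside it match the CI hosts; the `|| true` keeps the stage from failing on images where an alternative is not registered. A quick way to check which backend is active (the output shown is typical of iptables 1.8.x and is illustrative):

```bash
# Inspect the configured alternative and the backend the binary reports.
update-alternatives --display iptables
iptables --version   # prints e.g. "iptables v1.8.4 (legacy)" or "(nf_tables)"
```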
RUN pip3 install yamllint==1.26.1

COPY --from=dockercli /build/ /usr/local/cli
COPY --from=swagger /build/swagger* /usr/local/bin/
COPY --from=frozen-images /build/ /docker-frozen-images
COPY --from=swagger /build/ /usr/local/bin/
COPY --from=tomlv /build/ /usr/local/bin/
COPY --from=tini /build/ /usr/local/bin/
COPY --from=registry /build/ /usr/local/bin/
COPY --from=gometalinter /build/ /usr/local/bin/
COPY --from=gotestsum /build/ /usr/local/bin/
COPY --from=tomlv /build/ /usr/local/bin/
COPY --from=vndr /build/ /usr/local/bin/
COPY --from=tini /build/ /usr/local/bin/
COPY --from=runc /build/ /usr/local/bin/
COPY --from=containerd /build/ /usr/local/bin/
COPY --from=proxy /build/ /usr/local/bin/
COPY --from=dockercli /build/ /usr/local/cli
COPY --from=registry /build/registry* /usr/local/bin/
COPY --from=criu /build/ /usr/local/
COPY --from=rootlesskit /build/ /usr/local/bin/
COPY --from=djs55/vpnkit@sha256:e508a17cfacc8fd39261d5b4e397df2b953690da577e2c987a47630cd0c42f8e /vpnkit /usr/local/bin/vpnkit.x86_64

# Skip the CRIU stage for now, as the opensuse package repository is sometimes
# unstable, and we're currently not using it in CI.
#
# FIXME(thaJeztah): re-enable this stage when https://github.com/moby/moby/issues/38963 is resolved (see https://github.com/moby/moby/pull/38984)
# COPY --from=criu /build/ /usr/local/
COPY --from=vndr /build/ /usr/local/bin/
COPY --from=gotestsum /build/ /usr/local/bin/
COPY --from=golangci_lint /build/ /usr/local/bin/
COPY --from=shfmt /build/ /usr/local/bin/
COPY --from=runc /build/ /usr/local/bin/
COPY --from=containerd /build/ /usr/local/bin/
COPY --from=rootlesskit /build/ /usr/local/bin/
COPY --from=vpnkit /build/ /usr/local/bin/
COPY --from=proxy /build/ /usr/local/bin/
ENV PATH=/usr/local/cli:$PATH
ARG DOCKER_BUILDTAGS
ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}"
ENV DOCKER_BUILDTAGS apparmor seccomp selinux
# Options for hack/validate/gometalinter
ENV GOMETALINTER_OPTS="--deadline=2m"
WORKDIR /go/src/github.com/docker/docker
VOLUME /var/lib/docker
VOLUME /home/unprivilegeduser/.local/share/docker
# Wrap all commands in the "docker-in-docker" script to allow nested containers
ENTRYPOINT ["hack/dind"]

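With the dev stage assembled and `hack/dind` as the entrypoint, the supported way to get an interactive development environment remains `make BIND_DIR=. shell`, which wraps a privileged `docker run`. A hand-rolled rough equivalent, for illustration only (the image tag is arbitrary):

```bash
# Build the dev image and start a privileged shell in it, mounting the source.
docker build -t docker-dev --target dev .
docker run --rm -it --privileged \
    -v "$PWD:/go/src/github.com/docker/docker" \
    docker-dev bash
```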
FROM dev-systemd-false AS dev-systemd-true
RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \
    --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \
        apt-get update && apt-get install -y --no-install-recommends \
            dbus \
            dbus-user-session \
            systemd \
            systemd-sysv
RUN mkdir -p hack \
 && curl -o hack/dind-systemd https://raw.githubusercontent.com/AkihiroSuda/containerized-systemd/b70bac0daeea120456764248164c21684ade7d0d/docker-entrypoint.sh \
 && chmod +x hack/dind-systemd
ENTRYPOINT ["hack/dind-systemd"]

FROM dev-systemd-${SYSTEMD} AS dev

FROM runtime-dev AS binary-base
ARG DOCKER_GITCOMMIT=HEAD
ENV DOCKER_GITCOMMIT=${DOCKER_GITCOMMIT}
ARG VERSION
ENV VERSION=${VERSION}
ARG PLATFORM
ENV PLATFORM=${PLATFORM}
ARG PRODUCT
ENV PRODUCT=${PRODUCT}
ARG DEFAULT_PRODUCT_LICENSE
ENV DEFAULT_PRODUCT_LICENSE=${DEFAULT_PRODUCT_LICENSE}
ARG DOCKER_BUILDTAGS
ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}"
ENV PREFIX=/build
# TODO: This is here because hack/make.sh binary copies these extras binaries
# from $PATH into the bundles dir.
# It would be nice to handle this in a different way.
COPY --from=tini /build/ /usr/local/bin/
COPY --from=runc /build/ /usr/local/bin/
COPY --from=containerd /build/ /usr/local/bin/
COPY --from=rootlesskit /build/ /usr/local/bin/
COPY --from=proxy /build/ /usr/local/bin/
COPY --from=vpnkit /build/ /usr/local/bin/
WORKDIR /go/src/github.com/docker/docker

FROM binary-base AS build-binary
RUN --mount=type=cache,target=/root/.cache/go-build \
    --mount=type=bind,target=/go/src/github.com/docker/docker \
        hack/make.sh binary

FROM binary-base AS build-dynbinary
RUN --mount=type=cache,target=/root/.cache/go-build \
    --mount=type=bind,target=/go/src/github.com/docker/docker \
        hack/make.sh dynbinary

FROM binary-base AS build-cross
ARG DOCKER_CROSSPLATFORMS
RUN --mount=type=cache,target=/root/.cache/go-build \
    --mount=type=bind,target=/go/src/github.com/docker/docker \
    --mount=type=tmpfs,target=/go/src/github.com/docker/docker/autogen \
        hack/make.sh cross

FROM scratch AS binary
COPY --from=build-binary /build/bundles/ /

FROM scratch AS dynbinary
COPY --from=build-dynbinary /build/bundles/ /

FROM scratch AS cross
COPY --from=build-cross /build/bundles/ /

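Because the binary, dynbinary, and cross targets are `FROM scratch` stages containing only the build output, BuildKit can export their contents straight to the host without creating a runnable image. A sketch, assuming BuildKit is enabled and `./out` is an arbitrary destination:

```bash
# Build the static binaries and write the bundles to ./out on the host.
DOCKER_BUILDKIT=1 docker build --target binary --output type=local,dest=./out .
ls ./out
```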
FROM dev AS final
# Upload docker source
COPY . /go/src/github.com/docker/docker

@@ -1,7 +1,7 @@
ARG GO_VERSION=1.20.10
ARG GO_VERSION=1.12.8

FROM golang:${GO_VERSION}-alpine AS base
ENV GO111MODULE=off

RUN apk --no-cache add \
    bash \
    btrfs-progs-dev \
@@ -18,12 +18,12 @@ FROM base AS frozen-images
# Get useful and necessary Hub images so we can "docker load" locally instead of pulling
COPY contrib/download-frozen-image-v2.sh /
RUN /download-frozen-image-v2.sh /build \
    busybox:latest@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209 \
    busybox:latest@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209 \
    busybox:glibc@sha256:1f81263701cddf6402afe9f33fca0266d9fff379e59b1748f33d3072da71ee85 \
    debian:bullseye-slim@sha256:dacf278785a4daa9de07596ec739dbc07131e189942772210709c5c0777e8437 \
    hello-world:latest@sha256:d58e752213a51785838f9eed2b7a498ffa1cb3aa7f946dda11af39286c3db9a9
# See also frozenImages in "testutil/environment/protect.go" (which needs to be updated when adding images to this list)
    buildpack-deps:jessie@sha256:dd86dced7c9cd2a724e779730f0a53f93b7ef42228d4344b25ce9a42a1486251 \
    busybox:latest@sha256:bbc3a03235220b170ba48a157dd097dd1379299370e1ed99ce976df0355d24f0 \
    busybox:glibc@sha256:0b55a30394294ab23b9afd58fab94e61a923f5834fba7ddbae7f8e0c11ba85e6 \
    debian:jessie@sha256:287a20c5f73087ab406e6b364833e3fb7b3ae63ca0eb3486555dc27ed32c6e60 \
    hello-world:latest@sha256:be0cd392e45be79ffeffa6b05338b98ebb16c87b255f48e297ec7f98e123905c
# See also ensureFrozenImagesLinux() in "integration-cli/fixtures_linux_daemon_test.go" (which needs to be updated when adding images to this list)

FROM base AS dockercli
ENV INSTALL_BINARY_NAME=dockercli
@@ -51,7 +51,7 @@ RUN hack/make.sh build-integration-test-binary
RUN mkdir -p /build/tests && find . -name test.main -exec cp --parents '{}' /build/tests \;

## Generate testing image
FROM alpine:3.10 as runner
FROM alpine:3.9 as runner

ENV DOCKER_REMOTE_DAEMON=1
ENV DOCKER_INTEGRATION_DAEMON_DEST=/

@@ -5,10 +5,9 @@

# This represents the bare minimum required to build and test Docker.

ARG GO_VERSION=1.20.10
ARG GO_VERSION=1.12.8

FROM golang:${GO_VERSION}-buster
ENV GO111MODULE=off
FROM golang:${GO_VERSION}-stretch

# allow replacing httpredir or deb mirror
ARG APT_MIRROR=deb.debian.org
@@ -18,13 +17,13 @@ RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list
# https://github.com/docker/docker/blob/master/project/PACKAGERS.md#build-dependencies
# https://github.com/docker/docker/blob/master/project/PACKAGERS.md#runtime-dependencies
RUN apt-get update && apt-get install -y --no-install-recommends \
    btrfs-tools \
    build-essential \
    curl \
    cmake \
    gcc \
    git \
    libapparmor-dev \
    libbtrfs-dev \
    libdevmapper-dev \
    libseccomp-dev \
    ca-certificates \

@@ -45,8 +45,8 @@
#
# 1. Clone the sources from github.com:
#
# >> git clone https://github.com/docker/docker.git C:\gopath\src\github.com\docker\docker
# >> Cloning into 'C:\gopath\src\github.com\docker\docker'...
# >> git clone https://github.com/docker/docker.git C:\go\src\github.com\docker\docker
# >> Cloning into 'C:\go\src\github.com\docker\docker'...
# >> remote: Counting objects: 186216, done.
# >> remote: Compressing objects: 100% (21/21), done.
# >> remote: Total 186216 (delta 5), reused 0 (delta 0), pack-reused 186195
@@ -59,7 +59,7 @@
#
# 2. Change directory to the cloned docker sources:
#
# >> cd C:\gopath\src\github.com\docker\docker
# >> cd C:\go\src\github.com\docker\docker
#
#
# 3. Build a docker image with the components required to build the docker binaries from source
@@ -79,8 +79,8 @@
# 5. Copy the binaries out of the container, replacing HostPath with an appropriate destination
# folder on the host system where you want the binaries to be located.
#
# >> docker cp binaries:C:\gopath\src\github.com\docker\docker\bundles\docker.exe C:\HostPath\docker.exe
# >> docker cp binaries:C:\gopath\src\github.com\docker\docker\bundles\dockerd.exe C:\HostPath\dockerd.exe
# >> docker cp binaries:C:\go\src\github.com\docker\docker\bundles\docker.exe C:\HostPath\docker.exe
# >> docker cp binaries:C:\go\src\github.com\docker\docker\bundles\dockerd.exe C:\HostPath\dockerd.exe
#
#
# 6. (Optional) Remove the interim container holding the built executable binaries:
@@ -147,7 +147,7 @@
# The docker integration tests do not currently run in a container on Windows, predominantly
# due to Windows not supporting privileged mode, so anything using a volume would fail.
# They (along with the rest of the docker CI suite) can be run using
# https://github.com/kevpar/docker-w2wCIScripts/blob/master/runCI/Invoke-DockerCI.ps1.
# https://github.com/jhowardmsft/docker-w2wCIScripts/blob/master/runCI/Invoke-DockerCI.ps1.
#
# -----------------------------------------------------------------------------------------

@@ -165,18 +165,15 @@ FROM microsoft/windowsservercore
# Use PowerShell as the default shell
SHELL ["powershell", "-Command", "$ErrorActionPreference = 'Stop'; $ProgressPreference = 'SilentlyContinue';"]

ARG GO_VERSION=1.20.10
ARG GOTESTSUM_VERSION=v1.8.2
ARG GO_VERSION=1.12.8

# Environment variable notes:
# - GO_VERSION must be consistent with 'Dockerfile' used by Linux.
# - FROM_DOCKERFILE is used for detection of building within a container.
ENV GO_VERSION=${GO_VERSION} `
    GIT_VERSION=2.11.1 `
    GOPATH=C:\gopath `
    GO111MODULE=off `
    FROM_DOCKERFILE=1 `
    GOTESTSUM_VERSION=${GOTESTSUM_VERSION}
    GOPATH=C:\go `
    FROM_DOCKERFILE=1

RUN `
  Function Test-Nano() { `
@@ -205,7 +202,6 @@ RUN `
      Throw ("Failed to download " + $source) `
    }`
  } else { `
    [Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; `
    $webClient = New-Object System.Net.WebClient; `
    $webClient.DownloadFile($source, $target); `
  } `
@@ -218,17 +214,16 @@ RUN `
  Download-File $location C:\gitsetup.zip; `
  `
  Write-Host INFO: Downloading go...; `
  $dlGoVersion=$Env:GO_VERSION -replace '\.0$',''; `
  Download-File "https://golang.org/dl/go${dlGoVersion}.windows-amd64.zip" C:\go.zip; `
  Download-File $('https://golang.org/dl/go'+$Env:GO_VERSION+'.windows-amd64.zip') C:\go.zip; `
  `
  Write-Host INFO: Downloading compiler 1 of 3...; `
  Download-File https://raw.githubusercontent.com/moby/docker-tdmgcc/master/gcc.zip C:\gcc.zip; `
  Download-File https://raw.githubusercontent.com/jhowardmsft/docker-tdmgcc/master/gcc.zip C:\gcc.zip; `
  `
  Write-Host INFO: Downloading compiler 2 of 3...; `
  Download-File https://raw.githubusercontent.com/moby/docker-tdmgcc/master/runtime.zip C:\runtime.zip; `
  Download-File https://raw.githubusercontent.com/jhowardmsft/docker-tdmgcc/master/runtime.zip C:\runtime.zip; `
  `
  Write-Host INFO: Downloading compiler 3 of 3...; `
  Download-File https://raw.githubusercontent.com/moby/docker-tdmgcc/master/binutils.zip C:\binutils.zip; `
  Download-File https://raw.githubusercontent.com/jhowardmsft/docker-tdmgcc/master/binutils.zip C:\binutils.zip; `
  `
  Write-Host INFO: Extracting git...; `
  Expand-Archive C:\gitsetup.zip C:\git-tmp; `
@@ -252,35 +247,19 @@ RUN `
  Remove-Item C:\binutils.zip; `
  Remove-Item C:\gitsetup.zip; `
  `
  # Ensure all directories exist that we will require below....
  $srcDir = """$Env:GOPATH`\src\github.com\docker\docker\bundles"""; `
  Write-Host INFO: Ensuring existence of directory $srcDir...; `
  New-Item -Force -ItemType Directory -Path $srcDir | Out-Null; `
  Write-Host INFO: Creating source directory...; `
  New-Item -ItemType Directory -Path C:\go\src\github.com\docker\docker | Out-Null; `
  `
  Write-Host INFO: Configuring git core.autocrlf...; `
  C:\git\cmd\git config --global core.autocrlf true;

RUN `
  Function Install-GoTestSum() { `
    $Env:GO111MODULE = 'on'; `
    $tmpGobin = "${Env:GOBIN_TMP}"; `
    $Env:GOBIN = """${Env:GOPATH}`\bin"""; `
    Write-Host "INFO: Installing gotestsum version $Env:GOTESTSUM_VERSION in $Env:GOBIN"; `
    &go install "gotest.tools/gotestsum@${Env:GOTESTSUM_VERSION}"; `
    $Env:GOBIN = "${tmpGobin}"; `
    $Env:GO111MODULE = 'off'; `
    if ($LASTEXITCODE -ne 0) { `
      Throw '"gotestsum install failed..."'; `
    } `
  } `
  C:\git\cmd\git config --global core.autocrlf true; `
  `
  Install-GoTestSum
  Write-Host INFO: Completed

# Make PowerShell the default entrypoint
ENTRYPOINT ["powershell.exe"]

# Set the working directory to the location of the sources
WORKDIR ${GOPATH}\src\github.com\docker\docker
WORKDIR C:\go\src\github.com\docker\docker

# Copy the sources into the container
COPY . .

647 Jenkinsfile (vendored)
@@ -8,51 +8,22 @@ pipeline {
        timestamps()
    }
    parameters {
        booleanParam(name: 'unit_validate', defaultValue: true, description: 'amd64 (x86_64) unit tests and vendor check')
        booleanParam(name: 'validate_force', defaultValue: false, description: 'force validation steps to be run, even if no changes were detected')
        booleanParam(name: 'amd64', defaultValue: true, description: 'amd64 (x86_64) Build/Test')
        booleanParam(name: 'rootless', defaultValue: true, description: 'amd64 (x86_64) Build/Test (Rootless mode)')
        booleanParam(name: 'cgroup2', defaultValue: true, description: 'amd64 (x86_64) Build/Test (cgroup v2)')
        booleanParam(name: 'arm64', defaultValue: true, description: 'ARM (arm64) Build/Test')
        booleanParam(name: 'windowsRS5', defaultValue: true, description: 'Windows 2019 (RS5) Build/Test')
        booleanParam(name: 'dco', defaultValue: true, description: 'Run the DCO check')
        booleanParam(name: 'unit_validate', defaultValue: true, description: 'x86 unit tests and vendor check')
        booleanParam(name: 'janky', defaultValue: true, description: 'x86 Build/Test')
        booleanParam(name: 'z', defaultValue: true, description: 'IBM Z (s390x) Build/Test')
        booleanParam(name: 'powerpc', defaultValue: true, description: 'PowerPC (ppc64le) Build/Test')
        booleanParam(name: 'windowsRS1', defaultValue: false, description: 'Windows 2016 (RS1) Build/Test')
        booleanParam(name: 'windowsRS5', defaultValue: false, description: 'Windows 2019 (RS5) Build/Test')
    }
    environment {
        DOCKER_BUILDKIT = '1'
        DOCKER_EXPERIMENTAL = '1'
        DOCKER_GRAPHDRIVER = 'overlay2'
        APT_MIRROR = 'cdn-fastly.deb.debian.org'
        CHECK_CONFIG_COMMIT = '78405559cfe5987174aa2cb6463b9b2c1b917255'
        TESTDEBUG = '0'
        TIMEOUT = '120m'
    }
    stages {
        stage('pr-hack') {
            when { changeRequest() }
            steps {
                script {
                    echo "Workaround for PR auto-cancel feature. Borrowed from https://issues.jenkins-ci.org/browse/JENKINS-43353"
                    def buildNumber = env.BUILD_NUMBER as int
                    if (buildNumber > 1) milestone(buildNumber - 1)
                    milestone(buildNumber)
                }
            }
        }
        stage('DCO-check') {
            when {
                beforeAgent true
                expression { params.dco }
            }
            agent { label 'arm64 && ubuntu-2004' }
            steps {
                sh '''
                docker run --rm \
                    -v "$WORKSPACE:/workspace" \
                    -e VALIDATE_REPO=${GIT_URL} \
                    -e VALIDATE_BRANCH=${CHANGE_TARGET} \
                    alpine sh -c 'apk add --no-cache -q bash git openssh-client && git config --system --add safe.directory /workspace && cd /workspace && hack/validate/dco'
                '''
            }
        }
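The DCO stage simply runs `hack/validate/dco` against the PR's commit range inside a throwaway Alpine container, with `VALIDATE_REPO` and `VALIDATE_BRANCH` telling the script which range to inspect. Roughly the same check can be run by hand before pushing; a sketch, with the repo URL and target branch filled in for illustration:

```bash
# Validate that every commit on the current branch is signed off (DCO).
docker run --rm \
    -v "$PWD:/workspace" \
    -e VALIDATE_REPO='https://github.com/moby/moby.git' \
    -e VALIDATE_BRANCH='master' \
    alpine sh -c 'apk add --no-cache -q bash git openssh-client \
        && git config --system --add safe.directory /workspace \
        && cd /workspace && hack/validate/dco'
```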
        stage('Build') {
            parallel {
                stage('unit-validate') {
@@ -60,13 +31,7 @@ pipeline {
                        beforeAgent true
                        expression { params.unit_validate }
                    }
                    agent { label 'amd64 && ubuntu-2004 && overlay2' }
                    environment {
                        // On master ("non-pull-request"), force running some validation checks (vendor, swagger),
                        // even if no files were changed. This allows catching problems caused by pull-requests
                        // that were merged out-of-sequence.
                        TEST_FORCE_VALIDATE = sh returnStdout: true, script: 'if [ "${BRANCH_NAME%%-*}" != "PR" ] || [ "${CHANGE_TARGET:-master}" != "master" ] || [ "${validate_force}" = "true" ]; then echo "1"; fi'
                    }
                    agent { label 'amd64 && ubuntu-1804 && overlay2' }

                    stages {
                        stage("Print info") {
@@ -95,9 +60,6 @@ pipeline {
                            -e DOCKER_EXPERIMENTAL \
                            -e DOCKER_GITCOMMIT=${GIT_COMMIT} \
                            -e DOCKER_GRAPHDRIVER \
                            -e TEST_FORCE_VALIDATE \
                            -e VALIDATE_REPO=${GIT_URL} \
                            -e VALIDATE_BRANCH=${CHANGE_TARGET} \
                            docker:${GIT_COMMIT} \
                            hack/validate/default
                        '''
@@ -112,8 +74,6 @@ pipeline {
                            -e DOCKER_EXPERIMENTAL \
                            -e DOCKER_GITCOMMIT=${GIT_COMMIT} \
                            -e DOCKER_GRAPHDRIVER \
                            -e VALIDATE_REPO=${GIT_URL} \
                            -e VALIDATE_BRANCH=${CHANGE_TARGET} \
                            docker:${GIT_COMMIT} \
                            hack/make.sh \
                            dynbinary-daemon \
@@ -134,15 +94,12 @@ pipeline {
                        docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
                        '''

                        catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
                            sh '''
                            bundleName=docker-py
                            echo "Creating ${bundleName}-bundles.tar.gz"
                            tar -czf ${bundleName}-bundles.tar.gz bundles/test-docker-py/*.xml bundles/test-docker-py/*.log
                            '''
                        sh '''
                        echo 'Creating docker-py-bundles.tar.gz'
                        tar -czf docker-py-bundles.tar.gz bundles/test-docker-py/*.xml bundles/test-docker-py/*.log
                        '''

                            archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
                        }
                        archiveArtifacts artifacts: 'docker-py-bundles.tar.gz'
                    }
                }
            }
@@ -182,8 +139,6 @@ pipeline {
                            -e DOCKER_EXPERIMENTAL \
                            -e DOCKER_GITCOMMIT=${GIT_COMMIT} \
                            -e DOCKER_GRAPHDRIVER \
                            -e VALIDATE_REPO=${GIT_URL} \
                            -e VALIDATE_BRANCH=${CHANGE_TARGET} \
                            docker:${GIT_COMMIT} \
                            hack/test/unit
                        '''
@@ -203,14 +158,19 @@ pipeline {
                            -e DOCKER_EXPERIMENTAL \
                            -e DOCKER_GITCOMMIT=${GIT_COMMIT} \
                            -e DOCKER_GRAPHDRIVER \
                            -e TEST_FORCE_VALIDATE \
                            -e VALIDATE_REPO=${GIT_URL} \
                            -e VALIDATE_BRANCH=${CHANGE_TARGET} \
                            docker:${GIT_COMMIT} \
                            hack/validate/vendor
                        '''
                    }
                }
                stage("Build e2e image") {
                    steps {
                        sh '''
                        echo "Building e2e image"
                        docker build --build-arg DOCKER_GITCOMMIT=${GIT_COMMIT} -t moby-e2e-test -f Dockerfile.e2e .
                        '''
                    }
                }
            }

            post {
@@ -225,15 +185,12 @@ pipeline {
                docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
                '''

                catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
                    sh '''
                    bundleName=unit
                    echo "Creating ${bundleName}-bundles.tar.gz"
                    tar -czvf ${bundleName}-bundles.tar.gz bundles/junit-report.xml bundles/go-test-report.json bundles/profile.out
                    '''
                sh '''
                echo 'Creating unit-bundles.tar.gz'
                tar -czvf unit-bundles.tar.gz bundles/junit-report.xml bundles/go-test-report.json bundles/profile.out
                '''

                    archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
                }
                archiveArtifacts artifacts: 'unit-bundles.tar.gz'
            }
            cleanup {
                sh 'make clean'
@@ -241,12 +198,12 @@ pipeline {
            }
        }
    }
    stage('amd64') {
    stage('janky') {
        when {
            beforeAgent true
            expression { params.amd64 }
            expression { params.janky }
        }
        agent { label 'amd64 && ubuntu-2004 && overlay2' }
        agent { label 'amd64 && ubuntu-1804 && overlay2' }

        stages {
            stage("Print info") {
@@ -265,7 +222,7 @@ pipeline {
                sh '''
                # todo: include ip_vs in base image
                sudo modprobe ip_vs

                docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
                '''
            }
@@ -275,25 +232,22 @@ pipeline {
                sh '''#!/bin/bash
                # bash is needed so 'jobs -p' works properly
                # it also accepts setting inline envvars for functions without explicitly exporting
                set -x

                run_tests() {
                    [ -n "$TESTDEBUG" ] && rm= || rm=--rm;
                    docker run $rm -t --privileged \
                        -v "$WORKSPACE/bundles/${TEST_INTEGRATION_DEST}:/go/src/github.com/docker/docker/bundles" \
                        -v "$WORKSPACE/bundles/dynbinary-daemon:/go/src/github.com/docker/docker/bundles/dynbinary-daemon" \
                        -v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
                        -v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
                        --name "$CONTAINER_NAME" \
                        -e KEEPBUNDLE=1 \
                        -e TESTDEBUG \
                        -e TESTFLAGS \
                        -e TEST_INTEGRATION_DEST \
                        -e TEST_SKIP_INTEGRATION \
                        -e TEST_SKIP_INTEGRATION_CLI \
                        -e DOCKER_GITCOMMIT=${GIT_COMMIT} \
                        -e DOCKER_GRAPHDRIVER \
                        -e TIMEOUT \
                        -e VALIDATE_REPO=${GIT_URL} \
                        -e VALIDATE_BRANCH=${CHANGE_TARGET} \
                        docker:${GIT_COMMIT} \
                        hack/make.sh \
                        "$1" \
@@ -320,11 +274,12 @@ pipeline {
                TEST_INTEGRATION_DEST=1 CONTAINER_NAME=${CONTAINER_NAME}-1 TEST_SKIP_INTEGRATION_CLI=1 run_tests test-integration-flaky &

                # integration-cli first set
                TEST_INTEGRATION_DEST=2 CONTAINER_NAME=${CONTAINER_NAME}-2 TEST_SKIP_INTEGRATION=1 TESTFLAGS="-test.run Test(DockerSuite|DockerNetworkSuite|DockerHubPullSuite|DockerRegistrySuite|DockerSchema1RegistrySuite|DockerRegistryAuthTokenSuite|DockerRegistryAuthHtpasswdSuite)/" run_tests &
                TEST_INTEGRATION_DEST=2 CONTAINER_NAME=${CONTAINER_NAME}-2 TEST_SKIP_INTEGRATION=1 TESTFLAGS="-check.f ^(DockerSuite|DockerNetworkSuite|DockerHubPullSuite|DockerRegistrySuite|DockerSchema1RegistrySuite|DockerRegistryAuthTokenSuite|DockerRegistryAuthHtpasswdSuite)" run_tests &

                # integration-cli second set
                TEST_INTEGRATION_DEST=3 CONTAINER_NAME=${CONTAINER_NAME}-3 TEST_SKIP_INTEGRATION=1 TESTFLAGS="-test.run Test(DockerSwarmSuite|DockerDaemonSuite|DockerExternalVolumeSuite)/" run_tests &
                TEST_INTEGRATION_DEST=3 CONTAINER_NAME=${CONTAINER_NAME}-3 TEST_SKIP_INTEGRATION=1 TESTFLAGS="-check.f ^(DockerSwarmSuite|DockerDaemonSuite|DockerExternalVolumeSuite)" run_tests &

                set +x
                c=0
                for job in $(jobs -p); do
                    wait ${job} || c=$?
@@ -332,98 +287,6 @@ pipeline {
                exit $c
                '''
            }
            post {
                always {
                    junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
                }
            }
        }
    }

    post {
        always {
            sh '''
            echo "Ensuring container killed."
            cids=$(docker ps -aq -f name=docker-pr${BUILD_NUMBER}-*)
            [ -n "$cids" ] && docker rm -vf $cids || true
            '''

            sh '''
            echo "Chowning /workspace to jenkins user"
            docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
            '''

            catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
                sh '''
                bundleName=amd64
                echo "Creating ${bundleName}-bundles.tar.gz"
                # exclude overlay2 directories
                find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
                '''

                archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
            }
        }
        cleanup {
            sh 'make clean'
            deleteDir()
        }
    }
}
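The parallel integration-cli sets also show the test-runner change in this diff: the older branch selects go-check suites with `-check.f ^(...)`, while the newer one targets the suites as subtests of the standard `go test` runner via `-test.run Test(...)/` (the trailing slash matches any subtest under the named suites). For a single suite, the newer invocation looks roughly like this sketch, run inside the dev container (the suite name is one of those listed above):

```bash
# Run only the DockerNetworkSuite integration-cli tests.
TESTFLAGS='-test.run Test(DockerNetworkSuite)/' \
    TEST_SKIP_INTEGRATION=1 hack/make.sh test-integration
```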
stage('rootless') {
|
||||
when {
|
||||
beforeAgent true
|
||||
expression { params.rootless }
|
||||
}
|
||||
agent { label 'amd64 && ubuntu-2004 && overlay2' }
|
||||
stages {
|
||||
stage("Print info") {
|
||||
steps {
|
||||
sh 'docker version'
|
||||
sh 'docker info'
|
||||
sh '''
|
||||
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
|
||||
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
|
||||
&& bash ${WORKSPACE}/check-config.sh || true
|
||||
'''
|
||||
}
|
||||
}
|
||||
stage("Build dev image") {
|
||||
steps {
|
||||
sh '''
|
||||
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
|
||||
'''
|
||||
}
|
||||
}
|
||||
stage("Integration tests") {
|
||||
environment {
|
||||
DOCKER_ROOTLESS = '1'
|
||||
TEST_SKIP_INTEGRATION_CLI = '1'
|
||||
}
|
||||
steps {
|
||||
sh '''
|
||||
docker run --rm -t --privileged \
|
||||
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
|
||||
--name docker-pr$BUILD_NUMBER \
|
||||
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
|
||||
-e DOCKER_GRAPHDRIVER \
|
||||
-e DOCKER_EXPERIMENTAL \
|
||||
-e DOCKER_ROOTLESS \
|
||||
-e TEST_SKIP_INTEGRATION_CLI \
|
||||
-e TIMEOUT \
|
||||
-e VALIDATE_REPO=${GIT_URL} \
|
||||
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
|
||||
docker:${GIT_COMMIT} \
|
||||
hack/make.sh \
|
||||
dynbinary \
|
||||
test-integration
|
||||
'''
|
||||
}
|
||||
post {
|
||||
always {
|
||||
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -439,16 +302,13 @@ pipeline {
|
||||
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
|
||||
'''
|
||||
|
||||
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
|
||||
sh '''
|
||||
bundleName=amd64-rootless
|
||||
echo "Creating ${bundleName}-bundles.tar.gz"
|
||||
# exclude overlay2 directories
|
||||
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
|
||||
'''
|
||||
sh '''
|
||||
echo "Creating janky-bundles.tar.gz"
|
||||
# exclude overlay2 directories
|
||||
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*.log' -o -name '*.prof' \\) -print | xargs tar -czf janky-bundles.tar.gz
|
||||
'''
|
||||
|
||||
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
|
||||
}
|
||||
archiveArtifacts artifacts: 'janky-bundles.tar.gz'
|
||||
}
|
||||
cleanup {
|
||||
sh 'make clean'
|
||||
@@ -456,98 +316,14 @@ pipeline {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
stage('cgroup2') {
stage('z') {
when {
beforeAgent true
expression { params.cgroup2 }
}
agent { label 'amd64 && ubuntu-2004 && cgroup2' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
}
}
stage("Build dev image") {
steps {
sh '''
docker build --force-rm --build-arg APT_MIRROR --build-arg SYSTEMD=true -t docker:${GIT_COMMIT} .
'''
}
}
stage("Integration tests") {
environment {
DOCKER_SYSTEMD = '1' // recommended cgroup driver for v2
TEST_SKIP_INTEGRATION_CLI = '1' // CLI tests do not support v2
}
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_SYSTEMD \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}

post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''

sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''

catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=amd64-cgroup2
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''

archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}

stage('arm64') {
when {
beforeAgent true
expression { params.arm64 }
}
agent { label 'arm64 && ubuntu-2004' }
environment {
TEST_SKIP_INTEGRATION_CLI = '1'
expression { params.z }
}
agent { label 's390x-ubuntu-1604' }
// s390x machines run on Docker 18.06, and buildkit has some bugs on that version
environment { DOCKER_BUILDKIT = '0' }

stages {
stage("Print info") {
@@ -563,7 +339,9 @@ pipeline {
}
stage("Build dev image") {
steps {
sh 'docker build --force-rm -t docker:${GIT_COMMIT} .'
sh '''
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} -f Dockerfile .
'''
}
}
stage("Unit tests") {
@@ -575,8 +353,6 @@ pipeline {
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/test/unit
'''
@@ -597,22 +373,14 @@ pipeline {
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TESTDEBUG \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}

@@ -628,16 +396,13 @@ pipeline {
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''

catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=arm64-integration
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
sh '''
echo "Creating s390x-integration-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*.log' -o -name '*.prof' \\) -print | xargs tar -czf s390x-integration-bundles.tar.gz
'''

archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
archiveArtifacts artifacts: 's390x-integration-bundles.tar.gz'
}
cleanup {
sh 'make clean'
@@ -645,26 +410,253 @@ pipeline {
}
}
}
stage('win-RS5') {
stage('z-master') {
when {
beforeAgent true
expression { params.windowsRS5 }
branch 'master'
expression { params.z }
}
environment {
DOCKER_BUILDKIT = '0'
DOCKER_DUT_DEBUG = '1'
SKIP_VALIDATION_TESTS = '1'
SOURCES_DRIVE = 'd'
SOURCES_SUBDIR = 'gopath'
TESTRUN_DRIVE = 'd'
TESTRUN_SUBDIR = "CI"
WINDOWS_BASE_IMAGE = 'mcr.microsoft.com/windows/servercore'
WINDOWS_BASE_IMAGE_TAG = 'ltsc2019'
agent { label 's390x-ubuntu-1604' }
// s390x machines run on Docker 18.06, and buildkit has some bugs on that version
environment { DOCKER_BUILDKIT = '0' }

stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} -f Dockerfile .
'''
}
}
stage("Integration-cli tests") {
environment { TEST_SKIP_INTEGRATION = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TEST_SKIP_INTEGRATION \
-e TIMEOUT \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
}
}

post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''

sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''

sh '''
echo "Creating s390x-integration-cli-bundles.tar.gz"
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*.log' -o -name '*.prof' \\) -print | xargs tar -czf s390x-integration-cli-bundles.tar.gz
'''

archiveArtifacts artifacts: 's390x-integration-cli-bundles.tar.gz'
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('powerpc') {
when {
beforeAgent true
expression { params.powerpc }
}
agent { label 'ppc64le-ubuntu-1604' }
// power machines run on Docker 18.06, and buildkit has some bugs on that version
environment { DOCKER_BUILDKIT = '0' }

stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh 'docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} -f Dockerfile .'
}
}
stage("Unit tests") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
docker:${GIT_COMMIT} \
hack/test/unit
'''
}
post {
always {
junit testResults: 'bundles/junit-report.xml', allowEmptyResults: true
}
}
}
stage("Integration tests") {
environment { TEST_SKIP_INTEGRATION_CLI = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
}
}

post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''

sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''

sh '''
echo "Creating powerpc-integration-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*.log' -o -name '*.prof' \\) -print | xargs tar -czf powerpc-integration-bundles.tar.gz
'''

archiveArtifacts artifacts: 'powerpc-integration-bundles.tar.gz'
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('powerpc-master') {
when {
beforeAgent true
branch 'master'
expression { params.powerpc }
}
agent { label 'ppc64le-ubuntu-1604' }
// power machines run on Docker 18.06, and buildkit has some bugs on that version
environment { DOCKER_BUILDKIT = '0' }

stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh 'docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} -f Dockerfile .'
}
}
stage("Integration-cli tests") {
environment { TEST_SKIP_INTEGRATION = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TEST_SKIP_INTEGRATION \
-e TIMEOUT \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
}
}

post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''

sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''

sh '''
echo "Creating powerpc-integration-cli-bundles.tar.gz"
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*.log' -o -name '*.prof' \\) -print | xargs tar -czf powerpc-integration-cli-bundles.tar.gz
'''

archiveArtifacts artifacts: 'powerpc-integration-cli-bundles.tar.gz'
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('windowsRS1') {
when {
beforeAgent true
expression { params.windowsRS1 }
}
agent {
node {
customWorkspace 'd:\\gopath\\src\\github.com\\docker\\docker'
label 'windows-2019'
label 'windows-rs1'
customWorkspace 'c:\\gopath\\src\\github.com\\docker\\docker'
}
}
stages {
@@ -678,32 +670,39 @@ pipeline {
steps {
powershell '''
$ErrorActionPreference = 'Stop'
Invoke-WebRequest https://github.com/moby/docker-ci-zap/blob/master/docker-ci-zap.exe?raw=true -OutFile C:/Windows/System32/docker-ci-zap.exe
./hack/ci/windows.ps1
.\\hack\\ci\\windows.ps1
exit $LastExitCode
'''
}
}
}
post {
always {
junit testResults: 'bundles/junit-report-*.xml', allowEmptyResults: true
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
powershell '''
cd $env:WORKSPACE
$bundleName="windowsRS5-integration"
Write-Host -ForegroundColor Green "Creating ${bundleName}-bundles.zip"

# archiveArtifacts does not support env-vars, so save the artifacts in a fixed location
Compress-Archive -Path "bundles/CIDUT.out", "bundles/CIDUT.err", "bundles/junit-report-*.xml" -CompressionLevel Optimal -DestinationPath "${bundleName}-bundles.zip"
'''

archiveArtifacts artifacts: '*-bundles.zip', allowEmptyArchive: true
}
stage('windowsRS5-process') {
when {
beforeAgent true
expression { params.windowsRS5 }
}
agent {
node {
label 'windows-rs5'
customWorkspace 'c:\\gopath\\src\\github.com\\docker\\docker'
}
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
}
}
cleanup {
sh 'make clean'
deleteDir()
stage("Run tests") {
steps {
powershell '''
$ErrorActionPreference = 'Stop'
.\\hack\\ci\\windows.ps1
exit $LastExitCode
'''
}
}
}
}

MAINTAINERS (68 changes)
@@ -24,17 +24,21 @@
# subsystem maintainers accountable. If ownership is unclear, they are the de facto owners.

people = [
"aaronlehmann",
"akihirosuda",
"anusha",
"coolljt0725",
"cpuguy83",
"crosbymichael",
"dnephin",
"duglin",
"estesp",
"jhowardmsft",
"johnstep",
"justincormack",
"kolyshkin",
"lowenna",
"mhbauer",
"mlaventure",
"runcom",
"stevvooe",
"thajeztah",
@@ -62,12 +66,14 @@
people = [
"alexellis",
"andrewhsu",
"anonymuse",
"chanwit",
"fntlnz",
"gianarb",
"olljanat",
"programmerq",
"rheinwein",
"ripcurld",
"samwhited",
"thajeztah"
]

@@ -78,12 +84,6 @@
# Thank you!

people = [
# Aaron Lehmann was a maintainer for swarmkit, the registry, and the engine,
# and contributed many improvements, features, and bugfixes in those areas,
# among which "automated service rollbacks", templated secrets and configs,
# and resumable image layer downloads.
"aaronlehmann",

# Harald Albers is the mastermind behind the bash completion scripts for the
# Docker CLI. The completion scripts moved to the Docker CLI repository, so
# you can now find him perform his magic in the https://github.com/docker/cli repository.
@@ -103,19 +103,6 @@
# and tweets as @calavera.
"calavera",

# Before becoming a maintainer, Daniel Nephin was a core contributor
# to "Fig" (now known as Docker Compose). As a maintainer for both the
# Engine and Docker CLI, Daniel contributed many features, among which
# the `docker stack` commands, allowing users to deploy their Docker
# Compose projects as a Swarm service.
"dnephin",

# Doug Davis contributed many features and fixes for the classic builder,
# such as "wildcard" copy, the dockerignore file, custom paths/names
# for the Dockerfile, as well as enhancements to the API and documentation.
# Follow Doug on Twitter, where he tweets as @duginabox.
"duglin",

# As a maintainer, Erik was responsible for the "builder", and
# started the first designs for the new networking model in
# Docker. Erik is now working on all kinds of plugins for Docker
@@ -124,7 +111,7 @@
# still stumble into him in our issue tracker, or on IRC.
"erikh",

# Evan Hazlett is the creator of the Shipyard and Interlock open source projects,
# Evan Hazlett is the creator of of the Shipyard and Interlock open source projects,
# and the author of "Orca", which became the foundation of Docker Universal Control
# Plane (UCP). As a maintainer, Evan helped integrating SwarmKit (secrets, tasks)
# into the Docker engine.
@@ -179,13 +166,6 @@
# Swarm mode networking.
"mavenugo",

# As a maintainer, Kenfe-Mickaël Laventure worked on the container runtime,
# integrating containerd 1.0 with the daemon, and adding support for custom
# OCI runtimes, as well as implementing the `docker prune` subcommands,
# which was a welcome feature to be added. You can keep up with Mickaél on
# Twitter (@kmlaventure).
"mlaventure",

# As a docs maintainer, Mary Anthony contributed greatly to the Docker
# docs. She wrote the Docker Contributor Guide and Getting Started
# Guides. She helped create a doc build system independent of
@@ -278,6 +258,11 @@
Email = "andrewhsu@docker.com"
GitHub = "andrewhsu"

[people.anonymuse]
Name = "Jesse White"
Email = "anonymuse@gmail.com"
GitHub = "anonymuse"

[people.anusha]
Name = "Anusha Ragunathan"
Email = "anusha@docker.com"
@@ -298,6 +283,11 @@
Email = "cpuguy83@gmail.com"
GitHub = "cpuguy83"

[people.chanwit]
Name = "Chanwit Kaewkasi"
Email = "chanwit@gmail.com"
GitHub = "chanwit"

[people.crosbymichael]
Name = "Michael Crosby"
Email = "crosbymichael@gmail.com"
@@ -348,6 +338,11 @@
Email = "james@lovedthanlost.net"
GitHub = "jamtur01"

[people.jhowardmsft]
Name = "John Howard"
Email = "jhoward@microsoft.com"
GitHub = "jhowardmsft"

[people.jessfraz]
Name = "Jessie Frazelle"
Email = "jess@linux.com"
@@ -373,11 +368,6 @@
Email = "lk4d4@docker.com"
GitHub = "lk4d4"

[people.lowenna]
Name = "John Howard"
Email = "github@lowenna.com"
GitHub = "lowenna"

[people.mavenugo]
Name = "Madhu Venugopal"
Email = "madhu@docker.com"
@@ -413,6 +403,11 @@
Email = "jeff@docker.com"
GitHub = "programmerq"

[people.rheinwein]
Name = "Laura Frank"
Email = "laura@codeship.com"
GitHub = "rheinwein"

[people.ripcurld]
Name = "Boaz Shuster"
Email = "ripcurld.github@gmail.com"
@@ -423,11 +418,6 @@
Email = "runcom@redhat.com"
GitHub = "runcom"

[people.samwhited]
Name = "Sam Whited"
Email = "sam@samwhited.com"
GitHub = "samwhited"

[people.shykes]
Name = "Solomon Hykes"
Email = "solomon@docker.com"

Makefile (114 changes)
@@ -1,26 +1,12 @@
.PHONY: all binary dynbinary build cross help install manpages run shell test test-docker-py test-integration test-unit validate win

BUILDX_VERSION ?= v0.8.2

ifdef USE_BUILDX
BUILDX ?= $(shell command -v buildx)
BUILDX ?= $(shell command -v docker-buildx)
DOCKER_BUILDX_CLI_PLUGIN_PATH ?= ~/.docker/cli-plugins/docker-buildx
BUILDX ?= $(shell if [ -x "$(DOCKER_BUILDX_CLI_PLUGIN_PATH)" ]; then echo $(DOCKER_BUILDX_CLI_PLUGIN_PATH); fi)
endif

ifndef USE_BUILDX
DOCKER_BUILDKIT := 1
export DOCKER_BUILDKIT
endif

BUILDX ?= bundles/buildx
DOCKER ?= docker

# set the graph driver as the current graphdriver if not set
DOCKER_GRAPHDRIVER := $(if $(DOCKER_GRAPHDRIVER),$(DOCKER_GRAPHDRIVER),$(shell docker info 2>&1 | grep "Storage Driver" | sed 's/.*: //'))
export DOCKER_GRAPHDRIVER

# enable/disable cross-compile
DOCKER_CROSS ?= false

# get OS/Arch of docker engine
DOCKER_OSARCH := $(shell bash -c 'source hack/make/.detect-daemon-osarch && echo $${DOCKER_ENGINE_OSARCH}')
DOCKERFILE := $(shell bash -c 'source hack/make/.detect-daemon-osarch && echo $${DOCKERFILE}')
@@ -46,6 +32,7 @@ export VALIDATE_ORIGIN_BRANCH
#
DOCKER_ENVS := \
-e DOCKER_CROSSPLATFORMS \
-e BUILD_APT_MIRROR \
-e BUILDFLAGS \
-e KEEPBUNDLE \
-e DOCKER_BUILD_ARGS \
@@ -62,12 +49,11 @@ DOCKER_ENVS := \
-e DOCKER_LDFLAGS \
-e DOCKER_PORT \
-e DOCKER_REMAP_ROOT \
-e DOCKER_ROOTLESS \
-e DOCKER_STORAGE_OPTS \
-e DOCKER_TEST_HOST \
-e DOCKER_USERLANDPROXY \
-e DOCKERD_ARGS \
-e TEST_FORCE_VALIDATE \
-e TEST_INTEGRATION_DEST \
-e TEST_INTEGRATION_DIR \
-e TEST_SKIP_INTEGRATION \
-e TEST_SKIP_INTEGRATION_CLI \
@@ -76,7 +62,6 @@ DOCKER_ENVS := \
-e TESTFLAGS \
-e TESTFLAGS_INTEGRATION \
-e TESTFLAGS_INTEGRATION_CLI \
-e TEST_FILTER \
-e TIMEOUT \
-e VALIDATE_REPO \
-e VALIDATE_BRANCH \
@@ -101,7 +86,6 @@ BIND_DIR := $(if $(BINDDIR),$(BINDDIR),$(if $(DOCKER_HOST),,bundles))
# DOCKER_MOUNT can be overridden, but use at your own risk!
ifndef DOCKER_MOUNT
DOCKER_MOUNT := $(if $(BIND_DIR),-v "$(CURDIR)/$(BIND_DIR):/go/src/github.com/docker/docker/$(BIND_DIR)")
DOCKER_MOUNT := $(if $(DOCKER_BINDDIR_MOUNT_OPTS),$(DOCKER_MOUNT):$(DOCKER_BINDDIR_MOUNT_OPTS),$(DOCKER_MOUNT))

# This allows the test suite to be able to run without worrying about the underlying fs used by the container running the daemon (e.g. aufs-on-aufs), so long as the host running the container is running a supported fs.
# The volume will be cleaned up when the container is removed due to `--rm`.
@@ -122,7 +106,9 @@ GIT_BRANCH_CLEAN := $(shell echo $(GIT_BRANCH) | sed -e "s/[^[:alnum:]]/-/g")
DOCKER_IMAGE := docker-dev$(if $(GIT_BRANCH_CLEAN),:$(GIT_BRANCH_CLEAN))
DOCKER_PORT_FORWARD := $(if $(DOCKER_PORT),-p "$(DOCKER_PORT)",)

DOCKER_FLAGS := $(DOCKER) run --rm -i --privileged $(DOCKER_CONTAINER_NAME) $(DOCKER_ENVS) $(DOCKER_MOUNT) $(DOCKER_PORT_FORWARD)
DOCKER_FLAGS := docker run --rm -i --privileged $(DOCKER_CONTAINER_NAME) $(DOCKER_ENVS) $(DOCKER_MOUNT) $(DOCKER_PORT_FORWARD)
BUILD_APT_MIRROR := $(if $(DOCKER_BUILD_APT_MIRROR),--build-arg APT_MIRROR=$(DOCKER_BUILD_APT_MIRROR))
export BUILD_APT_MIRROR

SWAGGER_DOCS_PORT ?= 9000

@@ -141,51 +127,34 @@ endif

DOCKER_RUN_DOCKER := $(DOCKER_FLAGS) "$(DOCKER_IMAGE)"

DOCKER_BUILD_ARGS += --build-arg=GO_VERSION
ifdef DOCKER_SYSTEMD
DOCKER_BUILD_ARGS += --build-arg=SYSTEMD=true
endif

BUILD_OPTS := ${DOCKER_BUILD_ARGS} ${DOCKER_BUILD_OPTS} -f "$(DOCKERFILE)"
ifdef USE_BUILDX
BUILD_OPTS += $(BUILDX_BUILD_EXTRA_OPTS)
BUILD_CMD := $(BUILDX) build
else
BUILD_CMD := $(DOCKER) build
endif

# This is used for the legacy "build" target and anything still depending on it
BUILD_CROSS =
ifdef DOCKER_CROSS
BUILD_CROSS = --build-arg CROSS=$(DOCKER_CROSS)
endif
ifdef DOCKER_CROSSPLATFORMS
BUILD_CROSS = --build-arg CROSS=true
endif

VERSION_AUTOGEN_ARGS = --build-arg VERSION --build-arg DOCKER_GITCOMMIT --build-arg PRODUCT --build-arg PLATFORM --build-arg DEFAULT_PRODUCT_LICENSE

default: binary

all: build ## validate all checks, build linux binaries, run all tests,\ncross build non-linux binaries, and generate archives
all: build ## validate all checks, build linux binaries, run all tests\ncross build non-linux binaries and generate archives
$(DOCKER_RUN_DOCKER) bash -c 'hack/validate/default && hack/make.sh'

# This is only used to work around read-only bind mounts of the source code into
# binary build targets. We end up mounting a tmpfs over autogen which allows us
# to write build-time generated assets even though the source is mounted read-only
# ...But in order to do so, this dir needs to already exist.
autogen:
mkdir -p autogen
binary: build ## build the linux binaries
$(DOCKER_RUN_DOCKER) hack/make.sh binary

binary: buildx autogen ## build statically linked linux binaries
$(BUILD_CMD) $(BUILD_OPTS) --output=bundles/ --target=$@ $(VERSION_AUTOGEN_ARGS) .
dynbinary: build ## build the linux dynbinaries
$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary

dynbinary: buildx autogen ## build dynamically linked linux binaries
$(BUILD_CMD) $(BUILD_OPTS) --output=bundles/ --target=$@ $(VERSION_AUTOGEN_ARGS) .

cross: BUILD_OPTS += --build-arg CROSS=true --build-arg DOCKER_CROSSPLATFORMS
cross: buildx autogen ## cross build the binaries for darwin, freebsd and\nwindows
$(BUILD_CMD) $(BUILD_OPTS) --output=bundles/ --target=$@ $(VERSION_AUTOGEN_ARGS) .

cross: DOCKER_CROSS := true
cross: build ## cross build the binaries for darwin, freebsd and\nwindows
$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary binary cross

ifdef DOCKER_CROSSPLATFORMS
build: DOCKER_CROSS := true
endif
ifeq ($(BIND_DIR), .)
build: DOCKER_BUILD_OPTS += --target=dev
endif
build: DOCKER_BUILD_ARGS += --build-arg=CROSS=$(DOCKER_CROSS)
build: DOCKER_BUILDKIT ?= 1
build: bundles
$(warning The docker client CLI has moved to github.com/docker/cli. For a dev-test cycle involving the CLI, run:${\n} DOCKER_CLI_PATH=/host/path/to/cli/binary make shell ${\n} then change the cli and compile into a binary at the same location.${\n})
DOCKER_BUILDKIT="${DOCKER_BUILDKIT}" docker build --build-arg=GO_VERSION ${BUILD_APT_MIRROR} ${DOCKER_BUILD_ARGS} ${DOCKER_BUILD_OPTS} -t "$(DOCKER_IMAGE)" -f "$(DOCKERFILE)" .

bundles:
mkdir bundles

@@ -205,20 +174,8 @@ install: ## install the linux binaries

run: build ## run the docker daemon in a container
$(DOCKER_RUN_DOCKER) sh -c "KEEPBUNDLE=1 hack/make.sh install-binary run"

.PHONY: build
ifeq ($(BIND_DIR), .)
build: shell_target := --target=dev
else
build: shell_target := --target=final
endif
ifdef USE_BUILDX
build: buildx_load := --load
endif
build: buildx
$(BUILD_CMD) $(BUILD_OPTS) $(shell_target) $(buildx_load) $(BUILD_CROSS) -t "$(DOCKER_IMAGE)" .

shell: build ## start a shell inside the build env
shell: build ## start a shell inside the build env
$(DOCKER_RUN_DOCKER) bash

test: build test-unit ## run the unit, integration and docker-py tests
@@ -264,14 +221,3 @@ swagger-docs: ## preview the API documentation
-e 'REDOC_OPTIONS=hide-hostname="true" lazy-rendering' \
-p $(SWAGGER_DOCS_PORT):80 \
bfirsh/redoc:1.6.2

.PHONY: buildx
ifdef USE_BUILDX
ifeq ($(BUILDX), bundles/buildx)
buildx: bundles/buildx ## build buildx cli tool
endif
endif

bundles/buildx: bundles ## build buildx CLI tool
curl -fsSL https://raw.githubusercontent.com/moby/buildkit/70deac12b5857a1aa4da65e90b262368e2f71500/hack/install-buildx | VERSION="$(BUILDX_VERSION)" BINDIR="$(@D)" bash
$@ version

NOTICE (2 changes)
@@ -3,7 +3,7 @@ Copyright 2012-2017 Docker, Inc.

This product includes software developed at Docker, Inc. (https://www.docker.com).

This product contains software (https://github.com/creack/pty) developed
This product contains software (https://github.com/kr/pty) developed
by Keith Rarick, licensed under the MIT License.

The following is courtesy of our legal counsel:

@@ -1,9 +0,0 @@
# Reporting security issues

The Moby maintainers take security seriously. If you discover a security issue, please bring it to their attention right away!

### Reporting a Vulnerability

Please **DO NOT** file a public issue, instead send your report privately to security@docker.com.

Security reports are greatly appreciated and we will publicly thank you for it, although we keep your name confidential if you request it. We also like to send gifts—if you're into schwag, make sure to let us know. We currently do not offer a paid security bounty program, but are not ruling it out in the future.
TESTING.md (38 changes)
@@ -28,7 +28,7 @@ Most code changes will fall into one of the following categories.
### Writing tests for new features

New code should be covered by unit tests. If the code is difficult to test with
unit tests, then that is a good sign that it should be refactored to make it
a unit tests then that is a good sign that it should be refactored to make it
easier to reuse and maintain. Consider accepting unexported interfaces instead
of structs so that fakes can be provided for dependencies.
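For illustration only (this sketch is not part of the diff, and every name in it is hypothetical): accepting a small interface instead of a concrete struct lets a unit test substitute a fake for the real dependency:

```
package main

import (
	"fmt"
	"time"
)

// nower is the narrow, unexported interface the code accepts.
type nower interface {
	Now() time.Time
}

type realClock struct{}

func (realClock) Now() time.Time { return time.Now() }

// fakeClock is the kind of fake a unit test would provide.
type fakeClock struct{ t time.Time }

func (f fakeClock) Now() time.Time { return f.t }

// stamp depends only on the interface, so it is trivial to test.
func stamp(c nower) string { return c.Now().UTC().Format(time.RFC3339) }

func main() {
	fmt.Println(stamp(realClock{}))                   // real dependency
	fmt.Println(stamp(fakeClock{t: time.Unix(0, 0)})) // deterministic fake
}
```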

@@ -44,23 +44,16 @@ case. Error cases should be handled by unit tests.

Bug fixes should include a unit test case which exercises the bug.

A bug fix may also include new assertions in existing integration tests for the
A bug fix may also include new assertions in an existing integration tests for the
API endpoint.

### Writing new integration tests

Note the `integration-cli` tests are deprecated; new tests will be rejected by
the CI.

Instead, implement new tests under `integration/`.

### Integration tests environment considerations

When adding new tests or modifying existing tests under `integration/`, testing
When adding new tests or modifying existing test under `integration/`, testing
environment should be properly considered. `skip.If` from
[gotest.tools/skip](https://godoc.org/gotest.tools/skip) can be used to make the
test run conditionally. Full testing environment conditions can be found at
[environment.go](https://github.com/moby/moby/blob/6b6eeed03b963a27085ea670f40cd5ff8a61f32e/testutil/environment/environment.go)
[environment.go](https://github.com/moby/moby/blob/cb37987ee11655ed6bbef663d245e55922354c68/internal/test/environment/environment.go)

Here is a quick example. If the test needs to interact with a docker daemon on
the same host, the following condition should be checked within the test code
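The snippet itself falls outside this hunk's context lines. A self-contained sketch of such a conditional skip, where `isRemoteDaemon` stands in for the suite's `testEnv.IsRemoteDaemon` helper:

```
package integration_test

import (
	"testing"

	"gotest.tools/skip"
)

// isRemoteDaemon stands in for testEnv.IsRemoteDaemon from the environment
// package linked above; the real suite detects this at setup time.
var isRemoteDaemon = false

func TestNeedsLocalDaemon(t *testing.T) {
	// Skipped instead of failed when the daemon is not on the same host.
	skip.If(t, isRemoteDaemon, "test requires a local daemon")

	// ... test body runs only against a same-host daemon ...
}
```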
@@ -74,8 +67,6 @@ If a remote daemon is detected, the test will be skipped.

## Running tests

### Unit Tests

To run the unit test suite:

```
@@ -91,33 +82,12 @@ The following environment variables may be used to run a subset of tests:
* `TESTFLAGS` - flags passed to `go test`, to run tests which match a pattern
use `TESTFLAGS="-test.run TestNameOrPrefix"`

### Integration Tests

To run the integration test suite:

```
make test-integration
```

This make target runs both the "integration" suite and the "integration-cli"
suite.

You can specify which integration test dirs to build and run by specifying
the list of dirs in the TEST_INTEGRATION_DIR environment variable.

You can also explicitly skip either suite by setting (any value) in
TEST_SKIP_INTEGRATION and/or TEST_SKIP_INTEGRATION_CLI environment variables.

Flags specific to each suite can be set in the TESTFLAGS_INTEGRATION and
TESTFLAGS_INTEGRATION_CLI environment variables.

If all you want is to specify a test filter to run, you can set the
`TEST_FILTER` environment variable. This ends up getting passed directly to `go
test -run` (or `go test -check-f`, depending on the test suite). It will also
automatically set the other above mentioned environment variables accordingly.

### Go Version

You can change a version of golang used for building stuff that is being tested
by setting `GO_VERSION` variable, for example:


@@ -3,7 +3,7 @@ package api // import "github.com/docker/docker/api"
// Common constants for daemon and client.
const (
// DefaultVersion of Current REST API
DefaultVersion = "1.41"
DefaultVersion = "1.40"

// NoBaseImageSpecifier is the symbol used by the FROM
// command to specify that no base image is to be used.

@@ -1,4 +1,3 @@
//go:build !windows
// +build !windows

package api // import "github.com/docker/docker/api"

@@ -3,18 +3,17 @@ package build // import "github.com/docker/docker/api/server/backend/build"
import (
"context"
"fmt"
"strconv"

"github.com/docker/distribution/reference"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/api/types/events"
"github.com/docker/docker/builder"
buildkit "github.com/docker/docker/builder/builder-next"
daemonevents "github.com/docker/docker/daemon/events"
"github.com/docker/docker/builder/fscache"
"github.com/docker/docker/image"
"github.com/docker/docker/pkg/stringid"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
"google.golang.org/grpc"
)

@@ -32,14 +31,14 @@ type Builder interface {
// Backend provides build functionality to the API router
type Backend struct {
builder Builder
fsCache *fscache.FSCache
imageComponent ImageComponent
buildkit *buildkit.Builder
eventsService *daemonevents.Events
}

// NewBackend creates a new build backend from components
func NewBackend(components ImageComponent, builder Builder, buildkit *buildkit.Builder, es *daemonevents.Events) (*Backend, error) {
return &Backend{imageComponent: components, builder: builder, buildkit: buildkit, eventsService: es}, nil
func NewBackend(components ImageComponent, builder Builder, fsCache *fscache.FSCache, buildkit *buildkit.Builder) (*Backend, error) {
return &Backend{imageComponent: components, builder: builder, fsCache: fsCache, buildkit: buildkit}, nil
}

// RegisterGRPC registers buildkit controller to the grpc server.
@@ -100,16 +99,34 @@ func (b *Backend) Build(ctx context.Context, config backend.BuildConfig) (string

// PruneCache removes all cached build sources
func (b *Backend) PruneCache(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error) {
buildCacheSize, cacheIDs, err := b.buildkit.Prune(ctx, opts)
if err != nil {
return nil, errors.Wrap(err, "failed to prune build cache")
}
b.eventsService.Log("prune", events.BuilderEventType, events.Actor{
Attributes: map[string]string{
"reclaimed": strconv.FormatInt(buildCacheSize, 10),
},
eg, ctx := errgroup.WithContext(ctx)

var fsCacheSize uint64
eg.Go(func() error {
var err error
fsCacheSize, err = b.fsCache.Prune(ctx)
if err != nil {
return errors.Wrap(err, "failed to prune fscache")
}
return nil
})
return &types.BuildCachePruneReport{SpaceReclaimed: uint64(buildCacheSize), CachesDeleted: cacheIDs}, nil

var buildCacheSize int64
var cacheIDs []string
eg.Go(func() error {
var err error
buildCacheSize, cacheIDs, err = b.buildkit.Prune(ctx, opts)
if err != nil {
return errors.Wrap(err, "failed to prune build cache")
}
return nil
})

if err := eg.Wait(); err != nil {
return nil, err
}

return &types.BuildCachePruneReport{SpaceReclaimed: fsCacheSize + uint64(buildCacheSize), CachesDeleted: cacheIDs}, nil
}

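One branch of this hunk fans the two prune calls out with `errgroup` and joins them before summing the reclaimed space. A self-contained sketch of that fan-out/join pattern (the prune functions below are stand-ins, not moby APIs):

```
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	eg, ctx := errgroup.WithContext(context.Background())

	// Each goroutine writes its own variable, so no locking is needed.
	var fsCacheSize, buildCacheSize uint64
	eg.Go(func() error {
		var err error
		fsCacheSize, err = pruneFSCache(ctx) // stand-in for fsCache.Prune
		return err
	})
	eg.Go(func() error {
		var err error
		buildCacheSize, err = pruneBuildCache(ctx) // stand-in for buildkit.Prune
		return err
	})

	// Wait returns the first non-nil error; the shared ctx is canceled on failure.
	if err := eg.Wait(); err != nil {
		fmt.Println("prune failed:", err)
		return
	}
	fmt.Println("space reclaimed:", fsCacheSize+buildCacheSize)
}

func pruneFSCache(context.Context) (uint64, error)    { return 512, nil }
func pruneBuildCache(context.Context) (uint64, error) { return 1024, nil }
```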
// Cancel cancels the build by ID

@@ -1,34 +0,0 @@
package server

import (
"net/http"

"github.com/docker/docker/api/server/httpstatus"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/versions"
"github.com/gorilla/mux"
"google.golang.org/grpc/status"
)

// makeErrorHandler makes an HTTP handler that decodes a Docker error and
// returns it in the response.
func makeErrorHandler(err error) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
statusCode := httpstatus.FromError(err)
vars := mux.Vars(r)
if apiVersionSupportsJSONErrors(vars["version"]) {
response := &types.ErrorResponse{
Message: err.Error(),
}
_ = httputils.WriteJSON(w, statusCode, response)
} else {
http.Error(w, status.Convert(err).Message(), statusCode)
}
}
}

func apiVersionSupportsJSONErrors(version string) bool {
const firstAPIVersionWithJSONErrors = "1.23"
return version == "" || versions.GreaterThan(version, firstAPIVersionWithJSONErrors)
}
@@ -1,150 +0,0 @@
package httpstatus // import "github.com/docker/docker/api/server/httpstatus"

import (
"fmt"
"net/http"

containerderrors "github.com/containerd/containerd/errdefs"
"github.com/docker/distribution/registry/api/errcode"
"github.com/docker/docker/errdefs"
"github.com/sirupsen/logrus"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)

type causer interface {
Cause() error
}

// FromError retrieves status code from error message.
func FromError(err error) int {
if err == nil {
logrus.WithFields(logrus.Fields{"error": err}).Error("unexpected HTTP error handling")
return http.StatusInternalServerError
}

var statusCode int

// Stop right there
// Are you sure you should be adding a new error class here? Do one of the existing ones work?

// Note that the below functions are already checking the error causal chain for matches.
switch {
case errdefs.IsNotFound(err):
statusCode = http.StatusNotFound
case errdefs.IsInvalidParameter(err):
statusCode = http.StatusBadRequest
case errdefs.IsConflict(err):
statusCode = http.StatusConflict
case errdefs.IsUnauthorized(err):
statusCode = http.StatusUnauthorized
case errdefs.IsUnavailable(err):
statusCode = http.StatusServiceUnavailable
case errdefs.IsForbidden(err):
statusCode = http.StatusForbidden
case errdefs.IsNotModified(err):
statusCode = http.StatusNotModified
case errdefs.IsNotImplemented(err):
statusCode = http.StatusNotImplemented
case errdefs.IsSystem(err) || errdefs.IsUnknown(err) || errdefs.IsDataLoss(err) || errdefs.IsDeadline(err) || errdefs.IsCancelled(err):
statusCode = http.StatusInternalServerError
default:
statusCode = statusCodeFromGRPCError(err)
if statusCode != http.StatusInternalServerError {
return statusCode
}
statusCode = statusCodeFromContainerdError(err)
if statusCode != http.StatusInternalServerError {
return statusCode
}
statusCode = statusCodeFromDistributionError(err)
if statusCode != http.StatusInternalServerError {
return statusCode
}
if e, ok := err.(causer); ok {
return FromError(e.Cause())
}

logrus.WithFields(logrus.Fields{
"module": "api",
"error_type": fmt.Sprintf("%T", err),
}).Debugf("FIXME: Got an API for which error does not match any expected type!!!: %+v", err)
}

if statusCode == 0 {
statusCode = http.StatusInternalServerError
}

return statusCode
}

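As a rough illustration of what this mapping buys the API layer (a sketch, not part of the diff, runnable only on the branch where this package exists): the `errdefs` predicates walk the error's causal chain, so a wrapped typed error still resolves to the right status code without any string matching:

```
package main

import (
	"fmt"
	"net/http"

	"github.com/docker/docker/api/server/httpstatus"
	"github.com/docker/docker/errdefs"
	"github.com/pkg/errors"
)

func main() {
	// Classify once at the source, wrap freely on the way up.
	err := errors.Wrap(errdefs.NotFound(fmt.Errorf("no such container: web")), "inspect failed")

	// The causal chain is traversed, so the wrap does not hide the class.
	fmt.Println(httpstatus.FromError(err) == http.StatusNotFound) // true
}
```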
// statusCodeFromGRPCError returns status code according to gRPC error
func statusCodeFromGRPCError(err error) int {
switch status.Code(err) {
case codes.InvalidArgument: // code 3
return http.StatusBadRequest
case codes.NotFound: // code 5
return http.StatusNotFound
case codes.AlreadyExists: // code 6
return http.StatusConflict
case codes.PermissionDenied: // code 7
return http.StatusForbidden
case codes.FailedPrecondition: // code 9
return http.StatusBadRequest
case codes.Unauthenticated: // code 16
return http.StatusUnauthorized
case codes.OutOfRange: // code 11
return http.StatusBadRequest
case codes.Unimplemented: // code 12
return http.StatusNotImplemented
case codes.Unavailable: // code 14
return http.StatusServiceUnavailable
default:
// codes.Canceled(1)
// codes.Unknown(2)
// codes.DeadlineExceeded(4)
// codes.ResourceExhausted(8)
// codes.Aborted(10)
// codes.Internal(13)
// codes.DataLoss(15)
return http.StatusInternalServerError
}
}

// statusCodeFromDistributionError returns status code according to registry errcode
// code is loosely based on errcode.ServeJSON() in docker/distribution
func statusCodeFromDistributionError(err error) int {
switch errs := err.(type) {
case errcode.Errors:
if len(errs) < 1 {
return http.StatusInternalServerError
}
if _, ok := errs[0].(errcode.ErrorCoder); ok {
return statusCodeFromDistributionError(errs[0])
}
case errcode.ErrorCoder:
return errs.ErrorCode().Descriptor().HTTPStatusCode
}
return http.StatusInternalServerError
}

// statusCodeFromContainerdError returns status code for containerd errors when
// consumed directly (not through gRPC)
func statusCodeFromContainerdError(err error) int {
switch {
case containerderrors.IsInvalidArgument(err):
return http.StatusBadRequest
case containerderrors.IsNotFound(err):
return http.StatusNotFound
case containerderrors.IsAlreadyExists(err):
return http.StatusConflict
case containerderrors.IsFailedPrecondition(err):
return http.StatusPreconditionFailed
case containerderrors.IsUnavailable(err):
return http.StatusServiceUnavailable
case containerderrors.IsNotImplemented(err):
return http.StatusNotImplemented
default:
return http.StatusInternalServerError
}
}
api/server/httputils/errors_deprecated.go (9 changes, new file)
@@ -0,0 +1,9 @@
package httputils // import "github.com/docker/docker/api/server/httputils"
import "github.com/docker/docker/errdefs"

// GetHTTPErrorStatusCode retrieves status code from error message.
//
// Deprecated: use errdefs.GetHTTPErrorStatusCode
func GetHTTPErrorStatusCode(err error) int {
return errdefs.GetHTTPErrorStatusCode(err)
}
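The shim keeps existing call sites compiling while the deprecation notice points them at `errdefs`. A minimal sketch of the one-line migration, assuming the packages shown in this diff:

```
package main

import (
	"errors"
	"fmt"

	"github.com/docker/docker/api/server/httputils"
	"github.com/docker/docker/errdefs"
)

func main() {
	err := errdefs.NotFound(errors.New("no such image"))

	// Old call site: still compiles through the deprecated shim.
	fmt.Println(httputils.GetHTTPErrorStatusCode(err)) // 404

	// Migrated call site: what the deprecation notice asks for.
	fmt.Println(errdefs.GetHTTPErrorStatusCode(err)) // 404
}
```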
@@ -23,7 +23,7 @@ func TestBoolValue(t *testing.T) {
for c, e := range cases {
v := url.Values{}
v.Set("test", c)
r, _ := http.NewRequest(http.MethodPost, "", nil)
r, _ := http.NewRequest("POST", "", nil)
r.Form = v

a := BoolValue(r, "test")
@@ -34,14 +34,14 @@ func TestBoolValue(t *testing.T) {
}

func TestBoolValueOrDefault(t *testing.T) {
r, _ := http.NewRequest(http.MethodGet, "", nil)
r, _ := http.NewRequest("GET", "", nil)
if !BoolValueOrDefault(r, "queryparam", true) {
t.Fatal("Expected to get true default value, got false")
}

v := url.Values{}
v.Set("param", "")
r, _ = http.NewRequest(http.MethodGet, "", nil)
r, _ = http.NewRequest("GET", "", nil)
r.Form = v
if BoolValueOrDefault(r, "param", true) {
t.Fatal("Expected not to get true")
@@ -59,7 +59,7 @@ func TestInt64ValueOrZero(t *testing.T) {
for c, e := range cases {
v := url.Values{}
v.Set("test", c)
r, _ := http.NewRequest(http.MethodPost, "", nil)
r, _ := http.NewRequest("POST", "", nil)
r.Form = v

a := Int64ValueOrZero(r, "test")
@@ -79,7 +79,7 @@ func TestInt64ValueOrDefault(t *testing.T) {
for c, e := range cases {
v := url.Values{}
v.Set("test", c)
r, _ := http.NewRequest(http.MethodPost, "", nil)
r, _ := http.NewRequest("POST", "", nil)
r.Form = v

a, err := Int64ValueOrDefault(r, "test", -1)
@@ -95,7 +95,7 @@ func TestInt64ValueOrDefaultWithError(t *testing.T) {
func TestInt64ValueOrDefaultWithError(t *testing.T) {
v := url.Values{}
v.Set("test", "invalid")
r, _ := http.NewRequest(http.MethodPost, "", nil)
r, _ := http.NewRequest("POST", "", nil)
r.Form = v

_, err := Int64ValueOrDefault(r, "test", -1)

@@ -7,9 +7,13 @@ import (
"net/http"
"strings"

"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/errdefs"
"github.com/gorilla/mux"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"google.golang.org/grpc/status"
)

// APIVersionKey is the client's requested API version.
@@ -27,7 +31,7 @@ func HijackConnection(w http.ResponseWriter) (io.ReadCloser, io.Writer, error) {
return nil, nil, err
}
// Flush the options to make sure the client sets the raw mode
_, _ = conn.Write([]byte{})
conn.Write([]byte{})
return conn, conn, nil
}

@@ -37,9 +41,9 @@ func CloseStreams(streams ...interface{}) {
if tcpc, ok := stream.(interface {
CloseWrite() error
}); ok {
_ = tcpc.CloseWrite()
tcpc.CloseWrite()
} else if closer, ok := stream.(io.Closer); ok {
_ = closer.Close()
closer.Close()
}
}
}
@@ -88,6 +92,28 @@ func VersionFromContext(ctx context.Context) string {
return ""
}

// MakeErrorHandler makes an HTTP handler that decodes a Docker error and
// returns it in the response.
func MakeErrorHandler(err error) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
statusCode := errdefs.GetHTTPErrorStatusCode(err)
vars := mux.Vars(r)
if apiVersionSupportsJSONErrors(vars["version"]) {
response := &types.ErrorResponse{
Message: err.Error(),
}
WriteJSON(w, statusCode, response)
} else {
http.Error(w, status.Convert(err).Message(), statusCode)
}
}
}

func apiVersionSupportsJSONErrors(version string) bool {
const firstAPIVersionWithJSONErrors = "1.23"
return version == "" || versions.GreaterThan(version, firstAPIVersionWithJSONErrors)
}

// matchesContentType validates the content type against the expected one
func matchesContentType(contentType, expectedType string) bool {
mimetype, _, err := mime.ParseMediaType(contentType)

@@ -51,10 +51,10 @@ func WriteLogStream(_ context.Context, w io.Writer, msgs <-chan *backend.LogMess
logLine = append([]byte(msg.Timestamp.Format(jsonmessage.RFC3339NanoFixed)+" "), logLine...)
}
if msg.Source == "stdout" && config.ShowStdout {
_, _ = outStream.Write(logLine)
outStream.Write(logLine)
}
if msg.Source == "stderr" && config.ShowStderr {
_, _ = errStream.Write(logLine)
errStream.Write(logLine)
}
}
}

@@ -18,7 +18,7 @@ func DebugRequestMiddleware(handler func(ctx context.Context, w http.ResponseWri
return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
logrus.Debugf("Calling %s %s", r.Method, r.RequestURI)

if r.Method != http.MethodPost {
if r.Method != "POST" {
return handler(ctx, w, r, vars)
}
if err := httputils.CheckForJSON(r); err != nil {

@@ -3,8 +3,8 @@ package middleware // import "github.com/docker/docker/api/server/middleware"
import (
"testing"

"gotest.tools/v3/assert"
is "gotest.tools/v3/assert/cmp"
"gotest.tools/assert"
is "gotest.tools/assert/cmp"
)

func TestMaskSecretKeys(t *testing.T) {

@@ -8,8 +8,8 @@ import (
"testing"

"github.com/docker/docker/api/server/httputils"
"gotest.tools/v3/assert"
is "gotest.tools/v3/assert/cmp"
"gotest.tools/assert"
is "gotest.tools/assert/cmp"
)

func TestVersionMiddlewareVersion(t *testing.T) {
@@ -25,7 +25,7 @@ func TestVersionMiddlewareVersion(t *testing.T) {
m := NewVersionMiddleware(defaultVersion, defaultVersion, minVersion)
h := m.WrapHandler(handler)

req, _ := http.NewRequest(http.MethodGet, "/containers/json", nil)
req, _ := http.NewRequest("GET", "/containers/json", nil)
resp := httptest.NewRecorder()
ctx := context.Background()

@@ -76,7 +76,7 @@ func TestVersionMiddlewareWithErrorsReturnsHeaders(t *testing.T) {
m := NewVersionMiddleware(defaultVersion, defaultVersion, minVersion)
h := m.WrapHandler(handler)

req, _ := http.NewRequest(http.MethodGet, "/containers/json", nil)
req, _ := http.NewRequest("GET", "/containers/json", nil)
resp := httptest.NewRecorder()
ctx := context.Background()


@@ -38,36 +38,8 @@ func (e invalidIsolationError) Error() string {
func (e invalidIsolationError) InvalidParameter() {}

func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBuildOptions, error) {
options := &types.ImageBuildOptions{
Version: types.BuilderV1, // Builder V1 is the default, but can be overridden
Dockerfile: r.FormValue("dockerfile"),
SuppressOutput: httputils.BoolValue(r, "q"),
NoCache: httputils.BoolValue(r, "nocache"),
ForceRemove: httputils.BoolValue(r, "forcerm"),
MemorySwap: httputils.Int64ValueOrZero(r, "memswap"),
Memory: httputils.Int64ValueOrZero(r, "memory"),
CPUShares: httputils.Int64ValueOrZero(r, "cpushares"),
CPUPeriod: httputils.Int64ValueOrZero(r, "cpuperiod"),
CPUQuota: httputils.Int64ValueOrZero(r, "cpuquota"),
CPUSetCPUs: r.FormValue("cpusetcpus"),
CPUSetMems: r.FormValue("cpusetmems"),
CgroupParent: r.FormValue("cgroupparent"),
NetworkMode: r.FormValue("networkmode"),
Tags: r.Form["t"],
ExtraHosts: r.Form["extrahosts"],
SecurityOpt: r.Form["securityopt"],
Squash: httputils.BoolValue(r, "squash"),
Target: r.FormValue("target"),
RemoteContext: r.FormValue("remote"),
SessionID: r.FormValue("session"),
BuildID: r.FormValue("buildid"),
}

if runtime.GOOS != "windows" && options.SecurityOpt != nil {
return nil, errdefs.InvalidParameter(errors.New("The daemon on this platform does not support setting security options on build"))
}

version := httputils.VersionFromContext(ctx)
options := &types.ImageBuildOptions{}
if httputils.BoolValue(r, "forcerm") && versions.GreaterThanOrEqualTo(version, "1.12") {
options.Remove = true
} else if r.FormValue("rm") == "" && versions.GreaterThanOrEqualTo(version, "1.12") {
@@ -78,37 +50,52 @@ func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBui
if httputils.BoolValue(r, "pull") && versions.GreaterThanOrEqualTo(version, "1.16") {
options.PullParent = true
}

options.Dockerfile = r.FormValue("dockerfile")
options.SuppressOutput = httputils.BoolValue(r, "q")
options.NoCache = httputils.BoolValue(r, "nocache")
options.ForceRemove = httputils.BoolValue(r, "forcerm")
options.MemorySwap = httputils.Int64ValueOrZero(r, "memswap")
options.Memory = httputils.Int64ValueOrZero(r, "memory")
options.CPUShares = httputils.Int64ValueOrZero(r, "cpushares")
options.CPUPeriod = httputils.Int64ValueOrZero(r, "cpuperiod")
options.CPUQuota = httputils.Int64ValueOrZero(r, "cpuquota")
options.CPUSetCPUs = r.FormValue("cpusetcpus")
options.CPUSetMems = r.FormValue("cpusetmems")
options.CgroupParent = r.FormValue("cgroupparent")
options.NetworkMode = r.FormValue("networkmode")
options.Tags = r.Form["t"]
options.ExtraHosts = r.Form["extrahosts"]
options.SecurityOpt = r.Form["securityopt"]
options.Squash = httputils.BoolValue(r, "squash")
options.Target = r.FormValue("target")
options.RemoteContext = r.FormValue("remote")
if versions.GreaterThanOrEqualTo(version, "1.32") {
options.Platform = r.FormValue("platform")
}
if versions.GreaterThanOrEqualTo(version, "1.40") {
outputsJSON := r.FormValue("outputs")
if outputsJSON != "" {
var outputs []types.ImageBuildOutput
if err := json.Unmarshal([]byte(outputsJSON), &outputs); err != nil {
return nil, err
}
options.Outputs = outputs
}
}

if s := r.Form.Get("shmsize"); s != "" {
shmSize, err := strconv.ParseInt(s, 10, 64)
if r.Form.Get("shmsize") != "" {
shmSize, err := strconv.ParseInt(r.Form.Get("shmsize"), 10, 64)
if err != nil {
return nil, err
}
options.ShmSize = shmSize
}

if i := r.FormValue("isolation"); i != "" {
options.Isolation = container.Isolation(i)
if !options.Isolation.IsValid() {
return nil, invalidIsolationError(options.Isolation)
if i := container.Isolation(r.FormValue("isolation")); i != "" {
if !container.Isolation.IsValid(i) {
return nil, invalidIsolationError(i)
}
options.Isolation = i
}

if ulimitsJSON := r.FormValue("ulimits"); ulimitsJSON != "" {
var buildUlimits = []*units.Ulimit{}
if runtime.GOOS != "windows" && options.SecurityOpt != nil {
return nil, errdefs.InvalidParameter(errors.New("The daemon on this platform does not support setting security options on build"))
}

var buildUlimits = []*units.Ulimit{}
ulimitsJSON := r.FormValue("ulimits")
if ulimitsJSON != "" {
if err := json.Unmarshal([]byte(ulimitsJSON), &buildUlimits); err != nil {
return nil, errors.Wrap(errdefs.InvalidParameter(err), "error reading ulimit settings")
}
@@ -127,7 +114,8 @@ func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBui
|
||||
// the fact they mentioned it, we need to pass that along to the builder
|
||||
// so that it can print a warning about "foo" being unused if there is
|
||||
// no "ARG foo" in the Dockerfile.
|
||||
if buildArgsJSON := r.FormValue("buildargs"); buildArgsJSON != "" {
|
||||
buildArgsJSON := r.FormValue("buildargs")
|
||||
if buildArgsJSON != "" {
|
||||
var buildArgs = map[string]*string{}
|
||||
if err := json.Unmarshal([]byte(buildArgsJSON), &buildArgs); err != nil {
|
||||
return nil, errors.Wrap(errdefs.InvalidParameter(err), "error reading build args")
|
||||
@@ -135,7 +123,8 @@ func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBui
|
||||
options.BuildArgs = buildArgs
|
||||
}
|
||||
|
||||
if labelsJSON := r.FormValue("labels"); labelsJSON != "" {
|
||||
labelsJSON := r.FormValue("labels")
|
||||
if labelsJSON != "" {
|
||||
var labels = map[string]string{}
|
||||
if err := json.Unmarshal([]byte(labelsJSON), &labels); err != nil {
|
||||
return nil, errors.Wrap(errdefs.InvalidParameter(err), "error reading labels")
|
||||
@@ -143,41 +132,51 @@ func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBui
|
||||
options.Labels = labels
|
||||
}
|
||||
|
||||
if cacheFromJSON := r.FormValue("cachefrom"); cacheFromJSON != "" {
|
||||
cacheFromJSON := r.FormValue("cachefrom")
|
||||
if cacheFromJSON != "" {
|
||||
var cacheFrom = []string{}
|
||||
if err := json.Unmarshal([]byte(cacheFromJSON), &cacheFrom); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
options.CacheFrom = cacheFrom
|
||||
}
|
||||
options.SessionID = r.FormValue("session")
|
||||
options.BuildID = r.FormValue("buildid")
|
||||
builderVersion, err := parseVersion(r.FormValue("version"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
options.Version = builderVersion
|
||||
|
||||
if bv := r.FormValue("version"); bv != "" {
|
||||
v, err := parseVersion(bv)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
if versions.GreaterThanOrEqualTo(version, "1.40") {
|
||||
outputsJSON := r.FormValue("outputs")
|
||||
if outputsJSON != "" {
|
||||
var outputs []types.ImageBuildOutput
|
||||
if err := json.Unmarshal([]byte(outputsJSON), &outputs); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
options.Outputs = outputs
|
||||
}
|
||||
options.Version = v
|
||||
}
|
||||
|
||||
return options, nil
|
||||
}
|
||||
|
||||
func parseVersion(s string) (types.BuilderVersion, error) {
|
||||
switch types.BuilderVersion(s) {
|
||||
case types.BuilderV1:
|
||||
if s == "" || s == string(types.BuilderV1) {
|
||||
return types.BuilderV1, nil
|
||||
case types.BuilderBuildKit:
|
||||
return types.BuilderBuildKit, nil
|
||||
default:
|
||||
return "", errors.Errorf("invalid version %q", s)
|
||||
}
|
||||
if s == string(types.BuilderBuildKit) {
|
||||
return types.BuilderBuildKit, nil
|
||||
}
|
||||
return "", errors.Errorf("invalid version %s", s)
|
||||
}
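The switch-based variant of parseVersion makes the accepted values explicit. A quick table-driven check of the behavior both variants are intended to share; a sketch, assuming the types constants shown elsewhere in this diff and Go's standard testing package (the empty string is left out because the two variants handle it at different layers):

func TestParseVersion(t *testing.T) {
	cases := []struct {
		input   string
		want    types.BuilderVersion
		wantErr bool
	}{
		{"1", types.BuilderV1, false},       // classic builder
		{"2", types.BuilderBuildKit, false}, // BuildKit
		{"3", "", true},                     // anything else is rejected
	}
	for _, tc := range cases {
		got, err := parseVersion(tc.input)
		if (err != nil) != tc.wantErr || got != tc.want {
			t.Errorf("parseVersion(%q) = %v, %v; want %v, wantErr=%v",
				tc.input, got, err, tc.want, tc.wantErr)
		}
	}
}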

func (br *buildRouter) postPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
fltrs, err := filters.FromJSON(r.Form.Get("filters"))
filters, err := filters.FromJSON(r.Form.Get("filters"))
if err != nil {
return errors.Wrap(err, "could not parse filters")
}
@@ -192,7 +191,7 @@ func (br *buildRouter) postPrune(ctx context.Context, w http.ResponseWriter, r *

opts := types.BuildCachePruneOptions{
All: httputils.BoolValue(r, "all"),
Filters: fltrs,
Filters: filters,
KeepStorage: int64(ks),
}

@@ -235,12 +234,12 @@ func (br *buildRouter) postBuild(ctx context.Context, w http.ResponseWriter, r *
}

output := ioutils.NewWriteFlusher(ww)
defer func() { _ = output.Close() }()
defer output.Close()

errf := func(err error) error {

if httputils.BoolValue(r, "q") && notVerboseBuffer.Len() > 0 {
_, _ = output.Write(notVerboseBuffer.Bytes())
output.Write(notVerboseBuffer.Bytes())
}

// Do not write the error in the http output if it's still empty.
@@ -291,7 +290,7 @@ func (br *buildRouter) postBuild(ctx context.Context, w http.ResponseWriter, r *
// Everything worked so if -q was provided the output from the daemon
// should be just the image ID and we'll print that to stdout.
if buildOptions.SuppressOutput {
_, _ = fmt.Fprintln(streamformatter.NewStdoutWriter(output), imgID)
fmt.Fprintln(streamformatter.NewStdoutWriter(output), imgID)
}
return nil
}
@@ -307,7 +306,7 @@ func getAuthConfigs(header http.Header) map[string]types.AuthConfig {
authConfigsJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authConfigsEncoded))
// Pulling an image does not error when no auth is provided so to remain
// consistent with the existing api decode errors are ignored
_ = json.NewDecoder(authConfigsJSON).Decode(&authConfigs)
json.NewDecoder(authConfigsJSON).Decode(&authConfigs)
return authConfigs
}

@@ -429,7 +428,7 @@ func (w *wcf) notify() {
w.mu.Lock()
if !w.ready {
if w.buf.Len() > 0 {
_, _ = io.Copy(w.Writer, w.buf)
io.Copy(w.Writer, w.buf)
}
if w.flushed {
w.flusher.Flush()

@@ -10,15 +10,13 @@ type containerRouter struct {
backend Backend
decoder httputils.ContainerDecoder
routes []router.Route
cgroup2 bool
}

// NewRouter initializes a new container router
func NewRouter(b Backend, decoder httputils.ContainerDecoder, cgroup2 bool) router.Router {
func NewRouter(b Backend, decoder httputils.ContainerDecoder) router.Router {
r := &containerRouter{
backend: b,
decoder: decoder,
cgroup2: cgroup2,
}
r.initRoutes()
return r

@@ -9,8 +9,6 @@ import (
"strconv"
"syscall"

"github.com/containerd/containerd/platforms"
"github.com/docker/docker/api/server/httpstatus"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
@@ -21,7 +19,6 @@ import (
"github.com/docker/docker/errdefs"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/signal"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/net/websocket"
@@ -44,7 +41,7 @@ func (s *containerRouter) postCommit(ctx context.Context, w http.ResponseWriter,
}

config, _, _, err := s.decoder.DecodeConfig(r.Body)
if err != nil && err != io.EOF { // Do not fail if body is empty.
if err != nil && err != io.EOF { //Do not fail if body is empty.
return err
}

@@ -108,14 +105,9 @@ func (s *containerRouter) getContainersStats(ctx context.Context, w http.Respons
if !stream {
w.Header().Set("Content-Type", "application/json")
}
var oneShot bool
if versions.GreaterThanOrEqualTo(httputils.VersionFromContext(ctx), "1.41") {
oneShot = httputils.BoolValueOrDefault(r, "one-shot", false)
}

config := &backend.ContainerStatsConfig{
Stream: stream,
OneShot: oneShot,
OutStream: w,
Version: httputils.VersionFromContext(ctx),
}
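The one-shot stats flag is only honored for clients that negotiated API 1.41 or later; for anything else the daemon keeps streaming. httputils.BoolValueOrDefault treats an absent form value as the default; a plausible sketch of such a helper (the real implementation lives in api/server/httputils and may differ in detail):

// boolValueOrDefault returns def when the form field is missing, and
// otherwise interprets the common truthy spellings the API accepts.
func boolValueOrDefault(r *http.Request, k string, def bool) bool {
	if _, ok := r.Form[k]; !ok {
		return def
	}
	s := strings.ToLower(strings.TrimSpace(r.Form.Get(k)))
	return !(s == "" || s == "0" || s == "no" || s == "false" || s == "none")
}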
@@ -490,28 +482,14 @@ func (s *containerRouter) postContainersCreate(ctx context.Context, w http.Respo
// Ignore KernelMemoryTCP because it was added in API 1.40.
hostConfig.KernelMemoryTCP = 0

// Ignore Capabilities because it was added in API 1.40.
hostConfig.Capabilities = nil

// Older clients (API < 1.40) expects the default to be shareable, make them happy
if hostConfig.IpcMode.IsEmpty() {
hostConfig.IpcMode = container.IpcMode("shareable")
}
}
if hostConfig != nil && versions.LessThan(version, "1.41") && !s.cgroup2 {
// Older clients expect the default to be "host" on cgroup v1 hosts
if hostConfig.CgroupnsMode.IsEmpty() {
hostConfig.CgroupnsMode = container.CgroupnsMode("host")
}
}

var platform *specs.Platform
if versions.GreaterThanOrEqualTo(version, "1.41") {
if v := r.Form.Get("platform"); v != "" {
p, err := platforms.Parse(v)
if err != nil {
return errdefs.InvalidParameter(err)
}
platform = &p
}
}

if hostConfig != nil && hostConfig.PidsLimit != nil && *hostConfig.PidsLimit <= 0 {
// Don't set a limit if either no limit was specified, or "unlimited" was
@@ -527,7 +505,6 @@ func (s *containerRouter) postContainersCreate(ctx context.Context, w http.Respo
HostConfig: hostConfig,
NetworkingConfig: networkingConfig,
AdjustCPUShares: adjustCPUShares,
Platform: platform,
})
if err != nil {
return err
@@ -627,7 +604,7 @@ func (s *containerRouter) postContainersAttach(ctx context.Context, w http.Respo
// Remember to close stream if error happens
conn, _, errHijack := hijacker.Hijack()
if errHijack == nil {
statusCode := httpstatus.FromError(err)
statusCode := errdefs.GetHTTPErrorStatusCode(err)
statusText := http.StatusText(statusCode)
fmt.Fprintf(conn, "HTTP/1.1 %d %s\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n%s\r\n", statusCode, statusText, err.Error())
httputils.CloseStreams(conn)

@@ -15,7 +15,7 @@ import (
"github.com/docker/docker/api/types"
registrytypes "github.com/docker/docker/api/types/registry"
"github.com/docker/docker/errdefs"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)

@@ -2,7 +2,6 @@ package grpc // import "github.com/docker/docker/api/server/router/grpc"

import (
"github.com/docker/docker/api/server/router"
"github.com/moby/buildkit/util/grpcerrors"
"golang.org/x/net/http2"
"google.golang.org/grpc"
)
@@ -16,11 +15,8 @@ type grpcRouter struct {
// NewRouter initializes a new grpc http router
func NewRouter(backends ...Backend) router.Router {
r := &grpcRouter{
h2Server: &http2.Server{},
grpcServer: grpc.NewServer(
grpc.UnaryInterceptor(grpcerrors.UnaryServerInterceptor),
grpc.StreamInterceptor(grpcerrors.StreamServerInterceptor),
),
h2Server: &http2.Server{},
grpcServer: grpc.NewServer(),
}
for _, b := range backends {
b.RegisterGRPC(r.grpcServer)
@@ -30,12 +26,12 @@ func NewRouter(backends ...Backend) router.Router {
}

// Routes returns the available routers to the session controller
func (gr *grpcRouter) Routes() []router.Route {
return gr.routes
func (r *grpcRouter) Routes() []router.Route {
return r.routes
}

func (gr *grpcRouter) initRoutes() {
gr.routes = []router.Route{
router.NewPostRoute("/grpc", gr.serveGRPC),
func (r *grpcRouter) initRoutes() {
r.routes = []router.Route{
router.NewPostRoute("/grpc", r.serveGRPC),
}
}

@@ -57,7 +57,7 @@ func (s *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWrite
}
}

if image != "" { // pull
if image != "" { //pull
metaHeaders := map[string][]string{}
for k, v := range r.Header {
if strings.HasPrefix(k, "X-Meta-") {
@@ -76,7 +76,7 @@ func (s *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWrite
}
}
err = s.backend.PullImage(ctx, image, tag, platform, metaHeaders, authConfig, output)
} else { // import
} else { //import
src := r.Form.Get("fromSrc")
// 'err' MUST NOT be defined within this block, we need any error
// generated from the download to be available to the output
@@ -91,7 +91,7 @@ func (s *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWrite
if !output.Flushed() {
return err
}
_, _ = output.Write(streamformatter.FormatError(err))
output.Write(streamformatter.FormatError(err))
}

return nil
@@ -136,7 +136,7 @@ func (s *imageRouter) postImagesPush(ctx context.Context, w http.ResponseWriter,
if !output.Flushed() {
return err
}
_, _ = output.Write(streamformatter.FormatError(err))
output.Write(streamformatter.FormatError(err))
}
return nil
}
@@ -161,7 +161,7 @@ func (s *imageRouter) getImagesGet(ctx context.Context, w http.ResponseWriter, r
if !output.Flushed() {
return err
}
_, _ = output.Write(streamformatter.FormatError(err))
output.Write(streamformatter.FormatError(err))
}
return nil
}
@@ -177,7 +177,7 @@ func (s *imageRouter) postImagesLoad(ctx context.Context, w http.ResponseWriter,
output := ioutils.NewWriteFlusher(w)
defer output.Close()
if err := s.backend.LoadImage(r.Body, output, quiet); err != nil {
_, _ = output.Write(streamformatter.FormatError(err))
output.Write(streamformatter.FormatError(err))
}
return nil
}
@@ -231,12 +231,10 @@ func (s *imageRouter) getImagesJSON(ctx context.Context, w http.ResponseWriter,
return err
}

version := httputils.VersionFromContext(ctx)
if versions.LessThan(version, "1.41") {
filterParam := r.Form.Get("filter")
if filterParam != "" {
imageFilters.Add("reference", filterParam)
}
filterParam := r.Form.Get("filter")
// FIXME(vdemeester) This has been deprecated in 1.13, and is target for removal for v17.12
if filterParam != "" {
imageFilters.Add("reference", filterParam)
}

images, err := s.backend.Images(imageFilters, httputils.BoolValue(r, "all"), false)

@@ -1,8 +1,6 @@
package router // import "github.com/docker/docker/api/server/router"

import (
"net/http"

"github.com/docker/docker/api/server/httputils"
)

@@ -44,30 +42,30 @@ func NewRoute(method, path string, handler httputils.APIFunc, opts ...RouteWrapp

// NewGetRoute initializes a new route with the http method GET.
func NewGetRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route {
return NewRoute(http.MethodGet, path, handler, opts...)
return NewRoute("GET", path, handler, opts...)
}

// NewPostRoute initializes a new route with the http method POST.
func NewPostRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route {
return NewRoute(http.MethodPost, path, handler, opts...)
return NewRoute("POST", path, handler, opts...)
}

// NewPutRoute initializes a new route with the http method PUT.
func NewPutRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route {
return NewRoute(http.MethodPut, path, handler, opts...)
return NewRoute("PUT", path, handler, opts...)
}

// NewDeleteRoute initializes a new route with the http method DELETE.
func NewDeleteRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route {
return NewRoute(http.MethodDelete, path, handler, opts...)
return NewRoute("DELETE", path, handler, opts...)
}

// NewOptionsRoute initializes a new route with the http method OPTIONS.
func NewOptionsRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route {
return NewRoute(http.MethodOptions, path, handler, opts...)
return NewRoute("OPTIONS", path, handler, opts...)
}

// NewHeadRoute initializes a new route with the http method HEAD.
func NewHeadRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route {
return NewRoute(http.MethodHead, path, handler, opts...)
return NewRoute("HEAD", path, handler, opts...)
}

@@ -30,7 +30,7 @@ func (n *networkRouter) getNetworksList(ctx context.Context, w http.ResponseWrit
}

if err := network.ValidateFilters(filter); err != nil {
return errdefs.InvalidParameter(err)
return err
}

var list []types.NetworkResource

@@ -123,7 +123,7 @@ func (pr *pluginRouter) upgradePlugin(ctx context.Context, w http.ResponseWriter
if !output.Flushed() {
return err
}
_, _ = output.Write(streamformatter.FormatError(err))
output.Write(streamformatter.FormatError(err))
}

return nil
@@ -162,7 +162,7 @@ func (pr *pluginRouter) pullPlugin(ctx context.Context, w http.ResponseWriter, r
if !output.Flushed() {
return err
}
_, _ = output.Write(streamformatter.FormatError(err))
output.Write(streamformatter.FormatError(err))
}

return nil
@@ -211,7 +211,7 @@ func (pr *pluginRouter) createPlugin(ctx context.Context, w http.ResponseWriter,
if err := pr.backend.CreateFromContext(ctx, r.Body, options); err != nil {
return err
}
// TODO: send progress bar
//TODO: send progress bar
w.WriteHeader(http.StatusNoContent)
return nil
}
@@ -270,7 +270,7 @@ func (pr *pluginRouter) pushPlugin(ctx context.Context, w http.ResponseWriter, r
if !output.Flushed() {
return err
}
_, _ = output.Write(streamformatter.FormatError(err))
output.Write(streamformatter.FormatError(err))
}
return nil
}

@@ -167,19 +167,7 @@ func (sr *swarmRouter) getServices(ctx context.Context, w http.ResponseWriter, r
return errdefs.InvalidParameter(err)
}

// the status query parameter is only support in API versions >= 1.41. If
// the client is using a lesser version, ignore the parameter.
cliVersion := httputils.VersionFromContext(ctx)
var status bool
if value := r.URL.Query().Get("status"); value != "" && !versions.LessThan(cliVersion, "1.41") {
var err error
status, err = strconv.ParseBool(value)
if err != nil {
return errors.Wrapf(errdefs.InvalidParameter(err), "invalid value for status: %s", value)
}
}

services, err := sr.backend.GetServices(basictypes.ServiceListOptions{Filters: filter, Status: status})
services, err := sr.backend.GetServices(basictypes.ServiceListOptions{Filters: filter})
if err != nil {
logrus.Errorf("Error getting services: %v", err)
return err
@@ -190,21 +178,15 @@ func (sr *swarmRouter) getServices(ctx context.Context, w http.ResponseWriter, r

func (sr *swarmRouter) getService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
var insertDefaults bool

if value := r.URL.Query().Get("insertDefaults"); value != "" {
var err error
insertDefaults, err = strconv.ParseBool(value)
if err != nil {
err := fmt.Errorf("invalid value for insertDefaults: %s", value)
return errors.Wrapf(errdefs.InvalidParameter(err), "invalid value for insertDefaults: %s", value)
}
}

// you may note that there is no code here to handle the "status" query
// parameter, as in getServices. the Status field is not supported when
// retrieving an individual service because the Backend API changes
// required to accommodate it would be too disruptive, and because that
// field is so rarely needed as part of an individual service inspection.

service, err := sr.backend.GetService(vars["id"], insertDefaults)
if err != nil {
logrus.Errorf("Error getting service %s: %v", vars["id"], err)
@@ -225,13 +207,15 @@ func (sr *swarmRouter) createService(ctx context.Context, w http.ResponseWriter,

// Get returns "" if the header does not exist
encodedAuth := r.Header.Get("X-Registry-Auth")
cliVersion := r.Header.Get("version")
queryRegistry := false
if v := httputils.VersionFromContext(ctx); v != "" {
if versions.LessThan(v, "1.30") {
if cliVersion != "" {
if versions.LessThan(cliVersion, "1.30") {
queryRegistry = true
}
adjustForAPIVersion(v, &service)
adjustForAPIVersion(cliVersion, &service)
}

resp, err := sr.backend.CreateService(service, encodedAuth, queryRegistry)
if err != nil {
logrus.Errorf("Error creating service %s: %v", service.Name, err)
@@ -263,12 +247,13 @@ func (sr *swarmRouter) updateService(ctx context.Context, w http.ResponseWriter,
flags.EncodedRegistryAuth = r.Header.Get("X-Registry-Auth")
flags.RegistryAuthFrom = r.URL.Query().Get("registryAuthFrom")
flags.Rollback = r.URL.Query().Get("rollback")
cliVersion := r.Header.Get("version")
queryRegistry := false
if v := httputils.VersionFromContext(ctx); v != "" {
if versions.LessThan(v, "1.30") {
if cliVersion != "" {
if versions.LessThan(cliVersion, "1.30") {
queryRegistry = true
}
adjustForAPIVersion(v, &service)
adjustForAPIVersion(cliVersion, &service)
}

resp, err := sr.backend.UpdateService(vars["id"], version, service, flags, queryRegistry)

@@ -95,23 +95,4 @@ func adjustForAPIVersion(cliVersion string, service *swarm.ServiceSpec) {
service.TaskTemplate.Placement.MaxReplicas = 0
}
}
if versions.LessThan(cliVersion, "1.41") {
if service.TaskTemplate.ContainerSpec != nil {
// Capabilities and Ulimits for docker swarm services weren't
// supported before API version 1.41
service.TaskTemplate.ContainerSpec.CapabilityAdd = nil
service.TaskTemplate.ContainerSpec.CapabilityDrop = nil
service.TaskTemplate.ContainerSpec.Ulimits = nil
}
if service.TaskTemplate.Resources != nil && service.TaskTemplate.Resources.Limits != nil {
// Limits.Pids not supported before API version 1.41
service.TaskTemplate.Resources.Limits.Pids = 0
}

// jobs were only introduced in API version 1.41. Nil out both Job
// modes; if the service is one of these modes and subsequently has no
// mode, then something down the pipe will thrown an error.
service.Mode.ReplicatedJob = nil
service.Mode.GlobalJob = nil
}
}
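adjustForAPIVersion mutates the spec in place, stripping fields the negotiated API version cannot express. A short usage sketch built from the version gates shown above (the capability value is illustrative):

spec := &swarm.ServiceSpec{}
spec.TaskTemplate.ContainerSpec = &swarm.ContainerSpec{
	CapabilityAdd: []string{"CAP_NET_ADMIN"}, // illustrative value
}

// A client that negotiated API 1.40 predates capability support, so the
// field is stripped before the spec reaches the swarm backend.
adjustForAPIVersion("1.40", spec)
// spec.TaskTemplate.ContainerSpec.CapabilityAdd is now nil.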

@@ -5,7 +5,6 @@ import (
"testing"

"github.com/docker/docker/api/types/swarm"
"github.com/docker/go-units"
)

func TestAdjustForAPIVersion(t *testing.T) {
@@ -40,40 +39,21 @@ func TestAdjustForAPIVersion(t *testing.T) {
ConfigName: "configRuntime",
},
},
Ulimits: []*units.Ulimit{
{
Name: "nofile",
Soft: 100,
Hard: 200,
},
},
},
Placement: &swarm.Placement{
MaxReplicas: 222,
},
Resources: &swarm.ResourceRequirements{
Limits: &swarm.Limit{
Pids: 300,
},
},
},
}

// first, does calling this with a later version correctly NOT strip
// fields? do the later version first, so we can reuse this spec in the
// next test.
adjustForAPIVersion("1.41", spec)
adjustForAPIVersion("1.40", spec)
if !reflect.DeepEqual(spec.TaskTemplate.ContainerSpec.Sysctls, expectedSysctls) {
t.Error("Sysctls was stripped from spec")
}

if spec.TaskTemplate.Resources.Limits.Pids == 0 {
t.Error("PidsLimit was stripped from spec")
}
if spec.TaskTemplate.Resources.Limits.Pids != 300 {
t.Error("PidsLimit did not preserve the value from spec")
}

if spec.TaskTemplate.ContainerSpec.Privileges.CredentialSpec.Config != "someconfig" {
t.Error("CredentialSpec.Config field was stripped from spec")
}
@@ -86,20 +66,12 @@ func TestAdjustForAPIVersion(t *testing.T) {
t.Error("MaxReplicas was stripped from spec")
}

if len(spec.TaskTemplate.ContainerSpec.Ulimits) == 0 {
t.Error("Ulimits were stripped from spec")
}

// next, does calling this with an earlier version correctly strip fields?
adjustForAPIVersion("1.29", spec)
if spec.TaskTemplate.ContainerSpec.Sysctls != nil {
t.Error("Sysctls was not stripped from spec")
}

if spec.TaskTemplate.Resources.Limits.Pids != 0 {
t.Error("PidsLimit was not stripped from spec")
}

if spec.TaskTemplate.ContainerSpec.Privileges.CredentialSpec.Config != "" {
t.Error("CredentialSpec.Config field was not stripped from spec")
}
@@ -112,8 +84,4 @@ func TestAdjustForAPIVersion(t *testing.T) {
t.Error("MaxReplicas was not stripped from spec")
}

if len(spec.TaskTemplate.ContainerSpec.Ulimits) != 0 {
t.Error("Ulimits were not stripped from spec")
}

}

@@ -13,7 +13,7 @@ import (
// Backend is the methods that need to be implemented to provide
// system specific functionality.
type Backend interface {
SystemInfo() *types.Info
SystemInfo() (*types.Info, error)
SystemVersion() types.Version
SystemDiskUsage(ctx context.Context) (*types.DiskUsage, error)
SubscribeToEvents(since, until time.Time, ef filters.Args) ([]events.Message, chan interface{})

@@ -2,7 +2,8 @@ package system // import "github.com/docker/docker/api/server/router/system"

import (
"github.com/docker/docker/api/server/router"
buildkit "github.com/docker/docker/builder/builder-next"
"github.com/docker/docker/builder/builder-next"
"github.com/docker/docker/builder/fscache"
)

// systemRouter provides information about the Docker system overall.
@@ -11,15 +12,17 @@ type systemRouter struct {
backend Backend
cluster ClusterBackend
routes []router.Route
fscache *fscache.FSCache // legacy
builder *buildkit.Builder
features *map[string]bool
}

// NewRouter initializes a new system router
func NewRouter(b Backend, c ClusterBackend, builder *buildkit.Builder, features *map[string]bool) router.Router {
func NewRouter(b Backend, c ClusterBackend, fscache *fscache.FSCache, builder *buildkit.Builder, features *map[string]bool) router.Router {
r := &systemRouter{
backend: b,
cluster: c,
fscache: fscache,
builder: builder,
features: features,
}

@@ -44,8 +44,10 @@ func (s *systemRouter) pingHandler(ctx context.Context, w http.ResponseWriter, r
}

func (s *systemRouter) getInfo(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
info := s.backend.SystemInfo()

info, err := s.backend.SystemInfo()
if err != nil {
return err
}
if s.cluster != nil {
info.Swarm = s.cluster.Info()
info.Warnings = append(info.Warnings, info.Swarm.Warnings...)
@@ -99,6 +101,16 @@ func (s *systemRouter) getDiskUsage(ctx context.Context, w http.ResponseWriter,
return err
})

var builderSize int64 // legacy
eg.Go(func() error {
var err error
builderSize, err = s.fscache.DiskUsage(ctx)
if err != nil {
return pkgerrors.Wrap(err, "error getting fscache build cache usage")
}
return nil
})

var buildCache []*types.BuildCache
eg.Go(func() error {
var err error
@@ -113,7 +125,6 @@ func (s *systemRouter) getDiskUsage(ctx context.Context, w http.ResponseWriter,
return err
}

var builderSize int64
for _, b := range buildCache {
builderSize += b.Size
}
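getDiskUsage gathers the independent usage numbers concurrently and fails fast on the first error. A minimal runnable sketch of the errgroup pattern it relies on, assuming golang.org/x/sync/errgroup as used in the daemon (values are placeholders):

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	eg, ctx := errgroup.WithContext(context.Background())
	_ = ctx // a real collector would pass ctx to each backend call

	var images, cache int64
	eg.Go(func() error { images = 1 << 20; return nil }) // e.g. image disk usage
	eg.Go(func() error { cache = 1 << 10; return nil })  // e.g. build cache usage

	// Wait returns the first non-nil error from any goroutine.
	if err := eg.Wait(); err != nil {
		fmt.Println("usage collection failed:", err)
		return
	}
	fmt.Println("total:", images+cache)
}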

@@ -163,9 +174,7 @@ func (s *systemRouter) getEvents(ctx context.Context, w http.ResponseWriter, r *

if !onlyPastEvents {
dur := until.Sub(now)
timer := time.NewTimer(dur)
defer timer.Stop()
timeout = timer.C
timeout = time.After(dur)
}
}
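The switch from time.After to an explicit time.NewTimer with a deferred Stop matters in a long-lived handler: the timer allocated by time.After cannot be stopped, so it lingers until it fires even if the handler returns early. A small self-contained sketch of the preferred shape:

func waitOrCancel(dur time.Duration, done <-chan struct{}) bool {
	// time.After would leak its timer if done fires first; NewTimer plus
	// a deferred Stop releases it promptly, as in the hunk above.
	timer := time.NewTimer(dur)
	defer timer.Stop()
	select {
	case <-timer.C:
		return true // timed out
	case <-done:
		return false // canceled early; the timer is stopped on return
	}
}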

api/server/router_swapper.go (new file, 30 lines)
@@ -0,0 +1,30 @@

package server // import "github.com/docker/docker/api/server"

import (
"net/http"
"sync"

"github.com/gorilla/mux"
)

// routerSwapper is an http.Handler that allows you to swap
// mux routers.
type routerSwapper struct {
mu sync.Mutex
router *mux.Router
}

// Swap changes the old router with the new one.
func (rs *routerSwapper) Swap(newRouter *mux.Router) {
rs.mu.Lock()
rs.router = newRouter
rs.mu.Unlock()
}

// ServeHTTP makes the routerSwapper to implement the http.Handler interface.
func (rs *routerSwapper) ServeHTTP(w http.ResponseWriter, r *http.Request) {
rs.mu.Lock()
router := rs.router
rs.mu.Unlock()
router.ServeHTTP(w, r)
}
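Because ServeHTTP holds the lock only long enough to copy the pointer, a new router can be swapped in while requests are in flight; requests already dispatched keep the router they started with. A hedged usage sketch (reloadSignal and buildMux are hypothetical stand-ins for a reload trigger and a route constructor):

rs := &routerSwapper{router: mux.NewRouter()}
srv := &http.Server{Addr: ":8080", Handler: rs}

// On each config reload, build a fresh mux and swap it in atomically.
go func() {
	for range reloadSignal { // hypothetical channel
		rs.Swap(buildMux()) // hypothetical constructor returning *mux.Router
	}
}()

_ = srv.ListenAndServe()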
@@ -6,14 +6,13 @@ import (
"net"
"net/http"
"strings"
"time"

"github.com/docker/docker/api/server/httpstatus"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/server/middleware"
"github.com/docker/docker/api/server/router"
"github.com/docker/docker/api/server/router/debug"
"github.com/docker/docker/dockerversion"
"github.com/docker/docker/errdefs"
"github.com/gorilla/mux"
"github.com/sirupsen/logrus"
)
@@ -33,10 +32,11 @@ type Config struct {

// Server contains instance details for the server
type Server struct {
cfg *Config
servers []*HTTPServer
routers []router.Router
middlewares []middleware.Middleware
cfg *Config
servers []*HTTPServer
routers []router.Router
routerSwapper *routerSwapper
middlewares []middleware.Middleware
}

// New returns a new instance of the server based on the specified configuration.
@@ -58,8 +58,7 @@ func (s *Server) Accept(addr string, listeners ...net.Listener) {
for _, listener := range listeners {
httpServer := &HTTPServer{
srv: &http.Server{
Addr: addr,
ReadHeaderTimeout: 5 * time.Minute, // "G112: Potential Slowloris Attack (gosec)"; not a real concern for our use, so setting a long timeout.
Addr: addr,
},
l: listener,
}
@@ -81,7 +80,7 @@ func (s *Server) Close() {
func (s *Server) serveAPI() error {
var chErrors = make(chan error, len(s.servers))
for _, srv := range s.servers {
srv.srv.Handler = s.createMux()
srv.srv.Handler = s.routerSwapper
go func(srv *HTTPServer) {
var err error
logrus.Infof("API listen on %s", srv.l.Addr())
@@ -141,11 +140,11 @@ func (s *Server) makeHTTPHandler(handler httputils.APIFunc) http.HandlerFunc {
}

if err := handlerFunc(ctx, w, r, vars); err != nil {
statusCode := httpstatus.FromError(err)
statusCode := errdefs.GetHTTPErrorStatusCode(err)
if statusCode >= 500 {
logrus.Errorf("Handler for %s %s returned error: %v", r.Method, r.URL.Path, err)
}
makeErrorHandler(err)(w, r)
httputils.MakeErrorHandler(err)(w, r)
}
}
}
@@ -154,6 +153,11 @@ func (s *Server) makeHTTPHandler(handler httputils.APIFunc) http.HandlerFunc {
// This method also enables the Go profiler.
func (s *Server) InitRouter(routers ...router.Router) {
s.routers = append(s.routers, routers...)

m := s.createMux()
s.routerSwapper = &routerSwapper{
router: m,
}
}

type pageNotFoundError struct{}
@@ -186,7 +190,7 @@ func (s *Server) createMux() *mux.Router {
m.Path("/debug" + r.Path()).Handler(f)
}

notFoundHandler := makeErrorHandler(pageNotFoundError{})
notFoundHandler := httputils.MakeErrorHandler(pageNotFoundError{})
m.HandleFunc(versionMatcher+"/{path:.*}", notFoundHandler)
m.NotFoundHandler = notFoundHandler
m.MethodNotAllowedHandler = notFoundHandler

@@ -22,7 +22,7 @@ func TestMiddlewares(t *testing.T) {

srv.UseMiddleware(middleware.NewVersionMiddleware("0.1omega2", api.DefaultVersion, api.MinVersion))

req, _ := http.NewRequest(http.MethodGet, "/containers/json", nil)
req, _ := http.NewRequest("GET", "/containers/json", nil)
resp := httptest.NewRecorder()
ctx := context.Background()

api/swagger.yaml (2,186 changes): file diff suppressed because it is too large
@@ -1,7 +1,8 @@
package {{ .Package }} // import "github.com/docker/docker/api/types/{{ .Package }}"

// ----------------------------------------------------------------------------
// Code generated by `swagger generate operation`. DO NOT EDIT.
// DO NOT EDIT THIS FILE
// This file was generated by `swagger generate operation`
//
// See hack/generate-swagger-api.sh
// ----------------------------------------------------------------------------

@@ -30,7 +30,7 @@ type ContainerAttachConfig struct {
// expectation is for the logger endpoints to assemble the chunks using this
// metadata.
type PartialLogMetaData struct {
Last bool // true if this message is last of a partial
Last bool //true if this message is last of a partial
ID string // identifies group of messages comprising a single record
Ordinal int // ordering of message in partial group
}
@@ -73,7 +73,6 @@ type LogSelector struct {
// behavior of a backend.ContainerStats() call.
type ContainerStatsConfig struct {
Stream bool
OneShot bool
OutStream io.Writer
Version string
}

@@ -50,7 +50,7 @@ type ContainerCommitOptions struct {

// ContainerExecInspect holds information returned by exec inspect.
type ContainerExecInspect struct {
ExecID string `json:"ID"`
ExecID string
ContainerID string
Running bool
ExitCode int
@@ -205,7 +205,7 @@ const (
// BuilderV1 is the first generation builder in docker daemon
BuilderV1 BuilderVersion = "1"
// BuilderBuildKit is builder based on moby/buildkit project
BuilderBuildKit BuilderVersion = "2"
BuilderBuildKit = "2"
)

// ImageBuildResponse holds information
@@ -265,7 +265,7 @@ type ImagePullOptions struct {
// if the privilege request fails.
type RequestPrivilegeFunc func() (string, error)

// ImagePushOptions holds information to push images.
//ImagePushOptions holds information to push images.
type ImagePushOptions ImagePullOptions

// ImageRemoveOptions holds parameters to remove images.
@@ -363,10 +363,6 @@ type ServiceUpdateOptions struct {
// ServiceListOptions holds parameters to list services with.
type ServiceListOptions struct {
Filters filters.Args

// Status indicates whether the server should include the service task
// count of running and desired tasks.
Status bool
}

// ServiceInspectOptions holds parameters related to the "service inspect"

@@ -3,7 +3,6 @@ package types // import "github.com/docker/docker/api/types"
import (
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/network"
specs "github.com/opencontainers/image-spec/specs-go/v1"
)

// configs holds structs used for internal communication between the
@@ -16,7 +15,6 @@ type ContainerCreateConfig struct {
Config *container.Config
HostConfig *container.HostConfig
NetworkingConfig *network.NetworkingConfig
Platform *specs.Platform
AdjustCPUShares bool
}

@@ -1,7 +1,8 @@
package container // import "github.com/docker/docker/api/types/container"

// ----------------------------------------------------------------------------
// Code generated by `swagger generate operation`. DO NOT EDIT.
// DO NOT EDIT THIS FILE
// This file was generated by `swagger generate operation`
//
// See hack/generate-swagger-api.sh
// ----------------------------------------------------------------------------

@@ -1,7 +1,8 @@
package container // import "github.com/docker/docker/api/types/container"

// ----------------------------------------------------------------------------
// Code generated by `swagger generate operation`. DO NOT EDIT.
// DO NOT EDIT THIS FILE
// This file was generated by `swagger generate operation`
//
// See hack/generate-swagger-api.sh
// ----------------------------------------------------------------------------

@@ -1,7 +1,8 @@
package container // import "github.com/docker/docker/api/types/container"

// ----------------------------------------------------------------------------
// Code generated by `swagger generate operation`. DO NOT EDIT.
// DO NOT EDIT THIS FILE
// This file was generated by `swagger generate operation`
//
// See hack/generate-swagger-api.sh
// ----------------------------------------------------------------------------
@@ -10,9 +11,7 @@ package container // import "github.com/docker/docker/api/types/container"
// swagger:model ContainerTopOKBody
type ContainerTopOKBody struct {

// Each process running in the container, where each is process
// is an array of values corresponding to the titles.
//
// Each process running in the container, where each is process is an array of values corresponding to the titles
// Required: true
Processes [][]string `json:"Processes"`

@@ -1,7 +1,8 @@
package container // import "github.com/docker/docker/api/types/container"

// ----------------------------------------------------------------------------
// Code generated by `swagger generate operation`. DO NOT EDIT.
// DO NOT EDIT THIS FILE
// This file was generated by `swagger generate operation`
//
// See hack/generate-swagger-api.sh
// ----------------------------------------------------------------------------

@@ -1,7 +1,8 @@
package container // import "github.com/docker/docker/api/types/container"

// ----------------------------------------------------------------------------
// Code generated by `swagger generate operation`. DO NOT EDIT.
// DO NOT EDIT THIS FILE
// This file was generated by `swagger generate operation`
//
// See hack/generate-swagger-api.sh
// ----------------------------------------------------------------------------

@@ -7,32 +7,9 @@ import (
"github.com/docker/docker/api/types/mount"
"github.com/docker/docker/api/types/strslice"
"github.com/docker/go-connections/nat"
units "github.com/docker/go-units"
"github.com/docker/go-units"
)

// CgroupnsMode represents the cgroup namespace mode of the container
type CgroupnsMode string

// IsPrivate indicates whether the container uses its own private cgroup namespace
func (c CgroupnsMode) IsPrivate() bool {
return c == "private"
}

// IsHost indicates whether the container shares the host's cgroup namespace
func (c CgroupnsMode) IsHost() bool {
return c == "host"
}

// IsEmpty indicates whether the container cgroup namespace mode is unset
func (c CgroupnsMode) IsEmpty() bool {
return c == ""
}

// Valid indicates whether the cgroup namespace mode is valid
func (c CgroupnsMode) Valid() bool {
return c.IsEmpty() || c.IsPrivate() || c.IsHost()
}

// Isolation represents the isolation technology of a container. The supported
// values are platform specific
type Isolation string
@@ -145,7 +122,7 @@ func (n NetworkMode) ConnectedContainer() string {
return ""
}

// UserDefined indicates user-created network
//UserDefined indicates user-created network
func (n NetworkMode) UserDefined() string {
if n.IsUserDefined() {
return string(n)
@@ -361,7 +338,7 @@ type Resources struct {
Devices []DeviceMapping // List of devices to map inside the container
DeviceCgroupRules []string // List of rule to be added to the device cgroup
DeviceRequests []DeviceRequest // List of device requests for device drivers
KernelMemory int64 // Kernel memory limit (in bytes), Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes
KernelMemory int64 // Kernel memory limit (in bytes)
KernelMemoryTCP int64 // Hard limit for kernel TCP buffer memory (in bytes)
MemoryReservation int64 // Memory soft limit (in bytes)
MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap
@@ -403,10 +380,10 @@ type HostConfig struct {
// Applicable to UNIX platforms
CapAdd strslice.StrSlice // List of kernel capabilities to add to the container
CapDrop strslice.StrSlice // List of kernel capabilities to remove from the container
CgroupnsMode CgroupnsMode // Cgroup namespace mode to use for the container
DNS []string `json:"Dns"` // List of DNS server to lookup
DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for
DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for
Capabilities []string `json:"Capabilities"` // List of kernel capabilities to be available for container (this overrides the default set)
DNS []string `json:"Dns"` // List of DNS server to lookup
DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for
DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for
ExtraHosts []string // List of extra hosts
GroupAdd []string // List of additional groups that the container process will run as
IpcMode IpcMode // IPC namespace to use for the container

@@ -1,4 +1,3 @@
//go:build !windows
// +build !windows

package container // import "github.com/docker/docker/api/types/container"

@@ -1,6 +0,0 @@
package types

// Error returns the error message
func (e ErrorResponse) Error() string {
return e.Message
}
@@ -1,8 +1,6 @@
package events // import "github.com/docker/docker/api/types/events"

const (
// BuilderEventType is the event type that the builder generates
BuilderEventType = "builder"
// ContainerEventType is the event type that containers generate
ContainerEventType = "container"
// DaemonEventType is the event type that daemon generate

@@ -1,5 +1,4 @@
/*
Package filters provides tools for encoding a mapping of keys to a set of
/*Package filters provides tools for encoding a mapping of keys to a set of
multiple values.
*/
package filters // import "github.com/docker/docker/api/types/filters"
@@ -37,19 +36,10 @@ func NewArgs(initialArgs ...KeyValuePair) Args {
return args
}

// Keys returns all the keys in list of Args
func (args Args) Keys() []string {
keys := make([]string, 0, len(args.fields))
for k := range args.fields {
keys = append(keys, k)
}
return keys
}
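Args is a set-valued multimap over filter keys, and ToJSON produces the wire form carried in the ?filters= query parameter. A small usage sketch of the package API shown here (values are illustrative):

func exampleFilters() {
	args := filters.NewArgs()
	args.Add("status", "running")
	args.Add("status", "paused")
	args.Add("label", "env=prod")

	fmt.Println(args.Keys()) // [status label], in no guaranteed order

	// ToJSON is the wire form sent in the ?filters= query parameter.
	if s, err := filters.ToJSON(args); err == nil {
		// set-valued encoding, e.g.
		// {"label":{"env=prod":true},"status":{"paused":true,"running":true}}
		fmt.Println(s)
	}
}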

// MarshalJSON returns a JSON byte representation of the Args
func (args Args) MarshalJSON() ([]byte, error) {
if len(args.fields) == 0 {
return []byte("{}"), nil
return []byte{}, nil
}
return json.Marshal(args.fields)
}
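The change from an empty byte slice to the literal {} matters because MarshalJSON must emit valid JSON: a zero-length result corrupts any enclosing document that embeds Args. A quick check, sketched in the style of the package's own tests:

func TestEmptyArgsMarshal(t *testing.T) {
	b, err := json.Marshal(NewArgs()) // invokes Args.MarshalJSON
	if err != nil {
		t.Fatal(err)
	}
	if string(b) != "{}" {
		// an empty slice here would make the surrounding JSON invalid
		t.Fatalf("expected {} for empty Args, got %q", b)
	}
}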
@@ -67,7 +57,7 @@ func ToJSON(a Args) (string, error) {
// then the encoded format will use an older legacy format where the values are a
// list of strings, instead of a set.
//
// Deprecated: do not use in any new code; use ToJSON instead
// Deprecated: Use ToJSON
func ToParamWithVersion(version string, a Args) (string, error) {
if a.Len() == 0 {
return "", nil
@@ -107,6 +97,9 @@ func FromJSON(p string) (Args, error) {

// UnmarshalJSON populates the Args from JSON encode bytes
func (args Args) UnmarshalJSON(raw []byte) error {
if len(raw) == 0 {
return nil
}
return json.Unmarshal(raw, &args.fields)
}

@@ -152,7 +145,7 @@ func (args Args) Len() int {
func (args Args) MatchKVList(key string, sources map[string]string) bool {
fieldValues := args.fields[key]

// do not filter if there is no filter set or cannot determine filter
//do not filter if there is no filter set or cannot determine filter
if len(fieldValues) == 0 {
return true
}
@@ -198,7 +191,7 @@ func (args Args) Match(field, source string) bool {
// ExactMatch returns true if the source matches exactly one of the values.
func (args Args) ExactMatch(key, source string) bool {
fieldValues, ok := args.fields[key]
// do not filter if there is no filter set or cannot determine filter
//do not filter if there is no filter set or cannot determine filter
if !ok || len(fieldValues) == 0 {
return true
}
@@ -211,7 +204,7 @@ func (args Args) ExactMatch(key, source string) bool {
// matches exactly the value.
func (args Args) UniqueExactMatch(key, source string) bool {
fieldValues := args.fields[key]
// do not filter if there is no filter set or cannot determine filter
//do not filter if there is no filter set or cannot determine filter
if len(fieldValues) == 0 {
return true
}

@@ -1,34 +1,13 @@
package filters // import "github.com/docker/docker/api/types/filters"

import (
"encoding/json"
"errors"
"testing"

"gotest.tools/v3/assert"
is "gotest.tools/v3/assert/cmp"
"gotest.tools/assert"
is "gotest.tools/assert/cmp"
)

func TestMarshalJSON(t *testing.T) {
fields := map[string]map[string]bool{
"created": {"today": true},
"image.name": {"ubuntu*": true, "*untu": true},
}
a := Args{fields: fields}

_, err := a.MarshalJSON()
if err != nil {
t.Errorf("failed to marshal the filters: %s", err)
}
}

func TestMarshalJSONWithEmpty(t *testing.T) {
_, err := json.Marshal(NewArgs())
if err != nil {
t.Errorf("failed to marshal the filters: %s", err)
}
}

func TestToJSON(t *testing.T) {
fields := map[string]map[string]bool{
"created": {"today": true},
@@ -358,17 +337,14 @@ func TestWalkValues(t *testing.T) {
f.Add("status", "running")
f.Add("status", "paused")

err := f.WalkValues("status", func(value string) error {
f.WalkValues("status", func(value string) error {
if value != "running" && value != "paused" {
t.Fatalf("Unexpected value %s", value)
}
return nil
})
if err != nil {
t.Fatalf("Expected no error, got %v", err)
}

err = f.WalkValues("status", func(value string) error {
err := f.WalkValues("status", func(value string) error {
return errors.New("return")
})
if err == nil {

@@ -1,7 +1,8 @@
package image // import "github.com/docker/docker/api/types/image"

// ----------------------------------------------------------------------------
// Code generated by `swagger generate operation`. DO NOT EDIT.
// DO NOT EDIT THIS FILE
// This file was generated by `swagger generate operation`
//
// See hack/generate-swagger-api.sh
// ----------------------------------------------------------------------------

@@ -113,7 +113,7 @@ type TmpfsOptions struct {
// TODO(stevvooe): There are several more tmpfs flags, specified in the
// daemon, that are accepted. Only the most basic are added for now.
//
// From https://github.com/moby/sys/blob/mount/v0.1.1/mount/flags.go#L47-L56
// From docker/docker/pkg/mount/flags.go:
//
// var validFlags = map[string]bool{
// "": true,

@@ -1,6 +1,7 @@
package network // import "github.com/docker/docker/api/types/network"
import (
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/errdefs"
)

// Address represents an IP address
@@ -12,7 +13,7 @@ type Address struct {
// IPAM represents IP Address Management
type IPAM struct {
Driver string
Options map[string]string // Per network IPAM driver options
Options map[string]string //Per network IPAM driver options
Config []IPAMConfig
}

@@ -122,5 +123,5 @@ var acceptedFilters = map[string]bool{

// ValidateFilters validates the list of filter args with the available filters.
func ValidateFilters(filter filters.Args) error {
return filter.Validate(acceptedFilters)
return errdefs.InvalidParameter(filter.Validate(acceptedFilters))
}

@@ -4,7 +4,7 @@ import (
"encoding/json"
"net"

v1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/opencontainers/image-spec/specs-go/v1"
)

// ServiceConfig stores daemon registry services configuration.
@@ -45,32 +45,31 @@ func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) {
// IndexInfo contains information about a registry
//
// RepositoryInfo Examples:
// {
// "Index" : {
// "Name" : "docker.io",
// "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"],
// "Secure" : true,
// "Official" : true,
// },
// "RemoteName" : "library/debian",
// "LocalName" : "debian",
// "CanonicalName" : "docker.io/debian"
// "Official" : true,
// }
//
// {
// "Index" : {
// "Name" : "docker.io",
// "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"],
// "Secure" : true,
// "Official" : true,
// },
// "RemoteName" : "library/debian",
// "LocalName" : "debian",
// "CanonicalName" : "docker.io/debian"
// "Official" : true,
// }
//
// {
// "Index" : {
// "Name" : "127.0.0.1:5000",
// "Mirrors" : [],
// "Secure" : false,
// "Official" : false,
// },
// "RemoteName" : "user/repo",
// "LocalName" : "127.0.0.1:5000/user/repo",
// "CanonicalName" : "127.0.0.1:5000/user/repo",
// "Official" : false,
// }
// {
// "Index" : {
// "Name" : "127.0.0.1:5000",
// "Mirrors" : [],
// "Secure" : false,
// "Official" : false,
// },
// "RemoteName" : "user/repo",
// "LocalName" : "127.0.0.1:5000/user/repo",
// "CanonicalName" : "127.0.0.1:5000/user/repo",
// "Official" : false,
// }
type IndexInfo struct {
// Name is the name of the registry, such as "docker.io"
Name string

api/types/seccomp.go (new file, 94 lines)
@@ -0,0 +1,94 @@

package types // import "github.com/docker/docker/api/types"

// Seccomp represents the config for a seccomp profile for syscall restriction.
type Seccomp struct {
DefaultAction Action `json:"defaultAction"`
// Architectures is kept to maintain backward compatibility with the old
// seccomp profile.
Architectures []Arch `json:"architectures,omitempty"`
ArchMap []Architecture `json:"archMap,omitempty"`
Syscalls []*Syscall `json:"syscalls"`
}

// Architecture is used to represent a specific architecture
// and its sub-architectures
type Architecture struct {
Arch Arch `json:"architecture"`
SubArches []Arch `json:"subArchitectures"`
}

// Arch used for architectures
type Arch string

// Additional architectures permitted to be used for system calls
// By default only the native architecture of the kernel is permitted
const (
ArchX86 Arch = "SCMP_ARCH_X86"
ArchX86_64 Arch = "SCMP_ARCH_X86_64"
ArchX32 Arch = "SCMP_ARCH_X32"
ArchARM Arch = "SCMP_ARCH_ARM"
ArchAARCH64 Arch = "SCMP_ARCH_AARCH64"
ArchMIPS Arch = "SCMP_ARCH_MIPS"
ArchMIPS64 Arch = "SCMP_ARCH_MIPS64"
ArchMIPS64N32 Arch = "SCMP_ARCH_MIPS64N32"
ArchMIPSEL Arch = "SCMP_ARCH_MIPSEL"
ArchMIPSEL64 Arch = "SCMP_ARCH_MIPSEL64"
ArchMIPSEL64N32 Arch = "SCMP_ARCH_MIPSEL64N32"
ArchPPC Arch = "SCMP_ARCH_PPC"
ArchPPC64 Arch = "SCMP_ARCH_PPC64"
ArchPPC64LE Arch = "SCMP_ARCH_PPC64LE"
ArchS390 Arch = "SCMP_ARCH_S390"
ArchS390X Arch = "SCMP_ARCH_S390X"
)

// Action taken upon Seccomp rule match
type Action string

// Define actions for Seccomp rules
const (
ActKill Action = "SCMP_ACT_KILL"
ActTrap Action = "SCMP_ACT_TRAP"
ActErrno Action = "SCMP_ACT_ERRNO"
ActTrace Action = "SCMP_ACT_TRACE"
ActAllow Action = "SCMP_ACT_ALLOW"
)

// Operator used to match syscall arguments in Seccomp
type Operator string

// Define operators for syscall arguments in Seccomp
const (
OpNotEqual Operator = "SCMP_CMP_NE"
OpLessThan Operator = "SCMP_CMP_LT"
OpLessEqual Operator = "SCMP_CMP_LE"
OpEqualTo Operator = "SCMP_CMP_EQ"
OpGreaterEqual Operator = "SCMP_CMP_GE"
OpGreaterThan Operator = "SCMP_CMP_GT"
OpMaskedEqual Operator = "SCMP_CMP_MASKED_EQ"
)

// Arg used for matching specific syscall arguments in Seccomp
type Arg struct {
Index uint `json:"index"`
Value uint64 `json:"value"`
ValueTwo uint64 `json:"valueTwo"`
Op Operator `json:"op"`
}

// Filter is used to conditionally apply Seccomp rules
type Filter struct {
Caps []string `json:"caps,omitempty"`
Arches []string `json:"arches,omitempty"`
MinKernel string `json:"minKernel,omitempty"`
}

// Syscall is used to match a group of syscalls in Seccomp
type Syscall struct {
Name string `json:"name,omitempty"`
Names []string `json:"names,omitempty"`
Action Action `json:"action"`
Args []*Arg `json:"args"`
Comment string `json:"comment"`
Includes Filter `json:"includes"`
Excludes Filter `json:"excludes"`
}
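These types mirror the JSON layout of a seccomp profile as loaded from disk. A minimal hand-written profile using them; the policy is illustrative only, not a recommended default:

profile := types.Seccomp{
	DefaultAction: types.ActErrno, // deny every syscall not listed below
	Architectures: []types.Arch{types.ArchX86_64, types.ArchX32},
	Syscalls: []*types.Syscall{
		{
			Names:  []string{"read", "write", "exit_group"},
			Action: types.ActAllow,
		},
	},
}

// Marshals to the same shape as a profile.json passed via
// --security-opt seccomp=<file>.
data, err := json.Marshal(profile)
if err == nil {
	fmt.Println(string(data))
}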
|
||||
@@ -5,7 +5,6 @@ import (
|
||||
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/mount"
|
||||
"github.com/docker/go-units"
|
||||
)
|
||||
|
||||
// DNSConfig specifies DNS related configurations in resolver configuration file (resolv.conf)
|
||||
@@ -68,13 +67,10 @@ type ContainerSpec struct {
|
||||
// The format of extra hosts on swarmkit is specified in:
|
||||
// http://man7.org/linux/man-pages/man5/hosts.5.html
|
||||
// IP_address canonical_hostname [aliases...]
|
||||
Hosts []string `json:",omitempty"`
|
||||
DNSConfig *DNSConfig `json:",omitempty"`
|
||||
Secrets []*SecretReference `json:",omitempty"`
|
||||
Configs []*ConfigReference `json:",omitempty"`
|
||||
Isolation container.Isolation `json:",omitempty"`
|
||||
Sysctls map[string]string `json:",omitempty"`
|
||||
CapabilityAdd []string `json:",omitempty"`
|
||||
CapabilityDrop []string `json:",omitempty"`
|
||||
Ulimits []*units.Ulimit `json:",omitempty"`
|
||||
Hosts []string `json:",omitempty"`
|
||||
DNSConfig *DNSConfig `json:",omitempty"`
|
||||
Secrets []*SecretReference `json:",omitempty"`
|
||||
Configs []*ConfigReference `json:",omitempty"`
|
||||
Isolation container.Isolation `json:",omitempty"`
|
||||
Sysctls map[string]string `json:",omitempty"`
|
||||
}
|
||||
|
||||
@@ -1,5 +1,6 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// Code generated by protoc-gen-gogo.
// source: plugin.proto
// DO NOT EDIT!

/*
Package runtime is a generated protocol buffer package.
@@ -37,7 +38,6 @@ type PluginSpec struct {
Remote string `protobuf:"bytes,2,opt,name=remote,proto3" json:"remote,omitempty"`
Privileges []*PluginPrivilege `protobuf:"bytes,3,rep,name=privileges" json:"privileges,omitempty"`
Disabled bool `protobuf:"varint,4,opt,name=disabled,proto3" json:"disabled,omitempty"`
Env []string `protobuf:"bytes,5,rep,name=env" json:"env,omitempty"`
}

func (m *PluginSpec) Reset() { *m = PluginSpec{} }
@@ -73,13 +73,6 @@ func (m *PluginSpec) GetDisabled() bool {
return false
}

func (m *PluginSpec) GetEnv() []string {
if m != nil {
return m.Env
}
return nil
}

// PluginPrivilege describes a permission the user has to accept
// upon installing a plugin.
type PluginPrivilege struct {
@@ -167,21 +160,6 @@ func (m *PluginSpec) MarshalTo(dAtA []byte) (int, error) {
}
i++
}
if len(m.Env) > 0 {
for _, s := range m.Env {
dAtA[i] = 0x2a
i++
l = len(s)
for l >= 1<<7 {
dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
l >>= 7
i++
}
dAtA[i] = uint8(l)
i++
i += copy(dAtA[i:], s)
}
}
return i, nil
}
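
The env loop above writes one protobuf field per string: the tag byte 0x2a is field number 5 with wire type 2 ((5<<3)|2 = 42), followed by the string length as a base-128 varint, then the raw bytes. A standalone restatement of that length-prefix encoding (our sketch, mirroring the loop):

// putUvarint emits 7 bits per byte, least-significant group first,
// setting the high bit on every byte except the last.
func putUvarint(buf []byte, v uint64) int {
    i := 0
    for v >= 1<<7 {
        buf[i] = uint8(v&0x7f | 0x80)
        v >>= 7
        i++
    }
    buf[i] = uint8(v)
    return i + 1
}
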

@@ -230,6 +208,24 @@ func (m *PluginPrivilege) MarshalTo(dAtA []byte) (int, error) {
return i, nil
}

func encodeFixed64Plugin(dAtA []byte, offset int, v uint64) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
dAtA[offset+4] = uint8(v >> 32)
dAtA[offset+5] = uint8(v >> 40)
dAtA[offset+6] = uint8(v >> 48)
dAtA[offset+7] = uint8(v >> 56)
return offset + 8
}
func encodeFixed32Plugin(dAtA []byte, offset int, v uint32) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
return offset + 4
}
func encodeVarintPlugin(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
@@ -259,12 +255,6 @@ func (m *PluginSpec) Size() (n int) {
if m.Disabled {
n += 2
}
if len(m.Env) > 0 {
for _, s := range m.Env {
l = len(s)
n += 1 + l + sovPlugin(uint64(l))
}
}
return n
}

@@ -439,35 +429,6 @@ func (m *PluginSpec) Unmarshal(dAtA []byte) error {
}
}
m.Disabled = bool(v != 0)
case 5:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPlugin
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthPlugin
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Env = append(m.Env, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipPlugin(dAtA[iNdEx:])
@@ -734,21 +695,18 @@ var (
func init() { proto.RegisterFile("plugin.proto", fileDescriptorPlugin) }

var fileDescriptorPlugin = []byte{
// 256 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x4d, 0x4b, 0xc3, 0x30,
0x18, 0xc7, 0x89, 0xdd, 0xc6, 0xfa, 0x4c, 0x70, 0x04, 0x91, 0xe2, 0xa1, 0x94, 0x9d, 0x7a, 0x6a,
0x45, 0x2f, 0x82, 0x37, 0x0f, 0x9e, 0x47, 0xbc, 0x09, 0x1e, 0xd2, 0xf6, 0xa1, 0x06, 0x9b, 0x17,
0x92, 0xb4, 0xe2, 0x37, 0xf1, 0x23, 0x79, 0xf4, 0x23, 0x48, 0x3f, 0x89, 0x98, 0x75, 0x32, 0x64,
0xa7, 0xff, 0x4b, 0xc2, 0x9f, 0x1f, 0x0f, 0x9c, 0x9a, 0xae, 0x6f, 0x85, 0x2a, 0x8c, 0xd5, 0x5e,
0x6f, 0x3e, 0x08, 0xc0, 0x36, 0x14, 0x8f, 0x06, 0x6b, 0x4a, 0x61, 0xa6, 0xb8, 0xc4, 0x84, 0x64,
0x24, 0x8f, 0x59, 0xf0, 0xf4, 0x02, 0x16, 0x16, 0xa5, 0xf6, 0x98, 0x9c, 0x84, 0x76, 0x4a, 0xf4,
0x0a, 0xc0, 0x58, 0x31, 0x88, 0x0e, 0x5b, 0x74, 0x49, 0x94, 0x45, 0xf9, 0xea, 0x7a, 0x5d, 0xec,
0xc6, 0xb6, 0xfb, 0x07, 0x76, 0xf0, 0x87, 0x5e, 0xc2, 0xb2, 0x11, 0x8e, 0x57, 0x1d, 0x36, 0xc9,
0x2c, 0x23, 0xf9, 0x92, 0xfd, 0x65, 0xba, 0x86, 0x08, 0xd5, 0x90, 0xcc, 0xb3, 0x28, 0x8f, 0xd9,
0xaf, 0xdd, 0x3c, 0xc3, 0xd9, 0xbf, 0xb1, 0xa3, 0x78, 0x19, 0xac, 0x1a, 0x74, 0xb5, 0x15, 0xc6,
0x0b, 0xad, 0x26, 0xc6, 0xc3, 0x8a, 0x9e, 0xc3, 0x7c, 0xe0, 0x5d, 0x8f, 0x81, 0x31, 0x66, 0xbb,
0x70, 0xff, 0xf0, 0x39, 0xa6, 0xe4, 0x6b, 0x4c, 0xc9, 0xf7, 0x98, 0x92, 0xa7, 0xdb, 0x56, 0xf8,
0x97, 0xbe, 0x2a, 0x6a, 0x2d, 0xcb, 0x46, 0xd7, 0xaf, 0x68, 0xf7, 0xc2, 0x8d, 0x28, 0xfd, 0xbb,
0x41, 0x57, 0xba, 0x37, 0x6e, 0x65, 0x69, 0x7b, 0xe5, 0x85, 0xc4, 0xbb, 0x49, 0xab, 0x45, 0x38,
0xe4, 0xcd, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x99, 0xa8, 0xd9, 0x9b, 0x58, 0x01, 0x00, 0x00,
// 196 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0xc8, 0x29, 0x4d,
0xcf, 0xcc, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x57, 0x6a, 0x63, 0xe4, 0xe2, 0x0a, 0x00, 0x0b,
0x04, 0x17, 0xa4, 0x26, 0x0b, 0x09, 0x71, 0xb1, 0xe4, 0x25, 0xe6, 0xa6, 0x4a, 0x30, 0x2a, 0x30,
0x6a, 0x70, 0x06, 0x81, 0xd9, 0x42, 0x62, 0x5c, 0x6c, 0x45, 0xa9, 0xb9, 0xf9, 0x25, 0xa9, 0x12,
0x4c, 0x60, 0x51, 0x28, 0x4f, 0xc8, 0x80, 0x8b, 0xab, 0xa0, 0x28, 0xb3, 0x2c, 0x33, 0x27, 0x35,
0x3d, 0xb5, 0x58, 0x82, 0x59, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x40, 0x0f, 0x62, 0x58, 0x00, 0x4c,
0x22, 0x08, 0x49, 0x8d, 0x90, 0x14, 0x17, 0x47, 0x4a, 0x66, 0x71, 0x62, 0x52, 0x4e, 0x6a, 0x8a,
0x04, 0x8b, 0x02, 0xa3, 0x06, 0x47, 0x10, 0x9c, 0xaf, 0x14, 0xcb, 0xc5, 0x8f, 0xa6, 0x15, 0xab,
0x63, 0x14, 0xb8, 0xb8, 0x53, 0x52, 0x8b, 0x93, 0x8b, 0x32, 0x0b, 0x4a, 0x32, 0xf3, 0xf3, 0xa0,
0x2e, 0x42, 0x16, 0x12, 0x12, 0xe1, 0x62, 0x2d, 0x4b, 0xcc, 0x29, 0x4d, 0x05, 0xbb, 0x88, 0x33,
0x08, 0xc2, 0x71, 0xe2, 0x39, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4,
0x18, 0x93, 0xd8, 0xc0, 0x9e, 0x37, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xb8, 0x84, 0xad, 0x79,
0x0c, 0x01, 0x00, 0x00,
}

@@ -9,7 +9,6 @@ message PluginSpec {
string remote = 2;
repeated PluginPrivilege privileges = 3;
bool disabled = 4;
repeated string env = 5;
}

// PluginPrivilege describes a permission the user has to accept

@@ -10,17 +10,6 @@ type Service struct {
PreviousSpec *ServiceSpec `json:",omitempty"`
Endpoint Endpoint `json:",omitempty"`
UpdateStatus *UpdateStatus `json:",omitempty"`

// ServiceStatus is an optional, extra field indicating the number of
// desired and running tasks. It is provided primarily as a shortcut to
// calculating these values client-side, which otherwise would require
// listing all tasks for a service, an operation that could be
// computationally and network expensive.
ServiceStatus *ServiceStatus `json:",omitempty"`

// JobStatus is the status of a Service which is in one of ReplicatedJob or
// GlobalJob modes. It is absent on Replicated and Global services.
JobStatus *JobStatus `json:",omitempty"`
}

// ServiceSpec represents the spec of a service.
@@ -43,10 +32,8 @@ type ServiceSpec struct {

// ServiceMode represents the mode of a service.
type ServiceMode struct {
Replicated *ReplicatedService `json:",omitempty"`
Global *GlobalService `json:",omitempty"`
ReplicatedJob *ReplicatedJob `json:",omitempty"`
GlobalJob *GlobalJob `json:",omitempty"`
Replicated *ReplicatedService `json:",omitempty"`
Global *GlobalService `json:",omitempty"`
}

// UpdateState is the state of a service update.
@@ -83,32 +70,6 @@ type ReplicatedService struct {
// GlobalService is a kind of ServiceMode.
type GlobalService struct{}

// ReplicatedJob is a type of Service which executes a defined number of Tasks
// in parallel until the specified number of Tasks have succeeded.
type ReplicatedJob struct {
// MaxConcurrent indicates the maximum number of Tasks that should be
// executing simultaneously for this job at any given time. There may be
// fewer Tasks than MaxConcurrent executing simultaneously; for example, if
// there are fewer than MaxConcurrent tasks needed to reach
// TotalCompletions.
//
// If this field is empty, it will default to a max concurrency of 1.
MaxConcurrent *uint64 `json:",omitempty"`

// TotalCompletions is the total number of Tasks desired to run to
// completion.
//
// If this field is empty, the value of MaxConcurrent will be used.
TotalCompletions *uint64 `json:",omitempty"`
}

// GlobalJob is the type of a Service which executes a Task on every Node
// matching the Service's placement constraints. These tasks run to completion
// and then exit.
//
// This type is deliberately empty.
type GlobalJob struct{}
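
To make the two job modes concrete, here is a sketch (ours, not from the diff) of a ServiceMode requesting a replicated job; the pointer helper is hypothetical:

func uint64Ptr(v uint64) *uint64 { return &v }

// Run 10 completions in total, at most 3 tasks at a time. Leaving
// MaxConcurrent nil would default the concurrency to 1, per the comment above.
func exampleJobMode() ServiceMode {
    return ServiceMode{
        ReplicatedJob: &ReplicatedJob{
            MaxConcurrent:    uint64Ptr(3),
            TotalCompletions: uint64Ptr(10),
        },
    }
}
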

const (
// UpdateFailureActionPause PAUSE
UpdateFailureActionPause = "pause"
@@ -161,42 +122,3 @@ type UpdateConfig struct {
// started, or the new task is started before the old task is shut down.
Order string
}

// ServiceStatus represents the number of running tasks in a service and the
// number of tasks desired to be running.
type ServiceStatus struct {
// RunningTasks is the number of tasks for the service actually in the
// Running state
RunningTasks uint64

// DesiredTasks is the number of tasks desired to be running by the
// service. For replicated services, this is the replica count. For global
// services, this is computed by taking the number of tasks with desired
// state of not-Shutdown.
DesiredTasks uint64

// CompletedTasks is the number of tasks in the state Completed, if this
// service is in ReplicatedJob or GlobalJob mode. This field must be
// cross-referenced with the service type, because the default value of 0
// may mean that a service is not in a job mode, or it may mean that the
// job has yet to complete any tasks.
CompletedTasks uint64
}

// JobStatus is the status of a job-type service.
type JobStatus struct {
// JobIteration is a value increased each time a Job is executed,
// successfully or otherwise. "Executed", in this case, means the job as a
// whole has been started, not that an individual Task has been launched. A
// job is "Executed" when its ServiceSpec is updated. JobIteration can be
// used to disambiguate Tasks belonging to different executions of a job.
//
// Though JobIteration will increase with each subsequent execution, it may
// not necessarily increase by 1, and so JobIteration should not be used to
// keep track of the number of times a job has been executed.
JobIteration Version

// LastExecution is the time that the job was last executed, as observed by
// Swarm manager.
LastExecution time.Time `json:",omitempty"`
}

@@ -56,12 +56,6 @@ type Task struct {
DesiredState TaskState `json:",omitempty"`
NetworksAttachments []NetworkAttachment `json:",omitempty"`
GenericResources []GenericResource `json:",omitempty"`

// JobIteration is the JobIteration of the Service that this Task was
// spawned from, if the Service is a ReplicatedJob or GlobalJob. This is
// used to determine which Tasks belong to which run of the job. This field
// is absent if the Service mode is Replicated or Global.
JobIteration *Version `json:",omitempty"`
}
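
Since JobIteration exists on both the service's JobStatus and each Task, matching the two is how a client tells job runs apart. A hypothetical helper (ours; it assumes the swarm Version type exposes its Index counter):

// tasksForCurrentRun keeps only the tasks spawned by the latest execution
// of a job-mode service, using JobIteration as the disambiguator above.
func tasksForCurrentRun(svc Service, tasks []Task) []Task {
    if svc.JobStatus == nil {
        return nil // not a job-mode service, or no execution recorded yet
    }
    var out []Task
    for _, t := range tasks {
        if t.JobIteration != nil && t.JobIteration.Index == svc.JobStatus.JobIteration.Index {
            out = append(out, t)
        }
    }
    return out
}
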

// TaskSpec represents the spec of a task.
@@ -91,21 +85,13 @@ type TaskSpec struct {
Runtime RuntimeType `json:",omitempty"`
}

// Resources represents resources (CPU/Memory) which can be advertised by a
// node and requested to be reserved for a task.
// Resources represents resources (CPU/Memory).
type Resources struct {
NanoCPUs int64 `json:",omitempty"`
MemoryBytes int64 `json:",omitempty"`
GenericResources []GenericResource `json:",omitempty"`
}

// Limit describes limits on resources which can be requested by a task.
type Limit struct {
NanoCPUs int64 `json:",omitempty"`
MemoryBytes int64 `json:",omitempty"`
Pids int64 `json:",omitempty"`
}

// GenericResource represents a "user defined" resource which can
// be either an integer (e.g: SSD=3) or a string (e.g: SSD=sda1)
type GenericResource struct {
@@ -133,7 +119,7 @@ type DiscreteGenericResource struct {
// ResourceRequirements represents resources requirements.
type ResourceRequirements struct {
Limits *Limit `json:",omitempty"`
Limits *Resources `json:",omitempty"`
Reservations *Resources `json:",omitempty"`
}
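
For scale, NanoCPUs counts billionths of a CPU, so 1e9 is one full core. A sketch of a typical requirement pair (ours; note the diff above swaps the Limits field between *Limit and *Resources, and this sketch assumes the *Limit side):

func exampleRequirements() ResourceRequirements {
    return ResourceRequirements{
        Reservations: &Resources{
            NanoCPUs:    500000000,         // reserve half a CPU
            MemoryBytes: 256 * 1024 * 1024, // and 256 MiB
        },
        Limits: &Limit{
            NanoCPUs:    1000000000, // cap at one CPU
            MemoryBytes: 512 * 1024 * 1024,
            Pids:        100,
        },
    }
}
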

@@ -100,10 +100,8 @@ func GetTimestamp(value string, reference time.Time) (string, error) {
// if the incoming nanosecond portion is longer or shorter than 9 digits it is
// converted to nanoseconds. The expectation is that the seconds and
// nanoseconds will be used to create a time variable. For example:
//
// seconds, nanoseconds, err := ParseTimestamps("1136073600.000000001", 0)
// if err == nil since := time.Unix(seconds, nanoseconds)
//
// seconds, nanoseconds, err := ParseTimestamps("1136073600.000000001", 0)
// if err == nil since := time.Unix(seconds, nanoseconds)
// returns seconds as def(aultSeconds) if value == ""
func ParseTimestamps(value string, def int64) (int64, int64, error) {
if value == "" {
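
The hunk cuts off inside ParseTimestamps; per its doc comment, a typical call looks like this sketch (ours):

seconds, nanoseconds, err := ParseTimestamps("1136073600.000000001", 0)
if err == nil {
    since := time.Unix(seconds, nanoseconds)
    _ = since
}
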

@@ -39,7 +39,6 @@ type ImageInspect struct {
Author string
Config *container.Config
Architecture string
Variant string `json:",omitempty"`
Os string
OsVersion string `json:",omitempty"`
Size int64
@@ -154,11 +153,11 @@ type Info struct {
Images int
Driver string
DriverStatus [][2]string
SystemStatus [][2]string `json:",omitempty"` // SystemStatus is only propagated by the Swarm standalone API
SystemStatus [][2]string
Plugins PluginsInfo
MemoryLimit bool
SwapLimit bool
KernelMemory bool // Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes
KernelMemory bool
KernelMemoryTCP bool
CPUCfsPeriod bool `json:"CpuCfsPeriod"`
CPUCfsQuota bool `json:"CpuCfsQuota"`
@@ -175,11 +174,9 @@ type Info struct {
SystemTime string
LoggingDriver string
CgroupDriver string
CgroupVersion string `json:",omitempty"`
NEventsListener int
KernelVersion string
OperatingSystem string
OSVersion string
OSType string
Architecture string
IndexServerAddress string
@@ -195,24 +192,23 @@ type Info struct {
Labels []string
ExperimentalBuild bool
ServerVersion string
ClusterStore string `json:",omitempty"` // Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
ClusterAdvertise string `json:",omitempty"` // Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
ClusterStore string
ClusterAdvertise string
Runtimes map[string]Runtime
DefaultRuntime string
Swarm swarm.Info
// LiveRestoreEnabled determines whether containers should be kept
// running when the daemon is shut down or upon daemon start if
// running containers are detected
LiveRestoreEnabled bool
Isolation container.Isolation
InitBinary string
ContainerdCommit Commit
RuncCommit Commit
InitCommit Commit
SecurityOptions []string
ProductLicense string `json:",omitempty"`
DefaultAddressPools []NetworkAddressPool `json:",omitempty"`
Warnings []string
LiveRestoreEnabled bool
Isolation container.Isolation
InitBinary string
ContainerdCommit Commit
RuncCommit Commit
InitCommit Commit
SecurityOptions []string
ProductLicense string `json:",omitempty"`
Warnings []string
}

// KeyValue holds a key/value pair
@@ -220,12 +216,6 @@ type KeyValue struct {
Key, Value string
}

// NetworkAddressPool is a temp struct used by Info struct
type NetworkAddressPool struct {
Base string
Size int
}
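
NetworkAddressPool mirrors the daemon's default-address-pools setting: Base is the CIDR range to carve from, Size the prefix length of each network handed out. A sketch (ours):

// Hand out /24 networks from 10.10.0.0/16 by default.
func defaultPools() []NetworkAddressPool {
    return []NetworkAddressPool{
        {Base: "10.10.0.0/16", Size: 24},
    }
}
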

// SecurityOpt contains the name and options of a security option
type SecurityOpt struct {
Name string
@@ -326,7 +316,7 @@ type ContainerState struct {
}

// ContainerNode stores information about the node that a container
// is running on. It's only used by the Docker Swarm standalone API
// is running on. It's only available in Docker Swarm
type ContainerNode struct {
ID string
IPAddress string `json:"IP"`
@@ -350,7 +340,7 @@ type ContainerJSONBase struct {
HostnamePath string
HostsPath string
LogPath string
Node *ContainerNode `json:",omitempty"` // Node is only propagated by Docker Swarm standalone API
Node *ContainerNode `json:",omitempty"`
Name string
RestartCount int
Driver string
@@ -518,16 +508,6 @@ type Checkpoint struct {
type Runtime struct {
Path string `json:"path"`
Args []string `json:"runtimeArgs,omitempty"`

// This is exposed here only for internal use
// It is not currently supported to specify custom shim configs
Shim *ShimConfig `json:"-"`
}

// ShimConfig is used by runtime to configure containerd shims
type ShimConfig struct {
Binary string
Opts interface{}
}
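
Runtime corresponds to an entry in the daemon's runtimes map. A sketch of registering an alternative OCI runtime (ours; Shim is left nil because custom shim configs are internal-only, per the comment above):

func customRuntimes() map[string]Runtime {
    return map[string]Runtime{
        "runc-debug": {
            Path: "/usr/local/bin/runc",
            Args: []string{"--debug"},
        },
    }
}
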

// DiskUsage contains response of Engine API:

@@ -27,13 +27,10 @@ type Volume struct {
Name string `json:"Name"`

// The driver specific options used when creating the volume.
//
// Required: true
Options map[string]string `json:"Options"`

// The level at which the volume exists. Either `global` for cluster-wide,
// or `local` for machine level.
//
// The level at which the volume exists. Either `global` for cluster-wide, or `local` for machine level.
// Required: true
Scope string `json:"Scope"`

@@ -1,7 +1,8 @@
package volume // import "github.com/docker/docker/api/types/volume"

// ----------------------------------------------------------------------------
// Code generated by `swagger generate operation`. DO NOT EDIT.
// DO NOT EDIT THIS FILE
// This file was generated by `swagger generate operation`
//
// See hack/generate-swagger-api.sh
// ----------------------------------------------------------------------------
@@ -14,9 +15,7 @@ type VolumeCreateBody struct {
// Required: true
Driver string `json:"Driver"`

// A mapping of driver options and values. These options are
// passed directly to the driver and are driver specific.
//
// A mapping of driver options and values. These options are passed directly to the driver and are driver specific.
// Required: true
DriverOpts map[string]string `json:"DriverOpts"`

@@ -25,7 +24,6 @@ type VolumeCreateBody struct {
Labels map[string]string `json:"Labels"`

// The new volume's name. If not specified, Docker generates a name.
//
// Required: true
Name string `json:"Name"`
}
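
All four VolumeCreateBody fields are marked Required by the generated swagger model, which is why none carry omitempty tags. A sketch of a filled-in body (ours):

func scratchVolumeBody() VolumeCreateBody {
    return VolumeCreateBody{
        Driver:     "local",
        DriverOpts: map[string]string{"type": "tmpfs", "device": "tmpfs"},
        Labels:     map[string]string{"com.example.purpose": "scratch"},
        Name:       "scratch",
    }
}
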

@@ -1,7 +1,8 @@
package volume // import "github.com/docker/docker/api/types/volume"

// ----------------------------------------------------------------------------
// Code generated by `swagger generate operation`. DO NOT EDIT.
// DO NOT EDIT THIS FILE
// This file was generated by `swagger generate operation`
//
// See hack/generate-swagger-api.sh
// ----------------------------------------------------------------------------
@@ -16,8 +17,7 @@ type VolumeListOKBody struct {
// Required: true
Volumes []*types.Volume `json:"Volumes"`

// Warnings that occurred when fetching the list of volumes.
//
// Warnings that occurred when fetching the list of volumes
// Required: true
Warnings []string `json:"Warnings"`
}

@@ -5,23 +5,21 @@ import (
"encoding/json"
"fmt"
"io"
"path"
"io/ioutil"
"runtime"
"sync"
"sync/atomic"
"time"

"github.com/containerd/containerd/content"
containerderrors "github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/gc"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/leases"
"github.com/containerd/containerd/platforms"
ctdreference "github.com/containerd/containerd/reference"
"github.com/containerd/containerd/remotes"
"github.com/containerd/containerd/remotes/docker"
"github.com/containerd/containerd/remotes/docker/schema1"
distreference "github.com/docker/distribution/reference"
dimages "github.com/docker/docker/daemon/images"
"github.com/docker/docker/distribution"
"github.com/docker/docker/distribution/metadata"
"github.com/docker/docker/distribution/xfer"
@@ -30,20 +28,19 @@ import (
pkgprogress "github.com/docker/docker/pkg/progress"
"github.com/docker/docker/reference"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/client/llb"
gw "github.com/moby/buildkit/frontend/gateway/client"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/solver"
"github.com/moby/buildkit/session/auth"
"github.com/moby/buildkit/source"
"github.com/moby/buildkit/util/flightcontrol"
"github.com/moby/buildkit/util/imageutil"
"github.com/moby/buildkit/util/leaseutil"
"github.com/moby/buildkit/util/progress"
"github.com/moby/buildkit/util/resolver"
digest "github.com/opencontainers/go-digest"
"github.com/moby/buildkit/util/tracing"
"github.com/opencontainers/go-digest"
"github.com/opencontainers/image-spec/identity"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/time/rate"
)

@@ -55,29 +52,68 @@ type SourceOpt struct {
DownloadManager distribution.RootFSDownloadManager
MetadataStore metadata.V2MetadataService
ImageStore image.Store
RegistryHosts docker.RegistryHosts
LayerStore layer.Store
LeaseManager leases.Manager
GarbageCollect func(ctx context.Context) (gc.Stats, error)
ResolverOpt resolver.ResolveOptionsFunc
}

// Source is the source implementation for accessing container images
type Source struct {
type imageSource struct {
SourceOpt
g flightcontrol.Group
g flightcontrol.Group
resolverCache *resolverCache
}

// NewSource creates a new image source
func NewSource(opt SourceOpt) (*Source, error) {
return &Source{SourceOpt: opt}, nil
func NewSource(opt SourceOpt) (source.Source, error) {
is := &imageSource{
SourceOpt: opt,
resolverCache: newResolverCache(),
}

return is, nil
}

// ID returns image scheme identifier
func (is *Source) ID() string {
func (is *imageSource) ID() string {
return source.DockerImageScheme
}

func (is *Source) resolveLocal(refStr string) (*image.Image, error) {
func (is *imageSource) getResolver(ctx context.Context, rfn resolver.ResolveOptionsFunc, ref string, sm *session.Manager) remotes.Resolver {
if res := is.resolverCache.Get(ctx, ref); res != nil {
return res
}

opt := docker.ResolverOptions{
Client: tracing.DefaultClient,
}
if rfn != nil {
opt = rfn(ref)
}
opt.Credentials = is.getCredentialsFromSession(ctx, sm)
r := docker.NewResolver(opt)
r = is.resolverCache.Add(ctx, ref, r)
return r
}

func (is *imageSource) getCredentialsFromSession(ctx context.Context, sm *session.Manager) func(string) (string, string, error) {
id := session.FromContext(ctx)
if id == "" {
// can be removed after containerd/containerd#2812
return func(string) (string, string, error) {
return "", "", nil
}
}
return func(host string) (string, string, error) {
timeoutCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()

caller, err := sm.Get(timeoutCtx, id)
if err != nil {
return "", "", err
}

return auth.CredentialsFunc(tracing.ContextWithSpanFromContext(context.TODO(), ctx), caller)(host)
}
}
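
The closure returned above plugs straight into docker.ResolverOptions.Credentials, as getResolver does earlier in this hunk, so each registry host lookup asks the buildkit session for credentials. A sketch of the call shape (ours; ctx and sm come from the enclosing resolver setup, and the host is hypothetical):

creds := is.getCredentialsFromSession(ctx, sm)
username, secret, err := creds("registry-1.docker.io")
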

func (is *imageSource) resolveLocal(refStr string) ([]byte, error) {
ref, err := distreference.ParseNormalizedNamed(refStr)
if err != nil {
return nil, err
@@ -90,23 +126,16 @@ func (is *Source) resolveLocal(refStr string) (*image.Image, error) {
if err != nil {
return nil, err
}
return img, nil
return img.RawJSON(), nil
}

func (is *Source) resolveRemote(ctx context.Context, ref string, platform *ocispec.Platform, sm *session.Manager, g session.Group) (digest.Digest, []byte, error) {
func (is *imageSource) resolveRemote(ctx context.Context, ref string, platform *ocispec.Platform, sm *session.Manager) (digest.Digest, []byte, error) {
type t struct {
dgst digest.Digest
dt []byte
}
p := platforms.DefaultSpec()
if platform != nil {
p = *platform
}
// key is used to synchronize resolutions that can happen in parallel when doing multi-stage.
key := "getconfig::" + ref + "::" + platforms.Format(p)
res, err := is.g.Do(ctx, key, func(ctx context.Context) (interface{}, error) {
res := resolver.DefaultPool.GetResolver(is.RegistryHosts, ref, "pull", sm, g)
dgst, dt, err := imageutil.Config(ctx, ref, res, is.ContentStore, is.LeaseManager, platform)
res, err := is.g.Do(ctx, ref, func(ctx context.Context) (interface{}, error) {
dgst, dt, err := imageutil.Config(ctx, ref, is.getResolver(ctx, is.ResolverOpt, ref, sm), is.ContentStore, nil, platform)
if err != nil {
return nil, err
}
@@ -120,15 +149,14 @@ func (is *Source) resolveRemote(ctx context.Context, ref string, platform *ocisp
return typed.dgst, typed.dt, nil
}

// ResolveImageConfig returns image config for an image
func (is *Source) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt, sm *session.Manager, g session.Group) (digest.Digest, []byte, error) {
func (is *imageSource) ResolveImageConfig(ctx context.Context, ref string, opt gw.ResolveImageConfigOpt, sm *session.Manager) (digest.Digest, []byte, error) {
resolveMode, err := source.ParseImageResolveMode(opt.ResolveMode)
if err != nil {
return "", nil, err
}
switch resolveMode {
case source.ResolveModeForcePull:
dgst, dt, err := is.resolveRemote(ctx, ref, opt.Platform, sm, g)
dgst, dt, err := is.resolveRemote(ctx, ref, opt.Platform, sm)
// TODO: pull should fallback to local in case of failure to allow offline behavior
// the fallback doesn't work currently
return dgst, dt, err
@@ -145,26 +173,18 @@ func (is *Source) ResolveImageConfig(ctx context.Context, ref string, opt llb.Re
// default == prefer local, but in the future could be smarter
fallthrough
case source.ResolveModePreferLocal:
img, err := is.resolveLocal(ref)
dt, err := is.resolveLocal(ref)
if err == nil {
if opt.Platform != nil && !platformMatches(img, opt.Platform) {
logrus.WithField("ref", ref).Debugf("Requested build platform %s does not match local image platform %s, checking remote",
path.Join(opt.Platform.OS, opt.Platform.Architecture, opt.Platform.Variant),
path.Join(img.OS, img.Architecture, img.Variant),
)
} else {
return "", img.RawJSON(), err
}
return "", dt, err
}
// fallback to remote
return is.resolveRemote(ctx, ref, opt.Platform, sm, g)
return is.resolveRemote(ctx, ref, opt.Platform, sm)
}
// should never happen
return "", nil, fmt.Errorf("builder cannot resolve image %s: invalid mode %q", ref, opt.ResolveMode)
}

// Resolve returns access to pulling for an identifier
func (is *Source) Resolve(ctx context.Context, id source.Identifier, sm *session.Manager, vtx solver.Vertex) (source.SourceInstance, error) {
func (is *imageSource) Resolve(ctx context.Context, id source.Identifier, sm *session.Manager) (source.SourceInstance, error) {
imageIdentifier, ok := id.(*source.ImageIdentifier)
if !ok {
return nil, errors.Errorf("invalid image identifier %v", id)
@@ -176,9 +196,9 @@ func (is *Source) Resolve(ctx context.Context, id source.Identifier, sm *session
}

p := &puller{
src: imageIdentifier,
is: is,
//resolver: is.getResolver(is.RegistryHosts, imageIdentifier.Reference.String(), sm, g),
src: imageIdentifier,
is: is,
resolver: is.getResolver(ctx, is.ResolverOpt, imageIdentifier.Reference.String(), sm),
platform: platform,
sm: sm,
}
@@ -186,22 +206,20 @@ func (is *Source) Resolve(ctx context.Context, id source.Identifier, sm *session
}

type puller struct {
is *Source
is *imageSource
resolveOnce sync.Once
resolveLocalOnce sync.Once
g flightcontrol.Group
src *source.ImageIdentifier
desc ocispec.Descriptor
ref string
resolveErr error
resolver remotes.Resolver
config []byte
platform ocispec.Platform
sm *session.Manager
}

func (p *puller) resolver(g session.Group) remotes.Resolver {
return resolver.DefaultPool.GetResolver(p.is.RegistryHosts, p.src.Reference.String(), "pull", p.sm, g)
}

func (p *puller) mainManifestKey(platform ocispec.Platform) (digest.Digest, error) {
func (p *puller) mainManifestKey(dgst digest.Digest, platform ocispec.Platform) (digest.Digest, error) {
dt, err := json.Marshal(struct {
Digest digest.Digest
OS string
@@ -242,38 +260,31 @@ func (p *puller) resolveLocal() {
}

if p.src.ResolveMode == source.ResolveModeDefault || p.src.ResolveMode == source.ResolveModePreferLocal {
ref := p.src.Reference.String()
img, err := p.is.resolveLocal(ref)
dt, err := p.is.resolveLocal(p.src.Reference.String())
if err == nil {
if !platformMatches(img, &p.platform) {
logrus.WithField("ref", ref).Debugf("Requested build platform %s does not match local image platform %s, not resolving",
path.Join(p.platform.OS, p.platform.Architecture, p.platform.Variant),
path.Join(img.OS, img.Architecture, img.Variant),
)
} else {
p.config = img.RawJSON()
}
p.config = dt
}
}
})
}

func (p *puller) resolve(ctx context.Context, g session.Group) error {
_, err := p.g.Do(ctx, "", func(ctx context.Context) (_ interface{}, err error) {
func (p *puller) resolve(ctx context.Context) error {
p.resolveOnce.Do(func() {
resolveProgressDone := oneOffProgress(ctx, "resolve "+p.src.Reference.String())
defer func() {
resolveProgressDone(err)
}()

ref, err := distreference.ParseNormalizedNamed(p.src.Reference.String())
if err != nil {
return nil, err
p.resolveErr = err
resolveProgressDone(err)
return
}

if p.desc.Digest == "" && p.config == nil {
origRef, desc, err := p.resolver(g).Resolve(ctx, ref.String())
origRef, desc, err := p.resolver.Resolve(ctx, ref.String())
if err != nil {
return nil, err
p.resolveErr = err
resolveProgressDone(err)
return
}

p.desc = desc
@@ -288,90 +299,71 @@ func (p *puller) resolve(ctx context.Context, g session.Group) error {
if p.config == nil && p.desc.MediaType != images.MediaTypeDockerSchema1Manifest {
ref, err := distreference.WithDigest(ref, p.desc.Digest)
if err != nil {
return nil, err
p.resolveErr = err
resolveProgressDone(err)
return
}
_, dt, err := p.is.ResolveImageConfig(ctx, ref.String(), llb.ResolveImageConfigOpt{Platform: &p.platform, ResolveMode: resolveModeToString(p.src.ResolveMode)}, p.sm, g)
_, dt, err := p.is.ResolveImageConfig(ctx, ref.String(), gw.ResolveImageConfigOpt{Platform: &p.platform, ResolveMode: resolveModeToString(p.src.ResolveMode)}, p.sm)
if err != nil {
return nil, err
p.resolveErr = err
resolveProgressDone(err)
return
}

p.config = dt
}
return nil, nil
resolveProgressDone(nil)
})
return err
return p.resolveErr
}

func (p *puller) CacheKey(ctx context.Context, g session.Group, index int) (string, solver.CacheOpts, bool, error) {
func (p *puller) CacheKey(ctx context.Context, index int) (string, bool, error) {
p.resolveLocal()

if p.desc.Digest != "" && index == 0 {
dgst, err := p.mainManifestKey(p.platform)
dgst, err := p.mainManifestKey(p.desc.Digest, p.platform)
if err != nil {
return "", nil, false, err
return "", false, err
}
return dgst.String(), nil, false, nil
return dgst.String(), false, nil
}

if p.config != nil {
k := cacheKeyFromConfig(p.config).String()
if k == "" {
return digest.FromBytes(p.config).String(), nil, true, nil
return digest.FromBytes(p.config).String(), true, nil
}
return k, nil, true, nil
return k, true, nil
}

if err := p.resolve(ctx, g); err != nil {
return "", nil, false, err
if err := p.resolve(ctx); err != nil {
return "", false, err
}

if p.desc.Digest != "" && index == 0 {
dgst, err := p.mainManifestKey(p.platform)
dgst, err := p.mainManifestKey(p.desc.Digest, p.platform)
if err != nil {
return "", nil, false, err
return "", false, err
}
return dgst.String(), nil, false, nil
}

if len(p.config) == 0 && p.desc.MediaType != images.MediaTypeDockerSchema1Manifest {
return "", nil, false, errors.Errorf("invalid empty config file resolved for %s", p.src.Reference.String())
return dgst.String(), false, nil
}

k := cacheKeyFromConfig(p.config).String()
if k == "" || p.desc.MediaType == images.MediaTypeDockerSchema1Manifest {
dgst, err := p.mainManifestKey(p.platform)
if k == "" {
dgst, err := p.mainManifestKey(p.desc.Digest, p.platform)
if err != nil {
return "", nil, false, err
return "", false, err
}
return dgst.String(), nil, true, nil
return dgst.String(), true, nil
}

return k, nil, true, nil
return k, true, nil
}

func (p *puller) getRef(ctx context.Context, diffIDs []layer.DiffID, opts ...cache.RefOption) (cache.ImmutableRef, error) {
var parent cache.ImmutableRef
if len(diffIDs) > 1 {
var err error
parent, err = p.getRef(ctx, diffIDs[:len(diffIDs)-1], opts...)
if err != nil {
return nil, err
}
defer parent.Release(context.TODO())
}
return p.is.CacheAccessor.GetByBlob(ctx, ocispec.Descriptor{
Annotations: map[string]string{
"containerd.io/uncompressed": diffIDs[len(diffIDs)-1].String(),
},
}, parent, opts...)
}

func (p *puller) Snapshot(ctx context.Context, g session.Group) (cache.ImmutableRef, error) {
func (p *puller) Snapshot(ctx context.Context) (cache.ImmutableRef, error) {
p.resolveLocal()
if len(p.config) == 0 {
if err := p.resolve(ctx, g); err != nil {
return nil, err
}
if err := p.resolve(ctx); err != nil {
return nil, err
}

if p.config != nil {
@@ -380,31 +372,16 @@ func (p *puller) Snapshot(ctx context.Context, g session.Group) (cache.Immutable
if len(img.RootFS.DiffIDs) == 0 {
return nil, nil
}
l, err := p.is.LayerStore.Get(img.RootFS.ChainID())
if err == nil {
layer.ReleaseAndLog(p.is.LayerStore, l)
ref, err := p.getRef(ctx, img.RootFS.DiffIDs, cache.WithDescription(fmt.Sprintf("from local %s", p.ref)))
if err != nil {
return nil, err
}
return ref, nil
ref, err := p.is.CacheAccessor.GetFromSnapshotter(ctx, string(img.RootFS.ChainID()), cache.WithDescription(fmt.Sprintf("from local %s", p.ref)))
if err != nil {
return nil, err
}
return ref, nil
}
}

ongoing := newJobs(p.ref)

ctx, done, err := leaseutil.WithLease(ctx, p.is.LeaseManager, leases.WithExpiration(5*time.Minute), leaseutil.MakeTemporary)
if err != nil {
return nil, err
}
defer func() {
done(context.TODO())
if p.is.GarbageCollect != nil {
go p.is.GarbageCollect(context.TODO())
}
}()

pctx, stopProgress := context.WithCancel(ctx)

pw, _, ctx := progress.FromContext(ctx)
@@ -419,15 +396,17 @@ func (p *puller) Snapshot(ctx context.Context, g session.Group) (cache.Immutable
<-progressDone
}()

fetcher, err := p.resolver(g).Fetcher(ctx, p.ref)
fetcher, err := p.resolver.Fetcher(ctx, p.ref)
if err != nil {
stopProgress()
return nil, err
}

platform := platforms.Only(p.platform)

var nonLayers []digest.Digest
// workaround for GCR bug that requires a request to manifest endpoint for authentication to work.
// if current resolver has not used manifests do a dummy request.
// in most cases resolver should be cached and extra request is not needed.
ensureManifestRequested(ctx, p.resolver, p.ref)

var (
schema1Converter *schema1.Converter
@@ -449,7 +428,6 @@ func (p *puller) Snapshot(ctx context.Context, g session.Group) (cache.Immutable
case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest,
images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex,
images.MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig:
nonLayers = append(nonLayers, desc.Digest)
default:
return nil, images.ErrSkipDesc
}
@@ -459,6 +437,8 @@ func (p *puller) Snapshot(ctx context.Context, g session.Group) (cache.Immutable

// Get all the children for a descriptor
childrenHandler := images.ChildrenHandler(p.is.ContentStore)
// Set any children labels for that content
childrenHandler = images.SetChildrenLabels(p.is.ContentStore, childrenHandler)
// Filter the children by the platform
childrenHandler = images.FilterPlatforms(childrenHandler, platform)
// Limit manifests pulled to the best match in an index
@@ -529,7 +509,7 @@ func (p *puller) Snapshot(ctx context.Context, g session.Group) (cache.Immutable
tm := time.Now()
end = &tm
}
_ = pw.Write("extracting "+p.ID, progress.Status{
pw.Write("extracting "+p.ID, progress.Status{
Action: "extract",
Started: &st.st,
Completed: end,
@@ -546,9 +526,6 @@ func (p *puller) Snapshot(ctx context.Context, g session.Group) (cache.Immutable
layers := make([]xfer.DownloadDescriptor, 0, len(mfst.Layers))

for i, desc := range mfst.Layers {
if err := desc.Digest.Validate(); err != nil {
return nil, errors.Wrap(err, "layer digest could not be validated")
}
ongoing.add(desc)
layers = append(layers, &layerDescriptor{
desc: desc,
@@ -561,6 +538,9 @@ func (p *puller) Snapshot(ctx context.Context, g session.Group) (cache.Immutable

defer func() {
<-progressDone
for _, desc := range mfst.Layers {
p.is.ContentStore.Delete(context.TODO(), desc.Digest)
}
}()

r := image.NewRootFS()
@@ -570,22 +550,12 @@ func (p *puller) Snapshot(ctx context.Context, g session.Group) (cache.Immutable
return nil, err
}

ref, err := p.getRef(ctx, rootFS.DiffIDs, cache.WithDescription(fmt.Sprintf("pulled from %s", p.ref)))
ref, err := p.is.CacheAccessor.GetFromSnapshotter(ctx, string(rootFS.ChainID()), cache.WithDescription(fmt.Sprintf("pulled from %s", p.ref)))
release()
if err != nil {
return nil, err
}

// keep manifest blobs until ref is alive for cache
for _, nl := range nonLayers {
if err := p.is.LeaseManager.AddResource(ctx, leases.Lease{ID: ref.ID()}, leases.Resource{
ID: nl.String(),
Type: "content",
}); err != nil {
return nil, err
}
}

// TODO: handle windows layers for cross platform builds

if p.src.RecordType != "" && cache.GetRecordType(ref) == "" {
@@ -600,7 +570,7 @@ func (p *puller) Snapshot(ctx context.Context, g session.Group) (cache.Immutable

// Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error)
type layerDescriptor struct {
is *Source
is *imageSource
fetcher remotes.Fetcher
desc ocispec.Descriptor
diffID layer.DiffID
@@ -640,7 +610,7 @@ func (ld *layerDescriptor) Download(ctx context.Context, progressOutput pkgprogr
return nil, 0, err
}

return io.NopCloser(content.NewReader(ra)), ld.desc.Size, nil
return ioutil.NopCloser(content.NewReader(ra)), ld.desc.Size, nil
}

func (ld *layerDescriptor) Close() {
@@ -702,7 +672,7 @@ func showProgress(ctx context.Context, ongoing *jobs, cs content.Store, pw progr
refKey := remotes.MakeRefKey(ctx, j.Descriptor)
if a, ok := actives[refKey]; ok {
started := j.started
_ = pw.Write(j.Digest.String(), progress.Status{
pw.Write(j.Digest.String(), progress.Status{
Action: a.Status,
Total: int(a.Total),
Current: int(a.Offset),
@@ -715,7 +685,7 @@ func showProgress(ctx context.Context, ongoing *jobs, cs content.Store, pw progr
info, err := cs.Info(context.TODO(), j.Digest)
if err != nil {
if containerderrors.IsNotFound(err) {
// _ = pw.Write(j.Digest.String(), progress.Status{
// pw.Write(j.Digest.String(), progress.Status{
// Action: "waiting",
// })
continue
@@ -727,7 +697,7 @@ func showProgress(ctx context.Context, ongoing *jobs, cs content.Store, pw progr
if done || j.done {
started := j.started
createdAt := info.CreatedAt
_ = pw.Write(j.Digest.String(), progress.Status{
pw.Write(j.Digest.String(), progress.Status{
Action: "done",
Current: int(info.Size),
Total: int(info.Size),
@@ -813,13 +783,13 @@ func oneOffProgress(ctx context.Context, id string) func(err error) error {
st := progress.Status{
Started: &now,
}
_ = pw.Write(id, st)
pw.Write(id, st)
return func(err error) error {
// TODO: set error on status
now := time.Now()
st.Completed = &now
_ = pw.Write(id, st)
_ = pw.Close()
pw.Write(id, st)
pw.Close()
return err
}
}
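
oneOffProgress pairs a "started" status write with a completion callback. A usage sketch (ours; doWork stands in for the actual resolution):

done := oneOffProgress(ctx, "resolve docker.io/library/busybox:latest")
err := doWork(ctx) // the operation being reported on
done(err)          // records completion (and, on one side of the diff, closes the writer)
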
@@ -830,7 +800,6 @@ func cacheKeyFromConfig(dt []byte) digest.Digest {
var img ocispec.Image
err := json.Unmarshal(dt, &img)
if err != nil {
logrus.WithError(err).Errorf("failed to unmarshal image config for cache key %v", err)
return digest.FromBytes(dt)
}
if img.RootFS.Type != "layers" || len(img.RootFS.DiffIDs) == 0 {
@@ -853,12 +822,89 @@ func resolveModeToString(rm source.ResolveMode) string {
return ""
}

func platformMatches(img *image.Image, p *ocispec.Platform) bool {
return dimages.OnlyPlatformWithFallback(*p).Match(ocispec.Platform{
Architecture: img.Architecture,
OS: img.OS,
OSVersion: img.OSVersion,
OSFeatures: img.OSFeatures,
Variant: img.Variant,
})
type resolverCache struct {
mu sync.Mutex
m map[string]cachedResolver
}

type cachedResolver struct {
timeout time.Time
remotes.Resolver
counter int64
}

func (cr *cachedResolver) Resolve(ctx context.Context, ref string) (name string, desc ocispec.Descriptor, err error) {
atomic.AddInt64(&cr.counter, 1)
return cr.Resolver.Resolve(ctx, ref)
}

func (r *resolverCache) Add(ctx context.Context, ref string, resolver remotes.Resolver) remotes.Resolver {
r.mu.Lock()
defer r.mu.Unlock()

ref = r.repo(ref) + "-" + session.FromContext(ctx)

cr, ok := r.m[ref]
cr.timeout = time.Now().Add(time.Minute)
if ok {
return &cr
}

cr.Resolver = resolver
r.m[ref] = cr
return &cr
}

func (r *resolverCache) repo(refStr string) string {
ref, err := distreference.ParseNormalizedNamed(refStr)
if err != nil {
return refStr
}
return ref.Name()
}

func (r *resolverCache) Get(ctx context.Context, ref string) remotes.Resolver {
r.mu.Lock()
defer r.mu.Unlock()

ref = r.repo(ref) + "-" + session.FromContext(ctx)

cr, ok := r.m[ref]
if !ok {
return nil
}
return &cr
}

func (r *resolverCache) clean(now time.Time) {
r.mu.Lock()
for k, cr := range r.m {
if now.After(cr.timeout) {
delete(r.m, k)
}
}
r.mu.Unlock()
}

func newResolverCache() *resolverCache {
rc := &resolverCache{
m: map[string]cachedResolver{},
}
t := time.NewTicker(time.Minute)
go func() {
for {
rc.clean(<-t.C)
}
}()
return rc
}

func ensureManifestRequested(ctx context.Context, res remotes.Resolver, ref string) {
cr, ok := res.(*cachedResolver)
if !ok {
return
}
if atomic.LoadInt64(&cr.counter) == 0 {
res.Resolve(ctx, ref)
}
}
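
The resolverCache keys entries by repository name plus session ID and refreshes a one-minute timeout on every Add; the GCR workaround above then inspects the wrapped counter to decide whether a dummy manifest request is still needed. A lifecycle sketch (ours; ctx and base are assumed to exist):

rc := newResolverCache()
r := rc.Add(ctx, "docker.io/library/alpine:3.12", base) // wraps base, counting Resolve calls
if cached := rc.Get(ctx, "docker.io/library/alpine:3.12"); cached != nil {
    ensureManifestRequested(ctx, cached, "docker.io/library/alpine:3.12")
}
_ = r
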

@@ -7,7 +7,6 @@ import (

"github.com/containerd/containerd/content"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/remotes/docker"
distreference "github.com/docker/distribution/reference"
imagestore "github.com/docker/docker/image"
"github.com/docker/docker/reference"
@@ -16,27 +15,23 @@ import (
v1 "github.com/moby/buildkit/cache/remotecache/v1"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/solver"
"github.com/moby/buildkit/util/resolver"
"github.com/moby/buildkit/worker"
digest "github.com/opencontainers/go-digest"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)

func init() {
// See https://github.com/moby/buildkit/pull/1993.
v1.EmptyLayerRemovalSupported = false
}

// ResolveCacheImporterFunc returns a resolver function for local inline cache
func ResolveCacheImporterFunc(sm *session.Manager, resolverFunc docker.RegistryHosts, cs content.Store, rs reference.Store, is imagestore.Store) remotecache.ResolveCacheImporterFunc {
func ResolveCacheImporterFunc(sm *session.Manager, resolverOpt resolver.ResolveOptionsFunc, rs reference.Store, is imagestore.Store) remotecache.ResolveCacheImporterFunc {

upstream := registryremotecache.ResolveCacheImporterFunc(sm, cs, resolverFunc)
upstream := registryremotecache.ResolveCacheImporterFunc(sm, resolverOpt)

return func(ctx context.Context, group session.Group, attrs map[string]string) (remotecache.Importer, specs.Descriptor, error) {
return func(ctx context.Context, attrs map[string]string) (remotecache.Importer, specs.Descriptor, error) {
if dt, err := tryImportLocal(rs, is, attrs["ref"]); err == nil {
return newLocalImporter(dt), specs.Descriptor{}, nil
}
return upstream(ctx, group, attrs)
return upstream(ctx, attrs)
}
}
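
A wiring sketch (ours; the variable names are assumptions, and the four-argument signature is the resolver.ResolveOptionsFunc side of the diff) showing how the returned importer would be registered so local images are tried before the registry path:

importers := map[string]remotecache.ResolveCacheImporterFunc{
    "registry": ResolveCacheImporterFunc(sm, resolverOpt, referenceStore, imageStore),
}
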

@@ -1,133 +0,0 @@
package snapshot

import (
"context"
"sync"

"github.com/containerd/containerd/leases"
"github.com/sirupsen/logrus"
bolt "go.etcd.io/bbolt"
)

type sLM struct {
manager leases.Manager
s *snapshotter

mu sync.Mutex
byLease map[string]map[string]struct{}
bySnapshot map[string]map[string]struct{}
}

func newLeaseManager(s *snapshotter, lm leases.Manager) *sLM {
return &sLM{
s: s,
manager: lm,

byLease: map[string]map[string]struct{}{},
bySnapshot: map[string]map[string]struct{}{},
}
}

func (l *sLM) Create(ctx context.Context, opts ...leases.Opt) (leases.Lease, error) {
return l.manager.Create(ctx, opts...)
}

func (l *sLM) Delete(ctx context.Context, lease leases.Lease, opts ...leases.DeleteOpt) error {
if err := l.manager.Delete(ctx, lease, opts...); err != nil {
return err
}
l.mu.Lock()
if snaps, ok := l.byLease[lease.ID]; ok {
for sID := range snaps {
l.delRef(lease.ID, sID)
}
}
l.mu.Unlock()
return nil
}

func (l *sLM) List(ctx context.Context, filters ...string) ([]leases.Lease, error) {
return l.manager.List(ctx, filters...)
}

func (l *sLM) AddResource(ctx context.Context, lease leases.Lease, resource leases.Resource) error {
if err := l.manager.AddResource(ctx, lease, resource); err != nil {
return err
}
if resource.Type == "snapshots/default" {
l.mu.Lock()
l.addRef(lease.ID, resource.ID)
l.mu.Unlock()
}
return nil
}

func (l *sLM) DeleteResource(ctx context.Context, lease leases.Lease, resource leases.Resource) error {
if err := l.manager.DeleteResource(ctx, lease, resource); err != nil {
return err
}
if resource.Type == "snapshots/default" {
l.mu.Lock()
l.delRef(lease.ID, resource.ID)
l.mu.Unlock()
}
return nil
}

func (l *sLM) ListResources(ctx context.Context, lease leases.Lease) ([]leases.Resource, error) {
return l.manager.ListResources(ctx, lease)
}

func (l *sLM) addRef(lID, sID string) {
load := false
snapshots, ok := l.byLease[lID]
if !ok {
snapshots = map[string]struct{}{}
l.byLease[lID] = snapshots
}
if _, ok := snapshots[sID]; !ok {
snapshots[sID] = struct{}{}
}
leases, ok := l.bySnapshot[sID]
if !ok {
leases = map[string]struct{}{}
l.byLease[sID] = leases
load = true
}
if _, ok := leases[lID]; !ok {
leases[lID] = struct{}{}
}

if load {
l.s.getLayer(sID, true)
if _, ok := l.s.chainID(sID); ok {
l.s.db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucketIfNotExists([]byte(lID))
if err != nil {
return err
}
return b.Put(keyChainID, []byte(sID))
})
}
}
}

func (l *sLM) delRef(lID, sID string) {
snapshots, ok := l.byLease[lID]
if !ok {
delete(snapshots, sID)
if len(snapshots) == 0 {
delete(l.byLease, lID)
}
}
leases, ok := l.bySnapshot[sID]
if !ok {
delete(leases, lID)
if len(leases) == 0 {
delete(l.bySnapshot, sID)
if err := l.s.remove(context.TODO(), sID); err != nil {
logrus.Warnf("failed to remove snapshot %v", sID)
}
}
}
}
@@ -7,7 +7,6 @@ import (
"strings"
"sync"

"github.com/containerd/containerd/leases"
"github.com/containerd/containerd/mount"
"github.com/containerd/containerd/snapshots"
"github.com/docker/docker/daemon/graphdriver"
@@ -22,7 +21,6 @@ import (

var keyParent = []byte("parent")
var keyCommitted = []byte("committed")
var keyIsCommitted = []byte("iscommitted")
var keyChainID = []byte("chainid")
var keySize = []byte("size")

@@ -53,17 +51,19 @@ type snapshotter struct {
reg graphIDRegistrar
}

var _ snapshot.SnapshotterBase = &snapshotter{}

// NewSnapshotter creates a new snapshotter
func NewSnapshotter(opt Opt, prevLM leases.Manager) (snapshot.Snapshotter, leases.Manager, error) {
func NewSnapshotter(opt Opt) (snapshot.SnapshotterBase, error) {
dbPath := filepath.Join(opt.Root, "snapshots.db")
db, err := bolt.Open(dbPath, 0600, nil)
if err != nil {
return nil, nil, errors.Wrapf(err, "failed to open database file %s", dbPath)
return nil, errors.Wrapf(err, "failed to open database file %s", dbPath)
}

reg, ok := opt.LayerStore.(graphIDRegistrar)
if !ok {
return nil, nil, errors.Errorf("layerstore doesn't support graphID registration")
return nil, errors.Errorf("layerstore doesn't support graphID registration")
}

s := &snapshotter{
@@ -72,26 +72,7 @@ func NewSnapshotter(opt Opt, prevLM leases.Manager) (snapshot.Snapshotter, lease
refs: map[string]layer.Layer{},
reg: reg,
}

lm := newLeaseManager(s, prevLM)

ll, err := lm.List(context.TODO())
if err != nil {
return nil, nil, err
}
for _, l := range ll {
rr, err := lm.ListResources(context.TODO(), l)
if err != nil {
return nil, nil, err
}
for _, r := range rr {
if r.Type == "snapshots/default" {
lm.addRef(l.ID, r.ID)
}
}
}

return s, lm, nil
return s, nil
}

func (s *snapshotter) Name() string {
@@ -106,11 +87,11 @@ func (s *snapshotter) Prepare(ctx context.Context, key, parent string, opts ...s
origParent := parent
if parent != "" {
if l, err := s.getLayer(parent, false); err != nil {
return errors.Wrapf(err, "failed to get parent layer %s", parent)
return err
} else if l != nil {
parent, err = getGraphID(l)
if err != nil {
return errors.Wrapf(err, "failed to get parent graphid %s", l.ChainID())
return err
}
} else {
parent, _ = s.getGraphDriverID(parent)
@@ -165,24 +146,23 @@ func (s *snapshotter) getLayer(key string, withCommitted bool) (layer.Layer, err
return nil
}); err != nil {
s.mu.Unlock()
return nil, errors.WithStack(err)
return nil, err
}
s.mu.Unlock()
if id == "" {
s.mu.Unlock()
return nil, nil
}
return s.getLayer(string(id), withCommitted)
}
var err error
l, err = s.opt.LayerStore.Get(id)
if err != nil {
s.mu.Unlock()
return nil, errors.WithStack(err)
return nil, err
}
s.refs[key] = l
if err := s.db.Update(func(tx *bolt.Tx) error {
_, err := tx.CreateBucketIfNotExists([]byte(key))
return errors.WithStack(err)
return err
}); err != nil {
s.mu.Unlock()
return nil, err
@@ -317,10 +297,6 @@ func (s *snapshotter) Mounts(ctx context.Context, key string) (snapshot.Mountabl
}

func (s *snapshotter) Remove(ctx context.Context, key string) error {
return errors.Errorf("calling snapshot.remove is forbidden")
}

func (s *snapshotter) remove(ctx context.Context, key string) error {
l, err := s.getLayer(key, true)
if err != nil {
return err
@@ -329,17 +305,8 @@ func (s *snapshotter) remove(ctx context.Context, key string) error {
id, _ := s.getGraphDriverID(key)

var found bool
var alreadyCommitted bool
if err := s.db.Update(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(key))
found = b != nil

if b != nil {
if b.Get(keyIsCommitted) != nil {
alreadyCommitted = true
return nil
}
}
found = tx.Bucket([]byte(key)) != nil
if found {
tx.DeleteBucket([]byte(key))
if id != key {
@@ -351,10 +318,6 @@ func (s *snapshotter) remove(ctx context.Context, key string) error {
return err
}

if alreadyCommitted {
return nil
}

if l != nil {
s.mu.Lock()
delete(s.refs, key)
@@ -376,14 +339,7 @@ func (s *snapshotter) Commit(ctx context.Context, name, key string, opts ...snap
if err != nil {
return err
}
if err := b.Put(keyCommitted, []byte(key)); err != nil {
return err
}
b, err = tx.CreateBucketIfNotExists([]byte(key))
if err != nil {
return err
}
return b.Put(keyIsCommitted, []byte{})
return b.Put(keyCommitted, []byte(key))
})
}

@@ -391,8 +347,8 @@ func (s *snapshotter) View(ctx context.Context, key, parent string, opts ...snap
return s.Mounts(ctx, parent)
}

func (s *snapshotter) Walk(context.Context, snapshots.WalkFunc, ...string) error {
return nil
func (s *snapshotter) Walk(ctx context.Context, fn func(context.Context, snapshots.Info) error) error {
return errors.Errorf("not-implemented")
}

func (s *snapshotter) Update(ctx context.Context, info snapshots.Info, fieldpaths ...string) (snapshots.Info, error) {

@@ -10,14 +10,13 @@ import (
    "sync"
    "time"

    "github.com/containerd/containerd/content"
    "github.com/containerd/containerd/platforms"
    "github.com/containerd/containerd/remotes/docker"
    "github.com/docker/docker/api/types"
    "github.com/docker/docker/api/types/backend"
    "github.com/docker/docker/builder"
    "github.com/docker/docker/daemon/config"
    "github.com/docker/docker/daemon/images"
    "github.com/docker/docker/opts"
    "github.com/docker/docker/pkg/idtools"
    "github.com/docker/docker/pkg/streamformatter"
    "github.com/docker/docker/pkg/system"
@@ -28,6 +27,7 @@ import (
    "github.com/moby/buildkit/identity"
    "github.com/moby/buildkit/session"
    "github.com/moby/buildkit/util/entitlements"
    "github.com/moby/buildkit/util/resolver"
    "github.com/moby/buildkit/util/tracing"
    "github.com/pkg/errors"
    "golang.org/x/sync/errgroup"
@@ -71,18 +71,16 @@ type Opt struct {
    Dist                images.DistributionServices
    NetworkController   libnetwork.NetworkController
    DefaultCgroupParent string
    RegistryHosts       docker.RegistryHosts
    ResolverOpt         resolver.ResolveOptionsFunc
    BuilderConfig       config.BuilderConfig
    Rootless            bool
    IdentityMapping     *idtools.IdentityMapping
    DNSConfig           config.DNSConfig
    ApparmorProfile     string
}

// Builder can build using BuildKit backend
type Builder struct {
    controller     *control.Controller
    dnsconfig      config.DNSConfig
    reqBodyHandler *reqBodyHandler

    mu sync.Mutex
@@ -103,7 +101,6 @@ func New(opt Opt) (*Builder, error) {
    }
    b := &Builder{
        controller:     c,
        dnsconfig:      opt.DNSConfig,
        reqBodyHandler: reqHandler,
        jobs:           map[string]*buildJob{},
    }
@@ -244,9 +241,7 @@ func (b *Builder) Build(ctx context.Context, opt backend.BuildConfig) (*builder.
    }

    defer func() {
        b.mu.Lock()
        delete(b.jobs, buildID)
        b.mu.Unlock()
    }()
}

@@ -320,7 +315,7 @@ func (b *Builder) Build(ctx context.Context, opt backend.BuildConfig) (*builder.
        return nil, errors.Errorf("network mode %q not supported by buildkit", opt.Options.NetworkMode)
    }

    extraHosts, err := toBuildkitExtraHosts(opt.Options.ExtraHosts, b.dnsconfig.HostGatewayIP)
    extraHosts, err := toBuildkitExtraHosts(opt.Options.ExtraHosts)
    if err != nil {
        return nil, err
    }
@@ -473,6 +468,14 @@ func (sp *pruneProxy) SendMsg(m interface{}) error {
    return nil
}

type contentStoreNoLabels struct {
    content.Store
}

func (c *contentStoreNoLabels) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) {
    return content.Info{}, nil
}

type wrapRC struct {
    io.ReadCloser
    once sync.Once
@@ -549,28 +552,18 @@ func (j *buildJob) SetUpload(ctx context.Context, rc io.ReadCloser) error {
}

// toBuildkitExtraHosts converts hosts from docker key:value format to buildkit's csv format
func toBuildkitExtraHosts(inp []string, hostGatewayIP net.IP) (string, error) {
func toBuildkitExtraHosts(inp []string) (string, error) {
    if len(inp) == 0 {
        return "", nil
    }
    hosts := make([]string, 0, len(inp))
    for _, h := range inp {
        host, ip, ok := strings.Cut(h, ":")
        if !ok || host == "" || ip == "" {
        parts := strings.Split(h, ":")

        if len(parts) != 2 || parts[0] == "" || net.ParseIP(parts[1]) == nil {
            return "", errors.Errorf("invalid host %s", h)
        }
        // If the IP Address is a "host-gateway", replace this value with the
        // IP address stored in the daemon level HostGatewayIP config variable.
        if ip == opts.HostGatewayName {
            gateway := hostGatewayIP.String()
            if gateway == "" {
                return "", fmt.Errorf("unable to derive the IP value for host-gateway")
            }
            ip = gateway
        } else if net.ParseIP(ip) == nil {
            return "", fmt.Errorf("invalid host %s", h)
        }
        hosts = append(hosts, host+"="+ip)
        hosts = append(hosts, parts[0]+"="+parts[1])
    }
    return strings.Join(hosts, ","), nil
}
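
Both variants of toBuildkitExtraHosts turn docker's "host:ip" pairs into buildkit's "host=ip" csv form; the newer one additionally substitutes the daemon's gateway address for the special "host-gateway" name. A runnable sketch of that behaviour, with the gateway IP passed in directly (an assumption: the real code reads it from daemon config, and the "host-gateway" literal here stands in for opts.HostGatewayName):

package main

import (
    "fmt"
    "net"
    "strings"
)

// extraHostsToCSV mirrors toBuildkitExtraHosts: "host:ip" pairs become the
// "host=ip,host=ip" csv form buildkit expects.
func extraHostsToCSV(inp []string, gateway net.IP) (string, error) {
    hosts := make([]string, 0, len(inp))
    for _, h := range inp {
        host, ip, ok := strings.Cut(h, ":")
        if !ok || host == "" || ip == "" {
            return "", fmt.Errorf("invalid host %s", h)
        }
        if ip == "host-gateway" {
            ip = gateway.String() // daemon-level HostGatewayIP stand-in
        } else if net.ParseIP(ip) == nil {
            return "", fmt.Errorf("invalid host %s", h)
        }
        hosts = append(hosts, host+"="+ip)
    }
    return strings.Join(hosts, ","), nil
}

func main() {
    out, err := extraHostsToCSV(
        []string{"db:10.0.0.5", "host.docker.internal:host-gateway"},
        net.ParseIP("172.17.0.1"),
    )
    fmt.Println(out, err) // db=10.0.0.5,host.docker.internal=172.17.0.1 <nil>
}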

@@ -1,17 +1,13 @@
package buildkit

import (
    "context"
    "net/http"
    "os"
    "path/filepath"

    "github.com/containerd/containerd/content/local"
    ctdmetadata "github.com/containerd/containerd/metadata"
    "github.com/containerd/containerd/platforms"
    "github.com/containerd/containerd/snapshots"
    "github.com/docker/docker/api/types"
    "github.com/docker/docker/api/types/filters"
    "github.com/docker/docker/builder/builder-next/adapters/containerimage"
    "github.com/docker/docker/builder/builder-next/adapters/localinlinecache"
    "github.com/docker/docker/builder/builder-next/adapters/snapshot"
@@ -32,15 +28,13 @@ import (
    dockerfile "github.com/moby/buildkit/frontend/dockerfile/builder"
    "github.com/moby/buildkit/frontend/gateway"
    "github.com/moby/buildkit/frontend/gateway/forwarder"
    containerdsnapshot "github.com/moby/buildkit/snapshot/containerd"
    "github.com/moby/buildkit/snapshot/blobmapping"
    "github.com/moby/buildkit/solver/bboltcachestorage"
    "github.com/moby/buildkit/util/archutil"
    "github.com/moby/buildkit/util/binfmt_misc"
    "github.com/moby/buildkit/util/entitlements"
    "github.com/moby/buildkit/util/leaseutil"
    "github.com/moby/buildkit/worker"
    specs "github.com/opencontainers/image-spec/specs-go/v1"
    "github.com/pkg/errors"
    bolt "go.etcd.io/bbolt"
)

func newController(rt http.RoundTripper, opt Opt) (*control.Controller, error) {
@@ -60,42 +54,34 @@ func newController(rt http.RoundTripper, opt Opt) (*control.Controller, error) {
        return nil, errors.Errorf("could not access graphdriver")
    }

    store, err := local.NewStore(filepath.Join(root, "content"))
    if err != nil {
        return nil, err
    }

    db, err := bolt.Open(filepath.Join(root, "containerdmeta.db"), 0644, nil)
    if err != nil {
        return nil, errors.WithStack(err)
    }

    mdb := ctdmetadata.NewDB(db, store, map[string]snapshots.Snapshotter{})

    store = containerdsnapshot.NewContentStore(mdb.ContentStore(), "buildkit")

    lm := leaseutil.WithNamespace(ctdmetadata.NewLeaseManager(mdb), "buildkit")

    snapshotter, lm, err := snapshot.NewSnapshotter(snapshot.Opt{
    sbase, err := snapshot.NewSnapshotter(snapshot.Opt{
        GraphDriver:     driver,
        LayerStore:      dist.LayerStore,
        Root:            root,
        IdentityMapping: opt.IdentityMapping,
    }, lm)
    })
    if err != nil {
        return nil, err
    }

    if err := cache.MigrateV2(context.Background(), filepath.Join(root, "metadata.db"), filepath.Join(root, "metadata_v2.db"), store, snapshotter, lm); err != nil {
    store, err := local.NewStore(filepath.Join(root, "content"))
    if err != nil {
        return nil, err
    }
    store = &contentStoreNoLabels{store}

    md, err := metadata.NewStore(filepath.Join(root, "metadata_v2.db"))
    md, err := metadata.NewStore(filepath.Join(root, "metadata.db"))
    if err != nil {
        return nil, err
    }

    layerGetter, ok := snapshotter.(imagerefchecker.LayerGetter)
    snapshotter := blobmapping.NewSnapshotter(blobmapping.Opt{
        Content:       store,
        Snapshotter:   sbase,
        MetadataStore: md,
    })

    layerGetter, ok := sbase.(imagerefchecker.LayerGetter)
    if !ok {
        return nil, errors.Errorf("snapshotter does not implement layergetter")
    }
@@ -109,9 +95,6 @@ func newController(rt http.RoundTripper, opt Opt) (*control.Controller, error) {
        Snapshotter:     snapshotter,
        MetadataStore:   md,
        PruneRefChecker: refChecker,
        LeaseManager:    lm,
        ContentStore:    store,
        GarbageCollect:  mdb.GarbageCollect,
    })
    if err != nil {
        return nil, err
@@ -124,10 +107,7 @@ func newController(rt http.RoundTripper, opt Opt) (*control.Controller, error) {
        MetadataStore:  dist.V2MetadataService,
        ImageStore:     dist.ImageStore,
        ReferenceStore: dist.ReferenceStore,
        RegistryHosts:  opt.RegistryHosts,
        LayerStore:     dist.LayerStore,
        LeaseManager:   lm,
        GarbageCollect: mdb.GarbageCollect,
        ResolverOpt:    opt.ResolverOpt,
    })
    if err != nil {
        return nil, err
@@ -135,12 +115,12 @@ func newController(rt http.RoundTripper, opt Opt) (*control.Controller, error) {

    dns := getDNSConfig(opt.DNSConfig)

    exec, err := newExecutor(root, opt.DefaultCgroupParent, opt.NetworkController, dns, opt.Rootless, opt.IdentityMapping, opt.ApparmorProfile)
    exec, err := newExecutor(root, opt.DefaultCgroupParent, opt.NetworkController, dns, opt.Rootless, opt.IdentityMapping)
    if err != nil {
        return nil, err
    }

    differ, ok := snapshotter.(containerimageexp.Differ)
    differ, ok := sbase.(containerimageexp.Differ)
    if !ok {
        return nil, errors.Errorf("snapshotter doesn't support differ")
    }
@@ -164,24 +144,16 @@ func newController(rt http.RoundTripper, opt Opt) (*control.Controller, error) {
        return nil, errors.Wrap(err, "could not get builder GC policy")
    }

    layers, ok := snapshotter.(mobyworker.LayerAccess)
    layers, ok := sbase.(mobyworker.LayerAccess)
    if !ok {
        return nil, errors.Errorf("snapshotter doesn't support differ")
    }

    p, err := parsePlatforms(archutil.SupportedPlatforms(true))
    p, err := parsePlatforms(binfmt_misc.SupportedPlatforms())
    if err != nil {
        return nil, err
    }

    leases, err := lm.List(context.TODO(), "labels.\"buildkit/lease.temporary\"")
    if err != nil {
        return nil, err
    }
    for _, l := range leases {
        lm.Delete(context.TODO(), l)
    }

    wopt := mobyworker.Opt{
        ID:            "moby",
        MetadataStore: md,
@@ -217,13 +189,16 @@ func newController(rt http.RoundTripper, opt Opt) (*control.Controller, error) {
        Frontends:       frontends,
        CacheKeyStorage: cacheStorage,
        ResolveCacheImporterFuncs: map[string]remotecache.ResolveCacheImporterFunc{
            "registry": localinlinecache.ResolveCacheImporterFunc(opt.SessionManager, opt.RegistryHosts, store, dist.ReferenceStore, dist.ImageStore),
            "registry": localinlinecache.ResolveCacheImporterFunc(opt.SessionManager, opt.ResolverOpt, dist.ReferenceStore, dist.ImageStore),
            "local":    localremotecache.ResolveCacheImporterFunc(opt.SessionManager),
        },
        ResolveCacheExporterFuncs: map[string]remotecache.ResolveCacheExporterFunc{
            "inline": inlineremotecache.ResolveCacheExporterFunc(),
        },
        Entitlements: getEntitlements(opt.BuilderConfig),
        Entitlements: []string{
            string(entitlements.EntitlementNetworkHost),
            // string(entitlements.EntitlementSecurityInsecure),
        },
    })
}

@@ -257,7 +232,7 @@ func getGCPolicy(conf config.BuilderConfig, root string) ([]client.PruneInfo, er
        gcPolicy[i], err = toBuildkitPruneInfo(types.BuildCachePruneOptions{
            All:         p.All,
            KeepStorage: b,
            Filters:     filters.Args(p.Filter),
            Filters:     p.Filter,
        })
        if err != nil {
            return nil, err
@@ -279,15 +254,3 @@ func parsePlatforms(platformsStr []string) ([]specs.Platform, error) {
    }
    return out, nil
}

func getEntitlements(conf config.BuilderConfig) []string {
    var ents []string
    // In case of no config settings, NetworkHost should be enabled & SecurityInsecure must be disabled.
    if conf.Entitlements.NetworkHost == nil || *conf.Entitlements.NetworkHost {
        ents = append(ents, string(entitlements.EntitlementNetworkHost))
    }
    if conf.Entitlements.SecurityInsecure != nil && *conf.Entitlements.SecurityInsecure {
        ents = append(ents, string(entitlements.EntitlementSecurityInsecure))
    }
    return ents
}
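
getEntitlements treats a nil *bool as "not configured", so NetworkHost defaults to enabled and SecurityInsecure to disabled unless explicitly set. A standalone sketch of the same tri-state defaulting (the entitlement strings below are assumed values standing in for the entitlements package constants):

package main

import "fmt"

// entitlementDefaults mirrors getEntitlements: a nil pointer means the
// option was left unset in daemon.json, and each option gets its own default.
func entitlementDefaults(networkHost, securityInsecure *bool) []string {
    var ents []string
    if networkHost == nil || *networkHost {
        ents = append(ents, "network.host") // enabled unless explicitly false
    }
    if securityInsecure != nil && *securityInsecure {
        ents = append(ents, "security.insecure") // disabled unless explicitly true
    }
    return ents
}

func main() {
    off := false
    fmt.Println(entitlementDefaults(nil, nil))  // [network.host]
    fmt.Println(entitlementDefaults(&off, nil)) // []
}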

@@ -1,4 +1,3 @@
//go:build !windows
// +build !windows

package buildkit
@@ -11,7 +10,6 @@ import (

    "github.com/docker/docker/daemon/config"
    "github.com/docker/docker/pkg/idtools"
    "github.com/docker/docker/pkg/stringid"
    "github.com/docker/libnetwork"
    "github.com/moby/buildkit/executor"
    "github.com/moby/buildkit/executor/oci"
@@ -25,25 +23,12 @@ import (

const networkName = "bridge"

func newExecutor(root, cgroupParent string, net libnetwork.NetworkController, dnsConfig *oci.DNSConfig, rootless bool, idmap *idtools.IdentityMapping, apparmorProfile string) (executor.Executor, error) {
    netRoot := filepath.Join(root, "net")
func newExecutor(root, cgroupParent string, net libnetwork.NetworkController, dnsConfig *oci.DNSConfig, rootless bool, idmap *idtools.IdentityMapping) (executor.Executor, error) {
    networkProviders := map[pb.NetMode]network.Provider{
        pb.NetMode_UNSET: &bridgeProvider{NetworkController: net, Root: netRoot},
        pb.NetMode_UNSET: &bridgeProvider{NetworkController: net, Root: filepath.Join(root, "net")},
        pb.NetMode_HOST:  network.NewHostProvider(),
        pb.NetMode_NONE:  network.NewNoneProvider(),
    }

    // make sure net state directory is cleared from previous state
    fis, err := os.ReadDir(netRoot)
    if err == nil {
        for _, fi := range fis {
            fp := filepath.Join(netRoot, fi.Name())
            if err := os.RemoveAll(fp); err != nil {
                logrus.WithError(err).Errorf("failed to delete old network state: %v", fp)
            }
        }
    }

    return runcexecutor.New(runcexecutor.Opt{
        Root:              filepath.Join(root, "executor"),
        CommandCandidates: []string{"runc"},
@@ -52,7 +37,6 @@ func newExecutor(root, cgroupParent string, net libnetwork.NetworkController, dn
        NoPivot:         os.Getenv("DOCKER_RAMDISK") != "",
        IdentityMapping: idmap,
        DNS:             dnsConfig,
        ApparmorProfile: apparmorProfile,
    }, networkProviders)
}

@@ -110,21 +94,19 @@ func (iface *lnInterface) init(c libnetwork.NetworkController, n libnetwork.Netw
    iface.ep = ep
}

func (iface *lnInterface) Set(s *specs.Spec) error {
func (iface *lnInterface) Set(s *specs.Spec) {
    <-iface.ready
    if iface.err != nil {
        logrus.WithError(iface.err).Error("failed to set networking spec")
        return iface.err
        return
    }
    shortNetCtlrID := stringid.TruncateID(iface.provider.NetworkController.ID())
    // attach netns to bridge within the container namespace, using reexec in a prestart hook
    s.Hooks = &specs.Hooks{
        Prestart: []specs.Hook{{
            Path: filepath.Join("/proc", strconv.Itoa(os.Getpid()), "exe"),
            Args: []string{"libnetwork-setkey", "-exec-root=" + iface.provider.Config().Daemon.ExecRoot, iface.sbx.ContainerID(), shortNetCtlrID},
            Args: []string{"libnetwork-setkey", "-exec-root=" + iface.provider.Config().Daemon.ExecRoot, iface.sbx.ContainerID(), iface.provider.NetworkController.ID()},
        }},
    }
    return nil
}

func (iface *lnInterface) Close() error {
@@ -132,10 +114,7 @@ func (iface *lnInterface) Close() error {
    if iface.sbx != nil {
        go func() {
            if err := iface.sbx.Delete(); err != nil {
                logrus.WithError(err).Errorf("failed to delete builder network sandbox")
            }
            if err := os.RemoveAll(filepath.Join(iface.provider.Root, iface.sbx.ContainerID())); err != nil {
                logrus.WithError(err).Errorf("failed to delete builder sandbox directory")
                logrus.Errorf("failed to delete builder network sandbox: %v", err)
            }
        }()
    }

@@ -3,26 +3,24 @@ package buildkit
import (
    "context"
    "errors"
    "io"

    "github.com/docker/docker/daemon/config"
    "github.com/docker/docker/pkg/idtools"
    "github.com/docker/libnetwork"
    "github.com/moby/buildkit/cache"
    "github.com/moby/buildkit/executor"
    "github.com/moby/buildkit/executor/oci"
)

func newExecutor(_, _ string, _ libnetwork.NetworkController, _ *oci.DNSConfig, _ bool, _ *idtools.IdentityMapping, _ string) (executor.Executor, error) {
func newExecutor(_, _ string, _ libnetwork.NetworkController, _ *oci.DNSConfig, _ bool, _ *idtools.IdentityMapping) (executor.Executor, error) {
    return &winExecutor{}, nil
}

type winExecutor struct {
}

func (w *winExecutor) Run(ctx context.Context, id string, root executor.Mount, mounts []executor.Mount, process executor.ProcessInfo, started chan<- struct{}) (err error) {
    return errors.New("buildkit executor not implemented for windows")
}

func (w *winExecutor) Exec(ctx context.Context, id string, process executor.ProcessInfo) error {
func (e *winExecutor) Exec(ctx context.Context, meta executor.Meta, rootfs cache.Mountable, mounts []executor.Mount, stdin io.ReadCloser, stdout, stderr io.WriteCloser) error {
    return errors.New("buildkit executor not implemented for windows")
}

@@ -77,7 +77,8 @@ func (e *imageExporterInstance) Name() string {
    return "exporting to image"
}

func (e *imageExporterInstance) Export(ctx context.Context, inp exporter.Source, sessionID string) (map[string]string, error) {
func (e *imageExporterInstance) Export(ctx context.Context, inp exporter.Source) (map[string]string, error) {

    if len(inp.Refs) > 1 {
        return nil, fmt.Errorf("exporting multiple references to image store is currently unsupported")
    }
@@ -129,7 +130,7 @@ func (e *imageExporterInstance) Export(ctx context.Context, inp exporter.Source,
        diffs[i] = digest.Digest(diffIDs[i])
    }

    _ = layersDone(nil)
    layersDone(nil)
}

if len(config) == 0 {
@@ -159,7 +160,7 @@ func (e *imageExporterInstance) Export(ctx context.Context, inp exporter.Source,
    if err != nil {
        return nil, configDone(err)
    }
    _ = configDone(nil)
    configDone(nil)

    if e.opt.ReferenceStore != nil {
        for _, targetName := range e.targetNames {
@@ -168,7 +169,7 @@ func (e *imageExporterInstance) Export(ctx context.Context, inp exporter.Source,
            if err := e.opt.ReferenceStore.AddTag(targetName, digest.Digest(id), true); err != nil {
                return nil, tagDone(err)
            }
            _ = tagDone(nil)
            tagDone(nil)
        }
    }

@@ -20,12 +20,13 @@ import (
// )

func emptyImageConfig() ([]byte, error) {
    img := ocispec.Image{}
    img.Architecture = runtime.GOARCH
    img.OS = runtime.GOOS
    img := ocispec.Image{
        Architecture: runtime.GOARCH,
        OS:           runtime.GOOS,
    }
    img.RootFS.Type = "layers"
    img.Config.WorkingDir = "/"
    img.Config.Env = []string{"PATH=" + system.DefaultPathEnvUnix}
    img.Config.Env = []string{"PATH=" + system.DefaultPathEnv}
    dt, err := json.Marshal(img)
    return dt, errors.Wrap(err, "failed to create empty image config")
}
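
For reference, emptyImageConfig just marshals a skeleton OCI image config. A sketch of roughly what that JSON contains, using a trimmed-down local stand-in for ocispec.Image (the field set and JSON tags are simplified assumptions, not the full spec type):

package main

import (
    "encoding/json"
    "fmt"
    "runtime"
)

// imageConfig is a minimal stand-in for ocispec.Image, enough to show
// the shape emptyImageConfig serializes.
type imageConfig struct {
    Architecture string `json:"architecture"`
    OS           string `json:"os"`
    RootFS       struct {
        Type string `json:"type"`
    } `json:"rootfs"`
    Config struct {
        WorkingDir string   `json:"WorkingDir"`
        Env        []string `json:"Env"`
    } `json:"config"`
}

func main() {
    img := imageConfig{Architecture: runtime.GOARCH, OS: runtime.GOOS}
    img.RootFS.Type = "layers"
    img.Config.WorkingDir = "/"
    // Docker's default unix PATH (system.DefaultPathEnvUnix).
    img.Config.Env = []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"}
    dt, err := json.Marshal(img)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(dt))
}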

@@ -77,9 +78,9 @@ func patchImageConfig(dt []byte, dps []digest.Digest, history []ocispec.History,
    }

    if cache != nil {
        dt, err = json.Marshal(cache)
        dt, err := json.Marshal(cache)
        if err != nil {
            return nil, errors.Wrap(err, "failed to marshal cache")
            return nil, err
        }
        m["moby.buildkit.cache.v0"] = dt
    }
@@ -203,13 +204,13 @@ func oneOffProgress(ctx context.Context, id string) func(err error) error {
    st := progress.Status{
        Started: &now,
    }
    _ = pw.Write(id, st)
    pw.Write(id, st)
    return func(err error) error {
        // TODO: set error on status
        now := time.Now()
        st.Completed = &now
        _ = pw.Write(id, st)
        _ = pw.Close()
        pw.Write(id, st)
        pw.Close()
        return err
    }
}
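
oneOffProgress brackets a single operation with a started/completed status pair; the returned closure is called once with the operation's error and passes it through. A simplified standalone version of the pattern (printing instead of writing progress.Status through a context-derived writer):

package main

import (
    "fmt"
    "time"
)

// oneOff emits a started/done pair around one unit of work and forwards
// the error, like oneOffProgress does with progress.Status.
func oneOff(id string) func(err error) error {
    started := time.Now()
    fmt.Printf("%s: started\n", id)
    return func(err error) error {
        fmt.Printf("%s: done in %v (err=%v)\n", id, time.Since(started), err)
        return err
    }
}

func main() {
    done := oneOff("exporting config")
    // ... do the actual work here ...
    if err := done(nil); err != nil {
        panic(err)
    }
}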

@@ -6,7 +6,6 @@ import (
    "github.com/docker/docker/image"
    "github.com/docker/docker/layer"
    "github.com/moby/buildkit/cache"
    digest "github.com/opencontainers/go-digest"
)

// LayerGetter abstracts away the snapshotter
@@ -58,7 +57,7 @@ type checker struct {
    cache map[string]bool
}

func (c *checker) Exists(key string, chain []digest.Digest) bool {
func (c *checker) Exists(key string) bool {
    if c.opt.ImageStore == nil {
        return false
    }

@@ -35,14 +35,13 @@ func (h *reqBodyHandler) newRequest(rc io.ReadCloser) (string, func()) {
        h.mu.Lock()
        delete(h.requests, id)
        h.mu.Unlock()
        rc.Close()
    }
}

func (h *reqBodyHandler) RoundTrip(req *http.Request) (*http.Response, error) {
    host := req.URL.Host
    if strings.HasPrefix(host, urlPrefix) {
        if req.Method != http.MethodGet {
        if req.Method != "GET" {
            return nil, errors.Errorf("invalid request")
        }
        id := strings.TrimPrefix(host, urlPrefix)
@@ -57,7 +56,7 @@ func (h *reqBodyHandler) RoundTrip(req *http.Request) (*http.Response, error) {

        resp := &http.Response{
            Status:        "200 OK",
            StatusCode:    http.StatusOK,
            StatusCode:    200,
            Body:          rc,
            ContentLength: -1,
        }
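
reqBodyHandler is an http.RoundTripper that answers requests to synthetic hosts from stored request bodies instead of dialing the network. A toy version of the same idea (the type, the "body." prefix, and the request table are invented for the example):

package main

import (
    "fmt"
    "io"
    "net/http"
    "strings"
)

// bodyRT serves requests whose host carries a magic prefix from an
// in-memory table; everything else falls through to the real transport.
type bodyRT struct {
    prefix   string
    requests map[string]io.ReadCloser
}

func (h *bodyRT) RoundTrip(req *http.Request) (*http.Response, error) {
    host := req.URL.Host
    if !strings.HasPrefix(host, h.prefix) {
        return http.DefaultTransport.RoundTrip(req)
    }
    if req.Method != http.MethodGet {
        return nil, fmt.Errorf("invalid request")
    }
    rc, ok := h.requests[strings.TrimPrefix(host, h.prefix)]
    if !ok {
        return nil, fmt.Errorf("unknown request id")
    }
    return &http.Response{
        Status:        "200 OK",
        StatusCode:    http.StatusOK,
        Body:          rc,
        ContentLength: -1,
    }, nil
}

func main() {
    rt := &bodyRT{
        prefix:   "body.",
        requests: map[string]io.ReadCloser{"42": io.NopCloser(strings.NewReader("hello"))},
    }
    req, _ := http.NewRequest(http.MethodGet, "http://body.42/", nil)
    resp, _ := rt.RoundTrip(req)
    b, _ := io.ReadAll(resp.Body)
    fmt.Println(string(b)) // hello
}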

@@ -1,4 +1,3 @@
//go:build !windows
// +build !windows

package worker
@@ -12,7 +11,7 @@ func detectDefaultGCCap(root string) int64 {
    if err := syscall.Statfs(root, &st); err != nil {
        return defaultCap
    }
    diskSize := int64(st.Bsize) * int64(st.Blocks) //nolint unconvert
    diskSize := int64(st.Bsize) * int64(st.Blocks) // nolint unconvert
    avail := diskSize / 10
    return (avail/(1<<30) + 1) * 1e9 // round up
}
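
The arithmetic in detectDefaultGCCap reserves a tenth of the filesystem and rounds up to a whole number of gigabytes, counting in GiB but reporting in GB. A worked sketch of just that computation:

package main

import "fmt"

// gcCap reproduces detectDefaultGCCap's rounding: take a tenth of the
// disk, count whole GiB, add one, and express the result in GB (1e9 bytes).
func gcCap(diskSizeBytes int64) int64 {
    avail := diskSizeBytes / 10
    return (avail/(1<<30) + 1) * 1e9
}

func main() {
    // A 512 GiB filesystem yields a ~52 GB default build-cache cap:
    // 512 GiB / 10 = ~51.2 GiB -> 51 whole GiB -> +1 -> 52 GB.
    fmt.Println(gcCap(512 << 30)) // 52000000000
}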

@@ -1,4 +1,3 @@
//go:build windows
// +build windows

package worker

@@ -4,6 +4,7 @@ import (
    "context"
    "fmt"
    "io"
    "io/ioutil"
    nethttp "net/http"
    "runtime"
    "strings"
@@ -13,7 +14,6 @@ import (
    "github.com/containerd/containerd/images"
    "github.com/containerd/containerd/platforms"
    "github.com/containerd/containerd/rootfs"
    "github.com/docker/docker/builder/builder-next/adapters/containerimage"
    "github.com/docker/docker/distribution"
    distmetadata "github.com/docker/docker/distribution/metadata"
    "github.com/docker/docker/distribution/xfer"
@@ -23,24 +23,21 @@ import (
    "github.com/moby/buildkit/cache"
    "github.com/moby/buildkit/cache/metadata"
    "github.com/moby/buildkit/client"
    "github.com/moby/buildkit/client/llb"
    "github.com/moby/buildkit/executor"
    "github.com/moby/buildkit/exporter"
    localexporter "github.com/moby/buildkit/exporter/local"
    tarexporter "github.com/moby/buildkit/exporter/tar"
    "github.com/moby/buildkit/frontend"
    gw "github.com/moby/buildkit/frontend/gateway/client"
    "github.com/moby/buildkit/session"
    "github.com/moby/buildkit/snapshot"
    "github.com/moby/buildkit/solver"
    "github.com/moby/buildkit/solver/llbsolver/mounts"
    "github.com/moby/buildkit/solver/llbsolver/ops"
    "github.com/moby/buildkit/solver/pb"
    "github.com/moby/buildkit/source"
    "github.com/moby/buildkit/source/git"
    "github.com/moby/buildkit/source/http"
    "github.com/moby/buildkit/source/local"
    "github.com/moby/buildkit/util/archutil"
    "github.com/moby/buildkit/util/compression"
    "github.com/moby/buildkit/util/contentutil"
    "github.com/moby/buildkit/util/progress"
    digest "github.com/opencontainers/go-digest"
@@ -68,7 +65,7 @@ type Opt struct {
    Snapshotter       snapshot.Snapshotter
    ContentStore      content.Store
    CacheManager      cache.Manager
    ImageSource       *containerimage.Source
    ImageSource       source.Source
    DownloadManager   distribution.RootFSDownloadManager
    V2MetadataService distmetadata.V2MetadataService
    Transport         nethttp.RoundTripper
@@ -142,19 +139,7 @@ func (w *Worker) Labels() map[string]string {
}

// Platforms returns one or more platforms supported by the image.
func (w *Worker) Platforms(noCache bool) []ocispec.Platform {
    if noCache {
        pm := make(map[string]struct{}, len(w.Opt.Platforms))
        for _, p := range w.Opt.Platforms {
            pm[platforms.Format(p)] = struct{}{}
        }
        for _, p := range archutil.SupportedPlatforms(noCache) {
            if _, ok := pm[p]; !ok {
                pp, _ := platforms.Parse(p)
                w.Opt.Platforms = append(w.Opt.Platforms, pp)
            }
        }
    }
func (w *Worker) Platforms() []ocispec.Platform {
    if len(w.Opt.Platforms) == 0 {
        return []ocispec.Platform{platforms.DefaultSpec()}
    }
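
The removed Platforms(noCache bool) variant merges freshly detected platforms into the configured list without duplicates while preserving order. The same merge, sketched with plain strings instead of ocispec.Platform values:

package main

import "fmt"

// mergePlatforms mirrors the noCache branch of Platforms: append any newly
// detected platform that is not already present.
func mergePlatforms(configured, detected []string) []string {
    seen := make(map[string]struct{}, len(configured))
    for _, p := range configured {
        seen[p] = struct{}{}
    }
    for _, p := range detected {
        if _, ok := seen[p]; !ok {
            configured = append(configured, p)
            seen[p] = struct{}{}
        }
    }
    return configured
}

func main() {
    fmt.Println(mergePlatforms(
        []string{"linux/amd64"},
        []string{"linux/amd64", "linux/arm64", "linux/386"},
    )) // [linux/amd64 linux/arm64 linux/386]
}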

@@ -166,23 +151,13 @@ func (w *Worker) GCPolicy() []client.PruneInfo {
    return w.Opt.GCPolicy
}

// ContentStore returns content store
func (w *Worker) ContentStore() content.Store {
    return w.Opt.ContentStore
}

// MetadataStore returns the metadata store
func (w *Worker) MetadataStore() *metadata.Store {
    return w.Opt.MetadataStore
}

// LoadRef loads a reference by ID
func (w *Worker) LoadRef(ctx context.Context, id string, hidden bool) (cache.ImmutableRef, error) {
func (w *Worker) LoadRef(id string, hidden bool) (cache.ImmutableRef, error) {
    var opts []cache.RefOption
    if hidden {
        opts = append(opts, cache.NoUpdateLastUsed)
    }
    return w.CacheManager().Get(ctx, id, opts...)
    return w.CacheManager.Get(context.TODO(), id, opts...)
}

// ResolveOp converts a LLB vertex into a LLB operation
@@ -192,9 +167,9 @@ func (w *Worker) ResolveOp(v solver.Vertex, s frontend.FrontendLLBBridge, sm *se
    case *pb.Op_Source:
        return ops.NewSourceOp(v, op, baseOp.Platform, w.SourceManager, sm, w)
    case *pb.Op_Exec:
        return ops.NewExecOp(v, op, baseOp.Platform, w.CacheManager(), sm, w.Opt.MetadataStore, w.Executor(), w)
        return ops.NewExecOp(v, op, baseOp.Platform, w.CacheManager, sm, w.MetadataStore, w.Executor, w)
    case *pb.Op_File:
        return ops.NewFileOp(v, op, w.CacheManager(), w.Opt.MetadataStore, w)
        return ops.NewFileOp(v, op, w.CacheManager, w.MetadataStore, w)
    case *pb.Op_Build:
        return ops.NewBuildOp(v, op, s, w)
    }
@@ -203,18 +178,33 @@ func (w *Worker) ResolveOp(v solver.Vertex, s frontend.FrontendLLBBridge, sm *se
}

// ResolveImageConfig returns image config for an image
func (w *Worker) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt, sm *session.Manager, g session.Group) (digest.Digest, []byte, error) {
    return w.ImageSource.ResolveImageConfig(ctx, ref, opt, sm, g)
func (w *Worker) ResolveImageConfig(ctx context.Context, ref string, opt gw.ResolveImageConfigOpt, sm *session.Manager) (digest.Digest, []byte, error) {
    // ImageSource is typically source/containerimage
    resolveImageConfig, ok := w.ImageSource.(resolveImageConfig)
    if !ok {
        return "", nil, errors.Errorf("worker %q does not implement ResolveImageConfig", w.ID())
    }
    return resolveImageConfig.ResolveImageConfig(ctx, ref, opt, sm)
}

// Exec executes a process directly on a worker
func (w *Worker) Exec(ctx context.Context, meta executor.Meta, rootFS cache.ImmutableRef, stdin io.ReadCloser, stdout, stderr io.WriteCloser) error {
    active, err := w.CacheManager.New(ctx, rootFS)
    if err != nil {
        return err
    }
    defer active.Release(context.TODO())
    return w.Executor.Exec(ctx, meta, active, nil, stdin, stdout, stderr)
}

// DiskUsage returns disk usage report
func (w *Worker) DiskUsage(ctx context.Context, opt client.DiskUsageInfo) ([]*client.UsageInfo, error) {
    return w.CacheManager().DiskUsage(ctx, opt)
    return w.CacheManager.DiskUsage(ctx, opt)
}

// Prune deletes reclaimable build cache
func (w *Worker) Prune(ctx context.Context, ch chan client.UsageInfo, info ...client.PruneInfo) error {
    return w.CacheManager().Prune(ctx, ch, info...)
    return w.CacheManager.Prune(ctx, ch, info...)
}

// Exporter returns exporter by name
@@ -236,7 +226,7 @@ func (w *Worker) Exporter(name string, sm *session.Manager) (exporter.Exporter,
}

// GetRemote returns a remote snapshot reference for a local one
func (w *Worker) GetRemote(ctx context.Context, ref cache.ImmutableRef, createIfNeeded bool, _ compression.Type, _ session.Group) (*solver.Remote, error) {
func (w *Worker) GetRemote(ctx context.Context, ref cache.ImmutableRef, createIfNeeded bool) (*solver.Remote, error) {
    var diffIDs []layer.DiffID
    var err error
    if !createIfNeeded {
@@ -271,20 +261,20 @@ func (w *Worker) GetRemote(ctx context.Context, ref cache.ImmutableRef, createIf

// PruneCacheMounts removes the current cache snapshots for specified IDs
func (w *Worker) PruneCacheMounts(ctx context.Context, ids []string) error {
    mu := mounts.CacheMountsLocker()
    mu := ops.CacheMountsLocker()
    mu.Lock()
    defer mu.Unlock()

    for _, id := range ids {
        id = "cache-dir:" + id
        sis, err := w.Opt.MetadataStore.Search(id)
        sis, err := w.MetadataStore.Search(id)
        if err != nil {
            return err
        }
        for _, si := range sis {
            for _, k := range si.Indexes() {
                if k == id || strings.HasPrefix(k, id+":") {
                    if siCached := w.CacheManager().Metadata(si.ID()); siCached != nil {
                    if siCached := w.CacheManager.Metadata(si.ID()); siCached != nil {
                        si = siCached
                    }
                    if err := cache.CachePolicyDefault(si); err != nil {
@@ -297,7 +287,7 @@ func (w *Worker) PruneCacheMounts(ctx context.Context, ids []string) error {
                        return err
                    }
                    // if ref is unused try to clean it up right away by releasing it
                    if mref, err := w.CacheManager().GetMutable(ctx, si.ID()); err == nil {
                    if mref, err := w.CacheManager.GetMutable(ctx, si.ID()); err == nil {
                        go mref.Release(context.TODO())
                    }
                    break
@@ -306,27 +296,10 @@ func (w *Worker) PruneCacheMounts(ctx context.Context, ids []string) error {
        }
    }

    mounts.ClearActiveCacheMounts()
    ops.ClearActiveCacheMounts()
    return nil
}

func (w *Worker) getRef(ctx context.Context, diffIDs []layer.DiffID, opts ...cache.RefOption) (cache.ImmutableRef, error) {
    var parent cache.ImmutableRef
    if len(diffIDs) > 1 {
        var err error
        parent, err = w.getRef(ctx, diffIDs[:len(diffIDs)-1], opts...)
        if err != nil {
            return nil, err
        }
        defer parent.Release(context.TODO())
    }
    return w.CacheManager().GetByBlob(context.TODO(), ocispec.Descriptor{
        Annotations: map[string]string{
            "containerd.io/uncompressed": diffIDs[len(diffIDs)-1].String(),
        },
    }, parent, opts...)
}
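
getRef materializes one cache ref per prefix of the diff-ID chain, while the older branch below computes layer.CreateChainID directly. Both rest on the OCI chain-ID recursion, sketched here with crypto/sha256 (the digests are dummies):

package main

import (
    "crypto/sha256"
    "fmt"
)

// chainID implements the OCI layer chain-ID recursion that
// layer.CreateChainID applies to a list of diff IDs:
//   ChainID(L1)      = DiffID(L1)
//   ChainID(L1...Ln) = SHA256(ChainID(L1...Ln-1) + " " + DiffID(Ln))
func chainID(diffIDs []string) string {
    if len(diffIDs) == 0 {
        return ""
    }
    id := diffIDs[0]
    for _, d := range diffIDs[1:] {
        id = fmt.Sprintf("sha256:%x", sha256.Sum256([]byte(id+" "+d)))
    }
    return id
}

func main() {
    diffs := []string{
        "sha256:1111111111111111111111111111111111111111111111111111111111111111",
        "sha256:2222222222222222222222222222222222222222222222222222222222222222",
    }
    // Every prefix of the chain has a stable identity, which is what lets
    // getRef reuse parent refs layer by layer.
    fmt.Println(chainID(diffs[:1]))
    fmt.Println(chainID(diffs))
}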

// FromRemote converts a remote snapshot reference to a local one
func (w *Worker) FromRemote(ctx context.Context, remote *solver.Remote) (cache.ImmutableRef, error) {
    rootfs, err := getLayers(ctx, remote.Descriptors)
@@ -349,7 +322,7 @@ func (w *Worker) FromRemote(ctx context.Context, remote *solver.Remote) (cache.I

    defer func() {
        for _, l := range rootfs {
            w.ContentStore().Delete(context.TODO(), l.Blob.Digest)
            w.ContentStore.Delete(context.TODO(), l.Blob.Digest)
        }
    }()

@@ -375,7 +348,7 @@ func (w *Worker) FromRemote(ctx context.Context, remote *solver.Remote) (cache.I
        if v, ok := remote.Descriptors[i].Annotations["buildkit/description"]; ok {
            descr = v
        }
        ref, err := w.getRef(ctx, rootFS.DiffIDs[:i+1], cache.WithDescription(descr), cache.WithCreationTime(tm))
        ref, err := w.CacheManager.GetFromSnapshotter(ctx, string(layer.CreateChainID(rootFS.DiffIDs[:i+1])), cache.WithDescription(descr), cache.WithCreationTime(tm))
        if err != nil {
            return nil, err
        }
@@ -388,16 +361,6 @@ func (w *Worker) FromRemote(ctx context.Context, remote *solver.Remote) (cache.I
    return nil, errors.Errorf("unreachable")
}

// Executor returns executor.Executor for running processes
func (w *Worker) Executor() executor.Executor {
    return w.Opt.Executor
}

// CacheManager returns cache.Manager for accessing local storage
func (w *Worker) CacheManager() cache.Manager {
    return w.Opt.CacheManager
}

type discardProgress struct{}

func (*discardProgress) WriteProgress(_ pkgprogress.Progress) error {
@@ -428,24 +391,21 @@ func (ld *layerDescriptor) DiffID() (layer.DiffID, error) {

func (ld *layerDescriptor) Download(ctx context.Context, progressOutput pkgprogress.Output) (io.ReadCloser, int64, error) {
    done := oneOffProgress(ld.pctx, fmt.Sprintf("pulling %s", ld.desc.Digest))

    // TODO should this write output to progressOutput? Or use something similar to loggerFromContext()? see https://github.com/moby/buildkit/commit/aa29e7729464f3c2a773e27795e584023c751cb8
    discardLogs := func(_ []byte) {}
    if err := contentutil.Copy(ctx, ld.w.ContentStore(), ld.provider, ld.desc, discardLogs); err != nil {
    if err := contentutil.Copy(ctx, ld.w.ContentStore, ld.provider, ld.desc); err != nil {
        return nil, 0, done(err)
    }
    _ = done(nil)
    done(nil)

    ra, err := ld.w.ContentStore().ReaderAt(ctx, ld.desc)
    ra, err := ld.w.ContentStore.ReaderAt(ctx, ld.desc)
    if err != nil {
        return nil, 0, err
    }

    return io.NopCloser(content.NewReader(ra)), ld.desc.Size, nil
    return ioutil.NopCloser(content.NewReader(ra)), ld.desc.Size, nil
}

func (ld *layerDescriptor) Close() {
    // ld.is.ContentStore().Delete(context.TODO(), ld.desc.Digest)
    // ld.is.ContentStore.Delete(context.TODO(), ld.desc.Digest)
}

func (ld *layerDescriptor) Registered(diffID layer.DiffID) {
@@ -483,17 +443,21 @@ func oneOffProgress(ctx context.Context, id string) func(err error) error {
    st := progress.Status{
        Started: &now,
    }
    _ = pw.Write(id, st)
    pw.Write(id, st)
    return func(err error) error {
        // TODO: set error on status
        now := time.Now()
        st.Completed = &now
        _ = pw.Write(id, st)
        _ = pw.Close()
        pw.Write(id, st)
        pw.Close()
        return err
    }
}

type resolveImageConfig interface {
    ResolveImageConfig(ctx context.Context, ref string, opt gw.ResolveImageConfigOpt, sm *session.Manager) (digest.Digest, []byte, error)
}

type emptyProvider struct {
}

@@ -5,8 +5,8 @@ import (
    "strings"
    "testing"

    "gotest.tools/v3/assert"
    is "gotest.tools/v3/assert/cmp"
    "gotest.tools/assert"
    is "gotest.tools/assert/cmp"
)

func strPtr(source string) *string {

@@ -5,14 +5,17 @@ import (
    "context"
    "fmt"
    "io"
    "io/ioutil"
    "sort"
    "strings"
    "time"

    "github.com/containerd/containerd/platforms"
    "github.com/docker/docker/api/types"
    "github.com/docker/docker/api/types/backend"
    "github.com/docker/docker/api/types/container"
    "github.com/docker/docker/builder"
    "github.com/docker/docker/builder/fscache"
    "github.com/docker/docker/builder/remotecontext"
    "github.com/docker/docker/errdefs"
    "github.com/docker/docker/pkg/idtools"
@@ -22,6 +25,7 @@ import (
    "github.com/moby/buildkit/frontend/dockerfile/instructions"
    "github.com/moby/buildkit/frontend/dockerfile/parser"
    "github.com/moby/buildkit/frontend/dockerfile/shell"
    "github.com/moby/buildkit/session"
    specs "github.com/opencontainers/image-spec/specs-go/v1"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
@@ -45,19 +49,31 @@ const (
    stepFormat = "Step %d/%d : %v"
)

// SessionGetter is object used to get access to a session by uuid
type SessionGetter interface {
    Get(ctx context.Context, uuid string) (session.Caller, error)
}

// BuildManager is shared across all Builder objects
type BuildManager struct {
    idMapping *idtools.IdentityMapping
    backend   builder.Backend
    pathCache pathCache // TODO: make this persistent
    sg        SessionGetter
    fsCache   *fscache.FSCache
}

// NewBuildManager creates a BuildManager
func NewBuildManager(b builder.Backend, identityMapping *idtools.IdentityMapping) (*BuildManager, error) {
func NewBuildManager(b builder.Backend, sg SessionGetter, fsCache *fscache.FSCache, identityMapping *idtools.IdentityMapping) (*BuildManager, error) {
    bm := &BuildManager{
        backend:   b,
        pathCache: &syncmap.Map{},
        sg:        sg,
        idMapping: identityMapping,
        fsCache:   fsCache,
    }
    if err := fsCache.RegisterTransport(remotecontext.ClientSessionRemote, NewClientSessionTransport()); err != nil {
        return nil, err
    }
    return bm, nil
}
@@ -84,6 +100,12 @@ func (bm *BuildManager) Build(ctx context.Context, config backend.BuildConfig) (
    ctx, cancel := context.WithCancel(ctx)
    defer cancel()

    if src, err := bm.initializeClientSession(ctx, cancel, config.Options); err != nil {
        return nil, err
    } else if src != nil {
        source = src
    }

    builderOptions := builderOptions{
        Options:        config.Options,
        ProgressWriter: config.ProgressWriter,
@@ -98,6 +120,39 @@ func (bm *BuildManager) Build(ctx context.Context, config backend.BuildConfig) (
    return b.build(source, dockerfile)
}

func (bm *BuildManager) initializeClientSession(ctx context.Context, cancel func(), options *types.ImageBuildOptions) (builder.Source, error) {
    if options.SessionID == "" || bm.sg == nil {
        return nil, nil
    }
    logrus.Debug("client is session enabled")

    connectCtx, cancelCtx := context.WithTimeout(ctx, sessionConnectTimeout)
    defer cancelCtx()

    c, err := bm.sg.Get(connectCtx, options.SessionID)
    if err != nil {
        return nil, err
    }
    go func() {
        <-c.Context().Done()
        cancel()
    }()
    if options.RemoteContext == remotecontext.ClientSessionRemote {
        st := time.Now()
        csi, err := NewClientSessionSourceIdentifier(ctx, bm.sg, options.SessionID)
        if err != nil {
            return nil, err
        }
        src, err := bm.fsCache.SyncFrom(ctx, csi)
        if err != nil {
            return nil, err
        }
        logrus.Debugf("sync-time: %v", time.Since(st))
        return src, nil
    }
    return nil, nil
}

// builderOptions are the dependencies required by the builder
type builderOptions struct {
    Options *types.ImageBuildOptions
@@ -189,8 +244,7 @@ func (b *Builder) build(source builder.Source, dockerfile *parser.Result) (*buil

    stages, metaArgs, err := instructions.Parse(dockerfile.AST)
    if err != nil {
        var uiErr *instructions.UnknownInstruction
        if errors.As(err, &uiErr) {
        if instructions.IsUnknownInstruction(err) {
            buildsFailed.WithValues(metricsUnknownInstructionError).Inc()
        }
        return nil, errdefs.InvalidParameter(err)
@@ -234,10 +288,8 @@ func processMetaArg(meta instructions.ArgCommand, shlex *shell.Lex, args *BuildA
    }); err != nil {
        return err
    }
    for _, arg := range meta.Args {
        args.AddArg(arg.Key, arg.Value)
        args.AddMetaArg(arg.Key, arg.Value)
    }
    args.AddArg(meta.Key, meta.Value)
    args.AddMetaArg(meta.Key, meta.Value)
    return nil
}

@@ -256,10 +308,10 @@ func (b *Builder) dispatchDockerfileWithCancellation(parseResult []instructions.
        totalCommands += len(stage.Commands)
    }
    shlex := shell.NewLex(escapeToken)
    for i := range metaArgs {
        currentCommandIndex = printCommand(b.Stdout, currentCommandIndex, totalCommands, &metaArgs[i])
    for _, meta := range metaArgs {
        currentCommandIndex = printCommand(b.Stdout, currentCommandIndex, totalCommands, &meta)

        err := processMetaArg(metaArgs[i], shlex, buildArgs)
        err := processMetaArg(meta, shlex, buildArgs)
        if err != nil {
            return nil, err
        }
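
The two loop shapes above differ in a subtle way: before Go 1.22 the range variable was reused across iterations, so &meta handed the same address to every printCommand call, whereas &metaArgs[i] always points at the element itself. A small illustration of the safe form:

package main

import "fmt"

// Taking the address of a range variable was a classic pitfall before
// Go 1.22: the variable was reused, so every iteration yielded the same
// pointer. Indexing the slice (&xs[i]) has always been safe, which is why
// the newer loop uses &metaArgs[i] rather than &meta.
func main() {
    xs := []string{"FOO", "BAR"}

    var ptrs []*string
    for i := range xs {
        ptrs = append(ptrs, &xs[i]) // pointer to the element itself
    }
    for _, p := range ptrs {
        fmt.Println(*p) // FOO, then BAR, under any Go version
    }
}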

@@ -267,8 +319,7 @@ func (b *Builder) dispatchDockerfileWithCancellation(parseResult []instructions.

    stagesResults := newStagesBuildResults()

    for _, s := range parseResult {
        stage := s
    for _, stage := range parseResult {
        if err := stagesResults.checkStageNameAvailable(stage.Name); err != nil {
            return nil, err
        }
@@ -348,8 +399,8 @@ func BuildFromConfig(config *container.Config, changes []string, os string) (*co
        }
    }

    b.Stdout = io.Discard
    b.Stderr = io.Discard
    b.Stdout = ioutil.Discard
    b.Stderr = ioutil.Discard
    b.disableCommit = true

    var commands []instructions.Command

@@ -1,4 +1,3 @@
//go:build !windows
// +build !windows

package dockerfile // import "github.com/docker/docker/builder/dockerfile"

builder/dockerfile/clientsession.go (new file, 76 lines)
@@ -0,0 +1,76 @@
package dockerfile // import "github.com/docker/docker/builder/dockerfile"

import (
    "context"
    "time"

    "github.com/docker/docker/builder/fscache"
    "github.com/docker/docker/builder/remotecontext"
    "github.com/moby/buildkit/session"
    "github.com/moby/buildkit/session/filesync"
    "github.com/pkg/errors"
)

const sessionConnectTimeout = 5 * time.Second

// ClientSessionTransport is a transport for copying files from docker client
// to the daemon.
type ClientSessionTransport struct{}

// NewClientSessionTransport returns new ClientSessionTransport instance
func NewClientSessionTransport() *ClientSessionTransport {
    return &ClientSessionTransport{}
}

// Copy data from a remote to a destination directory.
func (cst *ClientSessionTransport) Copy(ctx context.Context, id fscache.RemoteIdentifier, dest string, cu filesync.CacheUpdater) error {
    csi, ok := id.(*ClientSessionSourceIdentifier)
    if !ok {
        return errors.New("invalid identifier for client session")
    }

    return filesync.FSSync(ctx, csi.caller, filesync.FSSendRequestOpt{
        IncludePatterns: csi.includePatterns,
        DestDir:         dest,
        CacheUpdater:    cu,
    })
}

// ClientSessionSourceIdentifier is an identifier that can be used for requesting
// files from remote client
type ClientSessionSourceIdentifier struct {
    includePatterns []string
    caller          session.Caller
    uuid            string
}

// NewClientSessionSourceIdentifier returns new ClientSessionSourceIdentifier instance
func NewClientSessionSourceIdentifier(ctx context.Context, sg SessionGetter, uuid string) (*ClientSessionSourceIdentifier, error) {
    csi := &ClientSessionSourceIdentifier{
        uuid: uuid,
    }
    caller, err := sg.Get(ctx, uuid)
    if err != nil {
        return nil, errors.Wrapf(err, "failed to get session for %s", uuid)
    }

    csi.caller = caller
    return csi, nil
}

// Transport returns transport identifier for remote identifier
func (csi *ClientSessionSourceIdentifier) Transport() string {
    return remotecontext.ClientSessionRemote
}

// SharedKey returns shared key for remote identifier. Shared key is used
// for finding the base for a repeated transfer.
func (csi *ClientSessionSourceIdentifier) SharedKey() string {
    return csi.caller.SharedKey()
}

// Key returns unique key for remote identifier. Requests with same key return
// same data.
func (csi *ClientSessionSourceIdentifier) Key() string {
    return csi.uuid
}
@@ -45,7 +45,7 @@ var errCancelled = errors.New("build cancelled")
// Run a container by ID
func (c *containerManager) Run(ctx context.Context, cID string, stdout, stderr io.Writer) (err error) {
    attached := make(chan struct{})
    errCh := make(chan error, 1)
    errCh := make(chan error)
    go func() {
        errCh <- c.backend.ContainerAttachRaw(cID, nil, stdout, stderr, true, attached)
    }()
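
Buffering errCh with capacity 1 lets the attach goroutine complete its send even if Run stops waiting on another path; with an unbuffered channel the send could block forever and leak the goroutine. A self-contained illustration (attach is a stand-in for ContainerAttachRaw):

package main

import (
    "errors"
    "fmt"
    "time"
)

// attach simulates ContainerAttachRaw finishing after the caller has
// already given up waiting.
func attach() error { return errors.New("attach failed") }

func main() {
    errCh := make(chan error, 1) // capacity 1: the send below never blocks
    go func() {
        errCh <- attach() // unbuffered, this send could leak the goroutine
    }()

    select {
    case err := <-errCh:
        fmt.Println("attach result:", err)
    case <-time.After(10 * time.Millisecond):
        fmt.Println("gave up waiting; sender still exits cleanly")
    }
}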

@@ -242,8 +242,6 @@ func (o *copier) calcCopyInfo(origPath string, allowWildcards bool) ([]copyInfo,
    // Deal with the single file case
    copyInfo, err := copyInfoForFile(o.source, origPath)
    switch {
    case imageSource == nil && errors.Is(err, os.ErrNotExist):
        return nil, errors.Wrapf(err, "file not found in build context or excluded by .dockerignore")
    case err != nil:
        return nil, err
    case copyInfo.hash != "":
@@ -317,10 +315,6 @@ func (o *copier) copyWithWildcards(origPath string) ([]copyInfo, error) {
func copyInfoForFile(source builder.Source, path string) (copyInfo, error) {
    fi, err := remotecontext.StatAt(source, path)
    if err != nil {
        if errors.Is(err, os.ErrNotExist) {
            // return the relative path in the error, which is more user-friendly than the full path to the tmp-dir
            return copyInfo{}, errors.WithStack(&os.PathError{Op: "stat", Path: path, Err: os.ErrNotExist})
        }
        return copyInfo{}, err
    }

@@ -565,11 +559,8 @@ func copyFile(archiver Archiver, source, dest *copyEndpoint, identity *idtools.I
    // Normal containers
    if identity == nil {
        // Use system.MkdirAll here, which is a custom version of os.MkdirAll
        // modified for use on Windows to handle volume GUID paths. These paths
        // are of the form \\?\Volume{<GUID>}\<path>. An example would be:
        // \\?\Volume{dae8d3ac-b9a1-11e9-88eb-e8554b2ba1db}\bin\busybox.exe

        if err := system.MkdirAll(filepath.Dir(dest.path), 0755); err != nil {
        // modified for use on Windows to handle volume GUID paths (\\?\{dae8d3ac-b9a1-11e9-88eb-e8554b2ba1db}\path\)
        if err := system.MkdirAll(filepath.Dir(dest.path), 0755, ""); err != nil {
            return err
        }
    } else {
@@ -597,7 +588,7 @@ func endsInSlash(driver containerfs.Driver, path string) bool {
func isExistingDirectory(point *copyEndpoint) (bool, error) {
    destStat, err := point.driver.Stat(point.path)
    switch {
    case errors.Is(err, os.ErrNotExist):
    case os.IsNotExist(err):
        return false, nil
    case err != nil:
        return false, err

@@ -5,9 +5,9 @@ import (
    "testing"

    "github.com/docker/docker/pkg/containerfs"
    "gotest.tools/v3/assert"
    is "gotest.tools/v3/assert/cmp"
    "gotest.tools/v3/fs"
    "gotest.tools/assert"
    is "gotest.tools/assert/cmp"
    "gotest.tools/fs"
)

func TestIsExistingDirectory(t *testing.T) {
Some files were not shown because too many files have changed in this diff.