Mirror of https://github.com/moby/moby.git, synced 2026-01-13 03:31:39 +00:00

Compare commits

268 Commits
| SHA1 |
|---|
| 200b524eff |
| 37ec11c8e3 |
| 6e2e248bdf |
| 1782e74e54 |
| 91703956df |
| fdaec73c1c |
| fd4670161d |
| a36f6bfc36 |
| 6c71500862 |
| 6e84332e53 |
| d51db2f20b |
| c97602ef99 |
| 605bc1ffc5 |
| 1a60fdbdce |
| ad59cba5a0 |
| e2ddb3a2ff |
| 2f867e6b4c |
| 8c51888016 |
| 989e7f5d3a |
| f660ef2c25 |
| 5f40e17cfd |
| 8c91e9672c |
| 613c2f27ed |
| e4b8756784 |
| ffeebb217c |
| c7fca75c03 |
| 88330c9aac |
| ba8664cc22 |
| 24c6c3eb52 |
| 0841c61862 |
| 2e4c5c57c3 |
| db7a8cb7ba |
| 6b0ba3745d |
| 5c15222f0f |
| f935add758 |
| 3c1fa928cb |
| 37cf1cd68e |
| 02c953cf36 |
| 9dc0488d1c |
| 278f1a130b |
| 3744b45ba8 |
| a818442de7 |
| 19e733f89f |
| e9ecd5e486 |
| 7b9ec00eec |
| 748f37022d |
| 1d0353548a |
| 02b07d4ede |
| caabacdda5 |
| d158b9e74f |
| 317e0acc4e |
| 325f6ee47a |
| c51d247f03 |
| eb137ff176 |
| 03dfb0ba53 |
| a79fabbfe8 |
| fc274cd2ff |
| d4f336d8ef |
| f80c6d7ae1 |
| e042692db1 |
| ce8b8f1cf3 |
| 24f71e3998 |
| 484a3c3ad0 |
| 6646d08782 |
| a9ae6c7547 |
| cc7773c787 |
| b2185081d9 |
| a6d4103450 |
| b6430ba413 |
| d161dfe1a3 |
| 8afe9f422d |
| 42b58273f6 |
| a8572d3e8e |
| 01c732d40a |
| 3482a3b14a |
| 1ffccb515a |
| 55a4be8cf5 |
| 1043f40fb5 |
| d21754a3fb |
| b54b6d145c |
| 43dedf3975 |
| a69626afb1 |
| ad7105260f |
| b66c7ad62e |
| 5cd4797c89 |
| 7dfd23acf1 |
| 6c633fbe18 |
| 2c64d7c858 |
| 82a4418f57 |
| e7a4385e24 |
| 09251ef9ca |
| 00ad8e7c57 |
| 5fffdb3226 |
| e32fc16daa |
| 9c93de59da |
| 73911117b3 |
| 8fe3b4d2ec |
| a1f6b04a8d |
| 7a566c0e4a |
| 61a250fd23 |
| 8f18feabeb |
| 08a77f11a6 |
| 4fd103ae26 |
| 52a6fc02b1 |
| 12b8ec42b6 |
| 23122e4d52 |
| 04a6b49a89 |
| c488cf7e95 |
| c95cf2a5d3 |
| 9606931393 |
| 850fff5fc7 |
| 0d17f40994 |
| 34867646af |
| 0b2d88d328 |
| 27b0fee846 |
| 4cc45d91eb |
| 67c602c3fe |
| db7f375d6a |
| 7d6ec38402 |
| 64a05e3d16 |
| 262abed3d2 |
| e137337fe6 |
| c9c87d76d6 |
| a4decd0c4c |
| 25bec4665b |
| 56cc26f927 |
| 4980e48e4b |
| 299385de7f |
| 8486ea11ae |
| 5b8cee93b5 |
| 49556e0470 |
| 02fe71843e |
| 757650e8dc |
| 9e06a42123 |
| e8eb3ca4ee |
| 673f04f0b1 |
| 65bf95f3df |
| 9fc9c3099d |
| 37cb9e7300 |
| 59be98043a |
| f5749085e9 |
| 6236f7b8a4 |
| 9512677feb |
| 5bb36e25ba |
| 45654ed012 |
| 334099505f |
| e1783a72d1 |
| c27094289a |
| 0afe0309bd |
| 41f3cea42f |
| 9cf6464b63 |
| 52a3c39506 |
| 4fc9786f78 |
| 46dfcd83bf |
| c40a7d393b |
| fb51c760c4 |
| 66bfae52bc |
| 6ca0546f25 |
| 2822d49c10 |
| 64b0c76151 |
| 5591f0b1ee |
| 4594e70063 |
| 7236817725 |
| 78746ca9e8 |
| 5853cd510c |
| 6ee7d86a12 |
| ae6284a623 |
| 8d624c31dd |
| 1222a7081a |
| 6f1145e740 |
| ef87a664ef |
| 3dc9802a83 |
| fd1fe0b702 |
| fdaf08a57b |
| 4d0b8cc2d7 |
| 7c63f178e7 |
| b811212ccd |
| fa8ac94616 |
| 2199ada691 |
| fd7611ff1f |
| c20e8dffbb |
| 734e7a8e55 |
| dbfc648a94 |
| 8e67dfab97 |
| b38d454861 |
| 4b8336f7cf |
| 2697d2b687 |
| 690e097fed |
| dc0a4db7c9 |
| f58f842143 |
| 7184074c08 |
| 6679a5faeb |
| 90c72824c3 |
| ad08dc12e0 |
| 7b54720ccb |
| 0922d32bce |
| 148d9f0e58 |
| 5070e418b8 |
| 054c3c2931 |
| 9406f3622d |
| 9816bfcaf5 |
| 52d6ad2a68 |
| 58e5151270 |
| 6e5ed2ccce |
| 54bd14a3fe |
| c9ddc6effc |
| 16836e60bc |
| e44436c31f |
| 34b3cf4b0c |
| 51618f7a83 |
| b499acc0e8 |
| 67541d5841 |
| 989fab3c71 |
| 6bf8dfc4d8 |
| e090646d47 |
| b3bb2aabb8 |
| e69efe2ef5 |
| ccab609365 |
| 0a6866b839 |
| cce1763d57 |
| 3d67dd0465 |
| 73e2f72a7c |
| 2926a45be6 |
| b73fd4d936 |
| bb2adc4496 |
| b501aa82d5 |
| 46a703bb3b |
| ff9340ca2c |
| 90a90ae2e1 |
| 66ed41aec8 |
| ea2e2c5427 |
| a5d731edec |
| fc576226b2 |
| c24fd7a2c3 |
| 5fb0a7ced7 |
| 2c26eac566 |
| 5badfb40eb |
| f43fc6650c |
| 85361af1f7 |
| ee40a9ebcd |
| e8620110fc |
| e988001872 |
| 6531bac59b |
| 2a82480df9 |
| 84a5b528ae |
| 511741735e |
| 2b8bc86679 |
| 4e2dbfa1af |
| 3a3bfcbf47 |
| 7be43586af |
| d7085abec2 |
| fc1d808c44 |
| 7485ef7e46 |
| d2ecc7bad1 |
| f121eccf29 |
| 00a9cf39ed |
| c2d0053207 |
| 4c35d81147 |
| 28150fc70c |
| d2c3163642 |
| 3153708f13 |
| 2f94f10342 |
| b8a4fe5f8f |
| 648704522b |
| 4032b6778d |
| 5fa80da2d3 |
| be371291bc |
| 1d531ff64f |
```
@@ -7,9 +7,7 @@ curators:
- ehazlett
- fntlnz
- gianarb
- kolyshkin
- mgoelzer
- olljanat
- programmerq
- rheinwein
- ripcurld0
@@ -17,5 +15,3 @@ curators:

features:
- comments
- pr_description_required
```
```
@@ -3,4 +3,5 @@ bundles
vendor/pkg
.go-pkg-cache
.git
hack/integration-cli-on-swarm/integration-cli-on-swarm
```
.github/CODEOWNERS (vendored, 4 changes)
```
@@ -4,6 +4,7 @@
# KEEP THIS FILE SORTED. Order is important. Last match takes precedence.

builder/** @tonistiigi
client/** @dnephin
contrib/mkimage/** @tianon
daemon/graphdriver/devmapper/** @rhvgoyal
daemon/graphdriver/lcow/** @johnstep @jhowardmsft
@@ -12,5 +13,8 @@ daemon/graphdriver/overlay2/** @dmcgowan
daemon/graphdriver/windows/** @johnstep @jhowardmsft
daemon/logger/awslogs/** @samuelkarp
hack/** @tianon
hack/integration-cli-on-swarm/** @AkihiroSuda
integration-cli/** @vdemeester
integration/** @vdemeester
plugin/** @cpuguy83
project/** @thaJeztah
```
.gitignore (vendored, 5 changes)
```
@@ -3,7 +3,6 @@
# please consider a global .gitignore https://help.github.com/articles/ignoring-files
*.exe
*.exe~
*.gz
*.orig
test.main
.*.swp
@@ -20,6 +19,6 @@ contrib/builder/rpm/*/changelog
dockerversion/version_autogen.go
dockerversion/version_autogen_unix.go
vendor/pkg/
go-test-report.json
hack/integration-cli-on-swarm/integration-cli-on-swarm
coverage.txt
profile.out
junit-report.xml
```
.mailmap (70 changes)
```
@@ -17,9 +17,7 @@ AJ Bowen <aj@soulshake.net>
AJ Bowen <aj@soulshake.net> <aj@gandi.net>
AJ Bowen <aj@soulshake.net> <amy@gandi.net>
Akihiro Matsushima <amatsusbit@gmail.com> <amatsus@users.noreply.github.com>
Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp> <suda.kyoto@gmail.com>
Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp> <suda.akihiro@lab.ntt.co.jp>
Akihiro Suda <suda.akihiro@lab.ntt.co.jp> <suda.kyoto@gmail.com>
Aleksa Sarai <asarai@suse.de>
Aleksa Sarai <asarai@suse.de> <asarai@suse.com>
Aleksa Sarai <asarai@suse.de> <cyphar@cyphar.com>
@@ -37,8 +35,6 @@ Allen Sun <allensun.shl@alibaba-inc.com> <allen.sun@daocloud.io>
Allen Sun <allensun.shl@alibaba-inc.com> <shlallen1990@gmail.com>
Andrew Weiss <andrew.weiss@docker.com> <andrew.weiss@microsoft.com>
Andrew Weiss <andrew.weiss@docker.com> <andrew.weiss@outlook.com>
Andrey Kolomentsev <andrey.kolomentsev@docker.com>
Andrey Kolomentsev <andrey.kolomentsev@docker.com> <andrey.kolomentsev@gmail.com>
André Martins <aanm90@gmail.com> <martins@noironetworks.com>
Andy Rothfusz <github@developersupport.net> <github@metaliveblog.com>
Andy Smith <github@anarkystic.com>
@@ -59,11 +55,9 @@ Ben Bonnefoy <frenchben@docker.com>
Ben Golub <ben.golub@dotcloud.com>
Ben Toews <mastahyeti@gmail.com> <mastahyeti@users.noreply.github.com>
Benoit Chesneau <bchesneau@gmail.com>
Bevisy Zhang <binbin36520@gmail.com>
Bhiraj Butala <abhiraj.butala@gmail.com>
Bhumika Bayani <bhumikabayani@gmail.com>
Bilal Amarni <bilal.amarni@gmail.com> <bamarni@users.noreply.github.com>
Bily Zhang <xcoder@tenxcloud.com>
Bill Wang <ozbillwang@gmail.com> <SydOps@users.noreply.github.com>
Bin Liu <liubin0329@gmail.com>
Bin Liu <liubin0329@gmail.com> <liubin0329@users.noreply.github.com>
@@ -83,7 +77,6 @@ Chen Chuanliang <chen.chuanliang@zte.com.cn>
Chen Mingjie <chenmingjie0828@163.com>
Chen Qiu <cheney-90@hotmail.com>
Chen Qiu <cheney-90@hotmail.com> <21321229@zju.edu.cn>
Chengfei Shang <cfshang@alauda.io>
Chris Dias <cdias@microsoft.com>
Chris McKinnel <chris.mckinnel@tangentlabs.co.uk>
Christopher Biscardi <biscarch@sketcht.com>
@@ -104,7 +97,6 @@ Daniel Garcia <daniel@danielgarcia.info>
Daniel Gasienica <daniel@gasienica.ch> <dgasienica@zynga.com>
Daniel Goosen <daniel.goosen@surveysampling.com> <djgoosen@users.noreply.github.com>
Daniel Grunwell <mwgrunny@gmail.com>
Daniel Hiltgen <daniel.hiltgen@docker.com> <dhiltgen@users.noreply.github.com>
Daniel J Walsh <dwalsh@redhat.com>
Daniel Mizyrycki <daniel.mizyrycki@dotcloud.com> <daniel@dotcloud.com>
Daniel Mizyrycki <daniel.mizyrycki@dotcloud.com> <mzdaniel@glidelink.net>
@@ -112,7 +104,6 @@ Daniel Mizyrycki <daniel.mizyrycki@dotcloud.com> <root@vagrant-ubuntu-12.10.vagr
Daniel Nephin <dnephin@docker.com> <dnephin@gmail.com>
Daniel Norberg <dano@spotify.com> <daniel.norberg@gmail.com>
Daniel Watkins <daniel@daniel-watkins.co.uk>
Daniel Zhang <jmzwcn@gmail.com>
Danny Yates <danny@codeaholics.org> <Danny.Yates@mailonline.co.uk>
Darren Shepherd <darren.s.shepherd@gmail.com> <darren@rancher.com>
Dattatraya Kumbhar <dattatraya.kumbhar@gslab.com>
@@ -127,8 +118,6 @@ Deshi Xiao <dxiao@redhat.com> <dsxiao@dataman-inc.com>
Deshi Xiao <dxiao@redhat.com> <xiaods@gmail.com>
Diego Siqueira <dieg0@live.com>
Diogo Monica <diogo@docker.com> <diogo.monica@gmail.com>
Dmitry Sharshakov <d3dx12.xx@gmail.com>
Dmitry Sharshakov <d3dx12.xx@gmail.com> <sh7dm@outlook.com>
Dominik Honnef <dominik@honnef.co> <dominikh@fork-bomb.org>
Doug Davis <dug@us.ibm.com> <duglin@users.noreply.github.com>
Doug Tangren <d.tangren@gmail.com>
@@ -157,15 +146,12 @@ Fengtu Wang <wangfengtu@huawei.com> <wangfengtu@huawei.com>
Francisco Carriedo <fcarriedo@gmail.com>
Frank Rosquin <frank.rosquin+github@gmail.com> <frank.rosquin@gmail.com>
Frederick F. Kautz IV <fkautz@redhat.com> <fkautz@alumni.cmu.edu>
Fu JinLin <withlin@yeah.net>
Gabriel Nicolas Avellaneda <avellaneda.gabriel@gmail.com>
Gaetan de Villele <gdevillele@gmail.com>
Gang Qiao <qiaohai8866@gmail.com> <1373319223@qq.com>
Geon Kim <geon0250@gmail.com>
George Kontridze <george@bugsnag.com>
Gerwim Feiken <g.feiken@tfe.nl> <gerwim@gmail.com>
Giampaolo Mancini <giampaolo@trampolineup.com>
Giovan Isa Musthofa <giovanism@outlook.co.id>
Gopikannan Venugopalsamy <gopikannan.venugopalsamy@gmail.com>
Gou Rao <gou@portworx.com> <gourao@users.noreply.github.com>
Greg Stephens <greg@udon.org>
@@ -189,7 +175,6 @@ Harry Zhang <harryz@hyper.sh> <resouer@gmail.com>
Harry Zhang <resouer@163.com>
Harshal Patil <harshal.patil@in.ibm.com> <harche@users.noreply.github.com>
Helen Xie <chenjg@harmonycloud.cn>
Hiroyuki Sasagawa <hs19870702@gmail.com>
Hollie Teal <hollie@docker.com>
Hollie Teal <hollie@docker.com> <hollie.teal@docker.com>
Hollie Teal <hollie@docker.com> <hollietealok@users.noreply.github.com>
@@ -198,32 +183,26 @@ Huu Nguyen <huu@prismskylabs.com> <whoshuu@gmail.com>
Hyzhou Zhy <hyzhou.zhy@alibaba-inc.com>
Hyzhou Zhy <hyzhou.zhy@alibaba-inc.com> <1187766782@qq.com>
Ilya Khlopotov <ilya.khlopotov@gmail.com>
Iskander Sharipov <quasilyte@gmail.com>
Ivan Markin <sw@nogoegst.net> <twim@riseup.net>
Jack Laxson <jackjrabbit@gmail.com>
Jacob Atzen <jacob@jacobatzen.dk> <jatzen@gmail.com>
Jacob Tomlinson <jacob@tom.linson.uk> <jacobtomlinson@users.noreply.github.com>
Jaivish Kothari <janonymous.codevulture@gmail.com>
Jamie Hannaford <jamie@limetree.org> <jamie.hannaford@rackspace.com>
Jean Rouge <rougej+github@gmail.com> <jer329@cornell.edu>
Jean-Baptiste Barth <jeanbaptiste.barth@gmail.com>
Jean-Baptiste Dalido <jeanbaptiste@appgratis.com>
Jean-Tiare Le Bigot <jt@yadutaf.fr> <admin@jtlebi.fr>
Jeff Anderson <jeff@docker.com> <jefferya@programmerq.net>
Jeff Nickoloff <jeff.nickoloff@gmail.com> <jeff@allingeek.com>
Jeroen Franse <jeroenfranse@gmail.com>
Jessica Frazelle <acidburn@microsoft.com>
Jessica Frazelle <acidburn@microsoft.com> <acidburn@docker.com>
Jessica Frazelle <acidburn@microsoft.com> <acidburn@google.com>
Jessica Frazelle <acidburn@microsoft.com> <jess@docker.com>
Jessica Frazelle <acidburn@microsoft.com> <jess@mesosphere.com>
Jessica Frazelle <acidburn@microsoft.com> <jessfraz@google.com>
Jessica Frazelle <acidburn@microsoft.com> <jfrazelle@users.noreply.github.com>
Jessica Frazelle <acidburn@microsoft.com> <me@jessfraz.com>
Jessica Frazelle <acidburn@microsoft.com> <princess@docker.com>
Jian Liao <jliao@alauda.io>
Jiang Jinyang <jjyruby@gmail.com>
Jiang Jinyang <jjyruby@gmail.com> <jiangjinyang@outlook.com>
Jessica Frazelle <jessfraz@google.com>
Jessica Frazelle <jessfraz@google.com> <acidburn@docker.com>
Jessica Frazelle <jessfraz@google.com> <acidburn@google.com>
Jessica Frazelle <jessfraz@google.com> <jess@docker.com>
Jessica Frazelle <jessfraz@google.com> <jess@mesosphere.com>
Jessica Frazelle <jessfraz@google.com> <jfrazelle@users.noreply.github.com>
Jessica Frazelle <jessfraz@google.com> <me@jessfraz.com>
Jessica Frazelle <jessfraz@google.com> <princess@docker.com>
Jim Galasyn <jim.galasyn@docker.com>
Jiuyue Ma <majiuyue@huawei.com>
Joey Geiger <jgeiger@gmail.com>
@@ -244,9 +223,7 @@ Jon Surrell <jon.surrell@gmail.com> <jon.surrell@automattic.com>
Jordan Arentsen <blissdev@gmail.com>
Jordan Jennings <jjn2009@gmail.com> <jjn2009@users.noreply.github.com>
Jorit Kleine-Möllhoff <joppich@bricknet.de> <joppich@users.noreply.github.com>
Jose Diaz-Gonzalez <email@josediazgonzalez.com>
Jose Diaz-Gonzalez <email@josediazgonzalez.com> <jose@seatgeek.com>
Jose Diaz-Gonzalez <email@josediazgonzalez.com> <josegonzalez@users.noreply.github.com>
Jose Diaz-Gonzalez <jose@seatgeek.com> <josegonzalez@users.noreply.github.com>
Josh Bonczkowski <josh.bonczkowski@gmail.com>
Josh Eveleth <joshe@opendns.com> <jeveleth@users.noreply.github.com>
Josh Hawn <josh.hawn@docker.com> <jlhawn@berkeley.edu>
@@ -260,7 +237,6 @@ Justin Cormack <justin.cormack@docker.com>
Justin Cormack <justin.cormack@docker.com> <justin.cormack@unikernel.com>
Justin Cormack <justin.cormack@docker.com> <justin@specialbusservice.com>
Justin Simonelis <justin.p.simonelis@gmail.com> <justin.simonelis@PTS-JSIMON2.toronto.exclamation.com>
Justin Terry <juterry@microsoft.com>
Jérôme Petazzoni <jerome.petazzoni@docker.com> <jerome.petazzoni@dotcloud.com>
Jérôme Petazzoni <jerome.petazzoni@docker.com> <jerome.petazzoni@gmail.com>
Jérôme Petazzoni <jerome.petazzoni@docker.com> <jp@enix.org>
@@ -269,11 +245,8 @@ Kai Qiang Wu (Kennan) <wkq5325@gmail.com>
Kai Qiang Wu (Kennan) <wkq5325@gmail.com> <wkqwu@cn.ibm.com>
Kamil Domański <kamil@domanski.co>
Kamjar Gerami <kami.gerami@gmail.com>
Karthik Nayak <karthik.188@gmail.com>
Karthik Nayak <karthik.188@gmail.com> <Karthik.188@gmail.com>
Ken Cochrane <kencochrane@gmail.com> <KenCochrane@gmail.com>
Ken Herner <kherner@progress.com> <chosenken@gmail.com>
Ken Reese <krrgithub@gmail.com>
Kenfe-Mickaël Laventure <mickael.laventure@gmail.com>
Kevin Feyrer <kevin.feyrer@btinternet.com> <kevinfeyrer@users.noreply.github.com>
Kevin Kern <kaiwentan@harmonycloud.cn>
@@ -287,7 +260,6 @@ Konstantin Pelykh <kpelykh@zettaset.com>
Kotaro Yoshimatsu <kotaro.yoshimatsu@gmail.com>
Kunal Kushwaha <kushwaha_kunal_v7@lab.ntt.co.jp> <kunal.kushwaha@gmail.com>
Lajos Papp <lajos.papp@sequenceiq.com> <lalyos@yahoo.com>
Lei Gong <lgong@alauda.io>
Lei Jitang <leijitang@huawei.com>
Lei Jitang <leijitang@huawei.com> <leijitang@gmail.com>
Liang Mingqiang <mqliang.zju@gmail.com>
@@ -296,8 +268,7 @@ Liao Qingwei <liaoqingwei@huawei.com>
Linus Heckemann <lheckemann@twig-world.com>
Linus Heckemann <lheckemann@twig-world.com> <anonymouse2048@gmail.com>
Lokesh Mandvekar <lsm5@fedoraproject.org> <lsm5@redhat.com>
Lorenzo Fontana <fontanalorenz@gmail.com> <fontanalorenzo@me.com>
Lorenzo Fontana <fontanalorenz@gmail.com> <lo@linux.com>
Lorenzo Fontana <lo@linux.com> <fontanalorenzo@me.com>
Louis Opter <kalessin@kalessin.fr>
Louis Opter <kalessin@kalessin.fr> <louis@dotcloud.com>
Luca Favatella <luca.favatella@erlang-solutions.com> <lucafavatella@users.noreply.github.com>
@@ -333,8 +304,6 @@ Matthew Mosesohn <raytrac3r@gmail.com>
Matthew Mueller <mattmuelle@gmail.com>
Matthias Kühnle <git.nivoc@neverbox.com> <kuehnle@online.de>
Mauricio Garavaglia <mauricio@medallia.com> <mauriciogaravaglia@gmail.com>
Maxwell <csuhp007@gmail.com>
Maxwell <csuhp007@gmail.com> <csuhqg@foxmail.com>
Michael Crosby <michael@docker.com> <crosby.michael@gmail.com>
Michael Crosby <michael@docker.com> <crosbymichael@gmail.com>
Michael Crosby <michael@docker.com> <michael@crosbymichael.com>
@@ -346,8 +315,6 @@ Michael Nussbaum <michael.nussbaum@getbraintree.com>
Michael Nussbaum <michael.nussbaum@getbraintree.com> <code@getbraintree.com>
Michael Spetsiotis <michael_spets@hotmail.com>
Michal Minář <miminar@redhat.com>
Michiel de Jong <michiel@unhosted.org>
Mickaël Fortunato <morsi.morsicus@gmail.com>
Miguel Angel Alvarez Cabrerizo <doncicuto@gmail.com> <30386061+doncicuto@users.noreply.github.com>
Miguel Angel Fernández <elmendalerenda@gmail.com>
Mihai Borobocea <MihaiBorob@gmail.com> <MihaiBorobocea@gmail.com>
@@ -360,7 +327,6 @@ Moorthy RS <rsmoorthy@gmail.com> <rsmoorthy@users.noreply.github.com>
Moysés Borges <moysesb@gmail.com>
Moysés Borges <moysesb@gmail.com> <moyses.furtado@wplex.com.br>
Nace Oroz <orkica@gmail.com>
Natasha Jarus <linuxmercedes@gmail.com>
Nathan LeClaire <nathan.leclaire@docker.com> <nathan.leclaire@gmail.com>
Nathan LeClaire <nathan.leclaire@docker.com> <nathanleclaire@gmail.com>
Neil Horman <nhorman@tuxdriver.com> <nhorman@hmswarspite.think-freely.org>
@@ -372,9 +338,6 @@ Nolan Darilek <nolan@thewordnerd.info>
O.S. Tezer <ostezer@gmail.com>
O.S. Tezer <ostezer@gmail.com> <ostezer@users.noreply.github.com>
Oh Jinkyun <tintypemolly@gmail.com> <tintypemolly@Ohui-MacBook-Pro.local>
Oliver Reason <oli@overrateddev.co>
Olli Janatuinen <olli.janatuinen@gmail.com>
Olli Janatuinen <olli.janatuinen@gmail.com> <olljanat@users.noreply.github.com>
Ouyang Liduo <oyld0210@163.com>
Patrick Stapleton <github@gdi2290.com>
Paul Liljenberg <liljenberg.paul@gmail.com> <letters@paulnotcom.se>
@@ -396,10 +359,7 @@ Robert Terhaar <rterhaar@atlanticdynamic.com> <robbyt@users.noreply.github.com>
Roberto G. Hashioka <roberto.hashioka@docker.com> <roberto_hashioka@hotmail.com>
Roberto Muñoz Fernández <robertomf@gmail.com> <roberto.munoz.fernandez.contractor@bbva.com>
Roman Dudin <katrmr@gmail.com> <decadent@users.noreply.github.com>
Rong Zhang <rongzhang@alauda.io>
Rongxiang Song <tinysong1226@gmail.com>
Ross Boucher <rboucher@gmail.com>
Rui Cao <ruicao@alauda.io>
Runshen Zhu <runshen.zhu@gmail.com>
Ryan Stelly <ryan.stelly@live.com>
Sakeven Jiang <jc5930@sina.cn>
@@ -472,7 +432,6 @@ Tõnis Tiigi <tonistiigi@gmail.com>
Trishna Guha <trishnaguha17@gmail.com>
Tristan Carel <tristan@cogniteev.com>
Tristan Carel <tristan@cogniteev.com> <tristan.carel@gmail.com>
Tyler Brown <tylers.pile@gmail.com>
Umesh Yadav <umesh4257@gmail.com>
Umesh Yadav <umesh4257@gmail.com> <dungeonmaster18@users.noreply.github.com>
Victor Lyuboslavsky <victor@victoreda.com>
@@ -505,12 +464,8 @@ Wei Wu <wuwei4455@gmail.com> cizixs <cizixs@163.com>
Wenjun Tang <tangwj2@lenovo.com> <dodia@163.com>
Wewang Xiaorenfine <wang.xiaoren@zte.com.cn>
Will Weaver <monkey@buildingbananas.com>
Xian Chaobo <xianchaobo@huawei.com>
Xian Chaobo <xianchaobo@huawei.com> <jimmyxian2004@yahoo.com.cn>
Xianglin Gao <xlgao@zju.edu.cn>
Xianlu Bird <xianlubird@gmail.com>
Xiao YongBiao <xyb4638@gmail.com>
Xiaodong Zhang <a4012017@sina.com>
Xiaoyu Zhang <zhang.xiaoyu33@zte.com.cn>
Xuecong Liao <satorulogic@gmail.com>
Yamasaki Masahide <masahide.y@gmail.com>
@@ -522,18 +477,15 @@ Yi EungJun <eungjun.yi@navercorp.com> <semtlenori@gmail.com>
Ying Li <ying.li@docker.com>
Ying Li <ying.li@docker.com> <cyli@twistedmatrix.com>
Yong Tang <yong.tang.github@outlook.com> <yongtang@users.noreply.github.com>
Yongxin Li <yxli@alauda.io>
Yosef Fertel <yfertel@gmail.com> <frosforever@users.noreply.github.com>
Yu Changchun <yuchangchun1@huawei.com>
Yu Chengxia <yuchengxia@huawei.com>
Yu Peng <yu.peng36@zte.com.cn>
Yu Peng <yu.peng36@zte.com.cn> <yupeng36@zte.com.cn>
Yue Zhang <zy675793960@yeah.net>
Zachary Jaffee <zjaffee@us.ibm.com> <zij@case.edu>
Zachary Jaffee <zjaffee@us.ibm.com> <zjaffee@apache.org>
ZhangHang <stevezhang2014@gmail.com>
Zhenkun Bi <bi.zhenkun@zte.com.cn>
Zhoulin Xie <zhoulin.xie@daocloud.io>
Zhou Hao <zhouhao@cn.fujitsu.com>
Zhu Kunjia <zhu.kunjia@zte.com.cn>
Zou Yu <zouyu7@huawei.com>
```
AUTHORS (120 changes)
```
@@ -44,7 +44,7 @@ Ajey Charantimath <ajey.charantimath@gmail.com>
ajneu <ajneu@users.noreply.github.com>
Akash Gupta <akagup@microsoft.com>
Akihiro Matsushima <amatsusbit@gmail.com>
Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
Akihiro Suda <suda.akihiro@lab.ntt.co.jp>
Akim Demaille <akim.demaille@docker.com>
Akira Koyasu <mail@akirakoyasu.net>
Akshay Karle <akshay.a.karle@gmail.com>
@@ -81,7 +81,6 @@ Alexandre Garnier <zigarn@gmail.com>
Alexandre González <agonzalezro@gmail.com>
Alexandre Jomin <alexandrejomin@gmail.com>
Alexandru Sfirlogea <alexandru.sfirlogea@gmail.com>
Alexei Margasov <alexei38@yandex.ru>
Alexey Guskov <lexag@mail.ru>
Alexey Kotlyarov <alexey@infoxchange.net.au>
Alexey Shamrin <shamrin@gmail.com>
@@ -119,7 +118,6 @@ Andreas Köhler <andi5.py@gmx.net>
Andreas Savvides <andreas@editd.com>
Andreas Tiefenthaler <at@an-ti.eu>
Andrei Gherzan <andrei@resin.io>
Andrei Vagin <avagin@gmail.com>
Andrew C. Bodine <acbodine@us.ibm.com>
Andrew Clay Shafer <andrewcshafer@gmail.com>
Andrew Duckworth <grillopress@gmail.com>
@@ -139,7 +137,6 @@ Andrew Po <absourd.noise@gmail.com>
Andrew Weiss <andrew.weiss@docker.com>
Andrew Williams <williams.andrew@gmail.com>
Andrews Medina <andrewsmedina@gmail.com>
Andrey Kolomentsev <andrey.kolomentsev@docker.com>
Andrey Petrov <andrey.petrov@shazow.net>
Andrey Stolbovsky <andrey.stolbovsky@gmail.com>
André Martins <aanm90@gmail.com>
@@ -154,7 +151,6 @@ Andy Wilson <wilson.andrew.j+github@gmail.com>
Anes Hasicic <anes.hasicic@gmail.com>
Anil Belur <askb23@gmail.com>
Anil Madhavapeddy <anil@recoil.org>
Ankit Jain <ajatkj@yahoo.co.in>
Ankush Agarwal <ankushagarwal11@gmail.com>
Anonmily <michelle@michelleliu.io>
Anran Qiao <anran.qiao@daocloud.io>
@@ -186,7 +182,6 @@ Asad Saeeduddin <masaeedu@gmail.com>
Asbjørn Enge <asbjorn@hanafjedle.net>
averagehuman <averagehuman@users.noreply.github.com>
Avi Das <andas222@gmail.com>
Avi Kivity <avi@scylladb.com>
Avi Miller <avi.miller@oracle.com>
Avi Vaid <avaid1996@gmail.com>
ayoshitake <airandfingers@gmail.com>
@@ -200,27 +195,23 @@ bdevloed <boris.de.vloed@gmail.com>
Ben Bonnefoy <frenchben@docker.com>
Ben Firshman <ben@firshman.co.uk>
Ben Golub <ben.golub@dotcloud.com>
Ben Gould <ben@bengould.co.uk>
Ben Hall <ben@benhall.me.uk>
Ben Sargent <ben@brokendigits.com>
Ben Severson <BenSeverson@users.noreply.github.com>
Ben Toews <mastahyeti@gmail.com>
Ben Wiklund <ben@daisyowl.com>
Benjamin Atkin <ben@benatkin.com>
Benjamin Baker <Benjamin.baker@utexas.edu>
Benjamin Boudreau <boudreau.benjamin@gmail.com>
Benjamin Yolken <yolken@stripe.com>
Benoit Chesneau <bchesneau@gmail.com>
Bernerd Schaefer <bj.schaefer@gmail.com>
Bernhard M. Wiedemann <bwiedemann@suse.de>
Bert Goethals <bert@bertg.be>
Bevisy Zhang <binbin36520@gmail.com>
Bharath Thiruveedula <bharath_ves@hotmail.com>
Bhiraj Butala <abhiraj.butala@gmail.com>
Bhumika Bayani <bhumikabayani@gmail.com>
Bilal Amarni <bilal.amarni@gmail.com>
Bill Wang <ozbillwang@gmail.com>
Bily Zhang <xcoder@tenxcloud.com>
Bin Liu <liubin0329@gmail.com>
Bingshen Wang <bingshen.wbs@alibaba-inc.com>
Blake Geno <blakegeno@gmail.com>
@@ -255,7 +246,6 @@ Brian Torres-Gil <brian@dralth.com>
Brian Trump <btrump@yelp.com>
Brice Jaglin <bjaglin@teads.tv>
Briehan Lombaard <briehan.lombaard@gmail.com>
Brielle Broder <bbroder@google.com>
Bruno Bigras <bigras.bruno@gmail.com>
Bruno Binet <bruno.binet@gmail.com>
Bruno Gazzera <bgazzera@paginar.com>
@@ -310,7 +300,6 @@ Chen Min <chenmin46@huawei.com>
Chen Mingjie <chenmingjie0828@163.com>
Chen Qiu <cheney-90@hotmail.com>
Cheng-mean Liu <soccerl@microsoft.com>
Chengfei Shang <cfshang@alauda.io>
Chengguang Xu <cgxu519@gmx.com>
chenyuzhu <chenyuzhi@oschina.cn>
Chetan Birajdar <birajdar.chetan@gmail.com>
@@ -336,11 +325,9 @@ Chris Swan <chris.swan@iee.org>
Chris Telfer <ctelfer@docker.com>
Chris Wahl <github@wahlnetwork.com>
Chris Weyl <cweyl@alumni.drew.edu>
Chris White <me@cwprogram.com>
Christian Berendt <berendt@b1-systems.de>
Christian Brauner <christian.brauner@ubuntu.com>
Christian Böhme <developement@boehme3d.de>
Christian Muehlhaeuser <muesli@gmail.com>
Christian Persson <saser@live.se>
Christian Rotzoll <ch.rotzoll@gmail.com>
Christian Simon <simon@swine.de>
@@ -363,7 +350,6 @@ Cody Roseborough <crrosebo@amazon.com>
Coenraad Loubser <coenraad@wish.org.za>
Colin Dunklau <colin.dunklau@gmail.com>
Colin Hebert <hebert.colin@gmail.com>
Colin Panisset <github@clabber.com>
Colin Rice <colin@daedrum.net>
Colin Walters <walters@verbum.org>
Collin Guarino <collin.guarino@gmail.com>
@@ -399,7 +385,6 @@ Dan Levy <dan@danlevy.net>
Dan McPherson <dmcphers@redhat.com>
Dan Stine <sw@stinemail.com>
Dan Williams <me@deedubs.com>
Dani Hodovic <dani.hodovic@gmail.com>
Dani Louca <dani.louca@docker.com>
Daniel Antlinger <d.antlinger@gmx.at>
Daniel Dao <dqminh@cloudflare.com>
@@ -453,14 +438,12 @@ David Mackey <tdmackey@booleanhaiku.com>
David Mat <david@davidmat.com>
David Mcanulty <github@hellspark.com>
David McKay <david@rawkode.com>
David P Hilton <david.hilton.p@gmail.com>
David Pelaez <pelaez89@gmail.com>
David R. Jenni <david.r.jenni@gmail.com>
David Röthlisberger <david@rothlis.net>
David Sheets <dsheets@docker.com>
David Sissitka <me@dsissitka.com>
David Trott <github@davidtrott.com>
David Wang <00107082@163.com>
David Williamson <david.williamson@docker.com>
David Xia <dxia@spotify.com>
David Young <yangboh@cn.ibm.com>
@@ -468,10 +451,8 @@ Davide Ceretti <davide.ceretti@hogarthww.com>
Dawn Chen <dawnchen@google.com>
dbdd <wangtong2712@gmail.com>
dcylabs <dcylabs@gmail.com>
Debayan De <debayande@users.noreply.github.com>
Deborah Gertrude Digges <deborah.gertrude.digges@gmail.com>
deed02392 <georgehafiz@gmail.com>
Deep Debroy <ddebroy@docker.com>
Deng Guangxing <dengguangxing@huawei.com>
Deni Bertovic <deni@kset.org>
Denis Defreyne <denis@soundcloud.com>
@@ -496,7 +477,6 @@ Dieter Reuter <dieter.reuter@me.com>
Dillon Dixon <dillondixon@gmail.com>
Dima Stopel <dima@twistlock.com>
Dimitri John Ledkov <dimitri.j.ledkov@intel.com>
Dimitris Mandalidis <dimitris.mandalidis@gmail.com>
Dimitris Rozakis <dimrozakis@gmail.com>
Dimitry Andric <d.andric@activevideo.com>
Dinesh Subhraveti <dineshs@altiscale.com>
@@ -510,7 +490,6 @@ Dmitri Shuralyov <shurcooL@gmail.com>
Dmitry Demeshchuk <demeshchuk@gmail.com>
Dmitry Gusev <dmitry.gusev@gmail.com>
Dmitry Kononenko <d@dm42.ru>
Dmitry Sharshakov <d3dx12.xx@gmail.com>
Dmitry Shyshkin <dmitry@shyshkin.org.ua>
Dmitry Smirnov <onlyjob@member.fsf.org>
Dmitry V. Krivenok <krivenok.dmitry@gmail.com>
@@ -524,7 +503,6 @@ Don Kjer <don.kjer@gmail.com>
Don Spaulding <donspauldingii@gmail.com>
Donald Huang <don.hcd@gmail.com>
Dong Chen <dongluo.chen@docker.com>
Donghwa Kim <shanytt@gmail.com>
Donovan Jones <git@gamma.net.nz>
Doron Podoleanu <doronp@il.ibm.com>
Doug Davis <dug@us.ibm.com>
@@ -601,9 +579,7 @@ Ewa Czechowska <ewa@ai-traders.com>
Eystein Måløy Stenberg <eystein.maloy.stenberg@cfengine.com>
ezbercih <cem.ezberci@gmail.com>
Ezra Silvera <ezra@il.ibm.com>
Fabian Kramm <kramm@covexo.com>
Fabian Lauer <kontakt@softwareschmiede-saar.de>
Fabian Raetz <fabian.raetz@gmail.com>
Fabiano Rosas <farosas@br.ibm.com>
Fabio Falci <fabiofalci@gmail.com>
Fabio Kung <fabio.kung@gmail.com>
@@ -615,7 +591,6 @@ Faiz Khan <faizkhan00@gmail.com>
falmp <chico.lopes@gmail.com>
Fangming Fang <fangming.fang@arm.com>
Fangyuan Gao <21551127@zju.edu.cn>
fanjiyun <fan.jiyun@zte.com.cn>
Fareed Dudhia <fareeddudhia@googlemail.com>
Fathi Boudra <fathi.boudra@linaro.org>
Federico Gimenez <fgimenez@coit.es>
@@ -646,7 +621,6 @@ Florin Patan <florinpatan@gmail.com>
fonglh <fonglh@gmail.com>
Foysal Iqbal <foysal.iqbal.fb@gmail.com>
Francesc Campoy <campoy@google.com>
Francesco Mari <mari.francesco@gmail.com>
Francis Chuang <francis.chuang@boostport.com>
Francisco Carriedo <fcarriedo@gmail.com>
Francisco Souza <f@souza.cc>
@@ -660,7 +634,6 @@ Frederik Loeffert <frederik@zitrusmedia.de>
Frederik Nordahl Jul Sabroe <frederikns@gmail.com>
Freek Kalter <freek@kalteronline.org>
Frieder Bluemle <frieder.bluemle@gmail.com>
Fu JinLin <withlin@yeah.net>
Félix Baylac-Jacqué <baylac.felix@gmail.com>
Félix Cantournet <felix.cantournet@cloudwatt.com>
Gabe Rosenhouse <gabe@missionst.com>
@@ -680,7 +653,6 @@ Gaël PORTAY <gael.portay@savoirfairelinux.com>
Genki Takiuchi <genki@s21g.com>
GennadySpb <lipenkov@gmail.com>
Geoffrey Bachelet <grosfrais@gmail.com>
Geon Kim <geon0250@gmail.com>
George Kontridze <george@bugsnag.com>
George MacRorie <gmacr31@gmail.com>
George Xie <georgexsh@gmail.com>
@@ -693,7 +665,6 @@ Ghislain Bourgeois <ghislain.bourgeois@gmail.com>
Giampaolo Mancini <giampaolo@trampolineup.com>
Gianluca Borello <g.borello@gmail.com>
Gildas Cuisinier <gildas.cuisinier@gcuisinier.net>
Giovan Isa Musthofa <giovanism@outlook.co.id>
gissehel <public-devgit-dantus@gissehel.org>
Giuseppe Mazzotta <gdm85@users.noreply.github.com>
Gleb Fotengauer-Malinovskiy <glebfm@altlinux.org>
@@ -705,7 +676,6 @@ Gopikannan Venugopalsamy <gopikannan.venugopalsamy@gmail.com>
Gosuke Miyashita <gosukenator@gmail.com>
Gou Rao <gou@portworx.com>
Govinda Fichtner <govinda.fichtner@googlemail.com>
Grant Millar <grant@cylo.io>
Grant Reaber <grant.reaber@gmail.com>
Graydon Hoare <graydon@pobox.com>
Greg Fausak <greg@tacodata.com>
@@ -724,9 +694,7 @@ Guruprasad <lgp171188@gmail.com>
Gustav Sinder <gustav.sinder@gmail.com>
gwx296173 <gaojing3@huawei.com>
Günter Zöchbauer <guenter@gzoechbauer.com>
haikuoliu <haikuo@amazon.com>
Hakan Özler <hakan.ozler@kodcu.com>
Hamish Hutchings <moredhel@aoeu.me>
Hans Kristian Flaatten <hans@starefossen.com>
Hans Rødtang <hansrodtang@gmail.com>
Hao Shu Wei <haosw@cn.ibm.com>
@@ -734,7 +702,6 @@ Hao Zhang <21521210@zju.edu.cn>
Harald Albers <github@albersweb.de>
Harley Laue <losinggeneration@gmail.com>
Harold Cooper <hrldcpr@gmail.com>
Harrison Turton <harrisonturton@gmail.com>
Harry Zhang <harryz@hyper.sh>
Harshal Patil <harshal.patil@in.ibm.com>
Harshal Patil <harshalp@linux.vnet.ibm.com>
@@ -746,7 +713,6 @@ Hector Castro <hectcastro@gmail.com>
Helen Xie <chenjg@harmonycloud.cn>
Henning Sprang <henning.sprang@gmail.com>
Hiroshi Hatake <hatake@clear-code.com>
Hiroyuki Sasagawa <hs19870702@gmail.com>
Hobofan <goisser94@gmail.com>
Hollie Teal <hollie@docker.com>
Hong Xu <hong@topbug.net>
@@ -769,7 +735,6 @@ Ian Bishop <ianbishop@pace7.com>
Ian Bull <irbull@gmail.com>
Ian Calvert <ianjcalvert@gmail.com>
Ian Campbell <ian.campbell@docker.com>
Ian Chen <ianre657@gmail.com>
Ian Lee <IanLee1521@gmail.com>
Ian Main <imain@redhat.com>
Ian Philpot <ian.philpot@microsoft.com>
@@ -787,11 +752,9 @@ Ilya Khlopotov <ilya.khlopotov@gmail.com>
imre Fitos <imre.fitos+github@gmail.com>
inglesp <peter.inglesby@gmail.com>
Ingo Gottwald <in.gottwald@gmail.com>
Innovimax <innovimax@gmail.com>
Isaac Dupree <antispam@idupree.com>
Isabel Jimenez <contact.isabeljimenez@gmail.com>
Isao Jonas <isao.jonas@gmail.com>
Iskander Sharipov <quasilyte@gmail.com>
Ivan Babrou <ibobrik@gmail.com>
Ivan Fraixedes <ifcdev@gmail.com>
Ivan Grcic <igrcic@gmail.com>
@@ -822,7 +785,6 @@ James Mills <prologic@shortcircuit.net.au>
James Nesbitt <james.nesbitt@wunderkraut.com>
James Nugent <james@jen20.com>
James Turnbull <james@lovedthanlost.net>
James Watkins-Harvey <jwatkins@progi-media.com>
Jamie Hannaford <jamie@limetree.org>
Jamshid Afshar <jafshar@yahoo.com>
Jan Keromnes <janx@linux.com>
@@ -855,7 +817,6 @@ jaxgeller <jacksongeller@gmail.com>
Jay <imjching@hotmail.com>
Jay <teguhwpurwanto@gmail.com>
Jay Kamat <github@jgkamat.33mail.com>
Jean Rouge <rougej+github@gmail.com>
Jean-Baptiste Barth <jeanbaptiste.barth@gmail.com>
Jean-Baptiste Dalido <jeanbaptiste@appgratis.com>
Jean-Christophe Berthon <huygens@berthon.eu>
@@ -886,13 +847,11 @@ Jeroen Franse <jeroenfranse@gmail.com>
Jeroen Jacobs <github@jeroenj.be>
Jesse Dearing <jesse.dearing@gmail.com>
Jesse Dubay <jesse@thefortytwo.net>
Jessica Frazelle <acidburn@microsoft.com>
Jessica Frazelle <jessfraz@google.com>
Jezeniel Zapanta <jpzapanta22@gmail.com>
Jhon Honce <jhonce@redhat.com>
Ji.Zhilong <zhilongji@gmail.com>
Jian Liao <jliao@alauda.io>
Jian Zhang <zhangjian.fnst@cn.fujitsu.com>
Jiang Jinyang <jjyruby@gmail.com>
Jie Luo <luo612@zju.edu.cn>
Jihyun Hwang <jhhwang@telcoware.com>
Jilles Oldenbeuving <ojilles@gmail.com>
@@ -903,14 +862,14 @@ Jim Perrin <jperrin@centos.org>
Jimmy Cuadra <jimmy@jimmycuadra.com>
Jimmy Puckett <jimmy.puckett@spinen.com>
Jimmy Song <rootsongjc@gmail.com>
jimmyxian <jimmyxian2004@yahoo.com.cn>
Jinsoo Park <cellpjs@gmail.com>
Jintao Zhang <zhangjintao9020@gmail.com>
Jiri Appl <jiria@microsoft.com>
Jiri Popelka <jpopelka@redhat.com>
Jiuyue Ma <majiuyue@huawei.com>
Jiří Župka <jzupka@redhat.com>
jjy <jiangjinyang@outlook.com>
jmzwcn <jmzwcn@gmail.com>
Joao Fernandes <joao.fernandes@docker.com>
Joao Trindade <trindade.joao@gmail.com>
Joe Beda <joe.github@bedafamily.com>
Joe Doliner <jdoliner@pachyderm.io>
Joe Ferguson <joe@infosiftr.com>
@@ -949,7 +908,6 @@ Jon Johnson <jonjohnson@google.com>
Jon Surrell <jon.surrell@gmail.com>
Jon Wedaman <jweede@gmail.com>
Jonas Pfenniger <jonas@pfenniger.name>
Jonathan A. Schweder <jonathanschweder@gmail.com>
Jonathan A. Sternberg <jonathansternberg@gmail.com>
Jonathan Boulle <jonathanboulle@gmail.com>
Jonathan Camp <jonathan@irondojo.com>
@@ -970,7 +928,7 @@ Jordan Jennings <jjn2009@gmail.com>
Jordan Sissel <jls@semicomplete.com>
Jorge Marin <chipironcin@users.noreply.github.com>
Jorit Kleine-Möllhoff <joppich@bricknet.de>
Jose Diaz-Gonzalez <email@josediazgonzalez.com>
Jose Diaz-Gonzalez <jose@seatgeek.com>
Joseph Anthony Pasquale Holsten <joseph@josephholsten.com>
Joseph Hager <ajhager@gmail.com>
Joseph Kern <jkern@semafour.net>
@@ -1024,8 +982,7 @@ kargakis <kargakis@users.noreply.github.com>
Karl Grzeszczak <karlgrz@gmail.com>
Karol Duleba <mr.fuxi@gmail.com>
Karthik Karanth <karanth.karthik@gmail.com>
Karthik Nayak <karthik.188@gmail.com>
Kasper Fabæch Brandt <poizan@poizan.dk>
Karthik Nayak <Karthik.188@gmail.com>
Kate Heddleston <kate.heddleston@gmail.com>
Katie McLaughlin <katie@glasnt.com>
Kato Kazuyoshi <kato.kazuyoshi@gmail.com>
@@ -1033,7 +990,6 @@ Katrina Owen <katrina.owen@gmail.com>
Kawsar Saiyeed <kawsar.saiyeed@projiris.com>
Kay Yan <kay.yan@daocloud.io>
kayrus <kay.diam@gmail.com>
Kazuhiro Sera <seratch@gmail.com>
Ke Li <kel@splunk.com>
Ke Xu <leonhartx.k@gmail.com>
Kei Ohmura <ohmura.kei@gmail.com>
@@ -1042,7 +998,6 @@ Keli Hu <dev@keli.hu>
Ken Cochrane <kencochrane@gmail.com>
Ken Herner <kherner@progress.com>
Ken ICHIKAWA <ichikawa.ken@jp.fujitsu.com>
Ken Reese <krrgithub@gmail.com>
Kenfe-Mickaël Laventure <mickael.laventure@gmail.com>
Kenjiro Nakayama <nakayamakenjiro@gmail.com>
Kent Johnson <kentoj@gmail.com>
@@ -1080,13 +1035,11 @@ Krasimir Georgiev <support@vip-consult.co.uk>
Kris-Mikael Krister <krismikael@protonmail.com>
Kristian Haugene <kristian.haugene@capgemini.com>
Kristina Zabunova <triara.xiii@gmail.com>
Krystian Wojcicki <kwojcicki@sympatico.ca>
krrg <krrgithub@gmail.com>
Kun Zhang <zkazure@gmail.com>
Kunal Kushwaha <kushwaha_kunal_v7@lab.ntt.co.jp>
Kunal Tyagi <tyagi.kunal@live.com>
Kyle Conroy <kyle.j.conroy@gmail.com>
Kyle Linden <linden.kyle@gmail.com>
Kyle Wuolle <kyle.wuolle@gmail.com>
kyu <leehk1227@gmail.com>
Lachlan Coote <lcoote@vmware.com>
Lai Jiangshan <jiangshanlai@gmail.com>
@@ -1107,7 +1060,6 @@ Leandro Siqueira <leandro.siqueira@gmail.com>
Lee Chao <932819864@qq.com>
Lee, Meng-Han <sunrisedm4@gmail.com>
leeplay <hyeongkyu.lee@navercorp.com>
Lei Gong <lgong@alauda.io>
Lei Jitang <leijitang@huawei.com>
Len Weincier <len@cloudafrica.net>
Lennie <github@consolejunkie.net>
@@ -1124,8 +1076,6 @@ Liana Lo <liana.lixia@gmail.com>
Liang Mingqiang <mqliang.zju@gmail.com>
Liang-Chi Hsieh <viirya@gmail.com>
Liao Qingwei <liaoqingwei@huawei.com>
Lifubang <lifubang@acmcoder.com>
Lihua Tang <lhtang@alauda.io>
Lily Guo <lily.guo@docker.com>
limsy <seongyeol37@gmail.com>
Lin Lu <doraalin@163.com>
@@ -1144,8 +1094,7 @@ Lloyd Dewolf <foolswisdom@gmail.com>
Lokesh Mandvekar <lsm5@fedoraproject.org>
longliqiang88 <394564827@qq.com>
Lorenz Leutgeb <lorenz.leutgeb@gmail.com>
Lorenzo Fontana <fontanalorenz@gmail.com>
Lotus Fenn <fenn.lotus@gmail.com>
Lorenzo Fontana <lo@linux.com>
Louis Opter <kalessin@kalessin.fr>
Luca Favatella <luca.favatella@erlang-solutions.com>
Luca Marturana <lucamarturana@gmail.com>
@@ -1202,7 +1151,6 @@ Marius Gundersen <me@mariusgundersen.net>
Marius Sturm <marius@graylog.com>
Marius Voila <marius.voila@gmail.com>
Mark Allen <mrallen1@yahoo.com>
Mark Jeromin <mark.jeromin@sysfrog.net>
Mark McGranaghan <mmcgrana@gmail.com>
Mark McKinstry <mmckinst@umich.edu>
Mark Milstein <mark@epiloque.com>
@@ -1219,7 +1167,6 @@ Martijn van Oosterhout <kleptog@svana.org>
Martin Honermeyer <maze@strahlungsfrei.de>
Martin Kelly <martin@surround.io>
Martin Mosegaard Amdisen <martin.amdisen@praqma.com>
Martin Muzatko <martin@happy-css.com>
Martin Redmond <redmond.martin@gmail.com>
Mary Anthony <mary.anthony@docker.com>
Masahito Zembutsu <zembutsu@users.noreply.github.com>
@@ -1253,7 +1200,6 @@ Matthias Klumpp <matthias@tenstral.net>
Matthias Kühnle <git.nivoc@neverbox.com>
Matthias Rampke <mr@soundcloud.com>
Matthieu Hauglustaine <matt.hauglustaine@gmail.com>
Mattias Jernberg <nostrad@gmail.com>
Mauricio Garavaglia <mauricio@medallia.com>
mauriyouth <mauriyouth@gmail.com>
Max Shytikov <mshytikov@gmail.com>
@@ -1262,8 +1208,6 @@ Maxim Ivanov <ivanov.maxim@gmail.com>
Maxim Kulkin <mkulkin@mirantis.com>
Maxim Treskin <zerthurd@gmail.com>
Maxime Petazzoni <max@signalfuse.com>
Maximiliano Maccanti <maccanti@amazon.com>
Maxwell <csuhp007@gmail.com>
Meaglith Ma <genedna@gmail.com>
meejah <meejah@meejah.ca>
Megan Kostick <mkostick@us.ibm.com>
@@ -1304,9 +1248,8 @@ Michal Wieczorek <wieczorek-michal@wp.pl>
Michaël Pailloncy <mpapo.dev@gmail.com>
Michał Czeraszkiewicz <czerasz@gmail.com>
Michał Gryko <github@odkurzacz.org>
Michiel de Jong <michiel@unhosted.org>
Mickaël Fortunato <morsi.morsicus@gmail.com>
Mickaël Remars <mickael@remars.com>
Michiel@unhosted <michiel@unhosted.org>
Mickaël FORTUNATO <morsi.morsicus@gmail.com>
Miguel Angel Fernández <elmendalerenda@gmail.com>
Miguel Morales <mimoralea@gmail.com>
Mihai Borobocea <MihaiBorob@gmail.com>
@@ -1337,7 +1280,6 @@ Mitch Capper <mitch.capper@gmail.com>
Mizuki Urushida <z11111001011@gmail.com>
mlarcher <github@ringabell.org>
Mohammad Banikazemi <mb@us.ibm.com>
Mohammad Nasirifar <farnasirim@gmail.com>
Mohammed Aaqib Ansari <maaquib@gmail.com>
Mohit Soni <mosoni@ebay.com>
Moorthy RS <rsmoorthy@gmail.com>
@@ -1362,7 +1304,6 @@ Nan Monnand Deng <monnand@gmail.com>
Naoki Orii <norii@cs.cmu.edu>
Natalie Parker <nparker@omnifone.com>
Natanael Copa <natanael.copa@docker.com>
Natasha Jarus <linuxmercedes@gmail.com>
Nate Brennand <nate.brennand@clever.com>
Nate Eagleson <nate@nateeag.com>
Nate Jones <nate@endot.org>
@@ -1396,7 +1337,6 @@ Nicolas Dudebout <nicolas.dudebout@gatech.edu>
Nicolas Goy <kuon@goyman.com>
Nicolas Kaiser <nikai@nikai.net>
Nicolas Sterchele <sterchele.nicolas@gmail.com>
Nicolas V Castet <nvcastet@us.ibm.com>
Nicolás Hock Isaza <nhocki@gmail.com>
Nigel Poulton <nigelpoulton@hotmail.com>
Nik Nyby <nikolas@gnu.org>
@@ -1412,7 +1352,6 @@ Noah Treuhaft <noah.treuhaft@docker.com>
NobodyOnSE <ich@sektor.selfip.com>
noducks <onemannoducks@gmail.com>
Nolan Darilek <nolan@thewordnerd.info>
Noriki Nakamura <noriki.nakamura@miraclelinux.com>
nponeccop <andy.melnikov@gmail.com>
Nuutti Kotivuori <naked@iki.fi>
nzwsch <hi@nzwsch.com>
@@ -1424,11 +1363,8 @@ Ohad Schneider <ohadschn@users.noreply.github.com>
ohmystack <jun.jiang02@ele.me>
Ole Reifschneider <mail@ole-reifschneider.de>
Oliver Neal <ItsVeryWindy@users.noreply.github.com>
Oliver Reason <oli@overrateddev.co>
Olivier Gambier <dmp42@users.noreply.github.com>
Olle Jonsson <olle.jonsson@gmail.com>
Olli Janatuinen <olli.janatuinen@gmail.com>
Omri Shiv <Omri.Shiv@teradata.com>
Oriol Francès <oriolfa@gmail.com>
Oskar Niburski <oskarniburski@gmail.com>
Otto Kekäläinen <otto@seravo.fi>
@@ -1484,7 +1420,6 @@ Peter Edge <peter.edge@gmail.com>
Peter Ericson <pdericson@gmail.com>
Peter Esbensen <pkesbensen@gmail.com>
Peter Jaffe <pjaffe@nevo.com>
Peter Kang <peter@spell.run>
Peter Malmgren <ptmalmgren@gmail.com>
Peter Salvatore <peter@psftw.com>
Peter Volpe <petervo@redhat.com>
@@ -1517,7 +1452,6 @@ Prasanna Gautam <prasannagautam@gmail.com>
Pratik Karki <prertik@outlook.com>
Prayag Verma <prayag.verma@gmail.com>
Priya Wadhwa <priyawadhwa@google.com>
Projjol Banerji <probaner23@gmail.com>
Przemek Hejman <przemyslaw.hejman@gmail.com>
Pure White <daniel48@126.com>
pysqz <randomq@126.com>
@@ -1528,7 +1462,6 @@ Quentin Brossard <qbrossard@gmail.com>
Quentin Perez <qperez@ocs.online.net>
Quentin Tayssier <qtayssier@gmail.com>
r0n22 <cameron.regan@gmail.com>
Radostin Stoyanov <rstoyanov1@gmail.com>
Rafal Jeczalik <rjeczalik@gmail.com>
Rafe Colton <rafael.colton@gmail.com>
Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
@@ -1542,7 +1475,6 @@ Ralph Bean <rbean@redhat.com>
Ramkumar Ramachandra <artagnon@gmail.com>
Ramon Brooker <rbrooker@aetherealmind.com>
Ramon van Alteren <ramon@vanalteren.nl>
RaviTeja Pothana <ravi-teja@live.com>
Ray Tsang <rayt@google.com>
ReadmeCritic <frankensteinbot@gmail.com>
Recursive Madman <recursive.madman@gmx.de>
@@ -1592,7 +1524,6 @@ Roel Van Nyen <roel.vannyen@gmail.com>
Roger Peppe <rogpeppe@gmail.com>
Rohit Jnagal <jnagal@google.com>
Rohit Kadam <rohit.d.kadam@gmail.com>
Rohit Kapur <rkapur@flatiron.com>
Rojin George <rojingeorge@huawei.com>
Roland Huß <roland@jolokia.org>
Roland Kammerer <roland.kammerer@linbit.com>
@@ -1602,9 +1533,6 @@ Roman Dudin <katrmr@gmail.com>
Roman Strashkin <roman.strashkin@gmail.com>
Ron Smits <ron.smits@gmail.com>
Ron Williams <ron.a.williams@gmail.com>
Rong Gao <gaoronggood@163.com>
Rong Zhang <rongzhang@alauda.io>
Rongxiang Song <tinysong1226@gmail.com>
root <docker-dummy@example.com>
root <root@lxdebmas.marist.edu>
root <root@ubuntu-14.04-amd64-vbox>
@@ -1616,10 +1544,8 @@ Rovanion Luckey <rovanion.luckey@gmail.com>
Royce Remer <royceremer@gmail.com>
Rozhnov Alexandr <nox73@ya.ru>
Rudolph Gottesheim <r.gottesheim@loot.at>
Rui Cao <ruicao@alauda.io>
Rui Lopes <rgl@ruilopes.com>
Runshen Zhu <runshen.zhu@gmail.com>
Russ Magee <rmagee@gmail.com>
Ryan Abrams <rdabrams@gmail.com>
Ryan Anderson <anderson.ryanc@gmail.com>
Ryan Aslett <github@mixologic.com>
@@ -1638,7 +1564,6 @@ Ryan Wallner <ryan.wallner@clusterhq.com>
Ryan Zhang <ryan.zhang@docker.com>
ryancooper7 <ryan.cooper7@gmail.com>
RyanDeng <sheldon.d1018@gmail.com>
Ryo Nakao <nakabonne@gmail.com>
Rémy Greinhofer <remy.greinhofer@livelovely.com>
s. rannou <mxs@sbrk.org>
s00318865 <sunyuan3@huawei.com>
@@ -1647,7 +1572,6 @@ Sachin Joshi <sachin_jayant_joshi@hotmail.com>
Sagar Hani <sagarhani33@gmail.com>
Sainath Grandhi <sainath.grandhi@intel.com>
Sakeven Jiang <jc5930@sina.cn>
Salahuddin Khan <salah@docker.com>
Sally O'Malley <somalley@redhat.com>
Sam Abed <sam.abed@gmail.com>
Sam Alba <sam.alba@gmail.com>
@@ -1669,7 +1593,6 @@ Santhosh Manohar <santhosh@docker.com>
sapphiredev <se.imas.kr@gmail.com>
Sargun Dhillon <sargun@netflix.com>
Sascha Andres <sascha.andres@outlook.com>
Sascha Grunert <sgrunert@suse.com>
Satnam Singh <satnam@raintown.org>
Satoshi Amemiya <satoshi_amemiya@voyagegroup.com>
Satoshi Tagomori <tagomoris@gmail.com>
@@ -1696,9 +1619,7 @@ Serge Hallyn <serge.hallyn@ubuntu.com>
Sergey Alekseev <sergey.alekseev.minsk@gmail.com>
Sergey Evstifeev <sergey.evstifeev@gmail.com>
Sergii Kabashniuk <skabashnyuk@codenvy.com>
Sergio Lopez <slp@redhat.com>
Serhat Gülçiçek <serhat25@gmail.com>
SeungUkLee <lsy931106@gmail.com>
Sevki Hasirci <s@sevki.org>
Shane Canon <scanon@lbl.gov>
Shane da Silva <shane@dasilva.io>
@@ -1726,7 +1647,6 @@ Sidhartha Mani <sidharthamn@gmail.com>
sidharthamani <sid@rancher.com>
Silas Sewell <silas@sewell.org>
Silvan Jegen <s.jegen@gmail.com>
Simão Reis <smnrsti@gmail.com>
Simei He <hesimei@zju.edu.cn>
Simon Eskildsen <sirup@sirupsen.com>
Simon Ferquel <simon.ferquel@docker.com>
@@ -1794,11 +1714,10 @@ tang0th <tang0th@gmx.com>
Tangi Colin <tangicolin@gmail.com>
Tatsuki Sugiura <sugi@nemui.org>
Tatsushi Inagaki <e29253@jp.ibm.com>
Taylan Isikdemir <taylani@google.com>
Taylor Jones <monitorjbl@gmail.com>
tbonza <tylers.pile@gmail.com>
Ted M. Young <tedyoung@gmail.com>
Tehmasp Chaudhri <tehmasp@gmail.com>
Tejaswini Duggaraju <naduggar@microsoft.com>
Tejesh Mehta <tejesh.mehta@gmail.com>
terryding77 <550147740@qq.com>
tgic <farmer1992@gmail.com>
@@ -1892,7 +1811,6 @@ Tristan Carel <tristan@cogniteev.com>
Troy Denton <trdenton@gmail.com>
Tycho Andersen <tycho@docker.com>
Tyler Brock <tyler.brock@gmail.com>
Tyler Brown <tylers.pile@gmail.com>
Tzu-Jung Lee <roylee17@gmail.com>
uhayate <uhayate.gong@daocloud.io>
Ulysse Carion <ulyssecarion@gmail.com>
@@ -1953,7 +1871,6 @@ Wassim Dhif <wassimdhif@gmail.com>
Wayne Chang <wayne@neverfear.org>
Wayne Song <wsong@docker.com>
Weerasak Chongnguluam <singpor@gmail.com>
Wei Fu <fuweid89@gmail.com>
Wei Wu <wuwei4455@gmail.com>
Wei-Ting Kuo <waitingkuo0527@gmail.com>
weipeng <weipeng@tuscloud.io>
@@ -1983,24 +1900,17 @@ WiseTrem <shepelyov.g@gmail.com>
Wolfgang Powisch <powo@powo.priv.at>
Wonjun Kim <wonjun.kim@navercorp.com>
xamyzhao <x.amy.zhao@gmail.com>
Xian Chaobo <xianchaobo@huawei.com>
Xianglin Gao <xlgao@zju.edu.cn>
Xianlu Bird <xianlubird@gmail.com>
Xiao YongBiao <xyb4638@gmail.com>
XiaoBing Jiang <s7v7nislands@gmail.com>
Xiaodong Zhang <a4012017@sina.com>
Xiaoxi He <xxhe@alauda.io>
Xiaoxu Chen <chenxiaoxu14@otcaix.iscas.ac.cn>
Xiaoyu Zhang <zhang.xiaoyu33@zte.com.cn>
xichengliudui <1693291525@qq.com>
xiekeyang <xiekeyang@huawei.com>
Ximo Guanter Gonzálbez <joaquin.guantergonzalbez@telefonica.com>
Xinbo Weng <xihuanbo_0521@zju.edu.cn>
Xinzi Zhou <imdreamrunner@gmail.com>
Xiuming Chen <cc@cxm.cc>
Xuecong Liao <satorulogic@gmail.com>
xuzhaokui <cynicholas@gmail.com>
Yadnyawalkya Tale <ytale@redhat.com>
Yahya <ya7yaz@gmail.com>
YAMADA Tsuyoshi <tyamada@minimum2scp.org>
Yamasaki Masahide <masahide.y@gmail.com>
@@ -2020,7 +1930,6 @@ Yihang Ho <hoyihang5@gmail.com>
Ying Li <ying.li@docker.com>
Yohei Ueda <yohei@jp.ibm.com>
Yong Tang <yong.tang.github@outlook.com>
Yongxin Li <yxli@alauda.io>
Yongzhi Pan <panyongzhi@gmail.com>
Yosef Fertel <yfertel@gmail.com>
You-Sheng Yang (楊有勝) <vicamo@gmail.com>
@@ -2031,12 +1940,9 @@ Yu Peng <yu.peng36@zte.com.cn>
Yu-Ju Hong <yjhong@google.com>
Yuan Sun <sunyuan3@huawei.com>
Yuanhong Peng <pengyuanhong@huawei.com>
Yue Zhang <zy675793960@yeah.net>
Yuhao Fang <fangyuhao@gmail.com>
Yuichiro Kaneko <spiketeika@gmail.com>
Yunxiang Huang <hyxqshk@vip.qq.com>
Yurii Rashkovskii <yrashk@gmail.com>
Yusuf Tarık Günaydın <yusuf_tarik@hotmail.com>
Yves Junqueira <yves.junqueira@gmail.com>
Zac Dover <zdover@redhat.com>
Zach Borboa <zachborboa@gmail.com>
@@ -2053,10 +1959,8 @@ ZhangHang <stevezhang2014@gmail.com>
zhangxianwei <xianwei.zw@alibaba-inc.com>
Zhenan Ye <21551168@zju.edu.cn>
zhenghenghuo <zhenghenghuo@zju.edu.cn>
Zhenhai Gao <gaozh1988@live.com>
Zhenkun Bi <bi.zhenkun@zte.com.cn>
Zhou Hao <zhouhao@cn.fujitsu.com>
Zhoulin Xie <zhoulin.xie@daocloud.io>
Zhu Guihua <zhugh.fnst@cn.fujitsu.com>
Zhu Kunjia <zhu.kunjia@zte.com.cn>
Zhuoyun Wei <wzyboy@wzyboy.org>
```
```
@@ -99,7 +99,7 @@ be found.
* Add `--format` option to `docker node ls` [#30424](https://github.com/docker/docker/pull/30424)
* Add `--prune` option to `docker stack deploy` to remove services that are no longer defined in the docker-compose file [#31302](https://github.com/docker/docker/pull/31302)
* Add `PORTS` column for `docker service ls` when using `ingress` mode [#30813](https://github.com/docker/docker/pull/30813)
- Fix unnecessary re-deploying of tasks when environment-variables are used [#32364](https://github.com/docker/docker/pull/32364)
- Fix unnescessary re-deploying of tasks when environment-variables are used [#32364](https://github.com/docker/docker/pull/32364)
- Fix `docker stack deploy` not supporting `endpoint_mode` when deploying from a docker compose file [#32333](https://github.com/docker/docker/pull/32333)
- Proceed with startup if cluster component cannot be created to allow recovering from a broken swarm setup [#31631](https://github.com/docker/docker/pull/31631)
```
Dockerfile (355 changes)
@@ -24,39 +24,32 @@
# the case. Therefore, you don't have to disable it anymore.
#

ARG CROSS="false"
# IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored
ARG GO_VERSION=1.13.15
ARG DEBIAN_FRONTEND=noninteractive
ARG VPNKIT_DIGEST=e508a17cfacc8fd39261d5b4e397df2b953690da577e2c987a47630cd0c42f8e

FROM golang:${GO_VERSION}-buster AS base
ARG APT_MIRROR
RUN sed -ri "s/(httpredir|deb).debian.org/${APT_MIRROR:-deb.debian.org}/g" /etc/apt/sources.list \
&& sed -ri "s/(security).debian.org/${APT_MIRROR:-security.debian.org}/g" /etc/apt/sources.list
ENV GO111MODULE=off
FROM golang:1.10.8 AS base
# FIXME(vdemeester) this is kept for other script depending on it to not fail right away
# Remove this once the other scripts uses something else to detect the version
ENV GO_VERSION 1.10.8
# allow replacing httpredir or deb mirror
ARG APT_MIRROR=deb.debian.org
RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list

FROM base AS criu
ARG DEBIAN_FRONTEND
# Install dependency packages specific to criu
RUN apt-get update && apt-get install -y --no-install-recommends \
libcap-dev \
libnet-dev \
libnl-3-dev \
libprotobuf-c-dev \
libprotobuf-dev \
protobuf-c-compiler \
protobuf-compiler \
python-protobuf \
&& rm -rf /var/lib/apt/lists/*

# Install CRIU for checkpoint/restore support
ARG CRIU_VERSION=3.14
RUN mkdir -p /usr/src/criu \
&& curl -sSL https://github.com/checkpoint-restore/criu/archive/v${CRIU_VERSION}.tar.gz | tar -C /usr/src/criu/ -xz --strip-components=1 \
&& cd /usr/src/criu \
&& make \
&& make PREFIX=/build/ install-criu
ENV CRIU_VERSION 3.6
# Install dependency packages specific to criu
RUN apt-get update && apt-get install -y \
libnet-dev \
libprotobuf-c0-dev \
libprotobuf-dev \
libnl-3-dev \
libcap-dev \
protobuf-compiler \
protobuf-c-compiler \
python-protobuf \
&& mkdir -p /usr/src/criu \
&& curl -sSL https://github.com/checkpoint-restore/criu/archive/v${CRIU_VERSION}.tar.gz | tar -C /usr/src/criu/ -xz --strip-components=1 \
&& cd /usr/src/criu \
&& make \
&& make PREFIX=/build/ install-criu

FROM base AS registry
# Install two versions of the registry. The first is an older version that
@@ -66,181 +59,115 @@ FROM base AS registry
ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd
ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \
&& (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \
&& GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \
go build -buildmode=pie -o /build/registry-v2 github.com/docker/distribution/cmd/registry \
&& case $(dpkg --print-architecture) in \
amd64|ppc64*|s390x) \
(cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1"); \
GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \
go build -buildmode=pie -o /build/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \
;; \
esac \
&& rm -rf "$GOPATH"
&& export GOPATH="$(mktemp -d)" \
&& git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \
&& (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \
&& GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \
go build -buildmode=pie -o /build/registry-v2 github.com/docker/distribution/cmd/registry \
&& case $(dpkg --print-architecture) in \
amd64|ppc64*|s390x) \
(cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1"); \
GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \
go build -buildmode=pie -o /build/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \
;; \
esac \
&& rm -rf "$GOPATH"

FROM base AS docker-py
# Get the "docker-py" source so we can run their integration tests
ENV DOCKER_PY_COMMIT 8b246db271a85d6541dc458838627e89c683e42f
RUN git clone https://github.com/docker/docker-py.git /build \
&& cd /build \
&& git checkout -q $DOCKER_PY_COMMIT

FROM base AS swagger
# Install go-swagger for validating swagger.yaml
# This is https://github.com/kolyshkin/go-swagger/tree/golang-1.13-fix
# TODO: move to under moby/ or fix upstream go-swagger to work for us.
ENV GO_SWAGGER_COMMIT 5793aa66d4b4112c2602c716516e24710e4adbb5
ENV GO_SWAGGER_COMMIT c28258affb0b6251755d92489ef685af8d4ff3eb
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone https://github.com/kolyshkin/go-swagger.git "$GOPATH/src/github.com/go-swagger/go-swagger" \
&& (cd "$GOPATH/src/github.com/go-swagger/go-swagger" && git checkout -q "$GO_SWAGGER_COMMIT") \
&& go build -o /build/swagger github.com/go-swagger/go-swagger/cmd/swagger \
&& rm -rf "$GOPATH"
&& export GOPATH="$(mktemp -d)" \
&& git clone https://github.com/go-swagger/go-swagger.git "$GOPATH/src/github.com/go-swagger/go-swagger" \
&& (cd "$GOPATH/src/github.com/go-swagger/go-swagger" && git checkout -q "$GO_SWAGGER_COMMIT") \
&& go build -o /build/swagger github.com/go-swagger/go-swagger/cmd/swagger \
&& rm -rf "$GOPATH"

FROM base AS frozen-images
ARG DEBIAN_FRONTEND
RUN apt-get update && apt-get install -y --no-install-recommends \
ca-certificates \
jq \
&& rm -rf /var/lib/apt/lists/*
RUN apt-get update && apt-get install -y jq ca-certificates --no-install-recommends
# Get useful and necessary Hub images so we can "docker load" locally instead of pulling
COPY contrib/download-frozen-image-v2.sh /
RUN /download-frozen-image-v2.sh /build \
buildpack-deps:jessie@sha256:dd86dced7c9cd2a724e779730f0a53f93b7ef42228d4344b25ce9a42a1486251 \
busybox:latest@sha256:bbc3a03235220b170ba48a157dd097dd1379299370e1ed99ce976df0355d24f0 \
busybox:glibc@sha256:0b55a30394294ab23b9afd58fab94e61a923f5834fba7ddbae7f8e0c11ba85e6 \
debian:jessie@sha256:287a20c5f73087ab406e6b364833e3fb7b3ae63ca0eb3486555dc27ed32c6e60 \
hello-world:latest@sha256:be0cd392e45be79ffeffa6b05338b98ebb16c87b255f48e297ec7f98e123905c
buildpack-deps:jessie@sha256:dd86dced7c9cd2a724e779730f0a53f93b7ef42228d4344b25ce9a42a1486251 \
busybox:latest@sha256:bbc3a03235220b170ba48a157dd097dd1379299370e1ed99ce976df0355d24f0 \
busybox:glibc@sha256:0b55a30394294ab23b9afd58fab94e61a923f5834fba7ddbae7f8e0c11ba85e6 \
debian:jessie@sha256:287a20c5f73087ab406e6b364833e3fb7b3ae63ca0eb3486555dc27ed32c6e60 \
hello-world:latest@sha256:be0cd392e45be79ffeffa6b05338b98ebb16c87b255f48e297ec7f98e123905c
# See also ensureFrozenImagesLinux() in "integration-cli/fixtures_linux_daemon_test.go" (which needs to be updated when adding images to this list)

FROM base AS cross-false
# Just a little hack so we don't have to install these deps twice, once for runc and once for dockerd
FROM base AS runtime-dev
RUN apt-get update && apt-get install -y \
libapparmor-dev \
libseccomp-dev

FROM base AS cross-true
ARG DEBIAN_FRONTEND
RUN dpkg --add-architecture arm64
RUN dpkg --add-architecture armel
RUN dpkg --add-architecture armhf
RUN if [ "$(go env GOHOSTARCH)" = "amd64" ]; then \
apt-get update && apt-get install -y --no-install-recommends \
crossbuild-essential-arm64 \
crossbuild-essential-armel \
crossbuild-essential-armhf \
&& rm -rf /var/lib/apt/lists/*; \
fi

FROM cross-${CROSS} as dev-base

FROM dev-base AS runtime-dev-cross-false
ARG DEBIAN_FRONTEND
RUN apt-get update && apt-get install -y --no-install-recommends \
libapparmor-dev \
libseccomp-dev \
&& rm -rf /var/lib/apt/lists/*

FROM cross-true AS runtime-dev-cross-true
ARG DEBIAN_FRONTEND
# These crossbuild packages rely on gcc-<arch>, but this doesn't want to install
# on non-amd64 systems.
# Additionally, the crossbuild-amd64 is currently only on debian:buster, so
# other architectures cannot crossbuild amd64.
RUN if [ "$(go env GOHOSTARCH)" = "amd64" ]; then \
apt-get update && apt-get install -y --no-install-recommends \
libapparmor-dev:arm64 \
libapparmor-dev:armel \
libapparmor-dev:armhf \
libseccomp-dev:arm64 \
libseccomp-dev:armel \
libseccomp-dev:armhf \
# install this arches seccomp here due to compat issues with the v0 builder
# This is as opposed to inheriting from runtime-dev-cross-false
libapparmor-dev \
libseccomp-dev \
&& rm -rf /var/lib/apt/lists/*; \
fi

FROM runtime-dev-cross-${CROSS} AS runtime-dev

FROM base AS tomlv
ENV INSTALL_BINARY_NAME=tomlv
ARG TOMLV_COMMIT
COPY hack/dockerfile/install/install.sh ./install.sh
COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
RUN PREFIX=/build ./install.sh $INSTALL_BINARY_NAME
RUN PREFIX=/build/ ./install.sh $INSTALL_BINARY_NAME

FROM base AS vndr
ENV INSTALL_BINARY_NAME=vndr
ARG VNDR_COMMIT
COPY hack/dockerfile/install/install.sh ./install.sh
COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
RUN PREFIX=/build ./install.sh $INSTALL_BINARY_NAME
RUN PREFIX=/build/ ./install.sh $INSTALL_BINARY_NAME

FROM dev-base AS containerd
ARG DEBIAN_FRONTEND
ARG CONTAINERD_COMMIT
RUN apt-get update && apt-get install -y --no-install-recommends \
libbtrfs-dev \
&& rm -rf /var/lib/apt/lists/*
FROM base AS containerd
RUN apt-get update && apt-get install -y btrfs-tools
ENV INSTALL_BINARY_NAME=containerd
COPY hack/dockerfile/install/install.sh ./install.sh
COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
RUN PREFIX=/build ./install.sh $INSTALL_BINARY_NAME
RUN PREFIX=/build/ ./install.sh $INSTALL_BINARY_NAME

FROM dev-base AS proxy
FROM base AS proxy
ENV INSTALL_BINARY_NAME=proxy
ARG LIBNETWORK_COMMIT
COPY hack/dockerfile/install/install.sh ./install.sh
COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
RUN PREFIX=/build ./install.sh $INSTALL_BINARY_NAME
RUN PREFIX=/build/ ./install.sh $INSTALL_BINARY_NAME

FROM base AS gometalinter
ENV INSTALL_BINARY_NAME=gometalinter
COPY hack/dockerfile/install/install.sh ./install.sh
COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
RUN PREFIX=/build ./install.sh $INSTALL_BINARY_NAME
RUN PREFIX=/build/ ./install.sh $INSTALL_BINARY_NAME

FROM base AS gotestsum
ENV INSTALL_BINARY_NAME=gotestsum
ARG GOTESTSUM_COMMIT
COPY hack/dockerfile/install/install.sh ./install.sh
COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
RUN PREFIX=/build ./install.sh $INSTALL_BINARY_NAME

FROM dev-base AS dockercli
FROM base AS dockercli
ENV INSTALL_BINARY_NAME=dockercli
ARG DOCKERCLI_CHANNEL
ARG DOCKERCLI_VERSION
COPY hack/dockerfile/install/install.sh ./install.sh
COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
RUN PREFIX=/build ./install.sh $INSTALL_BINARY_NAME

FROM runtime-dev AS runc
ENV INSTALL_BINARY_NAME=runc
ARG RUNC_COMMIT
ARG RUNC_BUILDTAGS
COPY hack/dockerfile/install/install.sh ./install.sh
COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
RUN PREFIX=/build ./install.sh $INSTALL_BINARY_NAME

FROM dev-base AS tini
ARG DEBIAN_FRONTEND
ARG TINI_COMMIT
RUN apt-get update && apt-get install -y --no-install-recommends \
cmake \
vim-common \
&& rm -rf /var/lib/apt/lists/*
COPY hack/dockerfile/install/install.sh ./install.sh
ENV INSTALL_BINARY_NAME=tini
COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
RUN PREFIX=/build ./install.sh $INSTALL_BINARY_NAME

FROM dev-base AS rootlesskit
ENV INSTALL_BINARY_NAME=rootlesskit
ARG ROOTLESSKIT_COMMIT
COPY hack/dockerfile/install/install.sh ./install.sh
COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
RUN PREFIX=/build/ ./install.sh $INSTALL_BINARY_NAME
COPY ./contrib/dockerd-rootless.sh /build

FROM djs55/vpnkit@sha256:${VPNKIT_DIGEST} AS vpnkit
FROM runtime-dev AS runc
ENV INSTALL_BINARY_NAME=runc
COPY hack/dockerfile/install/install.sh ./install.sh
COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
RUN PREFIX=/build/ ./install.sh $INSTALL_BINARY_NAME

FROM base AS tini
RUN apt-get update && apt-get install -y cmake vim-common
COPY hack/dockerfile/install/install.sh ./install.sh
ENV INSTALL_BINARY_NAME=tini
COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
RUN PREFIX=/build/ ./install.sh $INSTALL_BINARY_NAME

# TODO: Some of this is only really needed for testing, it would be nice to split this up
FROM runtime-dev AS dev
ARG DEBIAN_FRONTEND
RUN groupadd -r docker
RUN useradd --create-home --gid docker unprivilegeduser
# Let us use a .bashrc file
@@ -251,66 +178,66 @@ RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker
RUN ldconfig
# This should only install packages that are specifically needed for the dev environment and nothing else
# Do you really need to add another package here? Can it be done in a different build stage?
RUN apt-get update && apt-get install -y --no-install-recommends \
apparmor \
aufs-tools \
bash-completion \
binutils-mingw-w64 \
libbtrfs-dev \
bzip2 \
g++-mingw-w64-x86-64 \
iptables \
jq \
libcap2-bin \
libdevmapper-dev \
libnet1 \
libnl-3-200 \
libprotobuf-c1 \
libsystemd-dev \
libudev-dev \
net-tools \
pigz \
python3-pip \
python3-setuptools \
python3-wheel \
thin-provisioning-tools \
vim \
vim-common \
xfsprogs \
xz-utils \
zip \
&& rm -rf /var/lib/apt/lists/*

# Switch to use iptables instead of nftables (to match the host machine)
RUN update-alternatives --set iptables /usr/sbin/iptables-legacy || true \
&& update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy || true \
&& update-alternatives --set arptables /usr/sbin/arptables-legacy || true

RUN pip3 install yamllint==1.16.0

COPY --from=dockercli /build/ /usr/local/cli
RUN apt-get update && apt-get install -y \
apparmor \
aufs-tools \
bash-completion \
btrfs-tools \
iptables \
jq \
libcap2-bin \
libdevmapper-dev \
libudev-dev \
libsystemd-dev \
binutils-mingw-w64 \
g++-mingw-w64-x86-64 \
net-tools \
pigz \
python-backports.ssl-match-hostname \
python-dev \
python-mock \
python-pip \
python-requests \
python-setuptools \
python-websocket \
python-wheel \
thin-provisioning-tools \
vim \
vim-common \
xfsprogs \
zip \
bzip2 \
xz-utils \
--no-install-recommends
COPY --from=swagger /build/swagger* /usr/local/bin/
COPY --from=frozen-images /build/ /docker-frozen-images
COPY --from=swagger /build/ /usr/local/bin/
COPY --from=tomlv /build/ /usr/local/bin/
COPY --from=tini /build/ /usr/local/bin/
COPY --from=registry /build/ /usr/local/bin/
COPY --from=criu /build/ /usr/local/
COPY --from=vndr /build/ /usr/local/bin/
COPY --from=gotestsum /build/ /usr/local/bin/
COPY --from=gometalinter /build/ /usr/local/bin/
COPY --from=runc /build/ /usr/local/bin/
COPY --from=containerd /build/ /usr/local/bin/
COPY --from=rootlesskit /build/ /usr/local/bin/
COPY --from=vpnkit /vpnkit /usr/local/bin/vpnkit.x86_64
COPY --from=proxy /build/ /usr/local/bin/
COPY --from=gometalinter /build/ /usr/local/bin/
COPY --from=tomlv /build/ /usr/local/bin/
COPY --from=vndr /build/ /usr/local/bin/
COPY --from=tini /build/ /usr/local/bin/
COPY --from=runc /build/ /usr/local/bin/
COPY --from=containerd /build/ /usr/local/bin/
COPY --from=proxy /build/ /usr/local/bin/
COPY --from=dockercli /build/ /usr/local/cli
COPY --from=registry /build/registry* /usr/local/bin/
COPY --from=criu /build/ /usr/local/
COPY --from=docker-py /build/ /docker-py
# TODO: This is for the docker-py tests, which shouldn't really be needed for
# this image, but currently CI is expecting to run this image. This should be
# split out into a separate image, including all the `python-*` deps installed
# above.
RUN cd /docker-py \
&& pip install docker-pycreds==0.2.1 \
&& pip install yamllint==1.5.0 \
&& pip install -r test-requirements.txt

ENV PATH=/usr/local/cli:$PATH
ENV DOCKER_BUILDTAGS apparmor seccomp selinux
# Options for hack/validate/gometalinter
ENV GOMETALINTER_OPTS="--deadline=2m"
WORKDIR /go/src/github.com/docker/docker
VOLUME /var/lib/docker
# Wrap all commands in the "docker-in-docker" script to allow nested containers
ENTRYPOINT ["hack/dind"]

FROM dev AS final
# Upload docker source
COPY . /go/src/github.com/docker/docker

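For orientation, a minimal sketch of how this development image is typically driven; the invocation pattern matches what the Jenkinsfile later in this compare uses, and the tag "docker:dev" is a placeholder:

    # Build the dev image from the repository root
    docker build --force-rm --build-arg APT_MIRROR -t docker:dev .
    # Run a build target inside it (--privileged is needed for the nested daemon)
    docker run --rm -t --privileged \
        -v "$PWD/bundles:/go/src/github.com/docker/docker/bundles" \
        docker:dev \
        hack/make.sh dynbinary-daemon
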
@@ -1,67 +1,49 @@
ARG GO_VERSION=1.13.15
## Step 1: Build tests
FROM golang:1.10.8-alpine3.7 as builder

FROM golang:${GO_VERSION}-alpine AS base
ENV GO111MODULE=off
RUN apk --no-cache add \
RUN apk add --update \
bash \
btrfs-progs-dev \
build-base \
curl \
lvm2-dev \
jq
jq \
&& rm -rf /var/cache/apk/*

RUN mkdir -p /build/
RUN mkdir -p /go/src/github.com/docker/docker/
WORKDIR /go/src/github.com/docker/docker/

FROM base AS frozen-images
# Get useful and necessary Hub images so we can "docker load" locally instead of pulling
COPY contrib/download-frozen-image-v2.sh /
RUN /download-frozen-image-v2.sh /build \
# Generate frozen images
COPY contrib/download-frozen-image-v2.sh contrib/download-frozen-image-v2.sh
RUN contrib/download-frozen-image-v2.sh /output/docker-frozen-images \
buildpack-deps:jessie@sha256:dd86dced7c9cd2a724e779730f0a53f93b7ef42228d4344b25ce9a42a1486251 \
busybox:latest@sha256:bbc3a03235220b170ba48a157dd097dd1379299370e1ed99ce976df0355d24f0 \
busybox:glibc@sha256:0b55a30394294ab23b9afd58fab94e61a923f5834fba7ddbae7f8e0c11ba85e6 \
debian:jessie@sha256:287a20c5f73087ab406e6b364833e3fb7b3ae63ca0eb3486555dc27ed32c6e60 \
hello-world:latest@sha256:be0cd392e45be79ffeffa6b05338b98ebb16c87b255f48e297ec7f98e123905c
# See also ensureFrozenImagesLinux() in "integration-cli/fixtures_linux_daemon_test.go" (which needs to be updated when adding images to this list)

FROM base AS dockercli
ENV INSTALL_BINARY_NAME=dockercli
COPY hack/dockerfile/install/install.sh ./install.sh
COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
RUN PREFIX=/build ./install.sh $INSTALL_BINARY_NAME

# Build DockerSuite.TestBuild* dependency
FROM base AS contrib
COPY contrib/syscall-test /build/syscall-test
COPY contrib/httpserver/Dockerfile /build/httpserver/Dockerfile
COPY contrib/httpserver contrib/httpserver
RUN CGO_ENABLED=0 go build -buildmode=pie -o /build/httpserver/httpserver github.com/docker/docker/contrib/httpserver

# Build the integration tests and copy the resulting binaries to /build/tests
FROM base AS builder
# Install dockercli
# Please edit hack/dockerfile/install/<name>.installer to update them.
COPY hack/dockerfile/install hack/dockerfile/install
RUN ./hack/dockerfile/install/install.sh dockercli

# Set tag and add sources
COPY . .
# Copy test sources so tests that use assert can print errors
RUN mkdir -p /build${PWD} && find integration integration-cli -name \*_test.go -exec cp --parents '{}' /build${PWD} \;
# Build and install test binaries
ARG DOCKER_GITCOMMIT=undefined
ARG DOCKER_GITCOMMIT
ENV DOCKER_GITCOMMIT=${DOCKER_GITCOMMIT:-undefined}
ADD . .

# Build DockerSuite.TestBuild* dependency
RUN CGO_ENABLED=0 go build -buildmode=pie -o /output/httpserver github.com/docker/docker/contrib/httpserver

# Build the integration tests and copy the resulting binaries to /output/tests
RUN hack/make.sh build-integration-test-binary
RUN mkdir -p /build/tests && find . -name test.main -exec cp --parents '{}' /build/tests \;
RUN mkdir -p /output/tests && find . -name test.main -exec cp --parents '{}' /output/tests \;

## Generate testing image
FROM alpine:3.10 as runner

ENV DOCKER_REMOTE_DAEMON=1
ENV DOCKER_INTEGRATION_DAEMON_DEST=/
ENTRYPOINT ["/scripts/run.sh"]

# Add an unprivileged user to be used for tests which need it
RUN addgroup docker && adduser -D -G docker unprivilegeduser -s /bin/ash
## Step 2: Generate testing image
FROM alpine:3.7 as runner

# GNU tar is used for generating the emptyfs image
RUN apk --no-cache add \
RUN apk add --update \
bash \
ca-certificates \
g++ \
@@ -69,16 +51,24 @@ RUN apk --no-cache add \
iptables \
pigz \
tar \
xz
xz \
&& rm -rf /var/cache/apk/*

COPY hack/test/e2e-run.sh /scripts/run.sh
COPY hack/make/.ensure-emptyfs /scripts/ensure-emptyfs.sh
# Add an unprivileged user to be used for tests which need it
RUN addgroup docker && adduser -D -G docker unprivilegeduser -s /bin/ash

COPY integration/testdata /tests/integration/testdata
COPY integration/build/testdata /tests/integration/build/testdata
COPY integration-cli/fixtures /tests/integration-cli/fixtures
COPY contrib/httpserver/Dockerfile /tests/contrib/httpserver/Dockerfile
COPY contrib/syscall-test /tests/contrib/syscall-test
COPY integration-cli/fixtures /tests/integration-cli/fixtures

COPY --from=frozen-images /build/ /docker-frozen-images
COPY --from=dockercli /build/ /usr/bin/
COPY --from=contrib /build/ /tests/contrib/
COPY --from=builder /build/ /
COPY hack/test/e2e-run.sh /scripts/run.sh
COPY hack/make/.ensure-emptyfs /scripts/ensure-emptyfs.sh

COPY --from=builder /output/docker-frozen-images /docker-frozen-images
COPY --from=builder /output/httpserver /tests/contrib/httpserver/httpserver
COPY --from=builder /output/tests /tests
COPY --from=builder /usr/local/bin/docker /usr/bin/docker

ENV DOCKER_REMOTE_DAEMON=1 DOCKER_INTEGRATION_DAEMON_DEST=/

ENTRYPOINT ["/scripts/run.sh"]

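The hunk above appears to be the end-to-end test image (its file header was lost in extraction; the Jenkinsfile's "Build e2e image" stage below builds it as Dockerfile.e2e). A sketch of that invocation, assuming the filename and substituting a local commit for ${GIT_COMMIT}:

    docker build --build-arg DOCKER_GITCOMMIT=$(git rev-parse --short HEAD) \
        -t moby-e2e-test -f Dockerfile.e2e .
    # The runner stage sets DOCKER_REMOTE_DAEMON=1, so point the tests at an
    # existing daemon when running (the address here is an assumption):
    docker run --rm -e DOCKER_HOST=tcp://127.0.0.1:2375 moby-e2e-test
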
@@ -5,10 +5,7 @@

# This represents the bare minimum required to build and test Docker.

ARG GO_VERSION=1.13.15

FROM golang:${GO_VERSION}-stretch
ENV GO111MODULE=off
FROM debian:stretch

# allow replacing httpredir or deb mirror
ARG APT_MIRROR=deb.debian.org
@@ -40,6 +37,18 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
vim-common \
&& rm -rf /var/lib/apt/lists/*

# Install Go
# IMPORTANT: If the version of Go is updated, the Windows to Linux CI machines
# will need updating, to avoid errors. Ping #docker-maintainers on IRC
# with a heads-up.
# IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored
ENV GO_VERSION 1.10.8
RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" \
| tar -xzC /usr/local
ENV PATH /go/bin:/usr/local/go/bin:$PATH
ENV GOPATH /go
ENV CGO_LDFLAGS -L/lib

# Install runc, containerd, tini and docker-proxy
# Please edit hack/dockerfile/install/<name>.installer to update them.
COPY hack/dockerfile/install hack/dockerfile/install

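This hunk's file header was also lost in extraction; the "bare minimum required to build and test Docker" comment suggests it is the repository's Dockerfile.simple. A hedged sketch of using such an image, assuming that filename and an illustrative tag:

    docker build -t docker-simple -f Dockerfile.simple .
    docker run --rm -t --privileged \
        -v "$PWD:/go/src/github.com/docker/docker" \
        docker-simple \
        hack/make.sh binary
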
@@ -45,8 +45,8 @@
#
# 1. Clone the sources from github.com:
#
# >> git clone https://github.com/docker/docker.git C:\gopath\src\github.com\docker\docker
# >> Cloning into 'C:\gopath\src\github.com\docker\docker'...
# >> git clone https://github.com/docker/docker.git C:\go\src\github.com\docker\docker
# >> Cloning into 'C:\go\src\github.com\docker\docker'...
# >> remote: Counting objects: 186216, done.
# >> remote: Compressing objects: 100% (21/21), done.
# >> remote: Total 186216 (delta 5), reused 0 (delta 0), pack-reused 186195
@@ -59,7 +59,7 @@
#
# 2. Change directory to the cloned docker sources:
#
# >> cd C:\gopath\src\github.com\docker\docker
# >> cd C:\go\src\github.com\docker\docker
#
#
# 3. Build a docker image with the components required to build the docker binaries from source
@@ -79,8 +79,8 @@
# 5. Copy the binaries out of the container, replacing HostPath with an appropriate destination
# folder on the host system where you want the binaries to be located.
#
# >> docker cp binaries:C:\gopath\src\github.com\docker\docker\bundles\docker.exe C:\HostPath\docker.exe
# >> docker cp binaries:C:\gopath\src\github.com\docker\docker\bundles\dockerd.exe C:\HostPath\dockerd.exe
# >> docker cp binaries:C:\go\src\github.com\docker\docker\bundles\docker.exe C:\HostPath\docker.exe
# >> docker cp binaries:C:\go\src\github.com\docker\docker\bundles\dockerd.exe C:\HostPath\dockerd.exe
#
#
# 6. (Optional) Remove the interim container holding the built executable binaries:
@@ -147,33 +147,23 @@
# The docker integration tests do not currently run in a container on Windows, predominantly
# due to Windows not supporting privileged mode, so anything using a volume would fail.
# They (along with the rest of the docker CI suite) can be run using
# https://github.com/kevpar/docker-w2wCIScripts/blob/master/runCI/Invoke-DockerCI.ps1.
# https://github.com/jhowardmsft/docker-w2wCIScripts/blob/master/runCI/Invoke-DockerCI.ps1.
#
# -----------------------------------------------------------------------------------------

# The number of build steps below are explicitly minimised to improve performance.

# Extremely important - do not change the following line to reference a "specific" image,
# such as `mcr.microsoft.com/windows/servercore:ltsc2019`. If using this Dockerfile in process
# isolated containers, the kernel of the host must match the container image, and hence
# would fail between Windows Server 2016 (aka RS1) and Windows Server 2019 (aka RS5).
# It is expected that the image `microsoft/windowsservercore:latest` is present, and matches
# the host's kernel version before doing a build.
FROM microsoft/windowsservercore

# Use PowerShell as the default shell
SHELL ["powershell", "-Command", "$ErrorActionPreference = 'Stop'; $ProgressPreference = 'SilentlyContinue';"]

ARG GO_VERSION=1.13.15

# Environment variable notes:
# - GO_VERSION must be consistent with 'Dockerfile' used by Linux.
# - FROM_DOCKERFILE is used for detection of building within a container.
ENV GO_VERSION=${GO_VERSION} `
ENV GO_VERSION=1.10.8 `
GIT_VERSION=2.11.1 `
GOPATH=C:\gopath `
GO111MODULE=off `
GOPATH=C:\go `
FROM_DOCKERFILE=1

RUN `
@@ -215,17 +205,16 @@ RUN `
Download-File $location C:\gitsetup.zip; `
`
Write-Host INFO: Downloading go...; `
$dlGoVersion=$Env:GO_VERSION -replace '\.0$',''; `
Download-File "https://golang.org/dl/go${dlGoVersion}.windows-amd64.zip" C:\go.zip; `
Download-File $('https://golang.org/dl/go'+$Env:GO_VERSION+'.windows-amd64.zip') C:\go.zip; `
`
Write-Host INFO: Downloading compiler 1 of 3...; `
Download-File https://raw.githubusercontent.com/moby/docker-tdmgcc/master/gcc.zip C:\gcc.zip; `
Download-File https://raw.githubusercontent.com/jhowardmsft/docker-tdmgcc/master/gcc.zip C:\gcc.zip; `
`
Write-Host INFO: Downloading compiler 2 of 3...; `
Download-File https://raw.githubusercontent.com/moby/docker-tdmgcc/master/runtime.zip C:\runtime.zip; `
Download-File https://raw.githubusercontent.com/jhowardmsft/docker-tdmgcc/master/runtime.zip C:\runtime.zip; `
`
Write-Host INFO: Downloading compiler 3 of 3...; `
Download-File https://raw.githubusercontent.com/moby/docker-tdmgcc/master/binutils.zip C:\binutils.zip; `
Download-File https://raw.githubusercontent.com/jhowardmsft/docker-tdmgcc/master/binutils.zip C:\binutils.zip; `
`
Write-Host INFO: Extracting git...; `
Expand-Archive C:\gitsetup.zip C:\git-tmp; `
@@ -250,7 +239,7 @@ RUN `
Remove-Item C:\gitsetup.zip; `
`
Write-Host INFO: Creating source directory...; `
New-Item -ItemType Directory -Path ${GOPATH}\src\github.com\docker\docker | Out-Null; `
New-Item -ItemType Directory -Path C:\go\src\github.com\docker\docker | Out-Null; `
`
Write-Host INFO: Configuring git core.autocrlf...; `
C:\git\cmd\git config --global core.autocrlf true; `
@@ -261,7 +250,7 @@ RUN `
ENTRYPOINT ["powershell.exe"]

# Set the working directory to the location of the sources
WORKDIR ${GOPATH}\src\github.com\docker\docker
WORKDIR C:\go\src\github.com\docker\docker

# Copy the sources into the container
COPY . .

877
Jenkinsfile
vendored
@@ -1,877 +0,0 @@
#!groovy
pipeline {
agent none

options {
buildDiscarder(logRotator(daysToKeepStr: '30'))
timeout(time: 2, unit: 'HOURS')
timestamps()
}
parameters {
booleanParam(name: 'unit_validate', defaultValue: true, description: 'amd64 (x86_64) unit tests and vendor check')
booleanParam(name: 'amd64', defaultValue: true, description: 'amd64 (x86_64) Build/Test')
booleanParam(name: 's390x', defaultValue: true, description: 'IBM Z (s390x) Build/Test')
booleanParam(name: 'ppc64le', defaultValue: true, description: 'PowerPC (ppc64le) Build/Test')
booleanParam(name: 'windowsRS1', defaultValue: false, description: 'Windows 2016 (RS1) Build/Test')
booleanParam(name: 'windowsRS5', defaultValue: true, description: 'Windows 2019 (RS5) Build/Test')
booleanParam(name: 'skip_dco', defaultValue: false, description: 'Skip the DCO check')
}
environment {
DOCKER_BUILDKIT = '1'
DOCKER_EXPERIMENTAL = '1'
DOCKER_GRAPHDRIVER = 'overlay2'
APT_MIRROR = 'cdn-fastly.deb.debian.org'
CHECK_CONFIG_COMMIT = '78405559cfe5987174aa2cb6463b9b2c1b917255'
TESTDEBUG = '0'
TIMEOUT = '120m'
}
stages {
stage('pr-hack') {
when { changeRequest() }
steps {
script {
echo "Workaround for PR auto-cancel feature. Borrowed from https://issues.jenkins-ci.org/browse/JENKINS-43353"
def buildNumber = env.BUILD_NUMBER as int
if (buildNumber > 1) milestone(buildNumber - 1)
milestone(buildNumber)
}
}
}
stage('DCO-check') {
when {
beforeAgent true
expression { !params.skip_dco }
}
agent { label 'amd64 && ubuntu-1804 && overlay2' }
steps {
sh '''
docker run --rm \
-v "$WORKSPACE:/workspace" \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
alpine sh -c 'apk add --no-cache -q bash git openssh-client && cd /workspace && hack/validate/dco'
'''
}
}
stage('Build') {
parallel {
stage('unit-validate') {
when {
beforeAgent true
expression { params.unit_validate }
}
agent { label 'amd64 && ubuntu-1804 && overlay2' }

stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh 'docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .'
}
}
stage("Validate") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
-v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/validate/default
'''
}
}
stage("Docker-py") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary-daemon \
test-docker-py
'''
}
post {
always {
junit testResults: 'bundles/test-docker-py/junit-report.xml', allowEmptyResults: true

sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''

sh '''
echo 'Chowning /workspace to jenkins user'
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''

catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=docker-py
echo "Creating ${bundleName}-bundles.tar.gz"
tar -czf ${bundleName}-bundles.tar.gz bundles/test-docker-py/*.xml bundles/test-docker-py/*.log
'''

archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
}
}
stage("Static") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
docker:${GIT_COMMIT} \
hack/make.sh binary-daemon
'''
}
}
stage("Cross") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
docker:${GIT_COMMIT} \
hack/make.sh cross
'''
}
}
// needs to be last stage that calls make.sh for the junit report to work
stage("Unit tests") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/test/unit
'''
}
post {
always {
junit testResults: 'bundles/junit-report.xml', allowEmptyResults: true
}
}
}
stage("Validate vendor") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/validate/vendor
'''
}
}
stage("Build e2e image") {
steps {
sh '''
echo "Building e2e image"
docker build --build-arg DOCKER_GITCOMMIT=${GIT_COMMIT} -t moby-e2e-test -f Dockerfile.e2e .
'''
}
}
}

post {
always {
sh '''
echo 'Ensuring container killed.'
docker rm -vf docker-pr$BUILD_NUMBER || true
'''

sh '''
echo 'Chowning /workspace to jenkins user'
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''

catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=unit
echo "Creating ${bundleName}-bundles.tar.gz"
tar -czvf ${bundleName}-bundles.tar.gz bundles/junit-report.xml bundles/go-test-report.json bundles/profile.out
'''

archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('amd64') {
when {
beforeAgent true
expression { params.amd64 }
}
agent { label 'amd64 && ubuntu-1804 && overlay2' }

stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
# todo: include ip_vs in base image
sudo modprobe ip_vs

docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Run tests") {
steps {
sh '''#!/bin/bash
# bash is needed so 'jobs -p' works properly
# it also accepts setting inline envvars for functions without explicitly exporting
set -x

run_tests() {
[ -n "$TESTDEBUG" ] && rm= || rm=--rm;
docker run $rm -t --privileged \
-v "$WORKSPACE/bundles/${TEST_INTEGRATION_DEST}:/go/src/github.com/docker/docker/bundles" \
-v "$WORKSPACE/bundles/dynbinary-daemon:/go/src/github.com/docker/docker/bundles/dynbinary-daemon" \
-v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
--name "$CONTAINER_NAME" \
-e KEEPBUNDLE=1 \
-e TESTDEBUG \
-e TESTFLAGS \
-e TEST_SKIP_INTEGRATION \
-e TEST_SKIP_INTEGRATION_CLI \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
"$1" \
test-integration
}

trap "exit" INT TERM
trap 'pids=$(jobs -p); echo "Remaining pids to kill: [$pids]"; [ -z "$pids" ] || kill $pids' EXIT

CONTAINER_NAME=docker-pr$BUILD_NUMBER

docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
-v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
--name ${CONTAINER_NAME}-build \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary-daemon

# flaky + integration
TEST_INTEGRATION_DEST=1 CONTAINER_NAME=${CONTAINER_NAME}-1 TEST_SKIP_INTEGRATION_CLI=1 run_tests test-integration-flaky &

# integration-cli first set
TEST_INTEGRATION_DEST=2 CONTAINER_NAME=${CONTAINER_NAME}-2 TEST_SKIP_INTEGRATION=1 TESTFLAGS="-test.run Test(DockerSuite|DockerNetworkSuite|DockerHubPullSuite|DockerRegistrySuite|DockerSchema1RegistrySuite|DockerRegistryAuthTokenSuite|DockerRegistryAuthHtpasswdSuite)/" run_tests &

# integration-cli second set
TEST_INTEGRATION_DEST=3 CONTAINER_NAME=${CONTAINER_NAME}-3 TEST_SKIP_INTEGRATION=1 TESTFLAGS="-test.run Test(DockerSwarmSuite|DockerDaemonSuite|DockerExternalVolumeSuite)/" run_tests &

c=0
for job in $(jobs -p); do
wait ${job} || c=$?
done
exit $c
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}

post {
always {
sh '''
echo "Ensuring container killed."
cids=$(docker ps -aq -f name=docker-pr${BUILD_NUMBER}-*)
[ -n "$cids" ] && docker rm -vf $cids || true
'''

sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''

catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=amd64
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''

archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('s390x') {
when {
beforeAgent true
expression { params.s390x }
}
agent { label 's390x-ubuntu-1804' }

stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Unit tests") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/test/unit
'''
}
post {
always {
junit testResults: 'bundles/junit-report.xml', allowEmptyResults: true
}
}
}
stage("Integration tests") {
environment { TEST_SKIP_INTEGRATION_CLI = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TESTDEBUG \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}

post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''

sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''

catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=s390x-integration
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''

archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('s390x integration-cli') {
when {
beforeAgent true
not { changeRequest() }
expression { params.s390x }
}
agent { label 's390x-ubuntu-1804' }

stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Integration-cli tests") {
environment { TEST_SKIP_INTEGRATION = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TEST_SKIP_INTEGRATION \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}

post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''

sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''

catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=s390x-integration-cli
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''

archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('ppc64le') {
when {
beforeAgent true
expression { params.ppc64le }
}
agent { label 'ppc64le-ubuntu-1604' }
// ppc64le machines run on Docker 18.06, and buildkit has some bugs on that version
environment { DOCKER_BUILDKIT = '0' }

stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh 'docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .'
}
}
stage("Unit tests") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/test/unit
'''
}
post {
always {
junit testResults: 'bundles/junit-report.xml', allowEmptyResults: true
}
}
}
stage("Integration tests") {
environment { TEST_SKIP_INTEGRATION_CLI = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TESTDEBUG \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}

post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''

sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''

catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=ppc64le-integration
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''

archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('ppc64le integration-cli') {
when {
beforeAgent true
not { changeRequest() }
expression { params.ppc64le }
}
agent { label 'ppc64le-ubuntu-1604' }
// ppc64le machines run on Docker 18.06, and buildkit has some bugs on that version
environment { DOCKER_BUILDKIT = '0' }

stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh 'docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .'
}
}
stage("Integration-cli tests") {
environment { TEST_SKIP_INTEGRATION = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TEST_SKIP_INTEGRATION \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}

post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''

sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''

catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=ppc64le-integration-cli
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''

archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('win-RS1') {
when {
beforeAgent true
// Skip this stage on PRs unless the windowsRS1 checkbox is selected
anyOf {
not { changeRequest() }
expression { params.windowsRS1 }
}
}
environment {
DOCKER_BUILDKIT = '0'
DOCKER_DUT_DEBUG = '1'
SKIP_VALIDATION_TESTS = '1'
SOURCES_DRIVE = 'd'
SOURCES_SUBDIR = 'gopath'
TESTRUN_DRIVE = 'd'
TESTRUN_SUBDIR = "CI"
WINDOWS_BASE_IMAGE = 'mcr.microsoft.com/windows/servercore'
WINDOWS_BASE_IMAGE_TAG = 'ltsc2016'
}
agent {
node {
customWorkspace 'd:\\gopath\\src\\github.com\\docker\\docker'
label 'windows-2016'
}
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
}
}
stage("Run tests") {
steps {
powershell '''
$ErrorActionPreference = 'Stop'
[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12
Invoke-WebRequest https://github.com/moby/docker-ci-zap/blob/master/docker-ci-zap.exe?raw=true -OutFile C:/Windows/System32/docker-ci-zap.exe
./hack/ci/windows.ps1
exit $LastExitCode
'''
}
}
}
post {
always {
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
powershell '''
$bundleName="windowsRS1-integration"
Write-Host -ForegroundColor Green "Creating ${bundleName}-bundles.zip"

# archiveArtifacts does not support env-vars to , so save the artifacts in a fixed location
|
||||
Compress-Archive -Path "${env:TEMP}/CIDUT.out", "${env:TEMP}/CIDUT.err" -CompressionLevel Optimal -DestinationPath "${bundleName}-bundles.zip"
|
||||
'''
|
||||
|
||||
archiveArtifacts artifacts: '*-bundles.zip', allowEmptyArchive: true
|
||||
}
|
||||
}
|
||||
cleanup {
|
||||
sh 'make clean'
|
||||
deleteDir()
|
||||
}
|
||||
}
|
||||
}
|
||||
                stage('win-RS5') {
                    when {
                        beforeAgent true
                        expression { params.windowsRS5 }
                    }
                    environment {
                        DOCKER_BUILDKIT = '0'
                        DOCKER_DUT_DEBUG = '1'
                        SKIP_VALIDATION_TESTS = '1'
                        SOURCES_DRIVE = 'd'
                        SOURCES_SUBDIR = 'gopath'
                        TESTRUN_DRIVE = 'd'
                        TESTRUN_SUBDIR = "CI"
                        WINDOWS_BASE_IMAGE = 'mcr.microsoft.com/windows/servercore'
                        WINDOWS_BASE_IMAGE_TAG = 'ltsc2019'
                    }
                    agent {
                        node {
                            customWorkspace 'd:\\gopath\\src\\github.com\\docker\\docker'
                            label 'windows-2019'
                        }
                    }
                    stages {
                        stage("Print info") {
                            steps {
                                sh 'docker version'
                                sh 'docker info'
                            }
                        }
                        stage("Run tests") {
                            steps {
                                powershell '''
                                $ErrorActionPreference = 'Stop'
                                Invoke-WebRequest https://github.com/moby/docker-ci-zap/blob/master/docker-ci-zap.exe?raw=true -OutFile C:/Windows/System32/docker-ci-zap.exe
                                ./hack/ci/windows.ps1
                                exit $LastExitCode
                                '''
                            }
                        }
                    }
                    post {
                        always {
                            catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.zip') {
                                powershell '''
                                $bundleName="windowsRS5-integration"
                                Write-Host -ForegroundColor Green "Creating ${bundleName}-bundles.zip"

                                # archiveArtifacts does not support env-vars, so save the artifacts in a fixed location
                                Compress-Archive -Path "${env:TEMP}/CIDUT.out", "${env:TEMP}/CIDUT.err" -CompressionLevel Optimal -DestinationPath "${bundleName}-bundles.zip"
                                '''

                                archiveArtifacts artifacts: '*-bundles.zip', allowEmptyArchive: true
                            }
                        }
                        cleanup {
                            sh 'make clean'
                            deleteDir()
                        }
                    }
                }
            }
        }
    }
}
LICENSE (2 changes)
@@ -176,7 +176,7 @@

   END OF TERMS AND CONDITIONS

   Copyright 2013-2018 Docker, Inc.
   Copyright 2013-2017 Docker, Inc.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.

MAINTAINERS (30 changes)
@@ -36,7 +36,6 @@
        "jhowardmsft",
        "johnstep",
        "justincormack",
        "kolyshkin",
        "mhbauer",
        "mlaventure",
        "runcom",
@@ -51,6 +50,15 @@
        "yongtang"
    ]

    [Org."Docs maintainers"]

    # TODO Describe the docs maintainers role.

    people = [
        "misty",
        "thajeztah"
    ]

    [Org.Curators]

    # The curators help ensure that incoming issues and pull requests are properly triaged and
@@ -70,7 +78,6 @@
        "chanwit",
        "fntlnz",
        "gianarb",
        "olljanat",
        "programmerq",
        "rheinwein",
        "ripcurld",
@@ -111,7 +118,7 @@
    # still stumble into him in our issue tracker, or on IRC.
    "erikh",

    # Evan Hazlett is the creator of the Shipyard and Interlock open source projects,
    # Evan Hazlett is the creator of of the Shipyard and Interlock open source projects,
    # and the author of "Orca", which became the foundation of Docker Universal Control
    # Plane (UCP). As a maintainer, Evan helped integrate SwarmKit (secrets, tasks)
    # into the Docker engine.
@@ -240,7 +247,7 @@

    [people.akihirosuda]
    Name = "Akihiro Suda"
    Email = "akihiro.suda.cz@hco.ntt.co.jp"
    Email = "suda.akihiro@lab.ntt.co.jp"
    GitHub = "AkihiroSuda"

    [people.aluzzardi]
@@ -358,11 +365,6 @@
    Email = "justin.cormack@docker.com"
    GitHub = "justincormack"

    [people.kolyshkin]
    Name = "Kir Kolyshkin"
    Email = "kolyshkin@gmail.com"
    GitHub = "kolyshkin"

    [people.lk4d4]
    Name = "Alexander Morozov"
    Email = "lk4d4@docker.com"
@@ -378,6 +380,11 @@
    Email = "mbauer@us.ibm.com"
    GitHub = "mhbauer"

    [people.misty]
    Name = "Misty Stanley-Jones"
    Email = "misty@docker.com"
    GitHub = "mistyhacks"

    [people.mlaventure]
    Name = "Kenfe-Mickaël Laventure"
    Email = "mickael.laventure@gmail.com"
@@ -393,11 +400,6 @@
    Email = "mrjana@docker.com"
    GitHub = "mrjana"

    [people.olljanat]
    Name = "Olli Janatuinen"
    Email = "olli.janatuinen@gmail.com"
    GitHub = "olljanat"

    [people.programmerq]
    Name = "Jeff Anderson"
    Email = "jeff@docker.com"

Makefile (100 changes)
@@ -1,11 +1,10 @@
.PHONY: all binary dynbinary build cross help install manpages run shell test test-docker-py test-integration test-unit validate win
.PHONY: all binary dynbinary build cross help init-go-pkg-cache install manpages run shell test test-docker-py test-integration test-unit validate win

# set the graph driver as the current graphdriver if not set
DOCKER_GRAPHDRIVER := $(if $(DOCKER_GRAPHDRIVER),$(DOCKER_GRAPHDRIVER),$(shell docker info 2>&1 | grep "Storage Driver" | sed 's/.*: //'))
export DOCKER_GRAPHDRIVER

# enable/disable cross-compile
DOCKER_CROSS ?= false
DOCKER_INCREMENTAL_BINARY := $(if $(DOCKER_INCREMENTAL_BINARY),$(DOCKER_INCREMENTAL_BINARY),1)
export DOCKER_INCREMENTAL_BINARY

# get OS/Arch of docker engine
DOCKER_OSARCH := $(shell bash -c 'source hack/make/.detect-daemon-osarch && echo $${DOCKER_ENGINE_OSARCH}')
@@ -37,7 +36,6 @@ DOCKER_ENVS := \
	-e KEEPBUNDLE \
	-e DOCKER_BUILD_ARGS \
	-e DOCKER_BUILD_GOGC \
	-e DOCKER_BUILD_OPTS \
	-e DOCKER_BUILD_PKGS \
	-e DOCKER_BUILDKIT \
	-e DOCKER_BASH_COMPLETION_PATH \
@@ -46,22 +44,16 @@ DOCKER_ENVS := \
	-e DOCKER_EXPERIMENTAL \
	-e DOCKER_GITCOMMIT \
	-e DOCKER_GRAPHDRIVER \
	-e DOCKER_INCREMENTAL_BINARY \
	-e DOCKER_LDFLAGS \
	-e DOCKER_PORT \
	-e DOCKER_REMAP_ROOT \
	-e DOCKER_STORAGE_OPTS \
	-e DOCKER_TEST_HOST \
	-e DOCKER_USERLANDPROXY \
	-e DOCKERD_ARGS \
	-e TEST_INTEGRATION_DIR \
	-e TEST_SKIP_INTEGRATION \
	-e TEST_SKIP_INTEGRATION_CLI \
	-e TESTDEBUG \
	-e TESTDIRS \
	-e TESTFLAGS \
	-e TESTFLAGS_INTEGRATION \
	-e TESTFLAGS_INTEGRATION_CLI \
	-e TEST_FILTER \
	-e TIMEOUT \
	-e VALIDATE_REPO \
	-e VALIDATE_BRANCH \
@@ -82,26 +74,25 @@ DOCKER_ENVS := \
# (default to no bind mount if DOCKER_HOST is set)
# note: BINDDIR is supported for backwards-compatibility here
BIND_DIR := $(if $(BINDDIR),$(BINDDIR),$(if $(DOCKER_HOST),,bundles))

# DOCKER_MOUNT can be overridden, but use at your own risk!
ifndef DOCKER_MOUNT
DOCKER_MOUNT := $(if $(BIND_DIR),-v "$(CURDIR)/$(BIND_DIR):/go/src/github.com/docker/docker/$(BIND_DIR)")
DOCKER_MOUNT := $(if $(DOCKER_BINDDIR_MOUNT_OPTS),$(DOCKER_MOUNT):$(DOCKER_BINDDIR_MOUNT_OPTS),$(DOCKER_MOUNT))

# This allows the test suite to be able to run without worrying about the underlying fs used by the container running the daemon (e.g. aufs-on-aufs), so long as the host running the container is running a supported fs.
# The volume will be cleaned up when the container is removed due to `--rm`.
# Note that `BIND_DIR` will already be set to `bundles` if `DOCKER_HOST` is not set (see above BIND_DIR line), in such case this will do nothing since `DOCKER_MOUNT` will already be set.
DOCKER_MOUNT := $(if $(DOCKER_MOUNT),$(DOCKER_MOUNT),-v /go/src/github.com/docker/docker/bundles) -v "$(CURDIR)/.git:/go/src/github.com/docker/docker/.git"

DOCKER_MOUNT_CACHE := -v docker-dev-cache:/root/.cache
DOCKER_MOUNT_CLI := $(if $(DOCKER_CLI_PATH),-v $(shell dirname $(DOCKER_CLI_PATH)):/usr/local/cli,)
DOCKER_MOUNT_BASH_COMPLETION := $(if $(DOCKER_BASH_COMPLETION_PATH),-v $(shell dirname $(DOCKER_BASH_COMPLETION_PATH)):/usr/local/completion/bash,)
DOCKER_MOUNT := $(DOCKER_MOUNT) $(DOCKER_MOUNT_CACHE) $(DOCKER_MOUNT_CLI) $(DOCKER_MOUNT_BASH_COMPLETION)
endif # ifndef DOCKER_MOUNT

# This allows setting the docker-dev container name
DOCKER_CONTAINER_NAME := $(if $(CONTAINER_NAME),--name $(CONTAINER_NAME),)

# enable package cache if DOCKER_INCREMENTAL_BINARY and DOCKER_MOUNT (i.e. DOCKER_HOST) are set
PKGCACHE_MAP := gopath:/go/pkg goroot-linux_amd64:/usr/local/go/pkg/linux_amd64 goroot-linux_amd64_netgo:/usr/local/go/pkg/linux_amd64_netgo
PKGCACHE_VOLROOT := dockerdev-go-pkg-cache
PKGCACHE_VOL := $(if $(PKGCACHE_DIR),$(CURDIR)/$(PKGCACHE_DIR)/,$(PKGCACHE_VOLROOT)-)
DOCKER_MOUNT_PKGCACHE := $(if $(DOCKER_INCREMENTAL_BINARY),$(shell echo $(PKGCACHE_MAP) | sed -E 's@([^ ]*)@-v "$(PKGCACHE_VOL)\1"@g'),)
DOCKER_MOUNT_CLI := $(if $(DOCKER_CLI_PATH),-v $(shell dirname $(DOCKER_CLI_PATH)):/usr/local/cli,)
DOCKER_MOUNT_BASH_COMPLETION := $(if $(DOCKER_BASH_COMPLETION_PATH),-v $(shell dirname $(DOCKER_BASH_COMPLETION_PATH)):/usr/local/completion/bash,)
DOCKER_MOUNT := $(DOCKER_MOUNT) $(DOCKER_MOUNT_PKGCACHE) $(DOCKER_MOUNT_CLI) $(DOCKER_MOUNT_BASH_COMPLETION)

GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
GIT_BRANCH_CLEAN := $(shell echo $(GIT_BRANCH) | sed -e "s/[^[:alnum:]]/-/g")
DOCKER_IMAGE := docker-dev$(if $(GIT_BRANCH_CLEAN),:$(GIT_BRANCH_CLEAN))
@@ -113,6 +104,9 @@ export BUILD_APT_MIRROR

SWAGGER_DOCS_PORT ?= 9000

INTEGRATION_CLI_MASTER_IMAGE := $(if $(INTEGRATION_CLI_MASTER_IMAGE), $(INTEGRATION_CLI_MASTER_IMAGE), integration-cli-master)
INTEGRATION_CLI_WORKER_IMAGE := $(if $(INTEGRATION_CLI_WORKER_IMAGE), $(INTEGRATION_CLI_WORKER_IMAGE), integration-cli-worker)

define \n


@@ -139,36 +133,28 @@ binary: build ## build the linux binaries
dynbinary: build ## build the linux dynbinaries
	$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary


cross: DOCKER_CROSS := true
cross: build ## cross build the binaries for darwin, freebsd and\nwindows
	$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary binary cross

ifdef DOCKER_CROSSPLATFORMS
build: DOCKER_CROSS := true
endif
ifeq ($(BIND_DIR), .)
build: DOCKER_BUILD_OPTS += --target=dev
endif
build: DOCKER_BUILD_ARGS += --build-arg=CROSS=$(DOCKER_CROSS)
build: DOCKER_BUILDKIT ?= 1
build: bundles
build: bundles init-go-pkg-cache
	$(warning The docker client CLI has moved to github.com/docker/cli. For a dev-test cycle involving the CLI, run:${\n} DOCKER_CLI_PATH=/host/path/to/cli/binary make shell ${\n} then change the cli and compile into a binary at the same location.${\n})
	DOCKER_BUILDKIT="${DOCKER_BUILDKIT}" docker build --build-arg=GO_VERSION ${BUILD_APT_MIRROR} ${DOCKER_BUILD_ARGS} ${DOCKER_BUILD_OPTS} -t "$(DOCKER_IMAGE)" -f "$(DOCKERFILE)" .
	docker build ${BUILD_APT_MIRROR} ${DOCKER_BUILD_ARGS} -t "$(DOCKER_IMAGE)" -f "$(DOCKERFILE)" .

bundles:
	mkdir bundles

.PHONY: clean
clean: clean-cache
clean: clean-pkg-cache-vol ## clean up cached resources

.PHONY: clean-cache
clean-cache:
	docker volume rm -f docker-dev-cache
clean-pkg-cache-vol:
	@- $(foreach mapping,$(PKGCACHE_MAP), \
		$(shell docker volume rm $(PKGCACHE_VOLROOT)-$(shell echo $(mapping) | awk -F':/' '{ print $$1 }') > /dev/null 2>&1) \
	)

cross: build ## cross build the binaries for darwin, freebsd and\nwindows
	$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary binary cross

help: ## this help
	@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z0-9_-]+:.*?## / {gsub("\\\\n",sprintf("\n%22c",""), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)
	@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {sub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)

init-go-pkg-cache:
	$(if $(PKGCACHE_DIR), mkdir -p $(shell echo $(PKGCACHE_MAP) | sed -E 's@([^: ]*):[^ ]*@$(PKGCACHE_DIR)/\1@g'))

install: ## install the linux binaries
	KEEPBUNDLE=1 hack/make.sh install-binary
@@ -187,16 +173,8 @@ test-docker-py: build ## run the docker-py tests

test-integration-cli: test-integration ## (DEPRECATED) use test-integration

ifneq ($(and $(TEST_SKIP_INTEGRATION),$(TEST_SKIP_INTEGRATION_CLI)),)
test-integration:
	@echo Both integration suites skipped per environment variables
else
test-integration: build ## run the integration tests
	$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary test-integration
endif

test-integration-flaky: build ## run the stress test for all new integration tests
	$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary test-integration-flaky

test-unit: build ## run the unit tests
	$(DOCKER_RUN_DOCKER) hack/test/unit
@@ -205,7 +183,7 @@ validate: build ## validate DCO, Seccomp profile generation, gofmt,\n./pkg/ isol
	$(DOCKER_RUN_DOCKER) hack/validate/all

win: build ## cross build the binary for windows
	$(DOCKER_RUN_DOCKER) DOCKER_CROSSPLATFORMS=windows/amd64 hack/make.sh cross
	$(DOCKER_RUN_DOCKER) hack/make.sh win

.PHONY: swagger-gen
swagger-gen:
@@ -222,3 +200,19 @@ swagger-docs: ## preview the API documentation
		-e 'REDOC_OPTIONS=hide-hostname="true" lazy-rendering' \
		-p $(SWAGGER_DOCS_PORT):80 \
		bfirsh/redoc:1.6.2

build-integration-cli-on-swarm: build ## build images and binary for running integration-cli on Swarm in parallel
	@echo "Building hack/integration-cli-on-swarm (if build fails, please refer to hack/integration-cli-on-swarm/README.md)"
	go build -buildmode=pie -o ./hack/integration-cli-on-swarm/integration-cli-on-swarm ./hack/integration-cli-on-swarm/host
	@echo "Building $(INTEGRATION_CLI_MASTER_IMAGE)"
	docker build -t $(INTEGRATION_CLI_MASTER_IMAGE) hack/integration-cli-on-swarm/agent
# For worker, we don't use `docker build` so as to enable DOCKER_INCREMENTAL_BINARY and so on
	@echo "Building $(INTEGRATION_CLI_WORKER_IMAGE) from $(DOCKER_IMAGE)"
	$(eval tmp := integration-cli-worker-tmp)
# We mount pkgcache, but not bundle (bundle needs to be baked into the image)
# To avoid baking DOCKER_GRAPHDRIVER and so on into the image, we cannot use $(DOCKER_ENVS) here
	docker run -t -d --name $(tmp) -e DOCKER_GITCOMMIT -e BUILDFLAGS -e DOCKER_INCREMENTAL_BINARY --privileged $(DOCKER_MOUNT_PKGCACHE) $(DOCKER_IMAGE) top
	docker exec $(tmp) hack/make.sh build-integration-test-binary dynbinary
	docker exec $(tmp) go build -buildmode=pie -o /worker github.com/docker/docker/hack/integration-cli-on-swarm/agent/worker
	docker commit -c 'ENTRYPOINT ["/worker"]' $(tmp) $(INTEGRATION_CLI_WORKER_IMAGE)
	docker rm -f $(tmp)

NOTICE (2 changes)
@@ -3,7 +3,7 @@ Copyright 2012-2017 Docker, Inc.

This product includes software developed at Docker, Inc. (https://www.docker.com).

This product contains software (https://github.com/creack/pty) developed
This product contains software (https://github.com/kr/pty) developed
by Keith Rarick, licensed under the MIT License.

The following is courtesy of our legal counsel:

ROADMAP.md (91 changes)
@@ -35,83 +35,34 @@ issue, in the Slack channel, or in person at the Moby Summits that happen every

## 1.1 Runtime improvements

Over time we have accumulated a lot of functionality in the container runtime
aspect of Moby while also growing in other areas. Many of the container runtime
pieces are now duplicated work available in other, lower-level components such
as [containerd](https://containerd.io).
We introduced [`runC`](https://runc.io) as a standalone low-level tool for container
execution in 2015, the first stage in spinning out parts of the Engine into standalone tools.

Moby currently only utilizes containerd for basic runtime state management, e.g. starting
and stopping a container, which is what the pre-containerd 1.0 daemon provided.
Now that containerd is a full-fledged container runtime which supports full
container life-cycle management, we would like to start relying more on containerd
and removing the bits in Moby which are now duplicated. This will necessitate
a significant effort to refactor and even remove large parts of Moby's codebase.
As runC continued evolving, and the OCI specification along with it, we created
[`containerd`](https://github.com/containerd/containerd), a daemon to control and monitor `runC`.
In late 2016 this was relaunched as the `containerd` 1.0 track, aiming to provide a common runtime
for the whole spectrum of container systems, including Kubernetes, with wide community support.
This change meant that there was an increased scope for `containerd`, including image management
and storage drivers.

Tracking issues:
Moby will rely on a long-running `containerd` companion daemon for all container execution
related operations. This could open the door in the future for Engine restarts without interrupting
running containers. The switch over to containerd 1.0 is an important goal for the project, and
will result in a significant simplification of the functions implemented in this repository.

- [#38043](https://github.com/moby/moby/issues/38043) Proposal: containerd image integration

## 1.2 Image Builder

Work is ongoing to integrate [BuildKit](https://github.com/moby/buildkit) into
Moby and replace the "v0" build implementation. BuildKit offers better cache
management, parallelizable build steps, and better extensibility while also
keeping builds portable, a chief tenet of Moby's builder.

Upon completion of this effort, users will have a builder that performs better
while also being more extensible, enabling users to provide their own custom
syntax which can be either Dockerfile-like or something completely different.

See [buildpacks on buildkit](https://github.com/tonistiigi/buildkit-pack) as an
example of this extensibility.

New features for the builder and Dockerfile should be implemented first in the
BuildKit backend using an external Dockerfile implementation from the container
images. This allows everyone to test and evaluate the feature without upgrading
their daemon. New features should go to the experimental channel first, and can be
part of the `docker/dockerfile:experimental` image. From there they graduate to
`docker/dockerfile:latest` and binary releases. The Dockerfile frontend source
code is temporarily located at
[https://github.com/moby/buildkit/tree/master/frontend/dockerfile](https://github.com/moby/buildkit/tree/master/frontend/dockerfile)
with separate new features defined with Go build tags.

Tracking issues:

- [#32925](https://github.com/moby/moby/issues/32925) discussion: builder future: buildkit

## 1.3 Rootless Mode

Running the daemon requires elevated privileges for many tasks. We would like to
support running the daemon as a normal, unprivileged user without requiring `suid`
binaries.

Tracking issues:

- [#37375](https://github.com/moby/moby/issues/37375) Proposal: allow running `dockerd` as an unprivileged user (aka rootless mode)

## 1.4 Testing

Moby has many tests, both unit and integration. Moby needs more tests which can
cover the full spectrum of functionality and edge cases.

Tests in the `integration-cli` folder should also be migrated into (both in
location and style) the `integration` folder. These newer tests are simpler to
run in isolation, simpler to read, simpler to write, and more fully exercise the
API. Meanwhile, tests of the docker CLI should generally live in docker/cli.

Tracking issues:

- [#32866](https://github.com/moby/moby/issues/32866) Replace integration-cli suite with API test suite

## 1.5 Internal decoupling
## 1.2 Internal decoupling

A lot of work has been done to decouple Moby internals. This process of creating
standalone projects with a well-defined function that attract a dedicated community should continue.
As well as integrating `containerd` we would like to integrate [BuildKit](https://github.com/moby/buildkit)
as the next standalone component.

We see gRPC as the natural communication layer between decoupled components.

In addition to pushing out large components into other projects, much of the
internal code structure, and in particular the
["Daemon"](https://godoc.org/github.com/docker/docker/daemon#Daemon) object,
should be split into smaller, more manageable, and more testable components.
## 1.3 Custom assembly tooling

We have been prototyping the Moby [assembly tool](https://github.com/moby/tool), which was originally
developed for LinuxKit, and intend to turn it into a more generic packaging and assembly mechanism
that can build not only the default version of Moby, as distribution packages or other useful forms,
but can also build very different container systems, themselves built of cooperating daemons built in
and running in containers. We intend to merge this functionality into this repo.

TESTING.md (30 changes)
@@ -67,8 +67,6 @@ If a remote daemon is detected, the test will be skipped.

## Running tests

### Unit Tests

To run the unit test suite:

```
@@ -84,36 +82,8 @@ The following environment variables may be used to run a subset of tests:
* `TESTFLAGS` - flags passed to `go test`, to run tests which match a pattern
  use `TESTFLAGS="-test.run TestNameOrPrefix"`

### Integration Tests

To run the integration test suite:

```
make test-integration
```

This make target runs both the "integration" suite and the "integration-cli"
suite.

You can specify which integration test dirs to build and run by specifying
the list of dirs in the TEST_INTEGRATION_DIR environment variable.

You can also explicitly skip either suite by setting (any value) in the
TEST_SKIP_INTEGRATION and/or TEST_SKIP_INTEGRATION_CLI environment variables.

Flags specific to each suite can be set in the TESTFLAGS_INTEGRATION and
TESTFLAGS_INTEGRATION_CLI environment variables.

If all you want is to specify a test filter to run, you can set the
`TEST_FILTER` environment variable. This is passed directly to `go
test -run` (or `go test -check.f`, depending on the test suite). It will also
automatically set the other above-mentioned environment variables accordingly.

### Go Version

You can change the version of Go used to build the code under test by
setting the `GO_VERSION` variable, for example:

```
make GO_VERSION=1.12.8 test
```
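Since `TEST_FILTER` feeds straight into `go test -run`, the filter is a regular expression matched against test names. A minimal sketch of a test that `TEST_FILTER=ContainerRestart` would select (the test name below is hypothetical, not one from the actual suite):

```go
package container_test // hypothetical integration-style test file

import "testing"

// Selected by TEST_FILTER=ContainerRestart: the Makefile turns the
// filter into `go test -run ContainerRestart`, and -run matches this
// name as an unanchored regular expression.
func TestContainerRestartPolicy(t *testing.T) {
	t.Log("running: name matches the -run pattern")
}
```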
@@ -3,7 +3,7 @@ package api // import "github.com/docker/docker/api"

// Common constants for daemon and client.
const (
    // DefaultVersion of Current REST API
    DefaultVersion = "1.40"
    DefaultVersion = "1.39"

    // NoBaseImageSpecifier is the symbol used by the FROM
    // command to specify that no base image is to be used.

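The daemon negotiates a per-request API version against this default; a minimal sketch of the comparison involved, using the real `versions` helpers (the literals are illustrative):

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/versions"
)

func main() {
	defaultVersion := "1.40" // api.DefaultVersion on the newer side of this diff
	requested := "1.39"

	// Requests above the daemon's default are rejected; anything
	// older is served through per-version compatibility shims.
	if versions.GreaterThan(requested, defaultVersion) {
		fmt.Println("client too new")
		return
	}
	fmt.Println("serving API version", requested)
}
```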
@@ -14,7 +14,6 @@ import (
    "github.com/docker/docker/pkg/stringid"
    "github.com/pkg/errors"
    "golang.org/x/sync/errgroup"
    "google.golang.org/grpc"
)

// ImageComponent provides an interface for working with images
@@ -41,13 +40,6 @@ func NewBackend(components ImageComponent, builder Builder, fsCache *fscache.FSC
    return &Backend{imageComponent: components, builder: builder, fsCache: fsCache, buildkit: buildkit}, nil
}

// RegisterGRPC registers buildkit controller to the grpc server.
func (b *Backend) RegisterGRPC(s *grpc.Server) {
    if b.buildkit != nil {
        b.buildkit.RegisterGRPC(s)
    }
}

// Build builds an image from a Source
func (b *Backend) Build(ctx context.Context, config backend.BuildConfig) (string, error) {
    options := config.Options
@@ -90,8 +82,6 @@ func (b *Backend) Build(ctx context.Context, config backend.BuildConfig) (string
    if !useBuildKit {
        stdout := config.ProgressWriter.StdoutFormatter
        fmt.Fprintf(stdout, "Successfully built %s\n", stringid.TruncateID(imageID))
    }
    if imageID != "" {
        err = tagger.TagImages(image.ID(imageID))
    }
    return imageID, err

api/server/httputils/errors.go (new file, 131 additions)
@@ -0,0 +1,131 @@
package httputils // import "github.com/docker/docker/api/server/httputils"

import (
    "fmt"
    "net/http"

    "github.com/docker/docker/api/types"
    "github.com/docker/docker/api/types/versions"
    "github.com/docker/docker/errdefs"
    "github.com/gorilla/mux"
    "github.com/sirupsen/logrus"
    "google.golang.org/grpc"
    "google.golang.org/grpc/codes"
)

type causer interface {
    Cause() error
}

// GetHTTPErrorStatusCode retrieves status code from error message.
func GetHTTPErrorStatusCode(err error) int {
    if err == nil {
        logrus.WithFields(logrus.Fields{"error": err}).Error("unexpected HTTP error handling")
        return http.StatusInternalServerError
    }

    var statusCode int

    // Stop right there
    // Are you sure you should be adding a new error class here? Do one of the existing ones work?

    // Note that the below functions are already checking the error causal chain for matches.
    switch {
    case errdefs.IsNotFound(err):
        statusCode = http.StatusNotFound
    case errdefs.IsInvalidParameter(err):
        statusCode = http.StatusBadRequest
    case errdefs.IsConflict(err) || errdefs.IsAlreadyExists(err):
        statusCode = http.StatusConflict
    case errdefs.IsUnauthorized(err):
        statusCode = http.StatusUnauthorized
    case errdefs.IsUnavailable(err):
        statusCode = http.StatusServiceUnavailable
    case errdefs.IsForbidden(err):
        statusCode = http.StatusForbidden
    case errdefs.IsNotModified(err):
        statusCode = http.StatusNotModified
    case errdefs.IsNotImplemented(err):
        statusCode = http.StatusNotImplemented
    case errdefs.IsSystem(err) || errdefs.IsUnknown(err) || errdefs.IsDataLoss(err) || errdefs.IsDeadline(err) || errdefs.IsCancelled(err):
        statusCode = http.StatusInternalServerError
    default:
        statusCode = statusCodeFromGRPCError(err)
        if statusCode != http.StatusInternalServerError {
            return statusCode
        }

        if e, ok := err.(causer); ok {
            return GetHTTPErrorStatusCode(e.Cause())
        }

        logrus.WithFields(logrus.Fields{
            "module":     "api",
            "error_type": fmt.Sprintf("%T", err),
        }).Debugf("FIXME: Got an API for which error does not match any expected type!!!: %+v", err)
    }

    if statusCode == 0 {
        statusCode = http.StatusInternalServerError
    }

    return statusCode
}

func apiVersionSupportsJSONErrors(version string) bool {
    const firstAPIVersionWithJSONErrors = "1.23"
    return version == "" || versions.GreaterThan(version, firstAPIVersionWithJSONErrors)
}

// MakeErrorHandler makes an HTTP handler that decodes a Docker error and
// returns it in the response.
func MakeErrorHandler(err error) http.HandlerFunc {
    return func(w http.ResponseWriter, r *http.Request) {
        statusCode := GetHTTPErrorStatusCode(err)
        vars := mux.Vars(r)
        if apiVersionSupportsJSONErrors(vars["version"]) {
            response := &types.ErrorResponse{
                Message: err.Error(),
            }
            WriteJSON(w, statusCode, response)
        } else {
            http.Error(w, grpc.ErrorDesc(err), statusCode)
        }
    }
}

// statusCodeFromGRPCError returns status code according to gRPC error
func statusCodeFromGRPCError(err error) int {
    switch grpc.Code(err) {
    case codes.InvalidArgument: // code 3
        return http.StatusBadRequest
    case codes.NotFound: // code 5
        return http.StatusNotFound
    case codes.AlreadyExists: // code 6
        return http.StatusConflict
    case codes.PermissionDenied: // code 7
        return http.StatusForbidden
    case codes.FailedPrecondition: // code 9
        return http.StatusBadRequest
    case codes.Unauthenticated: // code 16
        return http.StatusUnauthorized
    case codes.OutOfRange: // code 11
        return http.StatusBadRequest
    case codes.Unimplemented: // code 12
        return http.StatusNotImplemented
    case codes.Unavailable: // code 14
        return http.StatusServiceUnavailable
    default:
        if e, ok := err.(causer); ok {
            return statusCodeFromGRPCError(e.Cause())
        }
        // codes.Canceled(1)
        // codes.Unknown(2)
        // codes.DeadlineExceeded(4)
        // codes.ResourceExhausted(8)
        // codes.Aborted(10)
        // codes.Internal(13)
        // codes.DataLoss(15)
        return http.StatusInternalServerError
    }
}
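For context, a minimal sketch of how callers benefit from this mapping on the side of the diff where the file exists: a backend wraps its error with `errdefs`, and the API layer derives the HTTP status from the error class rather than the message (the container ID below is illustrative):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/docker/docker/api/server/httputils"
	"github.com/docker/docker/errdefs"
)

func main() {
	// A backend returns a classified error...
	err := errdefs.NotFound(errors.New("no such container: abc123"))

	// ...and the API layer maps the class, not the message, to a status.
	fmt.Println(httputils.GetHTTPErrorStatusCode(err)) // 404
}
```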
@@ -1,9 +0,0 @@
package httputils // import "github.com/docker/docker/api/server/httputils"

import "github.com/docker/docker/errdefs"

// GetHTTPErrorStatusCode retrieves status code from error message.
//
// Deprecated: use errdefs.GetHTTPErrorStatusCode
func GetHTTPErrorStatusCode(err error) int {
    return errdefs.GetHTTPErrorStatusCode(err)
}
@@ -7,17 +7,15 @@ import (
    "net/http"
    "strings"

    "github.com/docker/docker/api/types"
    "github.com/docker/docker/api/types/versions"
    "github.com/docker/docker/errdefs"
    "github.com/gorilla/mux"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
    "google.golang.org/grpc/status"
)

type contextKey string

// APIVersionKey is the client's requested API version.
type APIVersionKey struct{}
const APIVersionKey contextKey = "api-version"

// APIFunc is an adapter to allow the use of ordinary functions as Docker API endpoints.
// Any function that has the appropriate signature can be registered as an API endpoint (e.g. getVersion).
@@ -85,35 +83,13 @@ func VersionFromContext(ctx context.Context) string {
        return ""
    }

    if val := ctx.Value(APIVersionKey{}); val != nil {
    if val := ctx.Value(APIVersionKey); val != nil {
        return val.(string)
    }

    return ""
}

// MakeErrorHandler makes an HTTP handler that decodes a Docker error and
// returns it in the response.
func MakeErrorHandler(err error) http.HandlerFunc {
    return func(w http.ResponseWriter, r *http.Request) {
        statusCode := errdefs.GetHTTPErrorStatusCode(err)
        vars := mux.Vars(r)
        if apiVersionSupportsJSONErrors(vars["version"]) {
            response := &types.ErrorResponse{
                Message: err.Error(),
            }
            WriteJSON(w, statusCode, response)
        } else {
            http.Error(w, status.Convert(err).Message(), statusCode)
        }
    }
}

func apiVersionSupportsJSONErrors(version string) bool {
    const firstAPIVersionWithJSONErrors = "1.23"
    return version == "" || versions.GreaterThan(version, firstAPIVersionWithJSONErrors)
}

// matchesContentType validates the content type against the expected one
func matchesContentType(contentType, expectedType string) bool {
    mimetype, _, err := mime.ParseMediaType(contentType)

@@ -41,7 +41,7 @@ func DebugRequestMiddleware(handler func(ctx context.Context, w http.ResponseWri

    var postForm map[string]interface{}
    if err := json.Unmarshal(b, &postForm); err == nil {
        maskSecretKeys(postForm)
        maskSecretKeys(postForm, r.RequestURI)
        formStr, errMarshal := json.Marshal(postForm)
        if errMarshal == nil {
            logrus.Debugf("form data: %s", string(formStr))
@@ -54,37 +54,41 @@ func DebugRequestMiddleware(handler func(ctx context.Context, w http.ResponseWri
    }
}

func maskSecretKeys(inp interface{}) {
func maskSecretKeys(inp interface{}, path string) {
    // Remove any query string from the path
    idx := strings.Index(path, "?")
    if idx != -1 {
        path = path[:idx]
    }
    // Remove trailing / characters
    path = strings.TrimRight(path, "/")

    if arr, ok := inp.([]interface{}); ok {
        for _, f := range arr {
            maskSecretKeys(f)
            maskSecretKeys(f, path)
        }
        return
    }

    if form, ok := inp.(map[string]interface{}); ok {
        scrub := []string{
            // Note: The Data field contains the base64-encoded secret in 'secret'
            // and 'config' create and update requests. Currently, no other POST
            // API endpoints use a data field, so we scrub this field unconditionally.
            // Change this handling to be conditional if a new endpoint is added
            // in future where this field should not be scrubbed.
            "data",
            "jointoken",
            "password",
            "secret",
            "signingcakey",
            "unlockkey",
        }
    loop0:
        for k, v := range form {
            for _, m := range scrub {
            for _, m := range []string{"password", "secret", "jointoken", "unlockkey", "signingcakey"} {
                if strings.EqualFold(m, k) {
                    form[k] = "*****"
                    continue loop0
                }
            }
            maskSecretKeys(v)
            maskSecretKeys(v, path)
        }

        // Route-specific redactions
        if strings.HasSuffix(path, "/secrets/create") {
            for k := range form {
                if k == "Data" {
                    form[k] = "*****"
                }
            }
        }
    }
}

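To make the masking behaviour concrete, here is a small standalone sketch of the same recursive walk over a decoded form. It re-implements the scrub shown in the diff above for illustration; it is not the daemon's code:

```go
package main

import (
	"fmt"
	"strings"
)

// mask walks a decoded JSON form and replaces values of sensitive
// keys, mirroring the recursive scrub in the middleware above.
func mask(inp interface{}) {
	scrub := []string{"data", "jointoken", "password", "secret", "signingcakey", "unlockkey"}
	switch v := inp.(type) {
	case []interface{}:
		for _, f := range v {
			mask(f)
		}
	case map[string]interface{}:
	loop:
		for k, val := range v {
			for _, m := range scrub {
				if strings.EqualFold(m, k) { // case-insensitive, like the middleware
					v[k] = "*****"
					continue loop
				}
			}
			mask(val) // recurse into nested objects and arrays
		}
	}
}

func main() {
	form := map[string]interface{}{
		"Name":     "my-secret",
		"Data":     "c3VwZXItc2VjcmV0",
		"Metadata": map[string]interface{}{"PASSWORD": "hunter2"},
	}
	mask(form)
	fmt.Println(form) // map[Data:***** Metadata:map[PASSWORD:*****] Name:my-secret]
}
```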
@@ -9,25 +9,31 @@ import (

func TestMaskSecretKeys(t *testing.T) {
    tests := []struct {
        doc      string
        path     string
        input    map[string]interface{}
        expected map[string]interface{}
    }{
        {
            doc:      "secret/config create and update requests",
            path:     "/v1.30/secrets/create",
            input:    map[string]interface{}{"Data": "foo", "Name": "name", "Labels": map[string]interface{}{}},
            expected: map[string]interface{}{"Data": "*****", "Name": "name", "Labels": map[string]interface{}{}},
        },
        {
            doc:      "masking other fields (recursively)",
            path:     "/v1.30/secrets/create//",
            input:    map[string]interface{}{"Data": "foo", "Name": "name", "Labels": map[string]interface{}{}},
            expected: map[string]interface{}{"Data": "*****", "Name": "name", "Labels": map[string]interface{}{}},
        },

        {
            path:     "/secrets/create?key=val",
            input:    map[string]interface{}{"Data": "foo", "Name": "name", "Labels": map[string]interface{}{}},
            expected: map[string]interface{}{"Data": "*****", "Name": "name", "Labels": map[string]interface{}{}},
        },
        {
            path: "/v1.30/some/other/path",
            input: map[string]interface{}{
                "password":     "pass",
                "secret":       "secret",
                "jointoken":    "jointoken",
                "unlockkey":    "unlockkey",
                "signingcakey": "signingcakey",
                "password":     "pass",
                "other": map[string]interface{}{
                    "password":  "pass",
                    "secret":    "secret",
                    "jointoken": "jointoken",
                    "unlockkey": "unlockkey",
@@ -35,13 +41,8 @@ func TestMaskSecretKeys(t *testing.T) {
                },
            },
            expected: map[string]interface{}{
                "password":     "*****",
                "secret":       "*****",
                "jointoken":    "*****",
                "unlockkey":    "*****",
                "signingcakey": "*****",
                "password":     "*****",
                "other": map[string]interface{}{
                    "password":  "*****",
                    "secret":    "*****",
                    "jointoken": "*****",
                    "unlockkey": "*****",
@@ -49,27 +50,10 @@ func TestMaskSecretKeys(t *testing.T) {
                },
            },
        },
        {
            doc: "case insensitive field matching",
            input: map[string]interface{}{
                "PASSWORD": "pass",
                "other": map[string]interface{}{
                    "PASSWORD": "pass",
                },
            },
            expected: map[string]interface{}{
                "PASSWORD": "*****",
                "other": map[string]interface{}{
                    "PASSWORD": "*****",
                },
            },
        },
    }

    for _, testcase := range tests {
        t.Run(testcase.doc, func(t *testing.T) {
            maskSecretKeys(testcase.input)
            assert.Check(t, is.DeepEqual(testcase.expected, testcase.input))
        })
        maskSecretKeys(testcase.input, testcase.path)
        assert.Check(t, is.DeepEqual(testcase.expected, testcase.input))
    }
}

@@ -58,7 +58,7 @@ func (v VersionMiddleware) WrapHandler(handler func(ctx context.Context, w http.
    if versions.GreaterThan(apiVersion, v.defaultVersion) {
        return versionUnsupportedError{version: apiVersion, maxVersion: v.defaultVersion}
    }
    ctx = context.WithValue(ctx, httputils.APIVersionKey{}, apiVersion)
    ctx = context.WithValue(ctx, httputils.APIVersionKey, apiVersion)
    return handler(ctx, w, r, vars)
}

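The change from a typed string constant to the `APIVersionKey{}` empty-struct key is idiomatic Go for context keys: a struct type defined in one package cannot collide with a key defined anywhere else. A minimal standalone sketch of the pattern (not the daemon's code):

```go
package main

import (
	"context"
	"fmt"
)

// APIVersionKey mirrors the struct-typed context key on the newer
// side of this diff; an empty struct key is unique to its defining
// package and cannot collide with keys defined elsewhere.
type APIVersionKey struct{}

func main() {
	ctx := context.WithValue(context.Background(), APIVersionKey{}, "1.40")
	if v, ok := ctx.Value(APIVersionKey{}).(string); ok {
		fmt.Println("negotiated API version:", v) // 1.40
	}
}
```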
@@ -31,8 +31,8 @@ func (r *buildRouter) Routes() []router.Route {

func (r *buildRouter) initRoutes() {
    r.routes = []router.Route{
        router.NewPostRoute("/build", r.postBuild),
        router.NewPostRoute("/build/prune", r.postPrune),
        router.NewPostRoute("/build", r.postBuild, router.WithCancel),
        router.NewPostRoute("/build/prune", r.postPrune, router.WithCancel),
        router.NewPostRoute("/build/cancel", r.postCancel),
    }
}

@@ -148,17 +148,6 @@ func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBui
    }
    options.Version = builderVersion

    if versions.GreaterThanOrEqualTo(version, "1.40") {
        outputsJSON := r.FormValue("outputs")
        if outputsJSON != "" {
            var outputs []types.ImageBuildOutput
            if err := json.Unmarshal([]byte(outputsJSON), &outputs); err != nil {
                return nil, err
            }
            options.Outputs = outputs
        }
    }

    return options, nil
}

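The version gate in that hunk is the standard pattern for options a given API version does not know about: parse them only when the negotiated version is new enough. A standalone sketch under that assumption, with illustrative literals and a plain map standing in for `types.ImageBuildOutput`:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types/versions"
)

func main() {
	apiVersion := "1.40"
	outputsJSON := `[{"Type":"local","Attrs":{"dest":"/tmp/out"}}]`

	// Older clients never sent "outputs", so the field is only
	// decoded when the request negotiated API 1.40 or newer.
	if versions.GreaterThanOrEqualTo(apiVersion, "1.40") && outputsJSON != "" {
		var outputs []map[string]interface{} // stand-in for types.ImageBuildOutput
		if err := json.Unmarshal([]byte(outputsJSON), &outputs); err != nil {
			panic(err)
		}
		fmt.Println("parsed outputs:", outputs)
	}
}
```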
@@ -38,8 +38,8 @@ func (r *containerRouter) initRoutes() {
        router.NewGetRoute("/containers/{name:.*}/changes", r.getContainersChanges),
        router.NewGetRoute("/containers/{name:.*}/json", r.getContainersByName),
        router.NewGetRoute("/containers/{name:.*}/top", r.getContainersTop),
        router.NewGetRoute("/containers/{name:.*}/logs", r.getContainersLogs),
        router.NewGetRoute("/containers/{name:.*}/stats", r.getContainersStats),
        router.NewGetRoute("/containers/{name:.*}/logs", r.getContainersLogs, router.WithCancel),
        router.NewGetRoute("/containers/{name:.*}/stats", r.getContainersStats, router.WithCancel),
        router.NewGetRoute("/containers/{name:.*}/attach/ws", r.wsContainersAttach),
        router.NewGetRoute("/exec/{id:.*}/json", r.getExecByID),
        router.NewGetRoute("/containers/{name:.*}/archive", r.getContainersArchive),
@@ -51,7 +51,7 @@ func (r *containerRouter) initRoutes() {
        router.NewPostRoute("/containers/{name:.*}/restart", r.postContainersRestart),
        router.NewPostRoute("/containers/{name:.*}/start", r.postContainersStart),
        router.NewPostRoute("/containers/{name:.*}/stop", r.postContainersStop),
        router.NewPostRoute("/containers/{name:.*}/wait", r.postContainersWait),
        router.NewPostRoute("/containers/{name:.*}/wait", r.postContainersWait, router.WithCancel),
        router.NewPostRoute("/containers/{name:.*}/resize", r.postContainersResize),
        router.NewPostRoute("/containers/{name:.*}/attach", r.postContainersAttach),
        router.NewPostRoute("/containers/{name:.*}/copy", r.postContainersCopy), // Deprecated since 1.8, Errors out since 1.12
@@ -60,7 +60,7 @@ func (r *containerRouter) initRoutes() {
        router.NewPostRoute("/exec/{name:.*}/resize", r.postContainerExecResize),
        router.NewPostRoute("/containers/{name:.*}/rename", r.postContainerRename),
        router.NewPostRoute("/containers/{name:.*}/update", r.postContainerUpdate),
        router.NewPostRoute("/containers/prune", r.postContainersPrune),
        router.NewPostRoute("/containers/prune", r.postContainersPrune, router.WithCancel),
        router.NewPostRoute("/commit", r.postCommit),
        // PUT
        router.NewPutRoute("/containers/{name:.*}/archive", r.putContainersArchive),
@@ -338,6 +338,9 @@ func (s *containerRouter) postContainersWait(ctx context.Context, w http.Respons
        }
    }

    // Note: the context should get canceled if the client closes the
    // connection since this handler has been wrapped by the
    // router.WithCancel() wrapper.
    waitC, err := s.backend.ContainerWait(ctx, vars["name"], waitCondition)
    if err != nil {
        return err
@@ -425,16 +428,6 @@ func (s *containerRouter) postContainerUpdate(ctx context.Context, w http.Respon
    if err := decoder.Decode(&updateConfig); err != nil {
        return err
    }
    if versions.LessThan(httputils.VersionFromContext(ctx), "1.40") {
        updateConfig.PidsLimit = nil
    }
    if updateConfig.PidsLimit != nil && *updateConfig.PidsLimit <= 0 {
        // Both `0` and `-1` are accepted to set "unlimited" when updating.
        // Historically, any negative value was accepted, so treat them as
        // "unlimited" as well.
        var unlimited int64
        updateConfig.PidsLimit = &unlimited
    }

    hostConfig := &container.HostConfig{
        Resources: updateConfig.Resources,
@@ -472,33 +465,6 @@ func (s *containerRouter) postContainersCreate(ctx context.Context, w http.Respo
        hostConfig.AutoRemove = false
    }

    if hostConfig != nil && versions.LessThan(version, "1.40") {
        // Ignore BindOptions.NonRecursive because it was added in API 1.40.
        for _, m := range hostConfig.Mounts {
            if bo := m.BindOptions; bo != nil {
                bo.NonRecursive = false
            }
        }
        // Ignore KernelMemoryTCP because it was added in API 1.40.
        hostConfig.KernelMemoryTCP = 0

        // Ignore Capabilities because it was added in API 1.40.
        hostConfig.Capabilities = nil

        // Older clients (API < 1.40) expect the default to be shareable, make them happy
        if hostConfig.IpcMode.IsEmpty() {
            hostConfig.IpcMode = container.IpcMode("shareable")
        }
    }

    if hostConfig != nil && hostConfig.PidsLimit != nil && *hostConfig.PidsLimit <= 0 {
        // Don't set a limit if either no limit was specified, or "unlimited" was
        // explicitly set.
        // Both `0` and `-1` are accepted as "unlimited", and historically any
        // negative value was accepted, so treat those as "unlimited" as well.
        hostConfig.PidsLimit = nil
    }

    ccr, err := s.backend.ContainerCreate(types.ContainerCreateConfig{
        Name:   name,
        Config: config,
@@ -604,7 +570,7 @@ func (s *containerRouter) postContainersAttach(ctx context.Context, w http.Respo
    // Remember to close stream if error happens
    conn, _, errHijack := hijacker.Hijack()
    if errHijack == nil {
        statusCode := errdefs.GetHTTPErrorStatusCode(err)
        statusCode := httputils.GetHTTPErrorStatusCode(err)
        statusText := http.StatusText(statusCode)
        fmt.Fprintf(conn, "HTTP/1.1 %d %s\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n%s\r\n", statusCode, statusText, err.Error())
        httputils.CloseStreams(conn)

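The `PidsLimit` handling above encodes one rule in two places: `0` and any negative value both mean "unlimited". A standalone sketch of the create-path normalization, which represents "unlimited" by clearing the field (illustrative code, not the daemon's):

```go
package main

import "fmt"

// normalizePidsLimit mirrors the create-path rule above: 0 and any
// negative value both mean "unlimited", represented as a nil pointer.
func normalizePidsLimit(limit *int64) *int64 {
	if limit != nil && *limit <= 0 {
		return nil // no limit set, or "unlimited" explicitly requested
	}
	return limit
}

func main() {
	zero, minusOne, hundred := int64(0), int64(-1), int64(100)
	for _, l := range []*int64{nil, &zero, &minusOne, &hundred} {
		if n := normalizePidsLimit(l); n == nil {
			fmt.Println("unlimited")
		} else {
			fmt.Println("limit:", *n)
		}
	}
}
```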
@@ -14,7 +14,6 @@ import (
    "github.com/docker/docker/api/server/httputils"
    "github.com/docker/docker/api/types"
    registrytypes "github.com/docker/docker/api/types/registry"
    "github.com/docker/docker/errdefs"
    "github.com/opencontainers/image-spec/specs-go/v1"
    "github.com/pkg/errors"
)
@@ -43,10 +42,9 @@ func (s *distributionRouter) getDistributionInfo(ctx context.Context, w http.Res

    image := vars["name"]

    // TODO why is reference.ParseAnyReference() / reference.ParseNormalizedNamed() not using the reference.ErrTagInvalidFormat (and so on) errors?
    ref, err := reference.ParseAnyReference(image)
    if err != nil {
        return errdefs.InvalidParameter(err)
        return err
    }
    namedRef, ok := ref.(reference.Named)
    if !ok {
@@ -54,7 +52,7 @@ func (s *distributionRouter) getDistributionInfo(ctx context.Context, w http.Res
        // full image ID
        return errors.Errorf("no manifest found for full image ID")
    }
    return errdefs.InvalidParameter(errors.Errorf("unknown image reference format: %s", image))
    return errors.Errorf("unknown image reference format: %s", image)
    }

    distrepo, _, err := s.backend.GetRepository(ctx, namedRef, config)
@@ -68,7 +66,7 @@ func (s *distributionRouter) getDistributionInfo(ctx context.Context, w http.Res

    taggedRef, ok := namedRef.(reference.NamedTagged)
    if !ok {
        return errdefs.InvalidParameter(errors.Errorf("image reference not tagged: %s", image))
        return errors.Errorf("image reference not tagged: %s", image)
    }

    descriptor, err := distrepo.Tags(ctx).Get(ctx, taggedRef.Tag())
@@ -94,16 +92,6 @@ func (s *distributionRouter) getDistributionInfo(ctx context.Context, w http.Res
    }
    mnfst, err := mnfstsrvc.Get(ctx, distributionInspect.Descriptor.Digest)
    if err != nil {
        switch err {
        case reference.ErrReferenceInvalidFormat,
            reference.ErrTagInvalidFormat,
            reference.ErrDigestInvalidFormat,
            reference.ErrNameContainsUppercase,
            reference.ErrNameEmpty,
            reference.ErrNameTooLong,
            reference.ErrNameNotCanonical:
            return errdefs.InvalidParameter(err)
        }
        return err
    }


@@ -44,7 +44,7 @@ func experimentalHandler(ctx context.Context, w http.ResponseWriter, r *http.Req
    return notImplementedError{}
}

// Handler returns the APIFunc to let the server wrap it in middlewares.
// Handler returns returns the APIFunc to let the server wrap it in middlewares.
func (r *experimentalRoute) Handler() httputils.APIFunc {
    return r.handler
}

@@ -1,8 +0,0 @@
package grpc // import "github.com/docker/docker/api/server/router/grpc"

import "google.golang.org/grpc"

// Backend abstracts a registerable GRPC service.
type Backend interface {
    RegisterGRPC(*grpc.Server)
}
@@ -1,37 +0,0 @@
package grpc // import "github.com/docker/docker/api/server/router/grpc"

import (
    "github.com/docker/docker/api/server/router"
    "golang.org/x/net/http2"
    "google.golang.org/grpc"
)

type grpcRouter struct {
    routes     []router.Route
    grpcServer *grpc.Server
    h2Server   *http2.Server
}

// NewRouter initializes a new grpc http router
func NewRouter(backends ...Backend) router.Router {
    r := &grpcRouter{
        h2Server:   &http2.Server{},
        grpcServer: grpc.NewServer(),
    }
    for _, b := range backends {
        b.RegisterGRPC(r.grpcServer)
    }
    r.initRoutes()
    return r
}

// Routes returns the available routers to the session controller
func (r *grpcRouter) Routes() []router.Route {
    return r.routes
}

func (r *grpcRouter) initRoutes() {
    r.routes = []router.Route{
        router.NewPostRoute("/grpc", r.serveGRPC),
    }
}
@@ -1,45 +0,0 @@
package grpc // import "github.com/docker/docker/api/server/router/grpc"

import (
    "context"
    "net/http"

    "github.com/pkg/errors"
    "golang.org/x/net/http2"
)

func (gr *grpcRouter) serveGRPC(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
    h, ok := w.(http.Hijacker)
    if !ok {
        return errors.New("handler does not support hijack")
    }
    proto := r.Header.Get("Upgrade")
    if proto == "" {
        return errors.New("no upgrade proto in request")
    }
    if proto != "h2c" {
        return errors.Errorf("protocol %s not supported", proto)
    }

    conn, _, err := h.Hijack()
    if err != nil {
        return err
    }
    resp := &http.Response{
        StatusCode: http.StatusSwitchingProtocols,
        ProtoMajor: 1,
        ProtoMinor: 1,
        Header:     http.Header{},
    }
    resp.Header.Set("Connection", "Upgrade")
    resp.Header.Set("Upgrade", proto)

    // set raw mode
    conn.Write([]byte{})
    resp.Write(conn)

    // https://godoc.org/golang.org/x/net/http2#Server.ServeConn
    // TODO: is it a problem that conn has already been written to?
    gr.h2Server.ServeConn(conn, &http2.ServeConnOpts{Handler: gr.grpcServer})
    return nil
}
@@ -34,10 +34,10 @@ func (r *imageRouter) initRoutes() {
        router.NewGetRoute("/images/{name:.*}/json", r.getImagesByName),
        // POST
        router.NewPostRoute("/images/load", r.postImagesLoad),
        router.NewPostRoute("/images/create", r.postImagesCreate),
        router.NewPostRoute("/images/{name:.*}/push", r.postImagesPush),
        router.NewPostRoute("/images/create", r.postImagesCreate, router.WithCancel),
        router.NewPostRoute("/images/{name:.*}/push", r.postImagesPush, router.WithCancel),
        router.NewPostRoute("/images/{name:.*}/tag", r.postImagesTag),
        router.NewPostRoute("/images/prune", r.postImagesPrune),
        router.NewPostRoute("/images/prune", r.postImagesPrune, router.WithCancel),
        // DELETE
        router.NewDeleteRoute("/images/{name:.*}", r.deleteImages),
    }

@@ -57,35 +57,37 @@ func (s *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWrite
        }
    }

    if image != "" { //pull
        metaHeaders := map[string][]string{}
        for k, v := range r.Header {
            if strings.HasPrefix(k, "X-Meta-") {
                metaHeaders[k] = v
    if err == nil {
        if image != "" { //pull
            metaHeaders := map[string][]string{}
            for k, v := range r.Header {
                if strings.HasPrefix(k, "X-Meta-") {
                    metaHeaders[k] = v
                }
            }
        }

        authEncoded := r.Header.Get("X-Registry-Auth")
        authConfig := &types.AuthConfig{}
        if authEncoded != "" {
            authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
            if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil {
                // for a pull it is not an error if no auth was given
                // to increase compatibility with the existing api it is defaulting to be empty
                authConfig = &types.AuthConfig{}
            authEncoded := r.Header.Get("X-Registry-Auth")
            authConfig := &types.AuthConfig{}
            if authEncoded != "" {
                authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
                if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil {
                    // for a pull it is not an error if no auth was given
                    // to increase compatibility with the existing api it is defaulting to be empty
                    authConfig = &types.AuthConfig{}
                }
            }
            err = s.backend.PullImage(ctx, image, tag, platform, metaHeaders, authConfig, output)
        } else { //import
            src := r.Form.Get("fromSrc")
            // 'err' MUST NOT be defined within this block, we need any error
            // generated from the download to be available to the output
            // stream processing below
            os := ""
            if platform != nil {
                os = platform.OS
            }
            err = s.backend.ImportImage(src, repo, os, tag, message, r.Body, output, r.Form["changes"])
        }
        err = s.backend.PullImage(ctx, image, tag, platform, metaHeaders, authConfig, output)
    } else { //import
        src := r.Form.Get("fromSrc")
        // 'err' MUST NOT be defined within this block, we need any error
        // generated from the download to be available to the output
        // stream processing below
        os := ""
        if platform != nil {
            os = platform.OS
        }
        err = s.backend.ImportImage(src, repo, os, tag, message, r.Body, output, r.Form["changes"])
    }
    if err != nil {
        if !output.Flushed() {

@@ -1,6 +1,9 @@
package router // import "github.com/docker/docker/api/server/router"

import (
    "context"
    "net/http"

    "github.com/docker/docker/api/server/httputils"
)

@@ -69,3 +72,33 @@ func NewOptionsRoute(path string, handler httputils.APIFunc, opts ...RouteWrappe
func NewHeadRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route {
    return NewRoute("HEAD", path, handler, opts...)
}

func cancellableHandler(h httputils.APIFunc) httputils.APIFunc {
    return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
        if notifier, ok := w.(http.CloseNotifier); ok {
            notify := notifier.CloseNotify()
            notifyCtx, cancel := context.WithCancel(ctx)
            finished := make(chan struct{})
            defer close(finished)
            ctx = notifyCtx
            go func() {
                select {
                case <-notify:
                    cancel()
                case <-finished:
                }
            }()
        }
        return h(ctx, w, r, vars)
    }
}

// WithCancel makes new route which embeds http.CloseNotifier feature to
// context.Context of handler.
func WithCancel(r Route) Route {
    return localRoute{
        method:  r.Method(),
        path:    r.Path(),
        handler: cancellableHandler(r.Handler()),
    }
}

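A handler wrapped by `WithCancel` only has to watch its context; when the client hangs up, the wrapper cancels it. A minimal standalone sketch of the pattern with illustrative names (note that modern `net/http` cancels `r.Context()` on client disconnect natively, which is what the `http.CloseNotifier` shim above provides here):

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"time"
)

// streamEvents stands in for a wrapped long-running handler: the
// cancellable wrapper closes ctx when the client goes away, so the
// handler's only obligation is to select on ctx.Done().
func streamEvents(ctx context.Context, w http.ResponseWriter) error {
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return ctx.Err() // client disconnected; stop streaming
		case t := <-ticker.C:
			fmt.Fprintln(w, "event at", t)
			if f, ok := w.(http.Flusher); ok {
				f.Flush()
			}
		}
	}
}

func main() {
	http.HandleFunc("/events", func(w http.ResponseWriter, r *http.Request) {
		// r.Context() is canceled on disconnect, mirroring what the
		// CloseNotifier-based wrapper arranges for the daemon's routes.
		_ = streamEvents(r.Context(), w)
	})
	_ = http.ListenAndServe("127.0.0.1:8080", nil)
}
```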
@@ -36,7 +36,7 @@ func (r *networkRouter) initRoutes() {
        router.NewPostRoute("/networks/create", r.postNetworkCreate),
        router.NewPostRoute("/networks/{id:.*}/connect", r.postNetworkConnect),
        router.NewPostRoute("/networks/{id:.*}/disconnect", r.postNetworkDisconnect),
        router.NewPostRoute("/networks/prune", r.postNetworksPrune),
        router.NewPostRoute("/networks/prune", r.postNetworksPrune, router.WithCancel),
        // DELETE
        router.NewDeleteRoute("/networks/{id:.*}", r.deleteNetwork),
    }

@@ -28,11 +28,11 @@ func (r *pluginRouter) initRoutes() {
        router.NewGetRoute("/plugins/{name:.*}/json", r.inspectPlugin),
        router.NewGetRoute("/plugins/privileges", r.getPrivileges),
        router.NewDeleteRoute("/plugins/{name:.*}", r.removePlugin),
        router.NewPostRoute("/plugins/{name:.*}/enable", r.enablePlugin),
        router.NewPostRoute("/plugins/{name:.*}/enable", r.enablePlugin), // PATCH?
        router.NewPostRoute("/plugins/{name:.*}/disable", r.disablePlugin),
        router.NewPostRoute("/plugins/pull", r.pullPlugin),
        router.NewPostRoute("/plugins/{name:.*}/push", r.pushPlugin),
        router.NewPostRoute("/plugins/{name:.*}/upgrade", r.upgradePlugin),
        router.NewPostRoute("/plugins/pull", r.pullPlugin, router.WithCancel),
        router.NewPostRoute("/plugins/{name:.*}/push", r.pushPlugin, router.WithCancel),
        router.NewPostRoute("/plugins/{name:.*}/upgrade", r.upgradePlugin, router.WithCancel),
        router.NewPostRoute("/plugins/{name:.*}/set", r.setPlugin),
        router.NewPostRoute("/plugins/create", r.createPlugin),
    }

@@ -37,7 +37,7 @@ func (sr *swarmRouter) initRoutes() {
|
||||
router.NewPostRoute("/services/create", sr.createService),
|
||||
router.NewPostRoute("/services/{id}/update", sr.updateService),
|
||||
router.NewDeleteRoute("/services/{id}", sr.removeService),
|
||||
router.NewGetRoute("/services/{id}/logs", sr.getServiceLogs),
|
||||
router.NewGetRoute("/services/{id}/logs", sr.getServiceLogs, router.WithCancel),
|
||||
|
||||
router.NewGetRoute("/nodes", sr.getNodes),
|
||||
router.NewGetRoute("/nodes/{id}", sr.getNode),
|
||||
@@ -46,7 +46,7 @@ func (sr *swarmRouter) initRoutes() {
|
||||
|
||||
router.NewGetRoute("/tasks", sr.getTasks),
|
||||
router.NewGetRoute("/tasks/{id}", sr.getTask),
|
||||
router.NewGetRoute("/tasks/{id}/logs", sr.getTaskLogs),
|
||||
router.NewGetRoute("/tasks/{id}/logs", sr.getTaskLogs, router.WithCancel),
|
||||
|
||||
router.NewGetRoute("/secrets", sr.getSecrets),
|
||||
router.NewPostRoute("/secrets/create", sr.createSecret),
|
||||
|
||||
@@ -28,16 +28,11 @@ func (sr *swarmRouter) initCluster(ctx context.Context, w http.ResponseWriter, r
|
||||
return errdefs.InvalidParameter(err)
|
||||
}
|
||||
version := httputils.VersionFromContext(ctx)
|
||||
|
||||
// DefaultAddrPool and SubnetSize were added in API 1.39. Ignore on older API versions.
|
||||
if versions.LessThan(version, "1.39") {
|
||||
req.DefaultAddrPool = nil
|
||||
req.SubnetSize = 0
|
||||
}
|
||||
// DataPathPort was added in API 1.40. Ignore this option on older API versions.
|
||||
if versions.LessThan(version, "1.40") {
|
||||
req.DataPathPort = 0
|
||||
}
|
||||
nodeID, err := sr.backend.Init(req)
|
||||
if err != nil {
|
||||
logrus.Errorf("Error initializing swarm: %v", err)
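The initCluster hunk above shows the daemon's version-gating idiom: fields added in a newer API version are zeroed when an older client calls in. A small sketch of that idiom, assuming only versions.LessThan from the diff and a hypothetical request struct:

```go
package main

import (
    "fmt"

    "github.com/docker/docker/api/types/versions"
)

// initRequest is a hypothetical stand-in for the swarm InitRequest.
type initRequest struct {
    DefaultAddrPool []string // added in API 1.39
    SubnetSize      uint32   // added in API 1.39
    DataPathPort    uint32   // added in API 1.40
}

// gate clears fields the negotiated API version does not know about.
func gate(apiVersion string, req *initRequest) {
    if versions.LessThan(apiVersion, "1.39") {
        req.DefaultAddrPool = nil
        req.SubnetSize = 0
    }
    if versions.LessThan(apiVersion, "1.40") {
        req.DataPathPort = 0
    }
}

func main() {
    req := &initRequest{DefaultAddrPool: []string{"10.0.0.0/8"}, SubnetSize: 24, DataPathPort: 4789}
    gate("1.38", req)
    fmt.Printf("%+v\n", *req) // all gated fields cleared for a 1.38 client
}
```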
@@ -209,11 +204,8 @@ func (sr *swarmRouter) createService(ctx context.Context, w http.ResponseWriter,
    encodedAuth := r.Header.Get("X-Registry-Auth")
    cliVersion := r.Header.Get("version")
    queryRegistry := false
    if cliVersion != "" {
        if versions.LessThan(cliVersion, "1.30") {
            queryRegistry = true
        }
        adjustForAPIVersion(cliVersion, &service)
    if cliVersion != "" && versions.LessThan(cliVersion, "1.30") {
        queryRegistry = true
    }

    resp, err := sr.backend.CreateService(service, encodedAuth, queryRegistry)
@@ -249,11 +241,8 @@ func (sr *swarmRouter) updateService(ctx context.Context, w http.ResponseWriter,
    flags.Rollback = r.URL.Query().Get("rollback")
    cliVersion := r.Header.Get("version")
    queryRegistry := false
    if cliVersion != "" {
        if versions.LessThan(cliVersion, "1.30") {
            queryRegistry = true
        }
        adjustForAPIVersion(cliVersion, &service)
    if cliVersion != "" && versions.LessThan(cliVersion, "1.30") {
        queryRegistry = true
    }

    resp, err := sr.backend.UpdateService(vars["id"], version, service, flags, queryRegistry)

@@ -9,8 +9,6 @@ import (
    "github.com/docker/docker/api/server/httputils"
    basictypes "github.com/docker/docker/api/types"
    "github.com/docker/docker/api/types/backend"
    "github.com/docker/docker/api/types/swarm"
    "github.com/docker/docker/api/types/versions"
)

// swarmLogs takes an http response, request, and selector, and writes the logs
@@ -66,33 +64,3 @@ func (sr *swarmRouter) swarmLogs(ctx context.Context, w io.Writer, r *http.Reque
    httputils.WriteLogStream(ctx, w, msgs, logsConfig, !tty)
    return nil
}

// adjustForAPIVersion takes a version and service spec and removes fields to
// make the spec compatible with the specified version.
func adjustForAPIVersion(cliVersion string, service *swarm.ServiceSpec) {
    if cliVersion == "" {
        return
    }
    if versions.LessThan(cliVersion, "1.40") {
        if service.TaskTemplate.ContainerSpec != nil {
            // Sysctls for docker swarm services weren't supported before
            // API version 1.40
            service.TaskTemplate.ContainerSpec.Sysctls = nil

            if service.TaskTemplate.ContainerSpec.Privileges != nil && service.TaskTemplate.ContainerSpec.Privileges.CredentialSpec != nil {
                // Support for setting credential-spec through configs was added in API 1.40
                service.TaskTemplate.ContainerSpec.Privileges.CredentialSpec.Config = ""
            }
            for _, config := range service.TaskTemplate.ContainerSpec.Configs {
                // support for the Runtime target was added in API 1.40
                config.Runtime = nil
            }
        }

        if service.TaskTemplate.Placement != nil {
            // MaxReplicas for docker swarm services weren't supported before
            // API version 1.40
            service.TaskTemplate.Placement.MaxReplicas = 0
        }
    }
}
@@ -1,87 +0,0 @@
package swarm // import "github.com/docker/docker/api/server/router/swarm"

import (
    "reflect"
    "testing"

    "github.com/docker/docker/api/types/swarm"
)

func TestAdjustForAPIVersion(t *testing.T) {
    var (
        expectedSysctls = map[string]string{"foo": "bar"}
    )
    // testing the negative -- does this leave everything else alone? -- is
    // prohibitively time-consuming to write, because it would need an object
    // with literally every field filled in.
    spec := &swarm.ServiceSpec{
        TaskTemplate: swarm.TaskSpec{
            ContainerSpec: &swarm.ContainerSpec{
                Sysctls: expectedSysctls,
                Privileges: &swarm.Privileges{
                    CredentialSpec: &swarm.CredentialSpec{
                        Config: "someconfig",
                    },
                },
                Configs: []*swarm.ConfigReference{
                    {
                        File: &swarm.ConfigReferenceFileTarget{
                            Name: "foo",
                            UID:  "bar",
                            GID:  "baz",
                        },
                        ConfigID:   "configFile",
                        ConfigName: "configFile",
                    },
                    {
                        Runtime:    &swarm.ConfigReferenceRuntimeTarget{},
                        ConfigID:   "configRuntime",
                        ConfigName: "configRuntime",
                    },
                },
            },
            Placement: &swarm.Placement{
                MaxReplicas: 222,
            },
        },
    }

    // first, does calling this with a later version correctly NOT strip
    // fields? do the later version first, so we can reuse this spec in the
    // next test.
    adjustForAPIVersion("1.40", spec)
    if !reflect.DeepEqual(spec.TaskTemplate.ContainerSpec.Sysctls, expectedSysctls) {
        t.Error("Sysctls was stripped from spec")
    }

    if spec.TaskTemplate.ContainerSpec.Privileges.CredentialSpec.Config != "someconfig" {
        t.Error("CredentialSpec.Config field was stripped from spec")
    }

    if spec.TaskTemplate.ContainerSpec.Configs[1].Runtime == nil {
        t.Error("ConfigReferenceRuntimeTarget was stripped from spec")
    }

    if spec.TaskTemplate.Placement.MaxReplicas != 222 {
        t.Error("MaxReplicas was stripped from spec")
    }

    // next, does calling this with an earlier version correctly strip fields?
    adjustForAPIVersion("1.29", spec)
    if spec.TaskTemplate.ContainerSpec.Sysctls != nil {
        t.Error("Sysctls was not stripped from spec")
    }

    if spec.TaskTemplate.ContainerSpec.Privileges.CredentialSpec.Config != "" {
        t.Error("CredentialSpec.Config field was not stripped from spec")
    }

    if spec.TaskTemplate.ContainerSpec.Configs[1].Runtime != nil {
        t.Error("ConfigReferenceRuntimeTarget was not stripped from spec")
    }

    if spec.TaskTemplate.Placement.MaxReplicas != 0 {
        t.Error("MaxReplicas was not stripped from spec")
    }

}
@@ -30,11 +30,10 @@ func NewRouter(b Backend, c ClusterBackend, fscache *fscache.FSCache, builder *b
    r.routes = []router.Route{
        router.NewOptionsRoute("/{anyroute:.*}", optionsHandler),
        router.NewGetRoute("/_ping", r.pingHandler),
        router.NewHeadRoute("/_ping", r.pingHandler),
        router.NewGetRoute("/events", r.getEvents),
        router.NewGetRoute("/events", r.getEvents, router.WithCancel),
        router.NewGetRoute("/info", r.getInfo),
        router.NewGetRoute("/version", r.getVersion),
        router.NewGetRoute("/system/df", r.getDiskUsage),
        router.NewGetRoute("/system/df", r.getDiskUsage, router.WithCancel),
        router.NewPostRoute("/auth", r.postAuth),
    }

@@ -27,18 +27,10 @@ func optionsHandler(ctx context.Context, w http.ResponseWriter, r *http.Request,
}

func (s *systemRouter) pingHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
    w.Header().Add("Cache-Control", "no-cache, no-store, must-revalidate")
    w.Header().Add("Pragma", "no-cache")

    builderVersion := build.BuilderVersion(*s.features)
    if bv := builderVersion; bv != "" {
        w.Header().Set("Builder-Version", string(bv))
    }
    if r.Method == http.MethodHead {
        w.Header().Set("Content-Type", "text/plain; charset=utf-8")
        w.Header().Set("Content-Length", "0")
        return nil
    }
    _, err := w.Write([]byte{'O', 'K'})
    return err
}
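A quick probe of a handler with the same contract as the ping handler above: HEAD returns an empty body with an explicit Content-Length, GET returns OK, and both carry the no-cache headers. This is a sketch, not the daemon's own test suite:

```go
package main

import (
    "fmt"
    "net/http"
    "net/http/httptest"
)

// ping mirrors the contract of the pingHandler in the diff above,
// minus the daemon-specific Builder-Version header.
func ping(w http.ResponseWriter, r *http.Request) {
    w.Header().Add("Cache-Control", "no-cache, no-store, must-revalidate")
    w.Header().Add("Pragma", "no-cache")
    if r.Method == http.MethodHead {
        w.Header().Set("Content-Type", "text/plain; charset=utf-8")
        w.Header().Set("Content-Length", "0")
        return
    }
    w.Write([]byte("OK"))
}

func main() {
    srv := httptest.NewServer(http.HandlerFunc(ping))
    defer srv.Close()

    head, _ := http.Head(srv.URL)
    fmt.Println("HEAD Content-Length:", head.Header.Get("Content-Length"))

    get, _ := http.Get(srv.URL)
    fmt.Println("GET Cache-Control:", get.Header.Get("Cache-Control"))
}
```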
@@ -50,7 +42,6 @@ func (s *systemRouter) getInfo(ctx context.Context, w http.ResponseWriter, r *ht
    }
    if s.cluster != nil {
        info.Swarm = s.cluster.Info()
        info.Warnings = append(info.Warnings, info.Swarm.Warnings...)
    }

    if versions.LessThan(httputils.VersionFromContext(ctx), "1.25") {

@@ -29,7 +29,7 @@ func (r *volumeRouter) initRoutes() {
    router.NewGetRoute("/volumes/{name:.*}", r.getVolumeByName),
    // POST
    router.NewPostRoute("/volumes/create", r.postVolumesCreate),
    router.NewPostRoute("/volumes/prune", r.postVolumesPrune),
    router.NewPostRoute("/volumes/prune", r.postVolumesPrune, router.WithCancel),
    // DELETE
    router.NewDeleteRoute("/volumes/{name:.*}", r.deleteVolumes),
}

@@ -12,7 +12,6 @@ import (
    "github.com/docker/docker/api/server/router"
    "github.com/docker/docker/api/server/router/debug"
    "github.com/docker/docker/dockerversion"
    "github.com/docker/docker/errdefs"
    "github.com/gorilla/mux"
    "github.com/sirupsen/logrus"
)
@@ -130,8 +129,8 @@ func (s *Server) makeHTTPHandler(handler httputils.APIFunc) http.HandlerFunc {

    // use intermediate variable to prevent "should not use basic type
    // string as key in context.WithValue" golint errors
    ctx := context.WithValue(r.Context(), dockerversion.UAStringKey{}, r.Header.Get("User-Agent"))
    r = r.WithContext(ctx)
    var ki interface{} = dockerversion.UAStringKey
    ctx := context.WithValue(context.Background(), ki, r.Header.Get("User-Agent"))
    handlerFunc := s.handlerWithGlobalMiddlewares(handler)

    vars := mux.Vars(r)
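The lint comment above is about context keys: plain string keys can collide across packages, so an unexported, distinctly-typed value is used instead. A minimal sketch of the idiom (names here are illustrative):

```go
package main

import (
    "context"
    "fmt"
)

// uaKey is a zero-sized, package-private key type. Because the type is
// unexported, no other package can construct a colliding key.
type uaKey struct{}

func main() {
    ctx := context.WithValue(context.Background(), uaKey{}, "Docker-Client/19.03")
    if ua, ok := ctx.Value(uaKey{}).(string); ok {
        fmt.Println("user agent:", ua)
    }
    // A plain string key like "ua" would trip go vet / golint and could
    // clash with another package using the same literal.
}
```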
@@ -140,7 +139,7 @@ func (s *Server) makeHTTPHandler(handler httputils.APIFunc) http.HandlerFunc {
    }

    if err := handlerFunc(ctx, w, r, vars); err != nil {
        statusCode := errdefs.GetHTTPErrorStatusCode(err)
        statusCode := httputils.GetHTTPErrorStatusCode(err)
        if statusCode >= 500 {
            logrus.Errorf("Handler for %s %s returned error: %v", r.Method, r.URL.Path, err)
        }
@@ -193,7 +192,6 @@ func (s *Server) createMux() *mux.Router {
    notFoundHandler := httputils.MakeErrorHandler(pageNotFoundError{})
    m.HandleFunc(versionMatcher+"/{path:.*}", notFoundHandler)
    m.NotFoundHandler = notFoundHandler
    m.MethodNotAllowedHandler = notFoundHandler

    return m
}
api/swagger.yaml (2049 lines changed): file diff suppressed because it is too large
@@ -1,4 +1,4 @@
package {{ .Package }} // import "github.com/docker/docker/api/types/{{ .Package }}"
package {{ .Package }}

// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE

@@ -50,7 +50,7 @@ type ContainerCommitOptions struct {

// ContainerExecInspect holds information returned by exec inspect.
type ContainerExecInspect struct {
    ExecID string `json:"ID"`
    ExecID string
    ContainerID string
    Running     bool
    ExitCode    int
@@ -187,15 +187,6 @@ type ImageBuildOptions struct {
    // build request. The same identifier can be used to gracefully cancel the
    // build with the cancel request.
    BuildID string
    // Outputs defines configurations for exporting build results. Only supported
    // in BuildKit mode
    Outputs []ImageBuildOutput
}

// ImageBuildOutput defines configuration for exporting a build result
type ImageBuildOutput struct {
    Type  string
    Attrs map[string]string
}

// BuilderVersion sets the version of underlying builder to use

@@ -54,7 +54,7 @@ type Config struct {
    Env []string // List of environment variable to set in the container
    Cmd strslice.StrSlice // Command to run when starting the container
    Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy
    ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (meaning treat as a command line) (Windows specific).
    ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific)
    Image string // Name of the image as it was passed by the operator (e.g. could be symbolic)
    Volumes map[string]struct{} // List of volumes (mounts) used for the container
    WorkingDir string // Current directory (PWD) in the command will be launched

@@ -1,4 +1,4 @@
package container // import "github.com/docker/docker/api/types/container"
package container

// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE

@@ -1,4 +1,4 @@
package container // import "github.com/docker/docker/api/types/container"
package container

// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE

@@ -1,4 +1,4 @@
package container // import "github.com/docker/docker/api/types/container"
package container

// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE
@@ -11,9 +11,7 @@ package container // import "github.com/docker/docker/api/types/container"
// swagger:model ContainerTopOKBody
type ContainerTopOKBody struct {

    // Each process running in the container, where each is process
    // is an array of values corresponding to the titles.
    //
    // Each process running in the container, where each is process is an array of values corresponding to the titles
    // Required: true
    Processes [][]string `json:"Processes"`


@@ -1,4 +1,4 @@
package container // import "github.com/docker/docker/api/types/container"
package container

// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE

@@ -1,4 +1,4 @@
package container // import "github.com/docker/docker/api/types/container"
package container

// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE

@@ -244,16 +244,6 @@ func (n PidMode) Container() string {
    return ""
}

// DeviceRequest represents a request for devices from a device driver.
// Used by GPU device drivers.
type DeviceRequest struct {
    Driver       string            // Name of device driver
    Count        int               // Number of devices to request (-1 = All)
    DeviceIDs    []string          // List of device IDs as recognizable by the device driver
    Capabilities [][]string        // An OR list of AND lists of device capabilities (e.g. "gpu")
    Options      map[string]string // Options to pass onto the device driver
}

// DeviceMapping represents the device mapping between the host and the container.
type DeviceMapping struct {
    PathOnHost string
@@ -337,14 +327,13 @@ type Resources struct {
    CpusetMems string // CpusetMems 0-2, 0,1
    Devices []DeviceMapping // List of devices to map inside the container
    DeviceCgroupRules []string // List of rule to be added to the device cgroup
    DeviceRequests []DeviceRequest // List of device requests for device drivers
    DiskQuota int64 // Disk limit (in bytes)
    KernelMemory int64 // Kernel memory limit (in bytes)
    KernelMemoryTCP int64 // Hard limit for kernel TCP buffer memory (in bytes)
    MemoryReservation int64 // Memory soft limit (in bytes)
    MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap
    MemorySwappiness *int64 // Tuning container memory swappiness behaviour
    OomKillDisable *bool // Whether to disable OOM Killer or not
    PidsLimit *int64 // Setting PIDs limit for a container; Set `0` or `-1` for unlimited, or `null` to not change.
    PidsLimit int64 // Setting pids limit for a container
    Ulimits []*units.Ulimit // List of ulimits to be set in the container

    // Applicable to Windows
@@ -380,10 +369,9 @@ type HostConfig struct {
    // Applicable to UNIX platforms
    CapAdd strslice.StrSlice // List of kernel capabilities to add to the container
    CapDrop strslice.StrSlice // List of kernel capabilities to remove from the container
    Capabilities []string `json:"Capabilities"` // List of kernel capabilities to be available for container (this overrides the default set)
    DNS []string `json:"Dns"` // List of DNS server to lookup
    DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for
    DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for
    DNS []string `json:"Dns"` // List of DNS server to lookup
    DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for
    DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for
    ExtraHosts []string // List of extra hosts
    GroupAdd []string // List of additional groups that the container process will run as
    IpcMode IpcMode // IPC namespace to use for the container

@@ -5,6 +5,7 @@ package filters // import "github.com/docker/docker/api/types/filters"

import (
    "encoding/json"
    "errors"
    "regexp"
    "strings"
@@ -36,13 +37,39 @@ func NewArgs(initialArgs ...KeyValuePair) Args {
    return args
}

// Keys returns all the keys in list of Args
func (args Args) Keys() []string {
    keys := make([]string, 0, len(args.fields))
    for k := range args.fields {
        keys = append(keys, k)
// ParseFlag parses a key=value string and adds it to an Args.
//
// Deprecated: Use Args.Add()
func ParseFlag(arg string, prev Args) (Args, error) {
    filters := prev
    if len(arg) == 0 {
        return filters, nil
    }
    return keys

    if !strings.Contains(arg, "=") {
        return filters, ErrBadFormat
    }

    f := strings.SplitN(arg, "=", 2)

    name := strings.ToLower(strings.TrimSpace(f[0]))
    value := strings.TrimSpace(f[1])

    filters.Add(name, value)

    return filters, nil
}

// ErrBadFormat is an error returned when a filter is not in the form key=value
//
// Deprecated: this error will be removed in a future version
var ErrBadFormat = errors.New("bad format of filter (expected name=value)")

// ToParam encodes the Args as args JSON encoded string
//
// Deprecated: use ToJSON
func ToParam(a Args) (string, error) {
    return ToJSON(a)
}

// MarshalJSON returns a JSON byte representation of the Args
@@ -80,6 +107,13 @@ func ToParamWithVersion(version string, a Args) (string, error) {
    return ToJSON(a)
}

// FromParam decodes a JSON encoded string into Args
//
// Deprecated: use FromJSON
func FromParam(p string) (Args, error) {
    return FromJSON(p)
}

// FromJSON decodes a JSON encoded string into Args
func FromJSON(p string) (Args, error) {
    args := NewArgs()
@@ -241,6 +275,14 @@ func (args Args) FuzzyMatch(key, source string) bool {
    return false
}

// Include returns true if the key exists in the mapping
//
// Deprecated: use Contains
func (args Args) Include(field string) bool {
    _, ok := args.fields[field]
    return ok
}

// Contains returns true if the key exists in the mapping
func (args Args) Contains(field string) bool {
    _, ok := args.fields[field]
@@ -281,22 +323,6 @@ func (args Args) WalkValues(field string, op func(value string) error) error {
    return nil
}

// Clone returns a copy of args.
func (args Args) Clone() (newArgs Args) {
    newArgs.fields = make(map[string]map[string]bool, len(args.fields))
    for k, m := range args.fields {
        var mm map[string]bool
        if m != nil {
            mm = make(map[string]bool, len(m))
            for kk, v := range m {
                mm[kk] = v
            }
        }
        newArgs.fields[k] = mm
    }
    return newArgs
}

func deprecatedArgs(d map[string][]string) map[string]map[string]bool {
    m := map[string]map[string]bool{}
    for k, v := range d {
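ParseFlag, Include, ToParam and FromParam above are all marked deprecated in favor of methods on Args; the tests in the next hunk still exercise the old helpers. A short migration sketch, assuming only the filters API shown in this diff:

```go
package main

import (
    "fmt"

    "github.com/docker/docker/api/types/filters"
)

func main() {
    args := filters.NewArgs()
    args.Add("status", "running") // replaces filters.ParseFlag("status=running", args)

    fmt.Println(args.Contains("status")) // replaces args.Include("status")

    js, err := filters.ToJSON(args) // replaces filters.ToParam(args)
    if err != nil {
        panic(err)
    }
    fmt.Println(js)
}
```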
@@ -8,6 +8,40 @@ import (
    is "gotest.tools/assert/cmp"
)

func TestParseArgs(t *testing.T) {
    // equivalent of `docker ps -f 'created=today' -f 'image.name=ubuntu*' -f 'image.name=*untu'`
    flagArgs := []string{
        "created=today",
        "image.name=ubuntu*",
        "image.name=*untu",
    }
    var (
        args = NewArgs()
        err  error
    )

    for i := range flagArgs {
        args, err = ParseFlag(flagArgs[i], args)
        assert.NilError(t, err)
    }
    assert.Check(t, is.Len(args.Get("created"), 1))
    assert.Check(t, is.Len(args.Get("image.name"), 2))
}

func TestParseArgsEdgeCase(t *testing.T) {
    var args Args
    args, err := ParseFlag("", args)
    if err != nil {
        t.Fatal(err)
    }
    if args.Len() != 0 {
        t.Fatalf("Expected an empty Args (map), got %v", args)
    }
    if args, err = ParseFlag("anything", args); err == nil || err != ErrBadFormat {
        t.Fatalf("Expected ErrBadFormat, got %v", err)
    }
}

func TestToJSON(t *testing.T) {
    fields := map[string]map[string]bool{
        "created": {"today": true},
@@ -313,6 +347,17 @@ func TestContains(t *testing.T) {
    }
}

func TestInclude(t *testing.T) {
    f := NewArgs()
    if f.Include("status") {
        t.Fatal("Expected to not include a status key, got true")
    }
    f.Add("status", "running")
    if !f.Include("status") {
        t.Fatal("Expected to include a status key, got false")
    }
}

func TestValidate(t *testing.T) {
    f := NewArgs()
    f.Add("status", "running")
@@ -376,11 +421,3 @@ func TestFuzzyMatch(t *testing.T) {
        }
    }
}

func TestClone(t *testing.T) {
    f := NewArgs()
    f.Add("foo", "bar")
    f2 := f.Clone()
    f2.Add("baz", "qux")
    assert.Check(t, is.Len(f.Get("baz"), 0))
}
@@ -1,4 +1,4 @@
package image // import "github.com/docker/docker/api/types/image"
package image

// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE

@@ -79,8 +79,7 @@ const (

// BindOptions defines options specific to mounts of type "bind".
type BindOptions struct {
    Propagation  Propagation `json:",omitempty"`
    NonRecursive bool        `json:",omitempty"`
    Propagation Propagation `json:",omitempty"`
}

// VolumeOptions represents the options for a mount of type volume.

@@ -112,13 +112,12 @@ type ConfigReference struct {
}

var acceptedFilters = map[string]bool{
    "dangling": true,
    "driver":   true,
    "id":       true,
    "label":    true,
    "name":     true,
    "scope":    true,
    "type":     true,
    "driver": true,
    "type":   true,
    "name":   true,
    "id":     true,
    "label":  true,
    "scope":  true,
}

// ValidateFilters validates the list of filter args with the available filters.

@@ -77,9 +77,8 @@ type Arg struct {

// Filter is used to conditionally apply Seccomp rules
type Filter struct {
    Caps      []string `json:"caps,omitempty"`
    Arches    []string `json:"arches,omitempty"`
    MinKernel string   `json:"minKernel,omitempty"`
    Caps   []string `json:"caps,omitempty"`
    Arches []string `json:"arches,omitempty"`
}

// Syscall is used to match a group of syscalls in Seccomp

@@ -120,7 +120,7 @@ type NetworkStats struct {
    RxBytes uint64 `json:"rx_bytes"`
    // Packets received. Windows and Linux.
    RxPackets uint64 `json:"rx_packets"`
    // Received errors. Not used on Windows. Note that we don't `omitempty` this
    // Received errors. Not used on Windows. Note that we dont `omitempty` this
    // field as it is expected in the >=v1.21 API stats structure.
    RxErrors uint64 `json:"rx_errors"`
    // Incoming packets dropped. Windows and Linux.
@@ -129,7 +129,7 @@ type NetworkStats struct {
    TxBytes uint64 `json:"tx_bytes"`
    // Packets sent. Windows and Linux.
    TxPackets uint64 `json:"tx_packets"`
    // Sent errors. Not used on Windows. Note that we don't `omitempty` this
    // Sent errors. Not used on Windows. Note that we dont `omitempty` this
    // field as it is expected in the >=v1.21 API stats structure.
    TxErrors uint64 `json:"tx_errors"`
    // Outgoing packets dropped. Windows and Linux.

@@ -29,8 +29,8 @@ func TestStrSliceMarshalJSON(t *testing.T) {

func TestStrSliceUnmarshalJSON(t *testing.T) {
    parts := map[string][]string{
        "":   {"default", "values"},
        "[]": {},
        "":                        {"default", "values"},
        "[]":                      {},
        `["/bin/sh","-c","echo"]`: {"/bin/sh", "-c", "echo"},
    }
    for json, expectedParts := range parts {
@@ -27,14 +27,9 @@ type ConfigReferenceFileTarget struct {
    Mode os.FileMode
}

// ConfigReferenceRuntimeTarget is a target for a config specifying that it
// isn't mounted into the container but instead has some other purpose.
type ConfigReferenceRuntimeTarget struct{}

// ConfigReference is a reference to a config in swarm
type ConfigReference struct {
    File    *ConfigReferenceFileTarget    `json:",omitempty"`
    Runtime *ConfigReferenceRuntimeTarget `json:",omitempty"`
    File *ConfigReferenceFileTarget
    ConfigID   string
    ConfigName string
}

@@ -33,7 +33,6 @@ type SELinuxContext struct {

// CredentialSpec for managed service account (Windows only)
type CredentialSpec struct {
    Config   string
    File     string
    Registry string
}
@@ -72,5 +71,4 @@ type ContainerSpec struct {
    Secrets []*SecretReference `json:",omitempty"`
    Configs []*ConfigReference `json:",omitempty"`
    Isolation container.Isolation `json:",omitempty"`
    Sysctls map[string]string `json:",omitempty"`
}

@@ -14,7 +14,6 @@ type ClusterInfo struct {
    RootRotationInProgress bool
    DefaultAddrPool []string
    SubnetSize uint32
    DataPathPort uint32
}

// Swarm represents a swarm.
@@ -154,7 +153,6 @@ type InitRequest struct {
    ListenAddr string
    AdvertiseAddr string
    DataPathAddr string
    DataPathPort uint32
    ForceNewCluster bool
    Spec Spec
    AutoLockManagers bool
@@ -209,8 +207,6 @@ type Info struct {
    Managers int `json:",omitempty"`

    Cluster *ClusterInfo `json:",omitempty"`

    Warnings []string `json:",omitempty"`
}

// Peer represents a peer.

@@ -127,7 +127,6 @@ type ResourceRequirements struct {
type Placement struct {
    Constraints []string `json:",omitempty"`
    Preferences []PlacementPreference `json:",omitempty"`
    MaxReplicas uint64 `json:",omitempty"`

    // Platforms stores all the platforms that the image can run on.
    // This field is used in the platform filter for scheduling. If empty,

@@ -158,12 +158,10 @@ type Info struct {
    MemoryLimit bool
    SwapLimit bool
    KernelMemory bool
    KernelMemoryTCP bool
    CPUCfsPeriod bool `json:"CpuCfsPeriod"`
    CPUCfsQuota bool `json:"CpuCfsQuota"`
    CPUShares bool
    CPUSet bool
    PidsLimit bool
    IPv4Forwarding bool
    BridgeNfIptables bool
    BridgeNfIP6tables bool `json:"BridgeNfIp6tables"`

@@ -27,13 +27,10 @@ type Volume struct {
    Name string `json:"Name"`

    // The driver specific options used when creating the volume.
    //
    // Required: true
    Options map[string]string `json:"Options"`

    // The level at which the volume exists. Either `global` for cluster-wide,
    // or `local` for machine level.
    //
    // The level at which the volume exists. Either `global` for cluster-wide, or `local` for machine level.
    // Required: true
    Scope string `json:"Scope"`


@@ -1,4 +1,4 @@
package volume // import "github.com/docker/docker/api/types/volume"
package volume

// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE
@@ -15,9 +15,7 @@ type VolumeCreateBody struct {
    // Required: true
    Driver string `json:"Driver"`

    // A mapping of driver options and values. These options are
    // passed directly to the driver and are driver specific.
    //
    // A mapping of driver options and values. These options are passed directly to the driver and are driver specific.
    // Required: true
    DriverOpts map[string]string `json:"DriverOpts"`

@@ -26,7 +24,6 @@ type VolumeCreateBody struct {
    Labels map[string]string `json:"Labels"`

    // The new volume's name. If not specified, Docker generates a name.
    //
    // Required: true
    Name string `json:"Name"`
}

@@ -1,4 +1,4 @@
package volume // import "github.com/docker/docker/api/types/volume"
package volume

// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE
@@ -17,8 +17,7 @@ type VolumeListOKBody struct {
    // Required: true
    Volumes []*types.Volume `json:"Volumes"`

    // Warnings that occurred when fetching the list of volumes.
    //
    // Warnings that occurred when fetching the list of volumes
    // Required: true
    Warnings []string `json:"Warnings"`
}
@@ -8,11 +8,10 @@ import (
    "io/ioutil"
    "runtime"
    "sync"
    "sync/atomic"
    "time"

    "github.com/containerd/containerd/content"
    containerderrors "github.com/containerd/containerd/errdefs"
    "github.com/containerd/containerd/errdefs"
    "github.com/containerd/containerd/images"
    "github.com/containerd/containerd/platforms"
    ctdreference "github.com/containerd/containerd/reference"
@@ -37,7 +36,7 @@ import (
    "github.com/moby/buildkit/util/progress"
    "github.com/moby/buildkit/util/resolver"
    "github.com/moby/buildkit/util/tracing"
    "github.com/opencontainers/go-digest"
    digest "github.com/opencontainers/go-digest"
    "github.com/opencontainers/image-spec/identity"
    ocispec "github.com/opencontainers/image-spec/specs-go/v1"
    "github.com/pkg/errors"
@@ -46,6 +45,7 @@ import (

// SourceOpt is options for creating the image source
type SourceOpt struct {
    SessionManager *session.Manager
    ContentStore   content.Store
    CacheAccessor  cache.Accessor
    ReferenceStore reference.Store
@@ -57,15 +57,13 @@ type SourceOpt struct {

type imageSource struct {
    SourceOpt
    g             flightcontrol.Group
    resolverCache *resolverCache
    g flightcontrol.Group
}

// NewSource creates a new image source
func NewSource(opt SourceOpt) (source.Source, error) {
    is := &imageSource{
        SourceOpt:     opt,
        resolverCache: newResolverCache(),
        SourceOpt: opt,
    }

    return is, nil
@@ -75,24 +73,19 @@ func (is *imageSource) ID() string {
    return source.DockerImageScheme
}

func (is *imageSource) getResolver(ctx context.Context, rfn resolver.ResolveOptionsFunc, ref string, sm *session.Manager) remotes.Resolver {
    if res := is.resolverCache.Get(ctx, ref); res != nil {
        return res
    }

func (is *imageSource) getResolver(ctx context.Context, rfn resolver.ResolveOptionsFunc, ref string) remotes.Resolver {
    opt := docker.ResolverOptions{
        Client: tracing.DefaultClient,
    }
    if rfn != nil {
        opt = rfn(ref)
    }
    opt.Credentials = is.getCredentialsFromSession(ctx, sm)
    opt.Credentials = is.getCredentialsFromSession(ctx)
    r := docker.NewResolver(opt)
    r = is.resolverCache.Add(ctx, ref, r)
    return r
}

func (is *imageSource) getCredentialsFromSession(ctx context.Context, sm *session.Manager) func(string) (string, string, error) {
func (is *imageSource) getCredentialsFromSession(ctx context.Context) func(string) (string, string, error) {
    id := session.FromContext(ctx)
    if id == "" {
        // can be removed after containerd/containerd#2812
@@ -104,7 +97,7 @@ func (is *imageSource) getCredentialsFromSession(ctx context.Context, sm *sessio
    timeoutCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()

    caller, err := sm.Get(timeoutCtx, id)
    caller, err := is.SessionManager.Get(timeoutCtx, id)
    if err != nil {
        return "", "", err
    }
@@ -129,13 +122,13 @@ func (is *imageSource) resolveLocal(refStr string) ([]byte, error) {
    return img.RawJSON(), nil
}

func (is *imageSource) resolveRemote(ctx context.Context, ref string, platform *ocispec.Platform, sm *session.Manager) (digest.Digest, []byte, error) {
func (is *imageSource) resolveRemote(ctx context.Context, ref string, platform *ocispec.Platform) (digest.Digest, []byte, error) {
    type t struct {
        dgst digest.Digest
        dt   []byte
    }
    res, err := is.g.Do(ctx, ref, func(ctx context.Context) (interface{}, error) {
        dgst, dt, err := imageutil.Config(ctx, ref, is.getResolver(ctx, is.ResolverOpt, ref, sm), is.ContentStore, nil, platform)
        dgst, dt, err := imageutil.Config(ctx, ref, is.getResolver(ctx, is.ResolverOpt, ref), is.ContentStore, platform)
        if err != nil {
            return nil, err
        }
@@ -149,14 +142,14 @@ func (is *imageSource) resolveRemote(ctx context.Context, ref string, platform *
    return typed.dgst, typed.dt, nil
}

func (is *imageSource) ResolveImageConfig(ctx context.Context, ref string, opt gw.ResolveImageConfigOpt, sm *session.Manager) (digest.Digest, []byte, error) {
func (is *imageSource) ResolveImageConfig(ctx context.Context, ref string, opt gw.ResolveImageConfigOpt) (digest.Digest, []byte, error) {
    resolveMode, err := source.ParseImageResolveMode(opt.ResolveMode)
    if err != nil {
        return "", nil, err
    }
    switch resolveMode {
    case source.ResolveModeForcePull:
        dgst, dt, err := is.resolveRemote(ctx, ref, opt.Platform, sm)
        dgst, dt, err := is.resolveRemote(ctx, ref, opt.Platform)
        // TODO: pull should fallback to local in case of failure to allow offline behavior
        // the fallback doesn't work currently
        return dgst, dt, err
@@ -178,13 +171,13 @@ func (is *imageSource) ResolveImageConfig(ctx context.Context, ref string, opt g
            return "", dt, err
        }
        // fallback to remote
        return is.resolveRemote(ctx, ref, opt.Platform, sm)
        return is.resolveRemote(ctx, ref, opt.Platform)
    }
    // should never happen
    return "", nil, fmt.Errorf("builder cannot resolve image %s: invalid mode %q", ref, opt.ResolveMode)
}

func (is *imageSource) Resolve(ctx context.Context, id source.Identifier, sm *session.Manager) (source.SourceInstance, error) {
func (is *imageSource) Resolve(ctx context.Context, id source.Identifier) (source.SourceInstance, error) {
    imageIdentifier, ok := id.(*source.ImageIdentifier)
    if !ok {
        return nil, errors.Errorf("invalid image identifier %v", id)
@@ -198,9 +191,8 @@ func (is *imageSource) Resolve(ctx context.Context, id source.Identifier, sm *se
    p := &puller{
        src:      imageIdentifier,
        is:       is,
        resolver: is.getResolver(ctx, is.ResolverOpt, imageIdentifier.Reference.String(), sm),
        resolver: is.getResolver(ctx, is.ResolverOpt, imageIdentifier.Reference.String()),
        platform: platform,
        sm:       sm,
    }
    return p, nil
}
@@ -216,7 +208,6 @@ type puller struct {
    resolver remotes.Resolver
    config   []byte
    platform ocispec.Platform
    sm       *session.Manager
}

func (p *puller) mainManifestKey(dgst digest.Digest, platform ocispec.Platform) (digest.Digest, error) {
@@ -303,7 +294,7 @@ func (p *puller) resolve(ctx context.Context) error {
            resolveProgressDone(err)
            return
        }
        _, dt, err := p.is.ResolveImageConfig(ctx, ref.String(), gw.ResolveImageConfigOpt{Platform: &p.platform, ResolveMode: resolveModeToString(p.src.ResolveMode)}, p.sm)
        _, dt, err := p.is.ResolveImageConfig(ctx, ref.String(), gw.ResolveImageConfigOpt{Platform: &p.platform, ResolveMode: resolveModeToString(p.src.ResolveMode)})
        if err != nil {
            p.resolveErr = err
            resolveProgressDone(err)
@@ -329,11 +320,7 @@ func (p *puller) CacheKey(ctx context.Context, index int) (string, bool, error)
    }

    if p.config != nil {
        k := cacheKeyFromConfig(p.config).String()
        if k == "" {
            return digest.FromBytes(p.config).String(), true, nil
        }
        return k, true, nil
        return cacheKeyFromConfig(p.config).String(), true, nil
    }

    if err := p.resolve(ctx); err != nil {
@@ -348,16 +335,7 @@ func (p *puller) CacheKey(ctx context.Context, index int) (string, bool, error)
        return dgst.String(), false, nil
    }

    k := cacheKeyFromConfig(p.config).String()
    if k == "" {
        dgst, err := p.mainManifestKey(p.desc.Digest, p.platform)
        if err != nil {
            return "", false, err
        }
        return dgst.String(), true, nil
    }

    return k, true, nil
    return cacheKeyFromConfig(p.config).String(), true, nil
}

func (p *puller) Snapshot(ctx context.Context) (cache.ImmutableRef, error) {
@@ -402,12 +380,6 @@ func (p *puller) Snapshot(ctx context.Context) (cache.ImmutableRef, error) {
        return nil, err
    }

    platform := platforms.Only(p.platform)
    // workaround for GCR bug that requires a request to manifest endpoint for authentication to work.
    // if current resolver has not used manifests do a dummy request.
    // in most cases resolver should be cached and extra request is not needed.
    ensureManifestRequested(ctx, p.resolver, p.ref)

    var (
        schema1Converter *schema1.Converter
        handlers         []images.Handler
@@ -440,9 +412,7 @@ func (p *puller) Snapshot(ctx context.Context) (cache.ImmutableRef, error) {
        // Set any children labels for that content
        childrenHandler = images.SetChildrenLabels(p.is.ContentStore, childrenHandler)
        // Filter the children by the platform
        childrenHandler = images.FilterPlatforms(childrenHandler, platform)
        // Limit manifests pulled to the best match in an index
        childrenHandler = images.LimitManifests(childrenHandler, platform, 1)
        childrenHandler = images.FilterPlatforms(childrenHandler, platforms.Default())

        handlers = append(handlers,
            remotes.FetchHandler(p.is.ContentStore, fetcher),
@@ -450,7 +420,7 @@ func (p *puller) Snapshot(ctx context.Context) (cache.ImmutableRef, error) {
        )
    }

    if err := images.Dispatch(ctx, images.Handlers(handlers...), nil, p.desc); err != nil {
    if err := images.Dispatch(ctx, images.Handlers(handlers...), p.desc); err != nil {
        stopProgress()
        return nil, err
    }
@@ -463,12 +433,12 @@ func (p *puller) Snapshot(ctx context.Context) (cache.ImmutableRef, error) {
        }
    }

    mfst, err := images.Manifest(ctx, p.is.ContentStore, p.desc, platform)
    mfst, err := images.Manifest(ctx, p.is.ContentStore, p.desc, platforms.Default())
    if err != nil {
        return nil, err
    }

    config, err := images.Config(ctx, p.is.ContentStore, p.desc, platform)
    config, err := images.Config(ctx, p.is.ContentStore, p.desc, platforms.Default())
    if err != nil {
        return nil, err
    }
@@ -526,9 +496,6 @@ func (p *puller) Snapshot(ctx context.Context) (cache.ImmutableRef, error) {
    layers := make([]xfer.DownloadDescriptor, 0, len(mfst.Layers))

    for i, desc := range mfst.Layers {
        if err := desc.Digest.Validate(); err != nil {
            return nil, errors.Wrap(err, "layer digest could not be validated")
        }
        ongoing.add(desc)
        layers = append(layers, &layerDescriptor{
            desc: desc,
@@ -548,10 +515,10 @@ func (p *puller) Snapshot(ctx context.Context) (cache.ImmutableRef, error) {

    r := image.NewRootFS()
    rootFS, release, err := p.is.DownloadManager.Download(ctx, *r, runtime.GOOS, layers, pkgprogress.ChanOutput(pchan))
    stopProgress()
    if err != nil {
        return nil, err
    }
    stopProgress()

    ref, err := p.is.CacheAccessor.GetFromSnapshotter(ctx, string(rootFS.ChainID()), cache.WithDescription(fmt.Sprintf("pulled from %s", p.ref)))
    release()
@@ -687,7 +654,7 @@ func showProgress(ctx context.Context, ongoing *jobs, cs content.Store, pw progr
        if !j.done {
            info, err := cs.Info(context.TODO(), j.Digest)
            if err != nil {
                if containerderrors.IsNotFound(err) {
                if errdefs.IsNotFound(err) {
                    // pw.Write(j.Digest.String(), progress.Status{
                    //     Action: "waiting",
                    // })
@@ -805,8 +772,8 @@ func cacheKeyFromConfig(dt []byte) digest.Digest {
    if err != nil {
        return digest.FromBytes(dt)
    }
    if img.RootFS.Type != "layers" || len(img.RootFS.DiffIDs) == 0 {
        return ""
    if img.RootFS.Type != "layers" {
        return digest.FromBytes(dt)
    }
    return identity.ChainID(img.RootFS.DiffIDs)
}
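cacheKeyFromConfig above keys the cache on the layer chain ID. A small sketch of what identity.ChainID computes over a config's diff IDs (the digests below are made up for illustration):

```go
package main

import (
    "fmt"

    digest "github.com/opencontainers/go-digest"
    "github.com/opencontainers/image-spec/identity"
)

func main() {
    diffIDs := []digest.Digest{
        digest.FromString("layer-0"),
        digest.FromString("layer-1"),
    }
    // Per the OCI spec, ChainID folds pairwise:
    //   chain(0) = diffID(0)
    //   chain(n) = sha256(chain(n-1) + " " + diffID(n))
    fmt.Println(identity.ChainID(diffIDs))
}
```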
@@ -824,90 +791,3 @@ func resolveModeToString(rm source.ResolveMode) string {
    }
    return ""
}

type resolverCache struct {
    mu sync.Mutex
    m  map[string]cachedResolver
}

type cachedResolver struct {
    counter int64 // needs to be 64bit aligned for 32bit systems
    timeout time.Time
    remotes.Resolver
}
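The comment on counter above reflects a real Go constraint: on 32-bit platforms, 64-bit sync/atomic operations require the operand to be 8-byte aligned, and only the first word of an allocated struct is guaranteed to have that alignment. A minimal sketch of why the field is kept first:

```go
package main

import (
    "fmt"
    "sync/atomic"
)

type counterFirst struct {
    counter int64 // first field: safe for 64-bit atomics on 32-bit systems
    name    string
}

func main() {
    c := counterFirst{name: "resolver"}
    atomic.AddInt64(&c.counter, 1)
    fmt.Println(atomic.LoadInt64(&c.counter))
    // Placing counter after name could misalign it on 386/ARM and panic
    // at runtime when the atomic operations execute.
}
```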
func (cr *cachedResolver) Resolve(ctx context.Context, ref string) (name string, desc ocispec.Descriptor, err error) {
    atomic.AddInt64(&cr.counter, 1)
    return cr.Resolver.Resolve(ctx, ref)
}

func (r *resolverCache) Add(ctx context.Context, ref string, resolver remotes.Resolver) remotes.Resolver {
    r.mu.Lock()
    defer r.mu.Unlock()

    ref = r.repo(ref) + "-" + session.FromContext(ctx)

    cr, ok := r.m[ref]
    cr.timeout = time.Now().Add(time.Minute)
    if ok {
        return &cr
    }

    cr.Resolver = resolver
    r.m[ref] = cr
    return &cr
}

func (r *resolverCache) repo(refStr string) string {
    ref, err := distreference.ParseNormalizedNamed(refStr)
    if err != nil {
        return refStr
    }
    return ref.Name()
}

func (r *resolverCache) Get(ctx context.Context, ref string) remotes.Resolver {
    r.mu.Lock()
    defer r.mu.Unlock()

    ref = r.repo(ref) + "-" + session.FromContext(ctx)

    cr, ok := r.m[ref]
    if !ok {
        return nil
    }
    return &cr
}

func (r *resolverCache) clean(now time.Time) {
    r.mu.Lock()
    for k, cr := range r.m {
        if now.After(cr.timeout) {
            delete(r.m, k)
        }
    }
    r.mu.Unlock()
}

func newResolverCache() *resolverCache {
    rc := &resolverCache{
        m: map[string]cachedResolver{},
    }
    t := time.NewTicker(time.Minute)
    go func() {
        for {
            rc.clean(<-t.C)
        }
    }()
    return rc
}

func ensureManifestRequested(ctx context.Context, res remotes.Resolver, ref string) {
    cr, ok := res.(*cachedResolver)
    if !ok {
        return
    }
    if atomic.LoadInt64(&cr.counter) == 0 {
        res.Resolve(ctx, ref)
    }
}
@@ -1,163 +0,0 @@
package localinlinecache

import (
    "context"
    "encoding/json"
    "time"

    "github.com/containerd/containerd/content"
    "github.com/containerd/containerd/images"
    distreference "github.com/docker/distribution/reference"
    imagestore "github.com/docker/docker/image"
    "github.com/docker/docker/reference"
    "github.com/moby/buildkit/cache/remotecache"
    registryremotecache "github.com/moby/buildkit/cache/remotecache/registry"
    v1 "github.com/moby/buildkit/cache/remotecache/v1"
    "github.com/moby/buildkit/session"
    "github.com/moby/buildkit/solver"
    "github.com/moby/buildkit/util/resolver"
    "github.com/moby/buildkit/worker"
    digest "github.com/opencontainers/go-digest"
    specs "github.com/opencontainers/image-spec/specs-go/v1"
    "github.com/pkg/errors"
)

// ResolveCacheImporterFunc returns a resolver function for local inline cache
func ResolveCacheImporterFunc(sm *session.Manager, resolverOpt resolver.ResolveOptionsFunc, rs reference.Store, is imagestore.Store) remotecache.ResolveCacheImporterFunc {

    upstream := registryremotecache.ResolveCacheImporterFunc(sm, resolverOpt)

    return func(ctx context.Context, attrs map[string]string) (remotecache.Importer, specs.Descriptor, error) {
        if dt, err := tryImportLocal(rs, is, attrs["ref"]); err == nil {
            return newLocalImporter(dt), specs.Descriptor{}, nil
        }
        return upstream(ctx, attrs)
    }
}

func tryImportLocal(rs reference.Store, is imagestore.Store, refStr string) ([]byte, error) {
    ref, err := distreference.ParseNormalizedNamed(refStr)
    if err != nil {
        return nil, err
    }
    dgst, err := rs.Get(ref)
    if err != nil {
        return nil, err
    }
    img, err := is.Get(imagestore.ID(dgst))
    if err != nil {
        return nil, err
    }

    return img.RawJSON(), nil
}

func newLocalImporter(dt []byte) remotecache.Importer {
    return &localImporter{dt: dt}
}

type localImporter struct {
    dt []byte
}

func (li *localImporter) Resolve(ctx context.Context, _ specs.Descriptor, id string, w worker.Worker) (solver.CacheManager, error) {
    cc := v1.NewCacheChains()
    if err := li.importInlineCache(ctx, li.dt, cc); err != nil {
        return nil, err
    }

    keysStorage, resultStorage, err := v1.NewCacheKeyStorage(cc, w)
    if err != nil {
        return nil, err
    }
    return solver.NewCacheManager(id, keysStorage, resultStorage), nil
}
func (li *localImporter) importInlineCache(ctx context.Context, dt []byte, cc solver.CacheExporterTarget) error {
    var img image

    if err := json.Unmarshal(dt, &img); err != nil {
        return err
    }

    if img.Cache == nil {
        return nil
    }

    var config v1.CacheConfig
    if err := json.Unmarshal(img.Cache, &config.Records); err != nil {
        return err
    }

    createdDates, createdMsg, err := parseCreatedLayerInfo(img)
    if err != nil {
        return err
    }

    layers := v1.DescriptorProvider{}
    for i, diffID := range img.Rootfs.DiffIDs {
        dgst := digest.Digest(diffID.String())
        desc := specs.Descriptor{
            Digest:      dgst,
            Size:        -1,
            MediaType:   images.MediaTypeDockerSchema2Layer,
            Annotations: map[string]string{},
        }
        if createdAt := createdDates[i]; createdAt != "" {
            desc.Annotations["buildkit/createdat"] = createdAt
        }
        if createdBy := createdMsg[i]; createdBy != "" {
            desc.Annotations["buildkit/description"] = createdBy
        }
        desc.Annotations["containerd.io/uncompressed"] = img.Rootfs.DiffIDs[i].String()
        layers[dgst] = v1.DescriptorProviderPair{
            Descriptor: desc,
            Provider:   &emptyProvider{},
        }
        config.Layers = append(config.Layers, v1.CacheLayer{
            Blob:        dgst,
            ParentIndex: i - 1,
        })
    }

    return v1.ParseConfig(config, layers, cc)
}

type image struct {
    Rootfs struct {
        DiffIDs []digest.Digest `json:"diff_ids"`
    } `json:"rootfs"`
    Cache   []byte `json:"moby.buildkit.cache.v0"`
    History []struct {
        Created    *time.Time `json:"created,omitempty"`
        CreatedBy  string     `json:"created_by,omitempty"`
        EmptyLayer bool       `json:"empty_layer,omitempty"`
    } `json:"history,omitempty"`
}

func parseCreatedLayerInfo(img image) ([]string, []string, error) {
    dates := make([]string, 0, len(img.Rootfs.DiffIDs))
    createdBy := make([]string, 0, len(img.Rootfs.DiffIDs))
    for _, h := range img.History {
        if !h.EmptyLayer {
            str := ""
            if h.Created != nil {
                dt, err := h.Created.MarshalText()
                if err != nil {
                    return nil, nil, err
                }
                str = string(dt)
            }
            dates = append(dates, str)
            createdBy = append(createdBy, h.CreatedBy)
        }
    }
    return dates, createdBy, nil
}

type emptyProvider struct {
}

func (p *emptyProvider) ReaderAt(ctx context.Context, dec specs.Descriptor) (content.ReaderAt, error) {
    return nil, errors.Errorf("ReaderAt not implemented for empty provider")
}
@@ -12,22 +12,12 @@ import (
    "golang.org/x/sync/errgroup"
)

func (s *snapshotter) GetDiffIDs(ctx context.Context, key string) ([]layer.DiffID, error) {
func (s *snapshotter) EnsureLayer(ctx context.Context, key string) ([]layer.DiffID, error) {
    if l, err := s.getLayer(key, true); err != nil {
        return nil, err
    } else if l != nil {
        return getDiffChain(l), nil
    }
    return nil, nil
}

func (s *snapshotter) EnsureLayer(ctx context.Context, key string) ([]layer.DiffID, error) {
    diffIDs, err := s.GetDiffIDs(ctx, key)
    if err != nil {
        return nil, err
    } else if diffIDs != nil {
        return diffIDs, nil
    }

    id, committed := s.getGraphDriverID(key)
    if !committed {

@@ -11,7 +11,6 @@ import (
    "github.com/containerd/containerd/snapshots"
    "github.com/docker/docker/daemon/graphdriver"
    "github.com/docker/docker/layer"
    "github.com/docker/docker/pkg/idtools"
    "github.com/moby/buildkit/identity"
    "github.com/moby/buildkit/snapshot"
    digest "github.com/opencontainers/go-digest"
@@ -26,10 +25,9 @@ var keySize = []byte("size")

// Opt defines options for creating the snapshotter
type Opt struct {
    GraphDriver     graphdriver.Driver
    LayerStore      layer.Store
    Root            string
    IdentityMapping *idtools.IdentityMapping
    GraphDriver graphdriver.Driver
    LayerStore  layer.Store
    Root        string
}

type graphIDRegistrar interface {
@@ -75,14 +73,6 @@ func NewSnapshotter(opt Opt) (snapshot.SnapshotterBase, error) {
    return s, nil
}

func (s *snapshotter) Name() string {
    return "default"
}

func (s *snapshotter) IdentityMapping() *idtools.IdentityMapping {
    return s.opt.IdentityMapping
}

func (s *snapshotter) Prepare(ctx context.Context, key, parent string, opts ...snapshots.Opt) error {
    origParent := parent
    if parent != "" {
@@ -254,24 +244,24 @@ func (s *snapshotter) Mounts(ctx context.Context, key string) (snapshot.Mountabl
        id := identity.NewID()
        var rwlayer layer.RWLayer
        return &mountable{
            idmap: s.opt.IdentityMapping,
            acquire: func() ([]mount.Mount, func() error, error) {
            acquire: func() ([]mount.Mount, error) {
                rwlayer, err = s.opt.LayerStore.CreateRWLayer(id, l.ChainID(), nil)
                if err != nil {
                    return nil, nil, err
                    return nil, err
                }
                rootfs, err := rwlayer.Mount("")
                if err != nil {
                    return nil, nil, err
                    return nil, err
                }
                return []mount.Mount{{
                    Source:  rootfs.Path(),
                    Type:    "bind",
                    Options: []string{"rbind"},
                }}, func() error {
                    _, err := s.opt.LayerStore.ReleaseRWLayer(rwlayer)
                    return err
                }, nil
                    Source:  rootfs.Path(),
                    Type:    "bind",
                    Options: []string{"rbind"},
                }}, nil
            },
            release: func() error {
                _, err := s.opt.LayerStore.ReleaseRWLayer(rwlayer)
                return err
            },
        }, nil
    }
@@ -279,19 +269,19 @@ func (s *snapshotter) Mounts(ctx context.Context, key string) (snapshot.Mountabl
    id, _ := s.getGraphDriverID(key)

    return &mountable{
        idmap: s.opt.IdentityMapping,
        acquire: func() ([]mount.Mount, func() error, error) {
        acquire: func() ([]mount.Mount, error) {
            rootfs, err := s.opt.GraphDriver.Get(id, "")
            if err != nil {
                return nil, nil, err
                return nil, err
            }
            return []mount.Mount{{
                Source:  rootfs.Path(),
                Type:    "bind",
                Options: []string{"rbind"},
            }}, func() error {
                return s.opt.GraphDriver.Put(id)
            }, nil
                Source:  rootfs.Path(),
                Type:    "bind",
                Options: []string{"rbind"},
            }}, nil
        },
        release: func() error {
            return s.opt.GraphDriver.Put(id)
        },
    }, nil
}
@@ -438,33 +428,31 @@ func (s *snapshotter) Close() error {
|
||||
type mountable struct {
|
||||
mu sync.Mutex
|
||||
mounts []mount.Mount
|
||||
acquire func() ([]mount.Mount, func() error, error)
|
||||
acquire func() ([]mount.Mount, error)
|
||||
release func() error
|
||||
refCount int
|
||||
idmap *idtools.IdentityMapping
|
||||
}
|
||||
|
||||
func (m *mountable) Mount() ([]mount.Mount, func() error, error) {
|
||||
func (m *mountable) Mount() ([]mount.Mount, error) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
if m.mounts != nil {
|
||||
m.refCount++
|
||||
return m.mounts, m.releaseMount, nil
|
||||
return m.mounts, nil
|
||||
}
|
||||
|
||||
mounts, release, err := m.acquire()
|
||||
mounts, err := m.acquire()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return nil, err
|
||||
}
|
||||
m.mounts = mounts
|
||||
m.release = release
|
||||
m.refCount = 1
|
||||
|
||||
return m.mounts, m.releaseMount, nil
|
||||
return m.mounts, nil
|
||||
}
|
||||
|
||||
func (m *mountable) releaseMount() error {
|
||||
func (m *mountable) Release() error {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
@@ -479,12 +467,5 @@ func (m *mountable) releaseMount() error {
|
||||
}
|
||||
|
||||
m.mounts = nil
|
||||
defer func() {
|
||||
m.release = nil
|
||||
}()
|
||||
return m.release()
|
||||
}
|
||||
|
||||
func (m *mountable) IdentityMapping() *idtools.IdentityMapping {
|
||||
return m.idmap
|
||||
}
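
The mountable hunks above trade a per-call release closure returned by Mount for a single refcounted Release method. A minimal, self-contained sketch of that pattern, using stand-in names rather than the actual moby types:

package main

import (
	"fmt"
	"sync"
)

// refMount sketches the refcounted pattern: the first Mount acquires the
// underlying resource, later Mounts only bump the count, and Release tears
// the resource down once the count drops to zero.
type refMount struct {
	mu       sync.Mutex
	acquired bool
	refCount int
}

func (m *refMount) Mount() error {
	m.mu.Lock()
	defer m.mu.Unlock()
	if m.acquired {
		m.refCount++
		return nil
	}
	// acquire the underlying mount here (e.g. a graphdriver Get)
	m.acquired = true
	m.refCount = 1
	return nil
}

func (m *refMount) Release() error {
	m.mu.Lock()
	defer m.mu.Unlock()
	if m.refCount > 1 {
		m.refCount--
		return nil
	}
	m.refCount = 0
	m.acquired = false
	// release the underlying mount here (e.g. a graphdriver Put)
	return nil
}

func main() {
	m := &refMount{}
	m.Mount()
	m.Mount()
	m.Release()
	m.Release()
	fmt.Println("released:", !m.acquired)
}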

@@ -5,7 +5,6 @@ import (
"fmt"
"io"
"net"
"strconv"
"strings"
"sync"
"time"
@@ -17,7 +16,6 @@ import (
"github.com/docker/docker/builder"
"github.com/docker/docker/daemon/config"
"github.com/docker/docker/daemon/images"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/streamformatter"
"github.com/docker/docker/pkg/system"
"github.com/docker/libnetwork"
@@ -26,12 +24,12 @@ import (
"github.com/moby/buildkit/control"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/solver/llbsolver"
"github.com/moby/buildkit/util/entitlements"
"github.com/moby/buildkit/util/resolver"
"github.com/moby/buildkit/util/tracing"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
"google.golang.org/grpc"
grpcmetadata "google.golang.org/grpc/metadata"
)

@@ -64,6 +62,10 @@ var cacheFields = map[string]bool{
"immutable": false,
}

func init() {
llbsolver.AllowNetworkHostUnstable = true
}

// Opt is option struct required for creating the builder
type Opt struct {
SessionManager *session.Manager
@@ -73,10 +75,6 @@ type Opt struct {
DefaultCgroupParent string
ResolverOpt resolver.ResolveOptionsFunc
BuilderConfig config.BuilderConfig
Rootless bool
IdentityMapping *idtools.IdentityMapping
DNSConfig config.DNSConfig
ApparmorProfile string
}

// Builder can build using BuildKit backend
@@ -92,10 +90,6 @@ type Builder struct {
func New(opt Opt) (*Builder, error) {
reqHandler := newReqBodyHandler(tracing.DefaultTransport)

if opt.IdentityMapping != nil && opt.IdentityMapping.Empty() {
opt.IdentityMapping = nil
}

c, err := newController(reqHandler, opt)
if err != nil {
return nil, err
@@ -108,11 +102,6 @@ func New(opt Opt) (*Builder, error) {
return b, nil
}

// RegisterGRPC registers controller to the grpc server.
func (b *Builder) RegisterGRPC(s *grpc.Server) {
b.controller.Register(s)
}

// Cancel cancels a build using ID
func (b *Builder) Cancel(ctx context.Context, id string) error {
b.mu.Lock()
@@ -242,9 +231,7 @@ func (b *Builder) Build(ctx context.Context, opt backend.BuildConfig) (*builder.
}

defer func() {
b.mu.Lock()
delete(b.jobs, buildID)
b.mu.Unlock()
}()
}

@@ -252,7 +239,9 @@ func (b *Builder) Build(ctx context.Context, opt backend.BuildConfig) (*builder.

id := identity.NewID()

frontendAttrs := map[string]string{}
frontendAttrs := map[string]string{
"override-copy-image": "docker.io/docker/dockerfile-copy:v0.1.9@sha256:e8f159d3f00786604b93c675ee2783f8dc194bb565e61ca5788f6a6e9d304061",
}

if opt.Options.Target != "" {
frontendAttrs["target"] = opt.Options.Target
@@ -324,45 +313,19 @@ func (b *Builder) Build(ctx context.Context, opt backend.BuildConfig) (*builder.
}
frontendAttrs["add-hosts"] = extraHosts

exporterName := ""
exporterAttrs := map[string]string{}

if len(opt.Options.Outputs) > 1 {
return nil, errors.Errorf("multiple outputs not supported")
} else if len(opt.Options.Outputs) == 0 {
exporterName = "moby"
} else {
// cacheonly is a special type for triggering skipping all exporters
if opt.Options.Outputs[0].Type != "cacheonly" {
exporterName = opt.Options.Outputs[0].Type
exporterAttrs = opt.Options.Outputs[0].Attrs
}
}

if exporterName == "moby" {
if len(opt.Options.Tags) > 0 {
exporterAttrs["name"] = strings.Join(opt.Options.Tags, ",")
}
}

cache := controlapi.CacheOptions{}

if inlineCache := opt.Options.BuildArgs["BUILDKIT_INLINE_CACHE"]; inlineCache != nil {
if b, err := strconv.ParseBool(*inlineCache); err == nil && b {
cache.Exports = append(cache.Exports, &controlapi.CacheOptionsEntry{
Type: "inline",
})
}
if len(opt.Options.Tags) > 0 {
exporterAttrs["name"] = strings.Join(opt.Options.Tags, ",")
}

req := &controlapi.SolveRequest{
Ref: id,
Exporter: exporterName,
Exporter: "moby",
ExporterAttrs: exporterAttrs,
Frontend: "dockerfile.v0",
FrontendAttrs: frontendAttrs,
Session: opt.Options.SessionID,
Cache: cache,
}

if opt.Options.NetworkMode == "host" {
@@ -378,9 +341,6 @@ func (b *Builder) Build(ctx context.Context, opt backend.BuildConfig) (*builder.
if err != nil {
return err
}
if exporterName != "moby" {
return nil
}
id, ok := resp.ExporterResponse["containerimage.digest"]
if !ok {
return errors.Errorf("missing image id")
@@ -600,7 +560,7 @@ func toBuildkitPruneInfo(opts types.BuildCachePruneOptions) (client.PruneInfo, e

bkFilter := make([]string, 0, opts.Filters.Len())
for cacheField := range cacheFields {
if opts.Filters.Contains(cacheField) {
if opts.Filters.Include(cacheField) {
values := opts.Filters.Get(cacheField)
switch len(values) {
case 0:

@@ -6,11 +6,8 @@ import (
"path/filepath"

"github.com/containerd/containerd/content/local"
"github.com/containerd/containerd/platforms"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/builder/builder-next/adapters/containerimage"
"github.com/docker/docker/builder/builder-next/adapters/localinlinecache"
"github.com/docker/docker/builder/builder-next/adapters/snapshot"
containerimageexp "github.com/docker/docker/builder/builder-next/exporter"
"github.com/docker/docker/builder/builder-next/imagerefchecker"
@@ -20,26 +17,22 @@ import (
units "github.com/docker/go-units"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/cache/metadata"
"github.com/moby/buildkit/cache/remotecache"
inlineremotecache "github.com/moby/buildkit/cache/remotecache/inline"
localremotecache "github.com/moby/buildkit/cache/remotecache/local"
registryremotecache "github.com/moby/buildkit/cache/remotecache/registry"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/control"
"github.com/moby/buildkit/exporter"
"github.com/moby/buildkit/frontend"
dockerfile "github.com/moby/buildkit/frontend/dockerfile/builder"
"github.com/moby/buildkit/frontend/gateway"
"github.com/moby/buildkit/frontend/gateway/forwarder"
"github.com/moby/buildkit/snapshot/blobmapping"
"github.com/moby/buildkit/solver/bboltcachestorage"
"github.com/moby/buildkit/util/binfmt_misc"
"github.com/moby/buildkit/util/entitlements"
"github.com/moby/buildkit/worker"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)

func newController(rt http.RoundTripper, opt Opt) (*control.Controller, error) {
if err := os.MkdirAll(opt.Root, 0711); err != nil {
if err := os.MkdirAll(opt.Root, 0700); err != nil {
return nil, err
}

@@ -56,10 +49,9 @@ func newController(rt http.RoundTripper, opt Opt) (*control.Controller, error) {
}

sbase, err := snapshot.NewSnapshotter(snapshot.Opt{
GraphDriver: driver,
LayerStore: dist.LayerStore,
Root: root,
IdentityMapping: opt.IdentityMapping,
GraphDriver: driver,
LayerStore: dist.LayerStore,
Root: root,
})
if err != nil {
return nil, err
@@ -102,6 +94,7 @@ func newController(rt http.RoundTripper, opt Opt) (*control.Controller, error) {
}

src, err := containerimage.NewSource(containerimage.SourceOpt{
SessionManager: opt.SessionManager,
CacheAccessor: cm,
ContentStore: store,
DownloadManager: dist.DownloadManager,
@@ -114,9 +107,7 @@ func newController(rt http.RoundTripper, opt Opt) (*control.Controller, error) {
return nil, err
}

dns := getDNSConfig(opt.DNSConfig)

exec, err := newExecutor(root, opt.DefaultCgroupParent, opt.NetworkController, dns, opt.Rootless, opt.IdentityMapping, opt.ApparmorProfile)
exec, err := newExecutor(root, opt.DefaultCgroupParent, opt.NetworkController)
if err != nil {
return nil, err
}
@@ -145,18 +136,9 @@ func newController(rt http.RoundTripper, opt Opt) (*control.Controller, error) {
return nil, errors.Wrap(err, "could not get builder GC policy")
}

layers, ok := sbase.(mobyworker.LayerAccess)
if !ok {
return nil, errors.Errorf("snapshotter doesn't support differ")
}

p, err := parsePlatforms(binfmt_misc.SupportedPlatforms())
if err != nil {
return nil, err
}

wopt := mobyworker.Opt{
ID: "moby",
SessionManager: opt.SessionManager,
MetadataStore: md,
ContentStore: store,
CacheManager: cm,
@@ -166,10 +148,10 @@ func newController(rt http.RoundTripper, opt Opt) (*control.Controller, error) {
ImageSource: src,
DownloadManager: dist.DownloadManager,
V2MetadataService: dist.V2MetadataService,
Exporter: exp,
Transport: rt,
Layers: layers,
Platforms: p,
Exporters: map[string]exporter.Exporter{
"moby": exp,
},
Transport: rt,
}

wc := &worker.Controller{}
@@ -185,18 +167,12 @@ func newController(rt http.RoundTripper, opt Opt) (*control.Controller, error) {
}

return control.NewController(control.Opt{
SessionManager: opt.SessionManager,
WorkerController: wc,
Frontends: frontends,
CacheKeyStorage: cacheStorage,
ResolveCacheImporterFuncs: map[string]remotecache.ResolveCacheImporterFunc{
"registry": localinlinecache.ResolveCacheImporterFunc(opt.SessionManager, opt.ResolverOpt, dist.ReferenceStore, dist.ImageStore),
"local": localremotecache.ResolveCacheImporterFunc(opt.SessionManager),
},
ResolveCacheExporterFuncs: map[string]remotecache.ResolveCacheExporterFunc{
"inline": inlineremotecache.ResolveCacheExporterFunc(),
},
Entitlements: getEntitlements(opt.BuilderConfig),
SessionManager: opt.SessionManager,
WorkerController: wc,
Frontends: frontends,
CacheKeyStorage: cacheStorage,
ResolveCacheImporterFunc: registryremotecache.ResolveCacheImporterFunc(opt.SessionManager, opt.ResolverOpt),
// TODO: set ResolveCacheExporterFunc for exporting cache
})
}

@@ -230,7 +206,7 @@ func getGCPolicy(conf config.BuilderConfig, root string) ([]client.PruneInfo, er
gcPolicy[i], err = toBuildkitPruneInfo(types.BuildCachePruneOptions{
All: p.All,
KeepStorage: b,
Filters: filters.Args(p.Filter),
Filters: p.Filter,
})
if err != nil {
return nil, err
@@ -240,27 +216,3 @@ func getGCPolicy(conf config.BuilderConfig, root string) ([]client.PruneInfo, er
}
return gcPolicy, nil
}

func parsePlatforms(platformsStr []string) ([]specs.Platform, error) {
out := make([]specs.Platform, 0, len(platformsStr))
for _, s := range platformsStr {
p, err := platforms.Parse(s)
if err != nil {
return nil, err
}
out = append(out, platforms.Normalize(p))
}
return out, nil
}

func getEntitlements(conf config.BuilderConfig) []string {
var ents []string
// In case of no config settings, NetworkHost should be enabled & SecurityInsecure must be disabled.
if conf.Entitlements.NetworkHost == nil || *conf.Entitlements.NetworkHost {
ents = append(ents, string(entitlements.EntitlementNetworkHost))
}
if conf.Entitlements.SecurityInsecure != nil && *conf.Entitlements.SecurityInsecure {
ents = append(ents, string(entitlements.EntitlementSecurityInsecure))
}
return ents
}
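
getEntitlements makes NetworkHost opt-out (a nil setting counts as enabled) and SecurityInsecure opt-in (a nil setting counts as disabled). A small illustration of those defaults using plain bool pointers; the function below is a stand-in, though the entitlement strings mirror BuildKit's values:

package main

import "fmt"

// entitlementDefaults mirrors the getEntitlements branches: a nil
// NetworkHost counts as enabled, a nil SecurityInsecure counts as disabled.
func entitlementDefaults(networkHost, securityInsecure *bool) []string {
	var ents []string
	if networkHost == nil || *networkHost {
		ents = append(ents, "network.host")
	}
	if securityInsecure != nil && *securityInsecure {
		ents = append(ents, "security.insecure")
	}
	return ents
}

func main() {
	t, f := true, false
	fmt.Println(entitlementDefaults(nil, nil)) // [network.host]
	fmt.Println(entitlementDefaults(&f, &t))   // [security.insecure]
}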

@@ -8,12 +8,8 @@ import (
"strconv"
"sync"

"github.com/docker/docker/daemon/config"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/libnetwork"
"github.com/moby/buildkit/executor"
"github.com/moby/buildkit/executor/oci"
"github.com/moby/buildkit/executor/runcexecutor"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/solver/pb"
@@ -24,9 +20,9 @@ import (

const networkName = "bridge"

func newExecutor(root, cgroupParent string, net libnetwork.NetworkController, dnsConfig *oci.DNSConfig, rootless bool, idmap *idtools.IdentityMapping, apparmorProfile string) (executor.Executor, error) {
func newExecutor(root, cgroupParent string, net libnetwork.NetworkController) (executor.Executor, error) {
networkProviders := map[pb.NetMode]network.Provider{
pb.NetMode_UNSET: &bridgeProvider{NetworkController: net, Root: filepath.Join(root, "net")},
pb.NetMode_UNSET: &bridgeProvider{NetworkController: net},
pb.NetMode_HOST: network.NewHostProvider(),
pb.NetMode_NONE: network.NewNoneProvider(),
}
@@ -34,17 +30,11 @@ func newExecutor(root, cgroupParent string, net libnetwork.NetworkController, dn
Root: filepath.Join(root, "executor"),
CommandCandidates: []string{"runc"},
DefaultCgroupParent: cgroupParent,
Rootless: rootless,
NoPivot: os.Getenv("DOCKER_RAMDISK") != "",
IdentityMapping: idmap,
DNS: dnsConfig,
ApparmorProfile: apparmorProfile,
}, networkProviders)
}

type bridgeProvider struct {
libnetwork.NetworkController
Root string
}

func (p *bridgeProvider) New() (network.Namespace, error) {
@@ -80,8 +70,7 @@ func (iface *lnInterface) init(c libnetwork.NetworkController, n libnetwork.Netw
return
}

sbx, err := c.NewSandbox(id, libnetwork.OptionUseExternalKey(), libnetwork.OptionHostsPath(filepath.Join(iface.provider.Root, id, "hosts")),
libnetwork.OptionResolvConfPath(filepath.Join(iface.provider.Root, id, "resolv.conf")))
sbx, err := c.NewSandbox(id, libnetwork.OptionUseExternalKey())
if err != nil {
iface.err = err
return
@@ -99,38 +88,23 @@ func (iface *lnInterface) init(c libnetwork.NetworkController, n libnetwork.Netw
func (iface *lnInterface) Set(s *specs.Spec) {
<-iface.ready
if iface.err != nil {
logrus.WithError(iface.err).Error("failed to set networking spec")
return
}
shortNetCtlrID := stringid.TruncateID(iface.provider.NetworkController.ID())
// attach netns to bridge within the container namespace, using reexec in a prestart hook
s.Hooks = &specs.Hooks{
Prestart: []specs.Hook{{
Path: filepath.Join("/proc", strconv.Itoa(os.Getpid()), "exe"),
Args: []string{"libnetwork-setkey", "-exec-root=" + iface.provider.Config().Daemon.ExecRoot, iface.sbx.ContainerID(), shortNetCtlrID},
Args: []string{"libnetwork-setkey", iface.sbx.ContainerID(), iface.provider.NetworkController.ID()},
}},
}
}

func (iface *lnInterface) Close() error {
<-iface.ready
if iface.sbx != nil {
go func() {
if err := iface.sbx.Delete(); err != nil {
logrus.Errorf("failed to delete builder network sandbox: %v", err)
}
}()
}
go func() {
if err := iface.sbx.Delete(); err != nil {
logrus.Errorf("failed to delete builder network sandbox: %v", err)
}
}()
return iface.err
}

func getDNSConfig(cfg config.DNSConfig) *oci.DNSConfig {
if cfg.DNS != nil || cfg.DNSSearch != nil || cfg.DNSOptions != nil {
return &oci.DNSConfig{
Nameservers: cfg.DNS,
SearchDomains: cfg.DNSSearch,
Options: cfg.DNSOptions,
}
}
return nil
}
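
getDNSConfig returns nil when the daemon has no DNS settings, which callers can treat as "fall back to the default resolv.conf handling". A hedged sketch of that contract, with local types standing in for config.DNSConfig and oci.DNSConfig:

package main

import "fmt"

// daemonDNS and ociDNS are stand-ins for the daemon and OCI config types.
type daemonDNS struct {
	DNS, DNSSearch, DNSOptions []string
}

type ociDNS struct {
	Nameservers, SearchDomains, Options []string
}

// toOCIDNS returns nil when nothing is configured; a nil result signals
// "use the default resolv.conf" to the caller.
func toOCIDNS(cfg daemonDNS) *ociDNS {
	if cfg.DNS == nil && cfg.DNSSearch == nil && cfg.DNSOptions == nil {
		return nil
	}
	return &ociDNS{Nameservers: cfg.DNS, SearchDomains: cfg.DNSSearch, Options: cfg.DNSOptions}
}

func main() {
	fmt.Println(toOCIDNS(daemonDNS{}))                         // <nil>
	fmt.Println(toOCIDNS(daemonDNS{DNS: []string{"1.1.1.1"}})) // populated config
}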

@@ -5,15 +5,12 @@ import (
"errors"
"io"

"github.com/docker/docker/daemon/config"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/libnetwork"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/executor"
"github.com/moby/buildkit/executor/oci"
)

func newExecutor(_, _ string, _ libnetwork.NetworkController, _ *oci.DNSConfig, _ bool, _ *idtools.IdentityMapping, _ string) (executor.Executor, error) {
func newExecutor(_, _ string, _ libnetwork.NetworkController) (executor.Executor, error) {
return &winExecutor{}, nil
}

@@ -23,7 +20,3 @@ type winExecutor struct {
func (e *winExecutor) Exec(ctx context.Context, meta executor.Meta, rootfs cache.Mountable, mounts []executor.Mount, stdin io.ReadCloser, stdout, stderr io.WriteCloser) error {
return errors.New("buildkit executor not implemented for windows")
}

func getDNSConfig(config.DNSConfig) *oci.DNSConfig {
return nil
}

@@ -117,12 +117,12 @@ func (e *imageExporterInstance) Export(ctx context.Context, inp exporter.Source)
layersDone := oneOffProgress(ctx, "exporting layers")

if err := ref.Finalize(ctx, true); err != nil {
return nil, layersDone(err)
return nil, err
}

diffIDs, err := e.opt.Differ.EnsureLayer(ctx, ref.ID())
if err != nil {
return nil, layersDone(err)
return nil, err
}

diffs = make([]digest.Digest, len(diffIDs))
@@ -148,7 +148,7 @@ func (e *imageExporterInstance) Export(ctx context.Context, inp exporter.Source)

diffs, history = normalizeLayersAndHistory(diffs, history, ref)

config, err = patchImageConfig(config, diffs, history, inp.Metadata[exptypes.ExporterInlineCache])
config, err = patchImageConfig(config, diffs, history)
if err != nil {
return nil, err
}

@@ -41,7 +41,7 @@ func parseHistoryFromConfig(dt []byte) ([]ocispec.History, error) {
return config.History, nil
}

func patchImageConfig(dt []byte, dps []digest.Digest, history []ocispec.History, cache []byte) ([]byte, error) {
func patchImageConfig(dt []byte, dps []digest.Digest, history []ocispec.History) ([]byte, error) {
m := map[string]json.RawMessage{}
if err := json.Unmarshal(dt, &m); err != nil {
return nil, errors.Wrap(err, "failed to parse image config for patch")
@@ -77,14 +77,6 @@ func patchImageConfig(dt []byte, dps []digest.Digest, history []ocispec.History,
m["created"] = dt
}

if cache != nil {
dt, err := json.Marshal(cache)
if err != nil {
return nil, err
}
m["moby.buildkit.cache.v0"] = dt
}

dt, err = json.Marshal(m)
return dt, errors.Wrap(err, "failed to marshal config after patch")
}
@@ -137,37 +129,6 @@ func normalizeLayersAndHistory(diffs []digest.Digest, history []ocispec.History,
history[i] = h
}

// Find the first new layer time. Otherwise, the history item for a first
// metadata command would be the creation time of a base image layer.
// If there is no such then the last layer with timestamp.
var created *time.Time
var noCreatedTime bool
for _, h := range history {
if h.Created != nil {
created = h.Created
if noCreatedTime {
break
}
} else {
noCreatedTime = true
}
}

// Fill in created times for all history items to be either the first new
// layer time or the previous layer.
noCreatedTime = false
for i, h := range history {
if h.Created != nil {
if noCreatedTime {
created = h.Created
}
} else {
noCreatedTime = true
h.Created = created
}
history[i] = h
}

return diffs, history
}
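
The two loops in that hunk implement a small algorithm: pick the first layer timestamp that follows an untimestamped history entry (falling back to the last timestamp seen), then backfill every missing Created field from it. A standalone sketch of the same two passes over plain time pointers, preserving the original's quirk of never resetting the flag in the second loop:

package main

import (
	"fmt"
	"time"
)

// backfillCreated fills nil entries with the first timestamp that follows
// an untimestamped entry, or the last timestamp seen if none follows.
func backfillCreated(created []*time.Time) {
	var pick *time.Time
	sawNil := false
	for _, c := range created {
		if c != nil {
			pick = c
			if sawNil {
				break
			}
		} else {
			sawNil = true
		}
	}
	sawNil = false
	for i, c := range created {
		if c != nil {
			if sawNil {
				pick = c
			}
		} else {
			sawNil = true
			created[i] = pick
		}
	}
}

func main() {
	t1 := time.Unix(100, 0)
	ts := []*time.Time{nil, &t1, nil}
	backfillCreated(ts)
	fmt.Println(ts[0].Equal(t1), ts[2].Equal(t1)) // true true
}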


@@ -35,7 +35,6 @@ func (h *reqBodyHandler) newRequest(rc io.ReadCloser) (string, func()) {
h.mu.Lock()
delete(h.requests, id)
h.mu.Unlock()
rc.Close()
}
}


@@ -7,11 +7,9 @@ import (
"io/ioutil"
nethttp "net/http"
"runtime"
"strings"
"time"

"github.com/containerd/containerd/content"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/platforms"
"github.com/containerd/containerd/rootfs"
"github.com/docker/docker/distribution"
@@ -25,8 +23,6 @@ import (
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/executor"
"github.com/moby/buildkit/exporter"
localexporter "github.com/moby/buildkit/exporter/local"
tarexporter "github.com/moby/buildkit/exporter/tar"
"github.com/moby/buildkit/frontend"
gw "github.com/moby/buildkit/frontend/gateway/client"
"github.com/moby/buildkit/session"
@@ -44,34 +40,24 @@ import (
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
bolt "go.etcd.io/bbolt"
)

const labelCreatedAt = "buildkit/createdat"

// LayerAccess provides access to a moby layer from a snapshot
type LayerAccess interface {
GetDiffIDs(ctx context.Context, key string) ([]layer.DiffID, error)
EnsureLayer(ctx context.Context, key string) ([]layer.DiffID, error)
}

// Opt defines a structure for creating a worker.
type Opt struct {
ID string
Labels map[string]string
GCPolicy []client.PruneInfo
SessionManager *session.Manager
MetadataStore *metadata.Store
Executor executor.Executor
Snapshotter snapshot.Snapshotter
ContentStore content.Store
CacheManager cache.Manager
ImageSource source.Source
Exporters map[string]exporter.Exporter
DownloadManager distribution.RootFSDownloadManager
V2MetadataService distmetadata.V2MetadataService
Transport nethttp.RoundTripper
Exporter exporter.Exporter
Layers LayerAccess
Platforms []ocispec.Platform
}

// Worker is a local worker instance with dedicated snapshotter, cache, and so on.
@@ -113,8 +99,9 @@ func NewWorker(opt Opt) (*Worker, error) {
}

ss, err := local.NewSource(local.Opt{
CacheAccessor: cm,
MetadataStore: opt.MetadataStore,
SessionManager: opt.SessionManager,
CacheAccessor: cm,
MetadataStore: opt.MetadataStore,
})
if err == nil {
sm.Register(ss)
@@ -140,10 +127,8 @@ func (w *Worker) Labels() map[string]string {

// Platforms returns one or more platforms supported by the image.
func (w *Worker) Platforms() []ocispec.Platform {
if len(w.Opt.Platforms) == 0 {
return []ocispec.Platform{platforms.DefaultSpec()}
}
return w.Opt.Platforms
// does not handle lcow
return []ocispec.Platform{platforms.DefaultSpec()}
}

// GCPolicy returns automatic GC Policy
@@ -161,15 +146,13 @@ func (w *Worker) LoadRef(id string, hidden bool) (cache.ImmutableRef, error) {
}

// ResolveOp converts a LLB vertex into a LLB operation
func (w *Worker) ResolveOp(v solver.Vertex, s frontend.FrontendLLBBridge, sm *session.Manager) (solver.Op, error) {
func (w *Worker) ResolveOp(v solver.Vertex, s frontend.FrontendLLBBridge) (solver.Op, error) {
if baseOp, ok := v.Sys().(*pb.Op); ok {
switch op := baseOp.Op.(type) {
case *pb.Op_Source:
return ops.NewSourceOp(v, op, baseOp.Platform, w.SourceManager, sm, w)
return ops.NewSourceOp(v, op, baseOp.Platform, w.SourceManager, w)
case *pb.Op_Exec:
return ops.NewExecOp(v, op, baseOp.Platform, w.CacheManager, sm, w.MetadataStore, w.Executor, w)
case *pb.Op_File:
return ops.NewFileOp(v, op, w.CacheManager, w.MetadataStore, w)
return ops.NewExecOp(v, op, w.CacheManager, w.Opt.SessionManager, w.MetadataStore, w.Executor, w)
case *pb.Op_Build:
return ops.NewBuildOp(v, op, s, w)
}
@@ -178,13 +161,13 @@ func (w *Worker) ResolveOp(v solver.Vertex, s frontend.FrontendLLBBridge, sm *se
}

// ResolveImageConfig returns image config for an image
func (w *Worker) ResolveImageConfig(ctx context.Context, ref string, opt gw.ResolveImageConfigOpt, sm *session.Manager) (digest.Digest, []byte, error) {
func (w *Worker) ResolveImageConfig(ctx context.Context, ref string, opt gw.ResolveImageConfigOpt) (digest.Digest, []byte, error) {
// ImageSource is typically source/containerimage
resolveImageConfig, ok := w.ImageSource.(resolveImageConfig)
if !ok {
return "", nil, errors.Errorf("worker %q does not implement ResolveImageConfig", w.ID())
}
return resolveImageConfig.ResolveImageConfig(ctx, ref, opt, sm)
return resolveImageConfig.ResolveImageConfig(ctx, ref, opt)
}

// Exec executes a process directly on a worker
@@ -208,96 +191,17 @@ func (w *Worker) Prune(ctx context.Context, ch chan client.UsageInfo, info ...cl
}

// Exporter returns exporter by name
func (w *Worker) Exporter(name string, sm *session.Manager) (exporter.Exporter, error) {
switch name {
case "moby":
return w.Opt.Exporter, nil
case client.ExporterLocal:
return localexporter.New(localexporter.Opt{
SessionManager: sm,
})
case client.ExporterTar:
return tarexporter.New(tarexporter.Opt{
SessionManager: sm,
})
default:
func (w *Worker) Exporter(name string) (exporter.Exporter, error) {
exp, ok := w.Exporters[name]
if !ok {
return nil, errors.Errorf("exporter %q could not be found", name)
}
return exp, nil
}

// GetRemote returns a remote snapshot reference for a local one
func (w *Worker) GetRemote(ctx context.Context, ref cache.ImmutableRef, createIfNeeded bool) (*solver.Remote, error) {
var diffIDs []layer.DiffID
var err error
if !createIfNeeded {
diffIDs, err = w.Layers.GetDiffIDs(ctx, ref.ID())
if err != nil {
return nil, err
}
} else {
if err := ref.Finalize(ctx, true); err != nil {
return nil, err
}
diffIDs, err = w.Layers.EnsureLayer(ctx, ref.ID())
if err != nil {
return nil, err
}
}

descriptors := make([]ocispec.Descriptor, len(diffIDs))
for i, dgst := range diffIDs {
descriptors[i] = ocispec.Descriptor{
MediaType: images.MediaTypeDockerSchema2Layer,
Digest: digest.Digest(dgst),
Size: -1,
}
}

return &solver.Remote{
Descriptors: descriptors,
Provider: &emptyProvider{},
}, nil
}
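
GetRemote fabricates OCI descriptors from layer diff IDs with Size set to -1, since only the digests are known at that point. A self-contained sketch of that mapping with a local stand-in for ocispec.Descriptor; the media type string matches images.MediaTypeDockerSchema2Layer:

package main

import "fmt"

// descriptor is a local stand-in for ocispec.Descriptor; only the fields
// used in the hunk above are modeled.
type descriptor struct {
	MediaType string
	Digest    string
	Size      int64
}

// descriptorsFromDiffIDs mirrors the loop in GetRemote: one descriptor per
// layer diff ID, with Size set to -1 because the blob size is unknown.
func descriptorsFromDiffIDs(diffIDs []string) []descriptor {
	out := make([]descriptor, len(diffIDs))
	for i, dgst := range diffIDs {
		out[i] = descriptor{
			MediaType: "application/vnd.docker.image.rootfs.diff.tar",
			Digest:    dgst,
			Size:      -1,
		}
	}
	return out
}

func main() {
	fmt.Println(descriptorsFromDiffIDs([]string{"sha256:aaa", "sha256:bbb"}))
}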

// PruneCacheMounts removes the current cache snapshots for specified IDs
func (w *Worker) PruneCacheMounts(ctx context.Context, ids []string) error {
mu := ops.CacheMountsLocker()
mu.Lock()
defer mu.Unlock()

for _, id := range ids {
id = "cache-dir:" + id
sis, err := w.MetadataStore.Search(id)
if err != nil {
return err
}
for _, si := range sis {
for _, k := range si.Indexes() {
if k == id || strings.HasPrefix(k, id+":") {
if siCached := w.CacheManager.Metadata(si.ID()); siCached != nil {
si = siCached
}
if err := cache.CachePolicyDefault(si); err != nil {
return err
}
si.Queue(func(b *bolt.Bucket) error {
return si.SetValue(b, k, nil)
})
if err := si.Commit(); err != nil {
return err
}
// if ref is unused try to clean it up right away by releasing it
if mref, err := w.CacheManager.GetMutable(ctx, si.ID()); err == nil {
go mref.Release(context.TODO())
}
break
}
}
}
}

ops.ClearActiveCacheMounts()
return nil
return nil, errors.Errorf("getremote not implemented")
}

// FromRemote converts a remote snapshot reference to a local one
@@ -333,32 +237,11 @@ func (w *Worker) FromRemote(ctx context.Context, remote *solver.Remote) (cache.I
}
defer release()

if len(rootFS.DiffIDs) != len(layers) {
return nil, errors.Errorf("invalid layer count mismatch %d vs %d", len(rootFS.DiffIDs), len(layers))
ref, err := w.CacheManager.GetFromSnapshotter(ctx, string(rootFS.ChainID()), cache.WithDescription(fmt.Sprintf("imported %s", remote.Descriptors[len(remote.Descriptors)-1].Digest)))
if err != nil {
return nil, err
}

for i := range rootFS.DiffIDs {
tm := time.Now()
if tmstr, ok := remote.Descriptors[i].Annotations[labelCreatedAt]; ok {
if err := (&tm).UnmarshalText([]byte(tmstr)); err != nil {
return nil, err
}
}
descr := fmt.Sprintf("imported %s", remote.Descriptors[i].Digest)
if v, ok := remote.Descriptors[i].Annotations["buildkit/description"]; ok {
descr = v
}
ref, err := w.CacheManager.GetFromSnapshotter(ctx, string(layer.CreateChainID(rootFS.DiffIDs[:i+1])), cache.WithDescription(descr), cache.WithCreationTime(tm))
if err != nil {
return nil, err
}
if i == len(remote.Descriptors)-1 {
return ref, nil
}
defer ref.Release(context.TODO())
}

return nil, errors.Errorf("unreachable")
return ref, nil
}

type discardProgress struct{}
@@ -455,12 +338,5 @@ func oneOffProgress(ctx context.Context, id string) func(err error) error {
}

type resolveImageConfig interface {
ResolveImageConfig(ctx context.Context, ref string, opt gw.ResolveImageConfigOpt, sm *session.Manager) (digest.Digest, []byte, error)
}

type emptyProvider struct {
}

func (p *emptyProvider) ReaderAt(ctx context.Context, dec ocispec.Descriptor) (content.ReaderAt, error) {
return nil, errors.Errorf("ReaderAt not implemented for empty provider")
ResolveImageConfig(ctx context.Context, ref string, opt gw.ResolveImageConfigOpt) (digest.Digest, []byte, error)
}

@@ -60,8 +60,8 @@ type ImageBackend interface {
type ExecBackend interface {
// ContainerAttachRaw attaches to container.
ContainerAttachRaw(cID string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool, attached chan struct{}) error
// ContainerCreateIgnoreImagesArgsEscaped creates a new Docker container and returns potential warnings
ContainerCreateIgnoreImagesArgsEscaped(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error)
// ContainerCreate creates a new Docker container and returns potential warnings
ContainerCreate(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error)
// ContainerRm removes a container specified by `id`.
ContainerRm(name string, config *types.ContainerRmConfig) error
// ContainerKill stops the container execution abruptly.

@@ -113,7 +113,7 @@ func (b *BuildArgs) GetAllAllowed() map[string]string {
return b.getAllFromMapping(b.allowedBuildArgs)
}

// GetAllMeta returns a mapping with all the meta args
// GetAllMeta returns a mapping with all the meta meta args
func (b *BuildArgs) GetAllMeta() map[string]string {
return b.getAllFromMapping(b.allowedMetaArgs)
}

@@ -29,7 +29,7 @@ func newContainerManager(docker builder.ExecBackend) *containerManager {

// Create a container
func (c *containerManager) Create(runConfig *container.Config, hostConfig *container.HostConfig) (container.ContainerCreateCreatedBody, error) {
container, err := c.backend.ContainerCreateIgnoreImagesArgsEscaped(types.ContainerCreateConfig{
container, err := c.backend.ContainerCreate(types.ContainerCreateConfig{
Config: runConfig,
HostConfig: hostConfig,
})

@@ -64,7 +64,6 @@ type copyInstruction struct {
dest string
chownStr string
allowLocalDecompression bool
preserveOwnership bool
}

// copier reads a raw COPY or ADD command, fetches remote sources using a downloader,
@@ -467,7 +466,7 @@ func downloadSource(output io.Writer, stdout io.Writer, srcURL string) (remote b

type copyFileOptions struct {
decompress bool
identity *idtools.Identity
identity idtools.Identity
archiver Archiver
}

@@ -533,7 +532,7 @@ func isArchivePath(driver containerfs.ContainerFS, path string) bool {
return err == nil
}

func copyDirectory(archiver Archiver, source, dest *copyEndpoint, identity *idtools.Identity) error {
func copyDirectory(archiver Archiver, source, dest *copyEndpoint, identity idtools.Identity) error {
destExists, err := isExistingDirectory(dest)
if err != nil {
return errors.Wrapf(err, "failed to query destination path")
@@ -542,42 +541,28 @@ func copyDirectory(archiver Archiver, source, dest *copyEndpoint, identity *idto
if err := archiver.CopyWithTar(source.path, dest.path); err != nil {
return errors.Wrapf(err, "failed to copy directory")
}
if identity != nil {
// TODO: @gupta-ak. Investigate how LCOW permission mappings will work.
return fixPermissions(source.path, dest.path, *identity, !destExists)
}
return nil
// TODO: @gupta-ak. Investigate how LCOW permission mappings will work.
return fixPermissions(source.path, dest.path, identity, !destExists)
}

func copyFile(archiver Archiver, source, dest *copyEndpoint, identity *idtools.Identity) error {
func copyFile(archiver Archiver, source, dest *copyEndpoint, identity idtools.Identity) error {
if runtime.GOOS == "windows" && dest.driver.OS() == "linux" {
// LCOW
if err := dest.driver.MkdirAll(dest.driver.Dir(dest.path), 0755); err != nil {
return errors.Wrapf(err, "failed to create new directory")
}
} else {
// Normal containers
if identity == nil {
// Use system.MkdirAll here, which is a custom version of os.MkdirAll
// modified for use on Windows to handle volume GUID paths (\\?\{dae8d3ac-b9a1-11e9-88eb-e8554b2ba1db}\path\)
if err := system.MkdirAll(filepath.Dir(dest.path), 0755, ""); err != nil {
return err
}
} else {
if err := idtools.MkdirAllAndChownNew(filepath.Dir(dest.path), 0755, *identity); err != nil {
return errors.Wrapf(err, "failed to create new directory")
}
if err := idtools.MkdirAllAndChownNew(filepath.Dir(dest.path), 0755, identity); err != nil {
// Normal containers
return errors.Wrapf(err, "failed to create new directory")
}
}

if err := archiver.CopyFileWithTar(source.path, dest.path); err != nil {
return errors.Wrapf(err, "failed to copy file")
}
if identity != nil {
// TODO: @gupta-ak. Investigate how LCOW permission mappings will work.
return fixPermissions(source.path, dest.path, *identity, false)
}
return nil
// TODO: @gupta-ak. Investigate how LCOW permission mappings will work.
return fixPermissions(source.path, dest.path, identity, false)
}
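
The copyDirectory and copyFile hunks toggle between passing *idtools.Identity guarded by nil checks and passing idtools.Identity by value, in which case the ownership fixup always runs. A minimal sketch of the two variants with a hypothetical identity type (not the moby API):

package main

import "fmt"

type identity struct{ UID, GID int }

// Pointer variant: a nil pointer means "skip the chown step entirely".
func fixPermissionsPtr(path string, id *identity) {
	if id == nil {
		return
	}
	fmt.Printf("chown %s to %d:%d\n", path, id.UID, id.GID)
}

// Value variant: the caller always supplies an identity, so the nil branch
// disappears and ownership is always fixed.
func fixPermissionsVal(path string, id identity) {
	fmt.Printf("chown %s to %d:%d\n", path, id.UID, id.GID)
}

func main() {
	fixPermissionsPtr("/dest", nil) // no-op
	fixPermissionsVal("/dest", identity{UID: 0, GID: 0})
}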

func endsInSlash(driver containerfs.Driver, path string) bool {

@@ -16,6 +16,7 @@ import (

"github.com/containerd/containerd/platforms"
"github.com/docker/docker/api"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/strslice"
"github.com/docker/docker/builder"
"github.com/docker/docker/errdefs"
@@ -126,9 +127,7 @@ func dispatchCopy(d dispatchRequest, c *instructions.CopyCommand) error {
return err
}
copyInstruction.chownStr = c.Chown
if c.From != "" && copyInstruction.chownStr == "" {
copyInstruction.preserveOwnership = true
}

return d.builder.performCopy(d, copyInstruction)
}

@@ -331,6 +330,14 @@ func dispatchWorkdir(d dispatchRequest, c *instructions.WorkdirCommand) error {
return d.builder.commitContainer(d.state, containerID, runConfigWithCommentCmd)
}

func resolveCmdLine(cmd instructions.ShellDependantCmdLine, runConfig *container.Config, os string) []string {
result := cmd.CmdLine
if cmd.PrependShell && result != nil {
result = append(getShell(runConfig, os), result...)
}
return result
}

// RUN some command yo
//
// run a command and commit the image. Args are automatically prepended with
@@ -346,7 +353,7 @@ func dispatchRun(d dispatchRequest, c *instructions.RunCommand) error {
return system.ErrNotSupportedOperatingSystem
}
stateRunConfig := d.state.runConfig
cmdFromArgs, argsEscaped := resolveCmdLine(c.ShellDependantCmdLine, stateRunConfig, d.state.operatingSystem, c.Name(), c.String())
cmdFromArgs := resolveCmdLine(c.ShellDependantCmdLine, stateRunConfig, d.state.operatingSystem)
buildArgs := d.state.buildArgs.FilterAllowed(stateRunConfig.Env)

saveCmd := cmdFromArgs
@@ -356,7 +363,6 @@ func dispatchRun(d dispatchRequest, c *instructions.RunCommand) error {

runConfigForCacheProbe := copyRunConfig(stateRunConfig,
withCmd(saveCmd),
withArgsEscaped(argsEscaped),
withEntrypointOverride(saveCmd, nil))
if hit, err := d.builder.probeCache(d.state, runConfigForCacheProbe); err != nil || hit {
return err
@@ -364,11 +370,13 @@ func dispatchRun(d dispatchRequest, c *instructions.RunCommand) error {

runConfig := copyRunConfig(stateRunConfig,
withCmd(cmdFromArgs),
withArgsEscaped(argsEscaped),
withEnv(append(stateRunConfig.Env, buildArgs...)),
withEntrypointOverride(saveCmd, strslice.StrSlice{""}),
withoutHealthcheck())

// set config as already being escaped, this prevents double escaping on windows
runConfig.ArgsEscaped = true

cID, err := d.builder.create(runConfig)
if err != nil {
return err
@@ -391,12 +399,6 @@ func dispatchRun(d dispatchRequest, c *instructions.RunCommand) error {
return err
}

// Don't persist the argsEscaped value in the committed image. Use the original
// from previous build steps (only CMD and ENTRYPOINT persist this).
if d.state.operatingSystem == "windows" {
runConfigForCacheProbe.ArgsEscaped = stateRunConfig.ArgsEscaped
}

return d.builder.commitContainer(d.state, cID, runConfigForCacheProbe)
}

@@ -432,23 +434,15 @@ func prependEnvOnCmd(buildArgs *BuildArgs, buildArgVars []string, cmd strslice.S
//
func dispatchCmd(d dispatchRequest, c *instructions.CmdCommand) error {
runConfig := d.state.runConfig
cmd, argsEscaped := resolveCmdLine(c.ShellDependantCmdLine, runConfig, d.state.operatingSystem, c.Name(), c.String())

// We warn here as Windows shell processing operates differently to Linux.
// Linux: /bin/sh -c "echo hello" world --> hello
// Windows: cmd /s /c "echo hello" world --> hello world
if d.state.operatingSystem == "windows" &&
len(runConfig.Entrypoint) > 0 &&
d.state.runConfig.ArgsEscaped != argsEscaped {
fmt.Fprintf(d.builder.Stderr, " ---> [Warning] Shell-form ENTRYPOINT and exec-form CMD may have unexpected results\n")
}

cmd := resolveCmdLine(c.ShellDependantCmdLine, runConfig, d.state.operatingSystem)
runConfig.Cmd = cmd
runConfig.ArgsEscaped = argsEscaped
// set config as already being escaped, this prevents double escaping on windows
runConfig.ArgsEscaped = true

if err := d.builder.commit(d.state, fmt.Sprintf("CMD %q", cmd)); err != nil {
return err
}

if len(c.ShellDependantCmdLine.CmdLine) != 0 {
d.state.cmdSet = true
}
@@ -483,22 +477,8 @@ func dispatchHealthcheck(d dispatchRequest, c *instructions.HealthCheckCommand)
//
func dispatchEntrypoint(d dispatchRequest, c *instructions.EntrypointCommand) error {
runConfig := d.state.runConfig
cmd, argsEscaped := resolveCmdLine(c.ShellDependantCmdLine, runConfig, d.state.operatingSystem, c.Name(), c.String())

// This warning is a little more complex than in dispatchCmd(), as the Windows base images (similar
// universally to almost every Linux image out there) have a single .Cmd field populated so that
// `docker run --rm image` starts the default shell which would typically be sh on Linux,
// or cmd on Windows. The catch to this is that if a dockerfile had `CMD ["c:\\windows\\system32\\cmd.exe"]`,
// we wouldn't be able to tell the difference. However, that would be highly unlikely, and besides, this
// is only trying to give a helpful warning of possibly unexpected results.
if d.state.operatingSystem == "windows" &&
d.state.runConfig.ArgsEscaped != argsEscaped &&
((len(runConfig.Cmd) == 1 && strings.ToLower(runConfig.Cmd[0]) != `c:\windows\system32\cmd.exe` && len(runConfig.Shell) == 0) || (len(runConfig.Cmd) > 1)) {
fmt.Fprintf(d.builder.Stderr, " ---> [Warning] Shell-form CMD and exec-form ENTRYPOINT may have unexpected results\n")
}

cmd := resolveCmdLine(c.ShellDependantCmdLine, runConfig, d.state.operatingSystem)
runConfig.Entrypoint = cmd
runConfig.ArgsEscaped = argsEscaped
if !d.state.cmdSet {
runConfig.Cmd = nil
}

@@ -4,7 +4,6 @@ import (
"bytes"
"context"
"runtime"
"strings"
"testing"

"github.com/docker/docker/api/types"
@@ -16,7 +15,6 @@ import (
"github.com/docker/docker/pkg/system"
"github.com/docker/go-connections/nat"
"github.com/moby/buildkit/frontend/dockerfile/instructions"
"github.com/moby/buildkit/frontend/dockerfile/parser"
"github.com/moby/buildkit/frontend/dockerfile/shell"
"gotest.tools/assert"
is "gotest.tools/assert/cmp"
@@ -438,14 +436,7 @@ func TestRunWithBuildArgs(t *testing.T) {

runConfig := &container.Config{}
origCmd := strslice.StrSlice([]string{"cmd", "in", "from", "image"})

var cmdWithShell strslice.StrSlice
if runtime.GOOS == "windows" {
cmdWithShell = strslice.StrSlice([]string{strings.Join(append(getShell(runConfig, runtime.GOOS), []string{"echo foo"}...), " ")})
} else {
cmdWithShell = strslice.StrSlice(append(getShell(runConfig, runtime.GOOS), "echo foo"))
}

cmdWithShell := strslice.StrSlice(append(getShell(runConfig, runtime.GOOS), "echo foo"))
envVars := []string{"|1", "one=two"}
cachedCmd := strslice.StrSlice(append(envVars, cmdWithShell...))

@@ -487,24 +478,13 @@ func TestRunWithBuildArgs(t *testing.T) {
err := initializeStage(sb, from)
assert.NilError(t, err)
sb.state.buildArgs.AddArg("one", strPtr("two"))

// This is hugely annoying. On the Windows side, it relies on the
// RunCommand being able to emit String() and Name() (as implemented by
// withNameAndCode). Unfortunately, that is internal, and no way to directly
// set. However, we can fortunately use ParseInstruction in the instructions
// package to parse a fake node which can be used as our instructions.RunCommand
// instead.
node := &parser.Node{
Original: `RUN echo foo`,
Value: "run",
run := &instructions.RunCommand{
ShellDependantCmdLine: instructions.ShellDependantCmdLine{
CmdLine: strslice.StrSlice{"echo foo"},
PrependShell: true,
},
}
runint, err := instructions.ParseInstruction(node)
assert.NilError(t, err)
runinst := runint.(*instructions.RunCommand)
runinst.CmdLine = strslice.StrSlice{"echo foo"}
runinst.PrependShell = true

assert.NilError(t, dispatch(sb, runinst))
assert.NilError(t, dispatch(sb, run))

// Check that runConfig.Cmd has not been modified by run
assert.Check(t, is.DeepEqual(origCmd, sb.state.runConfig.Cmd))

@@ -6,9 +6,6 @@ import (
"errors"
"os"
"path/filepath"

"github.com/docker/docker/api/types/container"
"github.com/moby/buildkit/frontend/dockerfile/instructions"
)

// normalizeWorkdir normalizes a user requested working directory in a
@@ -24,13 +21,3 @@ func normalizeWorkdir(_ string, current string, requested string) (string, error
}
return requested, nil
}

// resolveCmdLine takes a command line arg set and optionally prepends a platform-specific
// shell in front of it.
func resolveCmdLine(cmd instructions.ShellDependantCmdLine, runConfig *container.Config, os, _, _ string) ([]string, bool) {
result := cmd.CmdLine
if cmd.PrependShell && result != nil {
result = append(getShell(runConfig, os), result...)
}
return result, false
}

@@ -9,9 +9,7 @@ import (
"regexp"
"strings"

"github.com/docker/docker/api/types/container"
"github.com/docker/docker/pkg/system"
"github.com/moby/buildkit/frontend/dockerfile/instructions"
)

var pattern = regexp.MustCompile(`^[a-zA-Z]:\.$`)
@@ -95,47 +93,3 @@ func normalizeWorkdirWindows(current string, requested string) (string, error) {
// Upper-case drive letter
return (strings.ToUpper(string(requested[0])) + requested[1:]), nil
}

// resolveCmdLine takes a command line arg set and optionally prepends a platform-specific
// shell in front of it. It returns either an array of arguments and an indication that
// the arguments are not yet escaped; Or, an array containing a single command line element
// along with an indication that the arguments are escaped so the runtime shouldn't escape.
//
// A better solution could be made, but it would be exceptionally invasive throughout
// many parts of the daemon which are coded assuming Linux args array only, not taking
// account of Windows-natural command line semantics and its argv handling. Put another way,
// while what is here is good-enough, it could be improved, but would be highly invasive.
//
// The commands when this function is called are RUN, ENTRYPOINT and CMD.
func resolveCmdLine(cmd instructions.ShellDependantCmdLine, runConfig *container.Config, os, command, original string) ([]string, bool) {

// Make sure we return an empty array if there is no cmd.CmdLine
if len(cmd.CmdLine) == 0 {
return []string{}, runConfig.ArgsEscaped
}

if os == "windows" { // ie WCOW
if cmd.PrependShell {
// WCOW shell-form. Return a single-element array containing the original command line prepended with the shell.
// Also indicate that it has not been escaped (so will be passed through directly to HCS). Note that
// we go back to the original un-parsed command line in the dockerfile line, strip off both the command part of
// it (RUN/ENTRYPOINT/CMD), and also strip any leading white space. IOW, we deliberately ignore any prior parsing
// so as to ensure it is treated exactly as a command line. For those interested, `RUN mkdir "c:/foo"` is a particularly
// good example of why this is necessary if you fancy debugging how cmd.exe and its builtin mkdir works. (Windows
// doesn't have a mkdir.exe, and I'm guessing cmd.exe has some very long unavoidable and unchangeable historical
// design decisions over how both its built-in echo and mkdir are coded. Probably more too.)
original = original[len(command):] // Strip off the command
original = strings.TrimLeft(original, " \t\v\n") // Strip off leading whitespace
return []string{strings.Join(getShell(runConfig, os), " ") + " " + original}, true
}

// WCOW JSON/"exec" form.
return cmd.CmdLine, false
}

// LCOW - use args as an array, same as LCOL.
if cmd.PrependShell && cmd.CmdLine != nil {
return append(getShell(runConfig, os), cmd.CmdLine...), false
}
return cmd.CmdLine, false
}
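
The Windows resolveCmdLine returns both an argument vector and an escaped flag: WCOW shell form collapses to a single pre-escaped command line, while WCOW exec form and Linux keep an argv array. A simplified, hedged re-implementation of that branch structure (not the actual daemon code):

package main

import (
	"fmt"
	"strings"
)

// resolveSketch mirrors the branch structure above: it returns (argv, argsEscaped).
func resolveSketch(cmdLine []string, prependShell bool, goos, original string, shell []string) ([]string, bool) {
	if len(cmdLine) == 0 {
		return []string{}, false
	}
	if goos == "windows" {
		if prependShell {
			// WCOW shell form: one pre-escaped command line string.
			return []string{strings.Join(shell, " ") + " " + original}, true
		}
		// WCOW exec form: keep the argv array, not escaped.
		return cmdLine, false
	}
	// Linux: optionally prepend the shell, never pre-escaped.
	if prependShell {
		return append(shell, cmdLine...), false
	}
	return cmdLine, false
}

func main() {
	shell := []string{"cmd", "/S", "/C"}
	argv, esc := resolveSketch([]string{`mkdir "c:/foo"`}, true, "windows", `mkdir "c:/foo"`, shell)
	fmt.Println(argv, esc) // [cmd /S /C mkdir "c:/foo"] true
}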
|
||||
|
||||
@@ -2,7 +2,6 @@ package dockerfile // import "github.com/docker/docker/builder/dockerfile"
|
||||
|
||||
import (
|
||||
"os"
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/builder/remotecontext"
|
||||
@@ -24,11 +23,8 @@ func init() {
|
||||
reexec.Init()
|
||||
}
|
||||
|
||||
func TestDispatch(t *testing.T) {
|
||||
if runtime.GOOS != "windows" {
|
||||
skip.If(t, os.Getuid() != 0, "skipping test that requires root")
|
||||
}
|
||||
testCases := []dispatchTestCase{
|
||||
func initDispatchTestCases() []dispatchTestCase {
|
||||
dispatchTestCases := []dispatchTestCase{
|
||||
{
|
||||
name: "ADD multiple files to file",
|
||||
cmd: &instructions.AddCommand{SourcesAndDest: instructions.SourcesAndDest{
|
||||
@@ -95,46 +91,54 @@ func TestDispatch(t *testing.T) {
|
||||
}},
|
||||
expectedError: "source can't be a URL for COPY",
|
||||
files: nil,
|
||||
},
|
||||
}
|
||||
}}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
contextDir, cleanup := createTestTempDir(t, "", "builder-dockerfile-test")
|
||||
defer cleanup()
|
||||
return dispatchTestCases
|
||||
}
|
||||
|
||||
for filename, content := range tc.files {
|
||||
createTestTempFile(t, contextDir, filename, content, 0777)
|
||||
}
|
||||
func TestDispatch(t *testing.T) {
|
||||
skip.If(t, os.Getuid() != 0, "skipping test that requires root")
|
||||
testCases := initDispatchTestCases()
|
||||
|
||||
tarStream, err := archive.Tar(contextDir, archive.Uncompressed)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Error when creating tar stream: %s", err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err = tarStream.Close(); err != nil {
|
||||
t.Fatalf("Error when closing tar stream: %s", err)
|
||||
}
|
||||
}()
|
||||
|
||||
context, err := remotecontext.FromArchive(tarStream)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Error when creating tar context: %s", err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err = context.Close(); err != nil {
|
||||
t.Fatalf("Error when closing tar context: %s", err)
|
||||
}
|
||||
}()
|
||||
|
||||
b := newBuilderWithMockBackend()
|
||||
sb := newDispatchRequest(b, '`', context, NewBuildArgs(make(map[string]*string)), newStagesBuildResults())
|
||||
err = dispatch(sb, tc.cmd)
|
||||
assert.Check(t, is.ErrorContains(err, tc.expectedError))
|
||||
})
|
||||
for _, testCase := range testCases {
|
||||
executeTestCase(t, testCase)
|
||||
}
|
||||
}
|
||||
|
||||
func executeTestCase(t *testing.T, testCase dispatchTestCase) {
|
||||
contextDir, cleanup := createTestTempDir(t, "", "builder-dockerfile-test")
|
||||
defer cleanup()
|
||||
|
||||
for filename, content := range testCase.files {
|
||||
createTestTempFile(t, contextDir, filename, content, 0777)
|
||||
}
|
||||
|
||||
tarStream, err := archive.Tar(contextDir, archive.Uncompressed)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Error when creating tar stream: %s", err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err = tarStream.Close(); err != nil {
|
||||
t.Fatalf("Error when closing tar stream: %s", err)
|
||||
}
|
||||
}()
|
||||
|
||||
context, err := remotecontext.FromArchive(tarStream)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Error when creating tar context: %s", err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err = context.Close(); err != nil {
|
||||
t.Fatalf("Error when closing tar context: %s", err)
|
||||
}
|
||||
}()
|
||||
|
||||
b := newBuilderWithMockBackend()
|
||||
sb := newDispatchRequest(b, '`', context, NewBuildArgs(make(map[string]*string)), newStagesBuildResults())
|
||||
err = dispatch(sb, testCase.cmd)
|
||||
assert.Check(t, is.ErrorContains(err, testCase.expectedError))
|
||||
}
|
||||
|
||||
@@ -204,9 +204,7 @@ func (b *Builder) performCopy(req dispatchRequest, inst copyInstruction) error {
|
||||
opts := copyFileOptions{
|
||||
decompress: inst.allowLocalDecompression,
|
||||
archiver: b.getArchiver(info.root, destInfo.root),
|
||||
}
|
||||
if !inst.preserveOwnership {
|
||||
opts.identity = &identity
|
||||
identity: identity,
|
||||
}
|
||||
if err := performCopyForInfo(destInfo, info, opts); err != nil {
|
||||
return errors.Wrapf(err, "failed to copy files")
|
```diff
@@ -310,12 +308,6 @@ func withCmd(cmd []string) runConfigModifier {
 	}
 }
 
-func withArgsEscaped(argsEscaped bool) runConfigModifier {
-	return func(runConfig *container.Config) {
-		runConfig.ArgsEscaped = argsEscaped
-	}
-}
-
 // withCmdComment sets Cmd to a nop comment string. See withCmdCommentString for
 // why there are two almost identical versions of this.
 func withCmdComment(comment string, platform string) runConfigModifier {
```
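`withArgsEscaped`, removed above, is one instance of the `runConfigModifier` pattern: closures that each mutate a single field of a copied config, so call sites compose tweaks declaratively. A self-contained sketch of the pattern follows, using a stand-in `config` type rather than the real `container.Config`.

```go
package main

import "fmt"

// config is a stand-in for container.Config; only the fields needed to
// illustrate the modifier pattern are included.
type config struct {
	Cmd         []string
	ArgsEscaped bool
}

// configModifier mirrors runConfigModifier: a closure that mutates a
// config in place.
type configModifier func(*config)

func withCmd(cmd []string) configModifier {
	return func(c *config) { c.Cmd = cmd }
}

// withArgsEscaped is the shape of the modifier removed in this hunk.
func withArgsEscaped(v bool) configModifier {
	return func(c *config) { c.ArgsEscaped = v }
}

// copyConfig applies each modifier to a shallow copy, leaving the base
// untouched — the same contract the builder's copyRunConfig provides.
func copyConfig(base config, mods ...configModifier) config {
	c := base
	for _, m := range mods {
		m(&c)
	}
	return c
}

func main() {
	c := copyConfig(config{}, withCmd([]string{"cmd", "/S", "/C"}), withArgsEscaped(true))
	fmt.Printf("%+v\n", c) // {Cmd:[cmd /S /C] ArgsEscaped:true}
}
```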
```diff
@@ -47,9 +47,6 @@ func TestDockerfileOutsideTheBuildContext(t *testing.T) {
 	defer cleanup()
 
 	expectedError := "Forbidden path outside the build context: ../../Dockerfile ()"
-	if runtime.GOOS == "windows" {
-		expectedError = "failed to resolve scoped path ../../Dockerfile ()"
-	}
 
 	readAndCheckDockerfile(t, "DockerfileOutsideTheBuildContext", contextDir, "../../Dockerfile", expectedError)
 }
```
```diff
@@ -64,9 +61,7 @@ func TestNonExistingDockerfile(t *testing.T) {
 }
 
 func readAndCheckDockerfile(t *testing.T, testName, contextDir, dockerfilePath, expectedError string) {
-	if runtime.GOOS != "windows" {
-		skip.If(t, os.Getuid() != 0, "skipping test that requires root")
-	}
+	skip.If(t, os.Getuid() != 0, "skipping test that requires root")
 	tarStream, err := archive.Tar(contextDir, archive.Uncompressed)
 	assert.NilError(t, err)
 
```
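The guard that disappears here matters on Windows: `os.Getuid` always returns -1 there, so an unguarded `skip.If(t, os.Getuid() != 0, …)` skips the test on every Windows run. A sketch of the guarded form follows; the `gotest.tools/v3/skip` module path is an assumption, as the tree at the time vendored an earlier major version of gotest.tools.

```go
package remotecontext_test

import (
	"os"
	"runtime"
	"testing"

	"gotest.tools/v3/skip"
)

// TestNeedsRoot shows the guard pattern from the removed side: the root
// check only runs where a uid exists, so Windows runs are not skipped
// spuriously by Getuid's constant -1.
func TestNeedsRoot(t *testing.T) {
	if runtime.GOOS != "windows" {
		skip.If(t, os.Getuid() != 0, "skipping test that requires root")
	}
	// ... test body that needs root privileges on Unix ...
}
```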
```diff
@@ -85,7 +80,7 @@ func readAndCheckDockerfile(t *testing.T, testName, contextDir, dockerfilePath,
 		Source: tarStream,
 	}
 	_, _, err = remotecontext.Detect(config)
-	assert.Check(t, is.ErrorContains(err, expectedError))
+	assert.Check(t, is.Error(err, expectedError))
 }
 
 func TestCopyRunConfig(t *testing.T) {
```
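The assertion swap above is a strictness change: `is.Error` demands that the full error message match exactly, while `is.ErrorContains` only requires a substring, which tolerates platform-specific prefixes and wrapping. A dependency-free sketch of the two semantics — `isError` and `errorContains` are stand-ins for the gotest.tools matchers, not their implementations:

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// isError mimics is.Error: the message must match in full.
func isError(err error, msg string) bool {
	return err != nil && err.Error() == msg
}

// errorContains mimics is.ErrorContains: a substring suffices.
func errorContains(err error, sub string) bool {
	return err != nil && strings.Contains(err.Error(), sub)
}

func main() {
	err := errors.New("failed to resolve scoped path ../../Dockerfile ()")
	fmt.Println(isError(err, "../../Dockerfile"))       // false: not an exact match
	fmt.Println(errorContains(err, "../../Dockerfile")) // true: substring match
}
```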
```diff
@@ -83,6 +83,7 @@ func lookupNTAccount(builder *Builder, accountName string, state *dispatchState)
 	runConfig := copyRunConfig(state.runConfig,
 		withCmdCommentString("internal run to obtain NT account information.", optionsPlatform.OS))
 
+	runConfig.ArgsEscaped = true
 	runConfig.Cmd = []string{targetExecutable, "getaccountsid", accountName}
 
 	hostConfig := &container.HostConfig{Mounts: []mount.Mount{
```
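`ArgsEscaped` is a Windows-only hint on the container config: when set, the command is treated as an already-escaped command line rather than an argv vector that still needs quoting. The sketch below illustrates that distinction under that assumption; `runConfig` and `commandLine` are stand-ins, not moby APIs.

```go
package main

import "fmt"

// runConfig stands in for container.Config; ArgsEscaped signals that
// Cmd already holds a fully escaped Windows command line (assumption
// stated in the lead-in, not taken from this diff).
type runConfig struct {
	Cmd         []string
	ArgsEscaped bool
}

func commandLine(c runConfig) string {
	if c.ArgsEscaped {
		return c.Cmd[0] // already escaped; pass through verbatim
	}
	// a naive join stands in for proper Windows quoting rules
	return fmt.Sprintf("%q", c.Cmd)
}

func main() {
	c := runConfig{
		Cmd:         []string{`C:\helper.exe getaccountsid "NT AUTHORITY\SYSTEM"`},
		ArgsEscaped: true,
	}
	fmt.Println(commandLine(c))
}
```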
```diff
@@ -28,7 +28,7 @@ func (m *MockBackend) ContainerAttachRaw(cID string, stdin io.ReadCloser, stdout
 	return nil
 }
 
-func (m *MockBackend) ContainerCreateIgnoreImagesArgsEscaped(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) {
+func (m *MockBackend) ContainerCreate(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) {
 	if m.containerCreateFunc != nil {
 		return m.containerCreateFunc(config)
 	}
```
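The renamed method lives on the builder's test double, and the surrounding context lines show the mock's shape: a method that delegates to an optional function field so each test injects exactly the behavior it needs. A condensed, self-contained sketch of that pattern — `createConfig` and the string ID stand in for the real `types.ContainerCreateConfig` and created-body types:

```go
package main

import "fmt"

// createConfig stands in for types.ContainerCreateConfig.
type createConfig struct{ Name string }

// mockBackend shows the injectable-function pattern from the hunk.
type mockBackend struct {
	containerCreateFunc func(createConfig) (string, error)
}

// ContainerCreate delegates to the injected function when present and
// otherwise falls back to a harmless default, so tests only stub what
// they care about.
func (m *mockBackend) ContainerCreate(config createConfig) (string, error) {
	if m.containerCreateFunc != nil {
		return m.containerCreateFunc(config)
	}
	return "", nil
}

func main() {
	m := &mockBackend{containerCreateFunc: func(c createConfig) (string, error) {
		return "id-" + c.Name, nil
	}}
	id, _ := m.ContainerCreate(createConfig{Name: "builder"})
	fmt.Println(id) // id-builder
}
```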
```diff
@@ -12,7 +12,6 @@ import (
 	"github.com/docker/docker/api/types/backend"
 	"github.com/docker/docker/builder"
 	"github.com/docker/docker/builder/dockerignore"
-	"github.com/docker/docker/errdefs"
 	"github.com/docker/docker/pkg/fileutils"
 	"github.com/docker/docker/pkg/urlutil"
 	"github.com/moby/buildkit/frontend/dockerfile/parser"
```
```diff
@@ -35,9 +34,8 @@ func Detect(config backend.BuildConfig) (remote builder.Source, dockerfile *pars
 	case remoteURL == ClientSessionRemote:
 		res, err := parser.Parse(config.Source)
 		if err != nil {
-			return nil, nil, errdefs.InvalidParameter(err)
+			return nil, nil, err
 		}
-
 		return nil, res, nil
 	case urlutil.IsGitURL(remoteURL):
 		remote, dockerfile, err = newGitRemote(remoteURL, dockerfilePath)
```
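The `errdefs.InvalidParameter` calls stripped out in these hunks exist to classify errors: the daemon's API layer maps the marker interface to an HTTP 400 instead of the default 500, so a malformed Dockerfile reads as a client error. The real implementation lives in github.com/docker/docker/errdefs; the sketch below only emulates its marker-interface idea, as an assumption-labeled illustration.

```go
package main

import (
	"errors"
	"fmt"
)

// invalidParameter wraps an error with a marker method, mimicking the
// errdefs mechanism (this is an emulation, not the errdefs source).
type invalidParameter struct{ error }

func (invalidParameter) InvalidParameter() {}

// InvalidParameter tags an error as a client-side problem.
func InvalidParameter(err error) error { return invalidParameter{err} }

// IsInvalidParameter checks for the marker anywhere in the wrap chain,
// which is what lets an HTTP layer pick a 400 status.
func IsInvalidParameter(err error) bool {
	var ip interface{ InvalidParameter() }
	return errors.As(err, &ip)
}

func main() {
	err := InvalidParameter(errors.New("the Dockerfile (Dockerfile) cannot be empty"))
	fmt.Println(IsInvalidParameter(err)) // true
}
```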
```diff
@@ -108,7 +106,7 @@ func newURLRemote(url string, dockerfilePath string, progressReader func(in io.R
 	switch contentType {
 	case mimeTypes.TextPlain:
 		res, err := parser.Parse(progressReader(content))
-		return nil, res, errdefs.InvalidParameter(err)
+		return nil, res, err
 	default:
 		source, err := FromArchive(progressReader(content))
 		if err != nil {
```
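Note that on the TextPlain path the wrapper is applied unconditionally, success path included, so it must pass a nil error through untouched — which, to my reading, the docker errdefs helpers are written to do. A short sketch of that nil-safe wrapping contract (`nilSafeWrap` is a hypothetical name):

```go
package main

import (
	"errors"
	"fmt"
)

// nilSafeWrap mirrors the behavior the hunk relies on: wrapping is a
// no-op for nil, so `return nil, res, wrap(err)` is safe even when the
// parse succeeded.
func nilSafeWrap(err error) error {
	if err == nil {
		return nil
	}
	return fmt.Errorf("invalid parameter: %w", err)
}

func main() {
	fmt.Println(nilSafeWrap(nil))                     // <nil>
	fmt.Println(nilSafeWrap(errors.New("bad input"))) // invalid parameter: bad input
}
```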
```diff
@@ -148,17 +146,11 @@ func readAndParseDockerfile(name string, rc io.Reader) (*parser.Result, error) {
 	br := bufio.NewReader(rc)
 	if _, err := br.Peek(1); err != nil {
 		if err == io.EOF {
-			return nil, errdefs.InvalidParameter(errors.Errorf("the Dockerfile (%s) cannot be empty", name))
+			return nil, errors.Errorf("the Dockerfile (%s) cannot be empty", name)
 		}
 		return nil, errors.Wrap(err, "unexpected error reading Dockerfile")
 	}
-
-	dockerfile, err := parser.Parse(br)
-	if err != nil {
-		return nil, errdefs.InvalidParameter(errors.Wrapf(err, "failed to parse %s", name))
-	}
-
-	return dockerfile, nil
+	return parser.Parse(br)
 }
 
 func openAt(remote builder.Source, path string) (driver.File, error) {
```
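Both sides of this hunk keep the same empty-file detection: bufio's `Peek` inspects the first byte without consuming it, so the parser still sees the entire stream afterwards. A runnable sketch of the idiom, where `checkNotEmpty` is a hypothetical helper name standing in for the function above:

```go
package main

import (
	"bufio"
	"fmt"
	"io"
	"strings"
)

// checkNotEmpty uses Peek(1) to reject an empty stream before parsing,
// without consuming any input the parser will need.
func checkNotEmpty(name string, rc io.Reader) (*bufio.Reader, error) {
	br := bufio.NewReader(rc)
	if _, err := br.Peek(1); err != nil {
		if err == io.EOF {
			return nil, fmt.Errorf("the Dockerfile (%s) cannot be empty", name)
		}
		return nil, fmt.Errorf("unexpected error reading Dockerfile: %w", err)
	}
	return br, nil
}

func main() {
	if _, err := checkNotEmpty("Dockerfile", strings.NewReader("")); err != nil {
		fmt.Println(err) // the Dockerfile (Dockerfile) cannot be empty
	}
	if br, err := checkNotEmpty("Dockerfile", strings.NewReader("FROM scratch\n")); err == nil {
		line, _ := br.ReadString('\n')
		fmt.Print(line) // FROM scratch — Peek left the data intact
	}
}
```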
Some files were not shown because too many files have changed in this diff.