Mirror of https://github.com/moby/moby.git, synced 2026-01-11 18:51:37 +00:00.
Compare commits
705 Commits
v17.05.0-ce
...
v17.06.2-ce
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
5f70730d57 | ||
|
|
587c4842ac | ||
|
|
4656549203 | ||
|
|
b9e34b5fbb | ||
|
|
3b252135dc | ||
|
|
9ae42ade4a | ||
|
|
23259112e9 | ||
|
|
645ac4448c | ||
|
|
815a6b3d4b | ||
|
|
bf6931b711 | ||
|
|
35bd49d950 | ||
|
|
6a25ed8b4a | ||
|
|
5b4dd2f905 | ||
|
|
02e0bfa753 | ||
|
|
c8533a2648 | ||
|
|
7b9619dbd4 | ||
|
|
c886860024 | ||
|
|
a492706a7f | ||
|
|
107f32f718 | ||
|
|
73dc2c2f5a | ||
|
|
133ca4708d | ||
|
|
acf06a5d60 | ||
|
|
8ff1920458 | ||
|
|
1fd76f3838 | ||
|
|
acbee943fd | ||
|
|
8660f37d48 | ||
|
|
8f9d67cb79 | ||
|
|
c3d547c456 | ||
|
|
89bacc278b | ||
|
|
853274ece3 | ||
|
|
2cddc993de | ||
|
|
fc5e100344 | ||
|
|
7d99cdff8d | ||
|
|
064f22822c | ||
|
|
e712f523e0 | ||
|
|
f20ac0badb | ||
|
|
6fade192eb | ||
|
|
e011810b80 | ||
|
|
c32ca2f148 | ||
|
|
993b6912fd | ||
|
|
d27ffb2708 | ||
|
|
af9667841e | ||
|
|
9bb8dfa27e | ||
|
|
b3fefc36b3 | ||
|
|
b9f1bb4e97 | ||
|
|
064ade21f2 | ||
|
|
260246bb70 | ||
|
|
c0e0575129 | ||
|
|
482543a87c | ||
|
|
98debbf694 | ||
|
|
bdd34e70b3 | ||
|
|
801c2a432c | ||
|
|
02d284bd2f | ||
|
|
c6c8526b85 | ||
|
|
3ec56e0783 | ||
|
|
a473cc5ef7 | ||
|
|
af2c3bd37f | ||
|
|
3673d2b668 | ||
|
|
6c7b043f77 | ||
|
|
791faef9e2 | ||
|
|
5fe12d194d | ||
|
|
e4e9921367 | ||
|
|
b1323a1c8f | ||
|
|
27deeb1298 | ||
|
|
bc5e4e3ce3 | ||
|
|
0915c2c848 | ||
|
|
73c462b2e5 | ||
|
|
9c5bb024df | ||
|
|
cc547fdd4b | ||
|
|
5c27bce649 | ||
|
|
14bc03dbb8 | ||
|
|
b055794618 | ||
|
|
a65742e314 | ||
|
|
9a727acc15 | ||
|
|
82e1487e72 | ||
|
|
a11bca5891 | ||
|
|
a67f442586 | ||
|
|
842c51c394 | ||
|
|
a2f65ec4aa | ||
|
|
dcbaad5448 | ||
|
|
5c0be74bb8 | ||
|
|
91c49beea0 | ||
|
|
9778382f97 | ||
|
|
70d059f024 | ||
|
|
ab046e94d5 | ||
|
|
5c5ac7eb9b | ||
|
|
d62ed5f3ac | ||
|
|
182cd939be | ||
|
|
f7feb663b7 | ||
|
|
a2b498e128 | ||
|
|
f024e2d7eb | ||
|
|
4e8af4c431 | ||
|
|
e0ffd247bf | ||
|
|
33af086a0b | ||
|
|
3c9dfed30e | ||
|
|
ee9a857558 | ||
|
|
9ab2df07fd | ||
|
|
752471c92f | ||
|
|
f8ecde191a | ||
|
|
093c31c694 | ||
|
|
5c6a466a84 | ||
|
|
4d988e9141 | ||
|
|
b214658e4c | ||
|
|
f981cf94d0 | ||
|
|
b9863603cd | ||
|
|
0f09cebc65 | ||
|
|
870d659101 | ||
|
|
5ee88ddc0d | ||
|
|
631cf6dc8d | ||
|
|
bd04a75392 | ||
|
|
7a11613e94 | ||
|
|
0570feee3d | ||
|
|
daef057517 | ||
|
|
400454cf9a | ||
|
|
b66b1849e9 | ||
|
|
b01ed8895c | ||
|
|
4a5fa1e147 | ||
|
|
393ea2d964 | ||
|
|
90fd450182 | ||
|
|
80cc4bc95f | ||
|
|
9a3a4c0243 | ||
|
|
312781c2e1 | ||
|
|
0aefd9b0f8 | ||
|
|
f888a25ff5 | ||
|
|
58b1788c81 | ||
|
|
d98d4faef8 | ||
|
|
39a604c0d4 | ||
|
|
d8e85f8c60 | ||
|
|
a3536d2c6d | ||
|
|
b654b6244d | ||
|
|
61e527f16c | ||
|
|
4d2d2ea393 | ||
|
|
49bf533fe7 | ||
|
|
ab83b924bc | ||
|
|
93763f11ee | ||
|
|
01af41ed99 | ||
|
|
6c05ceeb0b | ||
|
|
dd27bbb41a | ||
|
|
f64a4ad008 | ||
|
|
9b0e1df4dd | ||
|
|
da9a885483 | ||
|
|
6f3f907cdb | ||
|
|
b0dd3dfc11 | ||
|
|
6b26257202 | ||
|
|
cc24049f77 | ||
|
|
7061b0f748 | ||
|
|
d012569b78 | ||
|
|
524f306340 | ||
|
|
df6dfcf198 | ||
|
|
23e857cd3d | ||
|
|
b28c1eae01 | ||
|
|
45c6f4262a | ||
|
|
c0d2ec1eb5 | ||
|
|
4874e05f74 | ||
|
|
f3d8ff774e | ||
|
|
fe11de0177 | ||
|
|
ca4b3c6f67 | ||
|
|
df86a14af2 | ||
|
|
7d4b8fb3b5 | ||
|
|
e103125883 | ||
|
|
587d07cca8 | ||
|
|
d618b56b40 | ||
|
|
3950cef941 | ||
|
|
8daeeacfc1 | ||
|
|
46392f2442 | ||
|
|
b34d3e730f | ||
|
|
fcafc7108b | ||
|
|
9ee7b4dda9 | ||
|
|
3d630498c3 | ||
|
|
280327cb4d | ||
|
|
59d45c384a | ||
|
|
6f6ee6fd04 | ||
|
|
ef07964ab7 | ||
|
|
dd340c52cb | ||
|
|
213ed02e25 | ||
|
|
4dd3e5b77c | ||
|
|
5034288381 | ||
|
|
77c9728847 | ||
|
|
c053a2069e | ||
|
|
03efb40cb8 | ||
|
|
a30ef99e8d | ||
|
|
7658851e74 | ||
|
|
d6f4fe9e38 | ||
|
|
4921171587 | ||
|
|
cfdf84d5d0 | ||
|
|
309f99edae | ||
|
|
1290ec2d4b | ||
|
|
67fdf574d5 | ||
|
|
e0d533b1e8 | ||
|
|
55fd0d0115 | ||
|
|
0a9c79f93b | ||
|
|
d6b1650825 | ||
|
|
7b7f9a481e | ||
|
|
1f6d81568a | ||
|
|
535d5004dc | ||
|
|
eab0f58661 | ||
|
|
ad846a136a | ||
|
|
bd4e8aa64e | ||
|
|
b5f4fc7162 | ||
|
|
c93a48e138 | ||
|
|
d12b2387d7 | ||
|
|
1c94c264c4 | ||
|
|
2a8f46abfd | ||
|
|
f944183c75 | ||
|
|
f8416e82d9 | ||
|
|
3adddab957 | ||
|
|
0bff591bb0 | ||
|
|
c0afd9c873 | ||
|
|
c1635c1ae3 | ||
|
|
6ef7afce83 | ||
|
|
df4ca50805 | ||
|
|
f6c00f6e80 | ||
|
|
f30b072b81 | ||
|
|
1efbe6e876 | ||
|
|
7535462b77 | ||
|
|
4dbea104ca | ||
|
|
04eb1f0cac | ||
|
|
d6e1cb7cbf | ||
|
|
203feeed86 | ||
|
|
632a8635a0 | ||
|
|
8c340b7edf | ||
|
|
55ed80f78a | ||
|
|
0ea0b2becf | ||
|
|
9771780a01 | ||
|
|
2c45392b8f | ||
|
|
33054be31b | ||
|
|
3377664e94 | ||
|
|
ba52bb0fd1 | ||
|
|
99c1456647 | ||
|
|
63d4000e97 | ||
|
|
4d8234fb10 | ||
|
|
6cea2e5206 | ||
|
|
3183031581 | ||
|
|
3b9a6309c8 | ||
|
|
d841da9379 | ||
|
|
6bffb585cd | ||
|
|
23359fd403 | ||
|
|
306074572c | ||
|
|
7fd8a9382c | ||
|
|
99b5fadd0c | ||
|
|
8022c5fdd1 | ||
|
|
8a03eb0b6c | ||
|
|
1ab62a0b4d | ||
|
|
dc87490b63 | ||
|
|
3a4cf2b076 | ||
|
|
5072b22c5f | ||
|
|
4a98f9ef7f | ||
|
|
4e0fdc9098 | ||
|
|
190c6e8cf8 | ||
|
|
a86023eaa9 | ||
|
|
20255ade29 | ||
|
|
c56cfbaf96 | ||
|
|
bf1c377f60 | ||
|
|
4374f16667 | ||
|
|
8ef9c19ecd | ||
|
|
f775005a17 | ||
|
|
6f073e3522 | ||
|
|
84b03660da | ||
|
|
ab74038df9 | ||
|
|
0a377b5f56 | ||
|
|
721b7a7fad | ||
|
|
83a4afe264 | ||
|
|
680084b2a2 | ||
|
|
ddd5278b07 | ||
|
|
b192218503 | ||
|
|
68f21418ac | ||
|
|
47436e9628 | ||
|
|
eb8abc9598 | ||
|
|
671805cce0 | ||
|
|
c307f4521e | ||
|
|
0e8e8f0f31 | ||
|
|
1245866249 | ||
|
|
6e83ef6ad6 | ||
|
|
8fd55cd064 | ||
|
|
d8908c3467 | ||
|
|
33ebf32cb0 | ||
|
|
69c35dad8e | ||
|
|
5771687002 | ||
|
|
376c75d13c | ||
|
|
a771c16834 | ||
|
|
16c4b33774 | ||
|
|
1847bb899a | ||
|
|
d2c5b6ee9f | ||
|
|
1f1befdcf5 | ||
|
|
9e9fc7b57c | ||
|
|
a58cc35ab8 | ||
|
|
102738101a | ||
|
|
7728557687 | ||
|
|
e3a30ffca6 | ||
|
|
3cf18596e9 | ||
|
|
f3bcea00cd | ||
|
|
3e7c1b9a99 | ||
|
|
974cec945b | ||
|
|
eb36d60216 | ||
|
|
77d5a0996f | ||
|
|
ff86a9196b | ||
|
|
1f55b71ff9 | ||
|
|
a89dd03c26 | ||
|
|
385176980e | ||
|
|
e2ec006797 | ||
|
|
2ca41c47c4 | ||
|
|
71d29266ff | ||
|
|
73e2f55543 | ||
|
|
51bca5c406 | ||
|
|
470dfd69b3 | ||
|
|
f02a5b50c4 | ||
|
|
6c28e8edd5 | ||
|
|
ab3a037a5b | ||
|
|
b3bc7b28d0 | ||
|
|
a3eebaf509 | ||
|
|
28d428f4ec | ||
|
|
e428c824c3 | ||
|
|
e8c2a33b74 | ||
|
|
a46f7bd5bd | ||
|
|
08fdce7738 | ||
|
|
c141574d5d | ||
|
|
32d47be263 | ||
|
|
64cccedbce | ||
|
|
4a812040eb | ||
|
|
0da7bd0314 | ||
|
|
47a17c211d | ||
|
|
37ce91ddd6 | ||
|
|
67d282a5c9 | ||
|
|
f4349a77fd | ||
|
|
890f021902 | ||
|
|
4836b0ab85 | ||
|
|
1397a49811 | ||
|
|
6efdac4291 | ||
|
|
c546894aef | ||
|
|
6e0e283387 | ||
|
|
46f5d7e325 | ||
|
|
2a7a938da4 | ||
|
|
94d3758487 | ||
|
|
815e8bb885 | ||
|
|
22a03192fb | ||
|
|
80bb6a46a3 | ||
|
|
c68612de0c | ||
|
|
24d4177859 | ||
|
|
12e232ee35 | ||
|
|
270a33b666 | ||
|
|
d1a7ea147c | ||
|
|
6a96049ddf | ||
|
|
543dfde637 | ||
|
|
3d7af24f63 | ||
|
|
7ff9fb5f04 | ||
|
|
2bc6fffacb | ||
|
|
1a6f8a92b2 | ||
|
|
500d267356 | ||
|
|
089c2e2131 | ||
|
|
1bb3f57c45 | ||
|
|
09ff5ce29c | ||
|
|
5206d45e70 | ||
|
|
edd1c9e325 | ||
|
|
547510fb55 | ||
|
|
9c537b95de | ||
|
|
781ff2bcff | ||
|
|
3a2d68a8a0 | ||
|
|
9370b6ac2d | ||
|
|
72cc81ee8d | ||
|
|
a762ceace4 | ||
|
|
5122abeb44 | ||
|
|
66bc8f128c | ||
|
|
47cceb7f2c | ||
|
|
7f968435f6 | ||
|
|
dcf2632945 | ||
|
|
d925e50470 | ||
|
|
595901bd7e | ||
|
|
a28b173a78 | ||
|
|
85a7f4bbc7 | ||
|
|
6b3801858c | ||
|
|
040a1d79a2 | ||
|
|
5766317e33 | ||
|
|
41b27de41b | ||
|
|
e842c653a0 | ||
|
|
4b846a125b | ||
|
|
63c16a443a | ||
|
|
daa8b262fe | ||
|
|
973bce2100 | ||
|
|
29efb93a19 | ||
|
|
dda41118b0 | ||
|
|
6f7dc65847 | ||
|
|
7238cca42c | ||
|
|
c3dff2359d | ||
|
|
afd7e2d2d5 | ||
|
|
8f159358fb | ||
|
|
a1aa69433d | ||
|
|
7ca86796c9 | ||
|
|
34337db95d | ||
|
|
1b68641fc8 | ||
|
|
54dcbab25e | ||
|
|
005506d36c | ||
|
|
669f4ba37e | ||
|
|
71e9f5c520 | ||
|
|
e30c2e86e8 | ||
|
|
f47a61c429 | ||
|
|
32915b1d0a | ||
|
|
5a076d7589 | ||
|
|
633f9252b8 | ||
|
|
d624f9a7b0 | ||
|
|
fedb3b8884 | ||
|
|
230bc34837 | ||
|
|
71760ae648 | ||
|
|
5894bc1abf | ||
|
|
28379bd29a | ||
|
|
bad7d1fa2f | ||
|
|
b61ffbfb52 | ||
|
|
4662d3e9be | ||
|
|
cbf8c07e9e | ||
|
|
b106dd8b57 | ||
|
|
37747a0fce | ||
|
|
5f0ea4d016 | ||
|
|
05ad14fc1b | ||
|
|
130d1491b7 | ||
|
|
0d99cb8629 | ||
|
|
118d4ee123 | ||
|
|
493a6c3d41 | ||
|
|
2f0ebba0e7 | ||
|
|
a74833aa70 | ||
|
|
80c3ec027d | ||
|
|
2f3f1e6ccd | ||
|
|
9c9cc5ec43 | ||
|
|
140fefd581 | ||
|
|
865a5fd5e3 | ||
|
|
a0fe5e9ca0 | ||
|
|
5657f05b5a | ||
|
|
bf5cf84534 | ||
|
|
fa4b377bbc | ||
|
|
37be263826 | ||
|
|
5ef07d79c4 | ||
|
|
60742f9a95 | ||
|
|
08d7fad45d | ||
|
|
d9371ee807 | ||
|
|
97f8607164 | ||
|
|
3dcab28982 | ||
|
|
d35fc14099 | ||
|
|
d70fde8ffe | ||
|
|
f3dfcfd307 | ||
|
|
a34f9b0bb1 | ||
|
|
7844876547 | ||
|
|
077f08bf54 | ||
|
|
ef20f027cc | ||
|
|
c87d67b0ad | ||
|
|
4f522580f5 | ||
|
|
146e058592 | ||
|
|
fa54c94b9d | ||
|
|
67ac02d4a6 | ||
|
|
0fe924ad98 | ||
|
|
7a95836931 | ||
|
|
cd456433ea | ||
|
|
1d0b921d70 | ||
|
|
1504d51701 | ||
|
|
a28d91349e | ||
|
|
41eb61d5c2 | ||
|
|
f157b54606 | ||
|
|
cfcf2a0cec | ||
|
|
9f738cc574 | ||
|
|
73abe0c682 | ||
|
|
ba332a60b2 | ||
|
|
0296797f0f | ||
|
|
f07811f766 | ||
|
|
adb2ddf288 | ||
|
|
84812f3f00 | ||
|
|
f9f66f946f | ||
|
|
5cd75dd14c | ||
|
|
be2ba8f791 | ||
|
|
e4c03623c2 | ||
|
|
4219156a62 | ||
|
|
42636920d3 | ||
|
|
30221cc521 | ||
|
|
6d7ae1fa0b | ||
|
|
f1571e8b67 | ||
|
|
cc9dc643ea | ||
|
|
f0bb1d7a4a | ||
|
|
80543b2601 | ||
|
|
44c3de8450 | ||
|
|
441e861095 | ||
|
|
df0d317a64 | ||
|
|
721e1736ae | ||
|
|
6353f993d9 | ||
|
|
52bded9868 | ||
|
|
ec67cea96e | ||
|
|
25058d9b0c | ||
|
|
9f0ebea335 | ||
|
|
9b4bbaf121 | ||
|
|
eaae8a9a9c | ||
|
|
ebe0a489a5 | ||
|
|
3875305284 | ||
|
|
9752e41fd0 | ||
|
|
a7519152d9 | ||
|
|
5794008b68 | ||
|
|
0d9e66b98a | ||
|
|
eabee4d47b | ||
|
|
68a5336b61 | ||
|
|
294c9eab56 | ||
|
|
dc329d38ad | ||
|
|
db3576f8a0 | ||
|
|
ccc1324b59 | ||
|
|
01fd4e49c3 | ||
|
|
cfae6278be | ||
|
|
25db82371e | ||
|
|
420b67f892 | ||
|
|
6b5a02e95c | ||
|
|
2881e2be58 | ||
|
|
aee2da3bdf | ||
|
|
0307fe1a0b | ||
|
|
169c013911 | ||
|
|
ae0f8c7ba1 | ||
|
|
afbd5a79ca | ||
|
|
eda3ab9394 | ||
|
|
acbfe6bc56 | ||
|
|
b7a32e1780 | ||
|
|
69a2ca0d44 | ||
|
|
c3e161fb52 | ||
|
|
b065ed3ec2 | ||
|
|
d90fb13de7 | ||
|
|
6559abaf47 | ||
|
|
5704d58cd8 | ||
|
|
3c5f3fd42b | ||
|
|
22d9eadee2 | ||
|
|
97d6b2a873 | ||
|
|
2414166e1e | ||
|
|
5eca7f7c5d | ||
|
|
0e9b7df626 | ||
|
|
8dc8cd4719 | ||
|
|
b96988feb9 | ||
|
|
d4bf6ad716 | ||
|
|
af8a1430f1 | ||
|
|
30fe330e62 | ||
|
|
54dfc3721c | ||
|
|
94465adaf0 | ||
|
|
ea820cae7b | ||
|
|
b3649f4065 | ||
|
|
3f6e861c14 | ||
|
|
b0401a71f7 | ||
|
|
4480e0417e | ||
|
|
95e6c848b1 | ||
|
|
08422650be | ||
|
|
1076ab58ec | ||
|
|
145dfd924c | ||
|
|
e8abe0a69d | ||
|
|
d4fb626e36 | ||
|
|
7a3aa2dabe | ||
|
|
f3ff8dc584 | ||
|
|
acc2d5b7d6 | ||
|
|
d1faf3df27 | ||
|
|
bb30ab9b5f | ||
|
|
c27603238c | ||
|
|
8c014db302 | ||
|
|
205ec49de9 | ||
|
|
e1101b1295 | ||
|
|
a3a109d956 | ||
|
|
cf6dc6160a | ||
|
|
749d90e10f | ||
|
|
47367b21d5 | ||
|
|
c9beb417f4 | ||
|
|
abbbf91498 | ||
|
|
52626bb919 | ||
|
|
e59327aaac | ||
|
|
3beb8d9c8b | ||
|
|
3279ca3c00 | ||
|
|
0dee69799e | ||
|
|
5a9f2a3ce6 | ||
|
|
cf7d246ab0 | ||
|
|
8cb9d7ec2e | ||
|
|
d0ce488d9d | ||
|
|
f9311c52ce | ||
|
|
663f0ba1b2 | ||
|
|
c22b21fede | ||
|
|
b2551c619d | ||
|
|
c264aefdac | ||
|
|
83ee902ecc | ||
|
|
a709f79c1a | ||
|
|
80013fd59a | ||
|
|
9484c3bd81 | ||
|
|
0e9148b1e9 | ||
|
|
8fff9bd081 | ||
|
|
bb5dfdb8c5 | ||
|
|
427a521b02 | ||
|
|
7f927516ea | ||
|
|
252f7246a7 | ||
|
|
d8413b86c0 | ||
|
|
b8e9250b59 | ||
|
|
50368e7a0e | ||
|
|
4fdb17c777 | ||
|
|
68e71aa3e6 | ||
|
|
32ca1214fa | ||
|
|
7936a962dc | ||
|
|
c79bf50ba6 | ||
|
|
a582d9dc42 | ||
|
|
b0ba39d431 | ||
|
|
2c435ab8be | ||
|
|
4bcb02b785 | ||
|
|
fe3797d986 | ||
|
|
ee785b1b98 | ||
|
|
8f51746997 | ||
|
|
3482b45e60 | ||
|
|
cd4c089b9e | ||
|
|
64932563c9 | ||
|
|
5e4e357f6e | ||
|
|
5cea9a0aa5 | ||
|
|
cad32e0111 | ||
|
|
f33f257888 | ||
|
|
3f676c1918 | ||
|
|
1eec7b5583 | ||
|
|
1c7b03a455 | ||
|
|
a9ff628a3c | ||
|
|
a899aa6796 | ||
|
|
4850bc4df8 | ||
|
|
2b5ef9bfef | ||
|
|
bda69e6142 | ||
|
|
2e63c759a2 | ||
|
|
a3ab46361e | ||
|
|
a35a65b78a | ||
|
|
72f4fc78ff | ||
|
|
6e577ea1c0 | ||
|
|
3977d2c440 | ||
|
|
bddd9cce40 | ||
|
|
0ee58cb3b9 | ||
|
|
b483e4f09c | ||
|
|
1d1defa399 | ||
|
|
6052f2b396 | ||
|
|
8a27758364 | ||
|
|
db35c2a5a8 | ||
|
|
f2146cb74b | ||
|
|
ac245e2845 | ||
|
|
56dca8b676 | ||
|
|
41f4c3cf7e | ||
|
|
aafd7fa969 | ||
|
|
4d62f67117 | ||
|
|
700b4807c3 | ||
|
|
0055a48277 | ||
|
|
77da738832 | ||
|
|
0b35ab1965 | ||
|
|
df3c425407 | ||
|
|
b7794ac46f | ||
|
|
0083d557df | ||
|
|
aa22bcb652 | ||
|
|
05930120a0 | ||
|
|
47615c9b9b | ||
|
|
01c80435c6 | ||
|
|
eede2056fe | ||
|
|
9b96b2d276 | ||
|
|
e34bee387e | ||
|
|
3b5af0a289 | ||
|
|
bc4560e512 | ||
|
|
d8b6a35d02 | ||
|
|
4cbc953a5d | ||
|
|
9c53fa2d0c | ||
|
|
2fb7c3c4f0 | ||
|
|
64c4c1c3d5 | ||
|
|
bb429da9a9 | ||
|
|
d2ab40e5b9 | ||
|
|
480d1b841f | ||
|
|
5fc912d2c8 | ||
|
|
aa92df71b2 | ||
|
|
d40a17ffc2 | ||
|
|
3c07259882 | ||
|
|
330ddf8eb8 | ||
|
|
d0bd5aa2a7 | ||
|
|
08a39d7f02 | ||
|
|
2aec48f448 | ||
|
|
8a7ff5ff74 | ||
|
|
10e171cd94 | ||
|
|
2a17d048de | ||
|
|
6672ffa566 | ||
|
|
18f90133ac | ||
|
|
78b2c1a84a | ||
|
|
a0a977864f | ||
|
|
9c77a4c297 | ||
|
|
50a9be4bf5 | ||
|
|
83f44d232d | ||
|
|
3343653edb | ||
|
|
068f344e03 | ||
|
|
17abacb894 | ||
|
|
d11c1520f4 | ||
|
|
4a0704cdbd | ||
|
|
8a8c1961b5 | ||
|
|
5b0ab45a1e | ||
|
|
9d4a8cda6b | ||
|
|
a6abd57b83 | ||
|
|
bfcd95817a | ||
|
|
c7fad9b750 | ||
|
|
12601e3559 | ||
|
|
b507158c0f | ||
|
|
d6bb4ae434 | ||
|
|
dc762610ab | ||
|
|
3a9be92927 | ||
|
|
8c2c69d31e | ||
|
|
f71bdc67a2 | ||
|
|
e06e2ef107 | ||
|
|
56fb4653e8 | ||
|
|
b11af7b2f6 | ||
|
|
e838679cd7 | ||
|
|
f32b90f463 | ||
|
|
6b60a60729 | ||
|
|
1ecd8ed518 | ||
|
|
a18d103b5e | ||
|
|
985a9c7047 | ||
|
|
c284e85f59 | ||
|
|
c7c6167bca | ||
|
|
44ce809c95 | ||
|
|
bcb53d3489 | ||
|
|
4d23394333 | ||
|
|
6bc1f345af | ||
|
|
1135eadff9 |
49
.mailmap
49
.mailmap
@@ -36,8 +36,8 @@ Guillaume J. Charmes <guillaume.charmes@docker.com> <charmes.guillaume@gmail.com
|
||||
Thatcher Peskens <thatcher@docker.com>
|
||||
Thatcher Peskens <thatcher@docker.com> <thatcher@dotcloud.com>
|
||||
Thatcher Peskens <thatcher@docker.com> dhrp <thatcher@gmx.net>
|
||||
Jérôme Petazzoni <jerome.petazzoni@dotcloud.com> jpetazzo <jerome.petazzoni@dotcloud.com>
|
||||
Jérôme Petazzoni <jerome.petazzoni@dotcloud.com> <jp@enix.org>
|
||||
Jérôme Petazzoni <jerome.petazzoni@docker.com> <jerome.petazzoni@dotcloud.com>
|
||||
Jérôme Petazzoni <jerome.petazzoni@docker.com> <jp@enix.org>
|
||||
Joffrey F <joffrey@docker.com>
|
||||
Joffrey F <joffrey@docker.com> <joffrey@dotcloud.com>
|
||||
Joffrey F <joffrey@docker.com> <f.joffrey@gmail.com>
|
||||
@@ -93,6 +93,7 @@ Sven Dowideit <SvenDowideit@home.org.au> <¨SvenDowideit@home.org.au¨>
|
||||
Sven Dowideit <SvenDowideit@home.org.au> <SvenDowideit@home.org.au>
|
||||
Sven Dowideit <SvenDowideit@home.org.au> <SvenDowideit@users.noreply.github.com>
|
||||
Sven Dowideit <SvenDowideit@home.org.au> <sven@t440s.home.gateway>
|
||||
Akihiro Matsushima <amatsusbit@gmail.com> <amatsus@users.noreply.github.com>
|
||||
<alexl@redhat.com> <alexander.larsson@gmail.com>
|
||||
Alexander Morozov <lk4d4@docker.com> <lk4d4math@gmail.com>
|
||||
Alexander Morozov <lk4d4@docker.com>
|
||||
@@ -108,6 +109,7 @@ Roberto G. Hashioka <roberto.hashioka@docker.com> <roberto_hashioka@hotmail.com>
|
||||
Sridhar Ratnakumar <sridharr@activestate.com>
|
||||
Sridhar Ratnakumar <sridharr@activestate.com> <github@srid.name>
|
||||
Liang-Chi Hsieh <viirya@gmail.com>
|
||||
Aaron L. Xu <liker.xu@foxmail.com>
|
||||
Aleksa Sarai <asarai@suse.de>
|
||||
Aleksa Sarai <asarai@suse.de> <asarai@suse.com>
|
||||
Aleksa Sarai <asarai@suse.de> <cyphar@cyphar.com>
|
||||
@@ -162,17 +164,31 @@ Darren Shepherd <darren.s.shepherd@gmail.com> <darren@rancher.com>
|
||||
Deshi Xiao <dxiao@redhat.com> <dsxiao@dataman-inc.com>
|
||||
Deshi Xiao <dxiao@redhat.com> <xiaods@gmail.com>
|
||||
Doug Davis <dug@us.ibm.com> <duglin@users.noreply.github.com>
|
||||
Giampaolo Mancini <giampaolo@trampolineup.com>
|
||||
K. Heller <pestophagous@gmail.com> <pestophagous@users.noreply.github.com>
|
||||
Jacob Atzen <jacob@jacobatzen.dk> <jatzen@gmail.com>
|
||||
Jeff Nickoloff <jeff.nickoloff@gmail.com> <jeff@allingeek.com>
|
||||
Jérôme Petazzoni <jerome.petazzoni@docker.com> <jerome.petazzoni@dotcloud.com>
|
||||
John Harris <john@johnharris.io>
|
||||
John Howard (VM) <John.Howard@microsoft.com> <jhowardmsft@users.noreply.github.com>
|
||||
John Howard (VM) <John.Howard@microsoft.com>
|
||||
John Howard (VM) <John.Howard@microsoft.com> <john.howard@microsoft.com>
|
||||
John Howard (VM) <John.Howard@microsoft.com> <jhoward@microsoft.com>
|
||||
John Howard (VM) <John.Howard@microsoft.com> <jhoward@ntdev.microsoft.com>
|
||||
Kevin Feyrer <kevin.feyrer@btinternet.com> <kevinfeyrer@users.noreply.github.com>
|
||||
Liao Qingwei <liaoqingwei@huawei.com>
|
||||
Luke Marsden <me@lukemarsden.net> <luke@digital-crocus.com>
|
||||
Madhu Venugopal <madhu@socketplane.io> <madhu@docker.com>
|
||||
Mageee <fangpuyi@foxmail.com> <21521230.zju.edu.cn>
|
||||
Mansi Nahar <mmn4185@rit.edu> <mansinahar@users.noreply.github.com>
|
||||
Mansi Nahar <mmn4185@rit.edu> <mansi.nahar@macbookpro-mansinahar.local>
|
||||
Mary Anthony <mary.anthony@docker.com> <mary@docker.com>
|
||||
Mary Anthony <mary.anthony@docker.com> moxiegirl <mary@docker.com>
|
||||
Mary Anthony <mary.anthony@docker.com> <moxieandmore@gmail.com>
|
||||
mattyw <mattyw@me.com> <gh@mattyw.net>
|
||||
Michael Spetsiotis <michael_spets@hotmail.com>
|
||||
Nik Nyby <nikolas@gnu.org> <nnyby@columbia.edu>
|
||||
Peter Jaffe <pjaffe@nevo.com>
|
||||
resouer <resouer@163.com> <resouer@gmail.com>
|
||||
AJ Bowen <aj@gandi.net> soulshake <amy@gandi.net>
|
||||
AJ Bowen <aj@gandi.net> soulshake <aj@gandi.net>
|
||||
@@ -234,11 +250,13 @@ Stephen Day <stephen.day@docker.com>
|
||||
Toli Kuznets <toli@docker.com>
|
||||
Tristan Carel <tristan@cogniteev.com>
|
||||
<tristan@cogniteev.com> <tristan.carel@gmail.com>
|
||||
Vincent Demeester <vincent@sbr.pm>
|
||||
<vincent@sbr.pm> <vincent+github@demeester.fr>
|
||||
<vincent.demeester@docker.com> <vincent@sbr.pm>
|
||||
<vincent.demeester@docker.com> <vincent+github@demeester.fr>
|
||||
Vincent Demeester <vincent.demeester@docker.com> <vincent@demeester.fr>
|
||||
Vishnu Kannan <vishnuk@google.com>
|
||||
xlgao-zju <xlgao@zju.edu.cn> xlgao <xlgao@zju.edu.cn>
|
||||
yuchangchun <yuchangchun1@huawei.com> y00277921 <yuchangchun1@huawei.com>
|
||||
Yu Changchun <yuchangchun1@huawei.com> y00277921 <yuchangchun1@huawei.com>
|
||||
Yu Changchun <yuchangchun1@huawei.com>
|
||||
<zij@case.edu> <zjaffee@us.ibm.com>
|
||||
<anujbahuguna.dev@gmail.com> <abahuguna@fiberlink.com>
|
||||
<eungjun.yi@navercorp.com> <semtlenori@gmail.com>
|
||||
@@ -284,38 +302,59 @@ Bingshen Wang <bingshen.wbs@alibaba-inc.com>
|
||||
Chen Chuanliang <chen.chuanliang@zte.com.cn>
|
||||
Chen Mingjie <chenmingjie0828@163.com>
|
||||
CUI Wei <ghostplant@qq.com> cuiwei13 <cuiwei13@pku.edu.cn>
|
||||
Daniel Grunwell <mwgrunny@gmail.com>
|
||||
Daniel J Walsh <dwalsh@redhat.com>
|
||||
Dattatraya Kumbhar <dattatraya.kumbhar@gslab.com>
|
||||
David Sheets <dsheets@docker.com> <sheets@alum.mit.edu>
|
||||
Diego Siqueira <dieg0@live.com>
|
||||
Eric G. Noriega <enoriega@vizuri.com> <egnoriega@users.noreply.github.com>
|
||||
Evelyn Xu <evelynhsu21@gmail.com>
|
||||
Felix Ruess <felix.ruess@gmail.com> <felix.ruess@roboception.de>
|
||||
Gabriel Nicolas Avellaneda <avellaneda.gabriel@gmail.com>
|
||||
Gang Qiao <qiaohai8866@gmail.com> <1373319223@qq.com>
|
||||
Gustav Sinder <gustav.sinder@gmail.com>
|
||||
Harshal Patil <harshal.patil@in.ibm.com> <harche@users.noreply.github.com>
|
||||
Helen Xie <chenjg@harmonycloud.cn>
|
||||
Hyzhou Zhy <hyzhou.zhy@alibaba-inc.com> <1187766782@qq.com>
|
||||
Hyzhou Zhy <hyzhou.zhy@alibaba-inc.com>
|
||||
Jacob Tomlinson <jacob@tom.linson.uk> <jacobtomlinson@users.noreply.github.com>
|
||||
Jiuyue Ma <majiuyue@huawei.com>
|
||||
Jose Diaz-Gonzalez <jose@seatgeek.com> <josegonzalez@users.noreply.github.com>
|
||||
Josh Eveleth <joshe@opendns.com> <jeveleth@users.noreply.github.com>
|
||||
Josh Wilson <josh.wilson@fivestars.com> <jcwilson@users.noreply.github.com>
|
||||
Jim Galasyn <jim.galasyn@docker.com>
|
||||
Kevin Kern <kaiwentan@harmonycloud.cn>
|
||||
Konstantin Gribov <grossws@gmail.com>
|
||||
Kunal Kushwaha <kushwaha_kunal_v7@lab.ntt.co.jp> <kunal.kushwaha@gmail.com>
|
||||
Lajos Papp <lajos.papp@sequenceiq.com> <lalyos@yahoo.com>
|
||||
Lyn <energylyn@zju.edu.cn>
|
||||
Markan Patel <mpatel678@gmail.com>
|
||||
Michael Käufl <docker@c.michael-kaeufl.de> <michael-k@users.noreply.github.com>
|
||||
Michal Minář <miminar@redhat.com>
|
||||
Michael Hudson-Doyle <michael.hudson@canonical.com> <michael.hudson@linaro.org>
|
||||
Mike Casas <mkcsas0@gmail.com> <mikecasas@users.noreply.github.com>
|
||||
Milind Chawre <milindchawre@gmail.com>
|
||||
Ma Müller <mueller-ma@users.noreply.github.com>
|
||||
Philipp Gillé <philipp.gille@gmail.com> <philippgille@users.noreply.github.com>
|
||||
Roberto Muñoz Fernández <robertomf@gmail.com> <roberto.munoz.fernandez.contractor@bbva.com>
|
||||
Sean Lee <seanlee@tw.ibm.com> <scaleoutsean@users.noreply.github.com>
|
||||
Shukui Yang <yangshukui@huawei.com>
|
||||
Stefan S. <tronicum@user.github.com>
|
||||
Steve Desmond <steve@vtsv.ca> <stevedesmond-ca@users.noreply.github.com>
|
||||
Sun Gengze <690388648@qq.com>
|
||||
Tim Zju <21651152@zju.edu.cn>
|
||||
Tõnis Tiigi <tonistiigi@gmail.com>
|
||||
Wayne Song <wsong@docker.com> <wsong@users.noreply.github.com>
|
||||
Wang Jie <wangjie5@chinaskycloud.com>
|
||||
Wang Ping <present.wp@icloud.com>
|
||||
Wang Yuexiao <wang.yuexiao@zte.com.cn>
|
||||
Wewang Xiaorenfine <wang.xiaoren@zte.com.cn>
|
||||
Wei Wu <wuwei4455@gmail.com> cizixs <cizixs@163.com>
|
||||
Ying Li <ying.li@docker.com> <cyli@twistedmatrix.com>
|
||||
Yong Tang <yong.tang.github@outlook.com> <yongtang@users.noreply.github.com>
|
||||
Yu Chengxia <yuchengxia@huawei.com>
|
||||
Yu Peng <yu.peng36@zte.com.cn>
|
||||
Yu Peng <yu.peng36@zte.com.cn> <yupeng36@zte.com.cn>
|
||||
Yao Zaiyong <yaozaiyong@hotmail.com>
|
||||
Zhenkun Bi <bi.zhenkun@zte.com.cn>
|
||||
Zhu Kunjia <zhu.kunjia@zte.com.cn>
|
||||
|
||||
108
AUTHORS
108
AUTHORS
@@ -5,6 +5,7 @@ Aanand Prasad <aanand.prasad@gmail.com>
|
||||
Aaron Davidson <aaron@databricks.com>
|
||||
Aaron Feng <aaron.feng@gmail.com>
|
||||
Aaron Huslage <huslage@gmail.com>
|
||||
Aaron L. Xu <liker.xu@foxmail.com>
|
||||
Aaron Lehmann <aaron.lehmann@docker.com>
|
||||
Aaron Welch <welch@packet.net>
|
||||
Aaron.L.Xu <likexu@harmonycloud.cn>
|
||||
@@ -37,6 +38,7 @@ Aidan Hobson Sayers <aidanhs@cantab.net>
|
||||
AJ Bowen <aj@gandi.net>
|
||||
Ajey Charantimath <ajey.charantimath@gmail.com>
|
||||
ajneu <ajneu@users.noreply.github.com>
|
||||
Akihiro Matsushima <amatsusbit@gmail.com>
|
||||
Akihiro Suda <suda.akihiro@lab.ntt.co.jp>
|
||||
Akira Koyasu <mail@akirakoyasu.net>
|
||||
Akshay Karle <akshay.a.karle@gmail.com>
|
||||
@@ -66,6 +68,7 @@ Alexander Larsson <alexl@redhat.com>
|
||||
Alexander Morozov <lk4d4@docker.com>
|
||||
Alexander Shopov <ash@kambanaria.org>
|
||||
Alexandre Beslic <alexandre.beslic@gmail.com>
|
||||
Alexandre Garnier <zigarn@gmail.com>
|
||||
Alexandre González <agonzalezro@gmail.com>
|
||||
Alexandru Sfirlogea <alexandru.sfirlogea@gmail.com>
|
||||
Alexey Guskov <lexag@mail.ru>
|
||||
@@ -80,6 +83,7 @@ Allen Madsen <blatyo@gmail.com>
|
||||
Allen Sun <allen.sun@daocloud.io>
|
||||
almoehi <almoehi@users.noreply.github.com>
|
||||
Alvaro Saurin <alvaro.saurin@gmail.com>
|
||||
Alvin Deng <alvin.q.deng@utexas.edu>
|
||||
Alvin Richards <alvin.richards@docker.com>
|
||||
amangoel <amangoel@gmail.com>
|
||||
Amen Belayneh <amenbelayneh@gmail.com>
|
||||
@@ -180,6 +184,7 @@ Ben Severson <BenSeverson@users.noreply.github.com>
|
||||
Ben Toews <mastahyeti@gmail.com>
|
||||
Ben Wiklund <ben@daisyowl.com>
|
||||
Benjamin Atkin <ben@benatkin.com>
|
||||
Benjamin Boudreau <boudreau.benjamin@gmail.com>
|
||||
Benoit Chesneau <bchesneau@gmail.com>
|
||||
Bernerd Schaefer <bj.schaefer@gmail.com>
|
||||
Bert Goethals <bert@bertg.be>
|
||||
@@ -193,6 +198,7 @@ Bingshen Wang <bingshen.wbs@alibaba-inc.com>
|
||||
Blake Geno <blakegeno@gmail.com>
|
||||
Boaz Shuster <ripcurld.github@gmail.com>
|
||||
bobby abbott <ttobbaybbob@gmail.com>
|
||||
Boris Pruessmann <boris@pruessmann.org>
|
||||
Boshi Lian <farmer1992@gmail.com>
|
||||
boucher <rboucher@gmail.com>
|
||||
Bouke Haarsma <bouke@webatoom.nl>
|
||||
@@ -278,6 +284,7 @@ Chris Armstrong <chris@opdemand.com>
|
||||
Chris Dituri <csdituri@gmail.com>
|
||||
Chris Fordham <chris@fordham-nagy.id.au>
|
||||
Chris Gavin <chris@chrisgavin.me>
|
||||
Chris Gibson <chris@chrisg.io>
|
||||
Chris Khoo <chris.khoo@gmail.com>
|
||||
Chris McKinnel <chrismckinnel@gmail.com>
|
||||
Chris Seto <chriskseto@gmail.com>
|
||||
@@ -308,11 +315,13 @@ Clayton Coleman <ccoleman@redhat.com>
|
||||
Clinton Kitson <clintonskitson@gmail.com>
|
||||
Coenraad Loubser <coenraad@wish.org.za>
|
||||
Colin Dunklau <colin.dunklau@gmail.com>
|
||||
Colin Hebert <hebert.colin@gmail.com>
|
||||
Colin Rice <colin@daedrum.net>
|
||||
Colin Walters <walters@verbum.org>
|
||||
Collin Guarino <collin.guarino@gmail.com>
|
||||
Colm Hally <colmhally@gmail.com>
|
||||
companycy <companycy@gmail.com>
|
||||
Corey Farrell <git@cfware.com>
|
||||
Cory Forsyth <cory.forsyth@gmail.com>
|
||||
cressie176 <github@stephen-cresswell.net>
|
||||
CrimsonGlory <CrimsonGlory@users.noreply.github.com>
|
||||
@@ -340,14 +349,15 @@ Dan Keder <dan.keder@gmail.com>
|
||||
Dan Levy <dan@danlevy.net>
|
||||
Dan McPherson <dmcphers@redhat.com>
|
||||
Dan Stine <sw@stinemail.com>
|
||||
Dan Walsh <dwalsh@redhat.com>
|
||||
Dan Williams <me@deedubs.com>
|
||||
Daniel Antlinger <d.antlinger@gmx.at>
|
||||
Daniel Exner <dex@dragonslave.de>
|
||||
Daniel Farrell <dfarrell@redhat.com>
|
||||
Daniel Garcia <daniel@danielgarcia.info>
|
||||
Daniel Gasienica <daniel@gasienica.ch>
|
||||
Daniel Grunwell <mwgrunny@gmail.com>
|
||||
Daniel Hiltgen <daniel.hiltgen@docker.com>
|
||||
Daniel J Walsh <dwalsh@redhat.com>
|
||||
Daniel Menet <membership@sontags.ch>
|
||||
Daniel Mizyrycki <daniel.mizyrycki@dotcloud.com>
|
||||
Daniel Nephin <dnephin@docker.com>
|
||||
@@ -386,10 +396,11 @@ David M. Karr <davidmichaelkarr@gmail.com>
|
||||
David Mackey <tdmackey@booleanhaiku.com>
|
||||
David Mat <david@davidmat.com>
|
||||
David Mcanulty <github@hellspark.com>
|
||||
David McKay <david@rawkode.com>
|
||||
David Pelaez <pelaez89@gmail.com>
|
||||
David R. Jenni <david.r.jenni@gmail.com>
|
||||
David Röthlisberger <david@rothlis.net>
|
||||
David Sheets <sheets@alum.mit.edu>
|
||||
David Sheets <dsheets@docker.com>
|
||||
David Sissitka <me@dsissitka.com>
|
||||
David Trott <github@davidtrott.com>
|
||||
David Williamson <davidwilliamson@users.noreply.github.com>
|
||||
@@ -403,6 +414,7 @@ decadent <decadent@users.noreply.github.com>
|
||||
deed02392 <georgehafiz@gmail.com>
|
||||
Deng Guangxing <dengguangxing@huawei.com>
|
||||
Deni Bertovic <deni@kset.org>
|
||||
Denis Defreyne <denis@soundcloud.com>
|
||||
Denis Gladkikh <denis@gladkikh.email>
|
||||
Denis Ollier <larchunix@users.noreply.github.com>
|
||||
Dennis Chen <barracks510@gmail.com>
|
||||
@@ -466,6 +478,7 @@ Eivin Giske Skaaren <eivinsn@axis.com>
|
||||
Eivind Uggedal <eivind@uggedal.com>
|
||||
Elan Ruusamäe <glen@delfi.ee>
|
||||
Elena Morozova <lelenanam@gmail.com>
|
||||
Elias Faxö <elias.faxo@tre.se>
|
||||
Elias Probst <mail@eliasprobst.eu>
|
||||
Elijah Zupancic <elijah@zupancic.name>
|
||||
eluck <mail@eluck.me>
|
||||
@@ -479,6 +492,7 @@ Eohyung Lee <liquidnuker@gmail.com>
|
||||
epeterso <epeterson@breakpoint-labs.com>
|
||||
Eric Barch <barch@tomesoftware.com>
|
||||
Eric Curtin <ericcurtin17@gmail.com>
|
||||
Eric G. Noriega <enoriega@vizuri.com>
|
||||
Eric Hanchrow <ehanchrow@ine.com>
|
||||
Eric Lee <thenorthsecedes@gmail.com>
|
||||
Eric Myhre <hash@exultant.us>
|
||||
@@ -486,9 +500,9 @@ Eric Paris <eparis@redhat.com>
|
||||
Eric Rafaloff <erafaloff@gmail.com>
|
||||
Eric Rosenberg <ehaydenr@users.noreply.github.com>
|
||||
Eric Sage <eric.david.sage@gmail.com>
|
||||
Erica Windisch <erica@windisch.us>
|
||||
Eric Yang <windfarer@gmail.com>
|
||||
Eric-Olivier Lamey <eo@lamey.me>
|
||||
Erica Windisch <erica@windisch.us>
|
||||
Erik Bray <erik.m.bray@gmail.com>
|
||||
Erik Dubbelboer <erik@dubbelboer.com>
|
||||
Erik Hollensbe <github@hollensbe.org>
|
||||
@@ -516,6 +530,7 @@ Ewa Czechowska <ewa@ai-traders.com>
|
||||
Eystein Måløy Stenberg <eystein.maloy.stenberg@cfengine.com>
|
||||
ezbercih <cem.ezberci@gmail.com>
|
||||
Ezra Silvera <ezra@il.ibm.com>
|
||||
Fabian Lauer <kontakt@softwareschmiede-saar.de>
|
||||
Fabiano Rosas <farosas@br.ibm.com>
|
||||
Fabio Falci <fabiofalci@gmail.com>
|
||||
Fabio Rapposelli <fabio@vmware.com>
|
||||
@@ -535,6 +550,7 @@ Felix Hupfeld <quofelix@users.noreply.github.com>
|
||||
Felix Rabe <felix@rabe.io>
|
||||
Felix Ruess <felix.ruess@gmail.com>
|
||||
Felix Schindler <fschindler@weluse.de>
|
||||
Fengtu Wang <wangfengtu@huawei.com>
|
||||
Ferenc Szabo <pragmaticfrank@gmail.com>
|
||||
Fernando <fermayo@gmail.com>
|
||||
Fero Volar <alian@alian.info>
|
||||
@@ -543,6 +559,7 @@ Filipe Brandenburger <filbranden@google.com>
|
||||
Filipe Oliveira <contato@fmoliveira.com.br>
|
||||
fl0yd <fl0yd@me.com>
|
||||
Flavio Castelli <fcastelli@suse.com>
|
||||
Flavio Crisciani <flavio.crisciani@docker.com>
|
||||
FLGMwt <ryan.stelly@live.com>
|
||||
Florian <FWirtz@users.noreply.github.com>
|
||||
Florian Klein <florian.klein@free.fr>
|
||||
@@ -551,6 +568,7 @@ Florian Weingarten <flo@hackvalue.de>
|
||||
Florin Asavoaie <florin.asavoaie@gmail.com>
|
||||
fonglh <fonglh@gmail.com>
|
||||
fortinux <fortinux@users.noreply.github.com>
|
||||
Foysal Iqbal <foysal.iqbal.fb@gmail.com>
|
||||
Francesc Campoy <campoy@google.com>
|
||||
Francis Chuang <francis.chuang@boostport.com>
|
||||
Francisco Carriedo <fcarriedo@gmail.com>
|
||||
@@ -578,9 +596,11 @@ Galen Sampson <galen.sampson@gmail.com>
|
||||
Gang Qiao <qiaohai8866@gmail.com>
|
||||
Gareth Rushgrove <gareth@morethanseven.net>
|
||||
Garrett Barboza <garrett@garrettbarboza.com>
|
||||
Gary Schaetz <gary@schaetzkc.com>
|
||||
Gaurav <gaurav.gosec@gmail.com>
|
||||
gautam, prasanna <prasannagautam@gmail.com>
|
||||
Gaël PORTAY <gael.portay@savoirfairelinux.com>
|
||||
Genki Takiuchi <genki@s21g.com>
|
||||
GennadySpb <lipenkov@gmail.com>
|
||||
Geoffrey Bachelet <grosfrais@gmail.com>
|
||||
George MacRorie <gmacr31@gmail.com>
|
||||
@@ -590,6 +610,7 @@ Gereon Frey <gereon.frey@dynport.de>
|
||||
German DZ <germ@ndz.com.ar>
|
||||
Gert van Valkenhoef <g.h.m.van.valkenhoef@rug.nl>
|
||||
Gerwim <gerwim@gmail.com>
|
||||
Giampaolo Mancini <giampaolo@trampolineup.com>
|
||||
Gianluca Borello <g.borello@gmail.com>
|
||||
Gildas Cuisinier <gildas.cuisinier@gcuisinier.net>
|
||||
gissehel <public-devgit-dantus@gissehel.org>
|
||||
@@ -605,10 +626,9 @@ Govinda Fichtner <govinda.fichtner@googlemail.com>
|
||||
Grant Reaber <grant.reaber@gmail.com>
|
||||
Graydon Hoare <graydon@pobox.com>
|
||||
Greg Fausak <greg@tacodata.com>
|
||||
Greg Pflaum <gpflaum@users.noreply.github.com>
|
||||
Greg Thornton <xdissent@me.com>
|
||||
grossws <grossws@gmail.com>
|
||||
grunny <mwgrunny@gmail.com>
|
||||
gs11 <gustav.sinder@gmail.com>
|
||||
Grzegorz Jaśkiewicz <gj.jaskiewicz@gmail.com>
|
||||
Guilhem Lettron <guilhem+github@lettron.fr>
|
||||
Guilherme Salgado <gsalgado@gmail.com>
|
||||
Guillaume Dufour <gdufour.prestataire@voyages-sncf.com>
|
||||
@@ -616,6 +636,7 @@ Guillaume J. Charmes <guillaume.charmes@docker.com>
|
||||
guoxiuyan <guoxiuyan@huawei.com>
|
||||
Gurjeet Singh <gurjeet@singh.im>
|
||||
Guruprasad <lgp171188@gmail.com>
|
||||
Gustav Sinder <gustav.sinder@gmail.com>
|
||||
gwx296173 <gaojing3@huawei.com>
|
||||
Günter Zöchbauer <guenter@gzoechbauer.com>
|
||||
Hans Kristian Flaatten <hans@starefossen.com>
|
||||
@@ -626,6 +647,7 @@ Harald Albers <github@albersweb.de>
|
||||
Harley Laue <losinggeneration@gmail.com>
|
||||
Harold Cooper <hrldcpr@gmail.com>
|
||||
Harry Zhang <harryz@hyper.sh>
|
||||
Harshal Patil <harshal.patil@in.ibm.com>
|
||||
Harshal Patil <harshalp@linux.vnet.ibm.com>
|
||||
He Simei <hesimei@zju.edu.cn>
|
||||
He Xin <he_xinworld@126.com>
|
||||
@@ -649,7 +671,7 @@ huqun <huqun@zju.edu.cn>
|
||||
Huu Nguyen <huu@prismskylabs.com>
|
||||
hyeongkyu.lee <hyeongkyu.lee@navercorp.com>
|
||||
hyp3rdino <markus.kortlang@lhsystems.com>
|
||||
Hyzhou <1187766782@qq.com>
|
||||
Hyzhou Zhy <hyzhou.zhy@alibaba-inc.com>
|
||||
Ian Babrou <ibobrik@gmail.com>
|
||||
Ian Bishop <ianbishop@pace7.com>
|
||||
Ian Bull <irbull@gmail.com>
|
||||
@@ -657,9 +679,11 @@ Ian Calvert <ianjcalvert@gmail.com>
|
||||
Ian Campbell <ian.campbell@docker.com>
|
||||
Ian Lee <IanLee1521@gmail.com>
|
||||
Ian Main <imain@redhat.com>
|
||||
Ian Philpot <ian.philpot@microsoft.com>
|
||||
Ian Truslove <ian.truslove@gmail.com>
|
||||
Iavael <iavaelooeyt@gmail.com>
|
||||
Icaro Seara <icaro.seara@gmail.com>
|
||||
Ignacio Capurro <icapurrofagian@gmail.com>
|
||||
Igor Dolzhikov <bluesriverz@gmail.com>
|
||||
Iliana Weller <iweller@amazon.com>
|
||||
Ilkka Laukkanen <ilkka@ilkka.io>
|
||||
@@ -675,6 +699,7 @@ Isao Jonas <isao.jonas@gmail.com>
|
||||
Ivan Babrou <ibobrik@gmail.com>
|
||||
Ivan Fraixedes <ifcdev@gmail.com>
|
||||
Ivan Grcic <igrcic@gmail.com>
|
||||
Ivan Markin <twim@riseup.net>
|
||||
J Bruni <joaohbruni@yahoo.com.br>
|
||||
J. Nunn <jbnunn@gmail.com>
|
||||
Jack Danger Canty <jackdanger@squareup.com>
|
||||
@@ -694,6 +719,7 @@ James Kyburz <james.kyburz@gmail.com>
|
||||
James Kyle <james@jameskyle.org>
|
||||
James Lal <james@lightsofapollo.com>
|
||||
James Mills <prologic@shortcircuit.net.au>
|
||||
James Nesbitt <james.nesbitt@wunderkraut.com>
|
||||
James Nugent <james@jen20.com>
|
||||
James Turnbull <james@lovedthanlost.net>
|
||||
Jamie Hannaford <jamie.hannaford@rackspace.com>
|
||||
@@ -747,10 +773,12 @@ Jeffrey Bolle <jeffreybolle@gmail.com>
|
||||
Jeffrey Morgan <jmorganca@gmail.com>
|
||||
Jeffrey van Gogh <jvg@google.com>
|
||||
Jenny Gebske <jennifer@gebske.de>
|
||||
Jeremy Chambers <jeremy@thehipbot.com>
|
||||
Jeremy Grosser <jeremy@synack.me>
|
||||
Jeremy Price <jprice.rhit@gmail.com>
|
||||
Jeremy Qian <vanpire110@163.com>
|
||||
Jeremy Unruh <jeremybunruh@gmail.com>
|
||||
Jeremy Yallop <yallop@docker.com>
|
||||
Jeroen Jacobs <github@jeroenj.be>
|
||||
Jesse Dearing <jesse.dearing@gmail.com>
|
||||
Jesse Dubay <jesse@thefortytwo.net>
|
||||
@@ -764,10 +792,12 @@ jianbosun <wonderflow.sun@gmail.com>
|
||||
Jie Luo <luo612@zju.edu.cn>
|
||||
Jilles Oldenbeuving <ojilles@gmail.com>
|
||||
Jim Alateras <jima@comware.com.au>
|
||||
Jim Galasyn <jim.galasyn@docker.com>
|
||||
Jim Minter <jminter@redhat.com>
|
||||
Jim Perrin <jperrin@centos.org>
|
||||
Jimmy Cuadra <jimmy@jimmycuadra.com>
|
||||
Jimmy Puckett <jimmy.puckett@spinen.com>
|
||||
Jimmy Song <rootsongjc@gmail.com>
|
||||
jimmyxian <jimmyxian2004@yahoo.com.cn>
|
||||
Jinsoo Park <cellpjs@gmail.com>
|
||||
Jiri Popelka <jpopelka@redhat.com>
|
||||
@@ -797,15 +827,19 @@ John Costa <john.costa@gmail.com>
|
||||
John Feminella <jxf@jxf.me>
|
||||
John Gardiner Myers <jgmyers@proofpoint.com>
|
||||
John Gossman <johngos@microsoft.com>
|
||||
John Harris <john@johnharris.io>
|
||||
John Howard (VM) <John.Howard@microsoft.com>
|
||||
John Laswell <john.n.laswell@gmail.com>
|
||||
John Maguire <jmaguire@duosecurity.com>
|
||||
John Mulhausen <john@docker.com>
|
||||
John OBrien III <jobrieniii@yahoo.com>
|
||||
John Starks <jostarks@microsoft.com>
|
||||
John Stephens <johnstep@docker.com>
|
||||
John Tims <john.k.tims@gmail.com>
|
||||
John V. Martinez <jvmatl@gmail.com>
|
||||
John Warwick <jwarwick@gmail.com>
|
||||
John Willis <john.willis@docker.com>
|
||||
johnharris85 <john@johnharris.io>
|
||||
Jon Johnson <jonjohnson@google.com>
|
||||
Jon Wedaman <jweede@gmail.com>
|
||||
Jonas Pfenniger <jonas@pfenniger.name>
|
||||
Jonathan A. Sternberg <jonathansternberg@gmail.com>
|
||||
@@ -820,6 +854,7 @@ Jonathan Pares <jonathanpa@users.noreply.github.com>
|
||||
Jonathan Rudenberg <jonathan@titanous.com>
|
||||
Jonathan Stoppani <jonathan.stoppani@divio.com>
|
||||
Jonh Wendell <jonh.wendell@redhat.com>
|
||||
Joni Sar <yoni@cocycles.com>
|
||||
Joost Cassee <joost@cassee.net>
|
||||
Jordan <jjn2009@users.noreply.github.com>
|
||||
Jordan Arentsen <blissdev@gmail.com>
|
||||
@@ -829,6 +864,7 @@ Jose Diaz-Gonzalez <jose@seatgeek.com>
|
||||
Joseph Anthony Pasquale Holsten <joseph@josephholsten.com>
|
||||
Joseph Hager <ajhager@gmail.com>
|
||||
Joseph Kern <jkern@semafour.net>
|
||||
Joseph Rothrock <rothrock@rothrock.org>
|
||||
Josh <jokajak@gmail.com>
|
||||
Josh Bodah <jb3689@yahoo.com>
|
||||
Josh Chorlton <jchorlton@gmail.com>
|
||||
@@ -847,6 +883,8 @@ Julien Barbier <write0@gmail.com>
|
||||
Julien Bisconti <veggiemonk@users.noreply.github.com>
|
||||
Julien Bordellier <julienbordellier@gmail.com>
|
||||
Julien Dubois <julien.dubois@gmail.com>
|
||||
Julien Kassar <github@kassisol.com>
|
||||
Julien Maitrehenry <julien.maitrehenry@me.com>
|
||||
Julien Pervillé <julien.perville@perfect-memory.com>
|
||||
Julio Montes <imc.coder@gmail.com>
|
||||
Jun-Ru Chang <jrjang@gmail.com>
|
||||
@@ -859,8 +897,9 @@ Justin Simonelis <justin.p.simonelis@gmail.com>
|
||||
Justin Terry <juterry@microsoft.com>
|
||||
Justyn Temme <justyntemme@gmail.com>
|
||||
Jyrki Puttonen <jyrkiput@gmail.com>
|
||||
Jérôme Petazzoni <jerome.petazzoni@dotcloud.com>
|
||||
Jérôme Petazzoni <jerome.petazzoni@docker.com>
|
||||
Jörg Thalheim <joerg@higgsboson.tk>
|
||||
K. Heller <pestophagous@gmail.com>
|
||||
Kai Blin <kai@samba.org>
|
||||
Kai Qiang Wu(Kennan) <wkq5325@gmail.com>
|
||||
Kamil Domański <kamil@domanski.co>
|
||||
@@ -872,6 +911,7 @@ Kareem Khazem <karkhaz@karkhaz.com>
|
||||
kargakis <kargakis@users.noreply.github.com>
|
||||
Karl Grzeszczak <karlgrz@gmail.com>
|
||||
Karol Duleba <mr.fuxi@gmail.com>
|
||||
Karthik Nayak <Karthik.188@gmail.com>
|
||||
Katie McLaughlin <katie@glasnt.com>
|
||||
Kato Kazuyoshi <kato.kazuyoshi@gmail.com>
|
||||
Katrina Owen <katrina.owen@gmail.com>
|
||||
@@ -892,6 +932,7 @@ Kent Johnson <kentoj@gmail.com>
|
||||
Kevin "qwazerty" Houdebert <kevin.houdebert@gmail.com>
|
||||
Kevin Burke <kev@inburke.com>
|
||||
Kevin Clark <kevin.clark@gmail.com>
|
||||
Kevin Feyrer <kevin.feyrer@btinternet.com>
|
||||
Kevin J. Lynagh <kevin@keminglabs.com>
|
||||
Kevin Jing Qiu <kevin@idempotent.ca>
|
||||
Kevin Kern <kaiwentan@harmonycloud.cn>
|
||||
@@ -915,6 +956,7 @@ knappe <tyler.knappe@gmail.com>
|
||||
Kohei Tsuruta <coheyxyz@gmail.com>
|
||||
Koichi Shiraishi <k@zchee.io>
|
||||
Konrad Kleine <konrad.wilhelm.kleine@gmail.com>
|
||||
Konstantin Gribov <grossws@gmail.com>
|
||||
Konstantin L <sw.double@gmail.com>
|
||||
Konstantin Pelykh <kpelykh@zettaset.com>
|
||||
Krasi Georgiev <krasi@vip-consult.solutions>
|
||||
@@ -961,7 +1003,7 @@ Liam Macgillavry <liam@kumina.nl>
|
||||
Liana Lo <liana.lixia@gmail.com>
|
||||
Liang Mingqiang <mqliang.zju@gmail.com>
|
||||
Liang-Chi Hsieh <viirya@gmail.com>
|
||||
liaoqingwei <liaoqingwei@huawei.com>
|
||||
Liao Qingwei <liaoqingwei@huawei.com>
|
||||
Lily Guo <lily.guo@docker.com>
|
||||
limsy <seongyeol37@gmail.com>
|
||||
Lin Lu <doraalin@163.com>
|
||||
@@ -994,24 +1036,24 @@ Luiz Svoboda <luizek@gmail.com>
|
||||
Lukas Waslowski <cr7pt0gr4ph7@gmail.com>
|
||||
lukaspustina <lukas.pustina@centerdevice.com>
|
||||
Lukasz Zajaczkowski <Lukasz.Zajaczkowski@ts.fujitsu.com>
|
||||
lukemarsden <luke@digital-crocus.com>
|
||||
Luke Marsden <me@lukemarsden.net>
|
||||
Lyn <energylyn@zju.edu.cn>
|
||||
Lynda O'Leary <lyndaoleary29@gmail.com>
|
||||
Lénaïc Huard <lhuard@amadeus.com>
|
||||
Ma Müller <mueller-ma@users.noreply.github.com>
|
||||
Ma Shimiao <mashimiao.fnst@cn.fujitsu.com>
|
||||
Mabin <bin.ma@huawei.com>
|
||||
Madhan Raj Mookkandy <MadhanRaj.Mookkandy@microsoft.com>
|
||||
Madhav Puri <madhav.puri@gmail.com>
|
||||
Madhu Venugopal <madhu@socketplane.io>
|
||||
Mageee <21521230.zju.edu.cn>
|
||||
Mageee <fangpuyi@foxmail.com>
|
||||
Mahesh Tiyyagura <tmahesh@gmail.com>
|
||||
malnick <malnick@gmail..com>
|
||||
Malte Janduda <mail@janduda.net>
|
||||
manchoz <giampaolo@trampolineup.com>
|
||||
Manfred Touron <m@42.am>
|
||||
Manfred Zabarauskas <manfredas@zabarauskas.com>
|
||||
Manjunath A Kumatagi <mkumatag@in.ibm.com>
|
||||
Mansi Nahar <mmn4185@rit.edu>
|
||||
mansinahar <mansinahar@users.noreply.github.com>
|
||||
Manuel Meurer <manuel@krautcomputing.com>
|
||||
Manuel Woelker <github@manuel.woelker.org>
|
||||
mapk0y <mapk0y@gmail.com>
|
||||
@@ -1037,6 +1079,7 @@ Mark McKinstry <mmckinst@umich.edu>
|
||||
Mark Milstein <mark@epiloque.com>
|
||||
Mark Parker <godefroi@users.noreply.github.com>
|
||||
Mark West <markewest@gmail.com>
|
||||
Markan Patel <mpatel678@gmail.com>
|
||||
Marko Mikulicic <mmikulicic@gmail.com>
|
||||
Marko Tibold <marko@tibold.nl>
|
||||
Markus Fix <lispmeister@gmail.com>
|
||||
@@ -1108,6 +1151,7 @@ Michael Käufl <docker@c.michael-kaeufl.de>
|
||||
Michael Neale <michael.neale@gmail.com>
|
||||
Michael Prokop <github@michael-prokop.at>
|
||||
Michael Scharf <github@scharf.gr>
|
||||
Michael Spetsiotis <michael_spets@hotmail.com>
|
||||
Michael Stapelberg <michael+gh@stapelberg.de>
|
||||
Michael Steinert <mike.steinert@gmail.com>
|
||||
Michael Thies <michaelthies78@gmail.com>
|
||||
@@ -1126,6 +1170,7 @@ Miguel Morales <mimoralea@gmail.com>
|
||||
Mihai Borobocea <MihaiBorob@gmail.com>
|
||||
Mihuleacc Sergiu <mihuleac.sergiu@gmail.com>
|
||||
Mike Brown <brownwm@us.ibm.com>
|
||||
Mike Casas <mkcsas0@gmail.com>
|
||||
Mike Chelen <michael.chelen@gmail.com>
|
||||
Mike Danese <mikedanese@google.com>
|
||||
Mike Dillon <mike@embody.org>
|
||||
@@ -1202,8 +1247,10 @@ Nicolas Goy <kuon@goyman.com>
|
||||
Nicolas Kaiser <nikai@nikai.net>
|
||||
Nicolás Hock Isaza <nhocki@gmail.com>
|
||||
Nigel Poulton <nigelpoulton@hotmail.com>
|
||||
Nik Nyby <nikolas@gnu.org>
|
||||
Nikhil Chawla <chawlanikhil24@gmail.com>
|
||||
NikolaMandic <mn080202@gmail.com>
|
||||
nikolas <nnyby@columbia.edu>
|
||||
Nikolas Garofil <nikolas.garofil@uantwerpen.be>
|
||||
Nikolay Milovanov <nmil@itransformers.net>
|
||||
Nirmal Mehta <nirmalkmehta@gmail.com>
|
||||
Nishant Totla <nishanttotla@gmail.com>
|
||||
@@ -1269,8 +1316,8 @@ Peeyush Gupta <gpeeyush@linux.vnet.ibm.com>
|
||||
Peggy Li <peggyli.224@gmail.com>
|
||||
Pei Su <sillyousu@gmail.com>
|
||||
Penghan Wang <ph.wang@daocloud.io>
|
||||
Per Weijnitz <per.weijnitz@gmail.com>
|
||||
perhapszzy@sina.com <perhapszzy@sina.com>
|
||||
pestophagous <pestophagous@users.noreply.github.com>
|
||||
Peter Bourgon <peter@bourgon.org>
|
||||
Peter Braden <peterbraden@peterbraden.co.uk>
|
||||
Peter Choi <reikani@Peters-MacBook-Pro.local>
|
||||
@@ -1278,6 +1325,7 @@ Peter Dave Hello <PeterDaveHello@users.noreply.github.com>
|
||||
Peter Edge <peter.edge@gmail.com>
|
||||
Peter Ericson <pdericson@gmail.com>
|
||||
Peter Esbensen <pkesbensen@gmail.com>
|
||||
Peter Jaffe <pjaffe@nevo.com>
|
||||
Peter Malmgren <ptmalmgren@gmail.com>
|
||||
Peter Salvatore <peter@psftw.com>
|
||||
Peter Volpe <petervo@redhat.com>
|
||||
@@ -1287,9 +1335,11 @@ Phil <underscorephil@gmail.com>
|
||||
Phil Estes <estesp@linux.vnet.ibm.com>
|
||||
Phil Spitler <pspitler@gmail.com>
|
||||
Philip Monroe <phil@philmonroe.com>
|
||||
Philipp Gillé <philipp.gille@gmail.com>
|
||||
Philipp Wahala <philipp.wahala@gmail.com>
|
||||
Philipp Weissensteiner <mail@philippweissensteiner.com>
|
||||
Phillip Alexander <git@phillipalexander.io>
|
||||
phineas <phin@phineas.io>
|
||||
pidster <pid@pidster.com>
|
||||
Piergiuliano Bossi <pgbossi@gmail.com>
|
||||
Pierre <py@poujade.org>
|
||||
@@ -1303,8 +1353,10 @@ Porjo <porjo38@yahoo.com.au>
|
||||
Poul Kjeldager Sørensen <pks@s-innovations.net>
|
||||
Pradeep Chhetri <pradeep@indix.com>
|
||||
Prasanna Gautam <prasannagautam@gmail.com>
|
||||
Pratik Karki <prertik@outlook.com>
|
||||
Prayag Verma <prayag.verma@gmail.com>
|
||||
Przemek Hejman <przemyslaw.hejman@gmail.com>
|
||||
Pure White <daniel48@126.com>
|
||||
pysqz <randomq@126.com>
|
||||
qhuang <h.huangqiang@huawei.com>
|
||||
Qiang Huang <h.huangqiang@huawei.com>
|
||||
@@ -1332,10 +1384,12 @@ Recursive Madman <recursive.madman@gmx.de>
|
||||
Reficul <xuzhenglun@gmail.com>
|
||||
Regan McCooey <rmccooey27@aol.com>
|
||||
Remi Rampin <remirampin@gmail.com>
|
||||
Remy Suen <remy.suen@gmail.com>
|
||||
Renato Riccieri Santos Zannon <renato.riccieri@gmail.com>
|
||||
resouer <resouer@163.com>
|
||||
rgstephens <greg@udon.org>
|
||||
Rhys Hiltner <rhys@twitch.tv>
|
||||
Ricardo N Feliciano <FelicianoTech@gmail.com>
|
||||
Rich Moyse <rich@moyse.us>
|
||||
Rich Seymour <rseymour@gmail.com>
|
||||
Richard <richard.scothern@gmail.com>
|
||||
@@ -1400,6 +1454,7 @@ Ryan Aslett <github@mixologic.com>
|
||||
Ryan Belgrave <rmb1993@gmail.com>
|
||||
Ryan Detzel <ryan.detzel@gmail.com>
|
||||
Ryan Fowler <rwfowler@gmail.com>
|
||||
Ryan Liu <ryanlyy@me.com>
|
||||
Ryan McLaughlin <rmclaughlin@insidesales.com>
|
||||
Ryan O'Donnell <odonnellryanc@gmail.com>
|
||||
Ryan Seto <ryanseto@yak.net>
|
||||
@@ -1407,6 +1462,7 @@ Ryan Thomas <rthomas@atlassian.com>
|
||||
Ryan Trauntvein <rtrauntvein@novacoast.com>
|
||||
Ryan Wallner <ryan.wallner@clusterhq.com>
|
||||
Ryan Zhang <ryan.zhang@docker.com>
|
||||
ryancooper7 <ryan.cooper7@gmail.com>
|
||||
RyanDeng <sheldon.d1018@gmail.com>
|
||||
Rémy Greinhofer <remy.greinhofer@livelovely.com>
|
||||
s. rannou <mxs@sbrk.org>
|
||||
@@ -1439,7 +1495,6 @@ Satnam Singh <satnam@raintown.org>
|
||||
satoru <satorulogic@gmail.com>
|
||||
Satoshi Amemiya <satoshi_amemiya@voyagegroup.com>
|
||||
Satoshi Tagomori <tagomoris@gmail.com>
|
||||
scaleoutsean <scaleoutsean@users.noreply.github.com>
|
||||
Scott Bessler <scottbessler@gmail.com>
|
||||
Scott Collier <emailscottcollier@gmail.com>
|
||||
Scott Johnston <scott@docker.com>
|
||||
@@ -1448,6 +1503,7 @@ Scott Walls <sawalls@umich.edu>
|
||||
sdreyesg <sdreyesg@gmail.com>
|
||||
Sean Christopherson <sean.j.christopherson@intel.com>
|
||||
Sean Cronin <seancron@gmail.com>
|
||||
Sean Lee <seanlee@tw.ibm.com>
|
||||
Sean McIntyre <s.mcintyre@xverba.ca>
|
||||
Sean OMeara <sean@chef.io>
|
||||
Sean P. Kane <skane@newrelic.com>
|
||||
@@ -1489,6 +1545,7 @@ Silas Sewell <silas@sewell.org>
|
||||
Silvan Jegen <s.jegen@gmail.com>
|
||||
Simei He <hesimei@zju.edu.cn>
|
||||
Simon Eskildsen <sirup@sirupsen.com>
|
||||
Simon Ferquel <simon.ferquel@docker.com>
|
||||
Simon Leinen <simon.leinen@gmail.com>
|
||||
Simon Taranto <simon.taranto@gmail.com>
|
||||
Sindhu S <sindhus@live.in>
|
||||
@@ -1517,6 +1574,7 @@ Stephen Crosby <stevecrozz@gmail.com>
|
||||
Stephen Day <stephen.day@docker.com>
|
||||
Stephen Drake <stephen@xenolith.net>
|
||||
Stephen Rust <srust@blockbridge.com>
|
||||
Steve Desmond <steve@vtsv.ca>
|
||||
Steve Dougherty <steve@asksteved.com>
|
||||
Steve Durrheimer <s.durrheimer@gmail.com>
|
||||
Steve Francia <steve.francia@gmail.com>
|
||||
@@ -1531,6 +1589,7 @@ Steven Taylor <steven.taylor@me.com>
|
||||
Subhajit Ghosh <isubuz.g@gmail.com>
|
||||
Sujith Haridasan <sujith.h@gmail.com>
|
||||
Sun Gengze <690388648@qq.com>
|
||||
Sunny Gogoi <indiasuny000@gmail.com>
|
||||
Suryakumar Sudar <surya.trunks@gmail.com>
|
||||
Sven Dowideit <SvenDowideit@home.org.au>
|
||||
Swapnil Daingade <swapnil.daingade@gmail.com>
|
||||
@@ -1539,6 +1598,7 @@ Sylvain Bellemare <sylvain@ascribe.io>
|
||||
Sébastien <sebastien@yoozio.com>
|
||||
Sébastien Luttringer <seblu@seblu.net>
|
||||
Sébastien Stormacq <sebsto@users.noreply.github.com>
|
||||
Tabakhase <mail@tabakhase.com>
|
||||
Tadej Janež <tadej.j@nez.si>
|
||||
TAGOMORI Satoshi <tagomoris@gmail.com>
|
||||
tang0th <tang0th@gmx.com>
|
||||
@@ -1638,6 +1698,7 @@ Tristan Carel <tristan@cogniteev.com>
|
||||
Troy Denton <trdenton@gmail.com>
|
||||
Tyler Brock <tyler.brock@gmail.com>
|
||||
Tzu-Jung Lee <roylee17@gmail.com>
|
||||
uhayate <uhayate.gong@daocloud.io>
|
||||
Ulysse Carion <ulyssecarion@gmail.com>
|
||||
unknown <sebastiaan@ws-key-sebas3.dpi1.dpi>
|
||||
vagrant <vagrant@ubuntu-14.04-amd64-vbox>
|
||||
@@ -1660,13 +1721,14 @@ VinayRaghavanKS <raghavan.vinay@gmail.com>
|
||||
Vincent Batts <vbatts@redhat.com>
|
||||
Vincent Bernat <bernat@luffy.cx>
|
||||
Vincent Bernat <Vincent.Bernat@exoscale.ch>
|
||||
Vincent Demeester <vincent@sbr.pm>
|
||||
Vincent Demeester <vincent.demeester@docker.com>
|
||||
Vincent Giersch <vincent.giersch@ovh.net>
|
||||
Vincent Mayers <vincent.mayers@inbloom.org>
|
||||
Vincent Woo <me@vincentwoo.com>
|
||||
Vinod Kulkarni <vinod.kulkarni@gmail.com>
|
||||
Vishal Doshi <vishal.doshi@gmail.com>
|
||||
Vishnu Kannan <vishnuk@google.com>
|
||||
Vitaly Ostrosablin <vostrosablin@virtuozzo.com>
|
||||
Vitor Monteiro <vmrmonteiro@gmail.com>
|
||||
Vivek Agarwal <me@vivek.im>
|
||||
Vivek Dasgupta <vdasgupt@redhat.com>
|
||||
@@ -1682,6 +1744,7 @@ waitingkuo <waitingkuo0527@gmail.com>
|
||||
Walter Leibbrandt <github@wrl.co.za>
|
||||
Walter Stanish <walter@pratyeka.org>
|
||||
WANG Chao <wcwxyz@gmail.com>
|
||||
Wang Jie <wangjie5@chinaskycloud.com>
|
||||
Wang Long <long.wanglong@huawei.com>
|
||||
Wang Ping <present.wp@icloud.com>
|
||||
Wang Xing <hzwangxing@corp.netease.com>
|
||||
@@ -1689,6 +1752,7 @@ Wang Yuexiao <wang.yuexiao@zte.com.cn>
|
||||
Ward Vandewege <ward@jhvc.com>
|
||||
WarheadsSE <max@warheads.net>
|
||||
Wayne Chang <wayne@neverfear.org>
|
||||
Wayne Song <wsong@docker.com>
|
||||
Wei Wu <wuwei4455@gmail.com>
|
||||
Wei-Ting Kuo <waitingkuo0527@gmail.com>
|
||||
weiyan <weiyan3@huawei.com>
|
||||
@@ -1732,8 +1796,8 @@ Yahya <ya7yaz@gmail.com>
|
||||
YAMADA Tsuyoshi <tyamada@minimum2scp.org>
|
||||
Yan Feng <yanfeng2@huawei.com>
|
||||
Yang Bai <hamo.by@gmail.com>
|
||||
yangshukui <yangshukui@huawei.com>
|
||||
Yanqiang Miao <miao.yanqiang@zte.com.cn>
|
||||
Yao Zaiyong <yaozaiyong@hotmail.com>
|
||||
Yasunori Mahata <nori@mahata.net>
|
||||
Yestin Sun <sunyi0804@gmail.com>
|
||||
Yi EungJun <eungjun.yi@navercorp.com>
|
||||
@@ -1746,10 +1810,11 @@ Yongzhi Pan <panyongzhi@gmail.com>
|
||||
yorkie <yorkiefixer@gmail.com>
|
||||
You-Sheng Yang (楊有勝) <vicamo@gmail.com>
|
||||
Youcef YEKHLEF <yyekhlef@gmail.com>
|
||||
Yu Changchun <yuchangchun1@huawei.com>
|
||||
Yu Chengxia <yuchengxia@huawei.com>
|
||||
Yu Peng <yu.peng36@zte.com.cn>
|
||||
Yuan Sun <sunyuan3@huawei.com>
|
||||
yuchangchun <yuchangchun1@huawei.com>
|
||||
yuchengxia <yuchengxia@huawei.com>
|
||||
Yuanhong Peng <pengyuanhong@huawei.com>
|
||||
Yunxiang Huang <hyxqshk@vip.qq.com>
|
||||
Yurii Rashkovskii <yrashk@gmail.com>
|
||||
yuzou <zouyu7@huawei.com>
|
||||
@@ -1785,4 +1850,5 @@ Zunayed Ali <zunayed@gmail.com>
|
||||
Álvaro Lázaro <alvaro.lazaro.g@gmail.com>
|
||||
Átila Camurça Alves <camurca.home@gmail.com>
|
||||
尹吉峰 <jifeng.yin@gmail.com>
|
||||
徐俊杰 <paco.xu@daocloud.io>
|
||||
搏通 <yufeng.pyf@alibaba-inc.com>
|
||||
|
||||
86
CHANGELOG.md
86
CHANGELOG.md
@@ -5,6 +5,92 @@ information on the list of deprecated flags and APIs please have a look at
|
||||
https://docs.docker.com/engine/deprecated/ where target removal dates can also
|
||||
be found.
|
||||
|
||||
## 17.05.0-ce (2017-05-04)
|
||||
|
||||
### Builder
|
||||
|
||||
+ Add multi-stage build support [#31257](https://github.com/docker/docker/pull/31257) [#32063](https://github.com/docker/docker/pull/32063)
|
||||
+ Allow using build-time args (`ARG`) in `FROM` [#31352](https://github.com/docker/docker/pull/31352)
|
||||
+ Add an option for specifying build target [#32496](https://github.com/docker/docker/pull/32496)
|
||||
* Accept `-f -` to read Dockerfile from `stdin`, but use local context for building [#31236](https://github.com/docker/docker/pull/31236)
|
||||
* The values of default build time arguments (e.g `HTTP_PROXY`) are no longer displayed in docker image history unless a corresponding `ARG` instruction is written in the Dockerfile. [#31584](https://github.com/docker/docker/pull/31584)
|
||||
- Fix setting command if a custom shell is used in a parent image [#32236](https://github.com/docker/docker/pull/32236)
|
||||
- Fix `docker build --label` when the label includes single quotes and a space [#31750](https://github.com/docker/docker/pull/31750)
|
||||
|
||||
### Client
|
||||
|
||||
* Add `--mount` flag to `docker run` and `docker create` [#32251](https://github.com/docker/docker/pull/32251)
|
||||
* Add `--type=secret` to `docker inspect` [#32124](https://github.com/docker/docker/pull/32124)
|
||||
* Add `--format` option to `docker secret ls` [#31552](https://github.com/docker/docker/pull/31552)
|
||||
* Add `--filter` option to `docker secret ls` [#30810](https://github.com/docker/docker/pull/30810)
|
||||
* Add `--filter scope=<swarm|local>` to `docker network ls` [#31529](https://github.com/docker/docker/pull/31529)
|
||||
* Add `--cpus` support to `docker update` [#31148](https://github.com/docker/docker/pull/31148)
|
||||
* Add label filter to `docker system prune` and other `prune` commands [#30740](https://github.com/docker/docker/pull/30740)
|
||||
* `docker stack rm` now accepts multiple stacks as input [#32110](https://github.com/docker/docker/pull/32110)
|
||||
* Improve `docker version --format` option when the client has downgraded the API version [#31022](https://github.com/docker/docker/pull/31022)
|
||||
* Prompt when using an encrypted client certificate to connect to a docker daemon [#31364](https://github.com/docker/docker/pull/31364)
|
||||
* Display created tags on successful `docker build` [#32077](https://github.com/docker/docker/pull/32077)
|
||||
* Cleanup compose convert error messages [#32087](https://github.com/moby/moby/pull/32087)
|
||||
|
||||
### Contrib
|
||||
|
||||
+ Add support for building docker debs for Ubuntu 17.04 Zesty on amd64 [#32435](https://github.com/docker/docker/pull/32435)
|
||||
|
||||
### Daemon
|
||||
|
||||
- Fix `--api-cors-header` being ignored if `--api-enable-cors` is not set [#32174](https://github.com/docker/docker/pull/32174)
|
||||
- Cleanup docker tmp dir on start [#31741](https://github.com/docker/docker/pull/31741)
|
||||
- Deprecate `--graph` flag in favor or `--data-root` [#28696](https://github.com/docker/docker/pull/28696)
|
||||
|
||||
### Logging
|
||||
|
||||
+ Add support for logging driver plugins [#28403](https://github.com/docker/docker/pull/28403)
|
||||
* Add support for showing logs of individual tasks to `docker service logs`, and add `/task/{id}/logs` REST endpoint [#32015](https://github.com/docker/docker/pull/32015)
|
||||
* Add `--log-opt env-regex` option to match environment variables using a regular expression [#27565](https://github.com/docker/docker/pull/27565)
|
||||
|
||||
### Networking
|
||||
|
||||
+ Allow user to replace, and customize the ingress network [#31714](https://github.com/docker/docker/pull/31714)
|
||||
- Fix UDP traffic in containers not working after the container is restarted [#32505](https://github.com/docker/docker/pull/32505)
|
||||
- Fix files being written to `/var/lib/docker` if a different data-root is set [#32505](https://github.com/docker/docker/pull/32505)
|
||||
|
||||
### Runtime
|
||||
|
||||
- Ensure health probe is stopped when a container exits [#32274](https://github.com/docker/docker/pull/32274)
|
||||
|
||||
### Swarm Mode
|
||||
|
||||
+ Add update/rollback order for services (`--update-order` / `--rollback-order`) [#30261](https://github.com/docker/docker/pull/30261)
|
||||
+ Add support for synchronous `service create` and `service update` [#31144](https://github.com/docker/docker/pull/31144)
|
||||
+ Add support for "grace periods" on healthchecks through the `HEALTHCHECK --start-period` and `--health-start-period` flag to
|
||||
`docker service create`, `docker service update`, `docker create`, and `docker run` to support containers with an initial startup
|
||||
time [#28938](https://github.com/docker/docker/pull/28938)
|
||||
* `docker service create` now omits fields that are not specified by the user, when possible. This will allow defaults to be applied inside the manager [#32284](https://github.com/docker/docker/pull/32284)
|
||||
* `docker service inspect` now shows default values for fields that are not specified by the user [#32284](https://github.com/docker/docker/pull/32284)
|
||||
* Move `docker service logs` out of experimental [#32462](https://github.com/docker/docker/pull/32462)
|
||||
* Add support for Credential Spec and SELinux to services to the API [#32339](https://github.com/docker/docker/pull/32339)
|
||||
* Add `--entrypoint` flag to `docker service create` and `docker service update` [#29228](https://github.com/docker/docker/pull/29228)
|
||||
* Add `--network-add` and `--network-rm` to `docker service update` [#32062](https://github.com/docker/docker/pull/32062)
|
||||
* Add `--credential-spec` flag to `docker service create` and `docker service update` [#32339](https://github.com/docker/docker/pull/32339)
|
||||
* Add `--filter mode=<global|replicated>` to `docker service ls` [#31538](https://github.com/docker/docker/pull/31538)
|
||||
* Resolve network IDs on the client side, instead of in the daemon when creating services [#32062](https://github.com/docker/docker/pull/32062)
|
||||
* Add `--format` option to `docker node ls` [#30424](https://github.com/docker/docker/pull/30424)
|
||||
* Add `--prune` option to `docker stack deploy` to remove services that are no longer defined in the docker-compose file [#31302](https://github.com/docker/docker/pull/31302)
|
||||
* Add `PORTS` column for `docker service ls` when using `ingress` mode [#30813](https://github.com/docker/docker/pull/30813)
|
||||
- Fix unnescessary re-deploying of tasks when environment-variables are used [#32364](https://github.com/docker/docker/pull/32364)
|
||||
- Fix `docker stack deploy` not supporting `endpoint_mode` when deploying from a docker compose file [#32333](https://github.com/docker/docker/pull/32333)
|
||||
- Proceed with startup if cluster component cannot be created to allow recovering from a broken swarm setup [#31631](https://github.com/docker/docker/pull/31631)
|
||||
|
||||
### Security
|
||||
|
||||
* Allow setting SELinux type or MCS labels when using `--ipc=container:` or `--ipc=host` [#30652](https://github.com/docker/docker/pull/30652)
|
||||
|
||||
|
||||
### Deprecation
|
||||
|
||||
- Deprecate `--api-enable-cors` daemon flag. This flag was marked deprecated in Docker 1.6.0 but not listed in deprecated features [#32352](https://github.com/docker/docker/pull/32352)
|
||||
- Remove Ubuntu 12.04 (Precise Pangolin) as supported platform. Ubuntu 12.04 is EOL, and no longer receives updates [#32520](https://github.com/docker/docker/pull/32520)
|
||||
|
||||
## 17.04.0-ce (2017-04-05)
|
||||
|
||||
### Builder
|
||||
|
||||
38
Dockerfile
38
Dockerfile
@@ -9,7 +9,7 @@
|
||||
# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash
|
||||
#
|
||||
# # Run the test suite:
|
||||
# docker run --privileged docker hack/make.sh test-unit test-integration-cli test-docker-py
|
||||
# docker run -e DOCKER_GITCOMMIT=foo --privileged docker hack/make.sh test-unit test-integration-cli test-docker-py
|
||||
#
|
||||
# # Publish a release:
|
||||
# docker run --privileged \
|
||||
@@ -29,11 +29,6 @@ FROM debian:jessie
|
||||
ARG APT_MIRROR=deb.debian.org
|
||||
RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list
|
||||
|
||||
# Add zfs ppa
|
||||
COPY keys/launchpad-ppa-zfs.asc /go/src/github.com/docker/docker/keys/
|
||||
RUN apt-key add /go/src/github.com/docker/docker/keys/launchpad-ppa-zfs.asc
|
||||
RUN echo deb http://ppa.launchpad.net/zfs-native/stable/ubuntu trusty main > /etc/apt/sources.list.d/zfs.list
|
||||
|
||||
# Packaged dependencies
|
||||
RUN apt-get update && apt-get install -y \
|
||||
apparmor \
|
||||
@@ -63,7 +58,6 @@ RUN apt-get update && apt-get install -y \
|
||||
libprotobuf-dev \
|
||||
libsystemd-journal-dev \
|
||||
libtool \
|
||||
libzfs-dev \
|
||||
mercurial \
|
||||
net-tools \
|
||||
pkg-config \
|
||||
@@ -74,7 +68,6 @@ RUN apt-get update && apt-get install -y \
|
||||
python-pip \
|
||||
python-websocket \
|
||||
tar \
|
||||
ubuntu-zfs \
|
||||
vim \
|
||||
vim-common \
|
||||
xfsprogs \
|
||||
@@ -97,17 +90,6 @@ RUN cd /usr/local/lvm2 \
|
||||
&& make install_device-mapper
|
||||
# See https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL
|
||||
|
||||
# Configure the container for OSX cross compilation
|
||||
ENV OSX_SDK MacOSX10.11.sdk
|
||||
ENV OSX_CROSS_COMMIT a9317c18a3a457ca0a657f08cc4d0d43c6cf8953
|
||||
RUN set -x \
|
||||
&& export OSXCROSS_PATH="/osxcross" \
|
||||
&& git clone https://github.com/tpoechtrager/osxcross.git $OSXCROSS_PATH \
|
||||
&& ( cd $OSXCROSS_PATH && git checkout -q $OSX_CROSS_COMMIT) \
|
||||
&& curl -sSL https://s3.dockerproject.org/darwin/v2/${OSX_SDK}.tar.xz -o "${OSXCROSS_PATH}/tarballs/${OSX_SDK}.tar.xz" \
|
||||
&& UNATTENDED=yes OSX_VERSION_MIN=10.6 ${OSXCROSS_PATH}/build.sh
|
||||
ENV PATH /osxcross/target/bin:$PATH
|
||||
|
||||
# Install seccomp: the version shipped upstream is too old
|
||||
ENV SECCOMP_VERSION 2.3.2
|
||||
RUN set -x \
|
||||
@@ -127,7 +109,8 @@ RUN set -x \
|
||||
# IMPORTANT: If the version of Go is updated, the Windows to Linux CI machines
|
||||
# will need updating, to avoid errors. Ping #docker-maintainers on IRC
|
||||
# with a heads-up.
|
||||
ENV GO_VERSION 1.7.5
|
||||
# IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored
|
||||
ENV GO_VERSION 1.8.3
|
||||
RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" \
|
||||
| tar -xzC /usr/local
|
||||
|
||||
@@ -235,17 +218,18 @@ RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker
|
||||
# Get useful and necessary Hub images so we can "docker load" locally instead of pulling
|
||||
COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/
|
||||
RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \
|
||||
buildpack-deps:jessie@sha256:25785f89240fbcdd8a74bdaf30dd5599a9523882c6dfc567f2e9ef7cf6f79db6 \
|
||||
busybox:latest@sha256:e4f93f6ed15a0cdd342f5aae387886fba0ab98af0a102da6276eaf24d6e6ade0 \
|
||||
debian:jessie@sha256:f968f10b4b523737e253a97eac59b0d1420b5c19b69928d35801a6373ffe330e \
|
||||
hello-world:latest@sha256:8be990ef2aeb16dbcb9271ddfe2610fa6658d13f6dfb8bc72074cc1ca36966a7
|
||||
# See also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is)
|
||||
buildpack-deps:jessie@sha256:85b379ec16065e4fe4127eb1c5fb1bcc03c559bd36dbb2e22ff496de55925fa6 \
|
||||
busybox:latest@sha256:32f093055929dbc23dec4d03e09dfe971f5973a9ca5cf059cbfb644c206aa83f \
|
||||
debian:jessie@sha256:72f784399fd2719b4cb4e16ef8e369a39dc67f53d978cd3e2e7bf4e502c7b793 \
|
||||
hello-world:latest@sha256:c5515758d4c5e1e838e9cd307f6c6a0d620b5e07e6f927b07d05f6d12a1ac8d7
|
||||
# See also ensureFrozenImagesLinux() in "integration-cli/fixtures_linux_daemon_test.go" (which needs to be updated when adding images to this list)
|
||||
|
||||
# Install tomlv, vndr, runc, containerd, tini, docker-proxy
|
||||
# Install tomlv, vndr, runc, containerd, tini, docker-proxy dockercli
|
||||
# Please edit hack/dockerfile/install-binaries.sh to update them.
|
||||
COPY hack/dockerfile/binaries-commits /tmp/binaries-commits
|
||||
COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh
|
||||
RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy bindata
|
||||
RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy bindata dockercli
|
||||
ENV PATH=/usr/local/cli:$PATH
|
||||
|
||||
# Wrap all commands in the "docker-in-docker" script to allow nested containers
|
||||
ENTRYPOINT ["hack/dind"]
|
||||
|
||||
@@ -98,7 +98,8 @@ RUN set -x \
|
||||
# bootstrap, so we use golang-go (1.6) as bootstrap to build Go from source code.
|
||||
# We don't use the official ARMv6 released binaries as a GOROOT_BOOTSTRAP, because
|
||||
# not all ARM64 platforms support 32-bit mode. 32-bit mode is optional for ARMv8.
|
||||
ENV GO_VERSION 1.7.5
|
||||
# IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored
|
||||
ENV GO_VERSION 1.8.3
|
||||
RUN mkdir /usr/src/go && curl -fsSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/src/go -xz --strip-components=1 \
|
||||
&& cd /usr/src/go/src \
|
||||
&& GOOS=linux GOARCH=arm64 GOROOT_BOOTSTRAP="$(go env GOROOT)" ./make.bash
|
||||
@@ -182,17 +183,18 @@ RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker
|
||||
# Get useful and necessary Hub images so we can "docker load" locally instead of pulling
|
||||
COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/
|
||||
RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \
|
||||
aarch64/buildpack-deps:jessie@sha256:6aa1d6910791b7ac78265fd0798e5abd6cb3f27ae992f6f960f6c303ec9535f2 \
|
||||
aarch64/busybox:latest@sha256:b23a6a37cf269dff6e46d2473b6e227afa42b037e6d23435f1d2bc40fc8c2828 \
|
||||
aarch64/debian:jessie@sha256:4be74a41a7c70ebe887b634b11ffe516cf4fcd56864a54941e56bb49883c3170 \
|
||||
aarch64/hello-world:latest@sha256:65a4a158587b307bb02db4de41b836addb0c35175bdc801367b1ac1ddeb9afda
|
||||
# See also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is)
|
||||
aarch64/buildpack-deps:jessie@sha256:107f4a96837ed89c493fc205cd28508ed0b6b680b4bf3e514e9f0fa0f6667b77 \
|
||||
aarch64/busybox:latest@sha256:5a06b8b2fdf22dd1f4085c6c3efd23ee99af01b2d668d286bc4be6d8baa10efb \
|
||||
aarch64/debian:jessie@sha256:e6f90b568631705bd5cb27490977378ba762792b38d47c91c4da7a539f63079a \
|
||||
aarch64/hello-world:latest@sha256:bd1722550b97668b23ede297abf824d4855f4d9f600dab7b4db1a963dae7ec9e
|
||||
# See also ensureFrozenImagesLinux() in "integration-cli/fixtures_linux_daemon_test.go" (which needs to be updated when adding images to this list)
|
||||
|
||||
# Install tomlv, vndr, runc, containerd, tini, docker-proxy
|
||||
# Please edit hack/dockerfile/install-binaries.sh to update them.
|
||||
COPY hack/dockerfile/binaries-commits /tmp/binaries-commits
|
||||
COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh
|
||||
RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy
|
||||
RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy dockercli
|
||||
ENV PATH=/usr/local/cli:$PATH
|
||||
|
||||
# Wrap all commands in the "docker-in-docker" script to allow nested containers
|
||||
ENTRYPOINT ["hack/dind"]
|
||||
|
||||
@@ -71,7 +71,8 @@ RUN cd /usr/local/lvm2 \
|
||||
# See https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL
|
||||
|
||||
# Install Go
|
||||
ENV GO_VERSION 1.7.5
|
||||
# IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored
|
||||
ENV GO_VERSION 1.8.3
|
||||
RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" \
|
||||
| tar -xzC /usr/local
|
||||
ENV PATH /go/bin:/usr/local/go/bin:$PATH
|
||||
@@ -163,17 +164,18 @@ RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker
|
||||
# Get useful and necessary Hub images so we can "docker load" locally instead of pulling
|
||||
COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/
|
||||
RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \
|
||||
armhf/buildpack-deps:jessie@sha256:ca6cce8e5bf5c952129889b5cc15cd6aa8d995d77e55e3749bbaadae50e476cb \
|
||||
armhf/busybox:latest@sha256:d98a7343ac750ffe387e3d514f8521ba69846c216778919b01414b8617cfb3d4 \
|
||||
armhf/debian:jessie@sha256:4a2187483f04a84f9830910fe3581d69b3c985cc045d9f01d8e2f3795b28107b \
|
||||
armhf/hello-world:latest@sha256:161dcecea0225975b2ad5f768058212c1e0d39e8211098666ffa1ac74cfb7791
|
||||
# See also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is)
|
||||
armhf/buildpack-deps:jessie@sha256:eb2dad77ef53e88d94c3c83862d315c806ea1ca49b6e74f4db362381365ce489 \
|
||||
armhf/busybox:latest@sha256:016a1e149d2acc2a3789a160dfa60ce870794eea27ad5e96f7a101970e5e1689 \
|
||||
armhf/debian:jessie@sha256:ac59fa18b28d0ef751eabb5ba4c4b5a9063f99398bae2f70495aa8ed6139b577 \
|
||||
armhf/hello-world:latest@sha256:9701edc932223a66e49dd6c894a11db8c2cf4eccd1414f1ec105a623bf16b426
|
||||
# See also ensureFrozenImagesLinux() in "integration-cli/fixtures_linux_daemon_test.go" (which needs to be updated when adding images to this list)
|
||||
|
||||
# Install tomlv, vndr, runc, containerd, tini, docker-proxy
|
||||
# Please edit hack/dockerfile/install-binaries.sh to update them.
|
||||
COPY hack/dockerfile/binaries-commits /tmp/binaries-commits
|
||||
COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh
|
||||
RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy
|
||||
RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy dockercli
|
||||
ENV PATH=/usr/local/cli:$PATH
|
||||
|
||||
ENTRYPOINT ["hack/dind"]
|
||||
|
||||
|
||||
@@ -95,7 +95,8 @@ RUN set -x \
|
||||
|
||||
# Install Go
|
||||
# NOTE: official ppc64le go binaries weren't available until go 1.6.4 and 1.7.4
|
||||
ENV GO_VERSION 1.7.5
|
||||
# IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored
|
||||
ENV GO_VERSION 1.8.3
|
||||
RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" \
|
||||
| tar -xzC /usr/local
|
||||
|
||||
@@ -169,17 +170,18 @@ RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker
|
||||
# Get useful and necessary Hub images so we can "docker load" locally instead of pulling
|
||||
COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/
|
||||
RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \
|
||||
ppc64le/buildpack-deps:jessie@sha256:902bfe4ef1389f94d143d64516dd50a2de75bca2e66d4a44b1d73f63ddf05dda \
|
||||
ppc64le/busybox:latest@sha256:38bb82085248d5a3c24bd7a5dc146f2f2c191e189da0441f1c2ca560e3fc6f1b \
|
||||
ppc64le/debian:jessie@sha256:412845f51b6ab662afba71bc7a716e20fdb9b84f185d180d4c7504f8a75c4f91 \
|
||||
ppc64le/hello-world:latest@sha256:186a40a9a02ca26df0b6c8acdfb8ac2f3ae6678996a838f977e57fac9d963974
|
||||
# See also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is)
|
||||
ppc64le/buildpack-deps:jessie@sha256:1a2f2d2cc8738f14b336aeffc3503b5c9dedf9e1f26c7313cb4999534ad4716f \
|
||||
ppc64le/busybox:latest@sha256:54f34c83adfab20cf0e630d879e210f07b0062cd6caaf16346a61396d50e7584 \
|
||||
ppc64le/debian:jessie@sha256:ea8c5b105e3790f075145b40e4be1e4488c9f33f55e6cc45182047b80a68f892 \
|
||||
ppc64le/hello-world:latest@sha256:7d57adf137665f748956c86089320710b66d08584db3500ed98f4bb3da637c2d
|
||||
# See also ensureFrozenImagesLinux() in "integration-cli/fixtures_linux_daemon_test.go" (which needs to be updated when adding images to this list)
|
||||
|
||||
# Install tomlv, vndr, runc, containerd, tini, docker-proxy
|
||||
# Please edit hack/dockerfile/install-binaries.sh to update them.
|
||||
COPY hack/dockerfile/binaries-commits /tmp/binaries-commits
|
||||
COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh
|
||||
RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy
|
||||
RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy dockercli
|
||||
ENV PATH=/usr/local/cli:$PATH
|
||||
|
||||
# Wrap all commands in the "docker-in-docker" script to allow nested containers
|
||||
ENTRYPOINT ["hack/dind"]
|
||||
|
||||
@@ -88,7 +88,8 @@ RUN cd /usr/local/lvm2 \
|
||||
&& make install_device-mapper
|
||||
# See https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL
|
||||
|
||||
ENV GO_VERSION 1.7.5
|
||||
# IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored
|
||||
ENV GO_VERSION 1.8.3
|
||||
RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-s390x.tar.gz" \
|
||||
| tar -xzC /usr/local
|
||||
|
||||
@@ -162,17 +163,18 @@ RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker
|
||||
# Get useful and necessary Hub images so we can "docker load" locally instead of pulling
|
||||
COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/
|
||||
RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \
|
||||
s390x/buildpack-deps:jessie@sha256:4d1381224acaca6c4bfe3604de3af6972083a8558a99672cb6989c7541780099 \
|
||||
s390x/busybox:latest@sha256:dd61522c983884a66ed72d60301925889028c6d2d5e0220a8fe1d9b4c6a4f01b \
|
||||
s390x/debian:jessie@sha256:b74c863400909eff3c5e196cac9bfd1f6333ce47aae6a38398d87d5875da170a \
|
||||
s390x/hello-world:latest@sha256:780d80b3a7677c3788c0d5cd9168281320c8d4a6d9183892d8ee5cdd610f5699
|
||||
# See also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is)
|
||||
s390x/buildpack-deps:jessie@sha256:552dec28146e4d2591fc0309aebdbac9e4fb1f335d90c70a14bbf72fb8bb1be5 \
|
||||
s390x/busybox:latest@sha256:e32f40c39ca596a4317392bd32809bb188c4ae5864ea827c3219c75c50069964 \
|
||||
s390x/debian:jessie@sha256:6994e3ffa5a1dabea09d536f350b3ed2715292cb469417c42a82b70fcbff7d32 \
|
||||
s390x/hello-world:latest@sha256:602db500fee63934292260e65c0c528128ad1c1c7c6497f95bbbac7d4d5312f1
|
||||
# See also ensureFrozenImagesLinux() in "integration-cli/fixtures_linux_daemon_test.go" (which needs to be updated when adding images to this list)
|
||||
|
||||
# Install tomlv, vndr, runc, containerd, tini, docker-proxy
|
||||
# Please edit hack/dockerfile/install-binaries.sh to update them.
|
||||
COPY hack/dockerfile/binaries-commits /tmp/binaries-commits
|
||||
COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh
|
||||
RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy
|
||||
RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy dockercli
|
||||
ENV PATH=/usr/local/cli:$PATH
|
||||
|
||||
# Wrap all commands in the "docker-in-docker" script to allow nested containers
|
||||
ENTRYPOINT ["hack/dind"]
|
||||
|
||||
@@ -53,7 +53,8 @@ RUN set -x \
|
||||
# IMPORTANT: If the version of Go is updated, the Windows to Linux CI machines
|
||||
# will need updating, to avoid errors. Ping #docker-maintainers on IRC
|
||||
# with a heads-up.
|
||||
ENV GO_VERSION 1.7.5
|
||||
# IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored
|
||||
ENV GO_VERSION 1.8.3
|
||||
RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" \
|
||||
| tar -xzC /usr/local
|
||||
ENV PATH /go/bin:/usr/local/go/bin:$PATH
|
||||
@@ -64,7 +65,8 @@ ENV CGO_LDFLAGS -L/lib
|
||||
# Please edit hack/dockerfile/install-binaries.sh to update them.
|
||||
COPY hack/dockerfile/binaries-commits /tmp/binaries-commits
|
||||
COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh
|
||||
RUN /tmp/install-binaries.sh runc containerd tini proxy
|
||||
RUN /tmp/install-binaries.sh runc containerd tini proxy dockercli
|
||||
ENV PATH=/usr/local/cli:$PATH
|
||||
|
||||
ENV AUTO_GOPATH 1
|
||||
WORKDIR /usr/src/docker
|
||||
|
||||
@@ -161,7 +161,7 @@ SHELL ["powershell", "-Command", "$ErrorActionPreference = 'Stop'; $ProgressPref
|
||||
# Environment variable notes:
|
||||
# - GO_VERSION must be consistent with 'Dockerfile' used by Linux.
|
||||
# - FROM_DOCKERFILE is used for detection of building within a container.
|
||||
ENV GO_VERSION=1.7.5 `
|
||||
ENV GO_VERSION=1.8.3 `
|
||||
GIT_VERSION=2.11.1 `
|
||||
GOPATH=C:\go `
|
||||
FROM_DOCKERFILE=1
|
||||
|
||||
@@ -312,7 +312,7 @@
|
||||
|
||||
[people.icecrime]
|
||||
Name = "Arnaud Porterie"
|
||||
Email = "arnaud@docker.com"
|
||||
Email = "icecrime@gmail.com"
|
||||
GitHub = "icecrime"
|
||||
|
||||
[people.jamtur01]
|
||||
|
||||
16
Makefile
16
Makefile
@@ -7,7 +7,7 @@ DOCKER_INCREMENTAL_BINARY := $(if $(DOCKER_INCREMENTAL_BINARY),$(DOCKER_INCREMEN
|
||||
export DOCKER_INCREMENTAL_BINARY
|
||||
|
||||
# get OS/Arch of docker engine
|
||||
DOCKER_OSARCH := $(shell bash -c 'source hack/make/.detect-daemon-osarch && echo $${DOCKER_ENGINE_OSARCH:-$$DOCKER_CLIENT_OSARCH}')
|
||||
DOCKER_OSARCH := $(shell bash -c 'source hack/make/.detect-daemon-osarch && echo $${DOCKER_ENGINE_OSARCH}')
|
||||
DOCKERFILE := $(shell bash -c 'source hack/make/.detect-daemon-osarch && echo $${DOCKERFILE}')
|
||||
|
||||
DOCKER_GITCOMMIT := $(shell git rev-parse --short HEAD || echo unsupported)
|
||||
@@ -17,13 +17,14 @@ export DOCKER_GITCOMMIT
|
||||
# to allow things like `make KEEPBUNDLE=1 binary` easily
|
||||
# `project/PACKAGERS.md` have some limited documentation of some of these
|
||||
DOCKER_ENVS := \
|
||||
$(if $(DOCKER_CROSSPLATFORMS), -e DOCKER_CROSSPLATFORMS) \
|
||||
-e BUILD_APT_MIRROR \
|
||||
-e BUILDFLAGS \
|
||||
-e KEEPBUNDLE \
|
||||
-e DOCKER_BUILD_ARGS \
|
||||
-e DOCKER_BUILD_GOGC \
|
||||
-e DOCKER_BUILD_PKGS \
|
||||
-e DOCKER_CROSSPLATFORMS \
|
||||
-e DOCKER_CLI_PATH \
|
||||
-e DOCKER_DEBUG \
|
||||
-e DOCKER_EXPERIMENTAL \
|
||||
-e DOCKER_GITCOMMIT \
|
||||
@@ -63,7 +64,8 @@ PKGCACHE_MAP := gopath:/go/pkg goroot-linux_amd64:/usr/local/go/pkg/linux_amd64
|
||||
PKGCACHE_VOLROOT := dockerdev-go-pkg-cache
|
||||
PKGCACHE_VOL := $(if $(PKGCACHE_DIR),$(CURDIR)/$(PKGCACHE_DIR)/,$(PKGCACHE_VOLROOT)-)
|
||||
DOCKER_MOUNT_PKGCACHE := $(if $(DOCKER_INCREMENTAL_BINARY),$(shell echo $(PKGCACHE_MAP) | sed -E 's@([^ ]*)@-v "$(PKGCACHE_VOL)\1"@g'),)
|
||||
DOCKER_MOUNT := $(DOCKER_MOUNT) $(DOCKER_MOUNT_PKGCACHE)
|
||||
DOCKER_MOUNT_CLI := $(if $(DOCKER_CLI_PATH),-v $(shell dirname $(DOCKER_CLI_PATH)):/usr/local/cli,)
|
||||
DOCKER_MOUNT := $(DOCKER_MOUNT) $(DOCKER_MOUNT_PKGCACHE) $(DOCKER_MOUNT_CLI)
|
||||
|
||||
GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
|
||||
GIT_BRANCH_CLEAN := $(shell echo $(GIT_BRANCH) | sed -e "s/[^[:alnum:]]/-/g")
|
||||
@@ -79,6 +81,11 @@ SWAGGER_DOCS_PORT ?= 9000
|
||||
INTEGRATION_CLI_MASTER_IMAGE := $(if $(INTEGRATION_CLI_MASTER_IMAGE), $(INTEGRATION_CLI_MASTER_IMAGE), integration-cli-master)
|
||||
INTEGRATION_CLI_WORKER_IMAGE := $(if $(INTEGRATION_CLI_WORKER_IMAGE), $(INTEGRATION_CLI_WORKER_IMAGE), integration-cli-worker)
|
||||
|
||||
define \n
|
||||
|
||||
|
||||
endef
|
||||
|
||||
# if this session isn't interactive, then we don't want to allocate a
|
||||
# TTY, which would fail, but if it is interactive, we do want to attach
|
||||
# so that the user can send e.g. ^C through.
|
||||
@@ -98,6 +105,7 @@ binary: build ## build the linux binaries
|
||||
$(DOCKER_RUN_DOCKER) hack/make.sh binary
|
||||
|
||||
build: bundles init-go-pkg-cache
|
||||
$(warning The docker client CLI has moved to github.com/docker/cli. By default, it is built from the git sha specified in hack/dockerfile/binaries-commits. For a dev-test cycle involving the CLI, run:${\n} DOCKER_CLI_PATH=/host/path/to/cli/binary make shell ${\n} then change the cli and compile into a binary at the same location.${\n})
|
||||
docker build ${BUILD_APT_MIRROR} ${DOCKER_BUILD_ARGS} -t "$(DOCKER_IMAGE)" -f "$(DOCKERFILE)" .
|
||||
|
||||
bundles:
|
||||
@@ -182,7 +190,7 @@ swagger-docs: ## preview the API documentation
|
||||
bfirsh/redoc:1.6.2
|
||||
|
||||
build-integration-cli-on-swarm: build ## build images and binary for running integration-cli on Swarm in parallel
|
||||
@echo "Building hack/integration-cli-on-swarm"
|
||||
@echo "Building hack/integration-cli-on-swarm (if build fails, please refer to hack/integration-cli-on-swarm/README.md)"
|
||||
go build -o ./hack/integration-cli-on-swarm/integration-cli-on-swarm ./hack/integration-cli-on-swarm/host
|
||||
@echo "Building $(INTEGRATION_CLI_MASTER_IMAGE)"
|
||||
docker build -t $(INTEGRATION_CLI_MASTER_IMAGE) hack/integration-cli-on-swarm/agent
|
||||
|
||||
316
README.md
316
README.md
@@ -1,270 +1,80 @@
|
||||
Docker: the container engine [](https://github.com/docker/docker/releases/latest)
|
||||
============================
|
||||
### Docker users, see [Moby and Docker](https://mobyproject.org/#moby-and-docker) to clarify the relationship between the projects
|
||||
|
||||
Docker is an open source project to pack, ship and run any application
|
||||
as a lightweight container.
|
||||
### Docker maintainers and contributors, see [Transitioning to Moby](#transitioning-to-moby) for more details
|
||||
|
||||
Docker containers are both *hardware-agnostic* and *platform-agnostic*.
|
||||
This means they can run anywhere, from your laptop to the largest
|
||||
cloud compute instance and everything in between - and they don't require
|
||||
you to use a particular language, framework or packaging system. That
|
||||
makes them great building blocks for deploying and scaling web apps,
|
||||
databases, and backend services without depending on a particular stack
|
||||
or provider.
|
||||
The Moby Project
|
||||
================
|
||||
|
||||
Docker began as an open-source implementation of the deployment engine which
|
||||
powered [dotCloud](http://web.archive.org/web/20130530031104/https://www.dotcloud.com/),
|
||||
a popular Platform-as-a-Service. It benefits directly from the experience
|
||||
accumulated over several years of large-scale operation and support of hundreds
|
||||
of thousands of applications and databases.
|
||||

|
||||
|
||||

|
||||
Moby is an open-source project created by Docker to advance the software containerization movement.
|
||||
It provides a “Lego set” of dozens of components, the framework for assembling them into custom container-based systems, and a place for all container enthusiasts to experiment and exchange ideas.
|
||||
|
||||
## Security Disclosure
|
||||
# Moby
|
||||
|
||||
Security is very important to us. If you have any issue regarding security,
|
||||
please disclose the information responsibly by sending an email to
|
||||
security@docker.com and not by creating a GitHub issue.
|
||||
## Overview
|
||||
|
||||
## Better than VMs
|
||||
At the core of Moby is a framework to assemble specialized container systems.
|
||||
It provides:
|
||||
|
||||
A common method for distributing applications and sandboxing their
|
||||
execution is to use virtual machines, or VMs. Typical VM formats are
|
||||
VMware's vmdk, Oracle VirtualBox's vdi, and Amazon EC2's ami. In theory
|
||||
these formats should allow every developer to automatically package
|
||||
their application into a "machine" for easy distribution and deployment.
|
||||
In practice, that almost never happens, for a few reasons:
|
||||
- A library of containerized components for all vital aspects of a container system: OS, container runtime, orchestration, infrastructure management, networking, storage, security, build, image distribution, etc.
|
||||
- Tools to assemble the components into runnable artifacts for a variety of platforms and architectures: bare metal (both x86 and Arm); executables for Linux, Mac and Windows; VM images for popular cloud and virtualization providers.
|
||||
- A set of reference assemblies which can be used as-is, modified, or used as inspiration to create your own.
|
||||
|
||||
* *Size*: VMs are very large which makes them impractical to store
|
||||
and transfer.
|
||||
* *Performance*: running VMs consumes significant CPU and memory,
|
||||
which makes them impractical in many scenarios, for example local
|
||||
development of multi-tier applications, and large-scale deployment
|
||||
of cpu and memory-intensive applications on large numbers of
|
||||
machines.
|
||||
* *Portability*: competing VM environments don't play well with each
|
||||
other. Although conversion tools do exist, they are limited and
|
||||
add even more overhead.
|
||||
* *Hardware-centric*: VMs were designed with machine operators in
|
||||
mind, not software developers. As a result, they offer very
|
||||
limited tooling for what developers need most: building, testing
|
||||
and running their software. For example, VMs offer no facilities
|
||||
for application versioning, monitoring, configuration, logging or
|
||||
service discovery.
|
||||
All Moby components are containers, so creating new components is as easy as building a new OCI-compatible container.
|
||||
|
||||
By contrast, Docker relies on a different sandboxing method known as
|
||||
*containerization*. Unlike traditional virtualization, containerization
|
||||
takes place at the kernel level. Most modern operating system kernels
|
||||
now support the primitives necessary for containerization, including
|
||||
Linux with [openvz](https://openvz.org),
|
||||
[vserver](http://linux-vserver.org) and more recently
|
||||
[lxc](https://linuxcontainers.org/), Solaris with
|
||||
[zones](https://docs.oracle.com/cd/E26502_01/html/E29024/preface-1.html#scrolltoc),
|
||||
and FreeBSD with
|
||||
[Jails](https://www.freebsd.org/doc/handbook/jails.html).
|
||||
## Principles
|
||||
|
||||
Docker builds on top of these low-level primitives to offer developers a
|
||||
portable format and runtime environment that solves all four problems.
|
||||
Docker containers are small (and their transfer can be optimized with
|
||||
layers), they have basically zero memory and cpu overhead, they are
|
||||
completely portable, and are designed from the ground up with an
|
||||
application-centric design.
|
||||
Moby is an open project guided by strong principles, but modular, flexible and without too strong an opinion on user experience, so it is open to the community to help set its direction.
|
||||
The guiding principles are:
|
||||
|
||||
Perhaps best of all, because Docker operates at the OS level, it can still be
|
||||
run inside a VM!
|
||||
- Batteries included but swappable: Moby includes enough components to build fully featured container system, but its modular architecture ensures that most of the components can be swapped by different implementations.
|
||||
- Usable security: Moby will provide secure defaults without compromising usability.
|
||||
- Container centric: Moby is built with containers, for running containers.
|
||||
|
||||
## Plays well with others
|
||||
With Moby, you should be able to describe all the components of your distributed application, from the high-level configuration files down to the kernel you would like to use and build and deploy it easily.
|
||||
|
||||
Docker does not require you to buy into a particular programming
|
||||
language, framework, packaging system, or configuration language.
|
||||
Moby uses [containerd](https://github.com/containerd/containerd) as the default container runtime.
|
||||
|
||||
Is your application a Unix process? Does it use files, tcp connections,
|
||||
environment variables, standard Unix streams and command-line arguments
|
||||
as inputs and outputs? Then Docker can run it.
|
||||
## Audience
|
||||
|
||||
Can your application's build be expressed as a sequence of such
|
||||
commands? Then Docker can build it.
|
||||
Moby is recommended for anyone who wants to assemble a container-based system. This includes:
|
||||
|
||||
## Escape dependency hell
|
||||
- Hackers who want to customize or patch their Docker build
|
||||
- System engineers or integrators building a container system
|
||||
- Infrastructure providers looking to adapt existing container systems to their environment
|
||||
- Container enthusiasts who want to experiment with the latest container tech
|
||||
- Open-source developers looking to test their project in a variety of different systems
|
||||
- Anyone curious about Docker internals and how it’s built
|
||||
|
||||
A common problem for developers is the difficulty of managing all
|
||||
their application's dependencies in a simple and automated way.
|
||||
Moby is NOT recommended for:
|
||||
|
||||
This is usually difficult for several reasons:
|
||||
- Application developers looking for an easy way to run their applications in containers. We recommend Docker CE instead.
|
||||
- Enterprise IT and development teams looking for a ready-to-use, commercially supported container platform. We recommend Docker EE instead.
|
||||
- Anyone curious about containers and looking for an easy way to learn. We recommend the docker.com website instead.
|
||||
|
||||
* *Cross-platform dependencies*. Modern applications often depend on
|
||||
a combination of system libraries and binaries, language-specific
|
||||
packages, framework-specific modules, internal components
|
||||
developed for another project, etc. These dependencies live in
|
||||
different "worlds" and require different tools - these tools
|
||||
typically don't work well with each other, requiring awkward
|
||||
custom integrations.
|
||||
# Transitioning to Moby
|
||||
|
||||
* *Conflicting dependencies*. Different applications may depend on
|
||||
different versions of the same dependency. Packaging tools handle
|
||||
these situations with various degrees of ease - but they all
|
||||
handle them in different and incompatible ways, which again forces
|
||||
the developer to do extra work.
|
||||
Docker is transitioning all of its open source collaborations to the Moby project going forward.
|
||||
During the transition, all open source activity should continue as usual.
|
||||
|
||||
* *Custom dependencies*. A developer may need to prepare a custom
|
||||
version of their application's dependency. Some packaging systems
|
||||
can handle custom versions of a dependency, others can't - and all
|
||||
of them handle it differently.
|
||||
We are proposing the following list of changes:
|
||||
|
||||
- splitting up the engine into more open components
|
||||
- removing the docker UI, SDK etc to keep them in the Docker org
|
||||
- clarifying that the project is not limited to the engine, but to the assembly of all the individual components of the Docker platform
|
||||
- open-source new tools & components which we currently use to assemble the Docker product, but could benefit the community
|
||||
- defining an open, community-centric governance inspired by the Fedora project (a very successful example of balancing the needs of the community with the constraints of the primary corporate sponsor)
|
||||
|
||||
Docker solves the problem of dependency hell by giving developers a simple
|
||||
way to express *all* their application's dependencies in one place, while
|
||||
streamlining the process of assembling them. If this makes you think of
|
||||
[XKCD 927](https://xkcd.com/927/), don't worry. Docker doesn't
|
||||
*replace* your favorite packaging systems. It simply orchestrates
|
||||
their use in a simple and repeatable way. How does it do that? With
|
||||
layers.
|
||||
-----
|
||||
|
||||
Docker defines a build as running a sequence of Unix commands, one
|
||||
after the other, in the same container. Build commands modify the
|
||||
contents of the container (usually by installing new files on the
|
||||
filesystem), the next command modifies it some more, etc. Since each
|
||||
build command inherits the result of the previous commands, the
|
||||
*order* in which the commands are executed expresses *dependencies*.
|
||||
|
||||
Here's a typical Docker build process:
|
||||
|
||||
```bash
|
||||
FROM ubuntu:12.04
|
||||
RUN apt-get update && apt-get install -y python python-pip curl
|
||||
RUN curl -sSL https://github.com/shykes/helloflask/archive/master.tar.gz | tar -xzv
|
||||
RUN cd helloflask-master && pip install -r requirements.txt
|
||||
```
|
||||
|
||||
Note that Docker doesn't care *how* dependencies are built - as long
|
||||
as they can be built by running a Unix command in a container.
|
||||
|
||||
|
||||
Getting started
|
||||
===============
|
||||
|
||||
Docker can be installed either on your computer for building applications or
|
||||
on servers for running them. To get started, [check out the installation
|
||||
instructions in the
|
||||
documentation](https://docs.docker.com/engine/installation/).
|
||||
|
||||
Usage examples
|
||||
==============
|
||||
|
||||
Docker can be used to run short-lived commands, long-running daemons
|
||||
(app servers, databases, etc.), interactive shell sessions, etc.
|
||||
|
||||
You can find a [list of real-world
|
||||
examples](https://docs.docker.com/engine/examples/) in the
|
||||
documentation.
|
||||
|
||||
Under the hood
|
||||
--------------
|
||||
|
||||
Under the hood, Docker is built on the following components:
|
||||
|
||||
* The
|
||||
[cgroups](https://www.kernel.org/doc/Documentation/cgroup-v1/cgroups.txt)
|
||||
and
|
||||
[namespaces](http://man7.org/linux/man-pages/man7/namespaces.7.html)
|
||||
capabilities of the Linux kernel
|
||||
* The [Go](https://golang.org) programming language
|
||||
* The [Docker Image Specification](https://github.com/docker/docker/blob/master/image/spec/v1.md)
|
||||
* The [Libcontainer Specification](https://github.com/opencontainers/runc/blob/master/libcontainer/SPEC.md)
|
||||
|
||||
Contributing to Docker [](https://godoc.org/github.com/docker/docker)
|
||||
======================
|
||||
|
||||
| **Master** (Linux) | **Experimental** (Linux) | **Windows** | **FreeBSD** |
|
||||
|------------------|----------------------|---------|---------|
|
||||
| [](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master/) | [](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master%20%28experimental%29/) | [/badge/icon)](http://jenkins.dockerproject.org/job/Docker%20Master%20(windows)/) | [/badge/icon)](http://jenkins.dockerproject.org/job/Docker%20Master%20(freebsd)/) |
|
||||
|
||||
Want to hack on Docker? Awesome! We have [instructions to help you get
|
||||
started contributing code or documentation](https://docs.docker.com/opensource/project/who-written-for/).
|
||||
|
||||
These instructions are probably not perfect, please let us know if anything
|
||||
feels wrong or incomplete. Better yet, submit a PR and improve them yourself.
|
||||
|
||||
Getting the development builds
|
||||
==============================
|
||||
|
||||
Want to run Docker from a master build? You can download
|
||||
master builds at [master.dockerproject.org](https://master.dockerproject.org).
|
||||
They are updated with each commit merged into the master branch.
|
||||
|
||||
Don't know how to use that super cool new feature in the master build? Check
|
||||
out the master docs at
|
||||
[docs.master.dockerproject.org](http://docs.master.dockerproject.org).
|
||||
|
||||
How the project is run
|
||||
======================
|
||||
|
||||
Docker is a very, very active project. If you want to learn more about how it is run,
|
||||
or want to get more involved, the best place to start is [the project directory](https://github.com/docker/docker/tree/master/project).
|
||||
|
||||
We are always open to suggestions on process improvements, and are always looking for more maintainers.
|
||||
|
||||
### Talking to other Docker users and contributors
|
||||
|
||||
<table class="tg">
|
||||
<col width="45%">
|
||||
<col width="65%">
|
||||
<tr>
|
||||
<td>Internet Relay Chat (IRC)</td>
|
||||
<td>
|
||||
<p>
|
||||
IRC is a direct line to our most knowledgeable Docker users; we have
|
||||
both the <code>#docker</code> and <code>#docker-dev</code> group on
|
||||
<strong>irc.freenode.net</strong>.
|
||||
IRC is a rich chat protocol but it can overwhelm new users. You can search
|
||||
<a href="https://botbot.me/freenode/docker/#" target="_blank">our chat archives</a>.
|
||||
</p>
|
||||
Read our <a href="https://docs.docker.com/opensource/get-help/#/irc-quickstart" target="_blank">IRC quickstart guide</a> for an easy way to get started.
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Docker Community Forums</td>
|
||||
<td>
|
||||
The <a href="https://forums.docker.com/c/open-source-projects/de" target="_blank">Docker Engine</a>
|
||||
group is for users of the Docker Engine project.
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Google Groups</td>
|
||||
<td>
|
||||
The <a href="https://groups.google.com/forum/#!forum/docker-dev"
|
||||
target="_blank">docker-dev</a> group is for contributors and other people
|
||||
contributing to the Docker project. You can join this group without a
|
||||
Google account by sending an email to <a
|
||||
href="mailto:docker-dev+subscribe@googlegroups.com">docker-dev+subscribe@googlegroups.com</a>.
|
||||
You'll receive a join-request message; simply reply to the message to
|
||||
confirm your subscription.
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Twitter</td>
|
||||
<td>
|
||||
You can follow <a href="https://twitter.com/docker/" target="_blank">Docker's Twitter feed</a>
|
||||
to get updates on our products. You can also tweet us questions or just
|
||||
share blogs or stories.
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Stack Overflow</td>
|
||||
<td>
|
||||
Stack Overflow has thousands of Docker questions listed. We regularly
|
||||
monitor <a href="https://stackoverflow.com/search?tab=newest&q=docker" target="_blank">Docker questions</a>
|
||||
and so do many other knowledgeable Docker users.
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
### Legal
|
||||
Legal
|
||||
=====
|
||||
|
||||
*Brought to you courtesy of our legal counsel. For more context,
|
||||
please see the [NOTICE](https://github.com/docker/docker/blob/master/NOTICE) document in this repo.*
|
||||
please see the [NOTICE](https://github.com/moby/moby/blob/master/NOTICE) document in this repo.*
|
||||
|
||||
Use and transfer of Docker may be subject to certain restrictions by the
|
||||
Use and transfer of Moby may be subject to certain restrictions by the
|
||||
United States and other governments.
|
||||
|
||||
It is your responsibility to ensure that your use and/or transfer does not
|
||||
@@ -275,30 +85,6 @@ For more information, please see https://www.bis.doc.gov
|
||||
|
||||
Licensing
|
||||
=========
|
||||
Docker is licensed under the Apache License, Version 2.0. See
|
||||
[LICENSE](https://github.com/docker/docker/blob/master/LICENSE) for the full
|
||||
Moby is licensed under the Apache License, Version 2.0. See
|
||||
[LICENSE](https://github.com/moby/moby/blob/master/LICENSE) for the full
|
||||
license text.
|
||||
|
||||
Other Docker Related Projects
|
||||
=============================
|
||||
There are a number of projects under development that are based on Docker's
|
||||
core technology. These projects expand the tooling built around the
|
||||
Docker platform to broaden its application and utility.
|
||||
|
||||
* [Docker Registry](https://github.com/docker/distribution): Registry
|
||||
server for Docker (hosting/delivery of repositories and images)
|
||||
* [Docker Machine](https://github.com/docker/machine): Machine management
|
||||
for a container-centric world
|
||||
* [Docker Swarm](https://github.com/docker/swarm): A Docker-native clustering
|
||||
system
|
||||
* [Docker Compose](https://github.com/docker/compose) (formerly Fig):
|
||||
Define and run multi-container apps
|
||||
* [Kitematic](https://github.com/docker/kitematic): The easiest way to use
|
||||
Docker on Mac and Windows
|
||||
|
||||
If you know of another project underway that should be listed here, please help
|
||||
us keep this list up-to-date by submitting a PR.
|
||||
|
||||
Awesome-Docker
|
||||
==============
|
||||
You can find more projects, tools and articles related to Docker on the [awesome-docker list](https://github.com/veggiemonk/awesome-docker). Add your project there.
|
||||
|
||||
@@ -14,8 +14,8 @@ It consists of various components in this repository:
|
||||
|
||||
The API is defined by the [Swagger](http://swagger.io/specification/) definition in `api/swagger.yaml`. This definition can be used to:
|
||||
|
||||
1. To automatically generate documentation.
|
||||
2. To automatically generate the Go server and client. (A work-in-progress.)
|
||||
1. Automatically generate documentation.
|
||||
2. Automatically generate the Go server and client. (A work-in-progress.)
|
||||
3. Provide a machine readable version of the API for introspecting what it can do, automatically generating clients for other languages, etc.
|
||||
|
||||
## Updating the API documentation
|
||||
|
||||
@@ -21,7 +21,7 @@ import (
|
||||
// Common constants for daemon and client.
|
||||
const (
|
||||
// DefaultVersion of Current REST API
|
||||
DefaultVersion string = "1.29"
|
||||
DefaultVersion string = "1.30"
|
||||
|
||||
// NoBaseImageSpecifier is the symbol used by the FROM
|
||||
// command to specify that no base image is to be used.
|
||||
|
||||
64
api/errors/errors_test.go
Normal file
64
api/errors/errors_test.go
Normal file
@@ -0,0 +1,64 @@
|
||||
package errors
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"net/http"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// newError builds a distinguishable test error whose message is the
// literal prefix "test" followed by the supplied name.
func newError(errorname string) error {
	return fmt.Errorf("test%s", errorname)
}
|
||||
|
||||
func TestErrors(t *testing.T) {
|
||||
errmsg := newError("apiError")
|
||||
err := apiError{
|
||||
error: errmsg,
|
||||
statusCode: 0,
|
||||
}
|
||||
assert.Equal(t, err.HTTPErrorStatusCode(), err.statusCode)
|
||||
|
||||
errmsg = newError("ErrorWithStatusCode")
|
||||
errcode := 1
|
||||
serr := NewErrorWithStatusCode(errmsg, errcode)
|
||||
apierr, ok := serr.(apiError)
|
||||
if !ok {
|
||||
t.Fatal("excepted err is apiError type")
|
||||
}
|
||||
assert.Equal(t, errcode, apierr.statusCode)
|
||||
|
||||
errmsg = newError("NewBadRequestError")
|
||||
baderr := NewBadRequestError(errmsg)
|
||||
apierr, ok = baderr.(apiError)
|
||||
if !ok {
|
||||
t.Fatal("excepted err is apiError type")
|
||||
}
|
||||
assert.Equal(t, http.StatusBadRequest, apierr.statusCode)
|
||||
|
||||
errmsg = newError("RequestForbiddenError")
|
||||
ferr := NewRequestForbiddenError(errmsg)
|
||||
apierr, ok = ferr.(apiError)
|
||||
if !ok {
|
||||
t.Fatal("excepted err is apiError type")
|
||||
}
|
||||
assert.Equal(t, http.StatusForbidden, apierr.statusCode)
|
||||
|
||||
errmsg = newError("RequestNotFoundError")
|
||||
nerr := NewRequestNotFoundError(errmsg)
|
||||
apierr, ok = nerr.(apiError)
|
||||
if !ok {
|
||||
t.Fatal("excepted err is apiError type")
|
||||
}
|
||||
assert.Equal(t, http.StatusNotFound, apierr.statusCode)
|
||||
|
||||
errmsg = newError("RequestConflictError")
|
||||
cerr := NewRequestConflictError(errmsg)
|
||||
apierr, ok = cerr.(apiError)
|
||||
if !ok {
|
||||
t.Fatal("excepted err is apiError type")
|
||||
}
|
||||
assert.Equal(t, http.StatusConflict, apierr.statusCode)
|
||||
|
||||
}
|
||||
70
api/server/backend/build/backend.go
Normal file
70
api/server/backend/build/backend.go
Normal file
@@ -0,0 +1,70 @@
|
||||
package build
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/docker/api/types/backend"
|
||||
"github.com/docker/docker/builder"
|
||||
"github.com/docker/docker/builder/dockerfile"
|
||||
"github.com/docker/docker/image"
|
||||
"github.com/docker/docker/pkg/stringid"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// ImageComponent provides an interface for working with images
|
||||
type ImageComponent interface {
|
||||
SquashImage(from string, to string) (string, error)
|
||||
TagImageWithReference(image.ID, reference.Named) error
|
||||
}
|
||||
|
||||
// Backend provides build functionality to the API router
|
||||
type Backend struct {
|
||||
manager *dockerfile.BuildManager
|
||||
imageComponent ImageComponent
|
||||
}
|
||||
|
||||
// NewBackend creates a new build backend from components
|
||||
func NewBackend(components ImageComponent, builderBackend builder.Backend) *Backend {
|
||||
manager := dockerfile.NewBuildManager(builderBackend)
|
||||
return &Backend{imageComponent: components, manager: manager}
|
||||
}
|
||||
|
||||
// Build builds an image from a Source
|
||||
func (b *Backend) Build(ctx context.Context, config backend.BuildConfig) (string, error) {
|
||||
options := config.Options
|
||||
tagger, err := NewTagger(b.imageComponent, config.ProgressWriter.StdoutFormatter, options.Tags)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
build, err := b.manager.Build(ctx, config)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
var imageID = build.ImageID
|
||||
if options.Squash {
|
||||
if imageID, err = squashBuild(build, b.imageComponent); err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
|
||||
stdout := config.ProgressWriter.StdoutFormatter
|
||||
fmt.Fprintf(stdout, "Successfully built %s\n", stringid.TruncateID(imageID))
|
||||
err = tagger.TagImages(image.ID(imageID))
|
||||
return imageID, err
|
||||
}
|
||||
|
||||
func squashBuild(build *builder.Result, imageComponent ImageComponent) (string, error) {
|
||||
var fromID string
|
||||
if build.FromImage != nil {
|
||||
fromID = build.FromImage.ImageID()
|
||||
}
|
||||
imageID, err := imageComponent.SquashImage(build.ImageID, fromID)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "error squashing image")
|
||||
}
|
||||
return imageID, nil
|
||||
}
|
||||
77
api/server/backend/build/tag.go
Normal file
77
api/server/backend/build/tag.go
Normal file
@@ -0,0 +1,77 @@
|
||||
package build
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/docker/image"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Tagger is responsible for tagging an image created by a builder
|
||||
type Tagger struct {
|
||||
imageComponent ImageComponent
|
||||
stdout io.Writer
|
||||
repoAndTags []reference.Named
|
||||
}
|
||||
|
||||
// NewTagger returns a new Tagger for tagging the images of a build.
|
||||
// If any of the names are invalid tags an error is returned.
|
||||
func NewTagger(backend ImageComponent, stdout io.Writer, names []string) (*Tagger, error) {
|
||||
reposAndTags, err := sanitizeRepoAndTags(names)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Tagger{
|
||||
imageComponent: backend,
|
||||
stdout: stdout,
|
||||
repoAndTags: reposAndTags,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// TagImages creates image tags for the imageID
|
||||
func (bt *Tagger) TagImages(imageID image.ID) error {
|
||||
for _, rt := range bt.repoAndTags {
|
||||
if err := bt.imageComponent.TagImageWithReference(imageID, rt); err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprintf(bt.stdout, "Successfully tagged %s\n", reference.FamiliarString(rt))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// sanitizeRepoAndTags parses the raw "t" parameter received from the client
|
||||
// to a slice of repoAndTag.
|
||||
// It also validates each repoName and tag.
|
||||
func sanitizeRepoAndTags(names []string) ([]reference.Named, error) {
|
||||
var (
|
||||
repoAndTags []reference.Named
|
||||
// This map is used for deduplicating the "-t" parameter.
|
||||
uniqNames = make(map[string]struct{})
|
||||
)
|
||||
for _, repo := range names {
|
||||
if repo == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
ref, err := reference.ParseNormalizedNamed(repo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if _, isCanonical := ref.(reference.Canonical); isCanonical {
|
||||
return nil, errors.New("build tag cannot contain a digest")
|
||||
}
|
||||
|
||||
ref = reference.TagNameOnly(ref)
|
||||
|
||||
nameWithTag := ref.String()
|
||||
|
||||
if _, exists := uniqNames[nameWithTag]; !exists {
|
||||
uniqNames[nameWithTag] = struct{}{}
|
||||
repoAndTags = append(repoAndTags, ref)
|
||||
}
|
||||
}
|
||||
return repoAndTags, nil
|
||||
}
|
||||
@@ -3,6 +3,7 @@ package httputils
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
@@ -85,6 +86,7 @@ func (s byKey) Swap(i, j int) {
|
||||
func stringAttrs(a backend.LogAttributes) string {
|
||||
var ss byKey
|
||||
for k, v := range a {
|
||||
k, v := url.QueryEscape(k), url.QueryEscape(v)
|
||||
ss = append(ss, k+"="+v)
|
||||
}
|
||||
sort.Sort(ss)
|
||||
|
||||
@@ -41,7 +41,7 @@ func DebugRequestMiddleware(handler func(ctx context.Context, w http.ResponseWri
|
||||
|
||||
var postForm map[string]interface{}
|
||||
if err := json.Unmarshal(b, &postForm); err == nil {
|
||||
maskSecretKeys(postForm)
|
||||
maskSecretKeys(postForm, r.RequestURI)
|
||||
formStr, errMarshal := json.Marshal(postForm)
|
||||
if errMarshal == nil {
|
||||
logrus.Debugf("form data: %s", string(formStr))
|
||||
@@ -54,23 +54,41 @@ func DebugRequestMiddleware(handler func(ctx context.Context, w http.ResponseWri
|
||||
}
|
||||
}
|
||||
|
||||
func maskSecretKeys(inp interface{}) {
|
||||
func maskSecretKeys(inp interface{}, path string) {
|
||||
// Remove any query string from the path
|
||||
idx := strings.Index(path, "?")
|
||||
if idx != -1 {
|
||||
path = path[:idx]
|
||||
}
|
||||
// Remove trailing / characters
|
||||
path = strings.TrimRight(path, "/")
|
||||
|
||||
if arr, ok := inp.([]interface{}); ok {
|
||||
for _, f := range arr {
|
||||
maskSecretKeys(f)
|
||||
maskSecretKeys(f, path)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if form, ok := inp.(map[string]interface{}); ok {
|
||||
loop0:
|
||||
for k, v := range form {
|
||||
for _, m := range []string{"password", "secret", "jointoken", "unlockkey"} {
|
||||
for _, m := range []string{"password", "secret", "jointoken", "unlockkey", "signingcakey"} {
|
||||
if strings.EqualFold(m, k) {
|
||||
form[k] = "*****"
|
||||
continue loop0
|
||||
}
|
||||
}
|
||||
maskSecretKeys(v)
|
||||
maskSecretKeys(v, path)
|
||||
}
|
||||
|
||||
// Route-specific redactions
|
||||
if strings.HasSuffix(path, "/secrets/create") {
|
||||
for k := range form {
|
||||
if k == "Data" {
|
||||
form[k] = "*****"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
58
api/server/middleware/debug_test.go
Normal file
58
api/server/middleware/debug_test.go
Normal file
@@ -0,0 +1,58 @@
|
||||
package middleware
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestMaskSecretKeys(t *testing.T) {
|
||||
tests := []struct {
|
||||
path string
|
||||
input map[string]interface{}
|
||||
expected map[string]interface{}
|
||||
}{
|
||||
{
|
||||
path: "/v1.30/secrets/create",
|
||||
input: map[string]interface{}{"Data": "foo", "Name": "name", "Labels": map[string]interface{}{}},
|
||||
expected: map[string]interface{}{"Data": "*****", "Name": "name", "Labels": map[string]interface{}{}},
|
||||
},
|
||||
{
|
||||
path: "/v1.30/secrets/create//",
|
||||
input: map[string]interface{}{"Data": "foo", "Name": "name", "Labels": map[string]interface{}{}},
|
||||
expected: map[string]interface{}{"Data": "*****", "Name": "name", "Labels": map[string]interface{}{}},
|
||||
},
|
||||
|
||||
{
|
||||
path: "/secrets/create?key=val",
|
||||
input: map[string]interface{}{"Data": "foo", "Name": "name", "Labels": map[string]interface{}{}},
|
||||
expected: map[string]interface{}{"Data": "*****", "Name": "name", "Labels": map[string]interface{}{}},
|
||||
},
|
||||
{
|
||||
path: "/v1.30/some/other/path",
|
||||
input: map[string]interface{}{
|
||||
"password": "pass",
|
||||
"other": map[string]interface{}{
|
||||
"secret": "secret",
|
||||
"jointoken": "jointoken",
|
||||
"unlockkey": "unlockkey",
|
||||
"signingcakey": "signingcakey",
|
||||
},
|
||||
},
|
||||
expected: map[string]interface{}{
|
||||
"password": "*****",
|
||||
"other": map[string]interface{}{
|
||||
"secret": "*****",
|
||||
"jointoken": "*****",
|
||||
"unlockkey": "*****",
|
||||
"signingcakey": "*****",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, testcase := range tests {
|
||||
maskSecretKeys(testcase.input, testcase.path)
|
||||
assert.Equal(t, testcase.expected, testcase.input)
|
||||
}
|
||||
}
|
||||
@@ -1,20 +1,17 @@
|
||||
package build
|
||||
|
||||
import (
|
||||
"io"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/backend"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// Backend abstracts an image builder whose only purpose is to build an image referenced by an imageID.
|
||||
type Backend interface {
|
||||
// BuildFromContext builds a Docker image referenced by an imageID string.
|
||||
//
|
||||
// Note: Tagging an image should not be done by a Builder, it should instead be done
|
||||
// by the caller.
|
||||
//
|
||||
// Build a Docker image returning the id of the image
|
||||
// TODO: make this return a reference instead of string
|
||||
BuildFromContext(ctx context.Context, src io.ReadCloser, remote string, buildOptions *types.ImageBuildOptions, pg backend.ProgressWriter) (string, error)
|
||||
Build(context.Context, backend.BuildConfig) (string, error)
|
||||
}
|
||||
|
||||
type experimentalProvider interface {
|
||||
HasExperimental() bool
|
||||
}
|
||||
|
||||
@@ -5,14 +5,13 @@ import "github.com/docker/docker/api/server/router"
|
||||
// buildRouter is a router to talk with the build controller
|
||||
type buildRouter struct {
|
||||
backend Backend
|
||||
daemon experimentalProvider
|
||||
routes []router.Route
|
||||
}
|
||||
|
||||
// NewRouter initializes a new build router
|
||||
func NewRouter(b Backend) router.Router {
|
||||
r := &buildRouter{
|
||||
backend: b,
|
||||
}
|
||||
func NewRouter(b Backend, d experimentalProvider) router.Router {
|
||||
r := &buildRouter{backend: b, daemon: d}
|
||||
r.initRoutes()
|
||||
return r
|
||||
}
|
||||
@@ -24,6 +23,6 @@ func (r *buildRouter) Routes() []router.Route {
|
||||
|
||||
func (r *buildRouter) initRoutes() {
|
||||
r.routes = []router.Route{
|
||||
router.Cancellable(router.NewPostRoute("/build", r.postBuild)),
|
||||
router.NewPostRoute("/build", r.postBuild, router.WithCancel),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
"sync"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
apierrors "github.com/docker/docker/api/errors"
|
||||
"github.com/docker/docker/api/server/httputils"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/backend"
|
||||
@@ -21,7 +22,8 @@ import (
|
||||
"github.com/docker/docker/pkg/ioutils"
|
||||
"github.com/docker/docker/pkg/progress"
|
||||
"github.com/docker/docker/pkg/streamformatter"
|
||||
"github.com/docker/go-units"
|
||||
units "github.com/docker/go-units"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
@@ -57,6 +59,7 @@ func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBui
|
||||
options.SecurityOpt = r.Form["securityopt"]
|
||||
options.Squash = httputils.BoolValue(r, "squash")
|
||||
options.Target = r.FormValue("target")
|
||||
options.RemoteContext = r.FormValue("remote")
|
||||
|
||||
if r.Form.Get("shmsize") != "" {
|
||||
shmSize, err := strconv.ParseInt(r.Form.Get("shmsize"), 10, 64)
|
||||
@@ -86,9 +89,6 @@ func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBui
|
||||
options.Ulimits = buildUlimits
|
||||
}
|
||||
|
||||
var buildArgs = map[string]*string{}
|
||||
buildArgsJSON := r.FormValue("buildargs")
|
||||
|
||||
// Note that there are two ways a --build-arg might appear in the
|
||||
// json of the query param:
|
||||
// "foo":"bar"
|
||||
@@ -101,25 +101,27 @@ func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBui
|
||||
// the fact they mentioned it, we need to pass that along to the builder
|
||||
// so that it can print a warning about "foo" being unused if there is
|
||||
// no "ARG foo" in the Dockerfile.
|
||||
buildArgsJSON := r.FormValue("buildargs")
|
||||
if buildArgsJSON != "" {
|
||||
var buildArgs = map[string]*string{}
|
||||
if err := json.Unmarshal([]byte(buildArgsJSON), &buildArgs); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
options.BuildArgs = buildArgs
|
||||
}
|
||||
|
||||
var labels = map[string]string{}
|
||||
labelsJSON := r.FormValue("labels")
|
||||
if labelsJSON != "" {
|
||||
var labels = map[string]string{}
|
||||
if err := json.Unmarshal([]byte(labelsJSON), &labels); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
options.Labels = labels
|
||||
}
|
||||
|
||||
var cacheFrom = []string{}
|
||||
cacheFromJSON := r.FormValue("cachefrom")
|
||||
if cacheFromJSON != "" {
|
||||
var cacheFrom = []string{}
|
||||
if err := json.Unmarshal([]byte(cacheFromJSON), &cacheFrom); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -129,6 +131,89 @@ func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBui
|
||||
return options, nil
|
||||
}
|
||||
|
||||
func (br *buildRouter) postBuild(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
var (
|
||||
notVerboseBuffer = bytes.NewBuffer(nil)
|
||||
version = httputils.VersionFromContext(ctx)
|
||||
)
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
|
||||
output := ioutils.NewWriteFlusher(w)
|
||||
defer output.Close()
|
||||
errf := func(err error) error {
|
||||
if httputils.BoolValue(r, "q") && notVerboseBuffer.Len() > 0 {
|
||||
output.Write(notVerboseBuffer.Bytes())
|
||||
}
|
||||
// Do not write the error in the http output if it's still empty.
|
||||
// This prevents from writing a 200(OK) when there is an internal error.
|
||||
if !output.Flushed() {
|
||||
return err
|
||||
}
|
||||
_, err = w.Write(streamformatter.FormatError(err))
|
||||
if err != nil {
|
||||
logrus.Warnf("could not write error response: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
buildOptions, err := newImageBuildOptions(ctx, r)
|
||||
if err != nil {
|
||||
return errf(err)
|
||||
}
|
||||
buildOptions.AuthConfigs = getAuthConfigs(r.Header)
|
||||
|
||||
if buildOptions.Squash && !br.daemon.HasExperimental() {
|
||||
return apierrors.NewBadRequestError(
|
||||
errors.New("squash is only supported with experimental mode"))
|
||||
}
|
||||
|
||||
out := io.Writer(output)
|
||||
if buildOptions.SuppressOutput {
|
||||
out = notVerboseBuffer
|
||||
}
|
||||
|
||||
// Currently, only used if context is from a remote url.
|
||||
// Look at code in DetectContextFromRemoteURL for more information.
|
||||
createProgressReader := func(in io.ReadCloser) io.ReadCloser {
|
||||
progressOutput := streamformatter.NewJSONProgressOutput(out, true)
|
||||
return progress.NewProgressReader(in, progressOutput, r.ContentLength, "Downloading context", buildOptions.RemoteContext)
|
||||
}
|
||||
|
||||
wantAux := versions.GreaterThanOrEqualTo(version, "1.30")
|
||||
|
||||
imgID, err := br.backend.Build(ctx, backend.BuildConfig{
|
||||
Source: r.Body,
|
||||
Options: buildOptions,
|
||||
ProgressWriter: buildProgressWriter(out, wantAux, createProgressReader),
|
||||
})
|
||||
if err != nil {
|
||||
return errf(err)
|
||||
}
|
||||
|
||||
// Everything worked so if -q was provided the output from the daemon
|
||||
// should be just the image ID and we'll print that to stdout.
|
||||
if buildOptions.SuppressOutput {
|
||||
fmt.Fprintln(streamformatter.NewStdoutWriter(output), imgID)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getAuthConfigs(header http.Header) map[string]types.AuthConfig {
|
||||
authConfigs := map[string]types.AuthConfig{}
|
||||
authConfigsEncoded := header.Get("X-Registry-Config")
|
||||
|
||||
if authConfigsEncoded == "" {
|
||||
return authConfigs
|
||||
}
|
||||
|
||||
authConfigsJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authConfigsEncoded))
|
||||
// Pulling an image does not error when no auth is provided so to remain
|
||||
// consistent with the existing api decode errors are ignored
|
||||
json.NewDecoder(authConfigsJSON).Decode(&authConfigs)
|
||||
return authConfigs
|
||||
}
|
||||
|
||||
type syncWriter struct {
|
||||
w io.Writer
|
||||
mu sync.Mutex
|
||||
@@ -141,87 +226,19 @@ func (s *syncWriter) Write(b []byte) (count int, err error) {
|
||||
return
|
||||
}
|
||||
|
||||
func (br *buildRouter) postBuild(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
var (
|
||||
authConfigs = map[string]types.AuthConfig{}
|
||||
authConfigsEncoded = r.Header.Get("X-Registry-Config")
|
||||
notVerboseBuffer = bytes.NewBuffer(nil)
|
||||
)
|
||||
|
||||
if authConfigsEncoded != "" {
|
||||
authConfigsJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authConfigsEncoded))
|
||||
if err := json.NewDecoder(authConfigsJSON).Decode(&authConfigs); err != nil {
|
||||
// for a pull it is not an error if no auth was given
|
||||
// to increase compatibility with the existing api it is defaulting
|
||||
// to be empty.
|
||||
}
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
|
||||
output := ioutils.NewWriteFlusher(w)
|
||||
defer output.Close()
|
||||
sf := streamformatter.NewJSONStreamFormatter()
|
||||
errf := func(err error) error {
|
||||
if httputils.BoolValue(r, "q") && notVerboseBuffer.Len() > 0 {
|
||||
output.Write(notVerboseBuffer.Bytes())
|
||||
}
|
||||
// Do not write the error in the http output if it's still empty.
|
||||
// This prevents from writing a 200(OK) when there is an internal error.
|
||||
if !output.Flushed() {
|
||||
return err
|
||||
}
|
||||
_, err = w.Write(sf.FormatError(err))
|
||||
if err != nil {
|
||||
logrus.Warnf("could not write error response: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
buildOptions, err := newImageBuildOptions(ctx, r)
|
||||
if err != nil {
|
||||
return errf(err)
|
||||
}
|
||||
buildOptions.AuthConfigs = authConfigs
|
||||
|
||||
remoteURL := r.FormValue("remote")
|
||||
|
||||
// Currently, only used if context is from a remote url.
|
||||
// Look at code in DetectContextFromRemoteURL for more information.
|
||||
createProgressReader := func(in io.ReadCloser) io.ReadCloser {
|
||||
progressOutput := sf.NewProgressOutput(output, true)
|
||||
if buildOptions.SuppressOutput {
|
||||
progressOutput = sf.NewProgressOutput(notVerboseBuffer, true)
|
||||
}
|
||||
return progress.NewProgressReader(in, progressOutput, r.ContentLength, "Downloading context", remoteURL)
|
||||
}
|
||||
|
||||
out := io.Writer(output)
|
||||
if buildOptions.SuppressOutput {
|
||||
out = notVerboseBuffer
|
||||
}
|
||||
func buildProgressWriter(out io.Writer, wantAux bool, createProgressReader func(io.ReadCloser) io.ReadCloser) backend.ProgressWriter {
|
||||
out = &syncWriter{w: out}
|
||||
stdout := &streamformatter.StdoutFormatter{Writer: out, StreamFormatter: sf}
|
||||
stderr := &streamformatter.StderrFormatter{Writer: out, StreamFormatter: sf}
|
||||
|
||||
pg := backend.ProgressWriter{
|
||||
var aux *streamformatter.AuxFormatter
|
||||
if wantAux {
|
||||
aux = &streamformatter.AuxFormatter{Writer: out}
|
||||
}
|
||||
|
||||
return backend.ProgressWriter{
|
||||
Output: out,
|
||||
StdoutFormatter: stdout,
|
||||
StderrFormatter: stderr,
|
||||
StdoutFormatter: streamformatter.NewStdoutWriter(out),
|
||||
StderrFormatter: streamformatter.NewStderrWriter(out),
|
||||
AuxFormatter: aux,
|
||||
ProgressReaderFunc: createProgressReader,
|
||||
}
|
||||
|
||||
imgID, err := br.backend.BuildFromContext(ctx, r.Body, remoteURL, buildOptions, pg)
|
||||
if err != nil {
|
||||
return errf(err)
|
||||
}
|
||||
|
||||
// Everything worked so if -q was provided the output from the daemon
|
||||
// should be just the image ID and we'll print that to stdout.
|
||||
if buildOptions.SuppressOutput {
|
||||
stdout := &streamformatter.StdoutFormatter{Writer: output, StreamFormatter: sf}
|
||||
fmt.Fprintf(stdout, "%s\n", string(imgID))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -29,8 +29,8 @@ func (r *checkpointRouter) Routes() []router.Route {
|
||||
|
||||
func (r *checkpointRouter) initRoutes() {
|
||||
r.routes = []router.Route{
|
||||
router.Experimental(router.NewGetRoute("/containers/{name:.*}/checkpoints", r.getContainerCheckpoints)),
|
||||
router.Experimental(router.NewPostRoute("/containers/{name:.*}/checkpoints", r.postContainerCheckpoint)),
|
||||
router.Experimental(router.NewDeleteRoute("/containers/{name}/checkpoints/{checkpoint}", r.deleteContainerCheckpoint)),
|
||||
router.NewGetRoute("/containers/{name:.*}/checkpoints", r.getContainerCheckpoints, router.Experimental),
|
||||
router.NewPostRoute("/containers/{name:.*}/checkpoints", r.postContainerCheckpoint, router.Experimental),
|
||||
router.NewDeleteRoute("/containers/{name}/checkpoints/{checkpoint}", r.deleteContainerCheckpoint, router.Experimental),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,7 +2,6 @@ package container
|
||||
|
||||
import (
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
|
||||
@@ -10,6 +9,7 @@ import (
|
||||
"github.com/docker/docker/api/types/backend"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
containerpkg "github.com/docker/docker/container"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
)
|
||||
|
||||
@@ -27,7 +27,7 @@ type copyBackend interface {
|
||||
ContainerArchivePath(name string, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error)
|
||||
ContainerCopy(name string, res string) (io.ReadCloser, error)
|
||||
ContainerExport(name string, out io.Writer) error
|
||||
ContainerExtractToDir(name, path string, noOverwriteDirNonDir bool, content io.Reader) error
|
||||
ContainerExtractToDir(name, path string, copyUIDGID, noOverwriteDirNonDir bool, content io.Reader) error
|
||||
ContainerStatPath(name string, path string) (stat *types.ContainerPathStat, err error)
|
||||
}
|
||||
|
||||
@@ -44,7 +44,7 @@ type stateBackend interface {
|
||||
ContainerStop(name string, seconds *int) error
|
||||
ContainerUnpause(name string) error
|
||||
ContainerUpdate(name string, hostConfig *container.HostConfig) (container.ContainerUpdateOKBody, error)
|
||||
ContainerWait(name string, timeout time.Duration) (int, error)
|
||||
ContainerWait(ctx context.Context, name string, condition containerpkg.WaitCondition) (<-chan containerpkg.StateStatus, error)
|
||||
}
|
||||
|
||||
// monitorBackend includes functions to implement to provide containers monitoring functionality.
|
||||
@@ -65,7 +65,7 @@ type attachBackend interface {
|
||||
|
||||
// systemBackend includes functions to implement to provide system wide containers functionality
|
||||
type systemBackend interface {
|
||||
ContainersPrune(pruneFilters filters.Args) (*types.ContainersPruneReport, error)
|
||||
ContainersPrune(ctx context.Context, pruneFilters filters.Args) (*types.ContainersPruneReport, error)
|
||||
}
|
||||
|
||||
// Backend is all the methods that need to be implemented to provide container specific functionality.
|
||||
|
||||
@@ -46,8 +46,8 @@ func (r *containerRouter) initRoutes() {
|
||||
router.NewGetRoute("/containers/{name:.*}/changes", r.getContainersChanges),
|
||||
router.NewGetRoute("/containers/{name:.*}/json", r.getContainersByName),
|
||||
router.NewGetRoute("/containers/{name:.*}/top", r.getContainersTop),
|
||||
router.Cancellable(router.NewGetRoute("/containers/{name:.*}/logs", r.getContainersLogs)),
|
||||
router.Cancellable(router.NewGetRoute("/containers/{name:.*}/stats", r.getContainersStats)),
|
||||
router.NewGetRoute("/containers/{name:.*}/logs", r.getContainersLogs, router.WithCancel),
|
||||
router.NewGetRoute("/containers/{name:.*}/stats", r.getContainersStats, router.WithCancel),
|
||||
router.NewGetRoute("/containers/{name:.*}/attach/ws", r.wsContainersAttach),
|
||||
router.NewGetRoute("/exec/{id:.*}/json", r.getExecByID),
|
||||
router.NewGetRoute("/containers/{name:.*}/archive", r.getContainersArchive),
|
||||
@@ -59,7 +59,7 @@ func (r *containerRouter) initRoutes() {
|
||||
router.NewPostRoute("/containers/{name:.*}/restart", r.postContainersRestart),
|
||||
router.NewPostRoute("/containers/{name:.*}/start", r.postContainersStart),
|
||||
router.NewPostRoute("/containers/{name:.*}/stop", r.postContainersStop),
|
||||
router.NewPostRoute("/containers/{name:.*}/wait", r.postContainersWait),
|
||||
router.NewPostRoute("/containers/{name:.*}/wait", r.postContainersWait, router.WithCancel),
|
||||
router.NewPostRoute("/containers/{name:.*}/resize", r.postContainersResize),
|
||||
router.NewPostRoute("/containers/{name:.*}/attach", r.postContainersAttach),
|
||||
router.NewPostRoute("/containers/{name:.*}/copy", r.postContainersCopy), // Deprecated since 1.8, Errors out since 1.12
|
||||
@@ -68,7 +68,7 @@ func (r *containerRouter) initRoutes() {
|
||||
router.NewPostRoute("/exec/{name:.*}/resize", r.postContainerExecResize),
|
||||
router.NewPostRoute("/containers/{name:.*}/rename", r.postContainerRename),
|
||||
router.NewPostRoute("/containers/{name:.*}/update", r.postContainerUpdate),
|
||||
router.NewPostRoute("/containers/prune", r.postContainersPrune),
|
||||
router.NewPostRoute("/containers/prune", r.postContainersPrune, router.WithCancel),
|
||||
// PUT
|
||||
router.NewPutRoute("/containers/{name:.*}/archive", r.putContainersArchive),
|
||||
// DELETE
|
||||
|
||||
@@ -7,7 +7,6 @@ import (
|
||||
"net/http"
|
||||
"strconv"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/docker/docker/api"
|
||||
@@ -17,6 +16,7 @@ import (
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/docker/docker/api/types/versions"
|
||||
containerpkg "github.com/docker/docker/container"
|
||||
"github.com/docker/docker/pkg/ioutils"
|
||||
"github.com/docker/docker/pkg/signal"
|
||||
"golang.org/x/net/context"
|
||||
@@ -284,13 +284,48 @@ func (s *containerRouter) postContainersUnpause(ctx context.Context, w http.Resp
|
||||
}
|
||||
|
||||
func (s *containerRouter) postContainersWait(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
status, err := s.backend.ContainerWait(vars["name"], -1*time.Second)
|
||||
// Behavior changed in version 1.30 to handle wait condition and to
|
||||
// return headers immediately.
|
||||
version := httputils.VersionFromContext(ctx)
|
||||
legacyBehavior := versions.LessThan(version, "1.30")
|
||||
|
||||
// The wait condition defaults to "not-running".
|
||||
waitCondition := containerpkg.WaitConditionNotRunning
|
||||
if !legacyBehavior {
|
||||
if err := httputils.ParseForm(r); err != nil {
|
||||
return err
|
||||
}
|
||||
switch container.WaitCondition(r.Form.Get("condition")) {
|
||||
case container.WaitConditionNextExit:
|
||||
waitCondition = containerpkg.WaitConditionNextExit
|
||||
case container.WaitConditionRemoved:
|
||||
waitCondition = containerpkg.WaitConditionRemoved
|
||||
}
|
||||
}
|
||||
|
||||
// Note: the context should get canceled if the client closes the
|
||||
// connection since this handler has been wrapped by the
|
||||
// router.WithCancel() wrapper.
|
||||
waitC, err := s.backend.ContainerWait(ctx, vars["name"], waitCondition)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return httputils.WriteJSON(w, http.StatusOK, &container.ContainerWaitOKBody{
|
||||
StatusCode: int64(status),
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
|
||||
if !legacyBehavior {
|
||||
// Write response header immediately.
|
||||
w.WriteHeader(http.StatusOK)
|
||||
if flusher, ok := w.(http.Flusher); ok {
|
||||
flusher.Flush()
|
||||
}
|
||||
}
|
||||
|
||||
// Block on the result of the wait operation.
|
||||
status := <-waitC
|
||||
|
||||
return json.NewEncoder(w).Encode(&container.ContainerWaitOKBody{
|
||||
StatusCode: int64(status.ExitCode()),
|
||||
})
|
||||
}
|
||||
|
||||
@@ -565,7 +600,7 @@ func (s *containerRouter) postContainersPrune(ctx context.Context, w http.Respon
|
||||
return err
|
||||
}
|
||||
|
||||
pruneReport, err := s.backend.ContainersPrune(pruneFilters)
|
||||
pruneReport, err := s.backend.ContainersPrune(ctx, pruneFilters)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -112,5 +112,7 @@ func (s *containerRouter) putContainersArchive(ctx context.Context, w http.Respo
|
||||
}
|
||||
|
||||
noOverwriteDirNonDir := httputils.BoolValue(r, "noOverwriteDirNonDir")
|
||||
return s.backend.ContainerExtractToDir(v.Name, v.Path, noOverwriteDirNonDir, r.Body)
|
||||
copyUIDGID := httputils.BoolValue(r, "copyUIDGID")
|
||||
|
||||
return s.backend.ContainerExtractToDir(v.Name, v.Path, copyUIDGID, noOverwriteDirNonDir, r.Body)
|
||||
}
|
||||
|
||||
14
api/server/router/distribution/backend.go
Normal file
14
api/server/router/distribution/backend.go
Normal file
@@ -0,0 +1,14 @@
|
||||
package distribution
|
||||
|
||||
import (
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/docker/api/types"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// Backend is all the methods that need to be implemented
|
||||
// to provide image specific functionality.
|
||||
type Backend interface {
|
||||
GetRepository(context.Context, reference.Named, *types.AuthConfig) (distribution.Repository, bool, error)
|
||||
}
|
||||
31
api/server/router/distribution/distribution.go
Normal file
31
api/server/router/distribution/distribution.go
Normal file
@@ -0,0 +1,31 @@
|
||||
package distribution
|
||||
|
||||
import "github.com/docker/docker/api/server/router"
|
||||
|
||||
// distributionRouter is a router to talk with the registry
|
||||
type distributionRouter struct {
|
||||
backend Backend
|
||||
routes []router.Route
|
||||
}
|
||||
|
||||
// NewRouter initializes a new distribution router
|
||||
func NewRouter(backend Backend) router.Router {
|
||||
r := &distributionRouter{
|
||||
backend: backend,
|
||||
}
|
||||
r.initRoutes()
|
||||
return r
|
||||
}
|
||||
|
||||
// Routes returns the available routes
|
||||
func (r *distributionRouter) Routes() []router.Route {
|
||||
return r.routes
|
||||
}
|
||||
|
||||
// initRoutes initializes the routes in the distribution router
|
||||
func (r *distributionRouter) initRoutes() {
|
||||
r.routes = []router.Route{
|
||||
// GET
|
||||
router.NewGetRoute("/distribution/{name:.*}/json", r.getDistributionInfo),
|
||||
}
|
||||
}
|
||||
138
api/server/router/distribution/distribution_routes.go
Normal file
138
api/server/router/distribution/distribution_routes.go
Normal file
@@ -0,0 +1,138 @@
|
||||
package distribution
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/distribution/manifest/manifestlist"
|
||||
"github.com/docker/distribution/manifest/schema1"
|
||||
"github.com/docker/distribution/manifest/schema2"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/docker/api/server/httputils"
|
||||
"github.com/docker/docker/api/types"
|
||||
registrytypes "github.com/docker/docker/api/types/registry"
|
||||
"github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
func (s *distributionRouter) getDistributionInfo(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
if err := httputils.ParseForm(r); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
|
||||
var (
|
||||
config = &types.AuthConfig{}
|
||||
authEncoded = r.Header.Get("X-Registry-Auth")
|
||||
distributionInspect registrytypes.DistributionInspect
|
||||
)
|
||||
|
||||
if authEncoded != "" {
|
||||
authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
|
||||
if err := json.NewDecoder(authJSON).Decode(&config); err != nil {
|
||||
// for a search it is not an error if no auth was given
|
||||
// to increase compatibility with the existing api it is defaulting to be empty
|
||||
config = &types.AuthConfig{}
|
||||
}
|
||||
}
|
||||
|
||||
image := vars["name"]
|
||||
|
||||
ref, err := reference.ParseAnyReference(image)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
namedRef, ok := ref.(reference.Named)
|
||||
if !ok {
|
||||
if _, ok := ref.(reference.Digested); ok {
|
||||
// full image ID
|
||||
return errors.Errorf("no manifest found for full image ID")
|
||||
}
|
||||
return errors.Errorf("unknown image reference format: %s", image)
|
||||
}
|
||||
|
||||
distrepo, _, err := s.backend.GetRepository(ctx, namedRef, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
blobsrvc := distrepo.Blobs(ctx)
|
||||
|
||||
if canonicalRef, ok := namedRef.(reference.Canonical); !ok {
|
||||
namedRef = reference.TagNameOnly(namedRef)
|
||||
|
||||
taggedRef, ok := namedRef.(reference.NamedTagged)
|
||||
if !ok {
|
||||
return errors.Errorf("image reference not tagged: %s", image)
|
||||
}
|
||||
|
||||
descriptor, err := distrepo.Tags(ctx).Get(ctx, taggedRef.Tag())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
distributionInspect.Descriptor = v1.Descriptor{
|
||||
MediaType: descriptor.MediaType,
|
||||
Digest: descriptor.Digest,
|
||||
Size: descriptor.Size,
|
||||
}
|
||||
} else {
|
||||
// TODO(nishanttotla): Once manifests can be looked up as a blob, the
|
||||
// descriptor should be set using blobsrvc.Stat(ctx, canonicalRef.Digest())
|
||||
// instead of having to manually fill in the fields
|
||||
distributionInspect.Descriptor.Digest = canonicalRef.Digest()
|
||||
}
|
||||
|
||||
// we have a digest, so we can retrieve the manifest
|
||||
mnfstsrvc, err := distrepo.Manifests(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
mnfst, err := mnfstsrvc.Get(ctx, distributionInspect.Descriptor.Digest)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
mediaType, payload, err := mnfst.Payload()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// update MediaType because registry might return something incorrect
|
||||
distributionInspect.Descriptor.MediaType = mediaType
|
||||
if distributionInspect.Descriptor.Size == 0 {
|
||||
distributionInspect.Descriptor.Size = int64(len(payload))
|
||||
}
|
||||
|
||||
// retrieve platform information depending on the type of manifest
|
||||
switch mnfstObj := mnfst.(type) {
|
||||
case *manifestlist.DeserializedManifestList:
|
||||
for _, m := range mnfstObj.Manifests {
|
||||
distributionInspect.Platforms = append(distributionInspect.Platforms, v1.Platform{
|
||||
Architecture: m.Platform.Architecture,
|
||||
OS: m.Platform.OS,
|
||||
OSVersion: m.Platform.OSVersion,
|
||||
OSFeatures: m.Platform.OSFeatures,
|
||||
Variant: m.Platform.Variant,
|
||||
})
|
||||
}
|
||||
case *schema2.DeserializedManifest:
|
||||
configJSON, err := blobsrvc.Get(ctx, mnfstObj.Config.Digest)
|
||||
var platform v1.Platform
|
||||
if err == nil {
|
||||
err := json.Unmarshal(configJSON, &platform)
|
||||
if err == nil && (platform.OS != "" || platform.Architecture != "") {
|
||||
distributionInspect.Platforms = append(distributionInspect.Platforms, platform)
|
||||
}
|
||||
}
|
||||
case *schema1.SignedManifest:
|
||||
platform := v1.Platform{
|
||||
Architecture: mnfstObj.Architecture,
|
||||
OS: "linux",
|
||||
}
|
||||
distributionInspect.Platforms = append(distributionInspect.Platforms, platform)
|
||||
}
|
||||
|
||||
return httputils.WriteJSON(w, http.StatusOK, distributionInspect)
|
||||
}
|
||||
@@ -30,7 +30,7 @@ type imageBackend interface {
|
||||
Images(imageFilters filters.Args, all bool, withExtraAttrs bool) ([]*types.ImageSummary, error)
|
||||
LookupImage(name string) (*types.ImageInspect, error)
|
||||
TagImage(imageName, repository, tag string) error
|
||||
ImagesPrune(pruneFilters filters.Args) (*types.ImagesPruneReport, error)
|
||||
ImagesPrune(ctx context.Context, pruneFilters filters.Args) (*types.ImagesPruneReport, error)
|
||||
}
|
||||
|
||||
type importExportBackend interface {
|
||||
|
||||
@@ -40,10 +40,10 @@ func (r *imageRouter) initRoutes() {
|
||||
// POST
|
||||
router.NewPostRoute("/commit", r.postCommit),
|
||||
router.NewPostRoute("/images/load", r.postImagesLoad),
|
||||
router.Cancellable(router.NewPostRoute("/images/create", r.postImagesCreate)),
|
||||
router.Cancellable(router.NewPostRoute("/images/{name:.*}/push", r.postImagesPush)),
|
||||
router.NewPostRoute("/images/create", r.postImagesCreate, router.WithCancel),
|
||||
router.NewPostRoute("/images/{name:.*}/push", r.postImagesPush, router.WithCancel),
|
||||
router.NewPostRoute("/images/{name:.*}/tag", r.postImagesTag),
|
||||
router.NewPostRoute("/images/prune", r.postImagesPrune),
|
||||
router.NewPostRoute("/images/prune", r.postImagesPrune, router.WithCancel),
|
||||
// DELETE
|
||||
router.NewDeleteRoute("/images/{name:.*}", r.deleteImages),
|
||||
}
|
||||
|
||||
@@ -118,8 +118,7 @@ func (s *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWrite
|
||||
if !output.Flushed() {
|
||||
return err
|
||||
}
|
||||
sf := streamformatter.NewJSONStreamFormatter()
|
||||
output.Write(sf.FormatError(err))
|
||||
output.Write(streamformatter.FormatError(err))
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -164,8 +163,7 @@ func (s *imageRouter) postImagesPush(ctx context.Context, w http.ResponseWriter,
|
||||
if !output.Flushed() {
|
||||
return err
|
||||
}
|
||||
sf := streamformatter.NewJSONStreamFormatter()
|
||||
output.Write(sf.FormatError(err))
|
||||
output.Write(streamformatter.FormatError(err))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -190,8 +188,7 @@ func (s *imageRouter) getImagesGet(ctx context.Context, w http.ResponseWriter, r
|
||||
if !output.Flushed() {
|
||||
return err
|
||||
}
|
||||
sf := streamformatter.NewJSONStreamFormatter()
|
||||
output.Write(sf.FormatError(err))
|
||||
output.Write(streamformatter.FormatError(err))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -207,7 +204,7 @@ func (s *imageRouter) postImagesLoad(ctx context.Context, w http.ResponseWriter,
|
||||
output := ioutils.NewWriteFlusher(w)
|
||||
defer output.Close()
|
||||
if err := s.backend.LoadImage(r.Body, output, quiet); err != nil {
|
||||
output.Write(streamformatter.NewJSONStreamFormatter().FormatError(err))
|
||||
output.Write(streamformatter.FormatError(err))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -336,7 +333,7 @@ func (s *imageRouter) postImagesPrune(ctx context.Context, w http.ResponseWriter
|
||||
return err
|
||||
}
|
||||
|
||||
pruneReport, err := s.backend.ImagesPrune(pruneFilters)
|
||||
pruneReport, err := s.backend.ImagesPrune(ctx, pruneFilters)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -7,6 +7,10 @@ import (
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// RouteWrapper wraps a route with extra functionality.
|
||||
// It is passed in when creating a new route.
|
||||
type RouteWrapper func(r Route) Route
|
||||
|
||||
// localRoute defines an individual API route to connect
|
||||
// with the docker daemon. It implements Route.
|
||||
type localRoute struct {
|
||||
@@ -31,38 +35,42 @@ func (l localRoute) Path() string {
|
||||
}
|
||||
|
||||
// NewRoute initializes a new local route for the router.
|
||||
func NewRoute(method, path string, handler httputils.APIFunc) Route {
|
||||
return localRoute{method, path, handler}
|
||||
func NewRoute(method, path string, handler httputils.APIFunc, opts ...RouteWrapper) Route {
|
||||
var r Route = localRoute{method, path, handler}
|
||||
for _, o := range opts {
|
||||
r = o(r)
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
// NewGetRoute initializes a new route with the http method GET.
|
||||
func NewGetRoute(path string, handler httputils.APIFunc) Route {
|
||||
return NewRoute("GET", path, handler)
|
||||
func NewGetRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route {
|
||||
return NewRoute("GET", path, handler, opts...)
|
||||
}
|
||||
|
||||
// NewPostRoute initializes a new route with the http method POST.
|
||||
func NewPostRoute(path string, handler httputils.APIFunc) Route {
|
||||
return NewRoute("POST", path, handler)
|
||||
func NewPostRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route {
|
||||
return NewRoute("POST", path, handler, opts...)
|
||||
}
|
||||
|
||||
// NewPutRoute initializes a new route with the http method PUT.
|
||||
func NewPutRoute(path string, handler httputils.APIFunc) Route {
|
||||
return NewRoute("PUT", path, handler)
|
||||
func NewPutRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route {
|
||||
return NewRoute("PUT", path, handler, opts...)
|
||||
}
|
||||
|
||||
// NewDeleteRoute initializes a new route with the http method DELETE.
|
||||
func NewDeleteRoute(path string, handler httputils.APIFunc) Route {
|
||||
return NewRoute("DELETE", path, handler)
|
||||
func NewDeleteRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route {
|
||||
return NewRoute("DELETE", path, handler, opts...)
|
||||
}
|
||||
|
||||
// NewOptionsRoute initializes a new route with the http method OPTIONS.
|
||||
func NewOptionsRoute(path string, handler httputils.APIFunc) Route {
|
||||
return NewRoute("OPTIONS", path, handler)
|
||||
func NewOptionsRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route {
|
||||
return NewRoute("OPTIONS", path, handler, opts...)
|
||||
}
|
||||
|
||||
// NewHeadRoute initializes a new route with the http method HEAD.
|
||||
func NewHeadRoute(path string, handler httputils.APIFunc) Route {
|
||||
return NewRoute("HEAD", path, handler)
|
||||
func NewHeadRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route {
|
||||
return NewRoute("HEAD", path, handler, opts...)
|
||||
}
|
||||
|
||||
func cancellableHandler(h httputils.APIFunc) httputils.APIFunc {
|
||||
@@ -85,9 +93,9 @@ func cancellableHandler(h httputils.APIFunc) httputils.APIFunc {
|
||||
}
|
||||
}
|
||||
|
||||
// Cancellable makes new route which embeds http.CloseNotifier feature to
|
||||
// WithCancel makes new route which embeds http.CloseNotifier feature to
|
||||
// context.Context of handler.
|
||||
func Cancellable(r Route) Route {
|
||||
func WithCancel(r Route) Route {
|
||||
return localRoute{
|
||||
method: r.Method(),
|
||||
path: r.Path(),
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
package network
|
||||
|
||||
import (
|
||||
"golang.org/x/net/context"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/docker/docker/api/types/network"
|
||||
@@ -16,5 +18,5 @@ type Backend interface {
|
||||
ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error
|
||||
DisconnectContainerFromNetwork(containerName string, networkName string, force bool) error
|
||||
DeleteNetwork(name string) error
|
||||
NetworksPrune(pruneFilters filters.Args) (*types.NetworksPruneReport, error)
|
||||
NetworksPrune(ctx context.Context, pruneFilters filters.Args) (*types.NetworksPruneReport, error)
|
||||
}
|
||||
|
||||
@@ -37,7 +37,7 @@ func (r *networkRouter) initRoutes() {
|
||||
router.NewPostRoute("/networks/create", r.postNetworkCreate),
|
||||
router.NewPostRoute("/networks/{id:.*}/connect", r.postNetworkConnect),
|
||||
router.NewPostRoute("/networks/{id:.*}/disconnect", r.postNetworkDisconnect),
|
||||
router.NewPostRoute("/networks/prune", r.postNetworksPrune),
|
||||
router.NewPostRoute("/networks/prune", r.postNetworksPrune, router.WithCancel),
|
||||
// DELETE
|
||||
router.NewDeleteRoute("/networks/{id:.*}", r.deleteNetwork),
|
||||
}
|
||||
|
||||
@@ -127,6 +127,15 @@ func (n *networkRouter) getNetwork(ctx context.Context, w http.ResponseWriter, r
|
||||
}
|
||||
}
|
||||
|
||||
nwk, err := n.cluster.GetNetwork(term)
|
||||
if err == nil {
|
||||
// If the get network is passed with a specific network ID / partial network ID
|
||||
// return the network.
|
||||
if strings.HasPrefix(nwk.ID, term) {
|
||||
return httputils.WriteJSON(w, http.StatusOK, nwk)
|
||||
}
|
||||
}
|
||||
|
||||
nr, _ := n.cluster.GetNetworks()
|
||||
for _, network := range nr {
|
||||
if network.ID == term {
|
||||
@@ -283,13 +292,6 @@ func (n *networkRouter) buildNetworkResource(nw libnetwork.Network) *types.Netwo
|
||||
r.ID = nw.ID()
|
||||
r.Created = info.Created()
|
||||
r.Scope = info.Scope()
|
||||
if n.cluster.IsManager() {
|
||||
if _, err := n.cluster.GetNetwork(nw.ID()); err == nil {
|
||||
r.Scope = "swarm"
|
||||
}
|
||||
} else if info.Dynamic() {
|
||||
r.Scope = "swarm"
|
||||
}
|
||||
r.Driver = nw.Type()
|
||||
r.EnableIPv6 = info.IPv6Enabled()
|
||||
r.Internal = info.Internal()
|
||||
@@ -299,6 +301,11 @@ func (n *networkRouter) buildNetworkResource(nw libnetwork.Network) *types.Netwo
|
||||
r.Containers = make(map[string]types.EndpointResource)
|
||||
buildIpamResources(r, info)
|
||||
r.Labels = info.Labels()
|
||||
r.ConfigOnly = info.ConfigOnly()
|
||||
|
||||
if cn := info.ConfigFrom(); cn != "" {
|
||||
r.ConfigFrom = network.ConfigReference{Network: cn}
|
||||
}
|
||||
|
||||
peers := info.Peers()
|
||||
if len(peers) != 0 {
|
||||
@@ -391,7 +398,9 @@ func buildIpamResources(r *types.NetworkResource, nwInfo libnetwork.NetworkInfo)
|
||||
for _, ip4Info := range ipv4Info {
|
||||
iData := network.IPAMConfig{}
|
||||
iData.Subnet = ip4Info.IPAMData.Pool.String()
|
||||
iData.Gateway = ip4Info.IPAMData.Gateway.IP.String()
|
||||
if ip4Info.IPAMData.Gateway != nil {
|
||||
iData.Gateway = ip4Info.IPAMData.Gateway.IP.String()
|
||||
}
|
||||
r.IPAM.Config = append(r.IPAM.Config, iData)
|
||||
}
|
||||
}
|
||||
@@ -412,6 +421,9 @@ func buildIpamResources(r *types.NetworkResource, nwInfo libnetwork.NetworkInfo)
|
||||
|
||||
if !hasIpv6Conf {
|
||||
for _, ip6Info := range ipv6Info {
|
||||
if ip6Info.IPAMData.Pool == nil {
|
||||
continue
|
||||
}
|
||||
iData := network.IPAMConfig{}
|
||||
iData.Subnet = ip6Info.IPAMData.Pool.String()
|
||||
iData.Gateway = ip6Info.IPAMData.Gateway.String()
|
||||
@@ -455,7 +467,7 @@ func (n *networkRouter) postNetworksPrune(ctx context.Context, w http.ResponseWr
|
||||
return err
|
||||
}
|
||||
|
||||
pruneReport, err := n.backend.NetworksPrune(pruneFilters)
|
||||
pruneReport, err := n.backend.NetworksPrune(ctx, pruneFilters)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -30,9 +30,9 @@ func (r *pluginRouter) initRoutes() {
|
||||
router.NewDeleteRoute("/plugins/{name:.*}", r.removePlugin),
|
||||
router.NewPostRoute("/plugins/{name:.*}/enable", r.enablePlugin), // PATCH?
|
||||
router.NewPostRoute("/plugins/{name:.*}/disable", r.disablePlugin),
|
||||
router.Cancellable(router.NewPostRoute("/plugins/pull", r.pullPlugin)),
|
||||
router.Cancellable(router.NewPostRoute("/plugins/{name:.*}/push", r.pushPlugin)),
|
||||
router.Cancellable(router.NewPostRoute("/plugins/{name:.*}/upgrade", r.upgradePlugin)),
|
||||
router.NewPostRoute("/plugins/pull", r.pullPlugin, router.WithCancel),
|
||||
router.NewPostRoute("/plugins/{name:.*}/push", r.pushPlugin, router.WithCancel),
|
||||
router.NewPostRoute("/plugins/{name:.*}/upgrade", r.upgradePlugin, router.WithCancel),
|
||||
router.NewPostRoute("/plugins/{name:.*}/set", r.setPlugin),
|
||||
router.NewPostRoute("/plugins/create", r.createPlugin),
|
||||
}
|
||||
|
||||
@@ -121,7 +121,7 @@ func (pr *pluginRouter) upgradePlugin(ctx context.Context, w http.ResponseWriter
|
||||
if !output.Flushed() {
|
||||
return err
|
||||
}
|
||||
output.Write(streamformatter.NewJSONStreamFormatter().FormatError(err))
|
||||
output.Write(streamformatter.FormatError(err))
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -160,7 +160,7 @@ func (pr *pluginRouter) pullPlugin(ctx context.Context, w http.ResponseWriter, r
|
||||
if !output.Flushed() {
|
||||
return err
|
||||
}
|
||||
output.Write(streamformatter.NewJSONStreamFormatter().FormatError(err))
|
||||
output.Write(streamformatter.FormatError(err))
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -268,7 +268,7 @@ func (pr *pluginRouter) pushPlugin(ctx context.Context, w http.ResponseWriter, r
|
||||
if !output.Flushed() {
|
||||
return err
|
||||
}
|
||||
output.Write(streamformatter.NewJSONStreamFormatter().FormatError(err))
|
||||
output.Write(streamformatter.FormatError(err))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -16,21 +16,32 @@ type Backend interface {
|
||||
Update(uint64, types.Spec, types.UpdateFlags) error
|
||||
GetUnlockKey() (string, error)
|
||||
UnlockSwarm(req types.UnlockRequest) error
|
||||
|
||||
GetServices(basictypes.ServiceListOptions) ([]types.Service, error)
|
||||
GetService(idOrName string, insertDefaults bool) (types.Service, error)
|
||||
CreateService(types.ServiceSpec, string) (*basictypes.ServiceCreateResponse, error)
|
||||
UpdateService(string, uint64, types.ServiceSpec, basictypes.ServiceUpdateOptions) (*basictypes.ServiceUpdateResponse, error)
|
||||
CreateService(types.ServiceSpec, string, bool) (*basictypes.ServiceCreateResponse, error)
|
||||
UpdateService(string, uint64, types.ServiceSpec, basictypes.ServiceUpdateOptions, bool) (*basictypes.ServiceUpdateResponse, error)
|
||||
RemoveService(string) error
|
||||
|
||||
ServiceLogs(context.Context, *backend.LogSelector, *basictypes.ContainerLogsOptions) (<-chan *backend.LogMessage, error)
|
||||
|
||||
GetNodes(basictypes.NodeListOptions) ([]types.Node, error)
|
||||
GetNode(string) (types.Node, error)
|
||||
UpdateNode(string, uint64, types.NodeSpec) error
|
||||
RemoveNode(string, bool) error
|
||||
|
||||
GetTasks(basictypes.TaskListOptions) ([]types.Task, error)
|
||||
GetTask(string) (types.Task, error)
|
||||
|
||||
GetSecrets(opts basictypes.SecretListOptions) ([]types.Secret, error)
|
||||
CreateSecret(s types.SecretSpec) (string, error)
|
||||
RemoveSecret(idOrName string) error
|
||||
GetSecret(id string) (types.Secret, error)
|
||||
UpdateSecret(idOrName string, version uint64, spec types.SecretSpec) error
|
||||
|
||||
GetConfigs(opts basictypes.ConfigListOptions) ([]types.Config, error)
|
||||
CreateConfig(s types.ConfigSpec) (string, error)
|
||||
RemoveConfig(id string) error
|
||||
GetConfig(id string) (types.Config, error)
|
||||
UpdateConfig(idOrName string, version uint64, spec types.ConfigSpec) error
|
||||
}
|
||||
|
||||
@@ -31,23 +31,33 @@ func (sr *swarmRouter) initRoutes() {
|
||||
router.NewGetRoute("/swarm/unlockkey", sr.getUnlockKey),
|
||||
router.NewPostRoute("/swarm/update", sr.updateCluster),
|
||||
router.NewPostRoute("/swarm/unlock", sr.unlockCluster),
|
||||
|
||||
router.NewGetRoute("/services", sr.getServices),
|
||||
router.NewGetRoute("/services/{id}", sr.getService),
|
||||
router.NewPostRoute("/services/create", sr.createService),
|
||||
router.NewPostRoute("/services/{id}/update", sr.updateService),
|
||||
router.NewDeleteRoute("/services/{id}", sr.removeService),
|
||||
router.Cancellable(router.NewGetRoute("/services/{id}/logs", sr.getServiceLogs)),
|
||||
router.NewGetRoute("/services/{id}/logs", sr.getServiceLogs, router.WithCancel),
|
||||
|
||||
router.NewGetRoute("/nodes", sr.getNodes),
|
||||
router.NewGetRoute("/nodes/{id}", sr.getNode),
|
||||
router.NewDeleteRoute("/nodes/{id}", sr.removeNode),
|
||||
router.NewPostRoute("/nodes/{id}/update", sr.updateNode),
|
||||
|
||||
router.NewGetRoute("/tasks", sr.getTasks),
|
||||
router.NewGetRoute("/tasks/{id}", sr.getTask),
|
||||
router.Cancellable(router.NewGetRoute("/tasks/{id}/logs", sr.getTaskLogs)),
|
||||
router.NewGetRoute("/tasks/{id}/logs", sr.getTaskLogs, router.WithCancel),
|
||||
|
||||
router.NewGetRoute("/secrets", sr.getSecrets),
|
||||
router.NewPostRoute("/secrets/create", sr.createSecret),
|
||||
router.NewDeleteRoute("/secrets/{id}", sr.removeSecret),
|
||||
router.NewGetRoute("/secrets/{id}", sr.getSecret),
|
||||
router.NewPostRoute("/secrets/{id}/update", sr.updateSecret),
|
||||
|
||||
router.NewGetRoute("/configs", sr.getConfigs),
|
||||
router.NewPostRoute("/configs/create", sr.createConfig),
|
||||
router.NewDeleteRoute("/configs/{id}", sr.removeConfig),
|
||||
router.NewGetRoute("/configs/{id}", sr.getConfig),
|
||||
router.NewPostRoute("/configs/{id}/update", sr.updateConfig),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
"github.com/docker/docker/api/types/backend"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
types "github.com/docker/docker/api/types/swarm"
|
||||
"github.com/docker/docker/api/types/versions"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
@@ -178,8 +179,13 @@ func (sr *swarmRouter) createService(ctx context.Context, w http.ResponseWriter,
|
||||
|
||||
// Get returns "" if the header does not exist
|
||||
encodedAuth := r.Header.Get("X-Registry-Auth")
|
||||
cliVersion := r.Header.Get("version")
|
||||
queryRegistry := false
|
||||
if cliVersion != "" && versions.LessThan(cliVersion, "1.30") {
|
||||
queryRegistry = true
|
||||
}
|
||||
|
||||
resp, err := sr.backend.CreateService(service, encodedAuth)
|
||||
resp, err := sr.backend.CreateService(service, encodedAuth, queryRegistry)
|
||||
if err != nil {
|
||||
logrus.Errorf("Error creating service %s: %v", service.Name, err)
|
||||
return err
|
||||
@@ -207,8 +213,13 @@ func (sr *swarmRouter) updateService(ctx context.Context, w http.ResponseWriter,
|
||||
flags.EncodedRegistryAuth = r.Header.Get("X-Registry-Auth")
|
||||
flags.RegistryAuthFrom = r.URL.Query().Get("registryAuthFrom")
|
||||
flags.Rollback = r.URL.Query().Get("rollback")
|
||||
cliVersion := r.Header.Get("version")
|
||||
queryRegistry := false
|
||||
if cliVersion != "" && versions.LessThan(cliVersion, "1.30") {
|
||||
queryRegistry = true
|
||||
}
|
||||
|
||||
resp, err := sr.backend.UpdateService(vars["id"], version, service, flags)
|
||||
resp, err := sr.backend.UpdateService(vars["id"], version, service, flags, queryRegistry)
|
||||
if err != nil {
|
||||
logrus.Errorf("Error updating service %s: %v", vars["id"], err)
|
||||
return err
|
||||
@@ -408,3 +419,74 @@ func (sr *swarmRouter) updateSecret(ctx context.Context, w http.ResponseWriter,
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sr *swarmRouter) getConfigs(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
if err := httputils.ParseForm(r); err != nil {
|
||||
return err
|
||||
}
|
||||
filters, err := filters.FromParam(r.Form.Get("filters"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
configs, err := sr.backend.GetConfigs(basictypes.ConfigListOptions{Filters: filters})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return httputils.WriteJSON(w, http.StatusOK, configs)
|
||||
}
|
||||
|
||||
func (sr *swarmRouter) createConfig(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
var config types.ConfigSpec
|
||||
if err := json.NewDecoder(r.Body).Decode(&config); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
id, err := sr.backend.CreateConfig(config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return httputils.WriteJSON(w, http.StatusCreated, &basictypes.ConfigCreateResponse{
|
||||
ID: id,
|
||||
})
|
||||
}
|
||||
|
||||
func (sr *swarmRouter) removeConfig(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
if err := sr.backend.RemoveConfig(vars["id"]); err != nil {
|
||||
return err
|
||||
}
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sr *swarmRouter) getConfig(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
config, err := sr.backend.GetConfig(vars["id"])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return httputils.WriteJSON(w, http.StatusOK, config)
|
||||
}
|
||||
|
||||
func (sr *swarmRouter) updateConfig(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
var config types.ConfigSpec
|
||||
if err := json.NewDecoder(r.Body).Decode(&config); err != nil {
|
||||
return errors.NewBadRequestError(err)
|
||||
}
|
||||
|
||||
rawVersion := r.URL.Query().Get("version")
|
||||
version, err := strconv.ParseUint(rawVersion, 10, 64)
|
||||
if err != nil {
|
||||
return errors.NewBadRequestError(fmt.Errorf("invalid config version"))
|
||||
}
|
||||
|
||||
id := vars["id"]
|
||||
if err := sr.backend.UpdateConfig(id, version, config); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -14,7 +14,7 @@ import (
|
||||
type Backend interface {
|
||||
SystemInfo() (*types.Info, error)
|
||||
SystemVersion() types.Version
|
||||
SystemDiskUsage() (*types.DiskUsage, error)
|
||||
SystemDiskUsage(ctx context.Context) (*types.DiskUsage, error)
|
||||
SubscribeToEvents(since, until time.Time, ef filters.Args) ([]events.Message, chan interface{})
|
||||
UnsubscribeFromEvents(chan interface{})
|
||||
AuthenticateToRegistry(ctx context.Context, authConfig *types.AuthConfig) (string, string, error)
|
||||
|
||||
@@ -23,10 +23,10 @@ func NewRouter(b Backend, c *cluster.Cluster) router.Router {
|
||||
r.routes = []router.Route{
|
||||
router.NewOptionsRoute("/{anyroute:.*}", optionsHandler),
|
||||
router.NewGetRoute("/_ping", pingHandler),
|
||||
router.Cancellable(router.NewGetRoute("/events", r.getEvents)),
|
||||
router.NewGetRoute("/events", r.getEvents, router.WithCancel),
|
||||
router.NewGetRoute("/info", r.getInfo),
|
||||
router.NewGetRoute("/version", r.getVersion),
|
||||
router.NewGetRoute("/system/df", r.getDiskUsage),
|
||||
router.NewGetRoute("/system/df", r.getDiskUsage, router.WithCancel),
|
||||
router.NewPostRoute("/auth", r.postAuth),
|
||||
}
|
||||
|
||||
|
||||
@@ -71,7 +71,7 @@ func (s *systemRouter) getVersion(ctx context.Context, w http.ResponseWriter, r
|
||||
}
|
||||
|
||||
func (s *systemRouter) getDiskUsage(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
du, err := s.backend.SystemDiskUsage()
|
||||
du, err := s.backend.SystemDiskUsage(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
package volume
|
||||
|
||||
import (
|
||||
"golang.org/x/net/context"
|
||||
|
||||
// TODO return types need to be refactored into pkg
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
@@ -13,5 +15,5 @@ type Backend interface {
|
||||
VolumeInspect(name string) (*types.Volume, error)
|
||||
VolumeCreate(name, driverName string, opts, labels map[string]string) (*types.Volume, error)
|
||||
VolumeRm(name string, force bool) error
|
||||
VolumesPrune(pruneFilters filters.Args) (*types.VolumesPruneReport, error)
|
||||
VolumesPrune(ctx context.Context, pruneFilters filters.Args) (*types.VolumesPruneReport, error)
|
||||
}
|
||||
|
||||
@@ -29,7 +29,7 @@ func (r *volumeRouter) initRoutes() {
|
||||
router.NewGetRoute("/volumes/{name:.*}", r.getVolumeByName),
|
||||
// POST
|
||||
router.NewPostRoute("/volumes/create", r.postVolumesCreate),
|
||||
router.NewPostRoute("/volumes/prune", r.postVolumesPrune),
|
||||
router.NewPostRoute("/volumes/prune", r.postVolumesPrune, router.WithCancel),
|
||||
// DELETE
|
||||
router.NewDeleteRoute("/volumes/{name:.*}", r.deleteVolumes),
|
||||
}
|
||||
|
||||
@@ -77,7 +77,7 @@ func (v *volumeRouter) postVolumesPrune(ctx context.Context, w http.ResponseWrit
|
||||
return err
|
||||
}
|
||||
|
||||
pruneReport, err := v.backend.VolumesPrune(pruneFilters)
|
||||
pruneReport, err := v.backend.VolumesPrune(ctx, pruneFilters)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -92,13 +92,12 @@ func (s *Server) serveAPI() error {
|
||||
}(srv)
|
||||
}
|
||||
|
||||
for i := 0; i < len(s.servers); i++ {
|
||||
for range s.servers {
|
||||
err := <-chErrors
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
772
api/swagger.yaml
772
api/swagger.yaml
File diff suppressed because it is too large
Load Diff
@@ -6,7 +6,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/pkg/streamformatter"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
)
|
||||
|
||||
// ContainerAttachConfig holds the streams to use when connecting to a container to view logs.
|
||||
@@ -98,13 +98,7 @@ type ExecProcessConfig struct {
|
||||
type ContainerCommitConfig struct {
|
||||
types.ContainerCommitConfig
|
||||
Changes []string
|
||||
}
|
||||
|
||||
// ProgressWriter is an interface
|
||||
// to transport progress streams.
|
||||
type ProgressWriter struct {
|
||||
Output io.Writer
|
||||
StdoutFormatter *streamformatter.StdoutFormatter
|
||||
StderrFormatter *streamformatter.StderrFormatter
|
||||
ProgressReaderFunc func(io.ReadCloser) io.ReadCloser
|
||||
// TODO: ContainerConfig is only used by the dockerfile Builder, so remove it
|
||||
// once the Builder has been updated to use a different interface
|
||||
ContainerConfig *container.Config
|
||||
}
|
||||
|
||||
31
api/types/backend/build.go
Normal file
31
api/types/backend/build.go
Normal file
@@ -0,0 +1,31 @@
|
||||
package backend
|
||||
|
||||
import (
|
||||
"io"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/pkg/streamformatter"
|
||||
)
|
||||
|
||||
// ProgressWriter is a data object to transport progress streams to the client
|
||||
type ProgressWriter struct {
|
||||
Output io.Writer
|
||||
StdoutFormatter io.Writer
|
||||
StderrFormatter io.Writer
|
||||
AuxFormatter *streamformatter.AuxFormatter
|
||||
ProgressReaderFunc func(io.ReadCloser) io.ReadCloser
|
||||
}
|
||||
|
||||
// BuildConfig is the configuration used by a BuildManager to start a build
|
||||
type BuildConfig struct {
|
||||
Source io.ReadCloser
|
||||
ProgressWriter ProgressWriter
|
||||
Options *types.ImageBuildOptions
|
||||
}
|
||||
|
||||
// GetImageAndLayerOptions are the options supported by GetImageAndReleasableLayer
|
||||
type GetImageAndLayerOptions struct {
|
||||
ForcePull bool
|
||||
AuthConfig map[string]types.AuthConfig
|
||||
Output io.Writer
|
||||
}
|
||||
@@ -97,6 +97,7 @@ type ContainerStartOptions struct {
|
||||
// about files to copy into a container
|
||||
type CopyToContainerOptions struct {
|
||||
AllowOverwriteDirWithFile bool
|
||||
CopyUIDGID bool
|
||||
}
|
||||
|
||||
// EventsOptions holds parameters to filter events with.
|
||||
@@ -275,6 +276,12 @@ type ServiceCreateOptions struct {
|
||||
//
|
||||
// This field follows the format of the X-Registry-Auth header.
|
||||
EncodedRegistryAuth string
|
||||
|
||||
// QueryRegistry indicates whether the service update requires
|
||||
// contacting a registry. A registry may be contacted to retrieve
|
||||
// the image digest and manifest, which in turn can be used to update
|
||||
// platform or other information about the service.
|
||||
QueryRegistry bool
|
||||
}
|
||||
|
||||
// ServiceCreateResponse contains the information returned to a client
|
||||
@@ -314,6 +321,12 @@ type ServiceUpdateOptions struct {
|
||||
// The valid values are "previous" and "none". An empty value is the
|
||||
// same as "none".
|
||||
Rollback string
|
||||
|
||||
// QueryRegistry indicates whether the service update requires
|
||||
// contacting a registry. A registry may be contacted to retrieve
|
||||
// the image digest and manifest, which in turn can be used to update
|
||||
// platform or other information about the service.
|
||||
QueryRegistry bool
|
||||
}
|
||||
|
||||
// ServiceListOptions holds parameters to list services with.
|
||||
|
||||
@@ -7,6 +7,12 @@ import (
|
||||
"github.com/docker/go-connections/nat"
|
||||
)
|
||||
|
||||
// MinimumDuration puts a minimum on user configured duration.
|
||||
// This is to prevent API error on time unit. For example, API may
|
||||
// set 3 as healthcheck interval with intention of 3 seconds, but
|
||||
// Docker interprets it as 3 nanoseconds.
|
||||
const MinimumDuration = 1 * time.Millisecond
|
||||
|
||||
// HealthConfig holds configuration settings for the HEALTHCHECK feature.
|
||||
type HealthConfig struct {
|
||||
// Test is the test to perform to check that the container is healthy.
|
||||
|
||||
@@ -377,7 +377,4 @@ type HostConfig struct {
|
||||
|
||||
// Run a custom init inside the container, if null, use the daemon's configured settings
|
||||
Init *bool `json:",omitempty"`
|
||||
|
||||
// Custom init path
|
||||
InitPath string `json:",omitempty"`
|
||||
}
|
||||
|
||||
22
api/types/container/waitcondition.go
Normal file
22
api/types/container/waitcondition.go
Normal file
@@ -0,0 +1,22 @@
|
||||
package container
|
||||
|
||||
// WaitCondition is a type used to specify a container state for which
|
||||
// to wait.
|
||||
type WaitCondition string
|
||||
|
||||
// Possible WaitCondition Values.
|
||||
//
|
||||
// WaitConditionNotRunning (default) is used to wait for any of the non-running
|
||||
// states: "created", "exited", "dead", "removing", or "removed".
|
||||
//
|
||||
// WaitConditionNextExit is used to wait for the next time the state changes
|
||||
// to a non-running state. If the state is currently "created" or "exited",
|
||||
// this would cause Wait() to block until either the container runs and exits
|
||||
// or is removed.
|
||||
//
|
||||
// WaitConditionRemoved is used to wait for the container to be removed.
|
||||
const (
|
||||
WaitConditionNotRunning WaitCondition = "not-running"
|
||||
WaitConditionNextExit WaitCondition = "next-exit"
|
||||
WaitConditionRemoved WaitCondition = "removed"
|
||||
)
|
||||
@@ -13,6 +13,12 @@ const (
|
||||
PluginEventType = "plugin"
|
||||
// VolumeEventType is the event type that volumes generate
|
||||
VolumeEventType = "volume"
|
||||
// ServiceEventType is the event type that services generate
|
||||
ServiceEventType = "service"
|
||||
// NodeEventType is the event type that nodes generate
|
||||
NodeEventType = "node"
|
||||
// SecretEventType is the event type that secrets generate
|
||||
SecretEventType = "secret"
|
||||
)
|
||||
|
||||
// Actor describes something that generates events,
|
||||
@@ -36,6 +42,8 @@ type Message struct {
|
||||
Type string
|
||||
Action string
|
||||
Actor Actor
|
||||
// Engine events are local scope. Cluster events are swarm scope.
|
||||
Scope string `json:"scope,omitempty"`
|
||||
|
||||
Time int64 `json:"time,omitempty"`
|
||||
TimeNano int64 `json:"timeNano,omitempty"`
|
||||
|
||||
@@ -58,6 +58,7 @@ type EndpointSettings struct {
|
||||
GlobalIPv6Address string
|
||||
GlobalIPv6PrefixLen int
|
||||
MacAddress string
|
||||
DriverOpts map[string]string
|
||||
}
|
||||
|
||||
// Task carries the information about one backend task
|
||||
@@ -100,3 +101,8 @@ func (es *EndpointSettings) Copy() *EndpointSettings {
|
||||
type NetworkingConfig struct {
|
||||
EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network
|
||||
}
|
||||
|
||||
// ConfigReference specifies the source which provides a network's configuration
|
||||
type ConfigReference struct {
|
||||
Network string
|
||||
}
|
||||
|
||||
@@ -11,7 +11,7 @@ type Plugin struct {
|
||||
// Required: true
|
||||
Config PluginConfig `json:"Config"`
|
||||
|
||||
// True when the plugin is running. False when the plugin is not running, only installed.
|
||||
// True if the plugin is running. False if the plugin is not running, only installed.
|
||||
// Required: true
|
||||
Enabled bool `json:"Enabled"`
|
||||
|
||||
|
||||
@@ -3,13 +3,17 @@ package registry
|
||||
import (
|
||||
"encoding/json"
|
||||
"net"
|
||||
|
||||
"github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
// ServiceConfig stores daemon registry services configuration.
|
||||
type ServiceConfig struct {
|
||||
InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"`
|
||||
IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"`
|
||||
Mirrors []string
|
||||
AllowNondistributableArtifactsCIDRs []*NetIPNet
|
||||
AllowNondistributableArtifactsHostnames []string
|
||||
InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"`
|
||||
IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"`
|
||||
Mirrors []string
|
||||
}
|
||||
|
||||
// NetIPNet is the net.IPNet type, which can be marshalled and
|
||||
@@ -102,3 +106,14 @@ type SearchResults struct {
|
||||
// Results is a slice containing the actual results for the search
|
||||
Results []SearchResult `json:"results"`
|
||||
}
|
||||
|
||||
// DistributionInspect describes the result obtained from contacting the
|
||||
// registry to retrieve image metadata
|
||||
type DistributionInspect struct {
|
||||
// Descriptor contains information about the manifest, including
|
||||
// the content addressable digest
|
||||
Descriptor v1.Descriptor
|
||||
// Platforms contains the list of platforms supported by the image,
|
||||
// obtained by parsing the manifest
|
||||
Platforms []v1.Platform
|
||||
}
|
||||
|
||||
@@ -25,3 +25,16 @@ type Driver struct {
|
||||
Name string `json:",omitempty"`
|
||||
Options map[string]string `json:",omitempty"`
|
||||
}
|
||||
|
||||
// TLSInfo represents the TLS information about what CA certificate is trusted,
|
||||
// and who the issuer for a TLS certificate is
|
||||
type TLSInfo struct {
|
||||
// TrustRoot is the trusted CA root certificate in PEM format
|
||||
TrustRoot string `json:",omitempty"`
|
||||
|
||||
// CertIssuer is the raw subject bytes of the issuer
|
||||
CertIssuerSubject []byte `json:",omitempty"`
|
||||
|
||||
// CertIssuerPublicKey is the raw public key bytes of the issuer
|
||||
CertIssuerPublicKey []byte `json:",omitempty"`
|
||||
}
|
||||
|
||||
31
api/types/swarm/config.go
Normal file
31
api/types/swarm/config.go
Normal file
@@ -0,0 +1,31 @@
|
||||
package swarm
|
||||
|
||||
import "os"
|
||||
|
||||
// Config represents a config.
|
||||
type Config struct {
|
||||
ID string
|
||||
Meta
|
||||
Spec ConfigSpec
|
||||
}
|
||||
|
||||
// ConfigSpec represents a config specification from a config in swarm
|
||||
type ConfigSpec struct {
|
||||
Annotations
|
||||
Data []byte `json:",omitempty"`
|
||||
}
|
||||
|
||||
// ConfigReferenceFileTarget is a file target in a config reference
|
||||
type ConfigReferenceFileTarget struct {
|
||||
Name string
|
||||
UID string
|
||||
GID string
|
||||
Mode os.FileMode
|
||||
}
|
||||
|
||||
// ConfigReference is a reference to a config in swarm
|
||||
type ConfigReference struct {
|
||||
File *ConfigReferenceFileTarget
|
||||
ConfigID string
|
||||
ConfigName string
|
||||
}
|
||||
@@ -68,4 +68,5 @@ type ContainerSpec struct {
|
||||
Hosts []string `json:",omitempty"`
|
||||
DNSConfig *DNSConfig `json:",omitempty"`
|
||||
Secrets []*SecretReference `json:",omitempty"`
|
||||
Configs []*ConfigReference `json:",omitempty"`
|
||||
}
|
||||
|
||||
@@ -1,5 +1,9 @@
|
||||
package swarm
|
||||
|
||||
import (
|
||||
"github.com/docker/docker/api/types/network"
|
||||
)
|
||||
|
||||
// Endpoint represents an endpoint.
|
||||
type Endpoint struct {
|
||||
Spec EndpointSpec `json:",omitempty"`
|
||||
@@ -78,18 +82,21 @@ type Network struct {
|
||||
// NetworkSpec represents the spec of a network.
|
||||
type NetworkSpec struct {
|
||||
Annotations
|
||||
DriverConfiguration *Driver `json:",omitempty"`
|
||||
IPv6Enabled bool `json:",omitempty"`
|
||||
Internal bool `json:",omitempty"`
|
||||
Attachable bool `json:",omitempty"`
|
||||
Ingress bool `json:",omitempty"`
|
||||
IPAMOptions *IPAMOptions `json:",omitempty"`
|
||||
DriverConfiguration *Driver `json:",omitempty"`
|
||||
IPv6Enabled bool `json:",omitempty"`
|
||||
Internal bool `json:",omitempty"`
|
||||
Attachable bool `json:",omitempty"`
|
||||
Ingress bool `json:",omitempty"`
|
||||
IPAMOptions *IPAMOptions `json:",omitempty"`
|
||||
ConfigFrom *network.ConfigReference `json:",omitempty"`
|
||||
Scope string `json:",omitempty"`
|
||||
}
|
||||
|
||||
// NetworkAttachmentConfig represents the configuration of a network attachment.
|
||||
type NetworkAttachmentConfig struct {
|
||||
Target string `json:",omitempty"`
|
||||
Aliases []string `json:",omitempty"`
|
||||
Target string `json:",omitempty"`
|
||||
Aliases []string `json:",omitempty"`
|
||||
DriverOpts map[string]string `json:",omitempty"`
|
||||
}
|
||||
|
||||
// NetworkAttachment represents a network attachment.
|
||||
|
||||
@@ -52,6 +52,7 @@ type NodeDescription struct {
|
||||
Platform Platform `json:",omitempty"`
|
||||
Resources Resources `json:",omitempty"`
|
||||
Engine EngineDescription `json:",omitempty"`
|
||||
TLSInfo TLSInfo `json:",omitempty"`
|
||||
}
|
||||
|
||||
// Platform represents the platform (Arch/OS).
|
||||
|
||||
19
api/types/swarm/runtime.go
Normal file
19
api/types/swarm/runtime.go
Normal file
@@ -0,0 +1,19 @@
|
||||
package swarm
|
||||
|
||||
// RuntimeType is the type of runtime used for the TaskSpec
|
||||
type RuntimeType string
|
||||
|
||||
// RuntimeURL is the proto type url
|
||||
type RuntimeURL string
|
||||
|
||||
const (
|
||||
// RuntimeContainer is the container based runtime
|
||||
RuntimeContainer RuntimeType = "container"
|
||||
// RuntimePlugin is the plugin based runtime
|
||||
RuntimePlugin RuntimeType = "plugin"
|
||||
|
||||
// RuntimeURLContainer is the proto url for the container type
|
||||
RuntimeURLContainer RuntimeURL = "types.docker.com/RuntimeContainer"
|
||||
// RuntimeURLPlugin is the proto url for the plugin type
|
||||
RuntimeURLPlugin RuntimeURL = "types.docker.com/RuntimePlugin"
|
||||
)
|
||||
@@ -7,7 +7,9 @@ import "time"
|
||||
type ClusterInfo struct {
|
||||
ID string
|
||||
Meta
|
||||
Spec Spec
|
||||
Spec Spec
|
||||
TLSInfo TLSInfo
|
||||
RootRotationInProgress bool
|
||||
}
|
||||
|
||||
// Swarm represents a swarm.
|
||||
@@ -107,6 +109,16 @@ type CAConfig struct {
|
||||
// ExternalCAs is a list of CAs to which a manager node will make
|
||||
// certificate signing requests for node certificates.
|
||||
ExternalCAs []*ExternalCA `json:",omitempty"`
|
||||
|
||||
// SigningCACert and SigningCAKey specify the desired signing root CA and
|
||||
// root CA key for the swarm. When inspecting the cluster, the key will
|
||||
// be redacted.
|
||||
SigningCACert string `json:",omitempty"`
|
||||
SigningCAKey string `json:",omitempty"`
|
||||
|
||||
// If this value changes, and there is no specified signing cert and key,
|
||||
// then the swarm is forced to generate a new root certificate ane key.
|
||||
ForceRotate uint64 `json:",omitempty"`
|
||||
}
|
||||
|
||||
// ExternalCAProtocol represents type of external CA.
|
||||
@@ -126,12 +138,17 @@ type ExternalCA struct {
|
||||
// Options is a set of additional key/value pairs whose interpretation
|
||||
// depends on the specified CA type.
|
||||
Options map[string]string `json:",omitempty"`
|
||||
|
||||
// CACert specifies which root CA is used by this external CA. This certificate must
|
||||
// be in PEM format.
|
||||
CACert string
|
||||
}
|
||||
|
||||
// InitRequest is the request used to init a swarm.
|
||||
type InitRequest struct {
|
||||
ListenAddr string
|
||||
AdvertiseAddr string
|
||||
DataPathAddr string
|
||||
ForceNewCluster bool
|
||||
Spec Spec
|
||||
AutoLockManagers bool
|
||||
@@ -142,6 +159,7 @@ type InitRequest struct {
|
||||
type JoinRequest struct {
|
||||
ListenAddr string
|
||||
AdvertiseAddr string
|
||||
DataPathAddr string
|
||||
RemoteAddrs []string
|
||||
JoinToken string // accept by secret
|
||||
Availability NodeAvailability
|
||||
|
||||
@@ -65,6 +65,8 @@ type TaskSpec struct {
|
||||
// ForceUpdate is a counter that triggers an update even if no relevant
|
||||
// parameters have been changed.
|
||||
ForceUpdate uint64
|
||||
|
||||
Runtime RuntimeType `json:",omitempty"`
|
||||
}
|
||||
|
||||
// Resources represents resources (CPU/Memory).
|
||||
@@ -83,6 +85,11 @@ type ResourceRequirements struct {
|
||||
type Placement struct {
|
||||
Constraints []string `json:",omitempty"`
|
||||
Preferences []PlacementPreference `json:",omitempty"`
|
||||
|
||||
// Platforms stores all the platforms that the image can run on.
|
||||
// This field is used in the platform filter for scheduling. If empty,
|
||||
// then the platform filter is off, meaning there are no scheduling restrictions.
|
||||
Platforms []Platform `json:",omitempty"`
|
||||
}
|
||||
|
||||
// PlacementPreference provides a way to make the scheduler aware of factors
|
||||
|
||||
@@ -238,6 +238,8 @@ type PluginsInfo struct {
|
||||
Network []string
|
||||
// List of Authorization plugins registered
|
||||
Authorization []string
|
||||
// List of Log plugins registered
|
||||
Log []string
|
||||
}
|
||||
|
||||
// ExecStartCheck is a temp struct used by execStart
|
||||
@@ -275,7 +277,7 @@ type Health struct {
|
||||
// ContainerState stores container's running state
|
||||
// it's part of ContainerJSONBase and will return by "inspect" command
|
||||
type ContainerState struct {
|
||||
Status string
|
||||
Status string // String representation of the container state. Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead"
|
||||
Running bool
|
||||
Paused bool
|
||||
Restarting bool
|
||||
@@ -394,13 +396,15 @@ type NetworkResource struct {
|
||||
Name string // Name is the requested name of the network
|
||||
ID string `json:"Id"` // ID uniquely identifies a network on a single machine
|
||||
Created time.Time // Created is the time the network created
|
||||
Scope string // Scope describes the level at which the network exists (e.g. `global` for cluster-wide or `local` for machine level)
|
||||
Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level)
|
||||
Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`)
|
||||
EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6
|
||||
IPAM network.IPAM // IPAM is the network's IP Address Management
|
||||
Internal bool // Internal represents if the network is used internal only
|
||||
Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode.
|
||||
Ingress bool // Ingress indicates the network is providing the routing-mesh for the swarm cluster.
|
||||
ConfigFrom network.ConfigReference // ConfigFrom specifies the source which will provide the configuration for this network.
|
||||
ConfigOnly bool // ConfigOnly networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services.
|
||||
Containers map[string]EndpointResource // Containers contains endpoints belonging to the network
|
||||
Options map[string]string // Options holds the network specific options to use for when creating the network
|
||||
Labels map[string]string // Labels holds metadata specific to the network being created
|
||||
@@ -428,11 +432,14 @@ type NetworkCreate struct {
|
||||
// which has the same name but it is not guaranteed to catch all name collisions.
|
||||
CheckDuplicate bool
|
||||
Driver string
|
||||
Scope string
|
||||
EnableIPv6 bool
|
||||
IPAM *network.IPAM
|
||||
Internal bool
|
||||
Attachable bool
|
||||
Ingress bool
|
||||
ConfigOnly bool
|
||||
ConfigFrom *network.ConfigReference
|
||||
Options map[string]string
|
||||
Labels map[string]string
|
||||
}
|
||||
@@ -520,6 +527,18 @@ type SecretListOptions struct {
|
||||
Filters filters.Args
|
||||
}
|
||||
|
||||
// ConfigCreateResponse contains the information returned to a client
|
||||
// on the creation of a new config.
|
||||
type ConfigCreateResponse struct {
|
||||
// ID is the id of the created config.
|
||||
ID string
|
||||
}
|
||||
|
||||
// ConfigListOptions holds parameters to list configs
|
||||
type ConfigListOptions struct {
|
||||
Filters filters.Args
|
||||
}
|
||||
|
||||
// PushResult contains the tag, manifest digest, and manifest size from the
|
||||
// push. It's used to signal this information to the trust code in the client
|
||||
// so it can sign the manifest if necessary.
|
||||
@@ -528,3 +547,8 @@ type PushResult struct {
|
||||
Digest string
|
||||
Size int
|
||||
}
|
||||
|
||||
// BuildResult contains the image id of a successful build
|
||||
type BuildResult struct {
|
||||
ID string
|
||||
}
|
||||
|
||||
@@ -44,15 +44,23 @@ type Volume struct {
|
||||
UsageData *VolumeUsageData `json:"UsageData,omitempty"`
|
||||
}
|
||||
|
||||
// VolumeUsageData volume usage data
|
||||
// VolumeUsageData Usage details about the volume. This information is used by the
|
||||
// `GET /system/df` endpoint, and omitted in other endpoints.
|
||||
//
|
||||
// swagger:model VolumeUsageData
|
||||
type VolumeUsageData struct {
|
||||
|
||||
// The number of containers referencing this volume.
|
||||
// The number of containers referencing this volume. This field
|
||||
// is set to `-1` if the reference-count is not available.
|
||||
//
|
||||
// Required: true
|
||||
RefCount int64 `json:"RefCount"`
|
||||
|
||||
// The disk space used by the volume (local driver only)
|
||||
// Amount of disk space used by the volume (in bytes). This information
|
||||
// is only available for volumes created with the `"local"` volume
|
||||
// driver. For volumes created with other volume drivers, this field
|
||||
// is set to `-1` ("not available")
|
||||
//
|
||||
// Required: true
|
||||
Size int64 `json:"Size"`
|
||||
}
|
||||
|
||||
@@ -6,15 +6,13 @@ package builder
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/docker/distribution/reference"
|
||||
"golang.org/x/net/context"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/backend"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/image"
|
||||
"golang.org/x/net/context"
|
||||
containerpkg "github.com/docker/docker/container"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -22,99 +20,25 @@ const (
|
||||
DefaultDockerfileName string = "Dockerfile"
|
||||
)
|
||||
|
||||
// Context represents a file system tree.
|
||||
type Context interface {
|
||||
// Source defines a location that can be used as a source for the ADD/COPY
|
||||
// instructions in the builder.
|
||||
type Source interface {
|
||||
// Root returns root path for accessing source
|
||||
Root() string
|
||||
// Close allows to signal that the filesystem tree won't be used anymore.
|
||||
// For Context implementations using a temporary directory, it is recommended to
|
||||
// delete the temporary directory in Close().
|
||||
Close() error
|
||||
// Stat returns an entry corresponding to path if any.
|
||||
// It is recommended to return an error if path was not found.
|
||||
// If path is a symlink it also returns the path to the target file.
|
||||
Stat(path string) (string, FileInfo, error)
|
||||
// Open opens path from the context and returns a readable stream of it.
|
||||
Open(path string) (io.ReadCloser, error)
|
||||
// Walk walks the tree of the context with the function passed to it.
|
||||
Walk(root string, walkFn WalkFunc) error
|
||||
}
|
||||
|
||||
// WalkFunc is the type of the function called for each file or directory visited by Context.Walk().
|
||||
type WalkFunc func(path string, fi FileInfo, err error) error
|
||||
|
||||
// ModifiableContext represents a modifiable Context.
|
||||
// TODO: remove this interface once we can get rid of Remove()
|
||||
type ModifiableContext interface {
|
||||
Context
|
||||
// Remove deletes the entry specified by `path`.
|
||||
// It is usual for directory entries to delete all its subentries.
|
||||
Remove(path string) error
|
||||
}
|
||||
|
||||
// FileInfo extends os.FileInfo to allow retrieving an absolute path to the file.
|
||||
// TODO: remove this interface once pkg/archive exposes a walk function that Context can use.
|
||||
type FileInfo interface {
|
||||
os.FileInfo
|
||||
Path() string
|
||||
}
|
||||
|
||||
// PathFileInfo is a convenience struct that implements the FileInfo interface.
|
||||
type PathFileInfo struct {
|
||||
os.FileInfo
|
||||
// FilePath holds the absolute path to the file.
|
||||
FilePath string
|
||||
// FileName holds the basename for the file.
|
||||
FileName string
|
||||
}
|
||||
|
||||
// Path returns the absolute path to the file.
|
||||
func (fi PathFileInfo) Path() string {
|
||||
return fi.FilePath
|
||||
}
|
||||
|
||||
// Name returns the basename of the file.
|
||||
func (fi PathFileInfo) Name() string {
|
||||
if fi.FileName != "" {
|
||||
return fi.FileName
|
||||
}
|
||||
return fi.FileInfo.Name()
|
||||
}
|
||||
|
||||
// Hashed defines an extra method intended for implementations of os.FileInfo.
|
||||
type Hashed interface {
|
||||
// Hash returns the hash of a file.
|
||||
Hash() string
|
||||
SetHash(string)
|
||||
}
|
||||
|
||||
// HashedFileInfo is a convenient struct that augments FileInfo with a field.
|
||||
type HashedFileInfo struct {
|
||||
FileInfo
|
||||
// FileHash represents the hash of a file.
|
||||
FileHash string
|
||||
}
|
||||
|
||||
// Hash returns the hash of a file.
|
||||
func (fi HashedFileInfo) Hash() string {
|
||||
return fi.FileHash
|
||||
}
|
||||
|
||||
// SetHash sets the hash of a file.
|
||||
func (fi *HashedFileInfo) SetHash(h string) {
|
||||
fi.FileHash = h
|
||||
// Hash returns a checksum for a file
|
||||
Hash(path string) (string, error)
|
||||
}
|
||||
|
||||
// Backend abstracts calls to a Docker Daemon.
|
||||
type Backend interface {
|
||||
// TODO: use digest reference instead of name
|
||||
ImageBackend
|
||||
|
||||
// GetImageOnBuild looks up a Docker image referenced by `name`.
|
||||
GetImageOnBuild(name string) (Image, error)
|
||||
// TagImageWithReference tags an image with newTag
|
||||
TagImageWithReference(image.ID, reference.Named) error
|
||||
// PullOnBuild tells Docker to pull image referenced by `name`.
|
||||
PullOnBuild(ctx context.Context, name string, authConfigs map[string]types.AuthConfig, output io.Writer) (Image, error)
|
||||
// ContainerAttachRaw attaches to container.
|
||||
ContainerAttachRaw(cID string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool) error
|
||||
ContainerAttachRaw(cID string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool, attached chan struct{}) error
|
||||
// ContainerCreate creates a new Docker container and returns potential warnings
|
||||
ContainerCreate(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error)
|
||||
// ContainerRm removes a container specified by `id`.
|
||||
@@ -126,35 +50,26 @@ type Backend interface {
|
||||
// ContainerStart starts a new container
|
||||
ContainerStart(containerID string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error
|
||||
// ContainerWait stops processing until the given container is stopped.
|
||||
ContainerWait(containerID string, timeout time.Duration) (int, error)
|
||||
// ContainerUpdateCmdOnBuild updates container.Path and container.Args
|
||||
ContainerUpdateCmdOnBuild(containerID string, cmd []string) error
|
||||
ContainerWait(ctx context.Context, name string, condition containerpkg.WaitCondition) (<-chan containerpkg.StateStatus, error)
|
||||
// ContainerCreateWorkdir creates the workdir
|
||||
ContainerCreateWorkdir(containerID string) error
|
||||
|
||||
// ContainerCopy copies/extracts a source FileInfo to a destination path inside a container
|
||||
// specified by a container object.
|
||||
// TODO: make an Extract method instead of passing `decompress`
|
||||
// TODO: do not pass a FileInfo, instead refactor the archive package to export a Walk function that can be used
|
||||
// with Context.Walk
|
||||
// ContainerCopy(name string, res string) (io.ReadCloser, error)
|
||||
// TODO: use copyBackend api
|
||||
CopyOnBuild(containerID string, destPath string, src FileInfo, decompress bool) error
|
||||
|
||||
// HasExperimental checks if the backend supports experimental features
|
||||
HasExperimental() bool
|
||||
|
||||
// SquashImage squashes the fs layers from the provided image down to the specified `to` image
|
||||
SquashImage(from string, to string) (string, error)
|
||||
|
||||
// MountImage returns mounted path with rootfs of an image.
|
||||
MountImage(name string) (string, func() error, error)
|
||||
// TODO: extract in the builder instead of passing `decompress`
|
||||
// TODO: use containerd/fs.changestream instead as a source
|
||||
CopyOnBuild(containerID string, destPath string, srcRoot string, srcPath string, decompress bool) error
|
||||
}
|
||||
|
||||
// Image represents a Docker image used by the builder.
|
||||
type Image interface {
|
||||
ImageID() string
|
||||
RunConfig() *container.Config
|
||||
// ImageBackend are the interface methods required from an image component
|
||||
type ImageBackend interface {
|
||||
GetImageAndReleasableLayer(ctx context.Context, refOrID string, opts backend.GetImageAndLayerOptions) (Image, ReleaseableLayer, error)
|
||||
}
|
||||
|
||||
// Result is the output produced by a Builder
|
||||
type Result struct {
|
||||
ImageID string
|
||||
FromImage Image
|
||||
}
|
||||
|
||||
// ImageCacheBuilder represents a generator for stateful image cache.
|
||||
@@ -170,3 +85,15 @@ type ImageCache interface {
|
||||
// and runconfig equals `cfg`. A cache miss is expected to return an empty ID and a nil error.
|
||||
GetCache(parentID string, cfg *container.Config) (imageID string, err error)
|
||||
}
|
||||
|
||||
// Image represents a Docker image used by the builder.
|
||||
type Image interface {
|
||||
ImageID() string
|
||||
RunConfig() *container.Config
|
||||
}
|
||||
|
||||
// ReleaseableLayer is an image layer that can be mounted and released
|
||||
type ReleaseableLayer interface {
|
||||
Release() error
|
||||
Mount() (string, error)
|
||||
}
|
||||
|
||||
@@ -37,6 +37,13 @@ func NewBFlags() *BFlags {
|
||||
}
|
||||
}
|
||||
|
||||
// NewBFlagsWithArgs returns the new BFlags struct with Args set to args
|
||||
func NewBFlagsWithArgs(args []string) *BFlags {
|
||||
flags := NewBFlags()
|
||||
flags.Args = args
|
||||
return flags
|
||||
}
|
||||
|
||||
// AddBool adds a bool flag to BFlags
|
||||
// Note, any error will be generated when Parse() is called (see Parse).
|
||||
func (bf *BFlags) AddBool(name string, def bool) *Flag {
|
||||
|
||||
@@ -1,5 +1,11 @@
|
||||
package dockerfile
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/docker/docker/runconfig/opts"
|
||||
"io"
|
||||
)
|
||||
|
||||
// builtinAllowedBuildArgs is list of built-in allowed build args
|
||||
// these args are considered transparent and are excluded from the image history.
|
||||
// Filtering from history is implemented in dispatchers.go
|
||||
@@ -35,16 +41,20 @@ func newBuildArgs(argsFromOptions map[string]*string) *buildArgs {
|
||||
}
|
||||
}
|
||||
|
||||
// UnreferencedOptionArgs returns the list of args that were set from options but
|
||||
// were never referenced from the Dockerfile
|
||||
func (b *buildArgs) UnreferencedOptionArgs() []string {
|
||||
// WarnOnUnusedBuildArgs checks if there are any leftover build-args that were
|
||||
// passed but not consumed during build. Print a warning, if there are any.
|
||||
func (b *buildArgs) WarnOnUnusedBuildArgs(out io.Writer) {
|
||||
leftoverArgs := []string{}
|
||||
for arg := range b.argsFromOptions {
|
||||
if _, ok := b.referencedArgs[arg]; !ok {
|
||||
_, isReferenced := b.referencedArgs[arg]
|
||||
_, isBuiltin := builtinAllowedBuildArgs[arg]
|
||||
if !isBuiltin && !isReferenced {
|
||||
leftoverArgs = append(leftoverArgs, arg)
|
||||
}
|
||||
}
|
||||
return leftoverArgs
|
||||
if len(leftoverArgs) > 0 {
|
||||
fmt.Fprintf(out, "[Warning] One or more build-args %v were not consumed\n", leftoverArgs)
|
||||
}
|
||||
}
|
||||
|
||||
// ResetAllowed clears the list of args that are allowed to be used by a
|
||||
@@ -64,13 +74,13 @@ func (b *buildArgs) AddArg(key string, value *string) {
|
||||
b.referencedArgs[key] = struct{}{}
|
||||
}
|
||||
|
||||
// IsUnreferencedBuiltin checks if the key is a built-in arg, or if it has been
|
||||
// referenced by the Dockerfile. Returns true if the arg is a builtin that has
|
||||
// not been referenced in the Dockerfile.
|
||||
func (b *buildArgs) IsUnreferencedBuiltin(key string) bool {
|
||||
// IsReferencedOrNotBuiltin checks if the key is a built-in arg, or if it has been
|
||||
// referenced by the Dockerfile. Returns true if the arg is not a builtin or
|
||||
// if the builtin has been referenced in the Dockerfile.
|
||||
func (b *buildArgs) IsReferencedOrNotBuiltin(key string) bool {
|
||||
_, isBuiltin := builtinAllowedBuildArgs[key]
|
||||
_, isAllowed := b.allowedBuildArgs[key]
|
||||
return isBuiltin && !isAllowed
|
||||
return isAllowed || !isBuiltin
|
||||
}
|
||||
|
||||
// GetAllAllowed returns a mapping with all the allowed args
|
||||
@@ -96,6 +106,19 @@ func (b *buildArgs) getAllFromMapping(source map[string]*string) map[string]stri
|
||||
return m
|
||||
}
|
||||
|
||||
// FilterAllowed returns all allowed args without the filtered args
|
||||
func (b *buildArgs) FilterAllowed(filter []string) []string {
|
||||
envs := []string{}
|
||||
configEnv := opts.ConvertKVStringsToMap(filter)
|
||||
|
||||
for key, val := range b.GetAllAllowed() {
|
||||
if _, ok := configEnv[key]; !ok {
|
||||
envs = append(envs, fmt.Sprintf("%s=%s", key, val))
|
||||
}
|
||||
}
|
||||
return envs
|
||||
}
|
||||
|
||||
func (b *buildArgs) getBuildArg(key string, mapping map[string]*string) (string, bool) {
|
||||
defaultValue, exists := mapping[key]
|
||||
// Return override from options if one is defined
|
||||
|
||||
@@ -1,8 +1,10 @@
|
||||
package dockerfile
|
||||
|
||||
import (
|
||||
"github.com/docker/docker/pkg/testutil/assert"
|
||||
"testing"
|
||||
|
||||
"bytes"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func strPtr(source string) *string {
|
||||
@@ -37,7 +39,7 @@ func TestGetAllAllowed(t *testing.T) {
|
||||
"ArgFromMeta": "frommeta1",
|
||||
"ArgFromMetaOverriden": "fromdockerfile3",
|
||||
}
|
||||
assert.DeepEqual(t, all, expected)
|
||||
assert.Equal(t, expected, all)
|
||||
}
|
||||
|
||||
func TestGetAllMeta(t *testing.T) {
|
||||
@@ -59,5 +61,40 @@ func TestGetAllMeta(t *testing.T) {
|
||||
"ArgOverriddenByOptions": "fromopt2",
|
||||
"ArgNoDefaultInMetaFromOptions": "fromopt3",
|
||||
}
|
||||
assert.DeepEqual(t, all, expected)
|
||||
assert.Equal(t, expected, all)
|
||||
}
|
||||
|
||||
func TestWarnOnUnusedBuildArgs(t *testing.T) {
|
||||
buildArgs := newBuildArgs(map[string]*string{
|
||||
"ThisArgIsUsed": strPtr("fromopt1"),
|
||||
"ThisArgIsNotUsed": strPtr("fromopt2"),
|
||||
"HTTPS_PROXY": strPtr("referenced builtin"),
|
||||
"HTTP_PROXY": strPtr("unreferenced builtin"),
|
||||
})
|
||||
buildArgs.AddArg("ThisArgIsUsed", nil)
|
||||
buildArgs.AddArg("HTTPS_PROXY", nil)
|
||||
|
||||
buffer := new(bytes.Buffer)
|
||||
buildArgs.WarnOnUnusedBuildArgs(buffer)
|
||||
out := buffer.String()
|
||||
assert.NotContains(t, out, "ThisArgIsUsed")
|
||||
assert.NotContains(t, out, "HTTPS_PROXY")
|
||||
assert.NotContains(t, out, "HTTP_PROXY")
|
||||
assert.Contains(t, out, "ThisArgIsNotUsed")
|
||||
}
|
||||
|
||||
func TestIsUnreferencedBuiltin(t *testing.T) {
|
||||
buildArgs := newBuildArgs(map[string]*string{
|
||||
"ThisArgIsUsed": strPtr("fromopt1"),
|
||||
"ThisArgIsNotUsed": strPtr("fromopt2"),
|
||||
"HTTPS_PROXY": strPtr("referenced builtin"),
|
||||
"HTTP_PROXY": strPtr("unreferenced builtin"),
|
||||
})
|
||||
buildArgs.AddArg("ThisArgIsUsed", nil)
|
||||
buildArgs.AddArg("HTTPS_PROXY", nil)
|
||||
|
||||
assert.True(t, buildArgs.IsReferencedOrNotBuiltin("ThisArgIsUsed"))
|
||||
assert.True(t, buildArgs.IsReferencedOrNotBuiltin("ThisArgIsNotUsed"))
|
||||
assert.True(t, buildArgs.IsReferencedOrNotBuiltin("HTTPS_PROXY"))
|
||||
assert.False(t, buildArgs.IsReferencedOrNotBuiltin("HTTP_PROXY"))
|
||||
}
|
||||
|
||||
@@ -2,26 +2,24 @@ package dockerfile
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/docker/distribution/reference"
|
||||
apierrors "github.com/docker/docker/api/errors"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/backend"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/builder"
|
||||
"github.com/docker/docker/builder/dockerfile/command"
|
||||
"github.com/docker/docker/builder/dockerfile/parser"
|
||||
"github.com/docker/docker/image"
|
||||
"github.com/docker/docker/builder/remotecontext"
|
||||
"github.com/docker/docker/pkg/streamformatter"
|
||||
"github.com/docker/docker/pkg/stringid"
|
||||
perrors "github.com/pkg/errors"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/sync/syncmap"
|
||||
)
|
||||
|
||||
var validCommitCommands = map[string]bool{
|
||||
@@ -39,6 +37,56 @@ var validCommitCommands = map[string]bool{
|
||||
|
||||
var defaultLogConfig = container.LogConfig{Type: "none"}
|
||||
|
||||
// BuildManager is shared across all Builder objects
|
||||
type BuildManager struct {
|
||||
backend builder.Backend
|
||||
pathCache pathCache // TODO: make this persistent
|
||||
}
|
||||
|
||||
// NewBuildManager creates a BuildManager
|
||||
func NewBuildManager(b builder.Backend) *BuildManager {
|
||||
return &BuildManager{
|
||||
backend: b,
|
||||
pathCache: &syncmap.Map{},
|
||||
}
|
||||
}
|
||||
|
||||
// Build starts a new build from a BuildConfig
|
||||
func (bm *BuildManager) Build(ctx context.Context, config backend.BuildConfig) (*builder.Result, error) {
|
||||
buildsTriggered.Inc()
|
||||
if config.Options.Dockerfile == "" {
|
||||
config.Options.Dockerfile = builder.DefaultDockerfileName
|
||||
}
|
||||
|
||||
source, dockerfile, err := remotecontext.Detect(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if source != nil {
|
||||
defer func() {
|
||||
if err := source.Close(); err != nil {
|
||||
logrus.Debugf("[BUILDER] failed to remove temporary context: %v", err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
builderOptions := builderOptions{
|
||||
Options: config.Options,
|
||||
ProgressWriter: config.ProgressWriter,
|
||||
Backend: bm.backend,
|
||||
PathCache: bm.pathCache,
|
||||
}
|
||||
return newBuilder(ctx, builderOptions).build(source, dockerfile)
|
||||
}
|
||||
|
||||
// builderOptions are the dependencies required by the builder
|
||||
type builderOptions struct {
|
||||
Options *types.ImageBuildOptions
|
||||
Backend builder.Backend
|
||||
ProgressWriter backend.ProgressWriter
|
||||
PathCache pathCache
|
||||
}
|
||||
|
||||
// Builder is a Dockerfile builder
|
||||
// It implements the builder.Backend interface.
|
||||
type Builder struct {
|
||||
@@ -46,244 +94,145 @@ type Builder struct {
|
||||
|
||||
Stdout io.Writer
|
||||
Stderr io.Writer
|
||||
Aux *streamformatter.AuxFormatter
|
||||
Output io.Writer
|
||||
|
||||
docker builder.Backend
|
||||
context builder.Context
|
||||
clientCtx context.Context
|
||||
cancel context.CancelFunc
|
||||
|
||||
runConfig *container.Config // runconfig for cmd, run, entrypoint etc.
|
||||
flags *BFlags
|
||||
tmpContainers map[string]struct{}
|
||||
image string // imageID
|
||||
imageContexts *imageContexts // helper for storing contexts from builds
|
||||
noBaseImage bool // A flag to track the use of `scratch` as the base image
|
||||
maintainer string
|
||||
cmdSet bool
|
||||
buildStages *buildStages
|
||||
disableCommit bool
|
||||
cacheBusted bool
|
||||
buildArgs *buildArgs
|
||||
directive parser.Directive
|
||||
|
||||
// TODO: remove once docker.Commit can receive a tag
|
||||
id string
|
||||
|
||||
imageCache builder.ImageCache
|
||||
from builder.Image
|
||||
imageCache builder.ImageCache
|
||||
imageSources *imageSources
|
||||
pathCache pathCache
|
||||
}
|
||||
|
||||
// BuildManager implements builder.Backend and is shared across all Builder objects.
|
||||
type BuildManager struct {
|
||||
backend builder.Backend
|
||||
pathCache *pathCache // TODO: make this persistent
|
||||
}
|
||||
|
||||
// NewBuildManager creates a BuildManager.
|
||||
func NewBuildManager(b builder.Backend) (bm *BuildManager) {
|
||||
return &BuildManager{backend: b, pathCache: &pathCache{}}
|
||||
}
|
||||
|
||||
// BuildFromContext builds a new image from a given context.
|
||||
func (bm *BuildManager) BuildFromContext(ctx context.Context, src io.ReadCloser, remote string, buildOptions *types.ImageBuildOptions, pg backend.ProgressWriter) (string, error) {
|
||||
if buildOptions.Squash && !bm.backend.HasExperimental() {
|
||||
return "", apierrors.NewBadRequestError(errors.New("squash is only supported with experimental mode"))
|
||||
}
|
||||
buildContext, dockerfileName, err := builder.DetectContextFromRemoteURL(src, remote, pg.ProgressReaderFunc)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer func() {
|
||||
if err := buildContext.Close(); err != nil {
|
||||
logrus.Debugf("[BUILDER] failed to remove temporary context: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
if len(dockerfileName) > 0 {
|
||||
buildOptions.Dockerfile = dockerfileName
|
||||
}
|
||||
b, err := NewBuilder(ctx, buildOptions, bm.backend, builder.DockerIgnoreContext{ModifiableContext: buildContext})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
b.imageContexts.cache = bm.pathCache
|
||||
return b.build(pg.StdoutFormatter, pg.StderrFormatter, pg.Output)
|
||||
}
|
||||
|
||||
// NewBuilder creates a new Dockerfile builder from an optional dockerfile and a Config.
|
||||
// If dockerfile is nil, the Dockerfile specified by Config.DockerfileName,
|
||||
// will be read from the Context passed to Build().
|
||||
func NewBuilder(clientCtx context.Context, config *types.ImageBuildOptions, backend builder.Backend, buildContext builder.Context) (b *Builder, err error) {
|
||||
// newBuilder creates a new Dockerfile builder from an optional dockerfile and a Options.
|
||||
func newBuilder(clientCtx context.Context, options builderOptions) *Builder {
|
||||
config := options.Options
|
||||
if config == nil {
|
||||
config = new(types.ImageBuildOptions)
|
||||
}
|
||||
ctx, cancel := context.WithCancel(clientCtx)
|
||||
b = &Builder{
|
||||
clientCtx: ctx,
|
||||
cancel: cancel,
|
||||
b := &Builder{
|
||||
clientCtx: clientCtx,
|
||||
options: config,
|
||||
Stdout: os.Stdout,
|
||||
Stderr: os.Stderr,
|
||||
docker: backend,
|
||||
context: buildContext,
|
||||
runConfig: new(container.Config),
|
||||
Stdout: options.ProgressWriter.StdoutFormatter,
|
||||
Stderr: options.ProgressWriter.StderrFormatter,
|
||||
Aux: options.ProgressWriter.AuxFormatter,
|
||||
Output: options.ProgressWriter.Output,
|
||||
docker: options.Backend,
|
||||
tmpContainers: map[string]struct{}{},
|
||||
id: stringid.GenerateNonCryptoID(),
|
||||
buildArgs: newBuildArgs(config.BuildArgs),
|
||||
directive: parser.Directive{
|
||||
EscapeSeen: false,
|
||||
LookingForDirectives: true,
|
||||
},
|
||||
buildStages: newBuildStages(),
|
||||
imageSources: newImageSources(clientCtx, options),
|
||||
pathCache: options.PathCache,
|
||||
}
|
||||
b.imageContexts = &imageContexts{b: b}
|
||||
|
||||
parser.SetEscapeToken(parser.DefaultEscapeToken, &b.directive) // Assume the default token for escape
|
||||
return b, nil
|
||||
return b
|
||||
}
|
||||
|
||||
func (b *Builder) resetImageCache() {
|
||||
if icb, ok := b.docker.(builder.ImageCacheBuilder); ok {
|
||||
b.imageCache = icb.MakeImageCache(b.options.CacheFrom)
|
||||
}
|
||||
b.noBaseImage = false
|
||||
b.cacheBusted = false
|
||||
}
|
||||
|
||||
// sanitizeRepoAndTags parses the raw "t" parameter received from the client
|
||||
// to a slice of repoAndTag.
|
||||
// It also validates each repoName and tag.
|
||||
func sanitizeRepoAndTags(names []string) ([]reference.Named, error) {
|
||||
var (
|
||||
repoAndTags []reference.Named
|
||||
// This map is used for deduplicating the "-t" parameter.
|
||||
uniqNames = make(map[string]struct{})
|
||||
)
|
||||
for _, repo := range names {
|
||||
if repo == "" {
|
||||
continue
|
||||
}
|
||||
// Build runs the Dockerfile builder by parsing the Dockerfile and executing
|
||||
// the instructions from the file.
|
||||
func (b *Builder) build(source builder.Source, dockerfile *parser.Result) (*builder.Result, error) {
|
||||
defer b.imageSources.Unmount()
|
||||
|
||||
ref, err := reference.ParseNormalizedNamed(repo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
addNodesForLabelOption(dockerfile.AST, b.options.Labels)
|
||||
|
||||
if _, isCanonical := ref.(reference.Canonical); isCanonical {
|
||||
return nil, errors.New("build tag cannot contain a digest")
|
||||
}
|
||||
|
||||
ref = reference.TagNameOnly(ref)
|
||||
|
||||
nameWithTag := ref.String()
|
||||
|
||||
if _, exists := uniqNames[nameWithTag]; !exists {
|
||||
uniqNames[nameWithTag] = struct{}{}
|
||||
repoAndTags = append(repoAndTags, ref)
|
||||
}
|
||||
if err := checkDispatchDockerfile(dockerfile.AST); err != nil {
|
||||
buildsFailed.WithValues(metricsDockerfileSyntaxError).Inc()
|
||||
return nil, err
|
||||
}
|
||||
return repoAndTags, nil
|
||||
|
||||
dispatchState, err := b.dispatchDockerfileWithCancellation(dockerfile, source)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if b.options.Target != "" && !dispatchState.isCurrentStage(b.options.Target) {
|
||||
buildsFailed.WithValues(metricsBuildTargetNotReachableError).Inc()
|
||||
return nil, errors.Errorf("failed to reach build target %s in Dockerfile", b.options.Target)
|
||||
}
|
||||
|
||||
b.buildArgs.WarnOnUnusedBuildArgs(b.Stderr)
|
||||
|
||||
if dispatchState.imageID == "" {
|
||||
buildsFailed.WithValues(metricsDockerfileEmptyError).Inc()
|
||||
return nil, errors.New("No image was generated. Is your Dockerfile empty?")
|
||||
}
|
||||
return &builder.Result{ImageID: dispatchState.imageID, FromImage: dispatchState.baseImage}, nil
|
||||
}
|
||||
|
||||
// build runs the Dockerfile builder from a context and a docker object that allows to make calls
|
||||
// to Docker.
|
||||
//
|
||||
// This will (barring errors):
|
||||
//
|
||||
// * read the dockerfile from context
|
||||
// * parse the dockerfile if not already parsed
|
||||
// * walk the AST and execute it by dispatching to handlers. If Remove
|
||||
// or ForceRemove is set, additional cleanup around containers happens after
|
||||
// processing.
|
||||
// * Tag image, if applicable.
|
||||
// * Print a happy message and return the image ID.
|
||||
//
|
||||
func (b *Builder) build(stdout io.Writer, stderr io.Writer, out io.Writer) (string, error) {
|
||||
defer b.imageContexts.unmount()
|
||||
|
||||
b.Stdout = stdout
|
||||
b.Stderr = stderr
|
||||
b.Output = out
|
||||
|
||||
dockerfile, err := b.readDockerfile()
|
||||
if err != nil {
|
||||
return "", err
|
||||
func emitImageID(aux *streamformatter.AuxFormatter, state *dispatchState) error {
|
||||
if aux == nil || state.imageID == "" {
|
||||
return nil
|
||||
}
|
||||
return aux.Emit(types.BuildResult{ID: state.imageID})
|
||||
}
|
||||
|
||||
repoAndTags, err := sanitizeRepoAndTags(b.options.Tags)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
addNodesForLabelOption(dockerfile, b.options.Labels)
|
||||
|
||||
var shortImgID string
|
||||
total := len(dockerfile.Children)
|
||||
for _, n := range dockerfile.Children {
|
||||
if err := b.checkDispatch(n, false); err != nil {
|
||||
return "", perrors.Wrapf(err, "Dockerfile parse error line %d", n.StartLine)
|
||||
}
|
||||
}
|
||||
|
||||
for i, n := range dockerfile.Children {
|
||||
func (b *Builder) dispatchDockerfileWithCancellation(dockerfile *parser.Result, source builder.Source) (*dispatchState, error) {
|
||||
shlex := NewShellLex(dockerfile.EscapeToken)
|
||||
state := newDispatchState()
|
||||
total := len(dockerfile.AST.Children)
|
||||
var err error
|
||||
for i, n := range dockerfile.AST.Children {
|
||||
select {
|
||||
case <-b.clientCtx.Done():
|
||||
logrus.Debug("Builder: build cancelled!")
|
||||
fmt.Fprint(b.Stdout, "Build cancelled")
|
||||
return "", errors.New("Build cancelled")
|
||||
buildsFailed.WithValues(metricsBuildCanceled).Inc()
|
||||
return nil, errors.New("Build cancelled")
|
||||
default:
|
||||
// Not cancelled yet, keep going...
|
||||
}
|
||||
|
||||
if command.From == n.Value && b.imageContexts.isCurrentTarget(b.options.Target) {
|
||||
// If this is a FROM and we have a previous image then
|
||||
// emit an aux message for that image since it is the
|
||||
// end of the previous stage
|
||||
if n.Value == command.From {
|
||||
if err := emitImageID(b.Aux, state); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if n.Value == command.From && state.isCurrentStage(b.options.Target) {
|
||||
break
|
||||
}
|
||||
|
||||
if err := b.dispatch(i, total, n); err != nil {
|
||||
opts := dispatchOptions{
|
||||
state: state,
|
||||
stepMsg: formatStep(i, total),
|
||||
node: n,
|
||||
shlex: shlex,
|
||||
source: source,
|
||||
}
|
||||
if state, err = b.dispatch(opts); err != nil {
|
||||
if b.options.ForceRemove {
|
||||
b.clearTmp()
|
||||
}
|
||||
return "", err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
shortImgID = stringid.TruncateID(b.image)
|
||||
fmt.Fprintf(b.Stdout, " ---> %s\n", shortImgID)
|
||||
fmt.Fprintf(b.Stdout, " ---> %s\n", stringid.TruncateID(state.imageID))
|
||||
if b.options.Remove {
|
||||
b.clearTmp()
|
||||
}
|
||||
}
|
||||
|
||||
if b.options.Target != "" && !b.imageContexts.isCurrentTarget(b.options.Target) {
|
||||
return "", perrors.Errorf("failed to reach build target %s in Dockerfile", b.options.Target)
|
||||
// Emit a final aux message for the final image
|
||||
if err := emitImageID(b.Aux, state); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
b.warnOnUnusedBuildArgs()
|
||||
|
||||
if b.image == "" {
|
||||
return "", errors.New("No image was generated. Is your Dockerfile empty?")
|
||||
}
|
||||
|
||||
if b.options.Squash {
|
||||
var fromID string
|
||||
if b.from != nil {
|
||||
fromID = b.from.ImageID()
|
||||
}
|
||||
b.image, err = b.docker.SquashImage(b.image, fromID)
|
||||
if err != nil {
|
||||
return "", perrors.Wrap(err, "error squashing image")
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Fprintf(b.Stdout, "Successfully built %s\n", shortImgID)
|
||||
|
||||
imageID := image.ID(b.image)
|
||||
for _, rt := range repoAndTags {
|
||||
if err := b.docker.TagImageWithReference(imageID, rt); err != nil {
|
||||
return "", err
|
||||
}
|
||||
fmt.Fprintf(b.Stdout, "Successfully tagged %s\n", reference.FamiliarString(rt))
|
||||
}
|
||||
|
||||
return b.image, nil
|
||||
return state, nil
|
||||
}
|
||||
|
||||
func addNodesForLabelOption(dockerfile *parser.Node, labels map[string]string) {
|
||||
@@ -295,25 +244,6 @@ func addNodesForLabelOption(dockerfile *parser.Node, labels map[string]string) {
|
||||
dockerfile.Children = append(dockerfile.Children, node)
|
||||
}
|
||||
|
||||
// check if there are any leftover build-args that were passed but not
|
||||
// consumed during build. Print a warning, if there are any.
|
||||
func (b *Builder) warnOnUnusedBuildArgs() {
|
||||
leftoverArgs := b.buildArgs.UnreferencedOptionArgs()
|
||||
if len(leftoverArgs) > 0 {
|
||||
fmt.Fprintf(b.Stderr, "[Warning] One or more build-args %v were not consumed\n", leftoverArgs)
|
||||
}
|
||||
}
|
||||
|
||||
// hasFromImage returns true if the builder has processed a `FROM <image>` line
|
||||
func (b *Builder) hasFromImage() bool {
|
||||
return b.image != "" || b.noBaseImage
|
||||
}
|
||||
|
||||
// Cancel cancels an ongoing Dockerfile build.
|
||||
func (b *Builder) Cancel() {
|
||||
b.cancel()
|
||||
}
|
||||
|
||||
// BuildFromConfig builds directly from `changes`, treating it as if it were the contents of a Dockerfile
|
||||
// It will:
|
||||
// - Call parse.Parse() to get an AST root for the concatenated Dockerfile entries.
|
||||
@@ -324,40 +254,61 @@ func (b *Builder) Cancel() {
|
||||
//
|
||||
// TODO: Remove?
|
||||
func BuildFromConfig(config *container.Config, changes []string) (*container.Config, error) {
|
||||
b, err := NewBuilder(context.Background(), nil, nil, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
if len(changes) == 0 {
|
||||
return config, nil
|
||||
}
|
||||
|
||||
ast, err := parser.Parse(bytes.NewBufferString(strings.Join(changes, "\n")), &b.directive)
|
||||
b := newBuilder(context.Background(), builderOptions{})
|
||||
|
||||
dockerfile, err := parser.Parse(bytes.NewBufferString(strings.Join(changes, "\n")))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// ensure that the commands are valid
|
||||
for _, n := range ast.Children {
|
||||
for _, n := range dockerfile.AST.Children {
|
||||
if !validCommitCommands[n.Value] {
|
||||
return nil, fmt.Errorf("%s is not a valid change command", n.Value)
|
||||
}
|
||||
}
|
||||
|
||||
b.runConfig = config
|
||||
b.Stdout = ioutil.Discard
|
||||
b.Stderr = ioutil.Discard
|
||||
b.disableCommit = true
|
||||
|
||||
total := len(ast.Children)
|
||||
for _, n := range ast.Children {
|
||||
if err := b.checkDispatch(n, false); err != nil {
|
||||
return nil, err
|
||||
if err := checkDispatchDockerfile(dockerfile.AST); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dispatchState := newDispatchState()
|
||||
dispatchState.runConfig = config
|
||||
return dispatchFromDockerfile(b, dockerfile, dispatchState, nil)
|
||||
}
|
||||
|
||||
func checkDispatchDockerfile(dockerfile *parser.Node) error {
|
||||
for _, n := range dockerfile.Children {
|
||||
if err := checkDispatch(n); err != nil {
|
||||
return errors.Wrapf(err, "Dockerfile parse error line %d", n.StartLine)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func dispatchFromDockerfile(b *Builder, result *parser.Result, dispatchState *dispatchState, source builder.Source) (*container.Config, error) {
|
||||
shlex := NewShellLex(result.EscapeToken)
|
||||
ast := result.AST
|
||||
total := len(ast.Children)
|
||||
|
||||
for i, n := range ast.Children {
|
||||
if err := b.dispatch(i, total, n); err != nil {
|
||||
opts := dispatchOptions{
|
||||
state: dispatchState,
|
||||
stepMsg: formatStep(i, total),
|
||||
node: n,
|
||||
shlex: shlex,
|
||||
source: source,
|
||||
}
|
||||
if _, err := b.dispatch(opts); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return b.runConfig, nil
|
||||
return dispatchState.runConfig, nil
|
||||
}
|
||||
|
||||
@@ -5,15 +5,13 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/builder/dockerfile/parser"
|
||||
"github.com/docker/docker/pkg/testutil/assert"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestAddNodesForLabelOption(t *testing.T) {
|
||||
dockerfile := "FROM scratch"
|
||||
d := parser.Directive{}
|
||||
parser.SetEscapeToken(parser.DefaultEscapeToken, &d)
|
||||
nodes, err := parser.Parse(strings.NewReader(dockerfile), &d)
|
||||
assert.NilError(t, err)
|
||||
result, err := parser.Parse(strings.NewReader(dockerfile))
|
||||
assert.NoError(t, err)
|
||||
|
||||
labels := map[string]string{
|
||||
"org.e": "cli-e",
|
||||
@@ -22,14 +20,15 @@ func TestAddNodesForLabelOption(t *testing.T) {
|
||||
"org.b": "cli-b",
|
||||
"org.a": "cli-a",
|
||||
}
|
||||
nodes := result.AST
|
||||
addNodesForLabelOption(nodes, labels)
|
||||
|
||||
expected := []string{
|
||||
"FROM scratch",
|
||||
`LABEL "org.a"='cli-a' "org.b"='cli-b' "org.c"='cli-c' "org.d"='cli-d' "org.e"='cli-e'`,
|
||||
}
|
||||
assert.Equal(t, len(nodes.Children), 2)
|
||||
assert.Len(t, nodes.Children, 2)
|
||||
for i, v := range nodes.Children {
|
||||
assert.Equal(t, v.Original, expected[i])
|
||||
assert.Equal(t, expected[i], v.Original)
|
||||
}
|
||||
}
|
||||
|
||||
363
builder/dockerfile/copy.go
Normal file
363
builder/dockerfile/copy.go
Normal file
@@ -0,0 +1,363 @@
|
||||
package dockerfile
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/builder"
|
||||
"github.com/docker/docker/builder/remotecontext"
|
||||
"github.com/docker/docker/pkg/httputils"
|
||||
"github.com/docker/docker/pkg/ioutils"
|
||||
"github.com/docker/docker/pkg/progress"
|
||||
"github.com/docker/docker/pkg/streamformatter"
|
||||
"github.com/docker/docker/pkg/system"
|
||||
"github.com/docker/docker/pkg/urlutil"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type pathCache interface {
|
||||
Load(key interface{}) (value interface{}, ok bool)
|
||||
Store(key, value interface{})
|
||||
}
|
||||
|
||||
// copyInfo is a data object which stores the metadata about each source file in
|
||||
// a copyInstruction
|
||||
type copyInfo struct {
|
||||
root string
|
||||
path string
|
||||
hash string
|
||||
noDecompress bool
|
||||
}
|
||||
|
||||
func newCopyInfoFromSource(source builder.Source, path string, hash string) copyInfo {
|
||||
return copyInfo{root: source.Root(), path: path, hash: hash}
|
||||
}
|
||||
|
||||
func newCopyInfos(copyInfos ...copyInfo) []copyInfo {
|
||||
return copyInfos
|
||||
}
|
||||
|
||||
// copyInstruction is a fully parsed COPY or ADD command that is passed to
|
||||
// Builder.performCopy to copy files into the image filesystem
|
||||
type copyInstruction struct {
|
||||
cmdName string
|
||||
infos []copyInfo
|
||||
dest string
|
||||
allowLocalDecompression bool
|
||||
}
|
||||
|
||||
// copier reads a raw COPY or ADD command, fetches remote sources using a downloader,
|
||||
// and creates a copyInstruction
|
||||
type copier struct {
|
||||
imageSource *imageMount
|
||||
source builder.Source
|
||||
pathCache pathCache
|
||||
download sourceDownloader
|
||||
tmpPaths []string
|
||||
}
|
||||
|
||||
func copierFromDispatchRequest(req dispatchRequest, download sourceDownloader, imageSource *imageMount) copier {
|
||||
return copier{
|
||||
source: req.source,
|
||||
pathCache: req.builder.pathCache,
|
||||
download: download,
|
||||
imageSource: imageSource,
|
||||
}
|
||||
}
|
||||
|
||||
func (o *copier) createCopyInstruction(args []string, cmdName string) (copyInstruction, error) {
|
||||
inst := copyInstruction{cmdName: cmdName}
|
||||
last := len(args) - 1
|
||||
|
||||
// Work in daemon-specific filepath semantics
|
||||
inst.dest = filepath.FromSlash(args[last])
|
||||
|
||||
infos, err := o.getCopyInfosForSourcePaths(args[0:last])
|
||||
if err != nil {
|
||||
return inst, errors.Wrapf(err, "%s failed", cmdName)
|
||||
}
|
||||
if len(infos) > 1 && !strings.HasSuffix(inst.dest, string(os.PathSeparator)) {
|
||||
return inst, errors.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName)
|
||||
}
|
||||
inst.infos = infos
|
||||
return inst, nil
|
||||
}
|
||||
|
||||
// getCopyInfosForSourcePaths iterates over the source files and calculate the info
|
||||
// needed to copy (e.g. hash value if cached)
|
||||
func (o *copier) getCopyInfosForSourcePaths(sources []string) ([]copyInfo, error) {
|
||||
var infos []copyInfo
|
||||
for _, orig := range sources {
|
||||
subinfos, err := o.getCopyInfoForSourcePath(orig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
infos = append(infos, subinfos...)
|
||||
}
|
||||
|
||||
if len(infos) == 0 {
|
||||
return nil, errors.New("no source files were specified")
|
||||
}
|
||||
return infos, nil
|
||||
}
|
||||
|
||||
func (o *copier) getCopyInfoForSourcePath(orig string) ([]copyInfo, error) {
|
||||
if !urlutil.IsURL(orig) {
|
||||
return o.calcCopyInfo(orig, true)
|
||||
}
|
||||
remote, path, err := o.download(orig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
o.tmpPaths = append(o.tmpPaths, remote.Root())
|
||||
|
||||
hash, err := remote.Hash(path)
|
||||
ci := newCopyInfoFromSource(remote, path, hash)
|
||||
ci.noDecompress = true // data from http shouldn't be extracted even on ADD
|
||||
return newCopyInfos(ci), err
|
||||
}
|
||||
|
||||
// Cleanup removes any temporary directories created as part of downloading
|
||||
// remote files.
|
||||
func (o *copier) Cleanup() {
|
||||
for _, path := range o.tmpPaths {
|
||||
os.RemoveAll(path)
|
||||
}
|
||||
o.tmpPaths = []string{}
|
||||
}
|
||||
|
||||
// TODO: allowWildcards can probably be removed by refactoring this function further.
|
||||
func (o *copier) calcCopyInfo(origPath string, allowWildcards bool) ([]copyInfo, error) {
|
||||
imageSource := o.imageSource
|
||||
if err := validateCopySourcePath(imageSource, origPath); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Work in daemon-specific OS filepath semantics
|
||||
origPath = filepath.FromSlash(origPath)
|
||||
origPath = strings.TrimPrefix(origPath, string(os.PathSeparator))
|
||||
origPath = strings.TrimPrefix(origPath, "."+string(os.PathSeparator))
|
||||
|
||||
// TODO: do this when creating copier. Requires validateCopySourcePath
|
||||
// (and other below) to be aware of the difference sources. Why is it only
|
||||
// done on image Source?
|
||||
if imageSource != nil {
|
||||
var err error
|
||||
o.source, err = imageSource.Source()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to copy")
|
||||
}
|
||||
}
|
||||
|
||||
if o.source == nil {
|
||||
return nil, errors.Errorf("missing build context")
|
||||
}
|
||||
|
||||
// Deal with wildcards
|
||||
if allowWildcards && containsWildcards(origPath) {
|
||||
return o.copyWithWildcards(origPath)
|
||||
}
|
||||
|
||||
if imageSource != nil && imageSource.ImageID() != "" {
|
||||
// return a cached copy if one exists
|
||||
if h, ok := o.pathCache.Load(imageSource.ImageID() + origPath); ok {
|
||||
return newCopyInfos(newCopyInfoFromSource(o.source, origPath, h.(string))), nil
|
||||
}
|
||||
}
|
||||
|
||||
// Deal with the single file case
|
||||
copyInfo, err := copyInfoForFile(o.source, origPath)
|
||||
switch {
|
||||
case err != nil:
|
||||
return nil, err
|
||||
case copyInfo.hash != "":
|
||||
o.storeInPathCache(imageSource, origPath, copyInfo.hash)
|
||||
return newCopyInfos(copyInfo), err
|
||||
}
|
||||
|
||||
// TODO: remove, handle dirs in Hash()
|
||||
subfiles, err := walkSource(o.source, origPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
hash := hashStringSlice("dir", subfiles)
|
||||
o.storeInPathCache(imageSource, origPath, hash)
|
||||
return newCopyInfos(newCopyInfoFromSource(o.source, origPath, hash)), nil
|
||||
}
|
||||
|
||||
func (o *copier) storeInPathCache(im *imageMount, path string, hash string) {
|
||||
if im != nil {
|
||||
o.pathCache.Store(im.ImageID()+path, hash)
|
||||
}
|
||||
}
|
||||
|
||||
func (o *copier) copyWithWildcards(origPath string) ([]copyInfo, error) {
|
||||
var copyInfos []copyInfo
|
||||
if err := filepath.Walk(o.source.Root(), func(path string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rel, err := remotecontext.Rel(o.source.Root(), path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if rel == "." {
|
||||
return nil
|
||||
}
|
||||
if match, _ := filepath.Match(origPath, rel); !match {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Note we set allowWildcards to false in case the name has
|
||||
// a * in it
|
||||
subInfos, err := o.calcCopyInfo(rel, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
copyInfos = append(copyInfos, subInfos...)
|
||||
return nil
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return copyInfos, nil
|
||||
}
|
||||
|
||||
func copyInfoForFile(source builder.Source, path string) (copyInfo, error) {
|
||||
fi, err := remotecontext.StatAt(source, path)
|
||||
if err != nil {
|
||||
return copyInfo{}, err
|
||||
}
|
||||
|
||||
if fi.IsDir() {
|
||||
return copyInfo{}, nil
|
||||
}
|
||||
hash, err := source.Hash(path)
|
||||
if err != nil {
|
||||
return copyInfo{}, err
|
||||
}
|
||||
return newCopyInfoFromSource(source, path, "file:"+hash), nil
|
||||
}
|
||||
|
||||
// TODO: dedupe with copyWithWildcards()
|
||||
func walkSource(source builder.Source, origPath string) ([]string, error) {
|
||||
fp, err := remotecontext.FullPath(source, origPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Must be a dir
|
||||
var subfiles []string
|
||||
err = filepath.Walk(fp, func(path string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rel, err := remotecontext.Rel(source.Root(), path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if rel == "." {
|
||||
return nil
|
||||
}
|
||||
hash, err := source.Hash(rel)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
// we already checked handleHash above
|
||||
subfiles = append(subfiles, hash)
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sort.Strings(subfiles)
|
||||
return subfiles, nil
|
||||
}
|
||||
|
||||
type sourceDownloader func(string) (builder.Source, string, error)
|
||||
|
||||
func newRemoteSourceDownloader(output, stdout io.Writer) sourceDownloader {
|
||||
return func(url string) (builder.Source, string, error) {
|
||||
return downloadSource(output, stdout, url)
|
||||
}
|
||||
}
|
||||
|
||||
func errOnSourceDownload(_ string) (builder.Source, string, error) {
|
||||
return nil, "", errors.New("source can't be a URL for COPY")
|
||||
}
|
||||
|
||||
func downloadSource(output io.Writer, stdout io.Writer, srcURL string) (remote builder.Source, p string, err error) {
|
||||
// get filename from URL
|
||||
u, err := url.Parse(srcURL)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
filename := filepath.Base(filepath.FromSlash(u.Path)) // Ensure in platform semantics
|
||||
if filename == "" {
|
||||
err = errors.Errorf("cannot determine filename from url: %s", u)
|
||||
return
|
||||
}
|
||||
|
||||
// Initiate the download
|
||||
resp, err := httputils.Download(srcURL)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Prepare file in a tmp dir
|
||||
tmpDir, err := ioutils.TempDir("", "docker-remote")
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
os.RemoveAll(tmpDir)
|
||||
}
|
||||
}()
|
||||
tmpFileName := filepath.Join(tmpDir, filename)
|
||||
tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
progressOutput := streamformatter.NewJSONProgressOutput(output, true)
|
||||
progressReader := progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Downloading")
|
||||
// Download and dump result to tmp file
|
||||
// TODO: add filehash directly
|
||||
if _, err = io.Copy(tmpFile, progressReader); err != nil {
|
||||
tmpFile.Close()
|
||||
return
|
||||
}
|
||||
// TODO: how important is this random blank line to the output?
|
||||
fmt.Fprintln(stdout)
|
||||
|
||||
// Set the mtime to the Last-Modified header value if present
|
||||
// Otherwise just remove atime and mtime
|
||||
mTime := time.Time{}
|
||||
|
||||
lastMod := resp.Header.Get("Last-Modified")
|
||||
if lastMod != "" {
|
||||
// If we can't parse it then just let it default to 'zero'
|
||||
// otherwise use the parsed time value
|
||||
if parsedMTime, err := http.ParseTime(lastMod); err == nil {
|
||||
mTime = parsedMTime
|
||||
}
|
||||
}
|
||||
|
||||
tmpFile.Close()
|
||||
|
||||
if err = system.Chtimes(tmpFileName, mTime, mTime); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
lc, err := remotecontext.NewLazyContext(tmpDir)
|
||||
return lc, filename, err
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -3,15 +3,21 @@ package dockerfile
|
||||
import (
|
||||
"fmt"
|
||||
"runtime"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"bytes"
|
||||
"context"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/backend"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/strslice"
|
||||
"github.com/docker/docker/builder"
|
||||
"github.com/docker/docker/pkg/testutil/assert"
|
||||
"github.com/docker/docker/builder/dockerfile/parser"
|
||||
"github.com/docker/docker/pkg/system"
|
||||
"github.com/docker/docker/pkg/testutil"
|
||||
"github.com/docker/go-connections/nat"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
type commandWithFunction struct {
|
||||
@@ -19,155 +25,152 @@ type commandWithFunction struct {
|
||||
function func(args []string) error
|
||||
}
|
||||
|
||||
func withArgs(f dispatcher) func([]string) error {
|
||||
return func(args []string) error {
|
||||
return f(dispatchRequest{args: args})
|
||||
}
|
||||
}
|
||||
|
||||
func withBuilderAndArgs(builder *Builder, f dispatcher) func([]string) error {
|
||||
return func(args []string) error {
|
||||
return f(defaultDispatchReq(builder, args...))
|
||||
}
|
||||
}
|
||||
|
||||
func defaultDispatchReq(builder *Builder, args ...string) dispatchRequest {
|
||||
return dispatchRequest{
|
||||
builder: builder,
|
||||
args: args,
|
||||
flags: NewBFlags(),
|
||||
shlex: NewShellLex(parser.DefaultEscapeToken),
|
||||
state: &dispatchState{runConfig: &container.Config{}},
|
||||
}
|
||||
}
|
||||
|
||||
func newBuilderWithMockBackend() *Builder {
|
||||
mockBackend := &MockBackend{}
|
||||
ctx := context.Background()
|
||||
b := &Builder{
|
||||
options: &types.ImageBuildOptions{},
|
||||
docker: mockBackend,
|
||||
buildArgs: newBuildArgs(make(map[string]*string)),
|
||||
tmpContainers: make(map[string]struct{}),
|
||||
Stdout: new(bytes.Buffer),
|
||||
clientCtx: ctx,
|
||||
disableCommit: true,
|
||||
imageSources: newImageSources(ctx, builderOptions{
|
||||
Options: &types.ImageBuildOptions{},
|
||||
Backend: mockBackend,
|
||||
}),
|
||||
buildStages: newBuildStages(),
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func TestCommandsExactlyOneArgument(t *testing.T) {
|
||||
commands := []commandWithFunction{
|
||||
{"MAINTAINER", func(args []string) error { return maintainer(nil, args, nil, "") }},
|
||||
{"FROM", func(args []string) error { return from(nil, args, nil, "") }},
|
||||
{"WORKDIR", func(args []string) error { return workdir(nil, args, nil, "") }},
|
||||
{"USER", func(args []string) error { return user(nil, args, nil, "") }},
|
||||
{"STOPSIGNAL", func(args []string) error { return stopSignal(nil, args, nil, "") }}}
|
||||
{"MAINTAINER", withArgs(maintainer)},
|
||||
{"WORKDIR", withArgs(workdir)},
|
||||
{"USER", withArgs(user)},
|
||||
{"STOPSIGNAL", withArgs(stopSignal)},
|
||||
}
|
||||
|
||||
for _, command := range commands {
|
||||
err := command.function([]string{})
|
||||
|
||||
if err == nil {
|
||||
t.Fatalf("Error should be present for %s command", command.name)
|
||||
}
|
||||
|
||||
expectedError := errExactlyOneArgument(command.name)
|
||||
|
||||
if err.Error() != expectedError.Error() {
|
||||
t.Fatalf("Wrong error message for %s. Got: %s. Should be: %s", command.name, err.Error(), expectedError)
|
||||
}
|
||||
assert.EqualError(t, err, errExactlyOneArgument(command.name).Error())
|
||||
}
|
||||
}
|
||||
|
||||
func TestCommandsAtLeastOneArgument(t *testing.T) {
|
||||
commands := []commandWithFunction{
|
||||
{"ENV", func(args []string) error { return env(nil, args, nil, "") }},
|
||||
{"LABEL", func(args []string) error { return label(nil, args, nil, "") }},
|
||||
{"ONBUILD", func(args []string) error { return onbuild(nil, args, nil, "") }},
|
||||
{"HEALTHCHECK", func(args []string) error { return healthcheck(nil, args, nil, "") }},
|
||||
{"EXPOSE", func(args []string) error { return expose(nil, args, nil, "") }},
|
||||
{"VOLUME", func(args []string) error { return volume(nil, args, nil, "") }}}
|
||||
{"ENV", withArgs(env)},
|
||||
{"LABEL", withArgs(label)},
|
||||
{"ONBUILD", withArgs(onbuild)},
|
||||
{"HEALTHCHECK", withArgs(healthcheck)},
|
||||
{"EXPOSE", withArgs(expose)},
|
||||
{"VOLUME", withArgs(volume)},
|
||||
}
|
||||
|
||||
for _, command := range commands {
|
||||
err := command.function([]string{})
|
||||
|
||||
if err == nil {
|
||||
t.Fatalf("Error should be present for %s command", command.name)
|
||||
}
|
||||
|
||||
expectedError := errAtLeastOneArgument(command.name)
|
||||
|
||||
if err.Error() != expectedError.Error() {
|
||||
t.Fatalf("Wrong error message for %s. Got: %s. Should be: %s", command.name, err.Error(), expectedError)
|
||||
}
|
||||
assert.EqualError(t, err, errAtLeastOneArgument(command.name).Error())
|
||||
}
|
||||
}
|
||||
|
||||
func TestCommandsAtLeastTwoArguments(t *testing.T) {
|
||||
commands := []commandWithFunction{
|
||||
{"ADD", func(args []string) error { return add(nil, args, nil, "") }},
|
||||
{"COPY", func(args []string) error { return dispatchCopy(nil, args, nil, "") }}}
|
||||
{"ADD", withArgs(add)},
|
||||
{"COPY", withArgs(dispatchCopy)}}
|
||||
|
||||
for _, command := range commands {
|
||||
err := command.function([]string{"arg1"})
|
||||
|
||||
if err == nil {
|
||||
t.Fatalf("Error should be present for %s command", command.name)
|
||||
}
|
||||
|
||||
expectedError := errAtLeastTwoArguments(command.name)
|
||||
|
||||
if err.Error() != expectedError.Error() {
|
||||
t.Fatalf("Wrong error message for %s. Got: %s. Should be: %s", command.name, err.Error(), expectedError)
|
||||
}
|
||||
assert.EqualError(t, err, errAtLeastTwoArguments(command.name).Error())
|
||||
}
|
||||
}
|
||||
|
||||
func TestCommandsTooManyArguments(t *testing.T) {
|
||||
commands := []commandWithFunction{
|
||||
{"ENV", func(args []string) error { return env(nil, args, nil, "") }},
|
||||
{"LABEL", func(args []string) error { return label(nil, args, nil, "") }}}
|
||||
{"ENV", withArgs(env)},
|
||||
{"LABEL", withArgs(label)}}
|
||||
|
||||
for _, command := range commands {
|
||||
err := command.function([]string{"arg1", "arg2", "arg3"})
|
||||
|
||||
if err == nil {
|
||||
t.Fatalf("Error should be present for %s command", command.name)
|
||||
}
|
||||
|
||||
expectedError := errTooManyArguments(command.name)
|
||||
|
||||
if err.Error() != expectedError.Error() {
|
||||
t.Fatalf("Wrong error message for %s. Got: %s. Should be: %s", command.name, err.Error(), expectedError)
|
||||
}
|
||||
assert.EqualError(t, err, errTooManyArguments(command.name).Error())
|
||||
}
|
||||
}
|
||||
|
||||
func TestCommandseBlankNames(t *testing.T) {
|
||||
bflags := &BFlags{}
|
||||
config := &container.Config{}
|
||||
|
||||
b := &Builder{flags: bflags, runConfig: config, disableCommit: true}
|
||||
|
||||
func TestCommandsBlankNames(t *testing.T) {
|
||||
builder := newBuilderWithMockBackend()
|
||||
commands := []commandWithFunction{
|
||||
{"ENV", func(args []string) error { return env(b, args, nil, "") }},
|
||||
{"LABEL", func(args []string) error { return label(b, args, nil, "") }},
|
||||
{"ENV", withBuilderAndArgs(builder, env)},
|
||||
{"LABEL", withBuilderAndArgs(builder, label)},
|
||||
}
|
||||
|
||||
for _, command := range commands {
|
||||
err := command.function([]string{"", ""})
|
||||
|
||||
if err == nil {
|
||||
t.Fatalf("Error should be present for %s command", command.name)
|
||||
}
|
||||
|
||||
expectedError := errBlankCommandNames(command.name)
|
||||
|
||||
if err.Error() != expectedError.Error() {
|
||||
t.Fatalf("Wrong error message for %s. Got: %s. Should be: %s", command.name, err.Error(), expectedError)
|
||||
}
|
||||
assert.EqualError(t, err, errBlankCommandNames(command.name).Error())
|
||||
}
|
||||
}
|
||||
|
||||
func TestEnv2Variables(t *testing.T) {
|
||||
variables := []string{"var1", "val1", "var2", "val2"}
|
||||
b := newBuilderWithMockBackend()
|
||||
|
||||
bflags := &BFlags{}
|
||||
config := &container.Config{}
|
||||
args := []string{"var1", "val1", "var2", "val2"}
|
||||
req := defaultDispatchReq(b, args...)
|
||||
err := env(req)
|
||||
require.NoError(t, err)
|
||||
|
||||
b := &Builder{flags: bflags, runConfig: config, disableCommit: true}
|
||||
|
||||
if err := env(b, variables, nil, ""); err != nil {
|
||||
t.Fatalf("Error when executing env: %s", err.Error())
|
||||
expected := []string{
|
||||
fmt.Sprintf("%s=%s", args[0], args[1]),
|
||||
fmt.Sprintf("%s=%s", args[2], args[3]),
|
||||
}
|
||||
assert.Equal(t, expected, req.state.runConfig.Env)
|
||||
}
|
||||
|
||||
expectedVar1 := fmt.Sprintf("%s=%s", variables[0], variables[1])
|
||||
expectedVar2 := fmt.Sprintf("%s=%s", variables[2], variables[3])
|
||||
func TestEnvValueWithExistingRunConfigEnv(t *testing.T) {
|
||||
b := newBuilderWithMockBackend()
|
||||
|
||||
if b.runConfig.Env[0] != expectedVar1 {
|
||||
t.Fatalf("Wrong env output for first variable. Got: %s. Should be: %s", b.runConfig.Env[0], expectedVar1)
|
||||
}
|
||||
|
||||
if b.runConfig.Env[1] != expectedVar2 {
|
||||
t.Fatalf("Wrong env output for second variable. Got: %s, Should be: %s", b.runConfig.Env[1], expectedVar2)
|
||||
args := []string{"var1", "val1"}
|
||||
req := defaultDispatchReq(b, args...)
|
||||
req.state.runConfig.Env = []string{"var1=old", "var2=fromenv"}
|
||||
err := env(req)
|
||||
require.NoError(t, err)
|
||||
|
||||
expected := []string{
|
||||
fmt.Sprintf("%s=%s", args[0], args[1]),
|
||||
"var2=fromenv",
|
||||
}
|
||||
assert.Equal(t, expected, req.state.runConfig.Env)
|
||||
}
|
||||
|
||||
func TestMaintainer(t *testing.T) {
|
||||
maintainerEntry := "Some Maintainer <maintainer@example.com>"
|
||||
|
||||
b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true}
|
||||
|
||||
if err := maintainer(b, []string{maintainerEntry}, nil, ""); err != nil {
|
||||
t.Fatalf("Error when executing maintainer: %s", err.Error())
|
||||
}
|
||||
|
||||
if b.maintainer != maintainerEntry {
|
||||
t.Fatalf("Maintainer in builder should be set to %s. Got: %s", maintainerEntry, b.maintainer)
|
||||
}
|
||||
b := newBuilderWithMockBackend()
|
||||
req := defaultDispatchReq(b, maintainerEntry)
|
||||
err := maintainer(req)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, maintainerEntry, req.state.maintainer)
|
||||
}
|
||||
|
||||
func TestLabel(t *testing.T) {
|
||||
@@ -175,83 +178,82 @@ func TestLabel(t *testing.T) {
|
||||
labelValue := "value"
|
||||
|
||||
labelEntry := []string{labelName, labelValue}
|
||||
b := newBuilderWithMockBackend()
|
||||
req := defaultDispatchReq(b, labelEntry...)
|
||||
err := label(req)
|
||||
require.NoError(t, err)
|
||||
|
||||
b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true}
|
||||
|
||||
if err := label(b, labelEntry, nil, ""); err != nil {
|
||||
t.Fatalf("Error when executing label: %s", err.Error())
|
||||
}
|
||||
|
||||
if val, ok := b.runConfig.Labels[labelName]; ok {
|
||||
if val != labelValue {
|
||||
t.Fatalf("Label %s should have value %s, had %s instead", labelName, labelValue, val)
|
||||
}
|
||||
} else {
|
||||
t.Fatalf("Label %s should be present but it is not", labelName)
|
||||
}
|
||||
}
|
||||
|
||||
func newBuilderWithMockBackend() *Builder {
|
||||
b := &Builder{
|
||||
flags: &BFlags{},
|
||||
runConfig: &container.Config{},
|
||||
options: &types.ImageBuildOptions{},
|
||||
docker: &MockBackend{},
|
||||
buildArgs: newBuildArgs(make(map[string]*string)),
|
||||
}
|
||||
b.imageContexts = &imageContexts{b: b}
|
||||
return b
|
||||
require.Contains(t, req.state.runConfig.Labels, labelName)
|
||||
assert.Equal(t, req.state.runConfig.Labels[labelName], labelValue)
|
||||
}
|
||||
|
||||
func TestFromScratch(t *testing.T) {
|
||||
b := newBuilderWithMockBackend()
|
||||
|
||||
err := from(b, []string{"scratch"}, nil, "")
|
||||
req := defaultDispatchReq(b, "scratch")
|
||||
err := from(req)
|
||||
|
||||
if runtime.GOOS == "windows" {
|
||||
assert.Error(t, err, "Windows does not support FROM scratch")
|
||||
assert.EqualError(t, err, "Windows does not support FROM scratch")
|
||||
return
|
||||
}
|
||||
|
||||
assert.NilError(t, err)
|
||||
assert.Equal(t, b.image, "")
|
||||
assert.Equal(t, b.noBaseImage, true)
|
||||
require.NoError(t, err)
|
||||
assert.True(t, req.state.hasFromImage())
|
||||
assert.Equal(t, "", req.state.imageID)
|
||||
assert.Equal(t, []string{"PATH=" + system.DefaultPathEnv}, req.state.runConfig.Env)
|
||||
}
|
||||
|
||||
func TestFromWithArg(t *testing.T) {
|
||||
tag, expected := ":sometag", "expectedthisid"
|
||||
|
||||
getImage := func(name string) (builder.Image, error) {
|
||||
assert.Equal(t, name, "alpine"+tag)
|
||||
return &mockImage{id: "expectedthisid"}, nil
|
||||
getImage := func(name string) (builder.Image, builder.ReleaseableLayer, error) {
|
||||
assert.Equal(t, "alpine"+tag, name)
|
||||
return &mockImage{id: "expectedthisid"}, nil, nil
|
||||
}
|
||||
b := newBuilderWithMockBackend()
|
||||
b.docker.(*MockBackend).getImageOnBuildFunc = getImage
|
||||
b.docker.(*MockBackend).getImageFunc = getImage
|
||||
|
||||
assert.NilError(t, arg(b, []string{"THETAG=" + tag}, nil, ""))
|
||||
err := from(b, []string{"alpine${THETAG}"}, nil, "")
|
||||
require.NoError(t, arg(defaultDispatchReq(b, "THETAG="+tag)))
|
||||
req := defaultDispatchReq(b, "alpine${THETAG}")
|
||||
err := from(req)
|
||||
|
||||
assert.NilError(t, err)
|
||||
assert.Equal(t, b.image, expected)
|
||||
assert.Equal(t, b.from.ImageID(), expected)
|
||||
assert.Equal(t, len(b.buildArgs.GetAllAllowed()), 0)
|
||||
assert.Equal(t, len(b.buildArgs.GetAllMeta()), 1)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, expected, req.state.imageID)
|
||||
assert.Equal(t, expected, req.state.baseImage.ImageID())
|
||||
assert.Len(t, b.buildArgs.GetAllAllowed(), 0)
|
||||
assert.Len(t, b.buildArgs.GetAllMeta(), 1)
|
||||
}
|
||||
|
||||
func TestFromWithUndefinedArg(t *testing.T) {
|
||||
tag, expected := "sometag", "expectedthisid"
|
||||
|
||||
getImage := func(name string) (builder.Image, error) {
|
||||
assert.Equal(t, name, "alpine")
|
||||
return &mockImage{id: "expectedthisid"}, nil
|
||||
getImage := func(name string) (builder.Image, builder.ReleaseableLayer, error) {
|
||||
assert.Equal(t, "alpine", name)
|
||||
return &mockImage{id: "expectedthisid"}, nil, nil
|
||||
}
|
||||
b := newBuilderWithMockBackend()
|
||||
b.docker.(*MockBackend).getImageOnBuildFunc = getImage
|
||||
b.docker.(*MockBackend).getImageFunc = getImage
|
||||
b.options.BuildArgs = map[string]*string{"THETAG": &tag}
|
||||
|
||||
err := from(b, []string{"alpine${THETAG}"}, nil, "")
|
||||
assert.NilError(t, err)
|
||||
assert.Equal(t, b.image, expected)
|
||||
req := defaultDispatchReq(b, "alpine${THETAG}")
|
||||
err := from(req)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, expected, req.state.imageID)
|
||||
}
|
||||
|
||||
func TestFromMultiStageWithScratchNamedStage(t *testing.T) {
|
||||
if runtime.GOOS == "windows" {
|
||||
t.Skip("Windows does not support scratch")
|
||||
}
|
||||
b := newBuilderWithMockBackend()
|
||||
req := defaultDispatchReq(b, "scratch", "AS", "base")
|
||||
|
||||
require.NoError(t, from(req))
|
||||
assert.True(t, req.state.hasFromImage())
|
||||
|
||||
req.args = []string{"base"}
|
||||
require.NoError(t, from(req))
|
||||
assert.True(t, req.state.hasFromImage())
|
||||
}
|
||||
|
||||
func TestOnbuildIllegalTriggers(t *testing.T) {
|
||||
@@ -261,237 +263,147 @@ func TestOnbuildIllegalTriggers(t *testing.T) {
|
||||
{"FROM", "FROM isn't allowed as an ONBUILD trigger"}}
|
||||
|
||||
for _, trigger := range triggers {
|
||||
b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true}
|
||||
b := newBuilderWithMockBackend()
|
||||
|
||||
err := onbuild(b, []string{trigger.command}, nil, "")
|
||||
|
||||
if err == nil {
|
||||
t.Fatal("Error should not be nil")
|
||||
}
|
||||
|
||||
if !strings.Contains(err.Error(), trigger.expectedError) {
|
||||
t.Fatalf("Error message not correct. Should be: %s, got: %s", trigger.expectedError, err.Error())
|
||||
}
|
||||
err := onbuild(defaultDispatchReq(b, trigger.command))
|
||||
testutil.ErrorContains(t, err, trigger.expectedError)
|
||||
}
|
||||
}
|
||||
|
||||
func TestOnbuild(t *testing.T) {
|
||||
b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true}
|
||||
b := newBuilderWithMockBackend()
|
||||
|
||||
err := onbuild(b, []string{"ADD", ".", "/app/src"}, nil, "ONBUILD ADD . /app/src")
|
||||
req := defaultDispatchReq(b, "ADD", ".", "/app/src")
|
||||
req.original = "ONBUILD ADD . /app/src"
|
||||
req.state.runConfig = &container.Config{}
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Error should be empty, got: %s", err.Error())
|
||||
}
|
||||
|
||||
expectedOnbuild := "ADD . /app/src"
|
||||
|
||||
if b.runConfig.OnBuild[0] != expectedOnbuild {
|
||||
t.Fatalf("Wrong ONBUILD command. Expected: %s, got: %s", expectedOnbuild, b.runConfig.OnBuild[0])
|
||||
}
|
||||
err := onbuild(req)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "ADD . /app/src", req.state.runConfig.OnBuild[0])
|
||||
}
|
||||
|
||||
func TestWorkdir(t *testing.T) {
|
||||
b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true}
|
||||
|
||||
b := newBuilderWithMockBackend()
|
||||
workingDir := "/app"
|
||||
|
||||
if runtime.GOOS == "windows" {
|
||||
workingDir = "C:\app"
|
||||
}
|
||||
|
||||
err := workdir(b, []string{workingDir}, nil, "")
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Error should be empty, got: %s", err.Error())
|
||||
}
|
||||
|
||||
if b.runConfig.WorkingDir != workingDir {
|
||||
t.Fatalf("WorkingDir should be set to %s, got %s", workingDir, b.runConfig.WorkingDir)
|
||||
}
|
||||
|
||||
req := defaultDispatchReq(b, workingDir)
|
||||
err := workdir(req)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, workingDir, req.state.runConfig.WorkingDir)
|
||||
}
|
||||
|
||||
func TestCmd(t *testing.T) {
|
||||
b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true}
|
||||
|
||||
b := newBuilderWithMockBackend()
|
||||
command := "./executable"
|
||||
|
||||
err := cmd(b, []string{command}, nil, "")
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Error should be empty, got: %s", err.Error())
|
||||
}
|
||||
req := defaultDispatchReq(b, command)
|
||||
err := cmd(req)
|
||||
require.NoError(t, err)
|
||||
|
||||
var expectedCommand strslice.StrSlice
|
||||
|
||||
if runtime.GOOS == "windows" {
|
||||
expectedCommand = strslice.StrSlice(append([]string{"cmd"}, "/S", "/C", command))
|
||||
} else {
|
||||
expectedCommand = strslice.StrSlice(append([]string{"/bin/sh"}, "-c", command))
|
||||
}
|
||||
|
||||
if !compareStrSlice(b.runConfig.Cmd, expectedCommand) {
|
||||
t.Fatalf("Command should be set to %s, got %s", command, b.runConfig.Cmd)
|
||||
}
|
||||
|
||||
if !b.cmdSet {
|
||||
t.Fatal("Command should be marked as set")
|
||||
}
|
||||
}
|
||||
|
||||
func compareStrSlice(slice1, slice2 strslice.StrSlice) bool {
|
||||
if len(slice1) != len(slice2) {
|
||||
return false
|
||||
}
|
||||
|
||||
for i := range slice1 {
|
||||
if slice1[i] != slice2[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
assert.Equal(t, expectedCommand, req.state.runConfig.Cmd)
|
||||
assert.True(t, req.state.cmdSet)
|
||||
}
|
||||
|
||||
func TestHealthcheckNone(t *testing.T) {
|
||||
b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true}
|
||||
b := newBuilderWithMockBackend()
|
||||
|
||||
if err := healthcheck(b, []string{"NONE"}, nil, ""); err != nil {
|
||||
t.Fatalf("Error should be empty, got: %s", err.Error())
|
||||
}
|
||||
req := defaultDispatchReq(b, "NONE")
|
||||
err := healthcheck(req)
|
||||
require.NoError(t, err)
|
||||
|
||||
if b.runConfig.Healthcheck == nil {
|
||||
t.Fatal("Healthcheck should be set, got nil")
|
||||
}
|
||||
|
||||
expectedTest := strslice.StrSlice(append([]string{"NONE"}))
|
||||
|
||||
if !compareStrSlice(expectedTest, b.runConfig.Healthcheck.Test) {
|
||||
t.Fatalf("Command should be set to %s, got %s", expectedTest, b.runConfig.Healthcheck.Test)
|
||||
}
|
||||
require.NotNil(t, req.state.runConfig.Healthcheck)
|
||||
assert.Equal(t, []string{"NONE"}, req.state.runConfig.Healthcheck.Test)
|
||||
}
|
||||
|
||||
func TestHealthcheckCmd(t *testing.T) {
|
||||
b := &Builder{flags: &BFlags{flags: make(map[string]*Flag)}, runConfig: &container.Config{}, disableCommit: true}
|
||||
b := newBuilderWithMockBackend()
|
||||
|
||||
if err := healthcheck(b, []string{"CMD", "curl", "-f", "http://localhost/", "||", "exit", "1"}, nil, ""); err != nil {
|
||||
t.Fatalf("Error should be empty, got: %s", err.Error())
|
||||
}
|
||||
args := []string{"CMD", "curl", "-f", "http://localhost/", "||", "exit", "1"}
|
||||
req := defaultDispatchReq(b, args...)
|
||||
err := healthcheck(req)
|
||||
require.NoError(t, err)
|
||||
|
||||
if b.runConfig.Healthcheck == nil {
|
||||
t.Fatal("Healthcheck should be set, got nil")
|
||||
}
|
||||
|
||||
expectedTest := strslice.StrSlice(append([]string{"CMD-SHELL"}, "curl -f http://localhost/ || exit 1"))
|
||||
|
||||
if !compareStrSlice(expectedTest, b.runConfig.Healthcheck.Test) {
|
||||
t.Fatalf("Command should be set to %s, got %s", expectedTest, b.runConfig.Healthcheck.Test)
|
||||
}
|
||||
require.NotNil(t, req.state.runConfig.Healthcheck)
|
||||
expectedTest := []string{"CMD-SHELL", "curl -f http://localhost/ || exit 1"}
|
||||
assert.Equal(t, expectedTest, req.state.runConfig.Healthcheck.Test)
|
||||
}
|
||||
|
||||
func TestEntrypoint(t *testing.T) {
|
||||
b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true}
|
||||
|
||||
b := newBuilderWithMockBackend()
|
||||
entrypointCmd := "/usr/sbin/nginx"
|
||||
|
||||
if err := entrypoint(b, []string{entrypointCmd}, nil, ""); err != nil {
|
||||
t.Fatalf("Error should be empty, got: %s", err.Error())
|
||||
}
|
||||
|
||||
if b.runConfig.Entrypoint == nil {
|
||||
t.Fatal("Entrypoint should be set")
|
||||
}
|
||||
req := defaultDispatchReq(b, entrypointCmd)
|
||||
err := entrypoint(req)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, req.state.runConfig.Entrypoint)
|
||||
|
||||
var expectedEntrypoint strslice.StrSlice
|
||||
|
||||
if runtime.GOOS == "windows" {
|
||||
expectedEntrypoint = strslice.StrSlice(append([]string{"cmd"}, "/S", "/C", entrypointCmd))
|
||||
} else {
|
||||
expectedEntrypoint = strslice.StrSlice(append([]string{"/bin/sh"}, "-c", entrypointCmd))
|
||||
}
|
||||
|
||||
if !compareStrSlice(expectedEntrypoint, b.runConfig.Entrypoint) {
|
||||
t.Fatalf("Entrypoint command should be set to %s, got %s", expectedEntrypoint, b.runConfig.Entrypoint)
|
||||
}
|
||||
assert.Equal(t, expectedEntrypoint, req.state.runConfig.Entrypoint)
|
||||
}
|
||||
|
||||
func TestExpose(t *testing.T) {
|
||||
b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true}
|
||||
b := newBuilderWithMockBackend()
|
||||
|
||||
exposedPort := "80"
|
||||
req := defaultDispatchReq(b, exposedPort)
|
||||
err := expose(req)
|
||||
require.NoError(t, err)
|
||||
|
||||
if err := expose(b, []string{exposedPort}, nil, ""); err != nil {
|
||||
t.Fatalf("Error should be empty, got: %s", err.Error())
|
||||
}
|
||||
|
||||
if b.runConfig.ExposedPorts == nil {
|
||||
t.Fatal("ExposedPorts should be set")
|
||||
}
|
||||
|
||||
if len(b.runConfig.ExposedPorts) != 1 {
|
||||
t.Fatalf("ExposedPorts should contain only 1 element. Got %s", b.runConfig.ExposedPorts)
|
||||
}
|
||||
require.NotNil(t, req.state.runConfig.ExposedPorts)
|
||||
require.Len(t, req.state.runConfig.ExposedPorts, 1)
|
||||
|
||||
portsMapping, err := nat.ParsePortSpec(exposedPort)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Error when parsing port spec: %s", err.Error())
|
||||
}
|
||||
|
||||
if _, ok := b.runConfig.ExposedPorts[portsMapping[0].Port]; !ok {
|
||||
t.Fatalf("Port %s should be present. Got %s", exposedPort, b.runConfig.ExposedPorts)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
assert.Contains(t, req.state.runConfig.ExposedPorts, portsMapping[0].Port)
|
||||
}
|
||||
|
||||
func TestUser(t *testing.T) {
|
||||
b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true}
|
||||
|
||||
b := newBuilderWithMockBackend()
|
||||
userCommand := "foo"
|
||||
|
||||
if err := user(b, []string{userCommand}, nil, ""); err != nil {
|
||||
t.Fatalf("Error should be empty, got: %s", err.Error())
|
||||
}
|
||||
|
||||
if b.runConfig.User != userCommand {
|
||||
t.Fatalf("User should be set to %s, got %s", userCommand, b.runConfig.User)
|
||||
}
|
||||
req := defaultDispatchReq(b, userCommand)
|
||||
err := user(req)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, userCommand, req.state.runConfig.User)
|
||||
}
|
||||
|
||||
func TestVolume(t *testing.T) {
|
||||
b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true}
|
||||
b := newBuilderWithMockBackend()
|
||||
|
||||
exposedVolume := "/foo"
|
||||
|
||||
if err := volume(b, []string{exposedVolume}, nil, ""); err != nil {
|
||||
t.Fatalf("Error should be empty, got: %s", err.Error())
|
||||
}
|
||||
req := defaultDispatchReq(b, exposedVolume)
|
||||
err := volume(req)
|
||||
require.NoError(t, err)
|
||||
|
||||
if b.runConfig.Volumes == nil {
|
||||
t.Fatal("Volumes should be set")
|
||||
}
|
||||
|
||||
if len(b.runConfig.Volumes) != 1 {
|
||||
t.Fatalf("Volumes should contain only 1 element. Got %s", b.runConfig.Volumes)
|
||||
}
|
||||
|
||||
if _, ok := b.runConfig.Volumes[exposedVolume]; !ok {
|
||||
t.Fatalf("Volume %s should be present. Got %s", exposedVolume, b.runConfig.Volumes)
|
||||
}
|
||||
require.NotNil(t, req.state.runConfig.Volumes)
|
||||
assert.Len(t, req.state.runConfig.Volumes, 1)
|
||||
assert.Contains(t, req.state.runConfig.Volumes, exposedVolume)
|
||||
}
|
||||
|
||||
func TestStopSignal(t *testing.T) {
|
||||
b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true}
|
||||
|
||||
b := newBuilderWithMockBackend()
|
||||
signal := "SIGKILL"
|
||||
|
||||
if err := stopSignal(b, []string{signal}, nil, ""); err != nil {
|
||||
t.Fatalf("Error should be empty, got: %s", err.Error())
|
||||
}
|
||||
|
||||
if b.runConfig.StopSignal != signal {
|
||||
t.Fatalf("StopSignal should be set to %s, got %s", signal, b.runConfig.StopSignal)
|
||||
}
|
||||
req := defaultDispatchReq(b, signal)
|
||||
err := stopSignal(req)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, signal, req.state.runConfig.StopSignal)
|
||||
}
|
||||
|
||||
func TestArg(t *testing.T) {
|
||||
@@ -501,33 +413,103 @@ func TestArg(t *testing.T) {
|
||||
argVal := "bar"
|
||||
argDef := fmt.Sprintf("%s=%s", argName, argVal)
|
||||
|
||||
err := arg(b, []string{argDef}, nil, "")
|
||||
assert.NilError(t, err)
|
||||
err := arg(defaultDispatchReq(b, argDef))
|
||||
require.NoError(t, err)
|
||||
|
||||
expected := map[string]string{argName: argVal}
|
||||
allowed := b.buildArgs.GetAllAllowed()
|
||||
assert.DeepEqual(t, allowed, expected)
|
||||
assert.Equal(t, expected, b.buildArgs.GetAllAllowed())
|
||||
}
|
||||
|
||||
func TestShell(t *testing.T) {
|
||||
b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true}
|
||||
b := newBuilderWithMockBackend()
|
||||
|
||||
shellCmd := "powershell"
|
||||
req := defaultDispatchReq(b, shellCmd)
|
||||
req.attributes = map[string]bool{"json": true}
|
||||
|
||||
attrs := make(map[string]bool)
|
||||
attrs["json"] = true
|
||||
|
||||
if err := shell(b, []string{shellCmd}, attrs, ""); err != nil {
|
||||
t.Fatalf("Error should be empty, got: %s", err.Error())
|
||||
}
|
||||
|
||||
if b.runConfig.Shell == nil {
|
||||
t.Fatal("Shell should be set")
|
||||
}
|
||||
err := shell(req)
|
||||
require.NoError(t, err)
|
||||
|
||||
expectedShell := strslice.StrSlice([]string{shellCmd})
|
||||
|
||||
if !compareStrSlice(expectedShell, b.runConfig.Shell) {
|
||||
t.Fatalf("Shell should be set to %s, got %s", expectedShell, b.runConfig.Shell)
|
||||
}
|
||||
assert.Equal(t, expectedShell, req.state.runConfig.Shell)
|
||||
}
|
||||
|
||||
func TestParseOptInterval(t *testing.T) {
|
||||
flInterval := &Flag{
|
||||
name: "interval",
|
||||
flagType: stringType,
|
||||
Value: "50ns",
|
||||
}
|
||||
_, err := parseOptInterval(flInterval)
|
||||
testutil.ErrorContains(t, err, "cannot be less than 1ms")
|
||||
|
||||
flInterval.Value = "1ms"
|
||||
_, err = parseOptInterval(flInterval)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestPrependEnvOnCmd(t *testing.T) {
|
||||
buildArgs := newBuildArgs(nil)
|
||||
buildArgs.AddArg("NO_PROXY", nil)
|
||||
|
||||
args := []string{"sorted=nope", "args=not", "http_proxy=foo", "NO_PROXY=YA"}
|
||||
cmd := []string{"foo", "bar"}
|
||||
cmdWithEnv := prependEnvOnCmd(buildArgs, args, cmd)
|
||||
expected := strslice.StrSlice([]string{
|
||||
"|3", "NO_PROXY=YA", "args=not", "sorted=nope", "foo", "bar"})
|
||||
assert.Equal(t, expected, cmdWithEnv)
|
||||
}
|
||||
|
||||
func TestRunWithBuildArgs(t *testing.T) {
|
||||
b := newBuilderWithMockBackend()
|
||||
b.buildArgs.argsFromOptions["HTTP_PROXY"] = strPtr("FOO")
|
||||
b.disableCommit = false
|
||||
|
||||
runConfig := &container.Config{}
|
||||
origCmd := strslice.StrSlice([]string{"cmd", "in", "from", "image"})
|
||||
cmdWithShell := strslice.StrSlice(append(getShell(runConfig), "echo foo"))
|
||||
envVars := []string{"|1", "one=two"}
|
||||
cachedCmd := strslice.StrSlice(append(envVars, cmdWithShell...))
|
||||
|
||||
imageCache := &mockImageCache{
|
||||
getCacheFunc: func(parentID string, cfg *container.Config) (string, error) {
|
||||
// Check the runConfig.Cmd sent to probeCache()
|
||||
assert.Equal(t, cachedCmd, cfg.Cmd)
|
||||
assert.Equal(t, strslice.StrSlice(nil), cfg.Entrypoint)
|
||||
return "", nil
|
||||
},
|
||||
}
|
||||
b.imageCache = imageCache
|
||||
|
||||
mockBackend := b.docker.(*MockBackend)
|
||||
mockBackend.getImageFunc = func(_ string) (builder.Image, builder.ReleaseableLayer, error) {
|
||||
return &mockImage{
|
||||
id: "abcdef",
|
||||
config: &container.Config{Cmd: origCmd},
|
||||
}, nil, nil
|
||||
}
|
||||
mockBackend.containerCreateFunc = func(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) {
|
||||
// Check the runConfig.Cmd sent to create()
|
||||
assert.Equal(t, cmdWithShell, config.Config.Cmd)
|
||||
assert.Contains(t, config.Config.Env, "one=two")
|
||||
assert.Equal(t, strslice.StrSlice{""}, config.Config.Entrypoint)
|
||||
return container.ContainerCreateCreatedBody{ID: "12345"}, nil
|
||||
}
|
||||
mockBackend.commitFunc = func(cID string, cfg *backend.ContainerCommitConfig) (string, error) {
|
||||
// Check the runConfig.Cmd sent to commit()
|
||||
assert.Equal(t, origCmd, cfg.Config.Cmd)
|
||||
assert.Equal(t, cachedCmd, cfg.ContainerConfig.Cmd)
|
||||
assert.Equal(t, strslice.StrSlice(nil), cfg.Config.Entrypoint)
|
||||
return "", nil
|
||||
}
|
||||
|
||||
req := defaultDispatchReq(b, "abcdef")
|
||||
require.NoError(t, from(req))
|
||||
b.buildArgs.AddArg("one", strPtr("two"))
|
||||
|
||||
req.args = []string{"echo foo"}
|
||||
require.NoError(t, run(req))
|
||||
|
||||
// Check that runConfig.Cmd has not been modified by run
|
||||
assert.Equal(t, origCmd, req.state.runConfig.Cmd)
|
||||
}
|
||||
|
||||
@@ -26,3 +26,9 @@ func normaliseWorkdir(current string, requested string) (string, error) {
|
||||
func errNotJSON(command, _ string) error {
|
||||
return fmt.Errorf("%s requires the arguments to be in JSON form", command)
|
||||
}
|
||||
|
||||
// equalEnvKeys compare two strings and returns true if they are equal. On
|
||||
// Windows this comparison is case insensitive.
|
||||
func equalEnvKeys(from, to string) bool {
|
||||
return from == to
|
||||
}
|
||||
|
||||
@@ -85,3 +85,9 @@ func errNotJSON(command, original string) error {
|
||||
}
|
||||
return fmt.Errorf("%s requires the arguments to be in JSON form%s", command, extra)
|
||||
}
|
||||
|
||||
// equalEnvKeys compare two strings and returns true if they are equal. On
|
||||
// Windows this comparison is case insensitive.
|
||||
func equalEnvKeys(from, to string) bool {
|
||||
return strings.ToUpper(from) == strings.ToUpper(to)
|
||||
}
|
||||
|
||||
@@ -1,18 +1,21 @@
|
||||
A|hello | hello
|
||||
A|he'll'o | hello
|
||||
A|he'llo | hello
|
||||
A|he'llo | error
|
||||
A|he\'llo | he'llo
|
||||
A|he\\'llo | he\llo
|
||||
A|he\\'llo | error
|
||||
A|abc\tdef | abctdef
|
||||
A|"abc\tdef" | abc\tdef
|
||||
A|"abc\\tdef" | abc\tdef
|
||||
A|'abc\tdef' | abc\tdef
|
||||
A|hello\ | hello
|
||||
A|hello\\ | hello\
|
||||
A|"hello | hello
|
||||
A|"hello\" | hello"
|
||||
A|"hello | error
|
||||
A|"hello\" | error
|
||||
A|"hel'lo" | hel'lo
|
||||
A|'hello | hello
|
||||
A|'hello | error
|
||||
A|'hello\' | hello\
|
||||
A|'hello\there' | hello\there
|
||||
A|'hello\\there' | hello\\there
|
||||
A|"''" | ''
|
||||
A|$. | $.
|
||||
A|$1 |
|
||||
@@ -24,6 +27,8 @@ W|he$pwd. | he/home.
|
||||
A|he$PWD | he/home
|
||||
A|he\$PWD | he$PWD
|
||||
A|he\\$PWD | he\/home
|
||||
A|"he\$PWD" | he$PWD
|
||||
A|"he\\$PWD" | he\/home
|
||||
A|he\${} | he${}
|
||||
A|he\${}xx | he${}xx
|
||||
A|he${} | he
|
||||
@@ -60,18 +65,18 @@ A|he${XXX:-\$PWD:}xx | he$PWD:xx
|
||||
A|he${XXX:-\${PWD}z}xx | he${PWDz}xx
|
||||
A|안녕하세요 | 안녕하세요
|
||||
A|안'녕'하세요 | 안녕하세요
|
||||
A|안'녕하세요 | 안녕하세요
|
||||
A|안'녕하세요 | error
|
||||
A|안녕\'하세요 | 안녕'하세요
|
||||
A|안\\'녕하세요 | 안\녕하세요
|
||||
A|안\\'녕하세요 | error
|
||||
A|안녕\t하세요 | 안녕t하세요
|
||||
A|"안녕\t하세요" | 안녕\t하세요
|
||||
A|'안녕\t하세요 | 안녕\t하세요
|
||||
A|'안녕\t하세요 | error
|
||||
A|안녕하세요\ | 안녕하세요
|
||||
A|안녕하세요\\ | 안녕하세요\
|
||||
A|"안녕하세요 | 안녕하세요
|
||||
A|"안녕하세요\" | 안녕하세요"
|
||||
A|"안녕하세요 | error
|
||||
A|"안녕하세요\" | error
|
||||
A|"안녕'하세요" | 안녕'하세요
|
||||
A|'안녕하세요 | 안녕하세요
|
||||
A|'안녕하세요 | error
|
||||
A|'안녕하세요\' | 안녕하세요\
|
||||
A|안녕$1x | 안녕x
|
||||
A|안녕$.x | 안녕$.x
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
//
|
||||
// It incorporates a dispatch table based on the parser.Node values (see the
|
||||
// parser package for more information) that are yielded from the parser itself.
|
||||
// Calling NewBuilder with the BuildOpts struct can be used to customize the
|
||||
// Calling newBuilder with the BuildOpts struct can be used to customize the
|
||||
// experience for execution purposes only. Parsing is controlled in the parser
|
||||
// package, and this division of responsibility should be respected.
|
||||
//
|
||||
@@ -20,13 +20,17 @@
|
||||
package dockerfile
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/builder"
|
||||
"github.com/docker/docker/builder/dockerfile/command"
|
||||
"github.com/docker/docker/builder/dockerfile/parser"
|
||||
runconfigopts "github.com/docker/docker/runconfig/opts"
|
||||
"github.com/docker/docker/pkg/system"
|
||||
"github.com/docker/docker/runconfig/opts"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Environment variable interpolation will happen on these statements only.
|
||||
@@ -56,10 +60,36 @@ var allowWordExpansion = map[string]bool{
|
||||
command.Expose: true,
|
||||
}
|
||||
|
||||
var evaluateTable map[string]func(*Builder, []string, map[string]bool, string) error
|
||||
type dispatchRequest struct {
|
||||
builder *Builder // TODO: replace this with a smaller interface
|
||||
args []string
|
||||
attributes map[string]bool
|
||||
flags *BFlags
|
||||
original string
|
||||
shlex *ShellLex
|
||||
state *dispatchState
|
||||
source builder.Source
|
||||
}
|
||||
|
||||
func newDispatchRequestFromOptions(options dispatchOptions, builder *Builder, args []string) dispatchRequest {
|
||||
return dispatchRequest{
|
||||
builder: builder,
|
||||
args: args,
|
||||
attributes: options.node.Attributes,
|
||||
original: options.node.Original,
|
||||
flags: NewBFlagsWithArgs(options.node.Flags),
|
||||
shlex: options.shlex,
|
||||
state: options.state,
|
||||
source: options.source,
|
||||
}
|
||||
}
|
||||
|
||||
type dispatcher func(dispatchRequest) error
|
||||
|
||||
var evaluateTable map[string]dispatcher
|
||||
|
||||
func init() {
|
||||
evaluateTable = map[string]func(*Builder, []string, map[string]bool, string) error{
|
||||
evaluateTable = map[string]dispatcher{
|
||||
command.Add: add,
|
||||
command.Arg: arg,
|
||||
command.Cmd: cmd,
|
||||
@@ -81,6 +111,10 @@ func init() {
|
||||
}
|
||||
}
|
||||
|
||||
func formatStep(stepN int, stepTotal int) string {
|
||||
return fmt.Sprintf("%d/%d", stepN+1, stepTotal)
|
||||
}
|
||||
|
||||
// This method is the entrypoint to all statement handling routines.
|
||||
//
|
||||
// Almost all nodes will have this structure:
|
||||
@@ -95,106 +129,164 @@ func init() {
|
||||
// such as `RUN` in ONBUILD RUN foo. There is special case logic in here to
|
||||
// deal with that, at least until it becomes more of a general concern with new
|
||||
// features.
|
||||
func (b *Builder) dispatch(stepN int, stepTotal int, ast *parser.Node) error {
|
||||
cmd := ast.Value
|
||||
func (b *Builder) dispatch(options dispatchOptions) (*dispatchState, error) {
|
||||
node := options.node
|
||||
cmd := node.Value
|
||||
upperCasedCmd := strings.ToUpper(cmd)
|
||||
|
||||
// To ensure the user is given a decent error message if the platform
|
||||
// on which the daemon is running does not support a builder command.
|
||||
if err := platformSupports(strings.ToLower(cmd)); err != nil {
|
||||
return err
|
||||
buildsFailed.WithValues(metricsCommandNotSupportedError).Inc()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
attrs := ast.Attributes
|
||||
original := ast.Original
|
||||
flags := ast.Flags
|
||||
strList := []string{}
|
||||
msg := fmt.Sprintf("Step %d/%d : %s", stepN+1, stepTotal, upperCasedCmd)
|
||||
msg := bytes.NewBufferString(fmt.Sprintf("Step %s : %s%s",
|
||||
options.stepMsg, upperCasedCmd, formatFlags(node.Flags)))
|
||||
|
||||
if len(ast.Flags) > 0 {
|
||||
msg += " " + strings.Join(ast.Flags, " ")
|
||||
}
|
||||
|
||||
if cmd == "onbuild" {
|
||||
if ast.Next == nil {
|
||||
return errors.New("ONBUILD requires at least one argument")
|
||||
args := []string{}
|
||||
ast := node
|
||||
if cmd == command.Onbuild {
|
||||
var err error
|
||||
ast, args, err = handleOnBuildNode(node, msg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ast = ast.Next.Children[0]
|
||||
strList = append(strList, ast.Value)
|
||||
msg += " " + ast.Value
|
||||
|
||||
if len(ast.Flags) > 0 {
|
||||
msg += " " + strings.Join(ast.Flags, " ")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
msgList := initMsgList(ast)
|
||||
// Append build args to runConfig environment variables
|
||||
envs := append(b.runConfig.Env, b.buildArgsWithoutConfigEnv()...)
|
||||
runConfigEnv := options.state.runConfig.Env
|
||||
envs := append(runConfigEnv, b.buildArgs.FilterAllowed(runConfigEnv)...)
|
||||
processFunc := createProcessWordFunc(options.shlex, cmd, envs)
|
||||
words, err := getDispatchArgsFromNode(ast, processFunc, msg)
|
||||
if err != nil {
|
||||
buildsFailed.WithValues(metricsErrorProcessingCommandsError).Inc()
|
||||
return nil, err
|
||||
}
|
||||
args = append(args, words...)
|
||||
|
||||
fmt.Fprintln(b.Stdout, msg.String())
|
||||
|
||||
f, ok := evaluateTable[cmd]
|
||||
if !ok {
|
||||
buildsFailed.WithValues(metricsUnknownInstructionError).Inc()
|
||||
return nil, fmt.Errorf("unknown instruction: %s", upperCasedCmd)
|
||||
}
|
||||
options.state.updateRunConfig()
|
||||
err = f(newDispatchRequestFromOptions(options, b, args))
|
||||
return options.state, err
|
||||
}
|
||||
|
||||
type dispatchOptions struct {
|
||||
state *dispatchState
|
||||
stepMsg string
|
||||
node *parser.Node
|
||||
shlex *ShellLex
|
||||
source builder.Source
|
||||
}
|
||||
|
||||
// dispatchState is a data object which is modified by dispatchers
|
||||
type dispatchState struct {
|
||||
runConfig *container.Config
|
||||
maintainer string
|
||||
cmdSet bool
|
||||
imageID string
|
||||
baseImage builder.Image
|
||||
stageName string
|
||||
}
|
||||
|
||||
func newDispatchState() *dispatchState {
|
||||
return &dispatchState{runConfig: &container.Config{}}
|
||||
}
|
||||
|
||||
func (s *dispatchState) updateRunConfig() {
|
||||
s.runConfig.Image = s.imageID
|
||||
}
|
||||
|
||||
// hasFromImage returns true if the builder has processed a `FROM <image>` line
|
||||
func (s *dispatchState) hasFromImage() bool {
|
||||
return s.imageID != "" || (s.baseImage != nil && s.baseImage.ImageID() == "")
|
||||
}
|
||||
|
||||
func (s *dispatchState) isCurrentStage(target string) bool {
|
||||
if target == "" {
|
||||
return false
|
||||
}
|
||||
return strings.EqualFold(s.stageName, target)
|
||||
}
|
||||
|
||||
func (s *dispatchState) beginStage(stageName string, image builder.Image) {
|
||||
s.stageName = stageName
|
||||
s.imageID = image.ImageID()
|
||||
|
||||
if image.RunConfig() != nil {
|
||||
s.runConfig = image.RunConfig()
|
||||
} else {
|
||||
s.runConfig = &container.Config{}
|
||||
}
|
||||
s.baseImage = image
|
||||
s.setDefaultPath()
|
||||
}
|
||||
|
||||
// Add the default PATH to runConfig.ENV if one exists for the platform and there
|
||||
// is no PATH set. Note that windows won't have one as it's set by HCS
|
||||
func (s *dispatchState) setDefaultPath() {
|
||||
if system.DefaultPathEnv == "" {
|
||||
return
|
||||
}
|
||||
envMap := opts.ConvertKVStringsToMap(s.runConfig.Env)
|
||||
if _, ok := envMap["PATH"]; !ok {
|
||||
s.runConfig.Env = append(s.runConfig.Env, "PATH="+system.DefaultPathEnv)
|
||||
}
|
||||
}
|
||||
|
||||
func handleOnBuildNode(ast *parser.Node, msg *bytes.Buffer) (*parser.Node, []string, error) {
|
||||
if ast.Next == nil {
|
||||
return nil, nil, errors.New("ONBUILD requires at least one argument")
|
||||
}
|
||||
ast = ast.Next.Children[0]
|
||||
msg.WriteString(" " + ast.Value + formatFlags(ast.Flags))
|
||||
return ast, []string{ast.Value}, nil
|
||||
}
|
||||
|
||||
func formatFlags(flags []string) string {
|
||||
if len(flags) > 0 {
|
||||
return " " + strings.Join(flags, " ")
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func getDispatchArgsFromNode(ast *parser.Node, processFunc processWordFunc, msg *bytes.Buffer) ([]string, error) {
|
||||
args := []string{}
|
||||
for i := 0; ast.Next != nil; i++ {
|
||||
ast = ast.Next
|
||||
words, err := b.evaluateEnv(cmd, ast.Value, envs)
|
||||
words, err := processFunc(ast.Value)
|
||||
if err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
strList = append(strList, words...)
|
||||
msgList[i] = ast.Value
|
||||
args = append(args, words...)
|
||||
msg.WriteString(" " + ast.Value)
|
||||
}
|
||||
|
||||
msg += " " + strings.Join(msgList, " ")
|
||||
fmt.Fprintln(b.Stdout, msg)
|
||||
|
||||
// XXX yes, we skip any cmds that are not valid; the parser should have
|
||||
// picked these out already.
|
||||
if f, ok := evaluateTable[cmd]; ok {
|
||||
b.flags = NewBFlags()
|
||||
b.flags.Args = flags
|
||||
return f(b, strList, attrs, original)
|
||||
}
|
||||
|
||||
return fmt.Errorf("Unknown instruction: %s", upperCasedCmd)
|
||||
return args, nil
|
||||
}
|
||||
|
||||
// count the number of nodes that we are going to traverse first
|
||||
// allocation of those list a lot when they have a lot of arguments
|
||||
func initMsgList(cursor *parser.Node) []string {
|
||||
var n int
|
||||
for ; cursor.Next != nil; n++ {
|
||||
cursor = cursor.Next
|
||||
}
|
||||
return make([]string, n)
|
||||
}
|
||||
type processWordFunc func(string) ([]string, error)
|
||||
|
||||
func (b *Builder) evaluateEnv(cmd string, str string, envs []string) ([]string, error) {
|
||||
if !replaceEnvAllowed[cmd] {
|
||||
return []string{str}, nil
|
||||
}
|
||||
var processFunc func(string, []string, rune) ([]string, error)
|
||||
if allowWordExpansion[cmd] {
|
||||
processFunc = ProcessWords
|
||||
} else {
|
||||
processFunc = func(word string, envs []string, escape rune) ([]string, error) {
|
||||
word, err := ProcessWord(word, envs, escape)
|
||||
func createProcessWordFunc(shlex *ShellLex, cmd string, envs []string) processWordFunc {
|
||||
switch {
|
||||
case !replaceEnvAllowed[cmd]:
|
||||
return func(word string) ([]string, error) {
|
||||
return []string{word}, nil
|
||||
}
|
||||
case allowWordExpansion[cmd]:
|
||||
return func(word string) ([]string, error) {
|
||||
return shlex.ProcessWords(word, envs)
|
||||
}
|
||||
default:
|
||||
return func(word string) ([]string, error) {
|
||||
word, err := shlex.ProcessWord(word, envs)
|
||||
return []string{word}, err
|
||||
}
|
||||
}
|
||||
return processFunc(str, envs, b.directive.EscapeToken)
|
||||
}
|
||||
|
||||
// buildArgsWithoutConfigEnv returns a list of key=value pairs for all the build
|
||||
// args that are not overriden by runConfig environment variables.
|
||||
func (b *Builder) buildArgsWithoutConfigEnv() []string {
|
||||
envs := []string{}
|
||||
configEnv := runconfigopts.ConvertKVStringsToMap(b.runConfig.Env)
|
||||
|
||||
for key, val := range b.buildArgs.GetAllAllowed() {
|
||||
if _, ok := configEnv[key]; !ok {
|
||||
envs = append(envs, fmt.Sprintf("%s=%s", key, val))
|
||||
}
|
||||
}
|
||||
return envs
|
||||
}
|
||||
|
||||
// checkDispatch does a simple check for syntax errors of the Dockerfile.
|
||||
@@ -202,8 +294,7 @@ func (b *Builder) buildArgsWithoutConfigEnv() []string {
|
||||
// arg, env, etc., this syntax check will not be complete and could not replace
|
||||
// the runtime check. Instead, this function is only a helper that allows
|
||||
// user to find out the obvious error in Dockerfile earlier on.
|
||||
// onbuild bool: indicate if instruction XXX is part of `ONBUILD XXX` trigger
|
||||
func (b *Builder) checkDispatch(ast *parser.Node, onbuild bool) error {
|
||||
func checkDispatch(ast *parser.Node) error {
|
||||
cmd := ast.Value
|
||||
upperCasedCmd := strings.ToUpper(cmd)
|
||||
|
||||
@@ -217,23 +308,14 @@ func (b *Builder) checkDispatch(ast *parser.Node, onbuild bool) error {
|
||||
// least one argument
|
||||
if upperCasedCmd == "ONBUILD" {
|
||||
if ast.Next == nil {
|
||||
buildsFailed.WithValues(metricsMissingOnbuildArgumentsError).Inc()
|
||||
return errors.New("ONBUILD requires at least one argument")
|
||||
}
|
||||
}
|
||||
|
||||
// The instruction is part of ONBUILD trigger (not the instruction itself)
|
||||
if onbuild {
|
||||
switch upperCasedCmd {
|
||||
case "ONBUILD":
|
||||
return errors.New("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed")
|
||||
case "MAINTAINER", "FROM":
|
||||
return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", upperCasedCmd)
|
||||
}
|
||||
}
|
||||
|
||||
if _, ok := evaluateTable[cmd]; ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("Unknown instruction: %s", upperCasedCmd)
|
||||
buildsFailed.WithValues(metricsUnknownInstructionError).Inc()
|
||||
return errors.Errorf("unknown instruction: %s", upperCasedCmd)
|
||||
}
|
||||
|
||||
@@ -7,8 +7,8 @@ import (
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/builder"
|
||||
"github.com/docker/docker/builder/dockerfile/parser"
|
||||
"github.com/docker/docker/builder/remotecontext"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/pkg/reexec"
|
||||
)
|
||||
@@ -105,13 +105,13 @@ func initDispatchTestCases() []dispatchTestCase {
|
||||
{
|
||||
name: "COPY wildcard no files",
|
||||
dockerfile: `COPY file*.txt /tmp/`,
|
||||
expectedError: "No source files were specified",
|
||||
expectedError: "COPY failed: no source files were specified",
|
||||
files: nil,
|
||||
},
|
||||
{
|
||||
name: "COPY url",
|
||||
dockerfile: `COPY https://index.docker.io/robots.txt /`,
|
||||
expectedError: "Source can't be a URL for COPY",
|
||||
expectedError: "source can't be a URL for COPY",
|
||||
files: nil,
|
||||
},
|
||||
{
|
||||
@@ -123,7 +123,7 @@ func initDispatchTestCases() []dispatchTestCase {
|
||||
{
|
||||
name: "Invalid instruction",
|
||||
dockerfile: `foo bar`,
|
||||
expectedError: "Unknown instruction: FOO",
|
||||
expectedError: "unknown instruction: FOO",
|
||||
files: nil,
|
||||
}}
|
||||
|
||||
@@ -158,7 +158,7 @@ func executeTestCase(t *testing.T, testCase dispatchTestCase) {
|
||||
}
|
||||
}()
|
||||
|
||||
context, err := builder.MakeTarSumContext(tarStream)
|
||||
context, err := remotecontext.MakeTarSumContext(tarStream)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Error when creating tar context: %s", err)
|
||||
@@ -171,28 +171,33 @@ func executeTestCase(t *testing.T, testCase dispatchTestCase) {
|
||||
}()
|
||||
|
||||
r := strings.NewReader(testCase.dockerfile)
|
||||
d := parser.Directive{}
|
||||
parser.SetEscapeToken(parser.DefaultEscapeToken, &d)
|
||||
n, err := parser.Parse(r, &d)
|
||||
result, err := parser.Parse(r)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Error when parsing Dockerfile: %s", err)
|
||||
}
|
||||
|
||||
config := &container.Config{}
|
||||
options := &types.ImageBuildOptions{
|
||||
BuildArgs: make(map[string]*string),
|
||||
}
|
||||
|
||||
b := &Builder{
|
||||
runConfig: config,
|
||||
options: options,
|
||||
Stdout: ioutil.Discard,
|
||||
context: context,
|
||||
buildArgs: newBuildArgs(options.BuildArgs),
|
||||
}
|
||||
|
||||
err = b.dispatch(0, len(n.Children), n.Children[0])
|
||||
shlex := NewShellLex(parser.DefaultEscapeToken)
|
||||
n := result.AST
|
||||
state := &dispatchState{runConfig: &container.Config{}}
|
||||
opts := dispatchOptions{
|
||||
state: state,
|
||||
stepMsg: formatStep(0, len(n.Children)),
|
||||
node: n.Children[0],
|
||||
shlex: shlex,
|
||||
source: context,
|
||||
}
|
||||
state, err = b.dispatch(opts)
|
||||
|
||||
if err == nil {
|
||||
t.Fatalf("No error when executing test %s", testCase.name)
|
||||
|
||||
@@ -3,85 +3,138 @@ package dockerfile
|
||||
import (
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/docker/docker/api/types/backend"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/builder"
|
||||
"github.com/docker/docker/builder/remotecontext"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// imageContexts is a helper for stacking up built image rootfs and reusing
|
||||
// them as contexts
|
||||
type imageContexts struct {
|
||||
b *Builder
|
||||
list []*imageMount
|
||||
byName map[string]*imageMount
|
||||
cache *pathCache
|
||||
currentName string
|
||||
type buildStage struct {
|
||||
id string
|
||||
config *container.Config
|
||||
}
|
||||
|
||||
func (ic *imageContexts) new(name string, increment bool) (*imageMount, error) {
|
||||
im := &imageMount{ic: ic}
|
||||
if len(name) > 0 {
|
||||
if ic.byName == nil {
|
||||
ic.byName = make(map[string]*imageMount)
|
||||
func newBuildStageFromImage(image builder.Image) *buildStage {
|
||||
return &buildStage{id: image.ImageID(), config: image.RunConfig()}
|
||||
}
|
||||
|
||||
func (b *buildStage) ImageID() string {
|
||||
return b.id
|
||||
}
|
||||
|
||||
func (b *buildStage) RunConfig() *container.Config {
|
||||
return b.config
|
||||
}
|
||||
|
||||
func (b *buildStage) update(imageID string, runConfig *container.Config) {
|
||||
b.id = imageID
|
||||
b.config = runConfig
|
||||
}
|
||||
|
||||
var _ builder.Image = &buildStage{}
|
||||
|
||||
// buildStages tracks each stage of a build so they can be retrieved by index
|
||||
// or by name.
|
||||
type buildStages struct {
|
||||
sequence []*buildStage
|
||||
byName map[string]*buildStage
|
||||
}
|
||||
|
||||
func newBuildStages() *buildStages {
|
||||
return &buildStages{byName: make(map[string]*buildStage)}
|
||||
}
|
||||
|
||||
func (s *buildStages) getByName(name string) (builder.Image, bool) {
|
||||
stage, ok := s.byName[strings.ToLower(name)]
|
||||
return stage, ok
|
||||
}
|
||||
|
||||
func (s *buildStages) get(indexOrName string) (builder.Image, error) {
|
||||
index, err := strconv.Atoi(indexOrName)
|
||||
if err == nil {
|
||||
if err := s.validateIndex(index); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if _, ok := ic.byName[name]; ok {
|
||||
return nil, errors.Errorf("duplicate name %s", name)
|
||||
}
|
||||
ic.byName[name] = im
|
||||
return s.sequence[index], nil
|
||||
}
|
||||
if increment {
|
||||
ic.list = append(ic.list, im)
|
||||
if im, ok := s.byName[strings.ToLower(indexOrName)]; ok {
|
||||
return im, nil
|
||||
}
|
||||
ic.currentName = name
|
||||
return im, nil
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (ic *imageContexts) update(imageID string, runConfig *container.Config) {
|
||||
ic.list[len(ic.list)-1].id = imageID
|
||||
ic.list[len(ic.list)-1].runConfig = runConfig
|
||||
}
|
||||
|
||||
func (ic *imageContexts) validate(i int) error {
|
||||
if i < 0 || i >= len(ic.list)-1 {
|
||||
var extraMsg string
|
||||
if i == len(ic.list)-1 {
|
||||
extraMsg = " refers current build block"
|
||||
func (s *buildStages) validateIndex(i int) error {
|
||||
if i < 0 || i >= len(s.sequence)-1 {
|
||||
if i == len(s.sequence)-1 {
|
||||
return errors.New("refers to current build stage")
|
||||
}
|
||||
return errors.Errorf("invalid from flag value %d%s", i, extraMsg)
|
||||
return errors.New("index out of bounds")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ic *imageContexts) get(indexOrName string) (*imageMount, error) {
|
||||
index, err := strconv.Atoi(indexOrName)
|
||||
if err == nil {
|
||||
if err := ic.validate(index); err != nil {
|
||||
return nil, err
|
||||
func (s *buildStages) add(name string, image builder.Image) error {
|
||||
stage := newBuildStageFromImage(image)
|
||||
name = strings.ToLower(name)
|
||||
if len(name) > 0 {
|
||||
if _, ok := s.byName[name]; ok {
|
||||
return errors.Errorf("duplicate name %s", name)
|
||||
}
|
||||
return ic.list[index], nil
|
||||
s.byName[name] = stage
|
||||
}
|
||||
if im, ok := ic.byName[strings.ToLower(indexOrName)]; ok {
|
||||
s.sequence = append(s.sequence, stage)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *buildStages) update(imageID string, runConfig *container.Config) {
|
||||
s.sequence[len(s.sequence)-1].update(imageID, runConfig)
|
||||
}
|
||||
|
||||
type getAndMountFunc func(string) (builder.Image, builder.ReleaseableLayer, error)
|
||||
|
||||
// imageSources mounts images and provides a cache for mounted images. It tracks
|
||||
// all images so they can be unmounted at the end of the build.
|
||||
type imageSources struct {
|
||||
byImageID map[string]*imageMount
|
||||
getImage getAndMountFunc
|
||||
cache pathCache // TODO: remove
|
||||
}
|
||||
|
||||
func newImageSources(ctx context.Context, options builderOptions) *imageSources {
|
||||
getAndMount := func(idOrRef string) (builder.Image, builder.ReleaseableLayer, error) {
|
||||
return options.Backend.GetImageAndReleasableLayer(ctx, idOrRef, backend.GetImageAndLayerOptions{
|
||||
ForcePull: options.Options.PullParent,
|
||||
AuthConfig: options.Options.AuthConfigs,
|
||||
Output: options.ProgressWriter.Output,
|
||||
})
|
||||
}
|
||||
|
||||
return &imageSources{
|
||||
byImageID: make(map[string]*imageMount),
|
||||
getImage: getAndMount,
|
||||
}
|
||||
}
|
||||
|
||||
func (m *imageSources) Get(idOrRef string) (*imageMount, error) {
|
||||
if im, ok := m.byImageID[idOrRef]; ok {
|
||||
return im, nil
|
||||
}
|
||||
im, err := mountByRef(ic.b, indexOrName)
|
||||
|
||||
image, layer, err := m.getImage(idOrRef)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "invalid from flag value %s", indexOrName)
|
||||
return nil, err
|
||||
}
|
||||
im := newImageMount(image, layer)
|
||||
m.byImageID[image.ImageID()] = im
|
||||
return im, nil
|
||||
}
|
||||
|
||||
func (ic *imageContexts) unmount() (retErr error) {
|
||||
for _, im := range ic.list {
|
||||
if err := im.unmount(); err != nil {
|
||||
logrus.Error(err)
|
||||
retErr = err
|
||||
}
|
||||
}
|
||||
for _, im := range ic.byName {
|
||||
func (m *imageSources) Unmount() (retErr error) {
|
||||
for _, im := range m.byImageID {
|
||||
if err := im.unmount(); err != nil {
|
||||
logrus.Error(err)
|
||||
retErr = err
|
||||
@@ -90,96 +143,50 @@ func (ic *imageContexts) unmount() (retErr error) {
|
||||
return
|
||||
}
|
||||
|
||||
func (ic *imageContexts) isCurrentTarget(target string) bool {
|
||||
if target == "" {
|
||||
return false
|
||||
}
|
||||
return strings.EqualFold(ic.currentName, target)
|
||||
}
|
||||
|
||||
func (ic *imageContexts) getCache(id, path string) (interface{}, bool) {
|
||||
if ic.cache != nil {
|
||||
if id == "" {
|
||||
return nil, false
|
||||
}
|
||||
return ic.cache.get(id + path)
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
func (ic *imageContexts) setCache(id, path string, v interface{}) {
|
||||
if ic.cache != nil {
|
||||
ic.cache.set(id+path, v)
|
||||
}
|
||||
}
|
||||
|
||||
// imageMount is a reference for getting access to a buildcontext that is backed
|
||||
// by an existing image
|
||||
// imageMount is a reference to an image that can be used as a builder.Source
|
||||
type imageMount struct {
|
||||
id string
|
||||
ctx builder.Context
|
||||
release func() error
|
||||
ic *imageContexts
|
||||
runConfig *container.Config
|
||||
image builder.Image
|
||||
source builder.Source
|
||||
layer builder.ReleaseableLayer
|
||||
}
|
||||
|
||||
func (im *imageMount) context() (builder.Context, error) {
|
||||
if im.ctx == nil {
|
||||
if im.id == "" {
|
||||
return nil, errors.Errorf("could not copy from empty context")
|
||||
func newImageMount(image builder.Image, layer builder.ReleaseableLayer) *imageMount {
|
||||
im := &imageMount{image: image, layer: layer}
|
||||
return im
|
||||
}
|
||||
|
||||
func (im *imageMount) Source() (builder.Source, error) {
|
||||
if im.source == nil {
|
||||
if im.layer == nil {
|
||||
return nil, errors.Errorf("empty context")
|
||||
}
|
||||
p, release, err := im.ic.b.docker.MountImage(im.id)
|
||||
mountPath, err := im.layer.Mount()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to mount %s", im.id)
|
||||
return nil, errors.Wrapf(err, "failed to mount %s", im.image.ImageID())
|
||||
}
|
||||
ctx, err := remotecontext.NewLazyContext(p)
|
||||
source, err := remotecontext.NewLazyContext(mountPath)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to create lazycontext for %s", p)
|
||||
return nil, errors.Wrapf(err, "failed to create lazycontext for %s", mountPath)
|
||||
}
|
||||
im.release = release
|
||||
im.ctx = ctx
|
||||
im.source = source
|
||||
}
|
||||
return im.ctx, nil
|
||||
return im.source, nil
|
||||
}
|
||||
|
||||
func (im *imageMount) unmount() error {
|
||||
if im.release != nil {
|
||||
if err := im.release(); err != nil {
|
||||
return errors.Wrapf(err, "failed to unmount previous build image %s", im.id)
|
||||
}
|
||||
im.release = nil
|
||||
if im.layer == nil {
|
||||
return nil
|
||||
}
|
||||
if err := im.layer.Release(); err != nil {
|
||||
return errors.Wrapf(err, "failed to unmount previous build image %s", im.image.ImageID())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (im *imageMount) Image() builder.Image {
|
||||
return im.image
|
||||
}
|
||||
|
||||
func (im *imageMount) ImageID() string {
|
||||
return im.id
|
||||
}
|
||||
func (im *imageMount) RunConfig() *container.Config {
|
||||
return im.runConfig
|
||||
}
|
||||
|
||||
type pathCache struct {
|
||||
mu sync.Mutex
|
||||
items map[string]interface{}
|
||||
}
|
||||
|
||||
func (c *pathCache) set(k string, v interface{}) {
|
||||
c.mu.Lock()
|
||||
if c.items == nil {
|
||||
c.items = make(map[string]interface{})
|
||||
}
|
||||
c.items[k] = v
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
func (c *pathCache) get(k string) (interface{}, bool) {
|
||||
c.mu.Lock()
|
||||
if c.items == nil {
|
||||
c.mu.Unlock()
|
||||
return nil, false
|
||||
}
|
||||
v, ok := c.items[k]
|
||||
c.mu.Unlock()
|
||||
return v, ok
|
||||
return im.image.ImageID()
|
||||
}
|
||||
|
||||
@@ -7,74 +7,53 @@ import (
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/backend"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/strslice"
|
||||
"github.com/docker/docker/builder"
|
||||
"github.com/docker/docker/builder/dockerfile/parser"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/pkg/httputils"
|
||||
"github.com/docker/docker/pkg/ioutils"
|
||||
containerpkg "github.com/docker/docker/container"
|
||||
"github.com/docker/docker/pkg/jsonmessage"
|
||||
"github.com/docker/docker/pkg/progress"
|
||||
"github.com/docker/docker/pkg/streamformatter"
|
||||
"github.com/docker/docker/pkg/stringid"
|
||||
"github.com/docker/docker/pkg/system"
|
||||
"github.com/docker/docker/pkg/tarsum"
|
||||
"github.com/docker/docker/pkg/urlutil"
|
||||
"github.com/docker/docker/runconfig/opts"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func (b *Builder) commit(id string, autoCmd strslice.StrSlice, comment string) error {
|
||||
func (b *Builder) commit(dispatchState *dispatchState, comment string) error {
|
||||
if b.disableCommit {
|
||||
return nil
|
||||
}
|
||||
if !b.hasFromImage() {
|
||||
if !dispatchState.hasFromImage() {
|
||||
return errors.New("Please provide a source image with `from` prior to commit")
|
||||
}
|
||||
b.runConfig.Image = b.image
|
||||
|
||||
if id == "" {
|
||||
cmd := b.runConfig.Cmd
|
||||
b.runConfig.Cmd = strslice.StrSlice(append(getShell(b.runConfig), "#(nop) ", comment))
|
||||
defer func(cmd strslice.StrSlice) { b.runConfig.Cmd = cmd }(cmd)
|
||||
|
||||
hit, err := b.probeCache()
|
||||
if err != nil {
|
||||
return err
|
||||
} else if hit {
|
||||
return nil
|
||||
}
|
||||
id, err = b.create()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
runConfigWithCommentCmd := copyRunConfig(dispatchState.runConfig, withCmdComment(comment))
|
||||
hit, err := b.probeCache(dispatchState, runConfigWithCommentCmd)
|
||||
if err != nil || hit {
|
||||
return err
|
||||
}
|
||||
id, err := b.create(runConfigWithCommentCmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Note: Actually copy the struct
|
||||
autoConfig := *b.runConfig
|
||||
autoConfig.Cmd = autoCmd
|
||||
return b.commitContainer(dispatchState, id, runConfigWithCommentCmd)
|
||||
}
|
||||
|
||||
// TODO: see if any args can be dropped
|
||||
func (b *Builder) commitContainer(dispatchState *dispatchState, id string, containerConfig *container.Config) error {
|
||||
if b.disableCommit {
|
||||
return nil
|
||||
}
|
||||
|
||||
commitCfg := &backend.ContainerCommitConfig{
|
||||
ContainerCommitConfig: types.ContainerCommitConfig{
|
||||
Author: b.maintainer,
|
||||
Author: dispatchState.maintainer,
|
||||
Pause: true,
|
||||
Config: &autoConfig,
|
||||
// TODO: this should be done by Commit()
|
||||
Config: copyRunConfig(dispatchState.runConfig),
|
||||
},
|
||||
ContainerConfig: containerConfig,
|
||||
}
|
||||
|
||||
// Commit the container
|
||||
@@ -83,104 +62,24 @@ func (b *Builder) commit(id string, autoCmd strslice.StrSlice, comment string) e
|
||||
return err
|
||||
}
|
||||
|
||||
b.image = imageID
|
||||
b.imageContexts.update(imageID, &autoConfig)
|
||||
dispatchState.imageID = imageID
|
||||
b.buildStages.update(imageID, dispatchState.runConfig)
|
||||
return nil
|
||||
}
|
||||
|
||||
type copyInfo struct {
|
||||
builder.FileInfo
|
||||
decompress bool
|
||||
}
|
||||
func (b *Builder) performCopy(state *dispatchState, inst copyInstruction) error {
|
||||
srcHash := getSourceHashFromInfos(inst.infos)
|
||||
|
||||
func (b *Builder) runContextCommand(args []string, allowRemote bool, allowLocalDecompression bool, cmdName string, imageSource *imageMount) error {
|
||||
if len(args) < 2 {
|
||||
return fmt.Errorf("Invalid %s format - at least two arguments required", cmdName)
|
||||
}
|
||||
|
||||
// Work in daemon-specific filepath semantics
|
||||
dest := filepath.FromSlash(args[len(args)-1]) // last one is always the dest
|
||||
|
||||
b.runConfig.Image = b.image
|
||||
|
||||
var infos []copyInfo
|
||||
|
||||
// Loop through each src file and calculate the info we need to
|
||||
// do the copy (e.g. hash value if cached). Don't actually do
|
||||
// the copy until we've looked at all src files
|
||||
var err error
|
||||
for _, orig := range args[0 : len(args)-1] {
|
||||
var fi builder.FileInfo
|
||||
if urlutil.IsURL(orig) {
|
||||
if !allowRemote {
|
||||
return fmt.Errorf("Source can't be a URL for %s", cmdName)
|
||||
}
|
||||
fi, err = b.download(orig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer os.RemoveAll(filepath.Dir(fi.Path()))
|
||||
infos = append(infos, copyInfo{
|
||||
FileInfo: fi,
|
||||
decompress: false,
|
||||
})
|
||||
continue
|
||||
}
|
||||
// not a URL
|
||||
subInfos, err := b.calcCopyInfo(cmdName, orig, allowLocalDecompression, true, imageSource)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
infos = append(infos, subInfos...)
|
||||
}
|
||||
|
||||
if len(infos) == 0 {
|
||||
return errors.New("No source files were specified")
|
||||
}
|
||||
if len(infos) > 1 && !strings.HasSuffix(dest, string(os.PathSeparator)) {
|
||||
return fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName)
|
||||
}
|
||||
|
||||
// For backwards compat, if there's just one info then use it as the
|
||||
// cache look-up string, otherwise hash 'em all into one
|
||||
var srcHash string
|
||||
var origPaths string
|
||||
|
||||
if len(infos) == 1 {
|
||||
fi := infos[0].FileInfo
|
||||
origPaths = fi.Name()
|
||||
if hfi, ok := fi.(builder.Hashed); ok {
|
||||
srcHash = hfi.Hash()
|
||||
}
|
||||
} else {
|
||||
var hashs []string
|
||||
var origs []string
|
||||
for _, info := range infos {
|
||||
fi := info.FileInfo
|
||||
origs = append(origs, fi.Name())
|
||||
if hfi, ok := fi.(builder.Hashed); ok {
|
||||
hashs = append(hashs, hfi.Hash())
|
||||
}
|
||||
}
|
||||
hasher := sha256.New()
|
||||
hasher.Write([]byte(strings.Join(hashs, ",")))
|
||||
srcHash = "multi:" + hex.EncodeToString(hasher.Sum(nil))
|
||||
origPaths = strings.Join(origs, " ")
|
||||
}
|
||||
|
||||
cmd := b.runConfig.Cmd
|
||||
b.runConfig.Cmd = strslice.StrSlice(append(getShell(b.runConfig), fmt.Sprintf("#(nop) %s %s in %s ", cmdName, srcHash, dest)))
|
||||
defer func(cmd strslice.StrSlice) { b.runConfig.Cmd = cmd }(cmd)
|
||||
|
||||
if hit, err := b.probeCache(); err != nil {
|
||||
// TODO: should this have been using origPaths instead of srcHash in the comment?
|
||||
runConfigWithCommentCmd := copyRunConfig(
|
||||
state.runConfig,
|
||||
withCmdCommentString(fmt.Sprintf("%s %s in %s ", inst.cmdName, srcHash, inst.dest)))
|
||||
if hit, err := b.probeCache(state, runConfigWithCommentCmd); err != nil || hit {
|
||||
return err
|
||||
} else if hit {
|
||||
return nil
|
||||
}
|
||||
|
||||
container, err := b.docker.ContainerCreate(types.ContainerCreateConfig{
|
||||
Config: b.runConfig,
|
||||
Config: runConfigWithCommentCmd,
|
||||
// Set a log config to override any default value set on the daemon
|
||||
HostConfig: &container.HostConfig{LogConfig: defaultLogConfig},
|
||||
})
|
||||
@@ -189,338 +88,132 @@ func (b *Builder) runContextCommand(args []string, allowRemote bool, allowLocalD
|
||||
}
|
||||
b.tmpContainers[container.ID] = struct{}{}
|
||||
|
||||
comment := fmt.Sprintf("%s %s in %s", cmdName, origPaths, dest)
|
||||
|
||||
// Twiddle the destination when it's a relative path - meaning, make it
|
||||
// relative to the WORKINGDIR
|
||||
if dest, err = normaliseDest(cmdName, b.runConfig.WorkingDir, dest); err != nil {
|
||||
dest, err := normaliseDest(inst.cmdName, state.runConfig.WorkingDir, inst.dest)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, info := range inst.infos {
|
||||
if err := b.docker.CopyOnBuild(container.ID, dest, info.root, info.path, inst.allowLocalDecompression); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return b.commitContainer(state, container.ID, runConfigWithCommentCmd)
|
||||
}
|
||||
|
||||
// For backwards compat, if there's just one info then use it as the
|
||||
// cache look-up string, otherwise hash 'em all into one
|
||||
func getSourceHashFromInfos(infos []copyInfo) string {
|
||||
if len(infos) == 1 {
|
||||
return infos[0].hash
|
||||
}
|
||||
var hashs []string
|
||||
for _, info := range infos {
|
||||
if err := b.docker.CopyOnBuild(container.ID, dest, info.FileInfo, info.decompress); err != nil {
|
||||
return err
|
||||
}
|
||||
hashs = append(hashs, info.hash)
|
||||
}
|
||||
|
||||
return b.commit(container.ID, cmd, comment)
|
||||
return hashStringSlice("multi", hashs)
|
||||
}
|
||||
|
||||
func (b *Builder) download(srcURL string) (fi builder.FileInfo, err error) {
|
||||
// get filename from URL
|
||||
u, err := url.Parse(srcURL)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
path := filepath.FromSlash(u.Path) // Ensure in platform semantics
|
||||
if strings.HasSuffix(path, string(os.PathSeparator)) {
|
||||
path = path[:len(path)-1]
|
||||
}
|
||||
parts := strings.Split(path, string(os.PathSeparator))
|
||||
filename := parts[len(parts)-1]
|
||||
if filename == "" {
|
||||
err = fmt.Errorf("cannot determine filename from url: %s", u)
|
||||
return
|
||||
}
|
||||
|
||||
// Initiate the download
|
||||
resp, err := httputils.Download(srcURL)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Prepare file in a tmp dir
|
||||
tmpDir, err := ioutils.TempDir("", "docker-remote")
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
os.RemoveAll(tmpDir)
|
||||
}
|
||||
}()
|
||||
tmpFileName := filepath.Join(tmpDir, filename)
|
||||
tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
stdoutFormatter := b.Stdout.(*streamformatter.StdoutFormatter)
|
||||
progressOutput := stdoutFormatter.StreamFormatter.NewProgressOutput(stdoutFormatter.Writer, true)
|
||||
progressReader := progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Downloading")
|
||||
// Download and dump result to tmp file
|
||||
if _, err = io.Copy(tmpFile, progressReader); err != nil {
|
||||
tmpFile.Close()
|
||||
return
|
||||
}
|
||||
fmt.Fprintln(b.Stdout)
|
||||
// ignoring error because the file was already opened successfully
|
||||
tmpFileSt, err := tmpFile.Stat()
|
||||
if err != nil {
|
||||
tmpFile.Close()
|
||||
return
|
||||
}
|
||||
|
||||
// Set the mtime to the Last-Modified header value if present
|
||||
// Otherwise just remove atime and mtime
|
||||
mTime := time.Time{}
|
||||
|
||||
lastMod := resp.Header.Get("Last-Modified")
|
||||
if lastMod != "" {
|
||||
// If we can't parse it then just let it default to 'zero'
|
||||
// otherwise use the parsed time value
|
||||
if parsedMTime, err := http.ParseTime(lastMod); err == nil {
|
||||
mTime = parsedMTime
|
||||
}
|
||||
}
|
||||
|
||||
tmpFile.Close()
|
||||
|
||||
if err = system.Chtimes(tmpFileName, mTime, mTime); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Calc the checksum, even if we're using the cache
|
||||
r, err := archive.Tar(tmpFileName, archive.Uncompressed)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version1)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if _, err = io.Copy(ioutil.Discard, tarSum); err != nil {
|
||||
return
|
||||
}
|
||||
hash := tarSum.Sum(nil)
|
||||
r.Close()
|
||||
return &builder.HashedFileInfo{FileInfo: builder.PathFileInfo{FileInfo: tmpFileSt, FilePath: tmpFileName}, FileHash: hash}, nil
|
||||
}
|
||||
|
||||
var windowsBlacklist = map[string]bool{
|
||||
"c:\\": true,
|
||||
"c:\\windows": true,
|
||||
}
|
||||
|
||||
func (b *Builder) calcCopyInfo(cmdName, origPath string, allowLocalDecompression, allowWildcards bool, imageSource *imageMount) ([]copyInfo, error) {
|
||||
|
||||
// Work in daemon-specific OS filepath semantics
|
||||
origPath = filepath.FromSlash(origPath)
|
||||
// validate windows paths from other images
|
||||
if imageSource != nil && runtime.GOOS == "windows" {
|
||||
p := strings.ToLower(filepath.Clean(origPath))
|
||||
if !filepath.IsAbs(p) {
|
||||
if filepath.VolumeName(p) != "" {
|
||||
if p[len(p)-2:] == ":." { // case where clean returns weird c:. paths
|
||||
p = p[:len(p)-1]
|
||||
}
|
||||
p += "\\"
|
||||
} else {
|
||||
p = filepath.Join("c:\\", p)
|
||||
}
|
||||
}
|
||||
if _, blacklisted := windowsBlacklist[p]; blacklisted {
|
||||
return nil, errors.New("copy from c:\\ or c:\\windows is not allowed on windows")
|
||||
}
|
||||
}
|
||||
|
||||
if origPath != "" && origPath[0] == os.PathSeparator && len(origPath) > 1 {
|
||||
origPath = origPath[1:]
|
||||
}
|
||||
origPath = strings.TrimPrefix(origPath, "."+string(os.PathSeparator))
|
||||
|
||||
context := b.context
|
||||
var err error
|
||||
if imageSource != nil {
|
||||
context, err = imageSource.context()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if context == nil {
|
||||
return nil, errors.Errorf("No context given. Impossible to use %s", cmdName)
|
||||
}
|
||||
|
||||
// Deal with wildcards
|
||||
if allowWildcards && containsWildcards(origPath) {
|
||||
var copyInfos []copyInfo
|
||||
if err := context.Walk("", func(path string, info builder.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if info.Name() == "" {
|
||||
// Why are we doing this check?
|
||||
return nil
|
||||
}
|
||||
if match, _ := filepath.Match(origPath, path); !match {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Note we set allowWildcards to false in case the name has
|
||||
// a * in it
|
||||
subInfos, err := b.calcCopyInfo(cmdName, path, allowLocalDecompression, false, imageSource)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
copyInfos = append(copyInfos, subInfos...)
|
||||
return nil
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return copyInfos, nil
|
||||
}
|
||||
|
||||
// Must be a dir or a file
|
||||
statPath, fi, err := context.Stat(origPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
copyInfos := []copyInfo{{FileInfo: fi, decompress: allowLocalDecompression}}
|
||||
|
||||
hfi, handleHash := fi.(builder.Hashed)
|
||||
if !handleHash {
|
||||
return copyInfos, nil
|
||||
}
|
||||
if imageSource != nil {
|
||||
// fast-cache based on imageID
|
||||
if h, ok := b.imageContexts.getCache(imageSource.id, origPath); ok {
|
||||
hfi.SetHash(h.(string))
|
||||
return copyInfos, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Deal with the single file case
|
||||
if !fi.IsDir() {
|
||||
hfi.SetHash("file:" + hfi.Hash())
|
||||
return copyInfos, nil
|
||||
}
|
||||
// Must be a dir
|
||||
var subfiles []string
|
||||
err = context.Walk(statPath, func(path string, info builder.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// we already checked handleHash above
|
||||
subfiles = append(subfiles, info.(builder.Hashed).Hash())
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sort.Strings(subfiles)
|
||||
func hashStringSlice(prefix string, slice []string) string {
|
||||
hasher := sha256.New()
|
||||
hasher.Write([]byte(strings.Join(subfiles, ",")))
|
||||
hfi.SetHash("dir:" + hex.EncodeToString(hasher.Sum(nil)))
|
||||
if imageSource != nil {
|
||||
b.imageContexts.setCache(imageSource.id, origPath, hfi.Hash())
|
||||
}
|
||||
|
||||
return copyInfos, nil
|
||||
hasher.Write([]byte(strings.Join(slice, ",")))
|
||||
return prefix + ":" + hex.EncodeToString(hasher.Sum(nil))
|
||||
}
|
||||
|
||||
func (b *Builder) processImageFrom(img builder.Image) error {
|
||||
if img != nil {
|
||||
b.image = img.ImageID()
|
||||
type runConfigModifier func(*container.Config)
|
||||
|
||||
if img.RunConfig() != nil {
|
||||
b.runConfig = img.RunConfig()
|
||||
func copyRunConfig(runConfig *container.Config, modifiers ...runConfigModifier) *container.Config {
|
||||
copy := *runConfig
|
||||
for _, modifier := range modifiers {
|
||||
modifier(©)
|
||||
}
|
||||
return ©
|
||||
}
|
||||
|
||||
func withCmd(cmd []string) runConfigModifier {
|
||||
return func(runConfig *container.Config) {
|
||||
runConfig.Cmd = cmd
|
||||
}
|
||||
}
|
||||
|
||||
// withCmdComment sets Cmd to a nop comment string. See withCmdCommentString for
|
||||
// why there are two almost identical versions of this.
|
||||
func withCmdComment(comment string) runConfigModifier {
|
||||
return func(runConfig *container.Config) {
|
||||
runConfig.Cmd = append(getShell(runConfig), "#(nop) ", comment)
|
||||
}
|
||||
}
|
||||
|
||||
// withCmdCommentString exists to maintain compatibility with older versions.
|
||||
// A few instructions (workdir, copy, add) used a nop comment that is a single arg
|
||||
// where as all the other instructions used a two arg comment string. This
|
||||
// function implements the single arg version.
|
||||
func withCmdCommentString(comment string) runConfigModifier {
|
||||
return func(runConfig *container.Config) {
|
||||
runConfig.Cmd = append(getShell(runConfig), "#(nop) "+comment)
|
||||
}
|
||||
}
|
||||
|
||||
func withEnv(env []string) runConfigModifier {
|
||||
return func(runConfig *container.Config) {
|
||||
runConfig.Env = env
|
||||
}
|
||||
}
|
||||
|
||||
// withEntrypointOverride sets an entrypoint on runConfig if the command is
|
||||
// not empty. The entrypoint is left unmodified if command is empty.
|
||||
//
|
||||
// The dockerfile RUN instruction expect to run without an entrypoint
|
||||
// so the runConfig entrypoint needs to be modified accordingly. ContainerCreate
|
||||
// will change a []string{""} entrypoint to nil, so we probe the cache with the
|
||||
// nil entrypoint.
|
||||
func withEntrypointOverride(cmd []string, entrypoint []string) runConfigModifier {
|
||||
return func(runConfig *container.Config) {
|
||||
if len(cmd) > 0 {
|
||||
runConfig.Entrypoint = entrypoint
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Check to see if we have a default PATH, note that windows won't
|
||||
// have one as it's set by HCS
|
||||
if system.DefaultPathEnv != "" {
|
||||
// Convert the slice of strings that represent the current list
|
||||
// of env vars into a map so we can see if PATH is already set.
|
||||
// If it's not set then go ahead and give it our default value
|
||||
configEnv := opts.ConvertKVStringsToMap(b.runConfig.Env)
|
||||
if _, ok := configEnv["PATH"]; !ok {
|
||||
b.runConfig.Env = append(b.runConfig.Env,
|
||||
"PATH="+system.DefaultPathEnv)
|
||||
}
|
||||
// getShell is a helper function which gets the right shell for prefixing the
|
||||
// shell-form of RUN, ENTRYPOINT and CMD instructions
|
||||
func getShell(c *container.Config) []string {
|
||||
if 0 == len(c.Shell) {
|
||||
return append([]string{}, defaultShell[:]...)
|
||||
}
|
||||
|
||||
if img == nil {
|
||||
// Typically this means they used "FROM scratch"
|
||||
return nil
|
||||
}
|
||||
|
||||
// Process ONBUILD triggers if they exist
|
||||
if nTriggers := len(b.runConfig.OnBuild); nTriggers != 0 {
|
||||
word := "trigger"
|
||||
if nTriggers > 1 {
|
||||
word = "triggers"
|
||||
}
|
||||
fmt.Fprintf(b.Stderr, "# Executing %d build %s...\n", nTriggers, word)
|
||||
}
|
||||
|
||||
// Copy the ONBUILD triggers, and remove them from the config, since the config will be committed.
|
||||
onBuildTriggers := b.runConfig.OnBuild
|
||||
b.runConfig.OnBuild = []string{}
|
||||
|
||||
// Reset stdin settings as all build actions run without stdin
|
||||
b.runConfig.OpenStdin = false
|
||||
b.runConfig.StdinOnce = false
|
||||
|
||||
// parse the ONBUILD triggers by invoking the parser
|
||||
for _, step := range onBuildTriggers {
|
||||
ast, err := parser.Parse(strings.NewReader(step), &b.directive)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
total := len(ast.Children)
|
||||
for _, n := range ast.Children {
|
||||
if err := b.checkDispatch(n, true); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
for i, n := range ast.Children {
|
||||
if err := b.dispatch(i, total, n); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
return append([]string{}, c.Shell[:]...)
|
||||
}
|
||||
|
||||
// probeCache checks if cache match can be found for current build instruction.
|
||||
// If an image is found, probeCache returns `(true, nil)`.
|
||||
// If no image is found, it returns `(false, nil)`.
|
||||
// If there is any error, it returns `(false, err)`.
|
||||
func (b *Builder) probeCache() (bool, error) {
|
||||
func (b *Builder) probeCache(dispatchState *dispatchState, runConfig *container.Config) (bool, error) {
|
||||
c := b.imageCache
|
||||
if c == nil || b.options.NoCache || b.cacheBusted {
|
||||
return false, nil
|
||||
}
|
||||
cache, err := c.GetCache(b.image, b.runConfig)
|
||||
cache, err := c.GetCache(dispatchState.imageID, runConfig)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if len(cache) == 0 {
|
||||
logrus.Debugf("[BUILDER] Cache miss: %s", b.runConfig.Cmd)
|
||||
logrus.Debugf("[BUILDER] Cache miss: %s", runConfig.Cmd)
|
||||
b.cacheBusted = true
|
||||
return false, nil
|
||||
}
|
||||
|
||||
fmt.Fprint(b.Stdout, " ---> Using cache\n")
|
||||
logrus.Debugf("[BUILDER] Use cached version: %s", b.runConfig.Cmd)
|
||||
b.image = string(cache)
|
||||
b.imageContexts.update(b.image, b.runConfig)
|
||||
logrus.Debugf("[BUILDER] Use cached version: %s", runConfig.Cmd)
|
||||
dispatchState.imageID = string(cache)
|
||||
b.buildStages.update(dispatchState.imageID, runConfig)
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (b *Builder) create() (string, error) {
|
||||
if !b.hasFromImage() {
|
||||
return "", errors.New("Please provide a source image with `from` prior to run")
|
||||
}
|
||||
b.runConfig.Image = b.image
|
||||
|
||||
func (b *Builder) create(runConfig *container.Config) (string, error) {
|
||||
resources := container.Resources{
|
||||
CgroupParent: b.options.CgroupParent,
|
||||
CPUShares: b.options.CPUShares,
|
||||
@@ -545,11 +238,9 @@ func (b *Builder) create() (string, error) {
|
||||
ExtraHosts: b.options.ExtraHosts,
|
||||
}
|
||||
|
||||
config := *b.runConfig
|
||||
|
||||
// Create the container
|
||||
c, err := b.docker.ContainerCreate(types.ContainerCreateConfig{
|
||||
Config: b.runConfig,
|
||||
Config: runConfig,
|
||||
HostConfig: hostConfig,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -561,23 +252,24 @@ func (b *Builder) create() (string, error) {
|
||||
|
||||
b.tmpContainers[c.ID] = struct{}{}
|
||||
fmt.Fprintf(b.Stdout, " ---> Running in %s\n", stringid.TruncateID(c.ID))
|
||||
|
||||
// override the entry point that may have been picked up from the base image
|
||||
if err := b.docker.ContainerUpdateCmdOnBuild(c.ID, config.Cmd); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return c.ID, nil
|
||||
}
|
||||
|
||||
var errCancelled = errors.New("build cancelled")
|
||||
|
||||
func (b *Builder) run(cID string) (err error) {
|
||||
func (b *Builder) run(cID string, cmd []string) (err error) {
|
||||
attached := make(chan struct{})
|
||||
errCh := make(chan error)
|
||||
go func() {
|
||||
errCh <- b.docker.ContainerAttachRaw(cID, nil, b.Stdout, b.Stderr, true)
|
||||
errCh <- b.docker.ContainerAttachRaw(cID, nil, b.Stdout, b.Stderr, true, attached)
|
||||
}()
|
||||
|
||||
select {
|
||||
case err := <-errCh:
|
||||
return err
|
||||
case <-attached:
|
||||
}
|
||||
|
||||
finished := make(chan struct{})
|
||||
cancelErrCh := make(chan error, 1)
|
||||
go func() {
|
||||
@@ -611,16 +303,25 @@ func (b *Builder) run(cID string) (err error) {
|
||||
return err
|
||||
}
|
||||
|
||||
if ret, _ := b.docker.ContainerWait(cID, -1); ret != 0 {
|
||||
waitC, err := b.docker.ContainerWait(b.clientCtx, cID, containerpkg.WaitConditionNotRunning)
|
||||
if err != nil {
|
||||
// Unable to begin waiting for container.
|
||||
close(finished)
|
||||
if cancelErr := <-cancelErrCh; cancelErr != nil {
|
||||
logrus.Debugf("Build cancelled (%v) and got a non-zero code from ContainerWait: %d",
|
||||
cancelErr, ret)
|
||||
logrus.Debugf("Build cancelled (%v) and unable to begin ContainerWait: %d", cancelErr, err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
if status := <-waitC; status.ExitCode() != 0 {
|
||||
close(finished)
|
||||
if cancelErr := <-cancelErrCh; cancelErr != nil {
|
||||
logrus.Debugf("Build cancelled (%v) and got a non-zero code from ContainerWait: %d", cancelErr, status.ExitCode())
|
||||
}
|
||||
// TODO: change error type, because jsonmessage.JSONError assumes HTTP
|
||||
return &jsonmessage.JSONError{
|
||||
Message: fmt.Sprintf("The command '%s' returned a non-zero code: %d", strings.Join(b.runConfig.Cmd, " "), ret),
|
||||
Code: ret,
|
||||
Message: fmt.Sprintf("The command '%s' returned a non-zero code: %d", strings.Join(cmd, " "), status.ExitCode()),
|
||||
Code: status.ExitCode(),
|
||||
}
|
||||
}
|
||||
close(finished)
|
||||
@@ -648,59 +349,3 @@ func (b *Builder) clearTmp() {
|
||||
fmt.Fprintf(b.Stdout, "Removing intermediate container %s\n", stringid.TruncateID(c))
|
||||
}
|
||||
}
|
||||
|
||||
// readDockerfile reads a Dockerfile from the current context.
|
||||
func (b *Builder) readDockerfile() (*parser.Node, error) {
|
||||
// If no -f was specified then look for 'Dockerfile'. If we can't find
|
||||
// that then look for 'dockerfile'. If neither are found then default
|
||||
// back to 'Dockerfile' and use that in the error message.
|
||||
if b.options.Dockerfile == "" {
|
||||
b.options.Dockerfile = builder.DefaultDockerfileName
|
||||
if _, _, err := b.context.Stat(b.options.Dockerfile); os.IsNotExist(err) {
|
||||
lowercase := strings.ToLower(b.options.Dockerfile)
|
||||
if _, _, err := b.context.Stat(lowercase); err == nil {
|
||||
b.options.Dockerfile = lowercase
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
nodes, err := b.parseDockerfile()
|
||||
if err != nil {
|
||||
return nodes, err
|
||||
}
|
||||
|
||||
// After the Dockerfile has been parsed, we need to check the .dockerignore
|
||||
// file for either "Dockerfile" or ".dockerignore", and if either are
|
||||
// present then erase them from the build context. These files should never
|
||||
// have been sent from the client but we did send them to make sure that
|
||||
// we had the Dockerfile to actually parse, and then we also need the
|
||||
// .dockerignore file to know whether either file should be removed.
|
||||
// Note that this assumes the Dockerfile has been read into memory and
|
||||
// is now safe to be removed.
|
||||
if dockerIgnore, ok := b.context.(builder.DockerIgnoreContext); ok {
|
||||
dockerIgnore.Process([]string{b.options.Dockerfile})
|
||||
}
|
||||
return nodes, nil
|
||||
}
|
||||
|
||||
func (b *Builder) parseDockerfile() (*parser.Node, error) {
|
||||
f, err := b.context.Open(b.options.Dockerfile)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return nil, fmt.Errorf("Cannot locate specified Dockerfile: %s", b.options.Dockerfile)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
if f, ok := f.(*os.File); ok {
|
||||
// ignoring error because Open already succeeded
|
||||
fi, err := f.Stat()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Unexpected error reading Dockerfile: %v", err)
|
||||
}
|
||||
if fi.Size() == 0 {
|
||||
return nil, fmt.Errorf("The Dockerfile (%s) cannot be empty", b.options.Dockerfile)
|
||||
}
|
||||
}
|
||||
return parser.Parse(f, &b.directive)
|
||||
}
|
||||
|
||||
@@ -5,9 +5,13 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/backend"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/builder"
|
||||
"github.com/docker/docker/builder/remotecontext"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/pkg/testutil/assert"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestEmptyDockerfile(t *testing.T) {
|
||||
@@ -16,7 +20,7 @@ func TestEmptyDockerfile(t *testing.T) {
|
||||
|
||||
createTestTempFile(t, contextDir, builder.DefaultDockerfileName, "", 0777)
|
||||
|
||||
readAndCheckDockerfile(t, "emptyDockerfile", contextDir, "", "The Dockerfile (Dockerfile) cannot be empty")
|
||||
readAndCheckDockerfile(t, "emptyDockerfile", contextDir, "", "the Dockerfile (Dockerfile) cannot be empty")
|
||||
}
|
||||
|
||||
func TestSymlinkDockerfile(t *testing.T) {
|
||||
@@ -38,7 +42,7 @@ func TestDockerfileOutsideTheBuildContext(t *testing.T) {
|
||||
contextDir, cleanup := createTestTempDir(t, "", "builder-dockerfile-test")
|
||||
defer cleanup()
|
||||
|
||||
expectedError := "Forbidden path outside the build context"
|
||||
expectedError := "Forbidden path outside the build context: ../../Dockerfile ()"
|
||||
|
||||
readAndCheckDockerfile(t, "DockerfileOutsideTheBuildContext", contextDir, "../../Dockerfile", expectedError)
|
||||
}
|
||||
@@ -54,7 +58,7 @@ func TestNonExistingDockerfile(t *testing.T) {
|
||||
|
||||
func readAndCheckDockerfile(t *testing.T, testName, contextDir, dockerfilePath, expectedError string) {
|
||||
tarStream, err := archive.Tar(contextDir, archive.Uncompressed)
|
||||
assert.NilError(t, err)
|
||||
require.NoError(t, err)
|
||||
|
||||
defer func() {
|
||||
if err = tarStream.Close(); err != nil {
|
||||
@@ -62,21 +66,65 @@ func readAndCheckDockerfile(t *testing.T, testName, contextDir, dockerfilePath,
|
||||
}
|
||||
}()
|
||||
|
||||
context, err := builder.MakeTarSumContext(tarStream)
|
||||
assert.NilError(t, err)
|
||||
|
||||
defer func() {
|
||||
if err = context.Close(); err != nil {
|
||||
t.Fatalf("Error when closing tar context: %s", err)
|
||||
}
|
||||
}()
|
||||
|
||||
options := &types.ImageBuildOptions{
|
||||
Dockerfile: dockerfilePath,
|
||||
if dockerfilePath == "" { // handled in BuildWithContext
|
||||
dockerfilePath = builder.DefaultDockerfileName
|
||||
}
|
||||
|
||||
config := backend.BuildConfig{
|
||||
Options: &types.ImageBuildOptions{Dockerfile: dockerfilePath},
|
||||
Source: tarStream,
|
||||
}
|
||||
_, _, err = remotecontext.Detect(config)
|
||||
assert.EqualError(t, err, expectedError)
|
||||
}
|
||||
|
||||
func TestCopyRunConfig(t *testing.T) {
|
||||
defaultEnv := []string{"foo=1"}
|
||||
defaultCmd := []string{"old"}
|
||||
|
||||
var testcases = []struct {
|
||||
doc string
|
||||
modifiers []runConfigModifier
|
||||
expected *container.Config
|
||||
}{
|
||||
{
|
||||
doc: "Set the command",
|
||||
modifiers: []runConfigModifier{withCmd([]string{"new"})},
|
||||
expected: &container.Config{
|
||||
Cmd: []string{"new"},
|
||||
Env: defaultEnv,
|
||||
},
|
||||
},
|
||||
{
|
||||
doc: "Set the command to a comment",
|
||||
modifiers: []runConfigModifier{withCmdComment("comment")},
|
||||
expected: &container.Config{
|
||||
Cmd: append(defaultShell, "#(nop) ", "comment"),
|
||||
Env: defaultEnv,
|
||||
},
|
||||
},
|
||||
{
|
||||
doc: "Set the command and env",
|
||||
modifiers: []runConfigModifier{
|
||||
withCmd([]string{"new"}),
|
||||
withEnv([]string{"one", "two"}),
|
||||
},
|
||||
expected: &container.Config{
|
||||
Cmd: []string{"new"},
|
||||
Env: []string{"one", "two"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, testcase := range testcases {
|
||||
runConfig := &container.Config{
|
||||
Cmd: defaultCmd,
|
||||
Env: defaultEnv,
|
||||
}
|
||||
runConfigCopy := copyRunConfig(runConfig, testcase.modifiers...)
|
||||
assert.Equal(t, testcase.expected, runConfigCopy, testcase.doc)
|
||||
// Assert the original was not modified
|
||||
assert.NotEqual(t, runConfig, runConfigCopy, testcase.doc)
|
||||
}
|
||||
|
||||
b := &Builder{options: options, context: context}
|
||||
|
||||
_, err = b.readDockerfile()
|
||||
assert.Error(t, err, expectedError)
|
||||
}
|
||||
|
||||
@@ -36,3 +36,7 @@ func containsWildcards(name string) bool {
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func validateCopySourcePath(imageSource *imageMount, origPath string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/pkg/system"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// normaliseDest normalises the destination of a COPY/ADD command in a
|
||||
@@ -64,3 +65,31 @@ func containsWildcards(name string) bool {
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
var pathBlacklist = map[string]bool{
|
||||
"c:\\": true,
|
||||
"c:\\windows": true,
|
||||
}
|
||||
|
||||
func validateCopySourcePath(imageSource *imageMount, origPath string) error {
|
||||
// validate windows paths from other images
|
||||
if imageSource == nil {
|
||||
return nil
|
||||
}
|
||||
origPath = filepath.FromSlash(origPath)
|
||||
p := strings.ToLower(filepath.Clean(origPath))
|
||||
if !filepath.IsAbs(p) {
|
||||
if filepath.VolumeName(p) != "" {
|
||||
if p[len(p)-2:] == ":." { // case where clean returns weird c:. paths
|
||||
p = p[:len(p)-1]
|
||||
}
|
||||
p += "\\"
|
||||
} else {
|
||||
p = filepath.Join("c:\\", p)
|
||||
}
|
||||
}
|
||||
if _, blacklisted := pathBlacklist[p]; blacklisted {
|
||||
return errors.New("copy from c:\\ or c:\\windows is not allowed on windows")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
44
builder/dockerfile/metrics.go
Normal file
44
builder/dockerfile/metrics.go
Normal file
@@ -0,0 +1,44 @@
|
||||
package dockerfile
|
||||
|
||||
import (
|
||||
"github.com/docker/go-metrics"
|
||||
)
|
||||
|
||||
var (
|
||||
buildsTriggered metrics.Counter
|
||||
buildsFailed metrics.LabeledCounter
|
||||
)
|
||||
|
||||
// Build metrics prometheus messages, these values must be initialized before
|
||||
// using them. See the example below in the "builds_failed" metric definition.
|
||||
const (
|
||||
metricsDockerfileSyntaxError = "dockerfile_syntax_error"
|
||||
metricsDockerfileEmptyError = "dockerfile_empty_error"
|
||||
metricsCommandNotSupportedError = "command_not_supported_error"
|
||||
metricsErrorProcessingCommandsError = "error_processing_commands_error"
|
||||
metricsBuildTargetNotReachableError = "build_target_not_reachable_error"
|
||||
metricsMissingOnbuildArgumentsError = "missing_onbuild_arguments_error"
|
||||
metricsUnknownInstructionError = "unknown_instruction_error"
|
||||
metricsBuildCanceled = "build_canceled"
|
||||
)
|
||||
|
||||
func init() {
|
||||
buildMetrics := metrics.NewNamespace("builder", "", nil)
|
||||
|
||||
buildsTriggered = buildMetrics.NewCounter("builds_triggered", "Number of triggered image builds")
|
||||
buildsFailed = buildMetrics.NewLabeledCounter("builds_failed", "Number of failed image builds", "reason")
|
||||
for _, r := range []string{
|
||||
metricsDockerfileSyntaxError,
|
||||
metricsDockerfileEmptyError,
|
||||
metricsCommandNotSupportedError,
|
||||
metricsErrorProcessingCommandsError,
|
||||
metricsBuildTargetNotReachableError,
|
||||
metricsMissingOnbuildArgumentsError,
|
||||
metricsUnknownInstructionError,
|
||||
metricsBuildCanceled,
|
||||
} {
|
||||
buildsFailed.WithValues(r)
|
||||
}
|
||||
|
||||
metrics.Register(buildMetrics)
|
||||
}
|
||||
@@ -2,42 +2,36 @@ package dockerfile
|
||||
|
||||
import (
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/backend"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/builder"
|
||||
containerpkg "github.com/docker/docker/container"
|
||||
"github.com/docker/docker/image"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// MockBackend implements the builder.Backend interface for unit testing
|
||||
type MockBackend struct {
|
||||
getImageOnBuildFunc func(string) (builder.Image, error)
|
||||
}
|
||||
|
||||
func (m *MockBackend) GetImageOnBuild(name string) (builder.Image, error) {
|
||||
if m.getImageOnBuildFunc != nil {
|
||||
return m.getImageOnBuildFunc(name)
|
||||
}
|
||||
return &mockImage{id: "theid"}, nil
|
||||
containerCreateFunc func(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error)
|
||||
commitFunc func(string, *backend.ContainerCommitConfig) (string, error)
|
||||
getImageFunc func(string) (builder.Image, builder.ReleaseableLayer, error)
|
||||
}
|
||||
|
||||
func (m *MockBackend) TagImageWithReference(image.ID, reference.Named) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MockBackend) PullOnBuild(ctx context.Context, name string, authConfigs map[string]types.AuthConfig, output io.Writer) (builder.Image, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (m *MockBackend) ContainerAttachRaw(cID string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool) error {
|
||||
func (m *MockBackend) ContainerAttachRaw(cID string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool, attached chan struct{}) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MockBackend) ContainerCreate(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) {
|
||||
if m.containerCreateFunc != nil {
|
||||
return m.containerCreateFunc(config)
|
||||
}
|
||||
return container.ContainerCreateCreatedBody{}, nil
|
||||
}
|
||||
|
||||
@@ -45,7 +39,10 @@ func (m *MockBackend) ContainerRm(name string, config *types.ContainerRmConfig)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MockBackend) Commit(string, *backend.ContainerCommitConfig) (string, error) {
|
||||
func (m *MockBackend) Commit(cID string, cfg *backend.ContainerCommitConfig) (string, error) {
|
||||
if m.commitFunc != nil {
|
||||
return m.commitFunc(cID, cfg)
|
||||
}
|
||||
return "", nil
|
||||
}
|
||||
|
||||
@@ -57,32 +54,24 @@ func (m *MockBackend) ContainerStart(containerID string, hostConfig *container.H
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MockBackend) ContainerWait(containerID string, timeout time.Duration) (int, error) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func (m *MockBackend) ContainerUpdateCmdOnBuild(containerID string, cmd []string) error {
|
||||
return nil
|
||||
func (m *MockBackend) ContainerWait(ctx context.Context, containerID string, condition containerpkg.WaitCondition) (<-chan containerpkg.StateStatus, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (m *MockBackend) ContainerCreateWorkdir(containerID string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MockBackend) CopyOnBuild(containerID string, destPath string, src builder.FileInfo, decompress bool) error {
|
||||
func (m *MockBackend) CopyOnBuild(containerID string, destPath string, srcRoot string, srcPath string, decompress bool) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MockBackend) HasExperimental() bool {
|
||||
return false
|
||||
}
|
||||
func (m *MockBackend) GetImageAndReleasableLayer(ctx context.Context, refOrID string, opts backend.GetImageAndLayerOptions) (builder.Image, builder.ReleaseableLayer, error) {
|
||||
if m.getImageFunc != nil {
|
||||
return m.getImageFunc(refOrID)
|
||||
}
|
||||
|
||||
func (m *MockBackend) SquashImage(from string, to string) (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (m *MockBackend) MountImage(name string) (string, func() error, error) {
|
||||
return "", func() error { return nil }, nil
|
||||
return &mockImage{id: "theid"}, &mockLayer{}, nil
|
||||
}
|
||||
|
||||
type mockImage struct {
|
||||
@@ -97,3 +86,24 @@ func (i *mockImage) ImageID() string {
|
||||
func (i *mockImage) RunConfig() *container.Config {
|
||||
return i.config
|
||||
}
|
||||
|
||||
type mockImageCache struct {
|
||||
getCacheFunc func(parentID string, cfg *container.Config) (string, error)
|
||||
}
|
||||
|
||||
func (mic *mockImageCache) GetCache(parentID string, cfg *container.Config) (string, error) {
|
||||
if mic.getCacheFunc != nil {
|
||||
return mic.getCacheFunc(parentID, cfg)
|
||||
}
|
||||
return "", nil
|
||||
}
|
||||
|
||||
type mockLayer struct{}
|
||||
|
||||
func (l *mockLayer) Release() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *mockLayer) Mount() (string, error) {
|
||||
return "mountPath", nil
|
||||
}
|
||||
|
||||
@@ -23,14 +23,10 @@ func main() {
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
d := parser.Directive{LookingForDirectives: true}
|
||||
parser.SetEscapeToken(parser.DefaultEscapeToken, &d)
|
||||
|
||||
ast, err := parser.Parse(f, &d)
|
||||
result, err := parser.Parse(f)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
} else {
|
||||
fmt.Println(ast.Dump())
|
||||
}
|
||||
fmt.Println(result.AST.Dump())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -28,10 +28,9 @@ var validJSONArraysOfStrings = map[string][]string{
|
||||
|
||||
func TestJSONArraysOfStrings(t *testing.T) {
|
||||
for json, expected := range validJSONArraysOfStrings {
|
||||
d := Directive{}
|
||||
SetEscapeToken(DefaultEscapeToken, &d)
|
||||
d := NewDefaultDirective()
|
||||
|
||||
if node, _, err := parseJSON(json, &d); err != nil {
|
||||
if node, _, err := parseJSON(json, d); err != nil {
|
||||
t.Fatalf("%q should be a valid JSON array of strings, but wasn't! (err: %q)", json, err)
|
||||
} else {
|
||||
i := 0
|
||||
@@ -51,10 +50,9 @@ func TestJSONArraysOfStrings(t *testing.T) {
|
||||
}
|
||||
}
|
||||
for _, json := range invalidJSONArraysOfStrings {
|
||||
d := Directive{}
|
||||
SetEscapeToken(DefaultEscapeToken, &d)
|
||||
d := NewDefaultDirective()
|
||||
|
||||
if _, _, err := parseJSON(json, &d); err != errDockerfileNotStringArray {
|
||||
if _, _, err := parseJSON(json, d); err != errDockerfileNotStringArray {
|
||||
t.Fatalf("%q should be an invalid JSON array of strings, but wasn't!", json)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -42,7 +42,7 @@ func parseSubCommand(rest string, d *Directive) (*Node, map[string]bool, error)
|
||||
return nil, nil, nil
|
||||
}
|
||||
|
||||
_, child, err := ParseLine(rest, d, false)
|
||||
child, err := newNodeFromLine(rest, d)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
@@ -103,7 +103,7 @@ func parseWords(rest string, d *Directive) []string {
|
||||
blankOK = true
|
||||
phase = inQuote
|
||||
}
|
||||
if ch == d.EscapeToken {
|
||||
if ch == d.escapeToken {
|
||||
if pos+chWidth == len(rest) {
|
||||
continue // just skip an escape token at end of line
|
||||
}
|
||||
@@ -122,7 +122,7 @@ func parseWords(rest string, d *Directive) []string {
|
||||
phase = inWord
|
||||
}
|
||||
// The escape token is special except for ' quotes - can't escape anything for '
|
||||
if ch == d.EscapeToken && quote != '\'' {
|
||||
if ch == d.escapeToken && quote != '\'' {
|
||||
if pos+chWidth == len(rest) {
|
||||
phase = inWord
|
||||
continue // just skip the escape token at end
|
||||
|
||||
@@ -1,26 +1,27 @@
|
||||
package parser
|
||||
|
||||
import (
|
||||
"github.com/docker/docker/pkg/testutil/assert"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestParseNameValOldFormat(t *testing.T) {
|
||||
directive := Directive{}
|
||||
node, err := parseNameVal("foo bar", "LABEL", &directive)
|
||||
assert.NilError(t, err)
|
||||
assert.NoError(t, err)
|
||||
|
||||
expected := &Node{
|
||||
Value: "foo",
|
||||
Next: &Node{Value: "bar"},
|
||||
}
|
||||
assert.DeepEqual(t, node, expected)
|
||||
assert.Equal(t, expected, node)
|
||||
}
|
||||
|
||||
func TestParseNameValNewFormat(t *testing.T) {
|
||||
directive := Directive{}
|
||||
node, err := parseNameVal("foo=bar thing=star", "LABEL", &directive)
|
||||
assert.NilError(t, err)
|
||||
assert.NoError(t, err)
|
||||
|
||||
expected := &Node{
|
||||
Value: "foo",
|
||||
@@ -34,7 +35,7 @@ func TestParseNameValNewFormat(t *testing.T) {
|
||||
},
|
||||
},
|
||||
}
|
||||
assert.DeepEqual(t, node, expected)
|
||||
assert.Equal(t, expected, node)
|
||||
}
|
||||
|
||||
func TestNodeFromLabels(t *testing.T) {
|
||||
@@ -60,6 +61,6 @@ func TestNodeFromLabels(t *testing.T) {
|
||||
}
|
||||
|
||||
node := NodeFromLabels(labels)
|
||||
assert.DeepEqual(t, node, expected)
|
||||
assert.Equal(t, expected, node)
|
||||
|
||||
}
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
"unicode"
|
||||
|
||||
"github.com/docker/docker/builder/dockerfile/command"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Node is a structure used to represent a parse tree.
|
||||
@@ -34,7 +35,7 @@ type Node struct {
|
||||
Original string // original line used before parsing
|
||||
Flags []string // only top Node should have this set
|
||||
StartLine int // the line in the original dockerfile where the node begins
|
||||
EndLine int // the line in the original dockerfile where the node ends
|
||||
endLine int // the line in the original dockerfile where the node ends
|
||||
}
|
||||
|
||||
// Dump dumps the AST defined by `node` as a list of sexps.
|
||||
@@ -62,13 +63,19 @@ func (node *Node) Dump() string {
|
||||
return strings.TrimSpace(str)
|
||||
}
|
||||
|
||||
// Directive is the structure used during a build run to hold the state of
|
||||
// parsing directives.
|
||||
type Directive struct {
|
||||
EscapeToken rune // Current escape token
|
||||
LineContinuationRegex *regexp.Regexp // Current line continuation regex
|
||||
LookingForDirectives bool // Whether we are currently looking for directives
|
||||
EscapeSeen bool // Whether the escape directive has been seen
|
||||
func (node *Node) lines(start, end int) {
|
||||
node.StartLine = start
|
||||
node.endLine = end
|
||||
}
|
||||
|
||||
// AddChild adds a new child node, and updates line information
|
||||
func (node *Node) AddChild(child *Node, startLine, endLine int) {
|
||||
child.lines(startLine, endLine)
|
||||
if node.StartLine < 0 {
|
||||
node.StartLine = startLine
|
||||
}
|
||||
node.endLine = endLine
|
||||
node.Children = append(node.Children, child)
|
||||
}
|
||||
|
||||
var (
|
||||
@@ -79,18 +86,60 @@ var (
|
||||
)
|
||||
|
||||
// DefaultEscapeToken is the default escape token
|
||||
const DefaultEscapeToken = "\\"
|
||||
const DefaultEscapeToken = '\\'
|
||||
|
||||
// SetEscapeToken sets the default token for escaping characters in a Dockerfile.
|
||||
func SetEscapeToken(s string, d *Directive) error {
|
||||
// Directive is the structure used during a build run to hold the state of
|
||||
// parsing directives.
|
||||
type Directive struct {
|
||||
escapeToken rune // Current escape token
|
||||
lineContinuationRegex *regexp.Regexp // Current line continuation regex
|
||||
processingComplete bool // Whether we are done looking for directives
|
||||
escapeSeen bool // Whether the escape directive has been seen
|
||||
}
|
||||
|
||||
// setEscapeToken sets the default token for escaping characters in a Dockerfile.
|
||||
func (d *Directive) setEscapeToken(s string) error {
|
||||
if s != "`" && s != "\\" {
|
||||
return fmt.Errorf("invalid ESCAPE '%s'. Must be ` or \\", s)
|
||||
}
|
||||
d.EscapeToken = rune(s[0])
|
||||
d.LineContinuationRegex = regexp.MustCompile(`\` + s + `[ \t]*$`)
|
||||
d.escapeToken = rune(s[0])
|
||||
d.lineContinuationRegex = regexp.MustCompile(`\` + s + `[ \t]*$`)
|
||||
return nil
|
||||
}
|
||||
|
||||
// processLine looks for a parser directive '# escapeToken=<char>. Parser
|
||||
// directives must precede any builder instruction or other comments, and cannot
|
||||
// be repeated.
|
||||
func (d *Directive) processLine(line string) error {
|
||||
if d.processingComplete {
|
||||
return nil
|
||||
}
|
||||
// Processing is finished after the first call
|
||||
defer func() { d.processingComplete = true }()
|
||||
|
||||
tecMatch := tokenEscapeCommand.FindStringSubmatch(strings.ToLower(line))
|
||||
if len(tecMatch) == 0 {
|
||||
return nil
|
||||
}
|
||||
if d.escapeSeen == true {
|
||||
return errors.New("only one escape parser directive can be used")
|
||||
}
|
||||
for i, n := range tokenEscapeCommand.SubexpNames() {
|
||||
if n == "escapechar" {
|
||||
d.escapeSeen = true
|
||||
return d.setEscapeToken(tecMatch[i])
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewDefaultDirective returns a new Directive with the default escapeToken token
|
||||
func NewDefaultDirective() *Directive {
|
||||
directive := Directive{}
|
||||
directive.setEscapeToken(string(DefaultEscapeToken))
|
||||
return &directive
|
||||
}
|
||||
|
||||
func init() {
|
||||
// Dispatch Table. see line_parsers.go for the parse functions.
|
||||
// The command is parsed and mapped to the line parser. The line parser
|
||||
@@ -120,28 +169,6 @@ func init() {
|
||||
}
|
||||
}
|
||||
|
||||
// ParseLine parses a line and returns the remainder.
|
||||
func ParseLine(line string, d *Directive, ignoreCont bool) (string, *Node, error) {
|
||||
if escapeFound, err := handleParserDirective(line, d); err != nil || escapeFound {
|
||||
d.EscapeSeen = escapeFound
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
d.LookingForDirectives = false
|
||||
|
||||
if line = stripComments(line); line == "" {
|
||||
return "", nil, nil
|
||||
}
|
||||
|
||||
if !ignoreCont && d.LineContinuationRegex.MatchString(line) {
|
||||
line = d.LineContinuationRegex.ReplaceAllString(line, "")
|
||||
return line, nil, nil
|
||||
}
|
||||
|
||||
node, err := newNodeFromLine(line, d)
|
||||
return "", node, err
|
||||
}
|
||||
|
||||
// newNodeFromLine splits the line into parts, and dispatches to a function
|
||||
// based on the command and command arguments. A Node is created from the
|
||||
// result of the dispatch.
|
||||
@@ -170,109 +197,98 @@ func newNodeFromLine(line string, directive *Directive) (*Node, error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Handle the parser directive '# escape=<char>. Parser directives must precede
|
||||
// any builder instruction or other comments, and cannot be repeated.
|
||||
func handleParserDirective(line string, d *Directive) (bool, error) {
|
||||
if !d.LookingForDirectives {
|
||||
return false, nil
|
||||
}
|
||||
tecMatch := tokenEscapeCommand.FindStringSubmatch(strings.ToLower(line))
|
||||
if len(tecMatch) == 0 {
|
||||
return false, nil
|
||||
}
|
||||
if d.EscapeSeen == true {
|
||||
return false, fmt.Errorf("only one escape parser directive can be used")
|
||||
}
|
||||
for i, n := range tokenEscapeCommand.SubexpNames() {
|
||||
if n == "escapechar" {
|
||||
if err := SetEscapeToken(tecMatch[i], d); err != nil {
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
// Result is the result of parsing a Dockerfile
|
||||
type Result struct {
|
||||
AST *Node
|
||||
EscapeToken rune
|
||||
}
|
||||
|
||||
// Parse is the main parse routine.
|
||||
// It handles an io.ReadWriteCloser and returns the root of the AST.
|
||||
func Parse(rwc io.Reader, d *Directive) (*Node, error) {
|
||||
// Parse reads lines from a Reader, parses the lines into an AST and returns
|
||||
// the AST and escape token
|
||||
func Parse(rwc io.Reader) (*Result, error) {
|
||||
d := NewDefaultDirective()
|
||||
currentLine := 0
|
||||
root := &Node{}
|
||||
root.StartLine = -1
|
||||
root := &Node{StartLine: -1}
|
||||
scanner := bufio.NewScanner(rwc)
|
||||
|
||||
utf8bom := []byte{0xEF, 0xBB, 0xBF}
|
||||
var err error
|
||||
for scanner.Scan() {
|
||||
scannedBytes := scanner.Bytes()
|
||||
// We trim UTF8 BOM
|
||||
if currentLine == 0 {
|
||||
scannedBytes = bytes.TrimPrefix(scannedBytes, utf8bom)
|
||||
bytes := scanner.Bytes()
|
||||
switch currentLine {
|
||||
case 0:
|
||||
bytes, err = processFirstLine(d, bytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
default:
|
||||
bytes = processLine(bytes, true)
|
||||
}
|
||||
scannedLine := strings.TrimLeftFunc(string(scannedBytes), unicode.IsSpace)
|
||||
currentLine++
|
||||
line, child, err := ParseLine(scannedLine, d, false)
|
||||
|
||||
startLine := currentLine
|
||||
line, isEndOfLine := trimContinuationCharacter(string(bytes), d)
|
||||
if isEndOfLine && line == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
for !isEndOfLine && scanner.Scan() {
|
||||
bytes := processLine(scanner.Bytes(), false)
|
||||
currentLine++
|
||||
|
||||
// TODO: warn this is being deprecated/removed
|
||||
if isEmptyContinuationLine(bytes) {
|
||||
continue
|
||||
}
|
||||
|
||||
continuationLine := string(bytes)
|
||||
continuationLine, isEndOfLine = trimContinuationCharacter(continuationLine, d)
|
||||
line += continuationLine
|
||||
}
|
||||
|
||||
child, err := newNodeFromLine(line, d)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
startLine := currentLine
|
||||
|
||||
if line != "" && child == nil {
|
||||
for scanner.Scan() {
|
||||
newline := scanner.Text()
|
||||
currentLine++
|
||||
|
||||
if stripComments(strings.TrimSpace(newline)) == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
line, child, err = ParseLine(line+newline, d, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if child != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
if child == nil && line != "" {
|
||||
// When we call ParseLine we'll pass in 'true' for
|
||||
// the ignoreCont param if we're at the EOF. This will
|
||||
// prevent the func from returning immediately w/o
|
||||
// parsing the line thinking that there's more input
|
||||
// to come.
|
||||
|
||||
_, child, err = ParseLine(line, d, scanner.Err() == nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if child != nil {
|
||||
// Update the line information for the current child.
|
||||
child.StartLine = startLine
|
||||
child.EndLine = currentLine
|
||||
// Update the line information for the root. The starting line of the root is always the
|
||||
// starting line of the first child and the ending line is the ending line of the last child.
|
||||
if root.StartLine < 0 {
|
||||
root.StartLine = currentLine
|
||||
}
|
||||
root.EndLine = currentLine
|
||||
root.Children = append(root.Children, child)
|
||||
}
|
||||
root.AddChild(child, startLine, currentLine)
|
||||
}
|
||||
|
||||
return root, nil
|
||||
return &Result{AST: root, EscapeToken: d.escapeToken}, nil
|
||||
}
|
||||
|
||||
// covers comments and empty lines. Lines should be trimmed before passing to
|
||||
// this function.
|
||||
func stripComments(line string) string {
|
||||
// string is already trimmed at this point
|
||||
if tokenComment.MatchString(line) {
|
||||
return tokenComment.ReplaceAllString(line, "")
|
||||
func trimComments(src []byte) []byte {
|
||||
return tokenComment.ReplaceAll(src, []byte{})
|
||||
}
|
||||
|
||||
func trimWhitespace(src []byte) []byte {
|
||||
return bytes.TrimLeftFunc(src, unicode.IsSpace)
|
||||
}
|
||||
|
||||
func isEmptyContinuationLine(line []byte) bool {
|
||||
return len(trimComments(trimWhitespace(line))) == 0
|
||||
}
|
||||
|
||||
var utf8bom = []byte{0xEF, 0xBB, 0xBF}
|
||||
|
||||
func trimContinuationCharacter(line string, d *Directive) (string, bool) {
|
||||
if d.lineContinuationRegex.MatchString(line) {
|
||||
line = d.lineContinuationRegex.ReplaceAllString(line, "")
|
||||
return line, false
|
||||
}
|
||||
|
||||
return line
|
||||
return line, true
|
||||
}
|
||||
|
||||
// TODO: remove stripLeftWhitespace after deprecation period. It seems silly
|
||||
// to preserve whitespace on continuation lines. Why is that done?
|
||||
func processLine(token []byte, stripLeftWhitespace bool) []byte {
|
||||
if stripLeftWhitespace {
|
||||
token = trimWhitespace(token)
|
||||
}
|
||||
return trimComments(token)
|
||||
}
|
||||
|
||||
func processFirstLine(d *Directive, token []byte) ([]byte, error) {
|
||||
token = bytes.TrimPrefix(token, utf8bom)
|
||||
token = trimWhitespace(token)
|
||||
err := d.processLine(string(token))
|
||||
return trimComments(token), err
|
||||
}
|
||||
|
||||
@@ -8,6 +8,9 @@ import (
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
const testDir = "testfiles"
|
||||
@@ -16,17 +19,11 @@ const testFileLineInfo = "testfile-line/Dockerfile"
|
||||
|
||||
func getDirs(t *testing.T, dir string) []string {
|
||||
f, err := os.Open(dir)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
require.NoError(t, err)
|
||||
defer f.Close()
|
||||
|
||||
dirs, err := f.Readdirnames(0)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
require.NoError(t, err)
|
||||
return dirs
|
||||
}
|
||||
|
||||
@@ -35,17 +32,11 @@ func TestTestNegative(t *testing.T) {
|
||||
dockerfile := filepath.Join(negativeTestDir, dir, "Dockerfile")
|
||||
|
||||
df, err := os.Open(dockerfile)
|
||||
if err != nil {
|
||||
t.Fatalf("Dockerfile missing for %s: %v", dir, err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
defer df.Close()
|
||||
|
||||
d := Directive{LookingForDirectives: true}
|
||||
SetEscapeToken(DefaultEscapeToken, &d)
|
||||
_, err = Parse(df, &d)
|
||||
if err == nil {
|
||||
t.Fatalf("No error parsing broken dockerfile for %s", dir)
|
||||
}
|
||||
_, err = Parse(df)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -55,33 +46,21 @@ func TestTestData(t *testing.T) {
|
||||
resultfile := filepath.Join(testDir, dir, "result")
|
||||
|
||||
df, err := os.Open(dockerfile)
|
||||
if err != nil {
|
||||
t.Fatalf("Dockerfile missing for %s: %v", dir, err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
defer df.Close()
|
||||
|
||||
d := Directive{LookingForDirectives: true}
|
||||
SetEscapeToken(DefaultEscapeToken, &d)
|
||||
ast, err := Parse(df, &d)
|
||||
if err != nil {
|
||||
t.Fatalf("Error parsing %s's dockerfile: %v", dir, err)
|
||||
}
|
||||
result, err := Parse(df)
|
||||
require.NoError(t, err)
|
||||
|
||||
content, err := ioutil.ReadFile(resultfile)
|
||||
if err != nil {
|
||||
t.Fatalf("Error reading %s's result file: %v", dir, err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
|
||||
if runtime.GOOS == "windows" {
|
||||
// CRLF --> CR to match Unix behavior
|
||||
content = bytes.Replace(content, []byte{'\x0d', '\x0a'}, []byte{'\x0a'}, -1)
|
||||
}
|
||||
|
||||
if ast.Dump()+"\n" != string(content) {
|
||||
fmt.Fprintln(os.Stderr, "Result:\n"+ast.Dump())
|
||||
fmt.Fprintln(os.Stderr, "Expected:\n"+string(content))
|
||||
t.Fatalf("%s: AST dump of dockerfile does not match result", dir)
|
||||
}
|
||||
assert.Contains(t, result.AST.Dump()+"\n", string(content), "In "+dockerfile)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -122,51 +101,34 @@ func TestParseWords(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
d := Directive{LookingForDirectives: true}
|
||||
SetEscapeToken(DefaultEscapeToken, &d)
|
||||
words := parseWords(test["input"][0], &d)
|
||||
if len(words) != len(test["expect"]) {
|
||||
t.Fatalf("length check failed. input: %v, expect: %q, output: %q", test["input"][0], test["expect"], words)
|
||||
}
|
||||
for i, word := range words {
|
||||
if word != test["expect"][i] {
|
||||
t.Fatalf("word check failed for word: %q. input: %q, expect: %q, output: %q", word, test["input"][0], test["expect"], words)
|
||||
}
|
||||
}
|
||||
words := parseWords(test["input"][0], NewDefaultDirective())
|
||||
assert.Equal(t, test["expect"], words)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLineInformation(t *testing.T) {
|
||||
df, err := os.Open(testFileLineInfo)
|
||||
if err != nil {
|
||||
t.Fatalf("Dockerfile missing for %s: %v", testFileLineInfo, err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
defer df.Close()
|
||||
|
||||
d := Directive{LookingForDirectives: true}
|
||||
SetEscapeToken(DefaultEscapeToken, &d)
|
||||
ast, err := Parse(df, &d)
|
||||
if err != nil {
|
||||
t.Fatalf("Error parsing dockerfile %s: %v", testFileLineInfo, err)
|
||||
}
|
||||
result, err := Parse(df)
|
||||
require.NoError(t, err)
|
||||
|
||||
if ast.StartLine != 5 || ast.EndLine != 31 {
|
||||
fmt.Fprintf(os.Stderr, "Wrong root line information: expected(%d-%d), actual(%d-%d)\n", 5, 31, ast.StartLine, ast.EndLine)
|
||||
ast := result.AST
|
||||
if ast.StartLine != 5 || ast.endLine != 31 {
|
||||
fmt.Fprintf(os.Stderr, "Wrong root line information: expected(%d-%d), actual(%d-%d)\n", 5, 31, ast.StartLine, ast.endLine)
|
||||
t.Fatal("Root line information doesn't match result.")
|
||||
}
|
||||
if len(ast.Children) != 3 {
|
||||
fmt.Fprintf(os.Stderr, "Wrong number of child: expected(%d), actual(%d)\n", 3, len(ast.Children))
|
||||
t.Fatalf("Root line information doesn't match result for %s", testFileLineInfo)
|
||||
}
|
||||
assert.Len(t, ast.Children, 3)
|
||||
expected := [][]int{
|
||||
{5, 5},
|
||||
{11, 12},
|
||||
{17, 31},
|
||||
}
|
||||
for i, child := range ast.Children {
|
||||
if child.StartLine != expected[i][0] || child.EndLine != expected[i][1] {
|
||||
if child.StartLine != expected[i][0] || child.endLine != expected[i][1] {
|
||||
t.Logf("Wrong line information for child %d: expected(%d-%d), actual(%d-%d)\n",
|
||||
i, expected[i][0], expected[i][1], child.StartLine, child.EndLine)
|
||||
i, expected[i][0], expected[i][1], child.StartLine, child.endLine)
|
||||
t.Fatal("Root line information doesn't match result.")
|
||||
}
|
||||
}
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user