mirror of
https://github.com/moby/moby.git
synced 2026-01-17 22:58:08 +00:00
Compare commits
1223 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
010d74ec2f | ||
|
|
bb76985d39 | ||
|
|
28ec47c441 | ||
|
|
d003cfea25 | ||
|
|
f3103e5c91 | ||
|
|
ef7e000a13 | ||
|
|
811341423b | ||
|
|
b3addb5fb8 | ||
|
|
00ee6d1925 | ||
|
|
6f8a79c23c | ||
|
|
cf8063d152 | ||
|
|
3e10fe1a15 | ||
|
|
45ecdf9c8e | ||
|
|
b942f24ba8 | ||
|
|
3779291e9b | ||
|
|
fa14a1b983 | ||
|
|
f9e14cc838 | ||
|
|
f2ea539467 | ||
|
|
7c4e5fbd46 | ||
|
|
97ef8a067c | ||
|
|
4b9e475a3d | ||
|
|
1d0aeae339 | ||
|
|
859856b3e4 | ||
|
|
b8b18a2b42 | ||
|
|
561d1db074 | ||
|
|
1f9abfe841 | ||
|
|
2b93f18223 | ||
|
|
8f3b8f3835 | ||
|
|
fca83b4cfb | ||
|
|
444a087ac2 | ||
|
|
e1c861cf33 | ||
|
|
6fe3da9924 | ||
|
|
f43f3fa218 | ||
|
|
1e551c7cc5 | ||
|
|
2c395ce8fb | ||
|
|
7799ae27ca | ||
|
|
bb754fd34d | ||
|
|
a0298c0bd0 | ||
|
|
a86a82cb7e | ||
|
|
36ab1836f9 | ||
|
|
5e8912e0e8 | ||
|
|
4e414f6205 | ||
|
|
4f31141e13 | ||
|
|
ee6823d797 | ||
|
|
1363dfdd1d | ||
|
|
b308e33106 | ||
|
|
e5b09523dc | ||
|
|
d44abae873 | ||
|
|
323c4b5211 | ||
|
|
5b4a0cac4e | ||
|
|
73294b6d56 | ||
|
|
d6ca05f7cb | ||
|
|
dfc2dc4d35 | ||
|
|
ea762c1a51 | ||
|
|
599009191a | ||
|
|
af50b2f17c | ||
|
|
2a1181f404 | ||
|
|
f7afbf34fe | ||
|
|
3069bf9460 | ||
|
|
32b9a429c5 | ||
|
|
76910d16cc | ||
|
|
42c7dc448f | ||
|
|
8502ad4ba7 | ||
|
|
58ec7855bc | ||
|
|
949fde88df | ||
|
|
5a9f45cb7a | ||
|
|
8f4a54734f | ||
|
|
9359d79c4f | ||
|
|
69db6ea867 | ||
|
|
3b89187d03 | ||
|
|
82a47b0e82 | ||
|
|
e0f07bc186 | ||
|
|
194eb246ef | ||
|
|
f560b87a86 | ||
|
|
c561212b83 | ||
|
|
81e596e272 | ||
|
|
acfdfa81be | ||
|
|
589515c717 | ||
|
|
523f726716 | ||
|
|
7fd6dcc831 | ||
|
|
848f290012 | ||
|
|
add97f7eb0 | ||
|
|
9dd7ae4074 | ||
|
|
5f55784224 | ||
|
|
f3816ee024 | ||
|
|
0b3e153588 | ||
|
|
2226989410 | ||
|
|
c23b15b9d8 | ||
|
|
055b32e3f4 | ||
|
|
907d9ce13c | ||
|
|
74d45789dd | ||
|
|
40522c0380 | ||
|
|
d5bb0ff80a | ||
|
|
ad80da3389 | ||
|
|
1f80c2a652 | ||
|
|
1bc3f6b7b5 | ||
|
|
643621133f | ||
|
|
fd240413ff | ||
|
|
392b1e99b2 | ||
|
|
0dfebf2d93 | ||
|
|
40aaebe56a | ||
|
|
a1dba16fe8 | ||
|
|
e31f1f1eba | ||
|
|
7e720d0a77 | ||
|
|
237868e9c3 | ||
|
|
fc197188d7 | ||
|
|
d59080d119 | ||
|
|
c6dcee329d | ||
|
|
484a75f354 | ||
|
|
434cf6c8ca | ||
|
|
83d631b6a4 | ||
|
|
8b82b0dfe7 | ||
|
|
e93b7b4647 | ||
|
|
06a818616b | ||
|
|
f50b8b08b5 | ||
|
|
cda146547e | ||
|
|
a17fd7b294 | ||
|
|
22162687df | ||
|
|
d256f3049b | ||
|
|
a1a4a99d7e | ||
|
|
4986958e7e | ||
|
|
cd735496da | ||
|
|
894d4a23fb | ||
|
|
fc9f4d8bad | ||
|
|
1d4b7d8fa1 | ||
|
|
360078d761 | ||
|
|
808f2d39bd | ||
|
|
d1ca12e81b | ||
|
|
a042c9fb1b | ||
|
|
721bb410f6 | ||
|
|
029625981d | ||
|
|
0fccf0f686 | ||
|
|
efaf2cac5c | ||
|
|
cb1fe939a8 | ||
|
|
c654aea4f2 | ||
|
|
d2d8a4a6c5 | ||
|
|
4100e9b7df | ||
|
|
5875953d9b | ||
|
|
3264c1c5eb | ||
|
|
f4ce106e02 | ||
|
|
7ec1236cee | ||
|
|
2b4bb67ce0 | ||
|
|
6155f07561 | ||
|
|
e6e35e5984 | ||
|
|
0d207abf8e | ||
|
|
a009d4ae8d | ||
|
|
b75f385abd | ||
|
|
7ce7516c12 | ||
|
|
f6b91262a7 | ||
|
|
d16d748132 | ||
|
|
3fc9de3d03 | ||
|
|
652c2c2a80 | ||
|
|
8e7db0432e | ||
|
|
e1a15b25dc | ||
|
|
b1a3a55802 | ||
|
|
614bc5c1e1 | ||
|
|
3fe4d5477a | ||
|
|
cda24e345c | ||
|
|
88037b2877 | ||
|
|
6cdd1aa350 | ||
|
|
ea8a3438f7 | ||
|
|
954158ce52 | ||
|
|
bf17383e35 | ||
|
|
83d81758b0 | ||
|
|
e3b878ce98 | ||
|
|
1e5f9334e0 | ||
|
|
3edbf416bf | ||
|
|
c2364b978d | ||
|
|
158e3d60ec | ||
|
|
e4e579b40d | ||
|
|
071528e103 | ||
|
|
a2fcd3d8f0 | ||
|
|
7d2e851d8e | ||
|
|
85f9b778f5 | ||
|
|
369cde4ad7 | ||
|
|
3ffc52bcf5 | ||
|
|
8dcca2125a | ||
|
|
cdd14b1a31 | ||
|
|
37ed178611 | ||
|
|
c995c9bb91 | ||
|
|
aa619de748 | ||
|
|
6fde28c293 | ||
|
|
f4358fc647 | ||
|
|
57e19b1475 | ||
|
|
8051b6c1a1 | ||
|
|
566ff54d0d | ||
|
|
f9359f59a8 | ||
|
|
e4561438f1 | ||
|
|
b8cd2bc94d | ||
|
|
f7ba1c34bb | ||
|
|
df87919165 | ||
|
|
733bf5d3dd | ||
|
|
efde305c05 | ||
|
|
636dfc82b0 | ||
|
|
93abcc3a3b | ||
|
|
c3ec696284 | ||
|
|
fdd81b423b | ||
|
|
cd89fe5c4f | ||
|
|
1636ed9826 | ||
|
|
8072d3a4e0 | ||
|
|
d215724ad6 | ||
|
|
0e6f0c4e02 | ||
|
|
629cc2fce4 | ||
|
|
8c52140059 | ||
|
|
f21bd80e90 | ||
|
|
4bdd4599f0 | ||
|
|
ed93dab9a8 | ||
|
|
62a81370ff | ||
|
|
e74c65c3db | ||
|
|
248eadd341 | ||
|
|
e829d5b6d2 | ||
|
|
35d8ac94f3 | ||
|
|
94821a3353 | ||
|
|
d14c162fd6 | ||
|
|
14d1c5a2c3 | ||
|
|
329d154209 | ||
|
|
7bc96aec7b | ||
|
|
a6fdc5d208 | ||
|
|
681b40c801 | ||
|
|
536da93380 | ||
|
|
45d7dcfea2 | ||
|
|
210fa0871c | ||
|
|
f768c6adb7 | ||
|
|
fde909ffb8 | ||
|
|
553b4dae45 | ||
|
|
929662a4d5 | ||
|
|
fbac812540 | ||
|
|
e481c82fa9 | ||
|
|
73a1ef7c22 | ||
|
|
c91c365f88 | ||
|
|
b8a4f570fb | ||
|
|
70c7220a99 | ||
|
|
0f45e3c6e0 | ||
|
|
be0beb897a | ||
|
|
8fa4c4b062 | ||
|
|
c06ab5f9c2 | ||
|
|
3ec39ad01a | ||
|
|
1940015824 | ||
|
|
1acefac97e | ||
|
|
f630fbc7cf | ||
|
|
e61f327ec9 | ||
|
|
c4444ce48f | ||
|
|
7ba0f1f421 | ||
|
|
30454bb85c | ||
|
|
2deb0c3365 | ||
|
|
efc0610c0e | ||
|
|
391676b598 | ||
|
|
5204feeaa9 | ||
|
|
81d112cb7f | ||
|
|
25be0b1e98 | ||
|
|
c56b045270 | ||
|
|
d9a1cc7e2b | ||
|
|
30b4a0f76a | ||
|
|
7d95145b76 | ||
|
|
379a7fab07 | ||
|
|
36e060299f | ||
|
|
a96a26c62f | ||
|
|
c3705e83e7 | ||
|
|
5e9b4a23e6 | ||
|
|
a1c5e276f4 | ||
|
|
eddda577a4 | ||
|
|
2ed1001c57 | ||
|
|
f02d766f9a | ||
|
|
2035af44aa | ||
|
|
746ae155fb | ||
|
|
a26801c73f | ||
|
|
670b326c1b | ||
|
|
15a6854119 | ||
|
|
3f9416b58d | ||
|
|
7afd7a82bd | ||
|
|
124da338fd | ||
|
|
69a31c3386 | ||
|
|
20605eb310 | ||
|
|
945a1f06f9 | ||
|
|
64136071c6 | ||
|
|
28b162eeb4 | ||
|
|
e960152a1e | ||
|
|
fe956ad449 | ||
|
|
47375ddf54 | ||
|
|
f0d6a91a1b | ||
|
|
62213ee314 | ||
|
|
fa48f17493 | ||
|
|
41d972baf1 | ||
|
|
b3ad330782 | ||
|
|
6721525068 | ||
|
|
5cfcb05486 | ||
|
|
78c22c24b3 | ||
|
|
4faba4fae7 | ||
|
|
e1efd4cb8c | ||
|
|
606cacdca0 | ||
|
|
d526038503 | ||
|
|
58daccab26 | ||
|
|
12fb508262 | ||
|
|
0a3eedd4c9 | ||
|
|
a6928e70ac | ||
|
|
20197385b2 | ||
|
|
85b9338205 | ||
|
|
51e2c1794b | ||
|
|
20899cdb34 | ||
|
|
f5ab2516d8 | ||
|
|
d5f5ecb658 | ||
|
|
4b5ceb0f24 | ||
|
|
906b481148 | ||
|
|
930ec9f52c | ||
|
|
aaa1c48d24 | ||
|
|
d7123a597f | ||
|
|
9a9ecda7c8 | ||
|
|
071338172c | ||
|
|
4975c1b549 | ||
|
|
73e8a39ff2 | ||
|
|
847cf5b599 | ||
|
|
bf91636558 | ||
|
|
1e85aabf71 | ||
|
|
4fe0a9b6a0 | ||
|
|
f63cdf0260 | ||
|
|
9fb1ba97b1 | ||
|
|
59dc2876a7 | ||
|
|
23ab0af2ff | ||
|
|
b8a16b3459 | ||
|
|
a530b8d981 | ||
|
|
89beb55c32 | ||
|
|
f9328ad9cc | ||
|
|
20759c3ef7 | ||
|
|
5d81776714 | ||
|
|
0ef1ff91cb | ||
|
|
a68d7f3d70 | ||
|
|
a8af12f80a | ||
|
|
10cd902f90 | ||
|
|
818c249bae | ||
|
|
5a89c6f6df | ||
|
|
2e6dbe87ad | ||
|
|
e877294321 | ||
|
|
ecc51cd465 | ||
|
|
f7c7f7978c | ||
|
|
8224e13bd2 | ||
|
|
912bf8ff92 | ||
|
|
e43ff2f6f2 | ||
|
|
b8f1c73705 | ||
|
|
1572989201 | ||
|
|
bd02d6e662 | ||
|
|
2d1f61ef0e | ||
|
|
54df95f26c | ||
|
|
5b33ae5971 | ||
|
|
0db1c60542 | ||
|
|
f216448c82 | ||
|
|
f26a9d456c | ||
|
|
bf5b949ffc | ||
|
|
621523a041 | ||
|
|
8fd9633a6b | ||
|
|
1124261158 | ||
|
|
b722f809e7 | ||
|
|
f396c42cad | ||
|
|
8874f2aef9 | ||
|
|
e8ec3dba7b | ||
|
|
4eda2a54de | ||
|
|
d3292078dc | ||
|
|
6ba456ff87 | ||
|
|
44984602c7 | ||
|
|
d534e1c3a1 | ||
|
|
d56d8ab96e | ||
|
|
6cf8ec606e | ||
|
|
db3019d50b | ||
|
|
42c38bf34d | ||
|
|
11b3fbb3bd | ||
|
|
036f41fde3 | ||
|
|
6e9c1590c6 | ||
|
|
39cc8a32b1 | ||
|
|
31961ccd94 | ||
|
|
eec48f93a3 | ||
|
|
dbe1915fee | ||
|
|
bef8de9319 | ||
|
|
81fc368a6d | ||
|
|
bd292759f0 | ||
|
|
5fd3c8204d | ||
|
|
af21908493 | ||
|
|
7edd1f6bad | ||
|
|
d878632b25 | ||
|
|
be13735001 | ||
|
|
fb9ddc5de5 | ||
|
|
27646c4459 | ||
|
|
b98d51dddb | ||
|
|
0025e9bd71 | ||
|
|
4c6e528f13 | ||
|
|
95f061b408 | ||
|
|
761184df52 | ||
|
|
78b85220be | ||
|
|
8814c11b14 | ||
|
|
09d2c2351c | ||
|
|
c618a906a4 | ||
|
|
9c1e9a5157 | ||
|
|
0b0b0ca0f9 | ||
|
|
ac1093b83a | ||
|
|
c9cedb4c04 | ||
|
|
a74be95b23 | ||
|
|
8291f00a0e | ||
|
|
b7bc80cba9 | ||
|
|
864729b96f | ||
|
|
a67571668e | ||
|
|
776bb43c9e | ||
|
|
75bd5bea70 | ||
|
|
e2ee5c71fc | ||
|
|
f0879a1e14 | ||
|
|
ca405786f4 | ||
|
|
cdc07f7d5c | ||
|
|
f379f667a2 | ||
|
|
45cea94a82 | ||
|
|
8ec96c9605 | ||
|
|
c094807a1b | ||
|
|
bac3a8e6f5 | ||
|
|
dcfc4ada4d | ||
|
|
416b16e1e2 | ||
|
|
f832b76bdf | ||
|
|
d502f0cfac | ||
|
|
16fad96007 | ||
|
|
de35b346d1 | ||
|
|
869a11bc93 | ||
|
|
f806818154 | ||
|
|
a7a171b6c2 | ||
|
|
a80c059bae | ||
|
|
edace08327 | ||
|
|
9656cdf0c2 | ||
|
|
50f3a696bd | ||
|
|
f4676f0ffa | ||
|
|
3c1f3be032 | ||
|
|
aeba4e6482 | ||
|
|
3569d080af | ||
|
|
427bdb60e7 | ||
|
|
9b1930c5a0 | ||
|
|
2546a2c645 | ||
|
|
fdb3de7b11 | ||
|
|
88df052197 | ||
|
|
04ffa53ba8 | ||
|
|
07f7643bbc | ||
|
|
228091c79e | ||
|
|
6fa1463614 | ||
|
|
f28445254f | ||
|
|
0969be5ddb | ||
|
|
95c0ade04b | ||
|
|
e01732f857 | ||
|
|
9b644ff246 | ||
|
|
2c646b2d46 | ||
|
|
becb13dc26 | ||
|
|
05f416d869 | ||
|
|
7fd64e0196 | ||
|
|
13da09d22b | ||
|
|
6720bfb243 | ||
|
|
d75fc6e529 | ||
|
|
4a148919c3 | ||
|
|
c7d75588f4 | ||
|
|
dfade9e2d8 | ||
|
|
b655406faa | ||
|
|
a015f38f4a | ||
|
|
02ef8ec3ca | ||
|
|
25d3db048e | ||
|
|
a69bb25820 | ||
|
|
5f5949f6a6 | ||
|
|
58b75f8f29 | ||
|
|
aea7418d8a | ||
|
|
f9147effac | ||
|
|
0e2b0f284c | ||
|
|
80dfa23da8 | ||
|
|
bc9b239d74 | ||
|
|
4bea68dfa6 | ||
|
|
ea0ed9a915 | ||
|
|
e39d35deda | ||
|
|
4acd579226 | ||
|
|
c764fb0c29 | ||
|
|
de090116dd | ||
|
|
7a87023587 | ||
|
|
584164177e | ||
|
|
35e80868ad | ||
|
|
2acea6090f | ||
|
|
81b25fde79 | ||
|
|
0189a99471 | ||
|
|
7bf3a07371 | ||
|
|
9320f4e2d1 | ||
|
|
d1a4f83e5e | ||
|
|
fb810b54ff | ||
|
|
eac95671f5 | ||
|
|
06379d8bd9 | ||
|
|
a96bf74397 | ||
|
|
cc0466bb68 | ||
|
|
0a7e0f0819 | ||
|
|
ef157cee30 | ||
|
|
7ab4f37d60 | ||
|
|
5d022f0445 | ||
|
|
697707e4af | ||
|
|
e3c3f3c324 | ||
|
|
797bac2344 | ||
|
|
7a94cdf8ed | ||
|
|
61fbf3d8e2 | ||
|
|
f49eb29497 | ||
|
|
1525f71b5a | ||
|
|
4188cd6bcd | ||
|
|
e304e8936b | ||
|
|
03f8a3bbae | ||
|
|
066b961a0c | ||
|
|
f95f2789f2 | ||
|
|
a8e99d9235 | ||
|
|
5a17c208cd | ||
|
|
e9bf971e69 | ||
|
|
909da5d524 | ||
|
|
04c32495f6 | ||
|
|
af020e2d67 | ||
|
|
94d46a8d3a | ||
|
|
ec9f2f1d0f | ||
|
|
0bfa22124e | ||
|
|
79031c4f8c | ||
|
|
5f55c1aee1 | ||
|
|
b4e21ad1da | ||
|
|
97088ebef7 | ||
|
|
c35cebaa06 | ||
|
|
41b5e87873 | ||
|
|
9dfc7bc36f | ||
|
|
afbea3f13f | ||
|
|
5dab47a475 | ||
|
|
3ba279a370 | ||
|
|
944c1f10ea | ||
|
|
0d1506adb3 | ||
|
|
3a8222dfa5 | ||
|
|
00030ced4b | ||
|
|
f95621fd05 | ||
|
|
4328926acc | ||
|
|
8f382aaecd | ||
|
|
3df5d120de | ||
|
|
1b5517b68f | ||
|
|
4bc100b494 | ||
|
|
be282b57d5 | ||
|
|
12180948be | ||
|
|
6cf2c14c00 | ||
|
|
dc9f8bf072 | ||
|
|
0862756beb | ||
|
|
de60bee3d4 | ||
|
|
51b9fe7301 | ||
|
|
61aad8fc10 | ||
|
|
6ddea783ef | ||
|
|
58c33360b0 | ||
|
|
40fe9f581b | ||
|
|
258d707548 | ||
|
|
99e4f56353 | ||
|
|
0132547a38 | ||
|
|
84f78d9cad | ||
|
|
f8176de191 | ||
|
|
f50fe14e13 | ||
|
|
45567f2209 | ||
|
|
2fd76fc0b8 | ||
|
|
b699aee91f | ||
|
|
64439505c7 | ||
|
|
664174c7aa | ||
|
|
7428e6a5f0 | ||
|
|
d21563ced3 | ||
|
|
6a55169e2e | ||
|
|
5976c26c1e | ||
|
|
b59dea6767 | ||
|
|
9be5db8704 | ||
|
|
3f92163989 | ||
|
|
3b5010e90b | ||
|
|
ec4863ae55 | ||
|
|
a02bc8a5db | ||
|
|
045989e3d8 | ||
|
|
bbf9135adc | ||
|
|
1cb1e08644 | ||
|
|
682cf48d1d | ||
|
|
48e1766527 | ||
|
|
3ddbb36a84 | ||
|
|
62263967b9 | ||
|
|
3ed0ff85f5 | ||
|
|
c4c90e9cec | ||
|
|
650d4cc644 | ||
|
|
d9b742419c | ||
|
|
c81bb20f5b | ||
|
|
6c70d23e0d | ||
|
|
c9432cf51a | ||
|
|
829b118dd8 | ||
|
|
3ac76cfeff | ||
|
|
5a9cf7e754 | ||
|
|
e4aba11e80 | ||
|
|
9d62dc1a08 | ||
|
|
0017c68f4a | ||
|
|
3cd9b2aadf | ||
|
|
8afb0abbee | ||
|
|
98ed1dc433 | ||
|
|
7aec93c370 | ||
|
|
62f0e5aef9 | ||
|
|
59a85798fa | ||
|
|
67c03552f6 | ||
|
|
4fdc117ad2 | ||
|
|
5cd09dc115 | ||
|
|
6ea3b9651b | ||
|
|
de4429f70d | ||
|
|
8cc524996a | ||
|
|
d9fbdd7b3f | ||
|
|
4ad3dfb05f | ||
|
|
1d503be466 | ||
|
|
9837ad8e9b | ||
|
|
70b586702c | ||
|
|
4b35c1b6a6 | ||
|
|
fe571dd293 | ||
|
|
e1414a4c39 | ||
|
|
d4ebba703c | ||
|
|
e4cb83c50e | ||
|
|
d7dd19d22e | ||
|
|
5f2313aad3 | ||
|
|
d6cdbca6c1 | ||
|
|
751250015b | ||
|
|
b04c6466cd | ||
|
|
b9ad0c9f74 | ||
|
|
dbb47f63ab | ||
|
|
c4548506c5 | ||
|
|
26cf8b9aff | ||
|
|
7f1a91121c | ||
|
|
c30e2dc28c | ||
|
|
d9cdd45d2e | ||
|
|
5c5f670901 | ||
|
|
fea432bdf5 | ||
|
|
39aac21db4 | ||
|
|
56ab9cb0d5 | ||
|
|
d8ee08ba7b | ||
|
|
e8437e8fcf | ||
|
|
4e030c78d2 | ||
|
|
62b1faf28c | ||
|
|
2dac7b5209 | ||
|
|
1890301e67 | ||
|
|
a6c9a332d0 | ||
|
|
65db62619c | ||
|
|
35d54c6655 | ||
|
|
3553a803e3 | ||
|
|
a4f8a2494b | ||
|
|
fe72f15e4a | ||
|
|
a37b155384 | ||
|
|
82cecb34b5 | ||
|
|
e1278e9ec2 | ||
|
|
db7c55ba7f | ||
|
|
0d3f4017cf | ||
|
|
ab35aef6b5 | ||
|
|
bb284ce59d | ||
|
|
34353e782e | ||
|
|
ca98434a45 | ||
|
|
86c00be180 | ||
|
|
2ec1146679 | ||
|
|
2e6a958612 | ||
|
|
697be6aaa0 | ||
|
|
c13821ad0b | ||
|
|
aa68656cd3 | ||
|
|
63d6cbe3e4 | ||
|
|
67e9e0e11b | ||
|
|
fbebe20bc6 | ||
|
|
e535f544c7 | ||
|
|
fe727e2a87 | ||
|
|
f72e604872 | ||
|
|
926f7b579e | ||
|
|
ff5747728c | ||
|
|
6c56993639 | ||
|
|
ba5268d382 | ||
|
|
8291d509c2 | ||
|
|
139644895e | ||
|
|
cca9e51f5d | ||
|
|
668d22be54 | ||
|
|
77c94175bd | ||
|
|
f94ea7769f | ||
|
|
39bec226c0 | ||
|
|
677e2ad92e | ||
|
|
d3cc558d14 | ||
|
|
ad43d88af5 | ||
|
|
1fe1b216ad | ||
|
|
3faf450f11 | ||
|
|
b36dd3f9cc | ||
|
|
a0525d90ab | ||
|
|
ebc36b879d | ||
|
|
14425c1690 | ||
|
|
aae23255a0 | ||
|
|
2bbc90e92f | ||
|
|
0c758e9312 | ||
|
|
597e0e69b4 | ||
|
|
261bd0d187 | ||
|
|
3d0486979e | ||
|
|
377817db1b | ||
|
|
a990b3aeb9 | ||
|
|
9f46779d42 | ||
|
|
533067bba4 | ||
|
|
438607ecc3 | ||
|
|
d47507791e | ||
|
|
bdfe8ed403 | ||
|
|
f1e44e0b0c | ||
|
|
c226ab6d9e | ||
|
|
74ea136a49 | ||
|
|
24c03b2d93 | ||
|
|
a58fef9f13 | ||
|
|
597ca192e7 | ||
|
|
8b2a7e35c3 | ||
|
|
8a5d927a53 | ||
|
|
1214b8897b | ||
|
|
eb528b959e | ||
|
|
75e9cff98c | ||
|
|
74c8f7af75 | ||
|
|
2c27da8818 | ||
|
|
39f21af687 | ||
|
|
d1a631cedb | ||
|
|
7f9cdaa342 | ||
|
|
e4ae44b844 | ||
|
|
89454851d1 | ||
|
|
f75dc36204 | ||
|
|
5fe5055bd9 | ||
|
|
4e826e99b2 | ||
|
|
788feab3a7 | ||
|
|
682a188ead | ||
|
|
45b1e8c236 | ||
|
|
ae474e05f5 | ||
|
|
0ff9bc1be3 | ||
|
|
b3e8ba1908 | ||
|
|
7b95d41092 | ||
|
|
1cb7b9adc6 | ||
|
|
d370a889c3 | ||
|
|
6d34c50e89 | ||
|
|
6344e6f258 | ||
|
|
462e30dcbd | ||
|
|
c7661f40b6 | ||
|
|
c707c587c1 | ||
|
|
5e3f6e7023 | ||
|
|
1beb5005d1 | ||
|
|
1ba11384bf | ||
|
|
8398abf0dc | ||
|
|
ab3a83c617 | ||
|
|
8b99e4ed37 | ||
|
|
17efa9dc2d | ||
|
|
76c71260f1 | ||
|
|
8267437294 | ||
|
|
9c15322894 | ||
|
|
0d078b6581 | ||
|
|
06d5e25224 | ||
|
|
009024ad64 | ||
|
|
14d9f04e89 | ||
|
|
18d08d0d42 | ||
|
|
0bb2c0b1d0 | ||
|
|
1af6ffb9bb | ||
|
|
233ad38802 | ||
|
|
db28e839e0 | ||
|
|
de30ffb2c3 | ||
|
|
5c5ee194cb | ||
|
|
b6dd67c707 | ||
|
|
740958dda7 | ||
|
|
c38386d876 | ||
|
|
4267fb66ef | ||
|
|
a74b512540 | ||
|
|
60809a4f72 | ||
|
|
65fcc81b42 | ||
|
|
06cf8fee1b | ||
|
|
c92dab0eb4 | ||
|
|
6ad5b2bcf4 | ||
|
|
77f1362c64 | ||
|
|
4049359bee | ||
|
|
7daefc9d3f | ||
|
|
d4c32b9015 | ||
|
|
8bd6127ab3 | ||
|
|
2302293244 | ||
|
|
fd7ff6411d | ||
|
|
59f76bf1c7 | ||
|
|
02cb7f45fa | ||
|
|
a937313747 | ||
|
|
fb3d60f27a | ||
|
|
5ff74e268d | ||
|
|
09b7b55e2c | ||
|
|
110c4f2043 | ||
|
|
0d1b5d7676 | ||
|
|
5242a49f3f | ||
|
|
2586c042ae | ||
|
|
688e86c625 | ||
|
|
750d2d8d07 | ||
|
|
19df6c32c0 | ||
|
|
1d903da6fd | ||
|
|
aaefb8c07c | ||
|
|
b3959e69b5 | ||
|
|
43c7df946d | ||
|
|
6acdf68ee1 | ||
|
|
487b3d8a8c | ||
|
|
33f70f8978 | ||
|
|
809239c0af | ||
|
|
937f52aef9 | ||
|
|
aa48acc5ec | ||
|
|
ac70e296db | ||
|
|
e2c3860ec3 | ||
|
|
2d715bf3c0 | ||
|
|
d9e54e28e7 | ||
|
|
78d2e2dc37 | ||
|
|
abfdaca3f8 | ||
|
|
3a2fbcfdec | ||
|
|
ba2b36e192 | ||
|
|
d47d49a2f9 | ||
|
|
8b0b10b6f9 | ||
|
|
399c71de83 | ||
|
|
d8f4b733f2 | ||
|
|
b4eeb6be61 | ||
|
|
41704d8933 | ||
|
|
64dd4afed6 | ||
|
|
5da1ed3291 | ||
|
|
ad23745456 | ||
|
|
cee0a292d0 | ||
|
|
b702edadb7 | ||
|
|
f16c45f8b0 | ||
|
|
07180f3aa7 | ||
|
|
a606474825 | ||
|
|
5d6ef3177b | ||
|
|
0a89db04fe | ||
|
|
1cce9f25b2 | ||
|
|
f1b3e278b9 | ||
|
|
e288e7763e | ||
|
|
9696ec509a | ||
|
|
96b5be9dd9 | ||
|
|
ba6dd1d8d6 | ||
|
|
c67f9b671d | ||
|
|
1c8ae47770 | ||
|
|
d55998be81 | ||
|
|
e69bbd239e | ||
|
|
a26f9183bd | ||
|
|
944a48ec5a | ||
|
|
79e2b33ede | ||
|
|
076c0eab70 | ||
|
|
1f9223a7c2 | ||
|
|
476559458d | ||
|
|
d4c8fb9ee2 | ||
|
|
ae8c589d35 | ||
|
|
6130f2531e | ||
|
|
ef14aaf627 | ||
|
|
1e7c04fcfe | ||
|
|
37e0083169 | ||
|
|
8b0cd60019 | ||
|
|
0198f8a879 | ||
|
|
b3f5973f41 | ||
|
|
3314e005f3 | ||
|
|
a93e40a158 | ||
|
|
58f8503b73 | ||
|
|
cb48ecc9dc | ||
|
|
0cecc2a78c | ||
|
|
437bdeee59 | ||
|
|
806abe90ba | ||
|
|
bc82940a57 | ||
|
|
f1e6dce047 | ||
|
|
efd0e13ca7 | ||
|
|
da824b4a5a | ||
|
|
1ab6b8bf49 | ||
|
|
253214f07d | ||
|
|
a2c9d2da93 | ||
|
|
df258f5861 | ||
|
|
60f728b170 | ||
|
|
2b7c63b1b5 | ||
|
|
a9230af52e | ||
|
|
2f0d18ac4a | ||
|
|
6469422465 | ||
|
|
5306053e21 | ||
|
|
e2390318bb | ||
|
|
4e0c76b321 | ||
|
|
da514223d1 | ||
|
|
023ff36704 | ||
|
|
8fdbf46afb | ||
|
|
d233894c25 | ||
|
|
8a756f417e | ||
|
|
a39bd65662 | ||
|
|
5690139785 | ||
|
|
92f94f06ae | ||
|
|
2382a0f920 | ||
|
|
579a5c843b | ||
|
|
b3bee7e0c4 | ||
|
|
6ebb236aa1 | ||
|
|
1b28cdc7f9 | ||
|
|
60cb5f1a34 | ||
|
|
cfdc284abe | ||
|
|
7192be47c5 | ||
|
|
24586d7af5 | ||
|
|
880a0e9c36 | ||
|
|
cd022376b8 | ||
|
|
d4ef551d65 | ||
|
|
76f54f3a28 | ||
|
|
43b709ab36 | ||
|
|
43899a77bf | ||
|
|
f6629bbbd5 | ||
|
|
91330243b5 | ||
|
|
3e0a5ac48b | ||
|
|
0d603df6dc | ||
|
|
12e993549d | ||
|
|
28d4cbbc59 | ||
|
|
5a4113140e | ||
|
|
85b5062502 | ||
|
|
4c2624a277 | ||
|
|
f4b3b7c055 | ||
|
|
9af1b07086 | ||
|
|
0ed762f2d2 | ||
|
|
242fd4b3ef | ||
|
|
51a972f38d | ||
|
|
aea6001baf | ||
|
|
6dbeed89c0 | ||
|
|
5d76681c3d | ||
|
|
9415c2b1f0 | ||
|
|
a0224e61b4 | ||
|
|
af753cbad8 | ||
|
|
80aecc7014 | ||
|
|
3edb4af663 | ||
|
|
00401a30b7 | ||
|
|
5ee8e41e43 | ||
|
|
29f07f8544 | ||
|
|
f88b760809 | ||
|
|
94e854823f | ||
|
|
e42b574579 | ||
|
|
8ed4307f50 | ||
|
|
427649eee1 | ||
|
|
a518b84751 | ||
|
|
cbd1281ec9 | ||
|
|
035c144242 | ||
|
|
fb245f7903 | ||
|
|
dc5b7b32c3 | ||
|
|
7cad77b1e2 | ||
|
|
2b23da1d2f | ||
|
|
bcf96d95bc | ||
|
|
10f23a94f6 | ||
|
|
243843c078 | ||
|
|
062810caed | ||
|
|
f7238f94e8 | ||
|
|
d69a6a20f0 | ||
|
|
4908d7f81d | ||
|
|
29fa1b6666 | ||
|
|
0c38f86e5e | ||
|
|
8a358e8833 | ||
|
|
eb06a7b8f8 | ||
|
|
7cf60da388 | ||
|
|
006e2a600c | ||
|
|
eace2dbe1d | ||
|
|
099c53b28e | ||
|
|
7b2d59b91e | ||
|
|
529c30261e | ||
|
|
7267c4b746 | ||
|
|
26533eb2c4 | ||
|
|
bbc9fc7907 | ||
|
|
7682ec04cd | ||
|
|
590465b395 | ||
|
|
05d70cbcf4 | ||
|
|
539708aa8a | ||
|
|
3aaef96e36 | ||
|
|
921e2e9ae2 | ||
|
|
b50838c359 | ||
|
|
4bebca848e | ||
|
|
6b3dd02bb8 | ||
|
|
80e7319558 | ||
|
|
1d188c8737 | ||
|
|
a69d86e0b1 | ||
|
|
46ab7d1e8a | ||
|
|
e3597624dd | ||
|
|
a4f14528c2 | ||
|
|
a06edd77e5 | ||
|
|
08623dc216 | ||
|
|
50d80a8938 | ||
|
|
6f3c32eb18 | ||
|
|
0dd92d8f1c | ||
|
|
7301fbe035 | ||
|
|
4ec05b5dbf | ||
|
|
5d9723002b | ||
|
|
2c7f50a77d | ||
|
|
4d1a537433 | ||
|
|
ec6fe9f200 | ||
|
|
e87c3ea342 | ||
|
|
e82f8c1661 | ||
|
|
b6ef4bc952 | ||
|
|
b5795749d1 | ||
|
|
948bb29d27 | ||
|
|
1f35531f39 | ||
|
|
f30c660f6f | ||
|
|
ddb27268c9 | ||
|
|
1eb00e1d5b | ||
|
|
9514767587 | ||
|
|
f512049c8f | ||
|
|
20f690f176 | ||
|
|
6669c86fdf | ||
|
|
78ef0bd998 | ||
|
|
c1563de7a1 | ||
|
|
08a276986c | ||
|
|
ed18844613 | ||
|
|
52f31657cc | ||
|
|
6d0b3f350e | ||
|
|
51c93c0f33 | ||
|
|
8c21d2acd3 | ||
|
|
1e5c61041f | ||
|
|
99210c9c6e | ||
|
|
1764cf1990 | ||
|
|
9afe475edb | ||
|
|
043a576171 | ||
|
|
81674fbbdf | ||
|
|
0a9df6bc1a | ||
|
|
12ffead71a | ||
|
|
b8b509e1c8 | ||
|
|
01990b65a3 | ||
|
|
9ae4bcaaf8 | ||
|
|
f1127b9308 | ||
|
|
165d1bdbc0 | ||
|
|
4a8a7d4edd | ||
|
|
f2bab1557c | ||
|
|
484804abff | ||
|
|
a63ff8da46 | ||
|
|
77ae9789d3 | ||
|
|
4d90e91243 | ||
|
|
140da580d4 | ||
|
|
ed572b457d | ||
|
|
66bf395e4d | ||
|
|
5bad7d7229 | ||
|
|
752bfba2c5 | ||
|
|
d11f75b505 | ||
|
|
24dd838aee | ||
|
|
98c3693acf | ||
|
|
6c77f2c189 | ||
|
|
b77c5c5984 | ||
|
|
3cd4232e52 | ||
|
|
ff42748bc5 | ||
|
|
d23b9e8734 | ||
|
|
d2c2c2c116 | ||
|
|
d64df7c765 | ||
|
|
86dd2473c1 | ||
|
|
318dd33fb7 | ||
|
|
a5031d47d9 | ||
|
|
36c7a7ae94 | ||
|
|
699a1074fb | ||
|
|
1fe08e0046 | ||
|
|
bc1c5ddf2e | ||
|
|
e9ee860c91 | ||
|
|
c2175ae736 | ||
|
|
2812baf395 | ||
|
|
572b1fd9be | ||
|
|
4756ad248a | ||
|
|
dfb77274ce | ||
|
|
73545199a8 | ||
|
|
0ad35c6746 | ||
|
|
5e363072f5 | ||
|
|
cad913c57b | ||
|
|
5f58a1fbe4 | ||
|
|
07e09d57af | ||
|
|
9f85a967bb | ||
|
|
85a36b3b53 | ||
|
|
d3505d836a | ||
|
|
c47e93fcbe | ||
|
|
5cd7de5de8 | ||
|
|
d034aafac7 | ||
|
|
4bd6021806 | ||
|
|
ed03dbfe82 | ||
|
|
330062ef72 | ||
|
|
31b883b076 | ||
|
|
bdb3b2a88c | ||
|
|
a14496ce89 | ||
|
|
8abcc8e713 | ||
|
|
d733cdcebb | ||
|
|
0aee096fd7 | ||
|
|
e6a73e65a2 | ||
|
|
ad2fbd9e87 | ||
|
|
e5d7472a0d | ||
|
|
7d3c7e2b29 | ||
|
|
f1d07e2dbe | ||
|
|
ad968ef3ef | ||
|
|
c688e9b5a6 | ||
|
|
f3e6d34df2 | ||
|
|
ea04f3de72 | ||
|
|
153248b60f | ||
|
|
1711de4b09 | ||
|
|
11d695a297 | ||
|
|
acf58362cb | ||
|
|
3a246ac3d1 | ||
|
|
1da335f784 | ||
|
|
cd61fb2e6f | ||
|
|
fdba1aeed8 | ||
|
|
c9f3e54c31 | ||
|
|
3997b8a923 | ||
|
|
ec885d9180 | ||
|
|
54b0cd7cd1 | ||
|
|
66db2ac9d8 | ||
|
|
5ebaca7e55 | ||
|
|
432ff7e3c3 | ||
|
|
80bd64245f | ||
|
|
4431e9edb7 | ||
|
|
7093411a8d | ||
|
|
5778ed7db2 | ||
|
|
3455c1a098 | ||
|
|
5dd12ba20a | ||
|
|
5892c8e469 | ||
|
|
20e3e8c07d | ||
|
|
79c0c4470f | ||
|
|
4dedd9a9aa | ||
|
|
063ebbab68 | ||
|
|
ea92dc2e8c | ||
|
|
7f429e0ceb | ||
|
|
bb42801cdc | ||
|
|
8e4b3a3390 | ||
|
|
48070274ee | ||
|
|
c3f1bb3287 | ||
|
|
562e4f1e23 | ||
|
|
a263e07678 | ||
|
|
e7986da531 | ||
|
|
b440ec0136 | ||
|
|
c77697a45c | ||
|
|
1804fcba93 | ||
|
|
821a82ac6c | ||
|
|
f6913592a1 | ||
|
|
aaf1f73bcc | ||
|
|
9b65c7cf49 | ||
|
|
f7e374fb3a | ||
|
|
f29c500d8d | ||
|
|
b843998718 | ||
|
|
1a1be5a87c | ||
|
|
7b58e15b08 | ||
|
|
8b2f4aab23 | ||
|
|
06d0843a61 | ||
|
|
deb05a36e8 | ||
|
|
55189307d0 | ||
|
|
55e1782d66 | ||
|
|
152302e379 | ||
|
|
72a08a5458 | ||
|
|
aeb89ffbba | ||
|
|
0484b2c325 | ||
|
|
be6fef0254 | ||
|
|
071cc18b58 | ||
|
|
6ec5585501 | ||
|
|
6f57e8025a | ||
|
|
b0a9147fd5 | ||
|
|
a7e876e357 | ||
|
|
ecdbdfdaea | ||
|
|
2c71710b74 | ||
|
|
bbc72c85f7 | ||
|
|
1a082ed245 | ||
|
|
86421e8b5e | ||
|
|
91c69fd353 | ||
|
|
43a7d3d0e9 | ||
|
|
60f552cac3 | ||
|
|
ad402763e1 | ||
|
|
5d2ace3424 | ||
|
|
727e7fccca | ||
|
|
7d566b4f76 | ||
|
|
fdbc2695fe | ||
|
|
429587779a | ||
|
|
145024c6cc | ||
|
|
c138801073 | ||
|
|
0722786600 | ||
|
|
5f8e24f842 | ||
|
|
0d7ab8db03 | ||
|
|
ed741f7b27 | ||
|
|
9e64ebb295 | ||
|
|
d951911b23 | ||
|
|
7fb60caa5d | ||
|
|
76a2ab6e34 | ||
|
|
6094257b28 | ||
|
|
52294192b2 | ||
|
|
381ce94ef4 | ||
|
|
99393cf3cf | ||
|
|
30890c7763 | ||
|
|
b0626f403b | ||
|
|
fda6ff9c27 | ||
|
|
b86f67126c | ||
|
|
1c5dc26a7c | ||
|
|
8e7cbbff50 | ||
|
|
074f38d493 | ||
|
|
a9ec1dbc9b | ||
|
|
d2ba3e2005 | ||
|
|
8f7361279c | ||
|
|
ca2f7f955e | ||
|
|
1d36b8c7b7 | ||
|
|
e6216793d9 | ||
|
|
e368c8bb01 | ||
|
|
167601e858 | ||
|
|
b8dc7b5f1a | ||
|
|
7fb3bfed03 | ||
|
|
374a5e9913 | ||
|
|
459bac7127 | ||
|
|
514886c73d | ||
|
|
75e958bf48 | ||
|
|
ebfa24acb0 | ||
|
|
5e1d540209 | ||
|
|
c1e25d7273 | ||
|
|
d263aa6ca9 | ||
|
|
03320f0d1c | ||
|
|
6c7ae06435 | ||
|
|
395bce4c41 | ||
|
|
41399ac005 | ||
|
|
67788723c9 | ||
|
|
f99f39abaa | ||
|
|
009d0f9d81 | ||
|
|
ed65815613 | ||
|
|
cc28829429 | ||
|
|
062a2b32e9 | ||
|
|
cda8754013 | ||
|
|
5415804c9d | ||
|
|
adae684987 | ||
|
|
ad0a6a03e3 | ||
|
|
36603e68e3 | ||
|
|
99c7d129f4 | ||
|
|
02b5f1369c | ||
|
|
d478a4bb54 | ||
|
|
c199ed228b | ||
|
|
e40f5c7cb9 | ||
|
|
d80be57c15 | ||
|
|
20bac716b5 | ||
|
|
2566e2604c | ||
|
|
e1c418cac3 | ||
|
|
3343b3f8f8 | ||
|
|
c6e8813c97 | ||
|
|
251a7ed437 | ||
|
|
261b0b01df | ||
|
|
a7fd1fce5d | ||
|
|
6938a36c69 | ||
|
|
bc7fa7b957 | ||
|
|
d47c18c5fb | ||
|
|
0e686fa2f4 | ||
|
|
3f3f5f0bba | ||
|
|
19ba0b851b | ||
|
|
94fa3c7bb5 | ||
|
|
223280f319 | ||
|
|
8f23945f7f | ||
|
|
8e8ef7cb5b | ||
|
|
8f343ea65a | ||
|
|
b125f2334c | ||
|
|
a89a51128e | ||
|
|
fcd41fe51a | ||
|
|
53851474c0 | ||
|
|
f317a6b6fe | ||
|
|
87e248f524 | ||
|
|
ac194fc696 | ||
|
|
8637ba710e | ||
|
|
0f5ccf934e | ||
|
|
250bc3f615 | ||
|
|
2b1dc8a8a3 | ||
|
|
0b12702c0c | ||
|
|
739af0a17f |
3
.gitignore
vendored
3
.gitignore
vendored
@@ -1,3 +1,6 @@
|
||||
# Docker project generated files to ignore
|
||||
# if you want to ignore files created by your editor/tools,
|
||||
# please consider a global .gitignore https://help.github.com/articles/ignoring-files
|
||||
.vagrant*
|
||||
bin
|
||||
docker/docker
|
||||
|
||||
23
.travis.yml
Normal file
23
.travis.yml
Normal file
@@ -0,0 +1,23 @@
|
||||
# Note: right now we don't use go-specific features of travis.
|
||||
# Later we might automate "go test" etc. (or do it inside a docker container...?)
|
||||
|
||||
language: go
|
||||
|
||||
go: 1.2
|
||||
|
||||
# Disable the normal go build.
|
||||
install: true
|
||||
|
||||
before_script:
|
||||
- env | sort
|
||||
- sudo apt-get update -qq
|
||||
- sudo apt-get install -qq python-yaml
|
||||
- git remote add upstream git://github.com/dotcloud/docker.git
|
||||
- git fetch --append --no-tags upstream refs/heads/master:refs/remotes/upstream/master
|
||||
# sometimes we have upstream master already as origin/master (PRs), but other times we don't, so let's just make sure we have a completely unambiguous way to specify "upstream master" from here out
|
||||
|
||||
script:
|
||||
- hack/travis/dco.py
|
||||
- hack/travis/gofmt.py
|
||||
|
||||
# vim:set sw=2 ts=2:
|
||||
3
AUTHORS
3
AUTHORS
@@ -20,6 +20,7 @@ Antony Messerli <amesserl@rackspace.com>
|
||||
Asbjørn Enge <asbjorn@hanafjedle.net>
|
||||
Barry Allard <barry.allard@gmail.com>
|
||||
Ben Toews <mastahyeti@gmail.com>
|
||||
Ben Wiklund <ben@daisyowl.com>
|
||||
Benoit Chesneau <bchesneau@gmail.com>
|
||||
Bhiraj Butala <abhiraj.butala@gmail.com>
|
||||
Bouke Haarsma <bouke@webatoom.nl>
|
||||
@@ -68,6 +69,7 @@ Francisco Souza <f@souza.cc>
|
||||
Frederick F. Kautz IV <fkautz@alumni.cmu.edu>
|
||||
Gabriel Monroy <gabriel@opdemand.com>
|
||||
Gareth Rushgrove <gareth@morethanseven.net>
|
||||
Graydon Hoare <graydon@pobox.com>
|
||||
Greg Thornton <xdissent@me.com>
|
||||
Guillaume J. Charmes <guillaume.charmes@dotcloud.com>
|
||||
Gurjeet Singh <gurjeet@singh.im>
|
||||
@@ -113,6 +115,7 @@ Kyle Conroy <kyle.j.conroy@gmail.com>
|
||||
Laurie Voss <github@seldo.com>
|
||||
Louis Opter <kalessin@kalessin.fr>
|
||||
Manuel Meurer <manuel@krautcomputing.com>
|
||||
Manuel Woelker <docker@manuel.woelker.org>
|
||||
Marco Hennings <marco.hennings@freiheit.com>
|
||||
Marcus Farkas <toothlessgear@finitebox.com>
|
||||
Marcus Ramberg <marcus@nordaaker.com>
|
||||
|
||||
223
CHANGELOG.md
223
CHANGELOG.md
@@ -1,10 +1,211 @@
|
||||
# Changelog
|
||||
|
||||
## 0.7.4 (2014-01-07)
|
||||
|
||||
#### Builder
|
||||
|
||||
- Fix ADD caching issue with . prefixed path
|
||||
- Fix docker build on devicemapper by reverting sparse file tar option
|
||||
- Fix issue with file caching and prevent wrong cache hit
|
||||
* Use same error handling while unmarshalling CMD and ENTRYPOINT
|
||||
|
||||
#### Documentation
|
||||
|
||||
* Simplify and streamline Amazon Quickstart
|
||||
* Install instructions use unprefixed fedora image
|
||||
* Update instructions for mtu flag for Docker on GCE
|
||||
+ Add Ubuntu Saucy to installation
|
||||
- Fix for wrong version warning on master instead of latest
|
||||
|
||||
#### Runtime
|
||||
|
||||
- Only get the image's rootfs when we need to calculate the image size
|
||||
- Correctly handle unmapping UDP ports
|
||||
* Make CopyFileWithTar use a pipe instead of a buffer to save memory on docker build
|
||||
- Fix login message to say pull instead of push
|
||||
- Fix "docker load" help by removing "SOURCE" prompt and mentioning STDIN
|
||||
* Make blank -H option default to the same as no -H was sent
|
||||
* Extract cgroups utilities to own submodule
|
||||
|
||||
#### Other
|
||||
|
||||
+ Add Travis CI configuration to validate DCO and gofmt requirements
|
||||
+ Add Developer Certificate of Origin Text
|
||||
* Upgrade VBox Guest Additions
|
||||
* Check standalone header when pinging a registry server
|
||||
|
||||
## 0.7.3 (2014-01-02)
|
||||
|
||||
#### Builder
|
||||
|
||||
+ Update ADD to use the image cache, based on a hash of the added content
|
||||
* Add error message for empty Dockerfile
|
||||
|
||||
#### Documentation
|
||||
|
||||
- Fix outdated link to the "Introduction" on www.docker.io
|
||||
+ Update the docs to get wider when the screen does
|
||||
- Add information about needing to install LXC when using raw binaries
|
||||
* Update Fedora documentation to disentangle the docker and docker.io conflict
|
||||
* Add a note about using the new `-mtu` flag in several GCE zones
|
||||
+ Add FrugalWare installation instructions
|
||||
+ Add a more complete example of `docker run`
|
||||
- Fix API documentation for creating and starting Privileged containers
|
||||
- Add missing "name" parameter documentation on "/containers/create"
|
||||
* Add a mention of `lxc-checkconfig` as a way to check for some of the necessary kernel configuration
|
||||
- Update the 1.8 API documentation with some additions that were added to the docs for 1.7
|
||||
|
||||
#### Hack
|
||||
|
||||
- Add missing libdevmapper dependency to the packagers documentation
|
||||
* Update minimum Go requirement to a hard line at Go 1.2+
|
||||
* Many minor improvements to the Vagrantfile
|
||||
+ Add ability to customize dockerinit search locations when compiling (to be used very sparingly only by packagers of platforms who require a nonstandard location)
|
||||
+ Add coverprofile generation reporting
|
||||
- Add `-a` to our Go build flags, removing the need for recompiling the stdlib manually
|
||||
* Update Dockerfile to be more canonical and have less spurious warnings during build
|
||||
- Fix some miscellaneous `docker pull` progress bar display issues
|
||||
* Migrate more miscellaneous packages under the "pkg" folder
|
||||
* Update TextMate highlighting to automatically be enabled for files named "Dockerfile"
|
||||
* Reorganize syntax highlighting files under a common "contrib/syntax" directory
|
||||
* Update install.sh script (https://get.docker.io/) to not fail if busybox fails to download or run at the end of the Ubuntu/Debian installation
|
||||
* Add support for container names in bash completion
|
||||
|
||||
#### Packaging
|
||||
|
||||
+ Add an official Docker client binary for Darwin (Mac OS X)
|
||||
* Remove empty "Vendor" string and added "License" on deb package
|
||||
+ Add a stubbed version of "/etc/default/docker" in the deb package
|
||||
|
||||
#### Runtime
|
||||
|
||||
* Update layer application to extract tars in place, avoiding file churn while handling whiteouts
|
||||
- Fix permissiveness of mtime comparisons in tar handling (since GNU tar and Go tar do not yet support sub-second mtime precision)
|
||||
* Reimplement `docker top` in pure Go to work more consistently, and even inside Docker-in-Docker (thus removing the shell injection vulnerability present in some versions of `lxc-ps`)
|
||||
+ Update `-H unix://` to work similarly to `-H tcp://` by inserting the default values for missing portions
|
||||
- Fix more edge cases regarding dockerinit and deleted or replaced docker or dockerinit files
|
||||
* Update container name validation to include '.'
|
||||
- Fix use of a symlink or non-absolute path as the argument to `-g` to work as expected
|
||||
* Update to handle external mounts outside of LXC, fixing many small mounting quirks and making future execution backends and other features simpler
|
||||
* Update to use proper box-drawing characters everywhere in `docker images -tree`
|
||||
* Move MTU setting from LXC configuration to directly use netlink
|
||||
* Add `-S` option to external tar invocation for more efficient spare file handling
|
||||
+ Add arch/os info to User-Agent string, especially for registry requests
|
||||
+ Add `-mtu` option to Docker daemon for configuring MTU
|
||||
- Fix `docker build` to exit with a non-zero exit code on error
|
||||
+ Add `DOCKER_HOST` environment variable to configure the client `-H` flag without specifying it manually for every invocation
|
||||
|
||||
## 0.7.2 (2013-12-16)
|
||||
|
||||
#### Runtime
|
||||
|
||||
+ Validate container names on creation with standard regex
|
||||
* Increase maximum image depth to 127 from 42
|
||||
* Continue to move api endpoints to the job api
|
||||
+ Add -bip flag to allow specification of dynamic bridge IP via CIDR
|
||||
- Allow bridge creation when ipv6 is not enabled on certain systems
|
||||
* Set hostname and IP address from within dockerinit
|
||||
* Drop capabilities from within dockerinit
|
||||
- Fix volumes on host when symlink is present the image
|
||||
- Prevent deletion of image if ANY container is depending on it even if the container is not running
|
||||
* Update docker push to use new progress display
|
||||
* Use os.Lstat to allow mounting unix sockets when inspecting volumes
|
||||
- Adjust handling of inactive user login
|
||||
- Add missing defines in devicemapper for older kernels
|
||||
- Allow untag operations with no container validation
|
||||
- Add auth config to docker build
|
||||
|
||||
#### Documentation
|
||||
|
||||
* Add more information about Docker logging
|
||||
+ Add RHEL documentation
|
||||
* Add a direct example for changing the CMD that is run in a container
|
||||
* Update Arch installation documentation
|
||||
+ Add section on Trusted Builds
|
||||
+ Add Network documentation page
|
||||
|
||||
#### Other
|
||||
|
||||
+ Add new cover bundle for providing code coverage reporting
|
||||
* Separate integration tests in bundles
|
||||
* Make Tianon the hack maintainer
|
||||
* Update mkimage-debootstrap with more tweaks for keeping images small
|
||||
* Use https to get the install script
|
||||
* Remove vendored dotcloud/tar now that Go 1.2 has been released
|
||||
|
||||
## 0.7.1 (2013-12-05)
|
||||
|
||||
#### Documentation
|
||||
|
||||
+ Add @SvenDowideit as documentation maintainer
|
||||
+ Add links example
|
||||
+ Add documentation regarding ambassador pattern
|
||||
+ Add Google Cloud Platform docs
|
||||
+ Add dockerfile best practices
|
||||
* Update doc for RHEL
|
||||
* Update doc for registry
|
||||
* Update Postgres examples
|
||||
* Update doc for Ubuntu install
|
||||
* Improve remote api doc
|
||||
|
||||
#### Runtime
|
||||
|
||||
+ Add hostconfig to docker inspect
|
||||
+ Implement `docker log -f` to stream logs
|
||||
+ Add env variable to disable kernel version warning
|
||||
+ Add -format to `docker inspect`
|
||||
+ Support bind-mount for files
|
||||
- Fix bridge creation on RHEL
|
||||
- Fix image size calculation
|
||||
- Make sure iptables are called even if the bridge already exists
|
||||
- Fix issue with stderr only attach
|
||||
- Remove init layer when destroying a container
|
||||
- Fix same port binding on different interfaces
|
||||
- `docker build` now returns the correct exit code
|
||||
- Fix `docker port` to display correct port
|
||||
- `docker build` now check that the dockerfile exists client side
|
||||
- `docker attach` now returns the correct exit code
|
||||
- Remove the name entry when the container does not exist
|
||||
|
||||
#### Registry
|
||||
|
||||
* Improve progress bars, add ETA for downloads
|
||||
* Simultaneous pulls now waits for the first to finish instead of failing
|
||||
- Tag only the top-layer image when pushing to registry
|
||||
- Fix issue with offline image transfer
|
||||
- Fix issue preventing using ':' in password for registry
|
||||
|
||||
#### Other
|
||||
|
||||
+ Add pprof handler for debug
|
||||
+ Create a Makefile
|
||||
* Use stdlib tar that now includes fix
|
||||
* Improve make.sh test script
|
||||
* Handle SIGQUIT on the daemon
|
||||
* Disable verbose during tests
|
||||
* Upgrade to go1.2 for official build
|
||||
* Improve unit tests
|
||||
* The test suite now runs all tests even if one fails
|
||||
* Refactor C in Go (Devmapper)
|
||||
- Fix OSX compilation
|
||||
|
||||
## 0.7.0 (2013-11-25)
|
||||
|
||||
#### Notable features since 0.6.0
|
||||
|
||||
* Storage drivers: choose from aufs, device-mapper, or vfs.
|
||||
* Standard Linux support: docker now runs on unmodified Linux kernels and all major distributions.
|
||||
* Links: compose complex software stacks by connecting containers to each other.
|
||||
* Container naming: organize your containers by giving them memorable names.
|
||||
* Advanced port redirects: specify port redirects per interface, or keep sensitive ports private.
|
||||
* Offline transfer: push and pull images to the filesystem without losing information.
|
||||
* Quality: numerous bugfixes and small usability improvements. Significant increase in test coverage.
|
||||
|
||||
## 0.6.7 (2013-11-21)
|
||||
|
||||
#### Runtime
|
||||
|
||||
* Improved stability, fixes some race conditons
|
||||
* Improve stability, fixes some race conditons
|
||||
* Skip the volumes mounted when deleting the volumes of container.
|
||||
* Fix layer size computation: handle hard links correctly
|
||||
* Use the work Path for docker cp CONTAINER:PATH
|
||||
@@ -47,7 +248,7 @@
|
||||
+ Add lock around write operations in graph
|
||||
* Check if port is valid
|
||||
* Fix restart runtime error with ghost container networking
|
||||
+ Added some more colors and animals to increase the pool of generated names
|
||||
+ Add some more colors and animals to increase the pool of generated names
|
||||
* Fix issues in docker inspect
|
||||
+ Escape apparmor confinement
|
||||
+ Set environment variables using a file.
|
||||
@@ -201,7 +402,7 @@
|
||||
* Improve network performance for VirtualBox
|
||||
* Revamp install.sh to be usable by more people, and to use official install methods whenever possible (apt repo, portage tree, etc.)
|
||||
- Fix contrib/mkimage-debian.sh apt caching prevention
|
||||
+ Added Dockerfile.tmLanguage to contrib
|
||||
+ Add Dockerfile.tmLanguage to contrib
|
||||
* Configured FPM to make /etc/init/docker.conf a config file
|
||||
* Enable SSH Agent forwarding in Vagrant VM
|
||||
* Several small tweaks/fixes for contrib/mkimage-debian.sh
|
||||
@@ -315,7 +516,7 @@
|
||||
* Mount /dev/shm as a tmpfs
|
||||
- Switch from http to https for get.docker.io
|
||||
* Let userland proxy handle container-bound traffic
|
||||
* Updated the Docker CLI to specify a value for the "Host" header.
|
||||
* Update the Docker CLI to specify a value for the "Host" header.
|
||||
- Change network range to avoid conflict with EC2 DNS
|
||||
- Reduce connect and read timeout when pinging the registry
|
||||
* Parallel pull
|
||||
@@ -511,7 +712,7 @@
|
||||
|
||||
+ Builder: 'docker build git://URL' fetches and builds a remote git repository
|
||||
* Runtime: 'docker ps -s' optionally prints container size
|
||||
* Tests: Improved and simplified
|
||||
* Tests: improved and simplified
|
||||
- Runtime: fix a regression introduced in 0.4.3 which caused the logs command to fail.
|
||||
- Builder: fix a regression when using ADD with single regular file.
|
||||
|
||||
@@ -526,7 +727,7 @@
|
||||
+ ADD of a local file will detect tar archives and unpack them
|
||||
* ADD improvements: use tar for copy + automatically unpack local archives
|
||||
* ADD uses tar/untar for copies instead of calling 'cp -ar'
|
||||
* Fixed the behavior of ADD to be (mostly) reverse-compatible, predictable and well-documented.
|
||||
* Fix the behavior of ADD to be (mostly) reverse-compatible, predictable and well-documented.
|
||||
- Fix a bug which caused builds to fail if ADD was the first command
|
||||
* Nicer output for 'docker build'
|
||||
|
||||
@@ -571,7 +772,7 @@
|
||||
+ Detect faulty DNS configuration and replace it with a public default
|
||||
+ Allow docker run <name>:<id>
|
||||
+ You can now specify public port (ex: -p 80:4500)
|
||||
* Improved image removal to garbage-collect unreferenced parents
|
||||
* Improve image removal to garbage-collect unreferenced parents
|
||||
|
||||
#### Client
|
||||
|
||||
@@ -625,7 +826,7 @@
|
||||
|
||||
#### Documentation
|
||||
|
||||
* Improved install instructions.
|
||||
* Improve install instructions.
|
||||
|
||||
## 0.3.3 (2013-05-23)
|
||||
|
||||
@@ -710,7 +911,7 @@
|
||||
|
||||
+ Support for data volumes ('docker run -v=PATH')
|
||||
+ Share data volumes between containers ('docker run -volumes-from')
|
||||
+ Improved documentation
|
||||
+ Improve documentation
|
||||
* Upgrade to Go 1.0.3
|
||||
* Various upgrades to the dev environment for contributors
|
||||
|
||||
@@ -766,7 +967,7 @@
|
||||
- Add debian packaging
|
||||
- Documentation: installing on Arch Linux
|
||||
- Documentation: running Redis on docker
|
||||
- Fixed lxc 0.9 compatibility
|
||||
- Fix lxc 0.9 compatibility
|
||||
- Automatically load aufs module
|
||||
- Various bugfixes and stability improvements
|
||||
|
||||
@@ -801,7 +1002,7 @@
|
||||
- Stabilize process management
|
||||
- Layers can include a commit message
|
||||
- Simplified 'docker attach'
|
||||
- Fixed support for re-attaching
|
||||
- Fix support for re-attaching
|
||||
- Various bugfixes and stability improvements
|
||||
- Auto-download at run
|
||||
- Auto-login on push
|
||||
|
||||
@@ -4,6 +4,13 @@ Want to hack on Docker? Awesome! Here are instructions to get you
|
||||
started. They are probably not perfect, please let us know if anything
|
||||
feels wrong or incomplete.
|
||||
|
||||
## Reporting Issues
|
||||
|
||||
When reporting [issues](https://github.com/dotcloud/docker/issues)
|
||||
on Github please include your host OS ( Ubuntu 12.04, Fedora 19, etc... )
|
||||
and the output of `docker version` along with the output of `docker info` if possible.
|
||||
This information will help us review and fix your issue faster.
|
||||
|
||||
## Build Environment
|
||||
|
||||
For instructions on setting up your development environment, please
|
||||
@@ -64,7 +71,7 @@ your branch before submitting a pull request.
|
||||
|
||||
Update the documentation when creating or modifying features. Test
|
||||
your documentation changes for clarity, concision, and correctness, as
|
||||
well as a clean docmuent build. See ``docs/README.md`` for more
|
||||
well as a clean documentation build. See ``docs/README.md`` for more
|
||||
information on building the docs and how docs get released.
|
||||
|
||||
Write clean code. Universally formatted code promotes ease of writing, reading,
|
||||
@@ -98,23 +105,46 @@ name and email address match your git configuration. The AUTHORS file is
|
||||
regenerated occasionally from the git commit history, so a mismatch may result
|
||||
in your changes being overwritten.
|
||||
|
||||
### Approval
|
||||
### Sign your work
|
||||
|
||||
Docker maintainers use LGTM (looks good to me) in comments on the code review
|
||||
to indicate acceptance.
|
||||
The sign-off is a simple line at the end of the explanation for the
|
||||
patch, which certifies that you wrote it or otherwise have the right to
|
||||
pass it on as an open-source patch. The rules are pretty simple: if you
|
||||
can certify the below:
|
||||
|
||||
```
|
||||
Docker Developer Grant and Certificate of Origin 1.0
|
||||
|
||||
By making a contribution to the Docker Project ("Project"), I represent and warrant that:
|
||||
|
||||
a. The contribution was created in whole or in part by me and I have the right to submit the contribution on my own behalf or on behalf of a third party who has authorized me to submit this contribution to the Project; or
|
||||
|
||||
|
||||
b. The contribution is based upon previous work that, to the best of my knowledge, is covered under an appropriate open source license and I have the right and authorization to submit that work with modifications, whether created in whole or in part by me, under the same open source license (unless I am permitted to submit under a different license) that I have identified in the contribution; or
|
||||
|
||||
c. The contribution was provided directly to me by some other person who represented and warranted (a) or (b) and I have not modified it.
|
||||
|
||||
d. I understand and agree that this Project and the contribution are publicly known and that a record of the contribution (including all personal information I submit with it, including my sign-off record) is maintained indefinitely and may be redistributed consistent with this Project or the open source license(s) involved.
|
||||
|
||||
e. I hereby grant to the Project, Docker, Inc and its successors; and recipients of software distributed by the Project a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, modify, prepare derivative works of, publicly display, publicly perform, sublicense, and distribute this contribution and such modifications and derivative works consistent with this Project, the open source license indicated in the previous work or other appropriate open source license specified by the Project and approved by the Open Source Initiative(OSI) at http://www.opensource.org.
|
||||
```
|
||||
|
||||
then you just add a line saying
|
||||
|
||||
Docker-DCO-1.0-Signed-off-by: Joe Smith <joe.smith@email.com> (github: github_handle)
|
||||
|
||||
using your real name (sorry, no pseudonyms or anonymous contributions.)
|
||||
|
||||
If you have any questions, please refer to the FAQ in the [docs](http://docs.docker.io)
|
||||
|
||||
A change requires LGTMs from an absolute majority of the maintainers of each
|
||||
component affected. For example, if a change affects docs/ and registry/, it
|
||||
needs an absolute majority from the maintainers of docs/ AND, separately, an
|
||||
absolute majority of the maintainers of registry
|
||||
|
||||
For more details see [MAINTAINERS.md](hack/MAINTAINERS.md)
|
||||
|
||||
### How can I become a maintainer?
|
||||
|
||||
* Step 1: learn the component inside out
|
||||
* Step 2: make yourself useful by contributing code, bugfixes, support etc.
|
||||
* Step 3: volunteer on the irc channel (#docker@freenode)
|
||||
* Step 4: propose yourself at a scheduled #docker-meeting
|
||||
|
||||
Don't forget: being a maintainer is a time investment. Make sure you will have time to make yourself available.
|
||||
You don't have to be a maintainer to make a difference on the project!
|
||||
|
||||
81
Dockerfile
81
Dockerfile
@@ -23,44 +23,65 @@
|
||||
# the case. Therefore, you don't have to disable it anymore.
|
||||
#
|
||||
|
||||
docker-version 0.6.1
|
||||
from ubuntu:12.04
|
||||
maintainer Solomon Hykes <solomon@dotcloud.com>
|
||||
docker-version 0.6.1
|
||||
FROM stackbrew/ubuntu:12.04
|
||||
MAINTAINER Tianon Gravi <admwiggin@gmail.com> (@tianon)
|
||||
|
||||
# Build dependencies
|
||||
run echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' > /etc/apt/sources.list
|
||||
run apt-get update
|
||||
run apt-get install -y -q curl
|
||||
run apt-get install -y -q git
|
||||
run apt-get install -y -q mercurial
|
||||
run apt-get install -y -q build-essential libsqlite3-dev
|
||||
# Add precise-backports to get s3cmd >= 1.1.0 (so we get ENV variable support in our .s3cfg)
|
||||
RUN echo 'deb http://archive.ubuntu.com/ubuntu precise-backports main universe' > /etc/apt/sources.list.d/backports.list
|
||||
|
||||
# Packaged dependencies
|
||||
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \
|
||||
apt-utils \
|
||||
aufs-tools \
|
||||
build-essential \
|
||||
curl \
|
||||
dpkg-sig \
|
||||
git \
|
||||
iptables \
|
||||
libsqlite3-dev \
|
||||
lxc \
|
||||
mercurial \
|
||||
reprepro \
|
||||
ruby1.9.1 \
|
||||
ruby1.9.1-dev \
|
||||
s3cmd=1.1.0* \
|
||||
--no-install-recommends
|
||||
|
||||
# Get lvm2 source for compiling statically
|
||||
RUN git clone https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103
|
||||
# see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags
|
||||
# note: we can't use "git clone -b" above because it requires at least git 1.7.10 to be able to use that on a tag instead of a branch and we only have 1.7.9.5
|
||||
|
||||
# Compile and install lvm2
|
||||
RUN cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper
|
||||
# see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL
|
||||
|
||||
# Install Go
|
||||
run curl -s https://go.googlecode.com/files/go1.2rc5.src.tar.gz | tar -v -C /usr/local -xz
|
||||
env PATH /usr/local/go/bin:/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin
|
||||
env GOPATH /go:/go/src/github.com/dotcloud/docker/vendor
|
||||
run cd /usr/local/go/src && ./make.bash && go install -ldflags '-w -linkmode external -extldflags "-static -Wl,--unresolved-symbols=ignore-in-shared-libs"' -tags netgo -a std
|
||||
RUN curl -s https://go.googlecode.com/files/go1.2.src.tar.gz | tar -v -C /usr/local -xz
|
||||
ENV PATH /usr/local/go/bin:$PATH
|
||||
ENV GOPATH /go:/go/src/github.com/dotcloud/docker/vendor
|
||||
RUN cd /usr/local/go/src && ./make.bash --no-clean 2>&1
|
||||
|
||||
# Ubuntu stuff
|
||||
run apt-get install -y -q ruby1.9.3 rubygems libffi-dev
|
||||
run gem install --no-rdoc --no-ri fpm
|
||||
run apt-get install -y -q reprepro dpkg-sig
|
||||
# Compile Go for cross compilation
|
||||
ENV DOCKER_CROSSPLATFORMS darwin/amd64 darwin/386
|
||||
# TODO add linux/386 and linux/arm
|
||||
RUN cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done'
|
||||
|
||||
run apt-get install -y -q python-pip
|
||||
run pip install s3cmd==1.1.0-beta3
|
||||
run pip install python-magic==0.4.6
|
||||
run /bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEY\n' > /.s3cfg
|
||||
# Grab Go's cover tool for dead-simple code coverage testing
|
||||
RUN go get code.google.com/p/go.tools/cmd/cover
|
||||
|
||||
# Runtime dependencies
|
||||
run apt-get install -y -q iptables
|
||||
run apt-get install -y -q lxc
|
||||
run apt-get install -y -q aufs-tools
|
||||
# TODO replace FPM with some very minimal debhelper stuff
|
||||
RUN gem install --no-rdoc --no-ri fpm --version 1.0.1
|
||||
|
||||
volume /var/lib/docker
|
||||
workdir /go/src/github.com/dotcloud/docker
|
||||
# Setup s3cmd config
|
||||
RUN /bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg
|
||||
|
||||
VOLUME /var/lib/docker
|
||||
WORKDIR /go/src/github.com/dotcloud/docker
|
||||
|
||||
# Wrap all commands in the "docker-in-docker" script to allow nested containers
|
||||
entrypoint ["hack/dind"]
|
||||
ENTRYPOINT ["hack/dind"]
|
||||
|
||||
# Upload docker source
|
||||
add . /go/src/github.com/dotcloud/docker
|
||||
ADD . /go/src/github.com/dotcloud/docker
|
||||
|
||||
@@ -2,5 +2,8 @@ Solomon Hykes <solomon@dotcloud.com> (@shykes)
|
||||
Guillaume Charmes <guillaume@dotcloud.com> (@creack)
|
||||
Victor Vieux <victor@dotcloud.com> (@vieux)
|
||||
Michael Crosby <michael@crosbymichael.com> (@crosbymichael)
|
||||
.travis.yml: Tianon Gravi <admwiggin@gmail.com> (@tianon)
|
||||
api.go: Victor Vieux <victor@dotcloud.com> (@vieux)
|
||||
Dockerfile: Tianon Gravi <admwiggin@gmail.com> (@tianon)
|
||||
Makefile: Tianon Gravi <admwiggin@gmail.com> (@tianon)
|
||||
Vagrantfile: Daniel Mizyrycki <daniel@dotcloud.com> (@mzdaniel)
|
||||
|
||||
29
Makefile
Normal file
29
Makefile
Normal file
@@ -0,0 +1,29 @@
|
||||
.PHONY: all binary build cross default docs shell test
|
||||
|
||||
DOCKER_RUN_DOCKER := docker run -rm -i -t -privileged -e TESTFLAGS -v $(CURDIR)/bundles:/go/src/github.com/dotcloud/docker/bundles docker
|
||||
|
||||
default: binary
|
||||
|
||||
all: build
|
||||
$(DOCKER_RUN_DOCKER) hack/make.sh
|
||||
|
||||
binary: build
|
||||
$(DOCKER_RUN_DOCKER) hack/make.sh binary
|
||||
|
||||
cross: build
|
||||
$(DOCKER_RUN_DOCKER) hack/make.sh binary cross
|
||||
|
||||
docs:
|
||||
docker build -t docker-docs docs && docker run -p 8000:8000 docker-docs
|
||||
|
||||
test: build
|
||||
$(DOCKER_RUN_DOCKER) hack/make.sh test test-integration
|
||||
|
||||
shell: build
|
||||
$(DOCKER_RUN_DOCKER) bash
|
||||
|
||||
build: bundles
|
||||
docker build -t docker .
|
||||
|
||||
bundles:
|
||||
mkdir bundles
|
||||
46
REMOTE_TODO.md
Normal file
46
REMOTE_TODO.md
Normal file
@@ -0,0 +1,46 @@
|
||||
```
|
||||
**GET**
|
||||
send objects deprecate multi-stream
|
||||
TODO "/events": getEvents, N
|
||||
ok "/info": getInfo, 1
|
||||
ok "/version": getVersion, 1
|
||||
... "/images/json": getImagesJSON, N
|
||||
TODO "/images/viz": getImagesViz, 0 yes
|
||||
TODO "/images/search": getImagesSearch, N
|
||||
#3490 "/images/{name:.*}/get": getImagesGet, 0
|
||||
TODO "/images/{name:.*}/history": getImagesHistory, 1
|
||||
TODO "/images/{name:.*}/json": getImagesByName, 1
|
||||
TODO "/containers/ps": getContainersJSON, N
|
||||
TODO "/containers/json": getContainersJSON, 1
|
||||
ok "/containers/{name:.*}/export": getContainersExport, 0
|
||||
TODO "/containers/{name:.*}/changes": getContainersChanges, 1
|
||||
TODO "/containers/{name:.*}/json": getContainersByName, 1
|
||||
TODO "/containers/{name:.*}/top": getContainersTop, N
|
||||
TODO "/containers/{name:.*}/attach/ws": wsContainersAttach, 0 yes
|
||||
|
||||
**POST**
|
||||
TODO "/auth": postAuth, 0 yes
|
||||
ok "/commit": postCommit, 0
|
||||
TODO "/build": postBuild, 0 yes
|
||||
TODO "/images/create": postImagesCreate, N yes yes (pull)
|
||||
TODO "/images/{name:.*}/insert": postImagesInsert, N yes yes
|
||||
TODO "/images/load": postImagesLoad, 1 yes (stdin)
|
||||
TODO "/images/{name:.*}/push": postImagesPush, N yes
|
||||
ok "/images/{name:.*}/tag": postImagesTag, 0
|
||||
ok "/containers/create": postContainersCreate, 0
|
||||
ok "/containers/{name:.*}/kill": postContainersKill, 0
|
||||
#3476 "/containers/{name:.*}/restart": postContainersRestart, 0
|
||||
ok "/containers/{name:.*}/start": postContainersStart, 0
|
||||
ok "/containers/{name:.*}/stop": postContainersStop, 0
|
||||
ok "/containers/{name:.*}/wait": postContainersWait, 0
|
||||
ok "/containers/{name:.*}/resize": postContainersResize, 0
|
||||
TODO "/containers/{name:.*}/attach": postContainersAttach, 0 yes
|
||||
TODO "/containers/{name:.*}/copy": postContainersCopy, 0 yes
|
||||
|
||||
**DELETE**
|
||||
#3180 "/containers/{name:.*}": deleteContainers, 0
|
||||
TODO "/images/{name:.*}": deleteImages, N
|
||||
|
||||
**OPTIONS**
|
||||
ok "": optionsHandler, 0
|
||||
```
|
||||
11
Vagrantfile
vendored
11
Vagrantfile
vendored
@@ -26,7 +26,7 @@ fi
|
||||
# Adding an apt gpg key is idempotent.
|
||||
wget -q -O - https://get.docker.io/gpg | apt-key add -
|
||||
|
||||
# Creating the docker.list file is idempotent, but it may overrite desired
|
||||
# Creating the docker.list file is idempotent, but it may overwrite desired
|
||||
# settings if it already exists. This could be solved with md5sum but it
|
||||
# doesn't seem worth it.
|
||||
echo 'deb http://get.docker.io/ubuntu docker main' > \
|
||||
@@ -41,7 +41,7 @@ apt-get install -q -y lxc-docker
|
||||
usermod -a -G docker "$user"
|
||||
|
||||
tmp=`mktemp -q` && {
|
||||
# Only install the backport kernel, don't bother upgrade if the backport is
|
||||
# Only install the backport kernel, don't bother upgrading if the backport is
|
||||
# already installed. We want parse the output of apt so we need to save it
|
||||
# with 'tee'. NOTE: The installation of the kernel will trigger dkms to
|
||||
# install vboxguest if needed.
|
||||
@@ -70,7 +70,7 @@ SCRIPT
|
||||
# trigger dkms to build the virtualbox guest module install.
|
||||
$vbox_script = <<VBOX_SCRIPT + $script
|
||||
# Install the VirtualBox guest additions if they aren't already installed.
|
||||
if [ ! -d /opt/VBoxGuestAdditions-4.3.2/ ]; then
|
||||
if [ ! -d /opt/VBoxGuestAdditions-4.3.6/ ]; then
|
||||
# Update remote package metadata. 'apt-get update' is idempotent.
|
||||
apt-get update -q
|
||||
|
||||
@@ -79,9 +79,10 @@ if [ ! -d /opt/VBoxGuestAdditions-4.3.2/ ]; then
|
||||
apt-get install -q -y linux-headers-generic-lts-raring dkms
|
||||
|
||||
echo 'Downloading VBox Guest Additions...'
|
||||
wget -cq http://dlc.sun.com.edgesuite.net/virtualbox/4.3.2/VBoxGuestAdditions_4.3.2.iso
|
||||
wget -cq http://dlc.sun.com.edgesuite.net/virtualbox/4.3.6/VBoxGuestAdditions_4.3.6.iso
|
||||
echo "95648fcdb5d028e64145a2fe2f2f28c946d219da366389295a61fed296ca79f0 VBoxGuestAdditions_4.3.6.iso" | sha256sum --check || exit 1
|
||||
|
||||
mount -o loop,ro /home/vagrant/VBoxGuestAdditions_4.3.2.iso /mnt
|
||||
mount -o loop,ro /home/vagrant/VBoxGuestAdditions_4.3.6.iso /mnt
|
||||
/mnt/VBoxLinuxAdditions.run --nox11
|
||||
umount /mnt
|
||||
fi
|
||||
|
||||
202
api.go
202
api.go
@@ -1,12 +1,16 @@
|
||||
package docker
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"code.google.com/p/go.net/websocket"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"expvar"
|
||||
"fmt"
|
||||
"github.com/dotcloud/docker/archive"
|
||||
"github.com/dotcloud/docker/auth"
|
||||
"github.com/dotcloud/docker/pkg/systemd"
|
||||
"github.com/dotcloud/docker/utils"
|
||||
"github.com/gorilla/mux"
|
||||
"io"
|
||||
@@ -15,6 +19,7 @@ import (
|
||||
"mime"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/pprof"
|
||||
"os"
|
||||
"os/exec"
|
||||
"regexp"
|
||||
@@ -23,7 +28,7 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
APIVERSION = 1.7
|
||||
APIVERSION = 1.8
|
||||
DEFAULTHTTPHOST = "127.0.0.1"
|
||||
DEFAULTHTTPPORT = 4243
|
||||
DEFAULTUNIXSOCKET = "/var/run/docker.sock"
|
||||
@@ -135,7 +140,8 @@ func postAuth(srv *Server, version float64, w http.ResponseWriter, r *http.Reque
|
||||
}
|
||||
|
||||
func getVersion(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
return writeJSON(w, http.StatusOK, srv.DockerVersion())
|
||||
srv.Eng.ServeHTTP(w, r)
|
||||
return nil
|
||||
}
|
||||
|
||||
func postContainersKill(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
@@ -145,19 +151,11 @@ func postContainersKill(srv *Server, version float64, w http.ResponseWriter, r *
|
||||
if err := parseForm(r); err != nil {
|
||||
return err
|
||||
}
|
||||
name := vars["name"]
|
||||
|
||||
signal := 0
|
||||
if r != nil {
|
||||
if s := r.Form.Get("signal"); s != "" {
|
||||
s, err := strconv.Atoi(s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
signal = s
|
||||
}
|
||||
job := srv.Eng.Job("kill", vars["name"])
|
||||
if sig := r.Form.Get("signal"); sig != "" {
|
||||
job.Args = append(job.Args, sig)
|
||||
}
|
||||
if err := srv.ContainerKill(name, signal); err != nil {
|
||||
if err := job.Run(); err != nil {
|
||||
return err
|
||||
}
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
@@ -168,10 +166,11 @@ func getContainersExport(srv *Server, version float64, w http.ResponseWriter, r
|
||||
if vars == nil {
|
||||
return fmt.Errorf("Missing parameter")
|
||||
}
|
||||
name := vars["name"]
|
||||
|
||||
if err := srv.ContainerExport(name, w); err != nil {
|
||||
utils.Errorf("%s", err)
|
||||
job := srv.Eng.Job("export", vars["name"])
|
||||
if err := job.Stdout.Add(w); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := job.Run(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
@@ -217,7 +216,8 @@ func getImagesViz(srv *Server, version float64, w http.ResponseWriter, r *http.R
|
||||
}
|
||||
|
||||
func getInfo(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
return writeJSON(w, http.StatusOK, srv.DockerInfo())
|
||||
srv.Eng.ServeHTTP(w, r)
|
||||
return nil
|
||||
}
|
||||
|
||||
func getEvents(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
@@ -254,7 +254,7 @@ func getEvents(srv *Server, version float64, w http.ResponseWriter, r *http.Requ
|
||||
wf.Flush()
|
||||
if since != 0 {
|
||||
// If since, send previous events that happened after the timestamp
|
||||
for _, event := range srv.events {
|
||||
for _, event := range srv.GetEvents() {
|
||||
if event.Time >= since {
|
||||
err := sendEvent(wf, &event)
|
||||
if err != nil && err.Error() == "JSON error" {
|
||||
@@ -357,18 +357,13 @@ func postImagesTag(srv *Server, version float64, w http.ResponseWriter, r *http.
|
||||
if err := parseForm(r); err != nil {
|
||||
return err
|
||||
}
|
||||
repo := r.Form.Get("repo")
|
||||
tag := r.Form.Get("tag")
|
||||
if vars == nil {
|
||||
return fmt.Errorf("Missing parameter")
|
||||
}
|
||||
name := vars["name"]
|
||||
force, err := getBoolParam(r.Form.Get("force"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := srv.ContainerTag(name, repo, tag, force); err != nil {
|
||||
job := srv.Eng.Job("tag", vars["name"], r.Form.Get("repo"), r.Form.Get("tag"))
|
||||
job.Setenv("force", r.Form.Get("force"))
|
||||
if err := job.Run(); err != nil {
|
||||
return err
|
||||
}
|
||||
w.WriteHeader(http.StatusCreated)
|
||||
@@ -383,13 +378,17 @@ func postCommit(srv *Server, version float64, w http.ResponseWriter, r *http.Req
|
||||
if err := json.NewDecoder(r.Body).Decode(config); err != nil && err != io.EOF {
|
||||
utils.Errorf("%s", err)
|
||||
}
|
||||
repo := r.Form.Get("repo")
|
||||
tag := r.Form.Get("tag")
|
||||
container := r.Form.Get("container")
|
||||
author := r.Form.Get("author")
|
||||
comment := r.Form.Get("comment")
|
||||
id, err := srv.ContainerCommit(container, repo, tag, author, comment, config)
|
||||
if err != nil {
|
||||
|
||||
job := srv.Eng.Job("commit", r.Form.Get("container"))
|
||||
job.Setenv("repo", r.Form.Get("repo"))
|
||||
job.Setenv("tag", r.Form.Get("tag"))
|
||||
job.Setenv("author", r.Form.Get("author"))
|
||||
job.Setenv("comment", r.Form.Get("comment"))
|
||||
job.SetenvJson("config", config)
|
||||
|
||||
var id string
|
||||
job.Stdout.AddString(&id)
|
||||
if err := job.Run(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -564,12 +563,18 @@ func postContainersCreate(srv *Server, version float64, w http.ResponseWriter, r
|
||||
job.SetenvList("Dns", defaultDns)
|
||||
}
|
||||
// Read container ID from the first line of stdout
|
||||
job.StdoutParseString(&out.ID)
|
||||
job.Stdout.AddString(&out.ID)
|
||||
// Read warnings from stderr
|
||||
job.StderrParseLines(&out.Warnings, 0)
|
||||
warnings := &bytes.Buffer{}
|
||||
job.Stderr.Add(warnings)
|
||||
if err := job.Run(); err != nil {
|
||||
return err
|
||||
}
|
||||
// Parse warnings from stderr
|
||||
scanner := bufio.NewScanner(warnings)
|
||||
for scanner.Scan() {
|
||||
out.Warnings = append(out.Warnings, scanner.Text())
|
||||
}
|
||||
if job.GetenvInt("Memory") > 0 && !srv.runtime.capabilities.MemoryLimit {
|
||||
log.Println("WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.")
|
||||
out.Warnings = append(out.Warnings, "Your kernel does not support memory limit capabilities. Limitation discarded.")
|
||||
@@ -678,17 +683,12 @@ func postContainersStop(srv *Server, version float64, w http.ResponseWriter, r *
|
||||
if err := parseForm(r); err != nil {
|
||||
return err
|
||||
}
|
||||
t, err := strconv.Atoi(r.Form.Get("t"))
|
||||
if err != nil || t < 0 {
|
||||
t = 10
|
||||
}
|
||||
|
||||
if vars == nil {
|
||||
return fmt.Errorf("Missing parameter")
|
||||
}
|
||||
name := vars["name"]
|
||||
|
||||
if err := srv.ContainerStop(name, t); err != nil {
|
||||
job := srv.Eng.Job("stop", vars["name"])
|
||||
job.Setenv("t", r.Form.Get("t"))
|
||||
if err := job.Run(); err != nil {
|
||||
return err
|
||||
}
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
@@ -699,33 +699,28 @@ func postContainersWait(srv *Server, version float64, w http.ResponseWriter, r *
|
||||
if vars == nil {
|
||||
return fmt.Errorf("Missing parameter")
|
||||
}
|
||||
name := vars["name"]
|
||||
|
||||
status, err := srv.ContainerWait(name)
|
||||
job := srv.Eng.Job("wait", vars["name"])
|
||||
var statusStr string
|
||||
job.Stdout.AddString(&statusStr)
|
||||
if err := job.Run(); err != nil {
|
||||
return err
|
||||
}
|
||||
// Parse a 16-bit encoded integer to map typical unix exit status.
|
||||
status, err := strconv.ParseInt(statusStr, 10, 16)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return writeJSON(w, http.StatusOK, &APIWait{StatusCode: status})
|
||||
return writeJSON(w, http.StatusOK, &APIWait{StatusCode: int(status)})
|
||||
}
|
||||
|
||||
func postContainersResize(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
if err := parseForm(r); err != nil {
|
||||
return err
|
||||
}
|
||||
height, err := strconv.Atoi(r.Form.Get("h"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
width, err := strconv.Atoi(r.Form.Get("w"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if vars == nil {
|
||||
return fmt.Errorf("Missing parameter")
|
||||
}
|
||||
name := vars["name"]
|
||||
if err := srv.ContainerResize(name, height, width); err != nil {
|
||||
if err := srv.Eng.Job("resize", vars["name"], r.Form.Get("h"), r.Form.Get("w")).Run(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
@@ -865,7 +860,10 @@ func getContainersByName(srv *Server, version float64, w http.ResponseWriter, r
|
||||
return fmt.Errorf("Conflict between containers and images")
|
||||
}
|
||||
|
||||
return writeJSON(w, http.StatusOK, container)
|
||||
container.readHostConfig()
|
||||
c := APIContainer{container, container.hostConfig}
|
||||
|
||||
return writeJSON(w, http.StatusOK, c)
|
||||
}
|
||||
|
||||
func getImagesByName(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
@@ -891,12 +889,25 @@ func postBuild(srv *Server, version float64, w http.ResponseWriter, r *http.Requ
|
||||
if version < 1.3 {
|
||||
return fmt.Errorf("Multipart upload for build is no longer supported. Please upgrade your docker client.")
|
||||
}
|
||||
remoteURL := r.FormValue("remote")
|
||||
repoName := r.FormValue("t")
|
||||
rawSuppressOutput := r.FormValue("q")
|
||||
rawNoCache := r.FormValue("nocache")
|
||||
rawRm := r.FormValue("rm")
|
||||
repoName, tag := utils.ParseRepositoryTag(repoName)
|
||||
var (
|
||||
remoteURL = r.FormValue("remote")
|
||||
repoName = r.FormValue("t")
|
||||
rawSuppressOutput = r.FormValue("q")
|
||||
rawNoCache = r.FormValue("nocache")
|
||||
rawRm = r.FormValue("rm")
|
||||
authEncoded = r.Header.Get("X-Registry-Auth")
|
||||
authConfig = &auth.AuthConfig{}
|
||||
tag string
|
||||
)
|
||||
repoName, tag = utils.ParseRepositoryTag(repoName)
|
||||
if authEncoded != "" {
|
||||
authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
|
||||
if err := json.NewDecoder(authJson).Decode(authConfig); err != nil {
|
||||
// for a pull it is not an error if no auth was given
|
||||
// to increase compatibility with the existing api it is defaulting to be empty
|
||||
authConfig = &auth.AuthConfig{}
|
||||
}
|
||||
}
|
||||
|
||||
var context io.Reader
|
||||
|
||||
@@ -922,7 +933,7 @@ func postBuild(srv *Server, version float64, w http.ResponseWriter, r *http.Requ
|
||||
}
|
||||
context = c
|
||||
} else if utils.IsURL(remoteURL) {
|
||||
f, err := utils.Download(remoteURL, ioutil.Discard)
|
||||
f, err := utils.Download(remoteURL)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -951,9 +962,26 @@ func postBuild(srv *Server, version float64, w http.ResponseWriter, r *http.Requ
|
||||
return err
|
||||
}
|
||||
|
||||
b := NewBuildFile(srv, utils.NewWriteFlusher(w), !suppressOutput, !noCache, rm)
|
||||
if version >= 1.8 {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
}
|
||||
sf := utils.NewStreamFormatter(version >= 1.8)
|
||||
b := NewBuildFile(srv,
|
||||
&StdoutFormater{
|
||||
Writer: utils.NewWriteFlusher(w),
|
||||
StreamFormatter: sf,
|
||||
},
|
||||
&StderrFormater{
|
||||
Writer: utils.NewWriteFlusher(w),
|
||||
StreamFormatter: sf,
|
||||
},
|
||||
!suppressOutput, !noCache, rm, utils.NewWriteFlusher(w), sf, authConfig)
|
||||
id, err := b.Build(context)
|
||||
if err != nil {
|
||||
if sf.Used() {
|
||||
w.Write(sf.FormatError(err))
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("Error build: %s", err)
|
||||
}
|
||||
if repoName != "" {
|
||||
@@ -1037,9 +1065,37 @@ func makeHttpHandler(srv *Server, logging bool, localMethod string, localRoute s
|
||||
}
|
||||
}
|
||||
|
||||
// Replicated from expvar.go as not public.
|
||||
func expvarHandler(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
fmt.Fprintf(w, "{\n")
|
||||
first := true
|
||||
expvar.Do(func(kv expvar.KeyValue) {
|
||||
if !first {
|
||||
fmt.Fprintf(w, ",\n")
|
||||
}
|
||||
first = false
|
||||
fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value)
|
||||
})
|
||||
fmt.Fprintf(w, "\n}\n")
|
||||
}
|
||||
|
||||
func AttachProfiler(router *mux.Router) {
|
||||
router.HandleFunc("/debug/vars", expvarHandler)
|
||||
router.HandleFunc("/debug/pprof/", pprof.Index)
|
||||
router.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
|
||||
router.HandleFunc("/debug/pprof/profile", pprof.Profile)
|
||||
router.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
|
||||
router.HandleFunc("/debug/pprof/heap", pprof.Handler("heap").ServeHTTP)
|
||||
router.HandleFunc("/debug/pprof/goroutine", pprof.Handler("goroutine").ServeHTTP)
|
||||
router.HandleFunc("/debug/pprof/threadcreate", pprof.Handler("threadcreate").ServeHTTP)
|
||||
}
|
||||
|
||||
func createRouter(srv *Server, logging bool) (*mux.Router, error) {
|
||||
r := mux.NewRouter()
|
||||
|
||||
if os.Getenv("DEBUG") != "" {
|
||||
AttachProfiler(r)
|
||||
}
|
||||
m := map[string]map[string]HttpApiFunc{
|
||||
"GET": {
|
||||
"/events": getEvents,
|
||||
@@ -1126,8 +1182,6 @@ func ServeRequest(srv *Server, apiversion float64, w http.ResponseWriter, req *h
|
||||
}
|
||||
|
||||
func ListenAndServe(proto, addr string, srv *Server, logging bool) error {
|
||||
log.Printf("Listening for HTTP on %s (%s)\n", addr, proto)
|
||||
|
||||
r, err := createRouter(srv, logging)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -1158,5 +1212,9 @@ func ListenAndServe(proto, addr string, srv *Server, logging bool) error {
|
||||
}
|
||||
}
|
||||
httpSrv := http.Server{Addr: addr, Handler: r}
|
||||
|
||||
log.Printf("Listening for HTTP on %s (%s)\n", addr, proto)
|
||||
// Tell the init daemon we are accepting requests
|
||||
go systemd.SdNotify("READY=1")
|
||||
return httpSrv.Serve(l)
|
||||
}
|
||||
|
||||
@@ -29,21 +29,6 @@ type (
|
||||
VirtualSize int64
|
||||
}
|
||||
|
||||
APIInfo struct {
|
||||
Debug bool
|
||||
Containers int
|
||||
Images int
|
||||
NFd int `json:",omitempty"`
|
||||
NGoroutines int `json:",omitempty"`
|
||||
MemoryLimit bool `json:",omitempty"`
|
||||
SwapLimit bool `json:",omitempty"`
|
||||
IPv4Forwarding bool `json:",omitempty"`
|
||||
LXCVersion string `json:",omitempty"`
|
||||
NEventsListener int `json:",omitempty"`
|
||||
KernelVersion string `json:",omitempty"`
|
||||
IndexServerAddress string `json:",omitempty"`
|
||||
}
|
||||
|
||||
APITop struct {
|
||||
Titles []string
|
||||
Processes [][]string
|
||||
@@ -93,12 +78,6 @@ type (
|
||||
IP string
|
||||
}
|
||||
|
||||
APIVersion struct {
|
||||
Version string
|
||||
GitCommit string `json:",omitempty"`
|
||||
GoVersion string `json:",omitempty"`
|
||||
}
|
||||
|
||||
APIWait struct {
|
||||
StatusCode int
|
||||
}
|
||||
@@ -116,6 +95,10 @@ type (
|
||||
Resource string
|
||||
HostPath string
|
||||
}
|
||||
APIContainer struct {
|
||||
*Container
|
||||
HostConfig *HostConfig
|
||||
}
|
||||
)
|
||||
|
||||
func (api APIImages) ToLegacy() []APIImagesOld {
|
||||
|
||||
@@ -3,6 +3,8 @@ package archive
|
||||
import (
|
||||
"archive/tar"
|
||||
"bytes"
|
||||
"compress/bzip2"
|
||||
"compress/gzip"
|
||||
"fmt"
|
||||
"github.com/dotcloud/docker/utils"
|
||||
"io"
|
||||
@@ -15,7 +17,15 @@ import (
|
||||
|
||||
type Archive io.Reader
|
||||
|
||||
type Compression uint32
|
||||
type Compression int
|
||||
|
||||
type TarOptions struct {
|
||||
Includes []string
|
||||
Excludes []string
|
||||
Recursive bool
|
||||
Compression Compression
|
||||
CreateFiles []string
|
||||
}
|
||||
|
||||
const (
|
||||
Uncompressed Compression = iota
|
||||
@@ -51,6 +61,43 @@ func DetectCompression(source []byte) Compression {
|
||||
return Uncompressed
|
||||
}
|
||||
|
||||
func xzDecompress(archive io.Reader) (io.Reader, error) {
|
||||
args := []string{"xz", "-d", "-c", "-q"}
|
||||
|
||||
return CmdStream(exec.Command(args[0], args[1:]...), archive, nil)
|
||||
}
|
||||
|
||||
func DecompressStream(archive io.Reader) (io.Reader, error) {
|
||||
buf := make([]byte, 10)
|
||||
totalN := 0
|
||||
for totalN < 10 {
|
||||
n, err := archive.Read(buf[totalN:])
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
return nil, fmt.Errorf("Tarball too short")
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
totalN += n
|
||||
utils.Debugf("[tar autodetect] n: %d", n)
|
||||
}
|
||||
compression := DetectCompression(buf)
|
||||
wrap := io.MultiReader(bytes.NewReader(buf), archive)
|
||||
|
||||
switch compression {
|
||||
case Uncompressed:
|
||||
return wrap, nil
|
||||
case Gzip:
|
||||
return gzip.NewReader(wrap)
|
||||
case Bzip2:
|
||||
return bzip2.NewReader(wrap), nil
|
||||
case Xz:
|
||||
return xzDecompress(wrap)
|
||||
default:
|
||||
return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
|
||||
}
|
||||
}
|
||||
|
||||
func (compression *Compression) Flag() string {
|
||||
switch *compression {
|
||||
case Bzip2:
|
||||
@@ -80,20 +127,78 @@ func (compression *Compression) Extension() string {
|
||||
// Tar creates an archive from the directory at `path`, and returns it as a
|
||||
// stream of bytes.
|
||||
func Tar(path string, compression Compression) (io.Reader, error) {
|
||||
return TarFilter(path, compression, nil)
|
||||
return TarFilter(path, &TarOptions{Recursive: true, Compression: compression})
|
||||
}
|
||||
|
||||
func escapeName(name string) string {
|
||||
escaped := make([]byte, 0)
|
||||
for i, c := range []byte(name) {
|
||||
if i == 0 && c == '/' {
|
||||
continue
|
||||
}
|
||||
// all printable chars except "-" which is 0x2d
|
||||
if (0x20 <= c && c <= 0x7E) && c != 0x2d {
|
||||
escaped = append(escaped, c)
|
||||
} else {
|
||||
escaped = append(escaped, fmt.Sprintf("\\%03o", c)...)
|
||||
}
|
||||
}
|
||||
return string(escaped)
|
||||
}
|
||||
|
||||
// Tar creates an archive from the directory at `path`, only including files whose relative
|
||||
// paths are included in `filter`. If `filter` is nil, then all files are included.
|
||||
func TarFilter(path string, compression Compression, filter []string) (io.Reader, error) {
|
||||
args := []string{"tar", "--numeric-owner", "-f", "-", "-C", path}
|
||||
if filter == nil {
|
||||
filter = []string{"."}
|
||||
func TarFilter(path string, options *TarOptions) (io.Reader, error) {
|
||||
args := []string{"tar", "--numeric-owner", "-f", "-", "-C", path, "-T", "-"}
|
||||
if options.Includes == nil {
|
||||
options.Includes = []string{"."}
|
||||
}
|
||||
for _, f := range filter {
|
||||
args = append(args, "-c"+compression.Flag(), f)
|
||||
args = append(args, "-c"+options.Compression.Flag())
|
||||
|
||||
for _, exclude := range options.Excludes {
|
||||
args = append(args, fmt.Sprintf("--exclude=%s", exclude))
|
||||
}
|
||||
return CmdStream(exec.Command(args[0], args[1:]...))
|
||||
|
||||
if !options.Recursive {
|
||||
args = append(args, "--no-recursion")
|
||||
}
|
||||
|
||||
files := ""
|
||||
for _, f := range options.Includes {
|
||||
files = files + escapeName(f) + "\n"
|
||||
}
|
||||
|
||||
tmpDir := ""
|
||||
|
||||
if options.CreateFiles != nil {
|
||||
var err error // Can't use := here or we override the outer tmpDir
|
||||
tmpDir, err = ioutil.TempDir("", "docker-tar")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
files = files + "-C" + tmpDir + "\n"
|
||||
for _, f := range options.CreateFiles {
|
||||
path := filepath.Join(tmpDir, f)
|
||||
err := os.MkdirAll(filepath.Dir(path), 0600)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if file, err := os.OpenFile(path, os.O_CREATE, 0600); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
file.Close()
|
||||
}
|
||||
files = files + escapeName(f) + "\n"
|
||||
}
|
||||
}
|
||||
|
||||
return CmdStream(exec.Command(args[0], args[1:]...), bytes.NewBufferString(files), func() {
|
||||
if tmpDir != "" {
|
||||
_ = os.RemoveAll(tmpDir)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
|
||||
@@ -101,7 +206,7 @@ func TarFilter(path string, compression Compression, filter []string) (io.Reader
|
||||
// The archive may be compressed with one of the following algorithms:
|
||||
// identity (uncompressed), gzip, bzip2, xz.
|
||||
// FIXME: specify behavior when target path exists vs. doesn't exist.
|
||||
func Untar(archive io.Reader, path string) error {
|
||||
func Untar(archive io.Reader, path string, options *TarOptions) error {
|
||||
if archive == nil {
|
||||
return fmt.Errorf("Empty archive")
|
||||
}
|
||||
@@ -123,8 +228,15 @@ func Untar(archive io.Reader, path string) error {
|
||||
compression := DetectCompression(buf)
|
||||
|
||||
utils.Debugf("Archive compression detected: %s", compression.Extension())
|
||||
args := []string{"--numeric-owner", "-f", "-", "-C", path, "-x" + compression.Flag()}
|
||||
|
||||
cmd := exec.Command("tar", "--numeric-owner", "-f", "-", "-C", path, "-x"+compression.Flag())
|
||||
if options != nil {
|
||||
for _, exclude := range options.Excludes {
|
||||
args = append(args, fmt.Sprintf("--exclude=%s", exclude))
|
||||
}
|
||||
}
|
||||
|
||||
cmd := exec.Command("tar", args...)
|
||||
cmd.Stdin = io.MultiReader(bytes.NewReader(buf), archive)
|
||||
// Hardcode locale environment for predictable outcome regardless of host configuration.
|
||||
// (see https://github.com/dotcloud/docker/issues/355)
|
||||
@@ -141,11 +253,11 @@ func Untar(archive io.Reader, path string) error {
|
||||
// TarUntar aborts and returns the error.
|
||||
func TarUntar(src string, filter []string, dst string) error {
|
||||
utils.Debugf("TarUntar(%s %s %s)", src, filter, dst)
|
||||
archive, err := TarFilter(src, Uncompressed, filter)
|
||||
archive, err := TarFilter(src, &TarOptions{Compression: Uncompressed, Includes: filter, Recursive: true})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return Untar(archive, dst)
|
||||
return Untar(archive, dst, nil)
|
||||
}
|
||||
|
||||
// UntarPath is a convenience function which looks for an archive
|
||||
@@ -153,7 +265,7 @@ func TarUntar(src string, filter []string, dst string) error {
|
||||
func UntarPath(src, dst string) error {
|
||||
if archive, err := os.Open(src); err != nil {
|
||||
return err
|
||||
} else if err := Untar(archive, dst); err != nil {
|
||||
} else if err := Untar(archive, dst, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
@@ -187,7 +299,7 @@ func CopyWithTar(src, dst string) error {
|
||||
//
|
||||
// If `dst` ends with a trailing slash '/', the final destination path
|
||||
// will be `dst/base(src)`.
|
||||
func CopyFileWithTar(src, dst string) error {
|
||||
func CopyFileWithTar(src, dst string) (err error) {
|
||||
utils.Debugf("CopyFileWithTar(%s, %s)", src, dst)
|
||||
srcSt, err := os.Stat(src)
|
||||
if err != nil {
|
||||
@@ -204,37 +316,70 @@ func CopyFileWithTar(src, dst string) error {
|
||||
if err := os.MkdirAll(filepath.Dir(dst), 0700); err != nil && !os.IsExist(err) {
|
||||
return err
|
||||
}
|
||||
buf := new(bytes.Buffer)
|
||||
tw := tar.NewWriter(buf)
|
||||
hdr, err := tar.FileInfoHeader(srcSt, "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hdr.Name = filepath.Base(dst)
|
||||
if err := tw.WriteHeader(hdr); err != nil {
|
||||
return err
|
||||
}
|
||||
srcF, err := os.Open(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := io.Copy(tw, srcF); err != nil {
|
||||
return err
|
||||
}
|
||||
tw.Close()
|
||||
return Untar(buf, filepath.Dir(dst))
|
||||
|
||||
r, w := io.Pipe()
|
||||
errC := utils.Go(func() error {
|
||||
defer w.Close()
|
||||
|
||||
srcF, err := os.Open(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer srcF.Close()
|
||||
|
||||
tw := tar.NewWriter(w)
|
||||
hdr, err := tar.FileInfoHeader(srcSt, "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hdr.Name = filepath.Base(dst)
|
||||
if err := tw.WriteHeader(hdr); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := io.Copy(tw, srcF); err != nil {
|
||||
return err
|
||||
}
|
||||
tw.Close()
|
||||
return nil
|
||||
})
|
||||
defer func() {
|
||||
if er := <-errC; err != nil {
|
||||
err = er
|
||||
}
|
||||
}()
|
||||
return Untar(r, filepath.Dir(dst), nil)
|
||||
}
|
||||
|
||||
// CmdStream executes a command, and returns its stdout as a stream.
|
||||
// If the command fails to run or doesn't complete successfully, an error
|
||||
// will be returned, including anything written on stderr.
|
||||
func CmdStream(cmd *exec.Cmd) (io.Reader, error) {
|
||||
func CmdStream(cmd *exec.Cmd, input io.Reader, atEnd func()) (io.Reader, error) {
|
||||
if input != nil {
|
||||
stdin, err := cmd.StdinPipe()
|
||||
if err != nil {
|
||||
if atEnd != nil {
|
||||
atEnd()
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
// Write stdin if any
|
||||
go func() {
|
||||
io.Copy(stdin, input)
|
||||
stdin.Close()
|
||||
}()
|
||||
}
|
||||
stdout, err := cmd.StdoutPipe()
|
||||
if err != nil {
|
||||
if atEnd != nil {
|
||||
atEnd()
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
stderr, err := cmd.StderrPipe()
|
||||
if err != nil {
|
||||
if atEnd != nil {
|
||||
atEnd()
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
pipeR, pipeW := io.Pipe()
|
||||
@@ -259,6 +404,9 @@ func CmdStream(cmd *exec.Cmd) (io.Reader, error) {
|
||||
} else {
|
||||
pipeW.Close()
|
||||
}
|
||||
if atEnd != nil {
|
||||
atEnd()
|
||||
}
|
||||
}()
|
||||
// Run the command and return the pipe
|
||||
if err := cmd.Start(); err != nil {
|
||||
|
||||
@@ -14,7 +14,7 @@ import (
|
||||
|
||||
func TestCmdStreamLargeStderr(t *testing.T) {
|
||||
cmd := exec.Command("/bin/sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello")
|
||||
out, err := CmdStream(cmd)
|
||||
out, err := CmdStream(cmd, nil, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to start command: %s", err)
|
||||
}
|
||||
@@ -35,7 +35,7 @@ func TestCmdStreamLargeStderr(t *testing.T) {
|
||||
|
||||
func TestCmdStreamBad(t *testing.T) {
|
||||
badCmd := exec.Command("/bin/sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1")
|
||||
out, err := CmdStream(badCmd)
|
||||
out, err := CmdStream(badCmd, nil, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to start command: %s", err)
|
||||
}
|
||||
@@ -50,7 +50,7 @@ func TestCmdStreamBad(t *testing.T) {
|
||||
|
||||
func TestCmdStreamGood(t *testing.T) {
|
||||
cmd := exec.Command("/bin/sh", "-c", "echo hello; exit 0")
|
||||
out, err := CmdStream(cmd)
|
||||
out, err := CmdStream(cmd, nil, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -83,7 +83,7 @@ func tarUntar(t *testing.T, origin string, compression Compression) error {
|
||||
return err
|
||||
}
|
||||
defer os.RemoveAll(tmp)
|
||||
if err := Untar(archive, tmp); err != nil {
|
||||
if err := Untar(archive, tmp, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := os.Stat(tmp); err != nil {
|
||||
|
||||
333
archive/changes.go
Normal file
333
archive/changes.go
Normal file
@@ -0,0 +1,333 @@
|
||||
package archive
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
type ChangeType int
|
||||
|
||||
const (
|
||||
ChangeModify = iota
|
||||
ChangeAdd
|
||||
ChangeDelete
|
||||
)
|
||||
|
||||
type Change struct {
|
||||
Path string
|
||||
Kind ChangeType
|
||||
}
|
||||
|
||||
func (change *Change) String() string {
|
||||
var kind string
|
||||
switch change.Kind {
|
||||
case ChangeModify:
|
||||
kind = "C"
|
||||
case ChangeAdd:
|
||||
kind = "A"
|
||||
case ChangeDelete:
|
||||
kind = "D"
|
||||
}
|
||||
return fmt.Sprintf("%s %s", kind, change.Path)
|
||||
}
|
||||
|
||||
// Gnu tar and the go tar writer don't have sub-second mtime
|
||||
// precision, which is problematic when we apply changes via tar
|
||||
// files, we handle this by comparing for exact times, *or* same
|
||||
// second count and either a or b having exactly 0 nanoseconds
|
||||
func sameFsTime(a, b time.Time) bool {
|
||||
return a == b ||
|
||||
(a.Unix() == b.Unix() &&
|
||||
(a.Nanosecond() == 0 || b.Nanosecond() == 0))
|
||||
}
|
||||
|
||||
func sameFsTimeSpec(a, b syscall.Timespec) bool {
|
||||
return a.Sec == b.Sec &&
|
||||
(a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0)
|
||||
}
|
||||
|
||||
func Changes(layers []string, rw string) ([]Change, error) {
|
||||
var changes []Change
|
||||
err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Rebase path
|
||||
path, err = filepath.Rel(rw, path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
path = filepath.Join("/", path)
|
||||
|
||||
// Skip root
|
||||
if path == "/" {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Skip AUFS metadata
|
||||
if matched, err := filepath.Match("/.wh..wh.*", path); err != nil || matched {
|
||||
return err
|
||||
}
|
||||
|
||||
change := Change{
|
||||
Path: path,
|
||||
}
|
||||
|
||||
// Find out what kind of modification happened
|
||||
file := filepath.Base(path)
|
||||
// If there is a whiteout, then the file was removed
|
||||
if strings.HasPrefix(file, ".wh.") {
|
||||
originalFile := file[len(".wh."):]
|
||||
change.Path = filepath.Join(filepath.Dir(path), originalFile)
|
||||
change.Kind = ChangeDelete
|
||||
} else {
|
||||
// Otherwise, the file was added
|
||||
change.Kind = ChangeAdd
|
||||
|
||||
// ...Unless it already existed in a top layer, in which case, it's a modification
|
||||
for _, layer := range layers {
|
||||
stat, err := os.Stat(filepath.Join(layer, path))
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
if err == nil {
|
||||
// The file existed in the top layer, so that's a modification
|
||||
|
||||
// However, if it's a directory, maybe it wasn't actually modified.
|
||||
// If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar
|
||||
if stat.IsDir() && f.IsDir() {
|
||||
if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) {
|
||||
// Both directories are the same, don't record the change
|
||||
return nil
|
||||
}
|
||||
}
|
||||
change.Kind = ChangeModify
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Record change
|
||||
changes = append(changes, change)
|
||||
return nil
|
||||
})
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
return nil, err
|
||||
}
|
||||
return changes, nil
|
||||
}
|
||||
|
||||
type FileInfo struct {
|
||||
parent *FileInfo
|
||||
name string
|
||||
stat syscall.Stat_t
|
||||
children map[string]*FileInfo
|
||||
}
|
||||
|
||||
func (root *FileInfo) LookUp(path string) *FileInfo {
|
||||
parent := root
|
||||
if path == "/" {
|
||||
return root
|
||||
}
|
||||
|
||||
pathElements := strings.Split(path, "/")
|
||||
for _, elem := range pathElements {
|
||||
if elem != "" {
|
||||
child := parent.children[elem]
|
||||
if child == nil {
|
||||
return nil
|
||||
}
|
||||
parent = child
|
||||
}
|
||||
}
|
||||
return parent
|
||||
}
|
||||
|
||||
func (info *FileInfo) path() string {
|
||||
if info.parent == nil {
|
||||
return "/"
|
||||
}
|
||||
return filepath.Join(info.parent.path(), info.name)
|
||||
}
|
||||
|
||||
func (info *FileInfo) isDir() bool {
|
||||
return info.parent == nil || info.stat.Mode&syscall.S_IFDIR == syscall.S_IFDIR
|
||||
}
|
||||
|
||||
func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
|
||||
if oldInfo == nil {
|
||||
// add
|
||||
change := Change{
|
||||
Path: info.path(),
|
||||
Kind: ChangeAdd,
|
||||
}
|
||||
*changes = append(*changes, change)
|
||||
}
|
||||
|
||||
// We make a copy so we can modify it to detect additions
|
||||
// also, we only recurse on the old dir if the new info is a directory
|
||||
// otherwise any previous delete/change is considered recursive
|
||||
oldChildren := make(map[string]*FileInfo)
|
||||
if oldInfo != nil && info.isDir() {
|
||||
for k, v := range oldInfo.children {
|
||||
oldChildren[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
for name, newChild := range info.children {
|
||||
oldChild, _ := oldChildren[name]
|
||||
if oldChild != nil {
|
||||
// change?
|
||||
oldStat := &oldChild.stat
|
||||
newStat := &newChild.stat
|
||||
// Note: We can't compare inode or ctime or blocksize here, because these change
|
||||
// when copying a file into a container. However, that is not generally a problem
|
||||
// because any content change will change mtime, and any status change should
|
||||
// be visible when actually comparing the stat fields. The only time this
|
||||
// breaks down is if some code intentionally hides a change by setting
|
||||
// back mtime
|
||||
if oldStat.Mode != newStat.Mode ||
|
||||
oldStat.Uid != newStat.Uid ||
|
||||
oldStat.Gid != newStat.Gid ||
|
||||
oldStat.Rdev != newStat.Rdev ||
|
||||
// Don't look at size for dirs, its not a good measure of change
|
||||
(oldStat.Size != newStat.Size && oldStat.Mode&syscall.S_IFDIR != syscall.S_IFDIR) ||
|
||||
!sameFsTimeSpec(getLastModification(oldStat), getLastModification(newStat)) {
|
||||
change := Change{
|
||||
Path: newChild.path(),
|
||||
Kind: ChangeModify,
|
||||
}
|
||||
*changes = append(*changes, change)
|
||||
}
|
||||
|
||||
// Remove from copy so we can detect deletions
|
||||
delete(oldChildren, name)
|
||||
}
|
||||
|
||||
newChild.addChanges(oldChild, changes)
|
||||
}
|
||||
for _, oldChild := range oldChildren {
|
||||
// delete
|
||||
change := Change{
|
||||
Path: oldChild.path(),
|
||||
Kind: ChangeDelete,
|
||||
}
|
||||
*changes = append(*changes, change)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (info *FileInfo) Changes(oldInfo *FileInfo) []Change {
|
||||
var changes []Change
|
||||
|
||||
info.addChanges(oldInfo, &changes)
|
||||
|
||||
return changes
|
||||
}
|
||||
|
||||
func newRootFileInfo() *FileInfo {
|
||||
root := &FileInfo{
|
||||
name: "/",
|
||||
children: make(map[string]*FileInfo),
|
||||
}
|
||||
return root
|
||||
}
|
||||
|
||||
func collectFileInfo(sourceDir string) (*FileInfo, error) {
|
||||
root := newRootFileInfo()
|
||||
|
||||
err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Rebase path
|
||||
relPath, err := filepath.Rel(sourceDir, path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
relPath = filepath.Join("/", relPath)
|
||||
|
||||
if relPath == "/" {
|
||||
return nil
|
||||
}
|
||||
|
||||
parent := root.LookUp(filepath.Dir(relPath))
|
||||
if parent == nil {
|
||||
return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath)
|
||||
}
|
||||
|
||||
info := &FileInfo{
|
||||
name: filepath.Base(relPath),
|
||||
children: make(map[string]*FileInfo),
|
||||
parent: parent,
|
||||
}
|
||||
|
||||
if err := syscall.Lstat(path, &info.stat); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
parent.children[info.name] = info
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return root, nil
|
||||
}
|
||||
|
||||
// Compare two directories and generate an array of Change objects describing the changes
|
||||
func ChangesDirs(newDir, oldDir string) ([]Change, error) {
|
||||
oldRoot, err := collectFileInfo(oldDir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
newRoot, err := collectFileInfo(newDir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return newRoot.Changes(oldRoot), nil
|
||||
}
|
||||
|
||||
func ChangesSize(newDir string, changes []Change) int64 {
|
||||
var size int64
|
||||
for _, change := range changes {
|
||||
if change.Kind == ChangeModify || change.Kind == ChangeAdd {
|
||||
file := filepath.Join(newDir, change.Path)
|
||||
fileInfo, _ := os.Lstat(file)
|
||||
if fileInfo != nil && !fileInfo.IsDir() {
|
||||
size += fileInfo.Size()
|
||||
}
|
||||
}
|
||||
}
|
||||
return size
|
||||
}
|
||||
|
||||
func ExportChanges(dir string, changes []Change) (Archive, error) {
|
||||
files := make([]string, 0)
|
||||
deletions := make([]string, 0)
|
||||
for _, change := range changes {
|
||||
if change.Kind == ChangeModify || change.Kind == ChangeAdd {
|
||||
files = append(files, change.Path)
|
||||
}
|
||||
if change.Kind == ChangeDelete {
|
||||
base := filepath.Base(change.Path)
|
||||
dir := filepath.Dir(change.Path)
|
||||
deletions = append(deletions, filepath.Join(dir, ".wh."+base))
|
||||
}
|
||||
}
|
||||
// FIXME: Why do we create whiteout files inside Tar code ?
|
||||
return TarFilter(dir, &TarOptions{
|
||||
Compression: Uncompressed,
|
||||
Includes: files,
|
||||
Recursive: false,
|
||||
CreateFiles: deletions,
|
||||
})
|
||||
}
|
||||
301
archive/changes_test.go
Normal file
301
archive/changes_test.go
Normal file
@@ -0,0 +1,301 @@
|
||||
package archive
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"sort"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func max(x, y int) int {
|
||||
if x >= y {
|
||||
return x
|
||||
}
|
||||
return y
|
||||
}
|
||||
|
||||
func copyDir(src, dst string) error {
|
||||
cmd := exec.Command("cp", "-a", src, dst)
|
||||
if err := cmd.Run(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Helper to sort []Change by path
|
||||
type byPath struct{ changes []Change }
|
||||
|
||||
func (b byPath) Less(i, j int) bool { return b.changes[i].Path < b.changes[j].Path }
|
||||
func (b byPath) Len() int { return len(b.changes) }
|
||||
func (b byPath) Swap(i, j int) { b.changes[i], b.changes[j] = b.changes[j], b.changes[i] }
|
||||
|
||||
type FileType uint32
|
||||
|
||||
const (
|
||||
Regular FileType = iota
|
||||
Dir
|
||||
Symlink
|
||||
)
|
||||
|
||||
type FileData struct {
|
||||
filetype FileType
|
||||
path string
|
||||
contents string
|
||||
permissions os.FileMode
|
||||
}
|
||||
|
||||
func createSampleDir(t *testing.T, root string) {
|
||||
files := []FileData{
|
||||
{Regular, "file1", "file1\n", 0600},
|
||||
{Regular, "file2", "file2\n", 0666},
|
||||
{Regular, "file3", "file3\n", 0404},
|
||||
{Regular, "file4", "file4\n", 0600},
|
||||
{Regular, "file5", "file5\n", 0600},
|
||||
{Regular, "file6", "file6\n", 0600},
|
||||
{Regular, "file7", "file7\n", 0600},
|
||||
{Dir, "dir1", "", 0740},
|
||||
{Regular, "dir1/file1-1", "file1-1\n", 01444},
|
||||
{Regular, "dir1/file1-2", "file1-2\n", 0666},
|
||||
{Dir, "dir2", "", 0700},
|
||||
{Regular, "dir2/file2-1", "file2-1\n", 0666},
|
||||
{Regular, "dir2/file2-2", "file2-2\n", 0666},
|
||||
{Dir, "dir3", "", 0700},
|
||||
{Regular, "dir3/file3-1", "file3-1\n", 0666},
|
||||
{Regular, "dir3/file3-2", "file3-2\n", 0666},
|
||||
{Dir, "dir4", "", 0700},
|
||||
{Regular, "dir4/file3-1", "file4-1\n", 0666},
|
||||
{Regular, "dir4/file3-2", "file4-2\n", 0666},
|
||||
{Symlink, "symlink1", "target1", 0666},
|
||||
{Symlink, "symlink2", "target2", 0666},
|
||||
}
|
||||
|
||||
now := time.Now()
|
||||
for _, info := range files {
|
||||
p := path.Join(root, info.path)
|
||||
if info.filetype == Dir {
|
||||
if err := os.MkdirAll(p, info.permissions); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
} else if info.filetype == Regular {
|
||||
if err := ioutil.WriteFile(p, []byte(info.contents), info.permissions); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
} else if info.filetype == Symlink {
|
||||
if err := os.Symlink(info.contents, p); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
if info.filetype != Symlink {
|
||||
// Set a consistent ctime, atime for all files and dirs
|
||||
if err := os.Chtimes(p, now, now); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Create an directory, copy it, make sure we report no changes between the two
|
||||
func TestChangesDirsEmpty(t *testing.T) {
|
||||
src, err := ioutil.TempDir("", "docker-changes-test")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
createSampleDir(t, src)
|
||||
dst := src + "-copy"
|
||||
if err := copyDir(src, dst); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
changes, err := ChangesDirs(dst, src)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if len(changes) != 0 {
|
||||
t.Fatalf("Reported changes for identical dirs: %v", changes)
|
||||
}
|
||||
os.RemoveAll(src)
|
||||
os.RemoveAll(dst)
|
||||
}
|
||||
|
||||
func mutateSampleDir(t *testing.T, root string) {
|
||||
// Remove a regular file
|
||||
if err := os.RemoveAll(path.Join(root, "file1")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Remove a directory
|
||||
if err := os.RemoveAll(path.Join(root, "dir1")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Remove a symlink
|
||||
if err := os.RemoveAll(path.Join(root, "symlink1")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Rewrite a file
|
||||
if err := ioutil.WriteFile(path.Join(root, "file2"), []byte("fileN\n"), 0777); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Replace a file
|
||||
if err := os.RemoveAll(path.Join(root, "file3")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := ioutil.WriteFile(path.Join(root, "file3"), []byte("fileM\n"), 0404); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Touch file
|
||||
if err := os.Chtimes(path.Join(root, "file4"), time.Now(), time.Now()); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Replace file with dir
|
||||
if err := os.RemoveAll(path.Join(root, "file5")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := os.MkdirAll(path.Join(root, "file5"), 0666); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Create new file
|
||||
if err := ioutil.WriteFile(path.Join(root, "filenew"), []byte("filenew\n"), 0777); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Create new dir
|
||||
if err := os.MkdirAll(path.Join(root, "dirnew"), 0766); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Create a new symlink
|
||||
if err := os.Symlink("targetnew", path.Join(root, "symlinknew")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Change a symlink
|
||||
if err := os.RemoveAll(path.Join(root, "symlink2")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := os.Symlink("target2change", path.Join(root, "symlink2")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Replace dir with file
|
||||
if err := os.RemoveAll(path.Join(root, "dir2")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := ioutil.WriteFile(path.Join(root, "dir2"), []byte("dir2\n"), 0777); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Touch dir
|
||||
if err := os.Chtimes(path.Join(root, "dir3"), time.Now(), time.Now()); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestChangesDirsMutated(t *testing.T) {
|
||||
src, err := ioutil.TempDir("", "docker-changes-test")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
createSampleDir(t, src)
|
||||
dst := src + "-copy"
|
||||
if err := copyDir(src, dst); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(src)
|
||||
defer os.RemoveAll(dst)
|
||||
|
||||
mutateSampleDir(t, dst)
|
||||
|
||||
changes, err := ChangesDirs(dst, src)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
sort.Sort(byPath{changes})
|
||||
|
||||
expectedChanges := []Change{
|
||||
{"/dir1", ChangeDelete},
|
||||
{"/dir2", ChangeModify},
|
||||
{"/dir3", ChangeModify},
|
||||
{"/dirnew", ChangeAdd},
|
||||
{"/file1", ChangeDelete},
|
||||
{"/file2", ChangeModify},
|
||||
{"/file3", ChangeModify},
|
||||
{"/file4", ChangeModify},
|
||||
{"/file5", ChangeModify},
|
||||
{"/filenew", ChangeAdd},
|
||||
{"/symlink1", ChangeDelete},
|
||||
{"/symlink2", ChangeModify},
|
||||
{"/symlinknew", ChangeAdd},
|
||||
}
|
||||
|
||||
for i := 0; i < max(len(changes), len(expectedChanges)); i++ {
|
||||
if i >= len(expectedChanges) {
|
||||
t.Fatalf("unexpected change %s\n", changes[i].String())
|
||||
}
|
||||
if i >= len(changes) {
|
||||
t.Fatalf("no change for expected change %s\n", expectedChanges[i].String())
|
||||
}
|
||||
if changes[i].Path == expectedChanges[i].Path {
|
||||
if changes[i] != expectedChanges[i] {
|
||||
t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, changes[i].String(), expectedChanges[i].String())
|
||||
}
|
||||
} else if changes[i].Path < expectedChanges[i].Path {
|
||||
t.Fatalf("unexpected change %s\n", changes[i].String())
|
||||
} else {
|
||||
t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestApplyLayer(t *testing.T) {
|
||||
src, err := ioutil.TempDir("", "docker-changes-test")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
createSampleDir(t, src)
|
||||
defer os.RemoveAll(src)
|
||||
dst := src + "-copy"
|
||||
if err := copyDir(src, dst); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
mutateSampleDir(t, dst)
|
||||
defer os.RemoveAll(dst)
|
||||
|
||||
changes, err := ChangesDirs(dst, src)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
layer, err := ExportChanges(dst, changes)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
layerCopy, err := NewTempArchive(layer, "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := ApplyLayer(src, layerCopy); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
changes2, err := ChangesDirs(src, dst)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if len(changes2) != 0 {
|
||||
t.Fatalf("Unexpected differences after reapplying mutation: %v", changes2)
|
||||
}
|
||||
}
|
||||
194
archive/diff.go
Normal file
194
archive/diff.go
Normal file
@@ -0,0 +1,194 @@
|
||||
package archive
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"github.com/dotcloud/docker/utils"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes.
|
||||
// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major,
|
||||
// then the top 12 bits of the minor
|
||||
func mkdev(major int64, minor int64) uint32 {
|
||||
return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff))
|
||||
}
|
||||
func timeToTimespec(time time.Time) (ts syscall.Timespec) {
|
||||
if time.IsZero() {
|
||||
// Return UTIME_OMIT special value
|
||||
ts.Sec = 0
|
||||
ts.Nsec = ((1 << 30) - 2)
|
||||
return
|
||||
}
|
||||
return syscall.NsecToTimespec(time.UnixNano())
|
||||
}
|
||||
|
||||
// ApplyLayer parses a diff in the standard layer format from `layer`, and
|
||||
// applies it to the directory `dest`.
|
||||
func ApplyLayer(dest string, layer Archive) error {
|
||||
// We need to be able to set any perms
|
||||
oldmask := syscall.Umask(0)
|
||||
defer syscall.Umask(oldmask)
|
||||
|
||||
layer, err := DecompressStream(layer)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tr := tar.NewReader(layer)
|
||||
|
||||
var dirs []*tar.Header
|
||||
|
||||
// Iterate through the files in the archive.
|
||||
for {
|
||||
hdr, err := tr.Next()
|
||||
if err == io.EOF {
|
||||
// end of tar archive
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Normalize name, for safety and for a simple is-root check
|
||||
hdr.Name = filepath.Clean(hdr.Name)
|
||||
|
||||
if !strings.HasSuffix(hdr.Name, "/") {
|
||||
// Not the root directory, ensure that the parent directory exists.
|
||||
// This happened in some tests where an image had a tarfile without any
|
||||
// parent directories.
|
||||
parent := filepath.Dir(hdr.Name)
|
||||
parentPath := filepath.Join(dest, parent)
|
||||
if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
|
||||
err = os.MkdirAll(parentPath, 600)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Skip AUFS metadata dirs
|
||||
if strings.HasPrefix(hdr.Name, ".wh..wh.") {
|
||||
continue
|
||||
}
|
||||
|
||||
path := filepath.Join(dest, hdr.Name)
|
||||
base := filepath.Base(path)
|
||||
if strings.HasPrefix(base, ".wh.") {
|
||||
originalBase := base[len(".wh."):]
|
||||
originalPath := filepath.Join(filepath.Dir(path), originalBase)
|
||||
if err := os.RemoveAll(originalPath); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
// If path exits we almost always just want to remove and replace it.
|
||||
// The only exception is when it is a directory *and* the file from
|
||||
// the layer is also a directory. Then we want to merge them (i.e.
|
||||
// just apply the metadata from the layer).
|
||||
hasDir := false
|
||||
if fi, err := os.Lstat(path); err == nil {
|
||||
if fi.IsDir() && hdr.Typeflag == tar.TypeDir {
|
||||
hasDir = true
|
||||
} else {
|
||||
if err := os.RemoveAll(path); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
switch hdr.Typeflag {
|
||||
case tar.TypeDir:
|
||||
if !hasDir {
|
||||
err = os.Mkdir(path, os.FileMode(hdr.Mode))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
dirs = append(dirs, hdr)
|
||||
|
||||
case tar.TypeReg, tar.TypeRegA:
|
||||
// Source is regular file
|
||||
file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, os.FileMode(hdr.Mode))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := io.Copy(file, tr); err != nil {
|
||||
file.Close()
|
||||
return err
|
||||
}
|
||||
file.Close()
|
||||
|
||||
case tar.TypeBlock, tar.TypeChar, tar.TypeFifo:
|
||||
mode := uint32(hdr.Mode & 07777)
|
||||
switch hdr.Typeflag {
|
||||
case tar.TypeBlock:
|
||||
mode |= syscall.S_IFBLK
|
||||
case tar.TypeChar:
|
||||
mode |= syscall.S_IFCHR
|
||||
case tar.TypeFifo:
|
||||
mode |= syscall.S_IFIFO
|
||||
}
|
||||
|
||||
if err := syscall.Mknod(path, mode, int(mkdev(hdr.Devmajor, hdr.Devminor))); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
case tar.TypeLink:
|
||||
if err := os.Link(filepath.Join(dest, hdr.Linkname), path); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
case tar.TypeSymlink:
|
||||
if err := os.Symlink(hdr.Linkname, path); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
default:
|
||||
utils.Debugf("unhandled type %d\n", hdr.Typeflag)
|
||||
}
|
||||
|
||||
if err = syscall.Lchown(path, hdr.Uid, hdr.Gid); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// There is no LChmod, so ignore mode for symlink. Also, this
|
||||
// must happen after chown, as that can modify the file mode
|
||||
if hdr.Typeflag != tar.TypeSymlink {
|
||||
err = syscall.Chmod(path, uint32(hdr.Mode&07777))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Directories must be handled at the end to avoid further
|
||||
// file creation in them to modify the mtime
|
||||
if hdr.Typeflag != tar.TypeDir {
|
||||
ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
|
||||
// syscall.UtimesNano doesn't support a NOFOLLOW flag atm, and
|
||||
if hdr.Typeflag != tar.TypeSymlink {
|
||||
if err := syscall.UtimesNano(path, ts); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err := LUtimesNano(path, ts); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, hdr := range dirs {
|
||||
path := filepath.Join(dest, hdr.Name)
|
||||
ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
|
||||
if err := syscall.UtimesNano(path, ts); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
15
archive/stat_darwin.go
Normal file
15
archive/stat_darwin.go
Normal file
@@ -0,0 +1,15 @@
|
||||
package archive
|
||||
|
||||
import "syscall"
|
||||
|
||||
func getLastAccess(stat *syscall.Stat_t) syscall.Timespec {
|
||||
return stat.Atimespec
|
||||
}
|
||||
|
||||
func getLastModification(stat *syscall.Stat_t) syscall.Timespec {
|
||||
return stat.Mtimespec
|
||||
}
|
||||
|
||||
func LUtimesNano(path string, ts []syscall.Timespec) error {
|
||||
return nil
|
||||
}
|
||||
32
archive/stat_linux.go
Normal file
32
archive/stat_linux.go
Normal file
@@ -0,0 +1,32 @@
|
||||
package archive
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func getLastAccess(stat *syscall.Stat_t) syscall.Timespec {
|
||||
return stat.Atim
|
||||
}
|
||||
|
||||
func getLastModification(stat *syscall.Stat_t) syscall.Timespec {
|
||||
return stat.Mtim
|
||||
}
|
||||
|
||||
func LUtimesNano(path string, ts []syscall.Timespec) error {
|
||||
// These are not currently available in syscall
|
||||
AT_FDCWD := -100
|
||||
AT_SYMLINK_NOFOLLOW := 0x100
|
||||
|
||||
var _path *byte
|
||||
_path, err := syscall.BytePtrFromString(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, _, err := syscall.Syscall6(syscall.SYS_UTIMENSAT, uintptr(AT_FDCWD), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), uintptr(AT_SYMLINK_NOFOLLOW), 0, 0); err != 0 && err != syscall.ENOSYS {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
49
auth/auth.go
49
auth/auth.go
@@ -63,7 +63,7 @@ func decodeAuth(authStr string) (string, string, error) {
|
||||
if n > decLen {
|
||||
return "", "", fmt.Errorf("Something went wrong decoding auth config")
|
||||
}
|
||||
arr := strings.Split(string(decoded), ":")
|
||||
arr := strings.SplitN(string(decoded), ":", 2)
|
||||
if len(arr) != 2 {
|
||||
return "", "", fmt.Errorf("Invalid auth configuration file")
|
||||
}
|
||||
@@ -163,7 +163,7 @@ func Login(authConfig *AuthConfig, factory *utils.HTTPRequestFactory) (string, e
|
||||
|
||||
loginAgainstOfficialIndex := serverAddress == IndexServerAddress()
|
||||
|
||||
// to avoid sending the server address to the server it should be removed before marshalled
|
||||
// to avoid sending the server address to the server it should be removed before being marshalled
|
||||
authCopy := *authConfig
|
||||
authCopy.ServerAddress = ""
|
||||
|
||||
@@ -192,13 +192,6 @@ func Login(authConfig *AuthConfig, factory *utils.HTTPRequestFactory) (string, e
|
||||
} else {
|
||||
status = "Account created. Please see the documentation of the registry " + serverAddress + " for instructions how to activate it."
|
||||
}
|
||||
} else if reqStatusCode == 403 {
|
||||
if loginAgainstOfficialIndex {
|
||||
return "", fmt.Errorf("Login: Your account hasn't been activated. " +
|
||||
"Please check your e-mail for a confirmation link.")
|
||||
}
|
||||
return "", fmt.Errorf("Login: Your account hasn't been activated. " +
|
||||
"Please see the documentation of the registry " + serverAddress + " for instructions how to activate it.")
|
||||
} else if reqStatusCode == 400 {
|
||||
if string(reqBody) == "\"Username or email already exists\"" {
|
||||
req, err := factory.NewRequest("GET", serverAddress+"users/", nil)
|
||||
@@ -216,13 +209,39 @@ func Login(authConfig *AuthConfig, factory *utils.HTTPRequestFactory) (string, e
|
||||
status = "Login Succeeded"
|
||||
} else if resp.StatusCode == 401 {
|
||||
return "", fmt.Errorf("Wrong login/password, please try again")
|
||||
} else if resp.StatusCode == 403 {
|
||||
if loginAgainstOfficialIndex {
|
||||
return "", fmt.Errorf("Login: Account is not Active. Please check your e-mail for a confirmation link.")
|
||||
}
|
||||
return "", fmt.Errorf("Login: Account is not Active. Please see the documentation of the registry %s for instructions how to activate it.", serverAddress)
|
||||
} else {
|
||||
return "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body,
|
||||
resp.StatusCode, resp.Header)
|
||||
return "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body, resp.StatusCode, resp.Header)
|
||||
}
|
||||
} else {
|
||||
return "", fmt.Errorf("Registration: %s", reqBody)
|
||||
}
|
||||
} else if reqStatusCode == 401 {
|
||||
// This case would happen with private registries where /v1/users is
|
||||
// protected, so people can use `docker login` as an auth check.
|
||||
req, err := factory.NewRequest("GET", serverAddress+"users/", nil)
|
||||
req.SetBasicAuth(authConfig.Username, authConfig.Password)
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if resp.StatusCode == 200 {
|
||||
status = "Login Succeeded"
|
||||
} else if resp.StatusCode == 401 {
|
||||
return "", fmt.Errorf("Wrong login/password, please try again")
|
||||
} else {
|
||||
return "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body,
|
||||
resp.StatusCode, resp.Header)
|
||||
}
|
||||
} else {
|
||||
return "", fmt.Errorf("Unexpected status code [%d] : %s", reqStatusCode, reqBody)
|
||||
}
|
||||
@@ -235,11 +254,11 @@ func (config *ConfigFile) ResolveAuthConfig(registry string) AuthConfig {
|
||||
// default to the index server
|
||||
return config.Configs[IndexServerAddress()]
|
||||
}
|
||||
// if its not the index server there are three cases:
|
||||
// if it's not the index server there are three cases:
|
||||
//
|
||||
// 1. this is a full config url -> it should be used as is
|
||||
// 2. it could be a full url, but with the wrong protocol
|
||||
// 3. it can be the hostname optionally with a port
|
||||
// 1. a full config url -> it should be used as is
|
||||
// 2. a full url, but with the wrong protocol
|
||||
// 3. a hostname, with an optional port
|
||||
//
|
||||
// as there is only one auth entry which is fully qualified we need to start
|
||||
// parsing and matching
|
||||
|
||||
352
buildfile.go
352
buildfile.go
@@ -1,20 +1,30 @@
|
||||
package docker
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/dotcloud/docker/archive"
|
||||
"github.com/dotcloud/docker/auth"
|
||||
"github.com/dotcloud/docker/utils"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrDockerfileEmpty = errors.New("Dockerfile cannot be empty")
|
||||
)
|
||||
|
||||
type BuildFile interface {
|
||||
Build(io.Reader) (string, error)
|
||||
CmdFrom(string) error
|
||||
@@ -25,25 +35,35 @@ type buildFile struct {
|
||||
runtime *Runtime
|
||||
srv *Server
|
||||
|
||||
image string
|
||||
maintainer string
|
||||
config *Config
|
||||
context string
|
||||
image string
|
||||
maintainer string
|
||||
config *Config
|
||||
|
||||
contextPath string
|
||||
context *utils.TarSum
|
||||
|
||||
verbose bool
|
||||
utilizeCache bool
|
||||
rm bool
|
||||
|
||||
authConfig *auth.AuthConfig
|
||||
|
||||
tmpContainers map[string]struct{}
|
||||
tmpImages map[string]struct{}
|
||||
|
||||
out io.Writer
|
||||
outStream io.Writer
|
||||
errStream io.Writer
|
||||
|
||||
// Deprecated, original writer used for ImagePull. To be removed.
|
||||
outOld io.Writer
|
||||
sf *utils.StreamFormatter
|
||||
}
|
||||
|
||||
func (b *buildFile) clearTmp(containers map[string]struct{}) {
|
||||
for c := range containers {
|
||||
tmp := b.runtime.Get(c)
|
||||
b.runtime.Destroy(tmp)
|
||||
fmt.Fprintf(b.out, "Removing intermediate container %s\n", utils.TruncateID(c))
|
||||
fmt.Fprintf(b.outStream, "Removing intermediate container %s\n", utils.TruncateID(c))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -52,7 +72,7 @@ func (b *buildFile) CmdFrom(name string) error {
|
||||
if err != nil {
|
||||
if b.runtime.graph.IsNotExist(err) {
|
||||
remote, tag := utils.ParseRepositoryTag(name)
|
||||
if err := b.srv.ImagePull(remote, tag, b.out, utils.NewStreamFormatter(false), nil, nil, true); err != nil {
|
||||
if err := b.srv.ImagePull(remote, tag, b.outOld, b.sf, b.authConfig, nil, true); err != nil {
|
||||
return err
|
||||
}
|
||||
image, err = b.runtime.repositories.LookupImage(name)
|
||||
@@ -79,6 +99,27 @@ func (b *buildFile) CmdMaintainer(name string) error {
|
||||
return b.commit("", b.config.Cmd, fmt.Sprintf("MAINTAINER %s", name))
|
||||
}
|
||||
|
||||
// probeCache checks to see if image-caching is enabled (`b.utilizeCache`)
|
||||
// and if so attempts to look up the current `b.image` and `b.config` pair
|
||||
// in the current server `b.srv`. If an image is found, probeCache returns
|
||||
// `(true, nil)`. If no image is found, it returns `(false, nil)`. If there
|
||||
// is any error, it returns `(false, err)`.
|
||||
func (b *buildFile) probeCache() (bool, error) {
|
||||
if b.utilizeCache {
|
||||
if cache, err := b.srv.ImageGetCached(b.image, b.config); err != nil {
|
||||
return false, err
|
||||
} else if cache != nil {
|
||||
fmt.Fprintf(b.outStream, " ---> Using cache\n")
|
||||
utils.Debugf("[BUILDER] Use cached version")
|
||||
b.image = cache.ID
|
||||
return true, nil
|
||||
} else {
|
||||
utils.Debugf("[BUILDER] Cache miss")
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func (b *buildFile) CmdRun(args string) error {
|
||||
if b.image == "" {
|
||||
return fmt.Errorf("Please provide a source image with `from` prior to run")
|
||||
@@ -96,17 +137,12 @@ func (b *buildFile) CmdRun(args string) error {
|
||||
|
||||
utils.Debugf("Command to be executed: %v", b.config.Cmd)
|
||||
|
||||
if b.utilizeCache {
|
||||
if cache, err := b.srv.ImageGetCached(b.image, b.config); err != nil {
|
||||
return err
|
||||
} else if cache != nil {
|
||||
fmt.Fprintf(b.out, " ---> Using cache\n")
|
||||
utils.Debugf("[BUILDER] Use cached version")
|
||||
b.image = cache.ID
|
||||
return nil
|
||||
} else {
|
||||
utils.Debugf("[BUILDER] Cache miss")
|
||||
}
|
||||
hit, err := b.probeCache()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if hit {
|
||||
return nil
|
||||
}
|
||||
|
||||
cid, err := b.run()
|
||||
@@ -177,16 +213,30 @@ func (b *buildFile) CmdEnv(args string) error {
|
||||
return b.commit("", b.config.Cmd, fmt.Sprintf("ENV %s", replacedVar))
|
||||
}
|
||||
|
||||
func (b *buildFile) CmdCmd(args string) error {
|
||||
func (b *buildFile) buildCmdFromJson(args string) []string {
|
||||
var cmd []string
|
||||
if err := json.Unmarshal([]byte(args), &cmd); err != nil {
|
||||
utils.Debugf("Error unmarshalling: %s, setting cmd to /bin/sh -c", err)
|
||||
utils.Debugf("Error unmarshalling: %s, setting to /bin/sh -c", err)
|
||||
cmd = []string{"/bin/sh", "-c", args}
|
||||
}
|
||||
if err := b.commit("", cmd, fmt.Sprintf("CMD %v", cmd)); err != nil {
|
||||
return cmd
|
||||
}
|
||||
|
||||
func (b *buildFile) CmdCmd(args string) error {
|
||||
cmd := b.buildCmdFromJson(args)
|
||||
b.config.Cmd = cmd
|
||||
if err := b.commit("", b.config.Cmd, fmt.Sprintf("CMD %v", cmd)); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *buildFile) CmdEntrypoint(args string) error {
|
||||
entrypoint := b.buildCmdFromJson(args)
|
||||
b.config.Entrypoint = entrypoint
|
||||
if err := b.commit("", b.config.Cmd, fmt.Sprintf("ENTRYPOINT %v", entrypoint)); err != nil {
|
||||
return err
|
||||
}
|
||||
b.config.Cmd = cmd
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -209,23 +259,6 @@ func (b *buildFile) CmdCopy(args string) error {
|
||||
return fmt.Errorf("COPY has been deprecated. Please use ADD instead")
|
||||
}
|
||||
|
||||
func (b *buildFile) CmdEntrypoint(args string) error {
|
||||
if args == "" {
|
||||
return fmt.Errorf("Entrypoint cannot be empty")
|
||||
}
|
||||
|
||||
var entrypoint []string
|
||||
if err := json.Unmarshal([]byte(args), &entrypoint); err != nil {
|
||||
b.config.Entrypoint = []string{"/bin/sh", "-c", args}
|
||||
} else {
|
||||
b.config.Entrypoint = entrypoint
|
||||
}
|
||||
if err := b.commit("", b.config.Cmd, fmt.Sprintf("ENTRYPOINT %s", args)); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *buildFile) CmdWorkdir(workdir string) error {
|
||||
b.config.WorkingDir = workdir
|
||||
return b.commit("", b.config.Cmd, fmt.Sprintf("WORKDIR %v", workdir))
|
||||
@@ -241,7 +274,7 @@ func (b *buildFile) CmdVolume(args string) error {
|
||||
volume = []string{args}
|
||||
}
|
||||
if b.config.Volumes == nil {
|
||||
b.config.Volumes = NewPathOpts()
|
||||
b.config.Volumes = map[string]struct{}{}
|
||||
}
|
||||
for _, v := range volume {
|
||||
b.config.Volumes[v] = struct{}{}
|
||||
@@ -252,44 +285,27 @@ func (b *buildFile) CmdVolume(args string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *buildFile) addRemote(container *Container, orig, dest string) error {
|
||||
file, err := utils.Download(orig, ioutil.Discard)
|
||||
func (b *buildFile) checkPathForAddition(orig string) error {
|
||||
origPath := path.Join(b.contextPath, orig)
|
||||
if !strings.HasPrefix(origPath, b.contextPath) {
|
||||
return fmt.Errorf("Forbidden path outside the build context: %s (%s)", orig, origPath)
|
||||
}
|
||||
_, err := os.Stat(origPath)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("%s: no such file or directory", orig)
|
||||
}
|
||||
defer file.Body.Close()
|
||||
|
||||
// If the destination is a directory, figure out the filename.
|
||||
if strings.HasSuffix(dest, "/") {
|
||||
u, err := url.Parse(orig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
path := u.Path
|
||||
if strings.HasSuffix(path, "/") {
|
||||
path = path[:len(path)-1]
|
||||
}
|
||||
parts := strings.Split(path, "/")
|
||||
filename := parts[len(parts)-1]
|
||||
if filename == "" {
|
||||
return fmt.Errorf("cannot determine filename from url: %s", u)
|
||||
}
|
||||
dest = dest + filename
|
||||
}
|
||||
|
||||
return container.Inject(file.Body, dest)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *buildFile) addContext(container *Container, orig, dest string) error {
|
||||
origPath := path.Join(b.context, orig)
|
||||
destPath := path.Join(container.RootfsPath(), dest)
|
||||
var (
|
||||
origPath = path.Join(b.contextPath, orig)
|
||||
destPath = path.Join(container.RootfsPath(), dest)
|
||||
)
|
||||
// Preserve the trailing '/'
|
||||
if strings.HasSuffix(dest, "/") {
|
||||
destPath = destPath + "/"
|
||||
}
|
||||
if !strings.HasPrefix(origPath, b.context) {
|
||||
return fmt.Errorf("Forbidden path: %s", origPath)
|
||||
}
|
||||
fi, err := os.Stat(origPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: no such file or directory", orig)
|
||||
@@ -313,7 +329,7 @@ func (b *buildFile) addContext(container *Container, orig, dest string) error {
|
||||
}
|
||||
|
||||
func (b *buildFile) CmdAdd(args string) error {
|
||||
if b.context == "" {
|
||||
if b.context == nil {
|
||||
return fmt.Errorf("No context given. Impossible to use ADD")
|
||||
}
|
||||
tmp := strings.SplitN(args, " ", 2)
|
||||
@@ -333,8 +349,100 @@ func (b *buildFile) CmdAdd(args string) error {
|
||||
|
||||
cmd := b.config.Cmd
|
||||
b.config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) ADD %s in %s", orig, dest)}
|
||||
|
||||
b.config.Image = b.image
|
||||
|
||||
// FIXME: do we really need this?
|
||||
var (
|
||||
origPath = orig
|
||||
destPath = dest
|
||||
)
|
||||
|
||||
if utils.IsURL(orig) {
|
||||
resp, err := utils.Download(orig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tmpDirName, err := ioutil.TempDir(b.contextPath, "docker-remote")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tmpFileName := path.Join(tmpDirName, "tmp")
|
||||
tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer os.RemoveAll(tmpDirName)
|
||||
if _, err = io.Copy(tmpFile, resp.Body); err != nil {
|
||||
return err
|
||||
}
|
||||
origPath = path.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName))
|
||||
tmpFile.Close()
|
||||
|
||||
// If the destination is a directory, figure out the filename.
|
||||
if strings.HasSuffix(dest, "/") {
|
||||
u, err := url.Parse(orig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
path := u.Path
|
||||
if strings.HasSuffix(path, "/") {
|
||||
path = path[:len(path)-1]
|
||||
}
|
||||
parts := strings.Split(path, "/")
|
||||
filename := parts[len(parts)-1]
|
||||
if filename == "" {
|
||||
return fmt.Errorf("cannot determine filename from url: %s", u)
|
||||
}
|
||||
destPath = dest + filename
|
||||
}
|
||||
}
|
||||
|
||||
if err := b.checkPathForAddition(origPath); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Hash path and check the cache
|
||||
if b.utilizeCache {
|
||||
var (
|
||||
hash string
|
||||
sums = b.context.GetSums()
|
||||
)
|
||||
|
||||
// Has tarsum strips the '.' and './', we put it back for comparaison.
|
||||
for file, sum := range sums {
|
||||
if len(file) == 0 || file[0] != '.' && file[0] != '/' {
|
||||
delete(sums, file)
|
||||
sums["./"+file] = sum
|
||||
}
|
||||
}
|
||||
|
||||
if fi, err := os.Stat(path.Join(b.contextPath, origPath)); err != nil {
|
||||
return err
|
||||
} else if fi.IsDir() {
|
||||
var subfiles []string
|
||||
for file, sum := range sums {
|
||||
if strings.HasPrefix(file, origPath) {
|
||||
subfiles = append(subfiles, sum)
|
||||
}
|
||||
}
|
||||
sort.Strings(subfiles)
|
||||
hasher := sha256.New()
|
||||
hasher.Write([]byte(strings.Join(subfiles, ",")))
|
||||
hash = "dir:" + hex.EncodeToString(hasher.Sum(nil))
|
||||
} else {
|
||||
hash = "file:" + sums[origPath]
|
||||
}
|
||||
b.config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) ADD %s in %s", hash, dest)}
|
||||
hit, err := b.probeCache()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// If we do not have a hash, never use the cache
|
||||
if hit && hash != "" {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Create the container and start it
|
||||
container, _, err := b.runtime.Create(b.config, "")
|
||||
if err != nil {
|
||||
@@ -347,14 +455,8 @@ func (b *buildFile) CmdAdd(args string) error {
|
||||
}
|
||||
defer container.Unmount()
|
||||
|
||||
if utils.IsURL(orig) {
|
||||
if err := b.addRemote(container, orig, dest); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err := b.addContext(container, orig, dest); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := b.addContext(container, origPath, destPath); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := b.commit(container.ID, cmd, fmt.Sprintf("ADD %s in %s", orig, dest)); err != nil {
|
||||
@@ -364,6 +466,34 @@ func (b *buildFile) CmdAdd(args string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
type StdoutFormater struct {
|
||||
io.Writer
|
||||
*utils.StreamFormatter
|
||||
}
|
||||
|
||||
func (sf *StdoutFormater) Write(buf []byte) (int, error) {
|
||||
formattedBuf := sf.StreamFormatter.FormatStream(string(buf))
|
||||
n, err := sf.Writer.Write(formattedBuf)
|
||||
if n != len(formattedBuf) {
|
||||
return n, io.ErrShortWrite
|
||||
}
|
||||
return len(buf), err
|
||||
}
|
||||
|
||||
type StderrFormater struct {
|
||||
io.Writer
|
||||
*utils.StreamFormatter
|
||||
}
|
||||
|
||||
func (sf *StderrFormater) Write(buf []byte) (int, error) {
|
||||
formattedBuf := sf.StreamFormatter.FormatStream("\033[91m" + string(buf) + "\033[0m")
|
||||
n, err := sf.Writer.Write(formattedBuf)
|
||||
if n != len(formattedBuf) {
|
||||
return n, io.ErrShortWrite
|
||||
}
|
||||
return len(buf), err
|
||||
}
|
||||
|
||||
func (b *buildFile) run() (string, error) {
|
||||
if b.image == "" {
|
||||
return "", fmt.Errorf("Please provide a source image with `from` prior to run")
|
||||
@@ -376,7 +506,7 @@ func (b *buildFile) run() (string, error) {
|
||||
return "", err
|
||||
}
|
||||
b.tmpContainers[c.ID] = struct{}{}
|
||||
fmt.Fprintf(b.out, " ---> Running in %s\n", utils.TruncateID(c.ID))
|
||||
fmt.Fprintf(b.outStream, " ---> Running in %s\n", utils.TruncateID(c.ID))
|
||||
|
||||
// override the entry point that may have been picked up from the base image
|
||||
c.Path = b.config.Cmd[0]
|
||||
@@ -386,7 +516,7 @@ func (b *buildFile) run() (string, error) {
|
||||
|
||||
if b.verbose {
|
||||
errCh = utils.Go(func() error {
|
||||
return <-c.Attach(nil, nil, b.out, b.out)
|
||||
return <-c.Attach(nil, nil, b.outStream, b.errStream)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -403,7 +533,11 @@ func (b *buildFile) run() (string, error) {
|
||||
|
||||
// Wait for it to finish
|
||||
if ret := c.Wait(); ret != 0 {
|
||||
return "", fmt.Errorf("The command %v returned a non-zero code: %d", b.config.Cmd, ret)
|
||||
err := &utils.JSONError{
|
||||
Message: fmt.Sprintf("The command %v returned a non-zero code: %d", b.config.Cmd, ret),
|
||||
Code: ret,
|
||||
}
|
||||
return "", err
|
||||
}
|
||||
|
||||
return c.ID, nil
|
||||
@@ -420,17 +554,12 @@ func (b *buildFile) commit(id string, autoCmd []string, comment string) error {
|
||||
b.config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + comment}
|
||||
defer func(cmd []string) { b.config.Cmd = cmd }(cmd)
|
||||
|
||||
if b.utilizeCache {
|
||||
if cache, err := b.srv.ImageGetCached(b.image, b.config); err != nil {
|
||||
return err
|
||||
} else if cache != nil {
|
||||
fmt.Fprintf(b.out, " ---> Using cache\n")
|
||||
utils.Debugf("[BUILDER] Use cached version")
|
||||
b.image = cache.ID
|
||||
return nil
|
||||
} else {
|
||||
utils.Debugf("[BUILDER] Cache miss")
|
||||
}
|
||||
hit, err := b.probeCache()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if hit {
|
||||
return nil
|
||||
}
|
||||
|
||||
container, warnings, err := b.runtime.Create(b.config, "")
|
||||
@@ -438,10 +567,10 @@ func (b *buildFile) commit(id string, autoCmd []string, comment string) error {
|
||||
return err
|
||||
}
|
||||
for _, warning := range warnings {
|
||||
fmt.Fprintf(b.out, " ---> [Warning] %s\n", warning)
|
||||
fmt.Fprintf(b.outStream, " ---> [Warning] %s\n", warning)
|
||||
}
|
||||
b.tmpContainers[container.ID] = struct{}{}
|
||||
fmt.Fprintf(b.out, " ---> Running in %s\n", utils.TruncateID(container.ID))
|
||||
fmt.Fprintf(b.outStream, " ---> Running in %s\n", utils.TruncateID(container.ID))
|
||||
id = container.ID
|
||||
if err := container.EnsureMounted(); err != nil {
|
||||
return err
|
||||
@@ -471,17 +600,17 @@ func (b *buildFile) commit(id string, autoCmd []string, comment string) error {
|
||||
var lineContinuation = regexp.MustCompile(`\s*\\\s*\n`)
|
||||
|
||||
func (b *buildFile) Build(context io.Reader) (string, error) {
|
||||
// FIXME: @creack "name" is a terrible variable name
|
||||
name, err := ioutil.TempDir("", "docker-build")
|
||||
tmpdirPath, err := ioutil.TempDir("", "docker-build")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if err := archive.Untar(context, name); err != nil {
|
||||
b.context = &utils.TarSum{Reader: context}
|
||||
if err := archive.Untar(b.context, tmpdirPath, nil); err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer os.RemoveAll(name)
|
||||
b.context = name
|
||||
filename := path.Join(name, "Dockerfile")
|
||||
defer os.RemoveAll(tmpdirPath)
|
||||
b.contextPath = tmpdirPath
|
||||
filename := path.Join(tmpdirPath, "Dockerfile")
|
||||
if _, err := os.Stat(filename); os.IsNotExist(err) {
|
||||
return "", fmt.Errorf("Can't build a directory with no Dockerfile")
|
||||
}
|
||||
@@ -489,6 +618,9 @@ func (b *buildFile) Build(context io.Reader) (string, error) {
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if len(fileBytes) == 0 {
|
||||
return "", ErrDockerfileEmpty
|
||||
}
|
||||
dockerfile := string(fileBytes)
|
||||
dockerfile = lineContinuation.ReplaceAllString(dockerfile, "")
|
||||
stepN := 0
|
||||
@@ -507,40 +639,44 @@ func (b *buildFile) Build(context io.Reader) (string, error) {
|
||||
|
||||
method, exists := reflect.TypeOf(b).MethodByName("Cmd" + strings.ToUpper(instruction[:1]) + strings.ToLower(instruction[1:]))
|
||||
if !exists {
|
||||
fmt.Fprintf(b.out, "# Skipping unknown instruction %s\n", strings.ToUpper(instruction))
|
||||
fmt.Fprintf(b.errStream, "# Skipping unknown instruction %s\n", strings.ToUpper(instruction))
|
||||
continue
|
||||
}
|
||||
|
||||
stepN += 1
|
||||
fmt.Fprintf(b.out, "Step %d : %s %s\n", stepN, strings.ToUpper(instruction), arguments)
|
||||
fmt.Fprintf(b.outStream, "Step %d : %s %s\n", stepN, strings.ToUpper(instruction), arguments)
|
||||
|
||||
ret := method.Func.Call([]reflect.Value{reflect.ValueOf(b), reflect.ValueOf(arguments)})[0].Interface()
|
||||
if ret != nil {
|
||||
return "", ret.(error)
|
||||
}
|
||||
|
||||
fmt.Fprintf(b.out, " ---> %v\n", utils.TruncateID(b.image))
|
||||
fmt.Fprintf(b.outStream, " ---> %s\n", utils.TruncateID(b.image))
|
||||
}
|
||||
if b.image != "" {
|
||||
fmt.Fprintf(b.out, "Successfully built %s\n", utils.TruncateID(b.image))
|
||||
fmt.Fprintf(b.outStream, "Successfully built %s\n", utils.TruncateID(b.image))
|
||||
if b.rm {
|
||||
b.clearTmp(b.tmpContainers)
|
||||
}
|
||||
return b.image, nil
|
||||
}
|
||||
return "", fmt.Errorf("An error occurred during the build\n")
|
||||
return "", fmt.Errorf("No image was generated. This may be because the Dockerfile does not, like, do anything.\n")
|
||||
}
|
||||
|
||||
func NewBuildFile(srv *Server, out io.Writer, verbose, utilizeCache, rm bool) BuildFile {
|
||||
func NewBuildFile(srv *Server, outStream, errStream io.Writer, verbose, utilizeCache, rm bool, outOld io.Writer, sf *utils.StreamFormatter, auth *auth.AuthConfig) BuildFile {
|
||||
return &buildFile{
|
||||
runtime: srv.runtime,
|
||||
srv: srv,
|
||||
config: &Config{},
|
||||
out: out,
|
||||
outStream: outStream,
|
||||
errStream: errStream,
|
||||
tmpContainers: make(map[string]struct{}),
|
||||
tmpImages: make(map[string]struct{}),
|
||||
verbose: verbose,
|
||||
utilizeCache: utilizeCache,
|
||||
rm: rm,
|
||||
sf: sf,
|
||||
authConfig: auth,
|
||||
outOld: outOld,
|
||||
}
|
||||
}
|
||||
|
||||
101
cgroups/cgroups.go
Normal file
101
cgroups/cgroups.go
Normal file
@@ -0,0 +1,101 @@
|
||||
package cgroups
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"github.com/dotcloud/docker/mount"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// https://www.kernel.org/doc/Documentation/cgroups/cgroups.txt
|
||||
|
||||
func FindCgroupMountpoint(subsystem string) (string, error) {
|
||||
mounts, err := mount.GetMounts()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
for _, mount := range mounts {
|
||||
if mount.Fstype == "cgroup" {
|
||||
for _, opt := range strings.Split(mount.VfsOpts, ",") {
|
||||
if opt == subsystem {
|
||||
return mount.Mountpoint, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("cgroup mountpoint not found for %s", subsystem)
|
||||
}
|
||||
|
||||
// Returns the relative path to the cgroup docker is running in.
|
||||
func getThisCgroupDir(subsystem string) (string, error) {
|
||||
f, err := os.Open("/proc/self/cgroup")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
return parseCgroupFile(subsystem, f)
|
||||
}
|
||||
|
||||
func parseCgroupFile(subsystem string, r io.Reader) (string, error) {
|
||||
s := bufio.NewScanner(r)
|
||||
|
||||
for s.Scan() {
|
||||
if err := s.Err(); err != nil {
|
||||
return "", err
|
||||
}
|
||||
text := s.Text()
|
||||
parts := strings.Split(text, ":")
|
||||
if parts[1] == subsystem {
|
||||
return parts[2], nil
|
||||
}
|
||||
}
|
||||
return "", fmt.Errorf("cgroup '%s' not found in /proc/self/cgroup", subsystem)
|
||||
}
|
||||
|
||||
// Returns a list of pids for the given container.
|
||||
func GetPidsForContainer(id string) ([]int, error) {
|
||||
pids := []int{}
|
||||
|
||||
// memory is chosen randomly, any cgroup used by docker works
|
||||
subsystem := "memory"
|
||||
|
||||
cgroupRoot, err := FindCgroupMountpoint(subsystem)
|
||||
if err != nil {
|
||||
return pids, err
|
||||
}
|
||||
|
||||
cgroupDir, err := getThisCgroupDir(subsystem)
|
||||
if err != nil {
|
||||
return pids, err
|
||||
}
|
||||
|
||||
filename := filepath.Join(cgroupRoot, cgroupDir, id, "tasks")
|
||||
if _, err := os.Stat(filename); os.IsNotExist(err) {
|
||||
// With more recent lxc versions use, cgroup will be in lxc/
|
||||
filename = filepath.Join(cgroupRoot, cgroupDir, "lxc", id, "tasks")
|
||||
}
|
||||
|
||||
output, err := ioutil.ReadFile(filename)
|
||||
if err != nil {
|
||||
return pids, err
|
||||
}
|
||||
for _, p := range strings.Split(string(output), "\n") {
|
||||
if len(p) == 0 {
|
||||
continue
|
||||
}
|
||||
pid, err := strconv.Atoi(p)
|
||||
if err != nil {
|
||||
return pids, fmt.Errorf("Invalid pid '%s': %s", p, err)
|
||||
}
|
||||
pids = append(pids, pid)
|
||||
}
|
||||
return pids, nil
|
||||
}
|
||||
27
cgroups/cgroups_test.go
Normal file
27
cgroups/cgroups_test.go
Normal file
@@ -0,0 +1,27 @@
|
||||
package cgroups
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
)
|
||||
|
||||
const (
|
||||
cgroupsContents = `11:hugetlb:/
|
||||
10:perf_event:/
|
||||
9:blkio:/
|
||||
8:net_cls:/
|
||||
7:freezer:/
|
||||
6:devices:/
|
||||
5:memory:/
|
||||
4:cpuacct,cpu:/
|
||||
3:cpuset:/
|
||||
2:name=systemd:/user.slice/user-1000.slice/session-16.scope`
|
||||
)
|
||||
|
||||
func TestParseCgroups(t *testing.T) {
|
||||
r := bytes.NewBuffer([]byte(cgroupsContents))
|
||||
_, err := parseCgroupFile("blkio", r)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
106
changes.go
106
changes.go
@@ -1,106 +0,0 @@
|
||||
package docker
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type ChangeType int
|
||||
|
||||
const (
|
||||
ChangeModify = iota
|
||||
ChangeAdd
|
||||
ChangeDelete
|
||||
)
|
||||
|
||||
type Change struct {
|
||||
Path string
|
||||
Kind ChangeType
|
||||
}
|
||||
|
||||
func (change *Change) String() string {
|
||||
var kind string
|
||||
switch change.Kind {
|
||||
case ChangeModify:
|
||||
kind = "C"
|
||||
case ChangeAdd:
|
||||
kind = "A"
|
||||
case ChangeDelete:
|
||||
kind = "D"
|
||||
}
|
||||
return fmt.Sprintf("%s %s", kind, change.Path)
|
||||
}
|
||||
|
||||
func Changes(layers []string, rw string) ([]Change, error) {
|
||||
var changes []Change
|
||||
err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Rebase path
|
||||
path, err = filepath.Rel(rw, path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
path = filepath.Join("/", path)
|
||||
|
||||
// Skip root
|
||||
if path == "/" {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Skip AUFS metadata
|
||||
if matched, err := filepath.Match("/.wh..wh.*", path); err != nil || matched {
|
||||
return err
|
||||
}
|
||||
|
||||
change := Change{
|
||||
Path: path,
|
||||
}
|
||||
|
||||
// Find out what kind of modification happened
|
||||
file := filepath.Base(path)
|
||||
// If there is a whiteout, then the file was removed
|
||||
if strings.HasPrefix(file, ".wh.") {
|
||||
originalFile := file[len(".wh."):]
|
||||
change.Path = filepath.Join(filepath.Dir(path), originalFile)
|
||||
change.Kind = ChangeDelete
|
||||
} else {
|
||||
// Otherwise, the file was added
|
||||
change.Kind = ChangeAdd
|
||||
|
||||
// ...Unless it already existed in a top layer, in which case, it's a modification
|
||||
for _, layer := range layers {
|
||||
stat, err := os.Stat(filepath.Join(layer, path))
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
if err == nil {
|
||||
// The file existed in the top layer, so that's a modification
|
||||
|
||||
// However, if it's a directory, maybe it wasn't actually modified.
|
||||
// If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar
|
||||
if stat.IsDir() && f.IsDir() {
|
||||
if f.Size() == stat.Size() && f.Mode() == stat.Mode() && f.ModTime() == stat.ModTime() {
|
||||
// Both directories are the same, don't record the change
|
||||
return nil
|
||||
}
|
||||
}
|
||||
change.Kind = ChangeModify
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Record change
|
||||
changes = append(changes, change)
|
||||
return nil
|
||||
})
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
return nil, err
|
||||
}
|
||||
return changes, nil
|
||||
}
|
||||
744
commands.go
744
commands.go
File diff suppressed because it is too large
Load Diff
159
commands_unit_test.go
Normal file
159
commands_unit_test.go
Normal file
@@ -0,0 +1,159 @@
|
||||
package docker
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func parse(t *testing.T, args string) (*Config, *HostConfig, error) {
|
||||
config, hostConfig, _, err := ParseRun(strings.Split(args+" ubuntu bash", " "), nil)
|
||||
return config, hostConfig, err
|
||||
}
|
||||
|
||||
func mustParse(t *testing.T, args string) (*Config, *HostConfig) {
|
||||
config, hostConfig, err := parse(t, args)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return config, hostConfig
|
||||
}
|
||||
|
||||
func TestParseRunLinks(t *testing.T) {
|
||||
if _, hostConfig := mustParse(t, "-link a:b"); len(hostConfig.Links) == 0 || hostConfig.Links[0] != "a:b" {
|
||||
t.Fatalf("Error parsing links. Expected []string{\"a:b\"}, received: %v", hostConfig.Links)
|
||||
}
|
||||
if _, hostConfig := mustParse(t, "-link a:b -link c:d"); len(hostConfig.Links) < 2 || hostConfig.Links[0] != "a:b" || hostConfig.Links[1] != "c:d" {
|
||||
t.Fatalf("Error parsing links. Expected []string{\"a:b\", \"c:d\"}, received: %v", hostConfig.Links)
|
||||
}
|
||||
if _, hostConfig := mustParse(t, ""); len(hostConfig.Links) != 0 {
|
||||
t.Fatalf("Error parsing links. No link expected, received: %v", hostConfig.Links)
|
||||
}
|
||||
|
||||
if _, _, err := parse(t, "-link a"); err == nil {
|
||||
t.Fatalf("Error parsing links. `-link a` should be an error but is not")
|
||||
}
|
||||
if _, _, err := parse(t, "-link"); err == nil {
|
||||
t.Fatalf("Error parsing links. `-link` should be an error but is not")
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseRunAttach(t *testing.T) {
|
||||
if config, _ := mustParse(t, "-a stdin"); !config.AttachStdin || config.AttachStdout || config.AttachStderr {
|
||||
t.Fatalf("Error parsing attach flags. Expect only Stdin enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr)
|
||||
}
|
||||
if config, _ := mustParse(t, "-a stdin -a stdout"); !config.AttachStdin || !config.AttachStdout || config.AttachStderr {
|
||||
t.Fatalf("Error parsing attach flags. Expect only Stdin and Stdout enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr)
|
||||
}
|
||||
if config, _ := mustParse(t, "-a stdin -a stdout -a stderr"); !config.AttachStdin || !config.AttachStdout || !config.AttachStderr {
|
||||
t.Fatalf("Error parsing attach flags. Expect all attach enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr)
|
||||
}
|
||||
if config, _ := mustParse(t, ""); config.AttachStdin || !config.AttachStdout || !config.AttachStderr {
|
||||
t.Fatalf("Error parsing attach flags. Expect Stdin disabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr)
|
||||
}
|
||||
|
||||
if _, _, err := parse(t, "-a"); err == nil {
|
||||
t.Fatalf("Error parsing attach flags, `-a` should be an error but is not")
|
||||
}
|
||||
if _, _, err := parse(t, "-a invalid"); err == nil {
|
||||
t.Fatalf("Error parsing attach flags, `-a invalid` should be an error but is not")
|
||||
}
|
||||
if _, _, err := parse(t, "-a invalid -a stdout"); err == nil {
|
||||
t.Fatalf("Error parsing attach flags, `-a stdout -a invalid` should be an error but is not")
|
||||
}
|
||||
if _, _, err := parse(t, "-a stdout -a stderr -d"); err == nil {
|
||||
t.Fatalf("Error parsing attach flags, `-a stdout -a stderr -d` should be an error but is not")
|
||||
}
|
||||
if _, _, err := parse(t, "-a stdin -d"); err == nil {
|
||||
t.Fatalf("Error parsing attach flags, `-a stdin -d` should be an error but is not")
|
||||
}
|
||||
if _, _, err := parse(t, "-a stdout -d"); err == nil {
|
||||
t.Fatalf("Error parsing attach flags, `-a stdout -d` should be an error but is not")
|
||||
}
|
||||
if _, _, err := parse(t, "-a stderr -d"); err == nil {
|
||||
t.Fatalf("Error parsing attach flags, `-a stderr -d` should be an error but is not")
|
||||
}
|
||||
if _, _, err := parse(t, "-d -rm"); err == nil {
|
||||
t.Fatalf("Error parsing attach flags, `-d -rm` should be an error but is not")
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseRunVolumes(t *testing.T) {
|
||||
if config, hostConfig := mustParse(t, "-v /tmp"); hostConfig.Binds != nil {
|
||||
t.Fatalf("Error parsing volume flags, `-v /tmp` should not mount-bind anything. Received %v", hostConfig.Binds)
|
||||
} else if _, exists := config.Volumes["/tmp"]; !exists {
|
||||
t.Fatalf("Error parsing volume flags, `-v /tmp` is missing from volumes. Received %v", config.Volumes)
|
||||
}
|
||||
|
||||
if config, hostConfig := mustParse(t, "-v /tmp -v /var"); hostConfig.Binds != nil {
|
||||
t.Fatalf("Error parsing volume flags, `-v /tmp -v /var` should not mount-bind anything. Received %v", hostConfig.Binds)
|
||||
} else if _, exists := config.Volumes["/tmp"]; !exists {
|
||||
t.Fatalf("Error parsing volume flags, `-v /tmp` is missing from volumes. Recevied %v", config.Volumes)
|
||||
} else if _, exists := config.Volumes["/var"]; !exists {
|
||||
t.Fatalf("Error parsing volume flags, `-v /var` is missing from volumes. Received %v", config.Volumes)
|
||||
}
|
||||
|
||||
if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp"); hostConfig.Binds == nil || hostConfig.Binds[0] != "/hostTmp:/containerTmp" {
|
||||
t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp` should mount-bind /hostTmp into /containeTmp. Received %v", hostConfig.Binds)
|
||||
} else if _, exists := config.Volumes["/containerTmp"]; !exists {
|
||||
t.Fatalf("Error parsing volume flags, `-v /tmp` is missing from volumes. Received %v", config.Volumes)
|
||||
}
|
||||
|
||||
if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp -v /hostVar:/containerVar"); hostConfig.Binds == nil || hostConfig.Binds[0] != "/hostTmp:/containerTmp" || hostConfig.Binds[1] != "/hostVar:/containerVar" {
|
||||
t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp -v /hostVar:/containerVar` should mount-bind /hostTmp into /containeTmp and /hostVar into /hostContainer. Received %v", hostConfig.Binds)
|
||||
} else if _, exists := config.Volumes["/containerTmp"]; !exists {
|
||||
t.Fatalf("Error parsing volume flags, `-v /containerTmp` is missing from volumes. Received %v", config.Volumes)
|
||||
} else if _, exists := config.Volumes["/containerVar"]; !exists {
|
||||
t.Fatalf("Error parsing volume flags, `-v /containerVar` is missing from volumes. Received %v", config.Volumes)
|
||||
}
|
||||
|
||||
if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp:ro -v /hostVar:/containerVar:rw"); hostConfig.Binds == nil || hostConfig.Binds[0] != "/hostTmp:/containerTmp:ro" || hostConfig.Binds[1] != "/hostVar:/containerVar:rw" {
|
||||
t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp:ro -v /hostVar:/containerVar:rw` should mount-bind /hostTmp into /containeTmp and /hostVar into /hostContainer. Received %v", hostConfig.Binds)
|
||||
} else if _, exists := config.Volumes["/containerTmp"]; !exists {
|
||||
t.Fatalf("Error parsing volume flags, `-v /containerTmp` is missing from volumes. Received %v", config.Volumes)
|
||||
} else if _, exists := config.Volumes["/containerVar"]; !exists {
|
||||
t.Fatalf("Error parsing volume flags, `-v /containerVar` is missing from volumes. Received %v", config.Volumes)
|
||||
}
|
||||
|
||||
if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp -v /containerVar"); hostConfig.Binds == nil || len(hostConfig.Binds) > 1 || hostConfig.Binds[0] != "/hostTmp:/containerTmp" {
|
||||
t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp -v /containerVar` should mount-bind only /hostTmp into /containeTmp. Received %v", hostConfig.Binds)
|
||||
} else if _, exists := config.Volumes["/containerTmp"]; !exists {
|
||||
t.Fatalf("Error parsing volume flags, `-v /containerTmp` is missing from volumes. Received %v", config.Volumes)
|
||||
} else if _, exists := config.Volumes["/containerVar"]; !exists {
|
||||
t.Fatalf("Error parsing volume flags, `-v /containerVar` is missing from volumes. Received %v", config.Volumes)
|
||||
}
|
||||
|
||||
if config, hostConfig := mustParse(t, ""); hostConfig.Binds != nil {
|
||||
t.Fatalf("Error parsing volume flags, without volume, nothing should be mount-binded. Received %v", hostConfig.Binds)
|
||||
} else if len(config.Volumes) != 0 {
|
||||
t.Fatalf("Error parsing volume flags, without volume, no volume should be present. Received %v", config.Volumes)
|
||||
}
|
||||
|
||||
if _, _, err := parse(t, "-v /"); err == nil {
|
||||
t.Fatalf("Expected error, but got none")
|
||||
}
|
||||
|
||||
if _, _, err := parse(t, "-v /:/"); err == nil {
|
||||
t.Fatalf("Error parsing volume flags, `-v /:/` should fail but didn't")
|
||||
}
|
||||
if _, _, err := parse(t, "-v"); err == nil {
|
||||
t.Fatalf("Error parsing volume flags, `-v` should fail but didn't")
|
||||
}
|
||||
if _, _, err := parse(t, "-v /tmp:"); err == nil {
|
||||
t.Fatalf("Error parsing volume flags, `-v /tmp:` should fail but didn't")
|
||||
}
|
||||
if _, _, err := parse(t, "-v /tmp:ro"); err == nil {
|
||||
t.Fatalf("Error parsing volume flags, `-v /tmp:ro` should fail but didn't")
|
||||
}
|
||||
if _, _, err := parse(t, "-v /tmp::"); err == nil {
|
||||
t.Fatalf("Error parsing volume flags, `-v /tmp::` should fail but didn't")
|
||||
}
|
||||
if _, _, err := parse(t, "-v :"); err == nil {
|
||||
t.Fatalf("Error parsing volume flags, `-v :` should fail but didn't")
|
||||
}
|
||||
if _, _, err := parse(t, "-v ::"); err == nil {
|
||||
t.Fatalf("Error parsing volume flags, `-v ::` should fail but didn't")
|
||||
}
|
||||
if _, _, err := parse(t, "-v /tmp:/tmp:/tmp:/tmp"); err == nil {
|
||||
t.Fatalf("Error parsing volume flags, `-v /tmp:/tmp:/tmp:/tmp` should fail but didn't")
|
||||
}
|
||||
}
|
||||
14
config.go
14
config.go
@@ -14,8 +14,11 @@ type DaemonConfig struct {
|
||||
Dns []string
|
||||
EnableIptables bool
|
||||
BridgeIface string
|
||||
BridgeIp string
|
||||
DefaultIp net.IP
|
||||
InterContainerCommunication bool
|
||||
GraphDriver string
|
||||
Mtu int
|
||||
}
|
||||
|
||||
// ConfigFromJob creates and returns a new DaemonConfig object
|
||||
@@ -26,8 +29,8 @@ func ConfigFromJob(job *engine.Job) *DaemonConfig {
|
||||
config.Root = job.Getenv("Root")
|
||||
config.AutoRestart = job.GetenvBool("AutoRestart")
|
||||
config.EnableCors = job.GetenvBool("EnableCors")
|
||||
if dns := job.Getenv("Dns"); dns != "" {
|
||||
config.Dns = []string{dns}
|
||||
if dns := job.GetenvList("Dns"); dns != nil {
|
||||
config.Dns = dns
|
||||
}
|
||||
config.EnableIptables = job.GetenvBool("EnableIptables")
|
||||
if br := job.Getenv("BridgeIface"); br != "" {
|
||||
@@ -35,7 +38,14 @@ func ConfigFromJob(job *engine.Job) *DaemonConfig {
|
||||
} else {
|
||||
config.BridgeIface = DefaultNetworkBridge
|
||||
}
|
||||
config.BridgeIp = job.Getenv("BridgeIp")
|
||||
config.DefaultIp = net.ParseIP(job.Getenv("DefaultIp"))
|
||||
config.InterContainerCommunication = job.GetenvBool("InterContainerCommunication")
|
||||
config.GraphDriver = job.Getenv("GraphDriver")
|
||||
if mtu := job.GetenvInt("Mtu"); mtu != -1 {
|
||||
config.Mtu = mtu
|
||||
} else {
|
||||
config.Mtu = DefaultNetworkMtu
|
||||
}
|
||||
return &config
|
||||
}
|
||||
|
||||
596
container.go
596
container.go
@@ -6,7 +6,9 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/dotcloud/docker/archive"
|
||||
"github.com/dotcloud/docker/term"
|
||||
"github.com/dotcloud/docker/graphdriver"
|
||||
"github.com/dotcloud/docker/mount"
|
||||
"github.com/dotcloud/docker/pkg/term"
|
||||
"github.com/dotcloud/docker/utils"
|
||||
"github.com/kr/pty"
|
||||
"io"
|
||||
@@ -16,7 +18,6 @@ import (
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
@@ -24,10 +25,15 @@ import (
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrNotATTY = errors.New("The PTY is not a file")
|
||||
ErrNoTTY = errors.New("No PTY found")
|
||||
)
|
||||
|
||||
type Container struct {
|
||||
sync.Mutex
|
||||
|
||||
root string
|
||||
root string // Path to the "home" of the container, including metadata.
|
||||
rootfs string // Path to the root filesystem of the container.
|
||||
|
||||
ID string
|
||||
|
||||
@@ -43,11 +49,11 @@ type Container struct {
|
||||
network *NetworkInterface
|
||||
NetworkSettings *NetworkSettings
|
||||
|
||||
SysInitPath string
|
||||
ResolvConfPath string
|
||||
HostnamePath string
|
||||
HostsPath string
|
||||
Name string
|
||||
Driver string
|
||||
|
||||
cmd *exec.Cmd
|
||||
stdout *utils.WriteBroadcaster
|
||||
@@ -196,8 +202,13 @@ func (settings *NetworkSettings) PortMappingAPI() []APIPort {
|
||||
|
||||
// Inject the io.Reader at the given path. Note: do not close the reader
|
||||
func (container *Container) Inject(file io.Reader, pth string) error {
|
||||
if err := container.EnsureMounted(); err != nil {
|
||||
return fmt.Errorf("inject: error mounting container %s: %s", container.ID, err)
|
||||
}
|
||||
|
||||
// Return error if path exists
|
||||
if _, err := os.Stat(path.Join(container.rwPath(), pth)); err == nil {
|
||||
destPath := path.Join(container.RootfsPath(), pth)
|
||||
if _, err := os.Stat(destPath); err == nil {
|
||||
// Since err is nil, the path could be stat'd and it exists
|
||||
return fmt.Errorf("%s exists", pth)
|
||||
} else if !os.IsNotExist(err) {
|
||||
@@ -208,14 +219,16 @@ func (container *Container) Inject(file io.Reader, pth string) error {
|
||||
}
|
||||
|
||||
// Make sure the directory exists
|
||||
if err := os.MkdirAll(path.Join(container.rwPath(), path.Dir(pth)), 0755); err != nil {
|
||||
if err := os.MkdirAll(path.Join(container.RootfsPath(), path.Dir(pth)), 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dest, err := os.Create(path.Join(container.rwPath(), pth))
|
||||
dest, err := os.Create(destPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer dest.Close()
|
||||
|
||||
if _, err := io.Copy(dest, file); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -284,7 +297,11 @@ func (container *Container) generateEnvConfig(env []string) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ioutil.WriteFile(container.EnvConfigPath(), data, 0600)
|
||||
p, err := container.EnvConfigPath()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ioutil.WriteFile(p, data, 0600)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -528,157 +545,18 @@ func (container *Container) Start() (err error) {
|
||||
log.Printf("WARNING: IPv4 forwarding is disabled. Networking will not work")
|
||||
}
|
||||
|
||||
// Create the requested bind mounts
|
||||
binds := make(map[string]BindMap)
|
||||
// Define illegal container destinations
|
||||
illegalDsts := []string{"/", "."}
|
||||
|
||||
for _, bind := range container.hostConfig.Binds {
|
||||
// FIXME: factorize bind parsing in parseBind
|
||||
var src, dst, mode string
|
||||
arr := strings.Split(bind, ":")
|
||||
if len(arr) == 2 {
|
||||
src = arr[0]
|
||||
dst = arr[1]
|
||||
mode = "rw"
|
||||
} else if len(arr) == 3 {
|
||||
src = arr[0]
|
||||
dst = arr[1]
|
||||
mode = arr[2]
|
||||
} else {
|
||||
return fmt.Errorf("Invalid bind specification: %s", bind)
|
||||
}
|
||||
|
||||
// Bail if trying to mount to an illegal destination
|
||||
for _, illegal := range illegalDsts {
|
||||
if dst == illegal {
|
||||
return fmt.Errorf("Illegal bind destination: %s", dst)
|
||||
}
|
||||
}
|
||||
|
||||
bindMap := BindMap{
|
||||
SrcPath: src,
|
||||
DstPath: dst,
|
||||
Mode: mode,
|
||||
}
|
||||
binds[path.Clean(dst)] = bindMap
|
||||
}
|
||||
|
||||
if container.Volumes == nil || len(container.Volumes) == 0 {
|
||||
container.Volumes = make(map[string]string)
|
||||
container.VolumesRW = make(map[string]bool)
|
||||
}
|
||||
|
||||
// Apply volumes from another container if requested
|
||||
if container.Config.VolumesFrom != "" {
|
||||
containerSpecs := strings.Split(container.Config.VolumesFrom, ",")
|
||||
for _, containerSpec := range containerSpecs {
|
||||
mountRW := true
|
||||
specParts := strings.SplitN(containerSpec, ":", 2)
|
||||
switch len(specParts) {
|
||||
case 0:
|
||||
return fmt.Errorf("Malformed volumes-from specification: %s", container.Config.VolumesFrom)
|
||||
case 2:
|
||||
switch specParts[1] {
|
||||
case "ro":
|
||||
mountRW = false
|
||||
case "rw": // mountRW is already true
|
||||
default:
|
||||
return fmt.Errorf("Malformed volumes-from speficication: %s", containerSpec)
|
||||
}
|
||||
}
|
||||
c := container.runtime.Get(specParts[0])
|
||||
if c == nil {
|
||||
return fmt.Errorf("Container %s not found. Impossible to mount its volumes", container.ID)
|
||||
}
|
||||
for volPath, id := range c.Volumes {
|
||||
if _, exists := container.Volumes[volPath]; exists {
|
||||
continue
|
||||
}
|
||||
if err := os.MkdirAll(path.Join(container.RootfsPath(), volPath), 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
container.Volumes[volPath] = id
|
||||
if isRW, exists := c.VolumesRW[volPath]; exists {
|
||||
container.VolumesRW[volPath] = isRW && mountRW
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
if err := container.applyExternalVolumes(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Create the requested volumes if they don't exist
|
||||
for volPath := range container.Config.Volumes {
|
||||
volPath = path.Clean(volPath)
|
||||
// Skip existing volumes
|
||||
if _, exists := container.Volumes[volPath]; exists {
|
||||
continue
|
||||
}
|
||||
var srcPath string
|
||||
var isBindMount bool
|
||||
srcRW := false
|
||||
// If an external bind is defined for this volume, use that as a source
|
||||
if bindMap, exists := binds[volPath]; exists {
|
||||
isBindMount = true
|
||||
srcPath = bindMap.SrcPath
|
||||
if strings.ToLower(bindMap.Mode) == "rw" {
|
||||
srcRW = true
|
||||
}
|
||||
// Otherwise create an directory in $ROOT/volumes/ and use that
|
||||
} else {
|
||||
c, err := container.runtime.volumes.Create(nil, container, "", "", nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
srcPath, err = c.layer()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
srcRW = true // RW by default
|
||||
}
|
||||
container.Volumes[volPath] = srcPath
|
||||
container.VolumesRW[volPath] = srcRW
|
||||
// Create the mountpoint
|
||||
rootVolPath := path.Join(container.RootfsPath(), volPath)
|
||||
if err := os.MkdirAll(rootVolPath, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Do not copy or change permissions if we are mounting from the host
|
||||
if srcRW && !isBindMount {
|
||||
volList, err := ioutil.ReadDir(rootVolPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(volList) > 0 {
|
||||
srcList, err := ioutil.ReadDir(srcPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(srcList) == 0 {
|
||||
// If the source volume is empty copy files from the root into the volume
|
||||
if err := archive.CopyWithTar(rootVolPath, srcPath); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var stat syscall.Stat_t
|
||||
if err := syscall.Stat(rootVolPath, &stat); err != nil {
|
||||
return err
|
||||
}
|
||||
var srcStat syscall.Stat_t
|
||||
if err := syscall.Stat(srcPath, &srcStat); err != nil {
|
||||
return err
|
||||
}
|
||||
// Change the source volume's ownership if it differs from the root
|
||||
// files that where just copied
|
||||
if stat.Uid != srcStat.Uid || stat.Gid != srcStat.Gid {
|
||||
if err := os.Chown(srcPath, int(stat.Uid), int(stat.Gid)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if err := container.createVolumes(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := container.generateLXCConfig(); err != nil {
|
||||
@@ -700,7 +578,12 @@ func (container *Container) Start() (err error) {
|
||||
|
||||
// Networking
|
||||
if !container.Config.NetworkDisabled {
|
||||
params = append(params, "-g", container.network.Gateway.String())
|
||||
network := container.NetworkSettings
|
||||
params = append(params,
|
||||
"-g", network.Gateway,
|
||||
"-i", fmt.Sprintf("%s/%d", network.IPAddress, network.IPPrefixLen),
|
||||
"-mtu", strconv.Itoa(container.runtime.config.Mtu),
|
||||
)
|
||||
}
|
||||
|
||||
// User
|
||||
@@ -712,7 +595,6 @@ func (container *Container) Start() (err error) {
|
||||
env := []string{
|
||||
"HOME=/",
|
||||
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
|
||||
"container=lxc",
|
||||
"HOSTNAME=" + container.Config.Hostname,
|
||||
}
|
||||
|
||||
@@ -720,6 +602,10 @@ func (container *Container) Start() (err error) {
|
||||
env = append(env, "TERM=xterm")
|
||||
}
|
||||
|
||||
if container.hostConfig.Privileged {
|
||||
params = append(params, "-privileged")
|
||||
}
|
||||
|
||||
// Init any links between the parent and children
|
||||
runtime := container.runtime
|
||||
|
||||
@@ -800,6 +686,45 @@ func (container *Container) Start() (err error) {
|
||||
}
|
||||
}
|
||||
|
||||
root := container.RootfsPath()
|
||||
envPath, err := container.EnvConfigPath()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Mount docker specific files into the containers root fs
|
||||
if err := mount.Mount(runtime.sysInitPath, path.Join(root, "/.dockerinit"), "none", "bind,ro"); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := mount.Mount(envPath, path.Join(root, "/.dockerenv"), "none", "bind,ro"); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := mount.Mount(container.ResolvConfPath, path.Join(root, "/etc/resolv.conf"), "none", "bind,ro"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if container.HostnamePath != "" && container.HostsPath != "" {
|
||||
if err := mount.Mount(container.HostnamePath, path.Join(root, "/etc/hostname"), "none", "bind,ro"); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := mount.Mount(container.HostsPath, path.Join(root, "/etc/hosts"), "none", "bind,ro"); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Mount user specified volumes
|
||||
|
||||
for r, v := range container.Volumes {
|
||||
mountAs := "ro"
|
||||
if container.VolumesRW[v] {
|
||||
mountAs = "rw"
|
||||
}
|
||||
|
||||
if err := mount.Mount(v, path.Join(root, r), "none", fmt.Sprintf("bind,%s", mountAs)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
container.cmd = exec.Command(params[0], params[1:]...)
|
||||
|
||||
// Setup logging of stdout and stderr to disk
|
||||
@@ -864,6 +789,198 @@ func (container *Container) Start() (err error) {
|
||||
return ErrContainerStart
|
||||
}
|
||||
|
||||
func (container *Container) getBindMap() (map[string]BindMap, error) {
|
||||
// Create the requested bind mounts
|
||||
binds := make(map[string]BindMap)
|
||||
// Define illegal container destinations
|
||||
illegalDsts := []string{"/", "."}
|
||||
|
||||
for _, bind := range container.hostConfig.Binds {
|
||||
// FIXME: factorize bind parsing in parseBind
|
||||
var src, dst, mode string
|
||||
arr := strings.Split(bind, ":")
|
||||
if len(arr) == 2 {
|
||||
src = arr[0]
|
||||
dst = arr[1]
|
||||
mode = "rw"
|
||||
} else if len(arr) == 3 {
|
||||
src = arr[0]
|
||||
dst = arr[1]
|
||||
mode = arr[2]
|
||||
} else {
|
||||
return nil, fmt.Errorf("Invalid bind specification: %s", bind)
|
||||
}
|
||||
|
||||
// Bail if trying to mount to an illegal destination
|
||||
for _, illegal := range illegalDsts {
|
||||
if dst == illegal {
|
||||
return nil, fmt.Errorf("Illegal bind destination: %s", dst)
|
||||
}
|
||||
}
|
||||
|
||||
bindMap := BindMap{
|
||||
SrcPath: src,
|
||||
DstPath: dst,
|
||||
Mode: mode,
|
||||
}
|
||||
binds[path.Clean(dst)] = bindMap
|
||||
}
|
||||
return binds, nil
|
||||
}
|
||||
|
||||
func (container *Container) createVolumes() error {
|
||||
binds, err := container.getBindMap()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
volumesDriver := container.runtime.volumes.driver
|
||||
// Create the requested volumes if they don't exist
|
||||
for volPath := range container.Config.Volumes {
|
||||
volPath = path.Clean(volPath)
|
||||
volIsDir := true
|
||||
// Skip existing volumes
|
||||
if _, exists := container.Volumes[volPath]; exists {
|
||||
continue
|
||||
}
|
||||
var srcPath string
|
||||
var isBindMount bool
|
||||
srcRW := false
|
||||
// If an external bind is defined for this volume, use that as a source
|
||||
if bindMap, exists := binds[volPath]; exists {
|
||||
isBindMount = true
|
||||
srcPath = bindMap.SrcPath
|
||||
if strings.ToLower(bindMap.Mode) == "rw" {
|
||||
srcRW = true
|
||||
}
|
||||
if stat, err := os.Lstat(bindMap.SrcPath); err != nil {
|
||||
return err
|
||||
} else {
|
||||
volIsDir = stat.IsDir()
|
||||
}
|
||||
// Otherwise create an directory in $ROOT/volumes/ and use that
|
||||
} else {
|
||||
|
||||
// Do not pass a container as the parameter for the volume creation.
|
||||
// The graph driver using the container's information ( Image ) to
|
||||
// create the parent.
|
||||
c, err := container.runtime.volumes.Create(nil, nil, "", "", nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
srcPath, err = volumesDriver.Get(c.ID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Driver %s failed to get volume rootfs %s: %s", volumesDriver, c.ID, err)
|
||||
}
|
||||
srcRW = true // RW by default
|
||||
}
|
||||
container.Volumes[volPath] = srcPath
|
||||
container.VolumesRW[volPath] = srcRW
|
||||
|
||||
// Create the mountpoint
|
||||
volPath = path.Join(container.RootfsPath(), volPath)
|
||||
rootVolPath, err := utils.FollowSymlinkInScope(volPath, container.RootfsPath())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := os.Stat(rootVolPath); err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
if volIsDir {
|
||||
if err := os.MkdirAll(rootVolPath, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err := os.MkdirAll(path.Dir(rootVolPath), 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
if f, err := os.OpenFile(rootVolPath, os.O_CREATE, 0755); err != nil {
|
||||
return err
|
||||
} else {
|
||||
f.Close()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Do not copy or change permissions if we are mounting from the host
|
||||
if srcRW && !isBindMount {
|
||||
volList, err := ioutil.ReadDir(rootVolPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(volList) > 0 {
|
||||
srcList, err := ioutil.ReadDir(srcPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(srcList) == 0 {
|
||||
// If the source volume is empty copy files from the root into the volume
|
||||
if err := archive.CopyWithTar(rootVolPath, srcPath); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var stat syscall.Stat_t
|
||||
if err := syscall.Stat(rootVolPath, &stat); err != nil {
|
||||
return err
|
||||
}
|
||||
var srcStat syscall.Stat_t
|
||||
if err := syscall.Stat(srcPath, &srcStat); err != nil {
|
||||
return err
|
||||
}
|
||||
// Change the source volume's ownership if it differs from the root
|
||||
// files that where just copied
|
||||
if stat.Uid != srcStat.Uid || stat.Gid != srcStat.Gid {
|
||||
if err := os.Chown(srcPath, int(stat.Uid), int(stat.Gid)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (container *Container) applyExternalVolumes() error {
|
||||
if container.Config.VolumesFrom != "" {
|
||||
containerSpecs := strings.Split(container.Config.VolumesFrom, ",")
|
||||
for _, containerSpec := range containerSpecs {
|
||||
mountRW := true
|
||||
specParts := strings.SplitN(containerSpec, ":", 2)
|
||||
switch len(specParts) {
|
||||
case 0:
|
||||
return fmt.Errorf("Malformed volumes-from specification: %s", container.Config.VolumesFrom)
|
||||
case 2:
|
||||
switch specParts[1] {
|
||||
case "ro":
|
||||
mountRW = false
|
||||
case "rw": // mountRW is already true
|
||||
default:
|
||||
return fmt.Errorf("Malformed volumes-from speficication: %s", containerSpec)
|
||||
}
|
||||
}
|
||||
c := container.runtime.Get(specParts[0])
|
||||
if c == nil {
|
||||
return fmt.Errorf("Container %s not found. Impossible to mount its volumes", container.ID)
|
||||
}
|
||||
for volPath, id := range c.Volumes {
|
||||
if _, exists := container.Volumes[volPath]; exists {
|
||||
continue
|
||||
}
|
||||
if err := os.MkdirAll(path.Join(container.RootfsPath(), volPath), 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
container.Volumes[volPath] = id
|
||||
if isRW, exists := c.VolumesRW[volPath]; exists {
|
||||
container.VolumesRW[volPath] = isRW && mountRW
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (container *Container) Run() error {
|
||||
if err := container.Start(); err != nil {
|
||||
return err
|
||||
@@ -1231,15 +1348,14 @@ func (container *Container) Resize(h, w int) error {
|
||||
}
|
||||
|
||||
func (container *Container) ExportRw() (archive.Archive, error) {
|
||||
return archive.Tar(container.rwPath(), archive.Uncompressed)
|
||||
}
|
||||
|
||||
func (container *Container) RwChecksum() (string, error) {
|
||||
rwData, err := archive.Tar(container.rwPath(), archive.Xz)
|
||||
if err != nil {
|
||||
return "", err
|
||||
if err := container.EnsureMounted(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return utils.HashData(rwData)
|
||||
if container.runtime == nil {
|
||||
return nil, fmt.Errorf("Can't load storage driver for unregistered container %s", container.ID)
|
||||
}
|
||||
|
||||
return container.runtime.Diff(container)
|
||||
}
|
||||
|
||||
func (container *Container) Export() (archive.Archive, error) {
|
||||
@@ -1265,28 +1381,17 @@ func (container *Container) WaitTimeout(timeout time.Duration) error {
|
||||
}
|
||||
|
||||
func (container *Container) EnsureMounted() error {
|
||||
if mounted, err := container.Mounted(); err != nil {
|
||||
return err
|
||||
} else if mounted {
|
||||
return nil
|
||||
}
|
||||
// FIXME: EnsureMounted is deprecated because drivers are now responsible
|
||||
// for re-entrant mounting in their Get() method.
|
||||
return container.Mount()
|
||||
}
|
||||
|
||||
func (container *Container) Mount() error {
|
||||
image, err := container.GetImage()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return image.Mount(container.RootfsPath(), container.rwPath())
|
||||
return container.runtime.Mount(container)
|
||||
}
|
||||
|
||||
func (container *Container) Changes() ([]Change, error) {
|
||||
image, err := container.GetImage()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return image.Changes(container.rwPath())
|
||||
func (container *Container) Changes() ([]archive.Change, error) {
|
||||
return container.runtime.Changes(container)
|
||||
}
|
||||
|
||||
func (container *Container) GetImage() (*Image, error) {
|
||||
@@ -1296,18 +1401,34 @@ func (container *Container) GetImage() (*Image, error) {
|
||||
return container.runtime.graph.Get(container.Image)
|
||||
}
|
||||
|
||||
func (container *Container) Mounted() (bool, error) {
|
||||
return Mounted(container.RootfsPath())
|
||||
}
|
||||
|
||||
func (container *Container) Unmount() error {
|
||||
if _, err := os.Stat(container.RootfsPath()); err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return nil
|
||||
var (
|
||||
err error
|
||||
root = container.RootfsPath()
|
||||
mounts = []string{
|
||||
path.Join(root, "/.dockerinit"),
|
||||
path.Join(root, "/.dockerenv"),
|
||||
path.Join(root, "/etc/resolv.conf"),
|
||||
}
|
||||
)
|
||||
|
||||
if container.HostnamePath != "" && container.HostsPath != "" {
|
||||
mounts = append(mounts, path.Join(root, "/etc/hostname"), path.Join(root, "/etc/hosts"))
|
||||
}
|
||||
|
||||
for r := range container.Volumes {
|
||||
mounts = append(mounts, path.Join(root, r))
|
||||
}
|
||||
|
||||
for _, m := range mounts {
|
||||
if lastError := mount.Unmount(m); lastError != nil {
|
||||
err = lastError
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return Unmount(container.RootfsPath())
|
||||
return container.runtime.Unmount(container)
|
||||
}
|
||||
|
||||
func (container *Container) logPath(name string) string {
|
||||
@@ -1326,8 +1447,20 @@ func (container *Container) jsonPath() string {
|
||||
return path.Join(container.root, "config.json")
|
||||
}
|
||||
|
||||
func (container *Container) EnvConfigPath() string {
|
||||
return path.Join(container.root, "config.env")
|
||||
func (container *Container) EnvConfigPath() (string, error) {
|
||||
p := path.Join(container.root, "config.env")
|
||||
if _, err := os.Stat(p); err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
f, err := os.Create(p)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
f.Close()
|
||||
} else {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
|
||||
func (container *Container) lxcConfigPath() string {
|
||||
@@ -1336,11 +1469,7 @@ func (container *Container) lxcConfigPath() string {
|
||||
|
||||
// This method must be exported to be used from the lxc template
|
||||
func (container *Container) RootfsPath() string {
|
||||
return path.Join(container.root, "rootfs")
|
||||
}
|
||||
|
||||
func (container *Container) rwPath() string {
|
||||
return path.Join(container.root, "rw")
|
||||
return container.rootfs
|
||||
}
|
||||
|
||||
func validateID(id string) error {
|
||||
@@ -1352,49 +1481,38 @@ func validateID(id string) error {
|
||||
|
||||
// GetSize, return real size, virtual size
|
||||
func (container *Container) GetSize() (int64, int64) {
|
||||
var sizeRw, sizeRootfs int64
|
||||
data := make(map[uint64]bool)
|
||||
var (
|
||||
sizeRw, sizeRootfs int64
|
||||
err error
|
||||
driver = container.runtime.driver
|
||||
)
|
||||
|
||||
filepath.Walk(container.rwPath(), func(path string, fileInfo os.FileInfo, err error) error {
|
||||
if fileInfo == nil {
|
||||
return nil
|
||||
if err := container.EnsureMounted(); err != nil {
|
||||
utils.Errorf("Warning: failed to compute size of container rootfs %s: %s", container.ID, err)
|
||||
return sizeRw, sizeRootfs
|
||||
}
|
||||
|
||||
if differ, ok := container.runtime.driver.(graphdriver.Differ); ok {
|
||||
sizeRw, err = differ.DiffSize(container.ID)
|
||||
if err != nil {
|
||||
utils.Errorf("Warning: driver %s couldn't return diff size of container %s: %s", driver, container.ID, err)
|
||||
// FIXME: GetSize should return an error. Not changing it now in case
|
||||
// there is a side-effect.
|
||||
sizeRw = -1
|
||||
}
|
||||
size := fileInfo.Size()
|
||||
if size == 0 {
|
||||
return nil
|
||||
} else {
|
||||
changes, _ := container.Changes()
|
||||
if changes != nil {
|
||||
sizeRw = archive.ChangesSize(container.RootfsPath(), changes)
|
||||
} else {
|
||||
sizeRw = -1
|
||||
}
|
||||
}
|
||||
|
||||
inode := fileInfo.Sys().(*syscall.Stat_t).Ino
|
||||
if _, entryExists := data[inode]; entryExists {
|
||||
return nil
|
||||
if _, err = os.Stat(container.RootfsPath()); err != nil {
|
||||
if sizeRootfs, err = utils.TreeSize(container.RootfsPath()); err != nil {
|
||||
sizeRootfs = -1
|
||||
}
|
||||
data[inode] = false
|
||||
|
||||
sizeRw += size
|
||||
return nil
|
||||
})
|
||||
|
||||
data = make(map[uint64]bool)
|
||||
_, err := os.Stat(container.RootfsPath())
|
||||
if err == nil {
|
||||
filepath.Walk(container.RootfsPath(), func(path string, fileInfo os.FileInfo, err error) error {
|
||||
if fileInfo == nil {
|
||||
return nil
|
||||
}
|
||||
size := fileInfo.Size()
|
||||
if size == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
inode := fileInfo.Sys().(*syscall.Stat_t).Ino
|
||||
if _, entryExists := data[inode]; entryExists {
|
||||
return nil
|
||||
}
|
||||
data[inode] = false
|
||||
|
||||
sizeRootfs += size
|
||||
return nil
|
||||
})
|
||||
}
|
||||
return sizeRw, sizeRootfs
|
||||
}
|
||||
@@ -1417,7 +1535,11 @@ func (container *Container) Copy(resource string) (archive.Archive, error) {
|
||||
filter = []string{path.Base(basePath)}
|
||||
basePath = path.Dir(basePath)
|
||||
}
|
||||
return archive.TarFilter(basePath, archive.Uncompressed, filter)
|
||||
return archive.TarFilter(basePath, &archive.TarOptions{
|
||||
Compression: archive.Uncompressed,
|
||||
Includes: filter,
|
||||
Recursive: true,
|
||||
})
|
||||
}
|
||||
|
||||
// Returns true if the container exposes a certain port
|
||||
@@ -1425,3 +1547,13 @@ func (container *Container) Exposes(p Port) bool {
|
||||
_, exists := container.Config.ExposedPorts[p]
|
||||
return exists
|
||||
}
|
||||
|
||||
func (container *Container) GetPtyMaster() (*os.File, error) {
|
||||
if container.ptyMaster == nil {
|
||||
return nil, ErrNoTTY
|
||||
}
|
||||
if pty, ok := container.ptyMaster.(*os.File); ok {
|
||||
return pty, nil
|
||||
}
|
||||
return nil, ErrNotATTY
|
||||
}
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
#
|
||||
# This script provides supports completion of:
|
||||
# - commands and their options
|
||||
# - container ids
|
||||
# - container ids and names
|
||||
# - image repos and tags
|
||||
# - filepaths
|
||||
#
|
||||
@@ -25,21 +25,24 @@ __docker_containers_all()
|
||||
{
|
||||
local containers
|
||||
containers="$( docker ps -a -q )"
|
||||
COMPREPLY=( $( compgen -W "$containers" -- "$cur" ) )
|
||||
names="$( docker inspect -format '{{.Name}}' $containers | sed 's,^/,,' )"
|
||||
COMPREPLY=( $( compgen -W "$names $containers" -- "$cur" ) )
|
||||
}
|
||||
|
||||
__docker_containers_running()
|
||||
{
|
||||
local containers
|
||||
containers="$( docker ps -q )"
|
||||
COMPREPLY=( $( compgen -W "$containers" -- "$cur" ) )
|
||||
names="$( docker inspect -format '{{.Name}}' $containers | sed 's,^/,,' )"
|
||||
COMPREPLY=( $( compgen -W "$names $containers" -- "$cur" ) )
|
||||
}
|
||||
|
||||
__docker_containers_stopped()
|
||||
{
|
||||
local containers
|
||||
containers="$( comm -13 <(docker ps -q | sort -u) <(docker ps -a -q | sort -u) )"
|
||||
COMPREPLY=( $( compgen -W "$containers" -- "$cur" ) )
|
||||
names="$( docker inspect -format '{{.Name}}' $containers | sed 's,^/,,' )"
|
||||
COMPREPLY=( $( compgen -W "$names $containers" -- "$cur" ) )
|
||||
}
|
||||
|
||||
__docker_image_repos()
|
||||
@@ -70,8 +73,9 @@ __docker_containers_and_images()
|
||||
{
|
||||
local containers images
|
||||
containers="$( docker ps -a -q )"
|
||||
names="$( docker inspect -format '{{.Name}}' $containers | sed 's,^/,,' )"
|
||||
images="$( docker images | awk 'NR>1{print $1":"$2}' )"
|
||||
COMPREPLY=( $( compgen -W "$images $containers" -- "$cur" ) )
|
||||
COMPREPLY=( $( compgen -W "$images $names $containers" -- "$cur" ) )
|
||||
__ltrim_colon_completions "$cur"
|
||||
}
|
||||
|
||||
|
||||
170
contrib/docker-device-tool/device_tool.go
Normal file
170
contrib/docker-device-tool/device_tool.go
Normal file
@@ -0,0 +1,170 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"github.com/dotcloud/docker/graphdriver/devmapper"
|
||||
"os"
|
||||
"path"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func usage() {
|
||||
fmt.Fprintf(os.Stderr, "Usage: %s <flags> [status] | [list] | [device id] | [resize new-pool-size] | [snap new-id base-id] | [remove id] | [mount id mountpoint]\n", os.Args[0])
|
||||
flag.PrintDefaults()
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func byteSizeFromString(arg string) (int64, error) {
|
||||
digits := ""
|
||||
rest := ""
|
||||
last := strings.LastIndexAny(arg, "0123456789")
|
||||
if last >= 0 {
|
||||
digits = arg[:last+1]
|
||||
rest = arg[last+1:]
|
||||
}
|
||||
|
||||
val, err := strconv.ParseInt(digits, 10, 64)
|
||||
if err != nil {
|
||||
return val, err
|
||||
}
|
||||
|
||||
rest = strings.ToLower(strings.TrimSpace(rest))
|
||||
|
||||
var multiplier int64 = 1
|
||||
switch rest {
|
||||
case "":
|
||||
multiplier = 1
|
||||
case "k", "kb":
|
||||
multiplier = 1024
|
||||
case "m", "mb":
|
||||
multiplier = 1024 * 1024
|
||||
case "g", "gb":
|
||||
multiplier = 1024 * 1024 * 1024
|
||||
case "t", "tb":
|
||||
multiplier = 1024 * 1024 * 1024 * 1024
|
||||
default:
|
||||
return 0, fmt.Errorf("Unknown size unit: %s", rest)
|
||||
}
|
||||
|
||||
return val * multiplier, nil
|
||||
}
|
||||
|
||||
func main() {
|
||||
root := flag.String("r", "/var/lib/docker", "Docker root dir")
|
||||
flDebug := flag.Bool("D", false, "Debug mode")
|
||||
|
||||
flag.Parse()
|
||||
|
||||
if *flDebug {
|
||||
os.Setenv("DEBUG", "1")
|
||||
}
|
||||
|
||||
if flag.NArg() < 1 {
|
||||
usage()
|
||||
}
|
||||
|
||||
args := flag.Args()
|
||||
|
||||
home := path.Join(*root, "devicemapper")
|
||||
devices, err := devmapper.NewDeviceSet(home, false)
|
||||
if err != nil {
|
||||
fmt.Println("Can't initialize device mapper: ", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
switch args[0] {
|
||||
case "status":
|
||||
status := devices.Status()
|
||||
fmt.Printf("Pool name: %s\n", status.PoolName)
|
||||
fmt.Printf("Data Loopback file: %s\n", status.DataLoopback)
|
||||
fmt.Printf("Metadata Loopback file: %s\n", status.MetadataLoopback)
|
||||
fmt.Printf("Sector size: %d\n", status.SectorSize)
|
||||
fmt.Printf("Data use: %d of %d (%.1f %%)\n", status.Data.Used, status.Data.Total, 100.0*float64(status.Data.Used)/float64(status.Data.Total))
|
||||
fmt.Printf("Metadata use: %d of %d (%.1f %%)\n", status.Metadata.Used, status.Metadata.Total, 100.0*float64(status.Metadata.Used)/float64(status.Metadata.Total))
|
||||
break
|
||||
case "list":
|
||||
ids := devices.List()
|
||||
sort.Strings(ids)
|
||||
for _, id := range ids {
|
||||
fmt.Println(id)
|
||||
}
|
||||
break
|
||||
case "device":
|
||||
if flag.NArg() < 2 {
|
||||
usage()
|
||||
}
|
||||
status, err := devices.GetDeviceStatus(args[1])
|
||||
if err != nil {
|
||||
fmt.Println("Can't get device info: ", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
fmt.Printf("Id: %d\n", status.DeviceId)
|
||||
fmt.Printf("Size: %d\n", status.Size)
|
||||
fmt.Printf("Transaction Id: %d\n", status.TransactionId)
|
||||
fmt.Printf("Size in Sectors: %d\n", status.SizeInSectors)
|
||||
fmt.Printf("Mapped Sectors: %d\n", status.MappedSectors)
|
||||
fmt.Printf("Highest Mapped Sector: %d\n", status.HighestMappedSector)
|
||||
break
|
||||
case "resize":
|
||||
if flag.NArg() < 2 {
|
||||
usage()
|
||||
}
|
||||
|
||||
size, err := byteSizeFromString(args[1])
|
||||
if err != nil {
|
||||
fmt.Println("Invalid size: ", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
err = devices.ResizePool(size)
|
||||
if err != nil {
|
||||
fmt.Println("Error resizeing pool: ", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
break
|
||||
case "snap":
|
||||
if flag.NArg() < 3 {
|
||||
usage()
|
||||
}
|
||||
|
||||
err := devices.AddDevice(args[1], args[2])
|
||||
if err != nil {
|
||||
fmt.Println("Can't create snap device: ", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
break
|
||||
case "remove":
|
||||
if flag.NArg() < 2 {
|
||||
usage()
|
||||
}
|
||||
|
||||
err := devices.RemoveDevice(args[1])
|
||||
if err != nil {
|
||||
fmt.Println("Can't remove device: ", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
break
|
||||
case "mount":
|
||||
if flag.NArg() < 3 {
|
||||
usage()
|
||||
}
|
||||
|
||||
err := devices.MountDevice(args[1], args[2], false)
|
||||
if err != nil {
|
||||
fmt.Println("Can't create snap device: ", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
break
|
||||
default:
|
||||
fmt.Printf("Unknown command %s\n", args[0])
|
||||
usage()
|
||||
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
@@ -1,11 +1,9 @@
|
||||
[Unit]
|
||||
Description=Docker Application Container Engine
|
||||
Documentation=http://docs.docker.io
|
||||
Requires=network.target
|
||||
After=multi-user.target
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
ExecStartPre=/bin/mount --make-rprivate /
|
||||
ExecStart=/usr/bin/docker -d
|
||||
|
||||
|
||||
@@ -1,15 +0,0 @@
|
||||
#!/bin/bash
|
||||
# Create a CentOS base image for Docker
|
||||
# From unclejack https://github.com/dotcloud/docker/issues/290
|
||||
set -e
|
||||
|
||||
MIRROR_URL="http://centos.netnitco.net/6.4/os/x86_64/"
|
||||
MIRROR_URL_UPDATES="http://centos.netnitco.net/6.4/updates/x86_64/"
|
||||
|
||||
yum install -y febootstrap xz
|
||||
|
||||
febootstrap -i bash -i coreutils -i tar -i bzip2 -i gzip -i vim-minimal -i wget -i patch -i diffutils -i iproute -i yum centos centos64 $MIRROR_URL -u $MIRROR_URL_UPDATES
|
||||
touch centos64/etc/resolv.conf
|
||||
touch centos64/sbin/init
|
||||
|
||||
tar --numeric-owner -Jcpf centos-64.tar.xz -C centos64 .
|
||||
@@ -142,14 +142,22 @@ if [ -z "$strictDebootstrap" ]; then
|
||||
# this forces dpkg not to call sync() after package extraction and speeds up install
|
||||
# the benefit is huge on spinning disks, and the penalty is nonexistent on SSD or decent server virtualization
|
||||
echo 'force-unsafe-io' | sudo tee etc/dpkg/dpkg.cfg.d/02apt-speedup > /dev/null
|
||||
# we want to effectively run "apt-get clean" after every install to keep images small
|
||||
echo 'DPkg::Post-Invoke {"/bin/rm -f /var/cache/apt/archives/*.deb || true";};' | sudo tee etc/apt/apt.conf.d/no-cache > /dev/null
|
||||
# we want to effectively run "apt-get clean" after every install to keep images small (see output of "apt-get clean -s" for context)
|
||||
{
|
||||
aptGetClean='"rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true";'
|
||||
echo "DPkg::Post-Invoke { ${aptGetClean} };"
|
||||
echo "APT::Update::Post-Invoke { ${aptGetClean} };"
|
||||
echo 'Dir::Cache::pkgcache ""; Dir::Cache::srcpkgcache "";'
|
||||
} | sudo tee etc/apt/apt.conf.d/no-cache > /dev/null
|
||||
# and remove the translations, too
|
||||
echo 'Acquire::Languages "none";' | sudo tee etc/apt/apt.conf.d/no-languages > /dev/null
|
||||
|
||||
# helpful undo lines for each the above tweaks (for lack of a better home to keep track of them):
|
||||
# rm /usr/sbin/policy-rc.d
|
||||
# rm /sbin/initctl; dpkg-divert --rename --remove /sbin/initctl
|
||||
# rm /etc/dpkg/dpkg.cfg.d/02apt-speedup
|
||||
# rm /etc/apt/apt.conf.d/no-cache
|
||||
# rm /etc/apt/apt.conf.d/no-languages
|
||||
|
||||
if [ -z "$skipDetection" ]; then
|
||||
# see also rudimentary platform detection in hack/install.sh
|
||||
|
||||
112
contrib/mkimage-rinse.sh
Executable file
112
contrib/mkimage-rinse.sh
Executable file
@@ -0,0 +1,112 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
repo="$1"
|
||||
distro="$2"
|
||||
mirror="$3"
|
||||
|
||||
if [ ! "$repo" ] || [ ! "$distro" ]; then
|
||||
self="$(basename $0)"
|
||||
echo >&2 "usage: $self repo distro [mirror]"
|
||||
echo >&2
|
||||
echo >&2 " ie: $self username/centos centos-5"
|
||||
echo >&2 " $self username/centos centos-6"
|
||||
echo >&2
|
||||
echo >&2 " ie: $self username/slc slc-5"
|
||||
echo >&2 " $self username/slc slc-6"
|
||||
echo >&2
|
||||
echo >&2 " ie: $self username/centos centos-5 http://vault.centos.org/5.8/os/x86_64/CentOS/"
|
||||
echo >&2 " $self username/centos centos-6 http://vault.centos.org/6.3/os/x86_64/Packages/"
|
||||
echo >&2
|
||||
echo >&2 'See /etc/rinse for supported values of "distro" and for examples of'
|
||||
echo >&2 ' expected values of "mirror".'
|
||||
echo >&2
|
||||
echo >&2 'This script is tested to work with the original upstream version of rinse,'
|
||||
echo >&2 ' found at http://www.steve.org.uk/Software/rinse/ and also in Debian at'
|
||||
echo >&2 ' http://packages.debian.org/wheezy/rinse -- as always, YMMV.'
|
||||
echo >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
target="/tmp/docker-rootfs-rinse-$distro-$$-$RANDOM"
|
||||
|
||||
cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
|
||||
returnTo="$(pwd -P)"
|
||||
|
||||
rinseArgs=( --arch amd64 --distribution "$distro" --directory "$target" )
|
||||
if [ "$mirror" ]; then
|
||||
rinseArgs+=( --mirror "$mirror" )
|
||||
fi
|
||||
|
||||
set -x
|
||||
|
||||
mkdir -p "$target"
|
||||
|
||||
sudo rinse "${rinseArgs[@]}"
|
||||
|
||||
cd "$target"
|
||||
|
||||
# rinse fails a little at setting up /dev, so we'll just wipe it out and create our own
|
||||
sudo rm -rf dev
|
||||
sudo mkdir -m 755 dev
|
||||
(
|
||||
cd dev
|
||||
sudo ln -sf /proc/self/fd ./
|
||||
sudo mkdir -m 755 pts
|
||||
sudo mkdir -m 1777 shm
|
||||
sudo mknod -m 600 console c 5 1
|
||||
sudo mknod -m 600 initctl p
|
||||
sudo mknod -m 666 full c 1 7
|
||||
sudo mknod -m 666 null c 1 3
|
||||
sudo mknod -m 666 ptmx c 5 2
|
||||
sudo mknod -m 666 random c 1 8
|
||||
sudo mknod -m 666 tty c 5 0
|
||||
sudo mknod -m 666 tty0 c 4 0
|
||||
sudo mknod -m 666 urandom c 1 9
|
||||
sudo mknod -m 666 zero c 1 5
|
||||
)
|
||||
|
||||
# effectively: febootstrap-minimize --keep-zoneinfo --keep-rpmdb --keep-services "$target"
|
||||
# locales
|
||||
sudo rm -rf usr/{{lib,share}/locale,{lib,lib64}/gconv,bin/localedef,sbin/build-locale-archive}
|
||||
# docs
|
||||
sudo rm -rf usr/share/{man,doc,info,gnome/help}
|
||||
# cracklib
|
||||
sudo rm -rf usr/share/cracklib
|
||||
# i18n
|
||||
sudo rm -rf usr/share/i18n
|
||||
# yum cache
|
||||
sudo rm -rf var/cache/yum
|
||||
sudo mkdir -p --mode=0755 var/cache/yum
|
||||
# sln
|
||||
sudo rm -rf sbin/sln
|
||||
# ldconfig
|
||||
#sudo rm -rf sbin/ldconfig
|
||||
sudo rm -rf etc/ld.so.cache var/cache/ldconfig
|
||||
sudo mkdir -p --mode=0755 var/cache/ldconfig
|
||||
|
||||
# allow networking init scripts inside the container to work without extra steps
|
||||
echo 'NETWORKING=yes' | sudo tee etc/sysconfig/network > /dev/null
|
||||
|
||||
# to restore locales later:
|
||||
# yum reinstall glibc-common
|
||||
|
||||
version=
|
||||
if [ -r etc/redhat-release ]; then
|
||||
version="$(sed -E 's/^[^0-9.]*([0-9.]+).*$/\1/' etc/redhat-release)"
|
||||
elif [ -r etc/SuSE-release ]; then
|
||||
version="$(awk '/^VERSION/ { print $3 }' etc/SuSE-release)"
|
||||
fi
|
||||
|
||||
if [ -z "$version" ]; then
|
||||
echo >&2 "warning: cannot autodetect OS version, using $distro as tag"
|
||||
sleep 20
|
||||
version="$distro"
|
||||
fi
|
||||
|
||||
sudo tar --numeric-owner -c . | docker import - $repo:$version
|
||||
|
||||
docker run -i -t $repo:$version echo success
|
||||
|
||||
cd "$returnTo"
|
||||
sudo rm -rf "$target"
|
||||
77
contrib/mkseccomp.pl
Executable file
77
contrib/mkseccomp.pl
Executable file
@@ -0,0 +1,77 @@
|
||||
#!/usr/bin/perl
|
||||
#
|
||||
# A simple helper script to help people build seccomp profiles for
|
||||
# Docker/LXC. The goal is mostly to reduce the attack surface to the
|
||||
# kernel, by restricting access to rarely used, recently added or not used
|
||||
# syscalls.
|
||||
#
|
||||
# This script processes one or more files which contain the list of system
|
||||
# calls to be allowed. See mkseccomp.sample for more information how you
|
||||
# can configure the list of syscalls. When run, this script produces output
|
||||
# which, when stored in a file, can be passed to docker as follows:
|
||||
#
|
||||
# docker run -lxc-conf="lxc.seccomp=$file" <rest of arguments>
|
||||
#
|
||||
# The included sample file shows how to cut about a quarter of all syscalls,
|
||||
# which affecting most applications.
|
||||
#
|
||||
# For specific situations it is possible to reduce the list further. By
|
||||
# reducing the list to just those syscalls required by a certain application
|
||||
# you can make it difficult for unknown/unexpected code to run.
|
||||
#
|
||||
# Run this script as follows:
|
||||
#
|
||||
# ./mkseccomp.pl < mkseccomp.sample >syscalls.list
|
||||
# or
|
||||
# ./mkseccomp.pl mkseccomp.sample >syscalls.list
|
||||
#
|
||||
# Multiple files can be specified, in which case the lists of syscalls are
|
||||
# combined.
|
||||
#
|
||||
# By Martijn van Oosterhout <kleptog@svana.org> Nov 2013
|
||||
|
||||
# How it works:
|
||||
#
|
||||
# This program basically spawns two processes to form a chain like:
|
||||
#
|
||||
# <process data section to prefix __NR_> | cpp | <add header and filter unknown syscalls>
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
if( -t ) {
|
||||
print STDERR "Helper script to make seccomp filters for Docker/LXC.\n";
|
||||
print STDERR "Usage: mkseccomp.pl [files...]\n";
|
||||
exit 1;
|
||||
}
|
||||
|
||||
my $pid = open(my $in, "-|") // die "Couldn't fork1 ($!)\n";
|
||||
|
||||
if($pid == 0) { # Child
|
||||
$pid = open(my $out, "|-") // die "Couldn't fork2 ($!)\n";
|
||||
|
||||
if($pid == 0) { # Child, which execs cpp
|
||||
exec "cpp" or die "Couldn't exec cpp ($!)\n";
|
||||
exit 1;
|
||||
}
|
||||
|
||||
# Process the DATA section and output to cpp
|
||||
print $out "#include <sys/syscall.h>\n";
|
||||
while(<>) {
|
||||
if(/^\w/) {
|
||||
print $out "__NR_$_";
|
||||
}
|
||||
}
|
||||
close $out;
|
||||
exit 0;
|
||||
|
||||
}
|
||||
|
||||
# Print header and then process output from cpp.
|
||||
print "1\n";
|
||||
print "whitelist\n";
|
||||
|
||||
while(<$in>) {
|
||||
print if( /^[0-9]/ );
|
||||
}
|
||||
|
||||
444
contrib/mkseccomp.sample
Normal file
444
contrib/mkseccomp.sample
Normal file
@@ -0,0 +1,444 @@
|
||||
/* This sample file is an example for mkseccomp.pl to produce a seccomp file
|
||||
* which restricts syscalls that are only useful for an admin but allows the
|
||||
* vast majority of normal userspace programs to run normally.
|
||||
*
|
||||
* The format of this file is one line per syscall. This is then processed
|
||||
* and passed to 'cpp' to convert the names to numbers using whatever is
|
||||
* correct for your platform. As such C-style comments are permitted. Note
|
||||
* this also means that C preprocessor macros are also allowed. So it is
|
||||
* possible to create groups surrounded by #ifdef/#endif and control their
|
||||
* inclusion via #define (not #include).
|
||||
*
|
||||
* Syscalls that don't exist on your architecture are silently filtered out.
|
||||
* Syscalls marked with (*) are required for a container to spawn a bash
|
||||
* shell successfully (not necessarily full featured). Listing the same
|
||||
* syscall multiple times is no problem.
|
||||
*
|
||||
* If you want to make a list specifically for one application the easiest
|
||||
* way is to run the application under strace, like so:
|
||||
*
|
||||
* $ strace -f -q -c -o strace.out application args...
|
||||
*
|
||||
* Once you have a reasonable sample of the execution of the program, exit
|
||||
* it. The file strace.out will have a summary of the syscalls used. Copy
|
||||
* that list into this file, comment out everything else except the starred
|
||||
* syscalls (which you need for the container to start) and you're done.
|
||||
*
|
||||
* To get the list of syscalls from the strace output this works well for
|
||||
* me
|
||||
*
|
||||
* $ cut -c52 < strace.out
|
||||
*
|
||||
* This sample list was compiled as a combination of all the syscalls
|
||||
* available on i386 and amd64 on Ubuntu Precise, as such it may not contain
|
||||
* everything and not everything may be relevent for your system. This
|
||||
* shouldn't be a problem.
|
||||
*/
|
||||
|
||||
// Filesystem/File descriptor related
|
||||
access // (*)
|
||||
chdir // (*)
|
||||
chmod
|
||||
chown
|
||||
chown32
|
||||
close // (*)
|
||||
creat
|
||||
dup // (*)
|
||||
dup2 // (*)
|
||||
dup3
|
||||
epoll_create
|
||||
epoll_create1
|
||||
epoll_ctl
|
||||
epoll_ctl_old
|
||||
epoll_pwait
|
||||
epoll_wait
|
||||
epoll_wait_old
|
||||
eventfd
|
||||
eventfd2
|
||||
faccessat // (*)
|
||||
fadvise64
|
||||
fadvise64_64
|
||||
fallocate
|
||||
fanotify_init
|
||||
fanotify_mark
|
||||
ioctl // (*)
|
||||
fchdir
|
||||
fchmod
|
||||
fchmodat
|
||||
fchown
|
||||
fchown32
|
||||
fchownat
|
||||
fcntl // (*)
|
||||
fcntl64
|
||||
fdatasync
|
||||
fgetxattr
|
||||
flistxattr
|
||||
flock
|
||||
fremovexattr
|
||||
fsetxattr
|
||||
fstat // (*)
|
||||
fstat64
|
||||
fstatat64
|
||||
fstatfs
|
||||
fstatfs64
|
||||
fsync
|
||||
ftruncate
|
||||
ftruncate64
|
||||
getcwd // (*)
|
||||
getdents // (*)
|
||||
getdents64
|
||||
getxattr
|
||||
inotify_add_watch
|
||||
inotify_init
|
||||
inotify_init1
|
||||
inotify_rm_watch
|
||||
io_cancel
|
||||
io_destroy
|
||||
io_getevents
|
||||
io_setup
|
||||
io_submit
|
||||
lchown
|
||||
lchown32
|
||||
lgetxattr
|
||||
link
|
||||
linkat
|
||||
listxattr
|
||||
llistxattr
|
||||
llseek
|
||||
_llseek
|
||||
lremovexattr
|
||||
lseek // (*)
|
||||
lsetxattr
|
||||
lstat
|
||||
lstat64
|
||||
mkdir
|
||||
mkdirat
|
||||
mknod
|
||||
mknodat
|
||||
newfstatat
|
||||
_newselect
|
||||
oldfstat
|
||||
oldlstat
|
||||
oldolduname
|
||||
oldstat
|
||||
olduname
|
||||
oldwait4
|
||||
open // (*)
|
||||
openat // (*)
|
||||
pipe // (*)
|
||||
pipe2
|
||||
poll
|
||||
ppoll
|
||||
pread64
|
||||
preadv
|
||||
futimesat
|
||||
pselect6
|
||||
pwrite64
|
||||
pwritev
|
||||
read // (*)
|
||||
readahead
|
||||
readdir
|
||||
readlink
|
||||
readlinkat
|
||||
readv
|
||||
removexattr
|
||||
rename
|
||||
renameat
|
||||
rmdir
|
||||
select
|
||||
sendfile
|
||||
sendfile64
|
||||
setxattr
|
||||
splice
|
||||
stat // (*)
|
||||
stat64
|
||||
statfs // (*)
|
||||
statfs64
|
||||
symlink
|
||||
symlinkat
|
||||
sync
|
||||
sync_file_range
|
||||
sync_file_range2
|
||||
syncfs
|
||||
tee
|
||||
truncate
|
||||
truncate64
|
||||
umask
|
||||
unlink
|
||||
unlinkat
|
||||
ustat
|
||||
utime
|
||||
utimensat
|
||||
utimes
|
||||
write // (*)
|
||||
writev
|
||||
|
||||
// Network related
|
||||
accept
|
||||
accept4
|
||||
bind // (*)
|
||||
connect // (*)
|
||||
getpeername
|
||||
getsockname // (*)
|
||||
getsockopt
|
||||
listen
|
||||
recv
|
||||
recvfrom // (*)
|
||||
recvmmsg
|
||||
recvmsg
|
||||
send
|
||||
sendmmsg
|
||||
sendmsg
|
||||
sendto // (*)
|
||||
setsockopt
|
||||
shutdown
|
||||
socket // (*)
|
||||
socketcall
|
||||
socketpair
|
||||
|
||||
// Signal related
|
||||
pause
|
||||
rt_sigaction // (*)
|
||||
rt_sigpending
|
||||
rt_sigprocmask // (*)
|
||||
rt_sigqueueinfo
|
||||
rt_sigreturn // (*)
|
||||
rt_sigsuspend
|
||||
rt_sigtimedwait
|
||||
rt_tgsigqueueinfo
|
||||
sigaction
|
||||
sigaltstack // (*)
|
||||
signal
|
||||
signalfd
|
||||
signalfd4
|
||||
sigpending
|
||||
sigprocmask
|
||||
sigreturn
|
||||
sigsuspend
|
||||
|
||||
// Other needed POSIX
|
||||
alarm
|
||||
brk // (*)
|
||||
clock_adjtime
|
||||
clock_getres
|
||||
clock_gettime
|
||||
clock_nanosleep
|
||||
//clock_settime
|
||||
gettimeofday
|
||||
nanosleep
|
||||
nice
|
||||
sysinfo
|
||||
syslog
|
||||
time
|
||||
timer_create
|
||||
timer_delete
|
||||
timerfd_create
|
||||
timerfd_gettime
|
||||
timerfd_settime
|
||||
timer_getoverrun
|
||||
timer_gettime
|
||||
timer_settime
|
||||
times
|
||||
uname // (*)
|
||||
|
||||
// Memory control
|
||||
madvise
|
||||
mbind
|
||||
mincore
|
||||
mlock
|
||||
mlockall
|
||||
mmap // (*)
|
||||
mmap2
|
||||
mprotect // (*)
|
||||
mremap
|
||||
msync
|
||||
munlock
|
||||
munlockall
|
||||
munmap // (*)
|
||||
remap_file_pages
|
||||
set_mempolicy
|
||||
vmsplice
|
||||
|
||||
// Process control
|
||||
capget
|
||||
//capset
|
||||
clone // (*)
|
||||
execve // (*)
|
||||
exit // (*)
|
||||
exit_group // (*)
|
||||
fork
|
||||
getcpu
|
||||
getpgid
|
||||
getpgrp // (*)
|
||||
getpid // (*)
|
||||
getppid // (*)
|
||||
getpriority
|
||||
getresgid
|
||||
getresgid32
|
||||
getresuid
|
||||
getresuid32
|
||||
getrlimit // (*)
|
||||
getrusage
|
||||
getsid
|
||||
getuid // (*)
|
||||
getuid32
|
||||
getegid // (*)
|
||||
getegid32
|
||||
geteuid // (*)
|
||||
geteuid32
|
||||
getgid // (*)
|
||||
getgid32
|
||||
getgroups
|
||||
getgroups32
|
||||
getitimer
|
||||
get_mempolicy
|
||||
kill
|
||||
//personality
|
||||
prctl
|
||||
prlimit64
|
||||
sched_getaffinity
|
||||
sched_getparam
|
||||
sched_get_priority_max
|
||||
sched_get_priority_min
|
||||
sched_getscheduler
|
||||
sched_rr_get_interval
|
||||
//sched_setaffinity
|
||||
//sched_setparam
|
||||
//sched_setscheduler
|
||||
sched_yield
|
||||
setfsgid
|
||||
setfsgid32
|
||||
setfsuid
|
||||
setfsuid32
|
||||
setgid
|
||||
setgid32
|
||||
setgroups
|
||||
setgroups32
|
||||
setitimer
|
||||
setpgid // (*)
|
||||
setpriority
|
||||
setregid
|
||||
setregid32
|
||||
setresgid
|
||||
setresgid32
|
||||
setresuid
|
||||
setresuid32
|
||||
setreuid
|
||||
setreuid32
|
||||
setrlimit
|
||||
setsid
|
||||
setuid
|
||||
setuid32
|
||||
ugetrlimit
|
||||
vfork
|
||||
wait4 // (*)
|
||||
waitid
|
||||
waitpid
|
||||
|
||||
// IPC
|
||||
ipc
|
||||
mq_getsetattr
|
||||
mq_notify
|
||||
mq_open
|
||||
mq_timedreceive
|
||||
mq_timedsend
|
||||
mq_unlink
|
||||
msgctl
|
||||
msgget
|
||||
msgrcv
|
||||
msgsnd
|
||||
semctl
|
||||
semget
|
||||
semop
|
||||
semtimedop
|
||||
shmat
|
||||
shmctl
|
||||
shmdt
|
||||
shmget
|
||||
|
||||
// Linux specific, mostly needed for thread-related stuff
|
||||
arch_prctl // (*)
|
||||
get_robust_list
|
||||
get_thread_area
|
||||
gettid
|
||||
futex // (*)
|
||||
restart_syscall // (*)
|
||||
set_robust_list // (*)
|
||||
set_thread_area
|
||||
set_tid_address // (*)
|
||||
tgkill
|
||||
tkill
|
||||
|
||||
// Admin syscalls, these are blocked
|
||||
//acct
|
||||
//adjtimex
|
||||
//bdflush
|
||||
//chroot
|
||||
//create_module
|
||||
//delete_module
|
||||
//get_kernel_syms // Obsolete
|
||||
//idle // Obsolete
|
||||
//init_module
|
||||
//ioperm
|
||||
//iopl
|
||||
//ioprio_get
|
||||
//ioprio_set
|
||||
//kexec_load
|
||||
//lookup_dcookie // oprofile only?
|
||||
//migrate_pages // NUMA
|
||||
//modify_ldt
|
||||
//mount
|
||||
//move_pages // NUMA
|
||||
//name_to_handle_at // NFS server
|
||||
//nfsservctl // NFS server
|
||||
//open_by_handle_at // NFS server
|
||||
//perf_event_open
|
||||
//pivot_root
|
||||
//process_vm_readv // For debugger
|
||||
//process_vm_writev // For debugger
|
||||
//ptrace // For debugger
|
||||
//query_module
|
||||
//quotactl
|
||||
//reboot
|
||||
//setdomainname
|
||||
//sethostname
|
||||
//setns
|
||||
//settimeofday
|
||||
//sgetmask // Obsolete
|
||||
//ssetmask // Obsolete
|
||||
//stime
|
||||
//swapoff
|
||||
//swapon
|
||||
//_sysctl
|
||||
//sysfs
|
||||
//sys_setaltroot
|
||||
//umount
|
||||
//umount2
|
||||
//unshare
|
||||
//uselib
|
||||
//vhangup
|
||||
//vm86
|
||||
//vm86old
|
||||
|
||||
// Kernel key management
|
||||
//add_key
|
||||
//keyctl
|
||||
//request_key
|
||||
|
||||
// Unimplemented
|
||||
//afs_syscall
|
||||
//break
|
||||
//ftime
|
||||
//getpmsg
|
||||
//gtty
|
||||
//lock
|
||||
//madvise1
|
||||
//mpx
|
||||
//prof
|
||||
//profil
|
||||
//putpmsg
|
||||
//security
|
||||
//stty
|
||||
//tuxcall
|
||||
//ulimit
|
||||
//vserver
|
||||
@@ -4,6 +4,10 @@
|
||||
<dict>
|
||||
<key>name</key>
|
||||
<string>Dockerfile</string>
|
||||
<key>fileTypes</key>
|
||||
<array>
|
||||
<string>Dockerfile</string>
|
||||
</array>
|
||||
<key>patterns</key>
|
||||
<array>
|
||||
<dict>
|
||||
3
contrib/udev/80-docker.rules
Normal file
3
contrib/udev/80-docker.rules
Normal file
@@ -0,0 +1,3 @@
|
||||
# hide docker's loopback devices from udisks, and thus from user desktops
|
||||
SUBSYSTEM=="block", ENV{DM_NAME}=="docker-*", ENV{UDISKS_PRESENTATION_HIDE}="1", ENV{UDISKS_IGNORE}="1"
|
||||
SUBSYSTEM=="block", DEVPATH=="/devices/virtual/block/loop*", ATTR{loop/backing_file}=="/var/lib/docker/*", ENV{UDISKS_PRESENTATION_HIDE}="1", ENV{UDISKS_IGNORE}="1"
|
||||
@@ -17,3 +17,34 @@ meaning you can use Vagrant to control Docker containers.
|
||||
|
||||
* [docker-provider](https://github.com/fgrehm/docker-provider)
|
||||
* [vagrant-shell](https://github.com/destructuring/vagrant-shell)
|
||||
|
||||
## Setting up Vagrant-docker with the Remote API
|
||||
|
||||
The initial Docker upstart script will not work because it runs on `127.0.0.1`, which is not accessible to the host machine. Instead, we need to change the script to connect to `0.0.0.0`. To do this, modify `/etc/init/docker.conf` to look like this:
|
||||
|
||||
```
|
||||
description "Docker daemon"
|
||||
|
||||
start on filesystem and started lxc-net
|
||||
stop on runlevel [!2345]
|
||||
|
||||
respawn
|
||||
|
||||
script
|
||||
/usr/bin/docker -d -H=tcp://0.0.0.0:4243/
|
||||
end script
|
||||
```
|
||||
|
||||
Once that's done, you need to set up a SSH tunnel between your host machine and the vagrant machine that's running Docker. This can be done by running the following command in a host terminal:
|
||||
|
||||
```
|
||||
ssh -L 4243:localhost:4243 -p 2222 vagrant@localhost
|
||||
```
|
||||
|
||||
(The first 4243 is what your host can connect to, the second 4243 is what port Docker is running on in the vagrant machine, and the 2222 is the port Vagrant is providing for SSH. If VirtualBox is the VM you're using, you can see what value "2222" should be by going to: Network > Adapter 1 > Advanced > Port Forwarding in the VirtualBox GUI.)
|
||||
|
||||
Note that because the port has been changed, to run docker commands from within the command line you must run them like this:
|
||||
|
||||
```
|
||||
sudo docker -H 0.0.0.0:4243 < commands for docker >
|
||||
```
|
||||
|
||||
1
contrib/zfs/MAINTAINERS
Normal file
1
contrib/zfs/MAINTAINERS
Normal file
@@ -0,0 +1 @@
|
||||
Gurjeet Singh <gurjeet@singh.im> (gurjeet.singh.im)
|
||||
23
contrib/zfs/README.md
Normal file
23
contrib/zfs/README.md
Normal file
@@ -0,0 +1,23 @@
|
||||
# ZFS Storage Driver
|
||||
|
||||
This is a placeholder to declare the presence and status of ZFS storage driver
|
||||
for containers.
|
||||
|
||||
The current development is done in Gurjeet Singh's fork of Docker, under the
|
||||
branch named [zfs_driver].
|
||||
|
||||
[zfs_driver]: https://github.com/gurjeet/docker/tree/zfs_driver
|
||||
|
||||
|
||||
# Status
|
||||
|
||||
Alpha: The code is now capable of creating, running and destroying containers
|
||||
and images.
|
||||
|
||||
The code is under development. Contributions in the form of suggestions,
|
||||
code-reviews, and patches are welcome.
|
||||
|
||||
Please send the communication to gurjeet@singh.im and CC at least one Docker
|
||||
mailing list.
|
||||
|
||||
|
||||
@@ -23,21 +23,27 @@ func main() {
|
||||
sysinit.SysInit()
|
||||
return
|
||||
}
|
||||
// FIXME: Switch d and D ? (to be more sshd like)
|
||||
flVersion := flag.Bool("v", false, "Print version information and quit")
|
||||
flDaemon := flag.Bool("d", false, "Daemon mode")
|
||||
flDebug := flag.Bool("D", false, "Debug mode")
|
||||
flAutoRestart := flag.Bool("r", true, "Restart previously running containers")
|
||||
bridgeName := flag.String("b", "", "Attach containers to a pre-existing network bridge. Use 'none' to disable container networking")
|
||||
pidfile := flag.String("p", "/var/run/docker.pid", "File containing process PID")
|
||||
flRoot := flag.String("g", "/var/lib/docker", "Path to use as the root of the docker runtime.")
|
||||
flEnableCors := flag.Bool("api-enable-cors", false, "Enable CORS requests in the remote api.")
|
||||
flDns := flag.String("dns", "", "Set custom dns servers")
|
||||
flHosts := utils.ListOpts{fmt.Sprintf("unix://%s", docker.DEFAULTUNIXSOCKET)}
|
||||
flag.Var(&flHosts, "H", "tcp://host:port to bind/connect to or unix://path/to/socket to use")
|
||||
flEnableIptables := flag.Bool("iptables", true, "Disable iptables within docker")
|
||||
flDefaultIp := flag.String("ip", "0.0.0.0", "Default ip address to use when binding a containers ports")
|
||||
flInterContainerComm := flag.Bool("icc", true, "Enable inter-container communication")
|
||||
|
||||
var (
|
||||
flVersion = flag.Bool("v", false, "Print version information and quit")
|
||||
flDaemon = flag.Bool("d", false, "Enable daemon mode")
|
||||
flDebug = flag.Bool("D", false, "Enable debug mode")
|
||||
flAutoRestart = flag.Bool("r", true, "Restart previously running containers")
|
||||
bridgeName = flag.String("b", "", "Attach containers to a pre-existing network bridge; use 'none' to disable container networking")
|
||||
bridgeIp = flag.String("bip", "", "Use this CIDR notation address for the network bridge's IP, not compatible with -b")
|
||||
pidfile = flag.String("p", "/var/run/docker.pid", "Path to use for daemon PID file")
|
||||
flRoot = flag.String("g", "/var/lib/docker", "Path to use as the root of the docker runtime")
|
||||
flEnableCors = flag.Bool("api-enable-cors", false, "Enable CORS headers in the remote API")
|
||||
flDns = docker.NewListOpts(docker.ValidateIp4Address)
|
||||
flEnableIptables = flag.Bool("iptables", true, "Disable docker's addition of iptables rules")
|
||||
flDefaultIp = flag.String("ip", "0.0.0.0", "Default IP address to use when binding container ports")
|
||||
flInterContainerComm = flag.Bool("icc", true, "Enable inter-container communication")
|
||||
flGraphDriver = flag.String("s", "", "Force the docker runtime to use a specific storage driver")
|
||||
flHosts = docker.NewListOpts(docker.ValidateHost)
|
||||
flMtu = flag.Int("mtu", docker.DefaultNetworkMtu, "Set the containers network mtu")
|
||||
)
|
||||
flag.Var(&flDns, "dns", "Force docker to use specific DNS servers")
|
||||
flag.Var(&flHosts, "H", "Multiple tcp://host:port or unix://path/to/socket to bind in daemon mode, single connection otherwise")
|
||||
|
||||
flag.Parse()
|
||||
|
||||
@@ -45,16 +51,18 @@ func main() {
|
||||
showVersion()
|
||||
return
|
||||
}
|
||||
if len(flHosts) > 1 {
|
||||
flHosts = flHosts[1:] //trick to display a nice default value in the usage
|
||||
}
|
||||
for i, flHost := range flHosts {
|
||||
host, err := utils.ParseHost(docker.DEFAULTHTTPHOST, docker.DEFAULTHTTPPORT, flHost)
|
||||
if err == nil {
|
||||
flHosts[i] = host
|
||||
} else {
|
||||
log.Fatal(err)
|
||||
if flHosts.Len() == 0 {
|
||||
defaultHost := os.Getenv("DOCKER_HOST")
|
||||
|
||||
if defaultHost == "" || *flDaemon {
|
||||
// If we do not have a host, default to unix socket
|
||||
defaultHost = fmt.Sprintf("unix://%s", docker.DEFAULTUNIXSOCKET)
|
||||
}
|
||||
flHosts.Set(defaultHost)
|
||||
}
|
||||
|
||||
if *bridgeName != "" && *bridgeIp != "" {
|
||||
log.Fatal("You specified -b & -bip, mutually exclusive options. Please specify only one.")
|
||||
}
|
||||
|
||||
if *flDebug {
|
||||
@@ -67,6 +75,7 @@ func main() {
|
||||
flag.Usage()
|
||||
return
|
||||
}
|
||||
|
||||
eng, err := engine.New(*flRoot)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
@@ -77,28 +86,34 @@ func main() {
|
||||
job.Setenv("Root", *flRoot)
|
||||
job.SetenvBool("AutoRestart", *flAutoRestart)
|
||||
job.SetenvBool("EnableCors", *flEnableCors)
|
||||
job.Setenv("Dns", *flDns)
|
||||
job.SetenvList("Dns", flDns.GetAll())
|
||||
job.SetenvBool("EnableIptables", *flEnableIptables)
|
||||
job.Setenv("BridgeIface", *bridgeName)
|
||||
job.Setenv("BridgeIp", *bridgeIp)
|
||||
job.Setenv("DefaultIp", *flDefaultIp)
|
||||
job.SetenvBool("InterContainerCommunication", *flInterContainerComm)
|
||||
job.Setenv("GraphDriver", *flGraphDriver)
|
||||
job.SetenvInt("Mtu", *flMtu)
|
||||
if err := job.Run(); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
// Serve api
|
||||
job = eng.Job("serveapi", flHosts...)
|
||||
job = eng.Job("serveapi", flHosts.GetAll()...)
|
||||
job.SetenvBool("Logging", true)
|
||||
if err := job.Run(); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
} else {
|
||||
if len(flHosts) > 1 {
|
||||
if flHosts.Len() > 1 {
|
||||
log.Fatal("Please specify only one -H")
|
||||
}
|
||||
protoAddrParts := strings.SplitN(flHosts[0], "://", 2)
|
||||
protoAddrParts := strings.SplitN(flHosts.GetAll()[0], "://", 2)
|
||||
if err := docker.ParseCommands(protoAddrParts[0], protoAddrParts[1], flag.Args()...); err != nil {
|
||||
if sterr, ok := err.(*utils.StatusError); ok {
|
||||
os.Exit(sterr.Status)
|
||||
if sterr.Status != "" {
|
||||
log.Println(sterr.Status)
|
||||
}
|
||||
os.Exit(sterr.StatusCode)
|
||||
}
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
@@ -9,7 +9,7 @@ run apt-get install -y python-setuptools make
|
||||
run easy_install pip
|
||||
#from docs/requirements.txt, but here to increase cacheability
|
||||
run pip install Sphinx==1.1.3
|
||||
run pip install sphinxcontrib-httpdomain==1.1.8
|
||||
run pip install sphinxcontrib-httpdomain==1.1.9
|
||||
add . /docs
|
||||
run cd /docs; make docs
|
||||
|
||||
|
||||
@@ -1,2 +1,4 @@
|
||||
Andy Rothfusz <andy@dotcloud.com> (@metalivedev)
|
||||
Ken Cochrane <ken@dotcloud.com> (@kencochrane)
|
||||
James Turnbull <james@lovedthanlost.net> (@jamtur01)
|
||||
Sven Dowideit <SvenDowideit@fosiki.com> (@SvenDowideit)
|
||||
|
||||
@@ -41,24 +41,25 @@ its dependencies. There are two main ways to install this tool:
|
||||
|
||||
###Native Installation
|
||||
|
||||
* Install sphinx: `pip install sphinx`
|
||||
* Mac OS X: `[sudo] pip-2.7 install sphinx`
|
||||
* Install sphinx httpdomain contrib package: `pip install sphinxcontrib-httpdomain`
|
||||
* Mac OS X: `[sudo] pip-2.7 install sphinxcontrib-httpdomain`
|
||||
* If pip is not available you can probably install it using your favorite package manager as **python-pip**
|
||||
Install dependencies from `requirements.txt` file in your `docker/docs`
|
||||
directory:
|
||||
|
||||
* Linux: `pip install -r docs/requirements.txt`
|
||||
|
||||
* Mac OS X: `[sudo] pip-2.7 install -r docs/requirements.txt`
|
||||
|
||||
###Alternative Installation: Docker Container
|
||||
|
||||
If you're running ``docker`` on your development machine then you may
|
||||
find it easier and cleaner to use the Dockerfile. This installs Sphinx
|
||||
find it easier and cleaner to use the docs Dockerfile. This installs Sphinx
|
||||
in a container, adds the local ``docs/`` directory and builds the HTML
|
||||
docs inside the container, even starting a simple HTTP server on port
|
||||
8000 so that you can connect and see your changes. Just run ``docker
|
||||
build .`` and run the resulting image. This is the equivalent to
|
||||
``make clean server`` since each container starts clean.
|
||||
8000 so that you can connect and see your changes.
|
||||
|
||||
In the ``docs/`` directory, run:
|
||||
```docker build -t docker:docs . && docker run -p 8000:8000 docker:docs```
|
||||
In the ``docker`` source directory, run:
|
||||
```make docs```
|
||||
|
||||
This is the equivalent to ``make clean server`` since each container starts clean.
|
||||
|
||||
Usage
|
||||
-----
|
||||
@@ -127,7 +128,8 @@ Guides on using sphinx
|
||||
|
||||
* Code examples
|
||||
|
||||
* Start without $, so it's easy to copy and paste.
|
||||
* Start typed commands with ``$ `` (dollar space) so that they
|
||||
are easily differentiated from program output.
|
||||
* Use "sudo" with docker to ensure that your command is runnable
|
||||
even if they haven't [used the *docker*
|
||||
group](http://docs.docker.io/en/latest/use/basics/#why-sudo).
|
||||
@@ -136,7 +138,7 @@ Manpages
|
||||
--------
|
||||
|
||||
* To make the manpages, run ``make man``. Please note there is a bug
|
||||
in spinx 1.1.3 which makes this fail. Upgrade to the latest version
|
||||
in Sphinx 1.1.3 which makes this fail. Upgrade to the latest version
|
||||
of Sphinx.
|
||||
* Then preview the manpage by running ``man _build/man/docker.1``,
|
||||
where ``_build/man/docker.1`` is the path to the generated manfile
|
||||
|
||||
@@ -26,14 +26,43 @@ Docker Remote API
|
||||
2. Versions
|
||||
===========
|
||||
|
||||
The current version of the API is 1.7
|
||||
The current version of the API is 1.8
|
||||
|
||||
Calling /images/<name>/insert is the same as calling
|
||||
/v1.7/images/<name>/insert
|
||||
/v1.8/images/<name>/insert
|
||||
|
||||
You can still call an old version of the api using
|
||||
/v1.0/images/<name>/insert
|
||||
|
||||
|
||||
v1.8
|
||||
****
|
||||
|
||||
Full Documentation
|
||||
------------------
|
||||
|
||||
:doc:`docker_remote_api_v1.8`
|
||||
|
||||
What's new
|
||||
----------
|
||||
|
||||
.. http:post:: /build
|
||||
|
||||
**New!** This endpoint now returns build status as json stream. In case
|
||||
of a build error, it returns the exit status of the failed command.
|
||||
|
||||
.. http:get:: /containers/(id)/json
|
||||
|
||||
**New!** This endpoint now returns the host config for the container.
|
||||
|
||||
.. http:post:: /images/create
|
||||
.. http:post:: /images/(name)/insert
|
||||
.. http:post:: /images/(name)/push
|
||||
|
||||
**New!** progressDetail object was added in the JSON. It's now possible
|
||||
to get the current value and the total of the progress without having to
|
||||
parse the string.
|
||||
|
||||
v1.7
|
||||
****
|
||||
|
||||
|
||||
@@ -1078,7 +1078,7 @@ Monitor Docker's events
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
POST /events?since=1374067924
|
||||
GET /events?since=1374067924
|
||||
|
||||
**Example response**:
|
||||
|
||||
|
||||
@@ -1122,7 +1122,7 @@ Monitor Docker's events
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
POST /events?since=1374067924
|
||||
GET /events?since=1374067924
|
||||
|
||||
**Example response**:
|
||||
|
||||
|
||||
@@ -1093,7 +1093,7 @@ Monitor Docker's events
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
POST /events?since=1374067924
|
||||
GET /events?since=1374067924
|
||||
|
||||
**Example response**:
|
||||
|
||||
|
||||
@@ -1228,7 +1228,7 @@ Monitor Docker's events
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
POST /events?since=1374067924
|
||||
GET /events?since=1374067924
|
||||
|
||||
**Example response**:
|
||||
|
||||
|
||||
@@ -122,7 +122,6 @@ Create a container
|
||||
"AttachStdout":true,
|
||||
"AttachStderr":true,
|
||||
"PortSpecs":null,
|
||||
"Privileged": false,
|
||||
"Tty":false,
|
||||
"OpenStdin":false,
|
||||
"StdinOnce":false,
|
||||
@@ -132,12 +131,16 @@ Create a container
|
||||
],
|
||||
"Dns":null,
|
||||
"Image":"base",
|
||||
"Volumes":{},
|
||||
"Volumes":{
|
||||
"/tmp": {}
|
||||
},
|
||||
"VolumesFrom":"",
|
||||
"WorkingDir":""
|
||||
|
||||
"WorkingDir":"",
|
||||
"ExposedPorts":{
|
||||
"22/tcp": {}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
**Example response**:
|
||||
|
||||
.. sourcecode:: http
|
||||
@@ -361,9 +364,14 @@ Start a container
|
||||
|
||||
{
|
||||
"Binds":["/tmp:/tmp"],
|
||||
"LxcConf":{"lxc.utsname":"docker"}
|
||||
"LxcConf":{"lxc.utsname":"docker"},
|
||||
"PortBindings":{ "22/tcp": [{ "HostPort": "11022" }] },
|
||||
"Privileged":false,
|
||||
"PublishAllPorts":false
|
||||
}
|
||||
|
||||
Binds need to reference Volumes that were defined during container creation.
|
||||
|
||||
**Example response**:
|
||||
|
||||
.. sourcecode:: http
|
||||
@@ -990,10 +998,10 @@ Build an image from Dockerfile via stdin
|
||||
.. sourcecode:: http
|
||||
|
||||
HTTP/1.1 200 OK
|
||||
Content-Type: application/json
|
||||
|
||||
{{ STREAM }}
|
||||
|
||||
|
||||
The stream must be a tar archive compressed with one of the
|
||||
following algorithms: identity (no compression), gzip, bzip2,
|
||||
xz.
|
||||
@@ -1153,7 +1161,7 @@ Monitor Docker's events
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
POST /events?since=1374067924
|
||||
GET /events?since=1374067924
|
||||
|
||||
**Example response**:
|
||||
|
||||
|
||||
1281
docs/sources/api/docker_remote_api_v1.8.rst
Normal file
1281
docs/sources/api/docker_remote_api_v1.8.rst
Normal file
File diff suppressed because it is too large
Load Diff
@@ -19,7 +19,8 @@ Docker Registry API
|
||||
- It doesn’t have a local database
|
||||
- It will be open-sourced at some point
|
||||
|
||||
We expect that there will be multiple registries out there. To help to grasp the context, here are some examples of registries:
|
||||
We expect that there will be multiple registries out there. To help to grasp
|
||||
the context, here are some examples of registries:
|
||||
|
||||
- **sponsor registry**: such a registry is provided by a third-party hosting infrastructure as a convenience for their customers and the docker community as a whole. Its costs are supported by the third party, but the management and operation of the registry are supported by dotCloud. It features read/write access, and delegates authentication and authorization to the Index.
|
||||
- **mirror registry**: such a registry is provided by a third-party hosting infrastructure but is targeted at their customers only. Some mechanism (unspecified to date) ensures that public images are pulled from a sponsor registry to the mirror registry, to make sure that the customers of the third-party provider can “docker pull” those images locally.
|
||||
@@ -37,7 +38,10 @@ We expect that there will be multiple registries out there. To help to grasp the
|
||||
- local mount point;
|
||||
- remote docker addressed through SSH.
|
||||
|
||||
The latter would only require two new commands in docker, e.g. “registryget” and “registryput”, wrapping access to the local filesystem (and optionally doing consistency checks). Authentication and authorization are then delegated to SSH (e.g. with public keys).
|
||||
The latter would only require two new commands in docker, e.g. ``registryget``
|
||||
and ``registryput``, wrapping access to the local filesystem (and optionally
|
||||
doing consistency checks). Authentication and authorization are then delegated
|
||||
to SSH (e.g. with public keys).
|
||||
|
||||
2. Endpoints
|
||||
============
|
||||
|
||||
@@ -15,11 +15,13 @@ Registry & Index Spec
|
||||
---------
|
||||
|
||||
The Index is responsible for centralizing information about:
|
||||
|
||||
- User accounts
|
||||
- Checksums of the images
|
||||
- Public namespaces
|
||||
|
||||
The Index has different components:
|
||||
|
||||
- Web UI
|
||||
- Meta-data store (comments, stars, list public repositories)
|
||||
- Authentication service
|
||||
@@ -27,7 +29,7 @@ The Index has different components:
|
||||
|
||||
The index is authoritative for those information.
|
||||
|
||||
We expect that there will be only one instance of the index, run and managed by dotCloud.
|
||||
We expect that there will be only one instance of the index, run and managed by Docker Inc.
|
||||
|
||||
1.2 Registry
|
||||
------------
|
||||
@@ -37,7 +39,7 @@ We expect that there will be only one instance of the index, run and managed by
|
||||
- It delegates authentication and authorization to the Index Auth service using tokens
|
||||
- It supports different storage backends (S3, cloud files, local FS)
|
||||
- It doesn’t have a local database
|
||||
- It will be open-sourced at some point
|
||||
- `Source Code <https://github.com/dotcloud/docker-registry>`_
|
||||
|
||||
We expect that there will be multiple registries out there. To help to grasp the context, here are some examples of registries:
|
||||
|
||||
@@ -46,10 +48,6 @@ We expect that there will be multiple registries out there. To help to grasp the
|
||||
- **vendor registry**: such a registry is provided by a software vendor, who wants to distribute docker images. It would be operated and managed by the vendor. Only users authorized by the vendor would be able to get write access. Some images would be public (accessible for anyone), others private (accessible only for authorized users). Authentication and authorization would be delegated to the Index. The goal of vendor registries is to let someone do “docker pull basho/riak1.3” and automatically push from the vendor registry (instead of a sponsor registry); i.e. get all the convenience of a sponsor registry, while retaining control on the asset distribution.
|
||||
- **private registry**: such a registry is located behind a firewall, or protected by an additional security layer (HTTP authorization, SSL client-side certificates, IP address authorization...). The registry is operated by a private entity, outside of dotCloud’s control. It can optionally delegate additional authorization to the Index, but it is not mandatory.
|
||||
|
||||
.. note::
|
||||
|
||||
Mirror registries and private registries which do not use the Index don’t even need to run the registry code. They can be implemented by any kind of transport implementing HTTP GET and PUT. Read-only registries can be powered by a simple static HTTP server.
|
||||
|
||||
.. note::
|
||||
|
||||
The latter implies that while HTTP is the protocol of choice for a registry, multiple schemes are possible (and in some cases, trivial):
|
||||
@@ -57,12 +55,16 @@ We expect that there will be multiple registries out there. To help to grasp the
|
||||
- local mount point;
|
||||
- remote docker addressed through SSH.
|
||||
|
||||
The latter would only require two new commands in docker, e.g. “registryget” and “registryput”, wrapping access to the local filesystem (and optionally doing consistency checks). Authentication and authorization are then delegated to SSH (e.g. with public keys).
|
||||
The latter would only require two new commands in docker, e.g. ``registryget``
|
||||
and ``registryput``, wrapping access to the local filesystem (and optionally
|
||||
doing consistency checks). Authentication and authorization are then delegated
|
||||
to SSH (e.g. with public keys).
|
||||
|
||||
1.3 Docker
|
||||
----------
|
||||
|
||||
On top of being a runtime for LXC, Docker is the Registry client. It supports:
|
||||
|
||||
- Push / Pull on the registry
|
||||
- Client authentication on the Index
|
||||
|
||||
@@ -76,21 +78,33 @@ On top of being a runtime for LXC, Docker is the Registry client. It supports:
|
||||
|
||||
1. Contact the Index to know where I should download “samalba/busybox”
|
||||
2. Index replies:
|
||||
a. “samalba/busybox” is on Registry A
|
||||
b. here are the checksums for “samalba/busybox” (for all layers)
|
||||
a. ``samalba/busybox`` is on Registry A
|
||||
b. here are the checksums for ``samalba/busybox`` (for all layers)
|
||||
c. token
|
||||
3. Contact Registry A to receive the layers for “samalba/busybox” (all of them to the base image). Registry A is authoritative for “samalba/busybox” but keeps a copy of all inherited layers and serve them all from the same location.
|
||||
3. Contact Registry A to receive the layers for ``samalba/busybox`` (all of them to the base image). Registry A is authoritative for “samalba/busybox” but keeps a copy of all inherited layers and serve them all from the same location.
|
||||
4. registry contacts index to verify if token/user is allowed to download images
|
||||
5. Index returns true/false lettings registry know if it should proceed or error out
|
||||
6. Get the payload for all layers
|
||||
|
||||
It’s possible to run docker pull \https://<registry>/repositories/samalba/busybox. In this case, docker bypasses the Index. However the security is not guaranteed (in case Registry A is corrupted) because there won’t be any checksum checks.
|
||||
It's possible to run:
|
||||
|
||||
Currently registry redirects to s3 urls for downloads, going forward all downloads need to be streamed through the registry. The Registry will then abstract the calls to S3 by a top-level class which implements sub-classes for S3 and local storage.
|
||||
.. code-block:: bash
|
||||
|
||||
Token is only returned when the 'X-Docker-Token' header is sent with request.
|
||||
docker pull https://<registry>/repositories/samalba/busybox
|
||||
|
||||
Basic Auth is required to pull private repos. Basic auth isn't required for pulling public repos, but if one is provided, it needs to be valid and for an active account.
|
||||
In this case, Docker bypasses the Index. However the security is not guaranteed
|
||||
(in case Registry A is corrupted) because there won’t be any checksum checks.
|
||||
|
||||
Currently registry redirects to s3 urls for downloads, going forward all
|
||||
downloads need to be streamed through the registry. The Registry will then
|
||||
abstract the calls to S3 by a top-level class which implements sub-classes for
|
||||
S3 and local storage.
|
||||
|
||||
Token is only returned when the ``X-Docker-Token`` header is sent with request.
|
||||
|
||||
Basic Auth is required to pull private repos. Basic auth isn't required for
|
||||
pulling public repos, but if one is provided, it needs to be valid and for an
|
||||
active account.
|
||||
|
||||
API (pulling repository foo/bar):
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
@@ -159,7 +173,9 @@ API (pulling repository foo/bar):
|
||||
|
||||
**Index can be replaced!** For a private Registry deployed, a custom Index can be used to serve and validate token according to different policies.
|
||||
|
||||
Docker computes the checksums and submit them to the Index at the end of the push. When a repository name does not have checksums on the Index, it means that the push is in progress (since checksums are submitted at the end).
|
||||
Docker computes the checksums and submit them to the Index at the end of the
|
||||
push. When a repository name does not have checksums on the Index, it means
|
||||
that the push is in progress (since checksums are submitted at the end).
|
||||
|
||||
API (pushing repos foo/bar):
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
@@ -241,10 +257,11 @@ API (pushing repos foo/bar):
|
||||
2.3 Delete
|
||||
----------
|
||||
|
||||
If you need to delete something from the index or registry, we need a nice clean way to do that. Here is the workflow.
|
||||
If you need to delete something from the index or registry, we need a nice
|
||||
clean way to do that. Here is the workflow.
|
||||
|
||||
1. Docker contacts the index to request a delete of a repository “samalba/busybox” (authentication required with user credentials)
|
||||
2. If authentication works and repository is valid, “samalba/busybox” is marked as deleted and a temporary token is returned
|
||||
1. Docker contacts the index to request a delete of a repository ``samalba/busybox`` (authentication required with user credentials)
|
||||
2. If authentication works and repository is valid, ``samalba/busybox`` is marked as deleted and a temporary token is returned
|
||||
3. Send a delete request to the registry for the repository (along with the token)
|
||||
4. Registry A contacts the Index to verify the token (token must corresponds to the repository name)
|
||||
5. Index validates the token. Registry A deletes the repository and everything associated to it.
|
||||
@@ -316,24 +333,40 @@ The Index has two main purposes (along with its fancy social features):
|
||||
|
||||
3.1 Without an Index
|
||||
--------------------
|
||||
Using the Registry without the Index can be useful to store the images on a private network without having to rely on an external entity controlled by dotCloud.
|
||||
|
||||
In this case, the registry will be launched in a special mode (--standalone? --no-index?). In this mode, the only thing which changes is that Registry will never contact the Index to verify a token. It will be the Registry owner responsibility to authenticate the user who pushes (or even pulls) an image using any mechanism (HTTP auth, IP based, etc...).
|
||||
Using the Registry without the Index can be useful to store the images on a
|
||||
private network without having to rely on an external entity controlled by
|
||||
Docker Inc.
|
||||
|
||||
In this scenario, the Registry is responsible for the security in case of data corruption since the checksums are not delivered by a trusted entity.
|
||||
In this case, the registry will be launched in a special mode (--standalone?
|
||||
--no-index?). In this mode, the only thing which changes is that Registry will
|
||||
never contact the Index to verify a token. It will be the Registry owner
|
||||
responsibility to authenticate the user who pushes (or even pulls) an image
|
||||
using any mechanism (HTTP auth, IP based, etc...).
|
||||
|
||||
As hinted previously, a standalone registry can also be implemented by any HTTP server handling GET/PUT requests (or even only GET requests if no write access is necessary).
|
||||
In this scenario, the Registry is responsible for the security in case of data
|
||||
corruption since the checksums are not delivered by a trusted entity.
|
||||
|
||||
As hinted previously, a standalone registry can also be implemented by any HTTP
|
||||
server handling GET/PUT requests (or even only GET requests if no write access
|
||||
is necessary).
|
||||
|
||||
3.2 With an Index
|
||||
-----------------
|
||||
|
||||
The Index data needed by the Registry are simple:
|
||||
|
||||
- Serve the checksums
|
||||
- Provide and authorize a Token
|
||||
|
||||
In the scenario of a Registry running on a private network with the need of centralizing and authorizing, it’s easy to use a custom Index.
|
||||
In the scenario of a Registry running on a private network with the need of
|
||||
centralizing and authorizing, it’s easy to use a custom Index.
|
||||
|
||||
The only challenge will be to tell Docker to contact (and trust) this custom Index. Docker will be configurable at some point to use a specific Index, it’ll be the private entity responsibility (basically the organization who uses Docker in a private environment) to maintain the Index and the Docker’s configuration among its consumers.
|
||||
The only challenge will be to tell Docker to contact (and trust) this custom
|
||||
Index. Docker will be configurable at some point to use a specific Index, it’ll
|
||||
be the private entity responsibility (basically the organization who uses
|
||||
Docker in a private environment) to maintain the Index and the Docker’s
|
||||
configuration among its consumers.
|
||||
|
||||
4. The API
|
||||
==========
|
||||
@@ -343,16 +376,22 @@ The first version of the api is available here: https://github.com/jpetazzo/dock
|
||||
4.1 Images
|
||||
----------
|
||||
|
||||
The format returned in the images is not defined here (for layer and json), basically because Registry stores exactly the same kind of information as Docker uses to manage them.
|
||||
The format returned in the images is not defined here (for layer and JSON),
|
||||
basically because Registry stores exactly the same kind of information as
|
||||
Docker uses to manage them.
|
||||
|
||||
The format of ancestry is a line-separated list of image ids, in age order. I.e. the image’s parent is on the last line, the parent of the parent on the next-to-last line, etc.; if the image has no parent, the file is empty.
|
||||
The format of ancestry is a line-separated list of image ids, in age order,
|
||||
i.e. the image’s parent is on the last line, the parent of the parent on the
|
||||
next-to-last line, etc.; if the image has no parent, the file is empty.
|
||||
|
||||
GET /v1/images/<image_id>/layer
|
||||
PUT /v1/images/<image_id>/layer
|
||||
GET /v1/images/<image_id>/json
|
||||
PUT /v1/images/<image_id>/json
|
||||
GET /v1/images/<image_id>/ancestry
|
||||
PUT /v1/images/<image_id>/ancestry
|
||||
.. code-block:: bash
|
||||
|
||||
GET /v1/images/<image_id>/layer
|
||||
PUT /v1/images/<image_id>/layer
|
||||
GET /v1/images/<image_id>/json
|
||||
PUT /v1/images/<image_id>/json
|
||||
GET /v1/images/<image_id>/ancestry
|
||||
PUT /v1/images/<image_id>/ancestry
|
||||
|
||||
4.2 Users
|
||||
---------
|
||||
@@ -397,7 +436,9 @@ PUT /v1/users/<username>
|
||||
|
||||
4.2.3 Login (Index)
|
||||
^^^^^^^^^^^^^^^^^^^
|
||||
Does nothing else but asking for a user authentication. Can be used to validate credentials. HTTP Basic Auth for now, maybe change in future.
|
||||
|
||||
Does nothing else but asking for a user authentication. Can be used to validate
|
||||
credentials. HTTP Basic Auth for now, maybe change in future.
|
||||
|
||||
GET /v1/users
|
||||
|
||||
@@ -409,7 +450,10 @@ GET /v1/users
|
||||
4.3 Tags (Registry)
|
||||
-------------------
|
||||
|
||||
The Registry does not know anything about users. Even though repositories are under usernames, it’s just a namespace for the registry. Allowing us to implement organizations or different namespaces per user later, without modifying the Registry’s API.
|
||||
The Registry does not know anything about users. Even though repositories are
|
||||
under usernames, it’s just a namespace for the registry. Allowing us to
|
||||
implement organizations or different namespaces per user later, without
|
||||
modifying the Registry’s API.
|
||||
|
||||
The following naming restrictions apply:
|
||||
|
||||
@@ -443,7 +487,10 @@ DELETE /v1/repositories/<namespace>/<repo_name>/tags/<tag>
|
||||
4.4 Images (Index)
|
||||
------------------
|
||||
|
||||
For the Index to “resolve” the repository name to a Registry location, it uses the X-Docker-Endpoints header. In other terms, this requests always add a “X-Docker-Endpoints” to indicate the location of the registry which hosts this repository.
|
||||
For the Index to “resolve” the repository name to a Registry location, it uses
|
||||
the X-Docker-Endpoints header. In other terms, this requests always add a
|
||||
``X-Docker-Endpoints`` to indicate the location of the registry which hosts this
|
||||
repository.
|
||||
|
||||
4.4.1 Get the images
|
||||
^^^^^^^^^^^^^^^^^^^^^
|
||||
@@ -488,17 +535,20 @@ Return 202 OK
|
||||
======================
|
||||
|
||||
It’s possible to chain Registries server for several reasons:
|
||||
|
||||
- Load balancing
|
||||
- Delegate the next request to another server
|
||||
|
||||
When a Registry is a reference for a repository, it should host the entire images chain in order to avoid breaking the chain during the download.
|
||||
When a Registry is a reference for a repository, it should host the entire
|
||||
images chain in order to avoid breaking the chain during the download.
|
||||
|
||||
The Index and Registry use this mechanism to redirect on one or the other.
|
||||
|
||||
Example with an image download:
|
||||
On every request, a special header can be returned:
|
||||
|
||||
X-Docker-Endpoints: server1,server2
|
||||
On every request, a special header can be returned::
|
||||
|
||||
X-Docker-Endpoints: server1,server2
|
||||
|
||||
On the next request, the client will always pick a server from this list.
|
||||
|
||||
@@ -508,7 +558,8 @@ On the next request, the client will always pick a server from this list.
|
||||
6.1 On the Index
|
||||
-----------------
|
||||
|
||||
The Index supports both “Basic” and “Token” challenges. Usually when there is a “401 Unauthorized”, the Index replies this::
|
||||
The Index supports both “Basic” and “Token” challenges. Usually when there is a
|
||||
``401 Unauthorized``, the Index replies this::
|
||||
|
||||
401 Unauthorized
|
||||
WWW-Authenticate: Basic realm="auth required",Token
|
||||
@@ -547,11 +598,13 @@ The Registry only supports the Token challenge::
|
||||
401 Unauthorized
|
||||
WWW-Authenticate: Token
|
||||
|
||||
The only way is to provide a token on “401 Unauthorized” responses::
|
||||
The only way is to provide a token on ``401 Unauthorized`` responses::
|
||||
|
||||
Authorization: Token signature=123abc,repository=”foo/bar”,access=read
|
||||
Authorization: Token signature=123abc,repository="foo/bar",access=read
|
||||
|
||||
Usually, the Registry provides a Cookie when a Token verification succeeded. Every time the Registry passes a Cookie, you have to pass it back the same cookie.::
|
||||
Usually, the Registry provides a Cookie when a Token verification succeeded.
|
||||
Every time the Registry passes a Cookie, you have to pass it back the same
|
||||
cookie.::
|
||||
|
||||
200 OK
|
||||
Set-Cookie: session="wD/J7LqL5ctqw8haL10vgfhrb2Q=?foo=UydiYXInCnAxCi4=×tamp=RjEzNjYzMTQ5NDcuNDc0NjQzCi4="; Path=/; HttpOnly
|
||||
|
||||
@@ -12,12 +12,58 @@ To list available commands, either run ``docker`` with no parameters or execute
|
||||
|
||||
$ sudo docker
|
||||
Usage: docker [OPTIONS] COMMAND [arg...]
|
||||
-H=[unix:///var/run/docker.sock]: tcp://host:port to bind/connect to or unix://path/to/socket to use
|
||||
-H=[unix:///var/run/docker.sock]: tcp://[host[:port]] to bind/connect to or unix://[/path/to/socket] to use. When host=[0.0.0.0], port=[4243] or path=[/var/run/docker.sock] is omitted, default values are used.
|
||||
|
||||
A self-sufficient runtime for linux containers.
|
||||
|
||||
...
|
||||
|
||||
.. _cli_daemon:
|
||||
|
||||
``daemon``
|
||||
----------
|
||||
|
||||
::
|
||||
|
||||
Usage of docker:
|
||||
-D=false: Enable debug mode
|
||||
-H=[unix:///var/run/docker.sock]: tcp://[host[:port]] to bind or unix://[/path/to/socket] to use. When host=[0.0.0.0], port=[4243] or path=[/var/run/docker.sock] is omitted, default values are used.
|
||||
-api-enable-cors=false: Enable CORS headers in the remote API
|
||||
-b="": Attach containers to a pre-existing network bridge; use 'none' to disable container networking
|
||||
-bip="": Use the provided CIDR notation address for the dynamically created bridge (docker0); Mutually exclusive of -b
|
||||
-d=false: Enable daemon mode
|
||||
-dns="": Force docker to use specific DNS servers
|
||||
-g="/var/lib/docker": Path to use as the root of the docker runtime
|
||||
-icc=true: Enable inter-container communication
|
||||
-ip="0.0.0.0": Default IP address to use when binding container ports
|
||||
-iptables=true: Disable docker's addition of iptables rules
|
||||
-mtu=1500: Set the containers network mtu
|
||||
-p="/var/run/docker.pid": Path to use for daemon PID file
|
||||
-r=true: Restart previously running containers
|
||||
-s="": Force the docker runtime to use a specific storage driver
|
||||
-v=false: Print version information and quit
|
||||
|
||||
The Docker daemon is the persistent process that manages containers. Docker uses the same binary for both the
|
||||
daemon and client. To run the daemon you provide the ``-d`` flag.
|
||||
|
||||
To force Docker to use devicemapper as the storage driver, use ``docker -d -s devicemapper``.
|
||||
|
||||
To set the DNS server for all Docker containers, use ``docker -d -dns 8.8.8.8``.
|
||||
|
||||
To run the daemon with debug output, use ``docker -d -D``.
|
||||
|
||||
The docker client will also honor the ``DOCKER_HOST`` environment variable to set
|
||||
the ``-H`` flag for the client.
|
||||
|
||||
::
|
||||
|
||||
docker -H tcp://0.0.0.0:4243 ps
|
||||
# or
|
||||
export DOCKER_HOST="tcp://0.0.0.0:4243"
|
||||
docker ps
|
||||
# both are equal
|
||||
|
||||
|
||||
.. _cli_attach:
|
||||
|
||||
``attach``
|
||||
@@ -34,11 +80,12 @@ To list available commands, either run ``docker`` with no parameters or execute
|
||||
|
||||
You can detach from the container again (and leave it running) with
|
||||
``CTRL-c`` (for a quiet exit) or ``CTRL-\`` to get a stacktrace of
|
||||
the Docker client when it quits.
|
||||
the Docker client when it quits. When you detach from the container's
|
||||
process the exit code will be returned to the client.
|
||||
|
||||
To stop a container, use ``docker stop``
|
||||
To stop a container, use ``docker stop``.
|
||||
|
||||
To kill the container, use ``docker kill``
|
||||
To kill the container, use ``docker kill``.
|
||||
|
||||
.. _cli_attach_examples:
|
||||
|
||||
@@ -94,12 +141,11 @@ Examples:
|
||||
-no-cache: Do not use the cache when building the image.
|
||||
-rm: Remove intermediate containers after a successful build
|
||||
|
||||
The files at PATH or URL are called the "context" of the build. The
|
||||
build process may refer to any of the files in the context, for
|
||||
example when using an :ref:`ADD <dockerfile_add>` instruction. When a
|
||||
single ``Dockerfile`` is given as URL, then no context is set. When a
|
||||
git repository is set as URL, then the repository is used as the
|
||||
context
|
||||
The files at ``PATH`` or ``URL`` are called the "context" of the build. The
|
||||
build process may refer to any of the files in the context, for example when
|
||||
using an :ref:`ADD <dockerfile_add>` instruction. When a single ``Dockerfile``
|
||||
is given as ``URL``, then no context is set. When a Git repository is set as
|
||||
``URL``, then the repository is used as the context
|
||||
|
||||
.. _cli_build_examples:
|
||||
|
||||
@@ -110,7 +156,7 @@ Examples:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo docker build .
|
||||
$ sudo docker build .
|
||||
Uploading context 10240 bytes
|
||||
Step 1 : FROM busybox
|
||||
Pulling repository busybox
|
||||
@@ -134,13 +180,13 @@ Examples:
|
||||
---> f52f38b7823e
|
||||
Successfully built f52f38b7823e
|
||||
|
||||
This example specifies that the PATH is ``.``, and so all the files in
|
||||
the local directory get tar'd and sent to the Docker daemon. The PATH
|
||||
This example specifies that the ``PATH`` is ``.``, and so all the files in
|
||||
the local directory get tar'd and sent to the Docker daemon. The ``PATH``
|
||||
specifies where to find the files for the "context" of the build on
|
||||
the Docker daemon. Remember that the daemon could be running on a
|
||||
remote machine and that no parsing of the Dockerfile happens at the
|
||||
remote machine and that no parsing of the ``Dockerfile`` happens at the
|
||||
client side (where you're running ``docker build``). That means that
|
||||
*all* the files at PATH get sent, not just the ones listed to
|
||||
*all* the files at ``PATH`` get sent, not just the ones listed to
|
||||
:ref:`ADD <dockerfile_add>` in the ``Dockerfile``.
|
||||
|
||||
The transfer of context from the local machine to the Docker daemon is
|
||||
@@ -150,7 +196,7 @@ message.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo docker build -t vieux/apache:2.0 .
|
||||
$ sudo docker build -t vieux/apache:2.0 .
|
||||
|
||||
This will build like the previous example, but it will then tag the
|
||||
resulting image. The repository name will be ``vieux/apache`` and the
|
||||
@@ -159,20 +205,20 @@ tag will be ``2.0``
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo docker build - < Dockerfile
|
||||
$ sudo docker build - < Dockerfile
|
||||
|
||||
This will read a ``Dockerfile`` from *stdin* without context. Due to
|
||||
the lack of a context, no contents of any local directory will be sent
|
||||
to the ``docker`` daemon. Since there is no context, a Dockerfile
|
||||
to the ``docker`` daemon. Since there is no context, a ``Dockerfile``
|
||||
``ADD`` only works if it refers to a remote URL.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo docker build github.com/creack/docker-firefox
|
||||
$ sudo docker build github.com/creack/docker-firefox
|
||||
|
||||
This will clone the Github repository and use the cloned repository as
|
||||
This will clone the GitHub repository and use the cloned repository as
|
||||
context. The ``Dockerfile`` at the root of the repository is used as
|
||||
``Dockerfile``. Note that you can specify an arbitrary git repository
|
||||
``Dockerfile``. Note that you can specify an arbitrary Git repository
|
||||
by using the ``git://`` schema.
|
||||
|
||||
|
||||
@@ -192,28 +238,53 @@ by using the ``git://`` schema.
|
||||
-run="": Configuration to be applied when the image is launched with `docker run`.
|
||||
(ex: -run='{"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}')
|
||||
|
||||
Simple commit of an existing container
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
.. _cli_commit_examples:
|
||||
|
||||
Commit an existing container
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ docker ps
|
||||
$ sudo docker ps
|
||||
ID IMAGE COMMAND CREATED STATUS PORTS
|
||||
c3f279d17e0a ubuntu:12.04 /bin/bash 7 days ago Up 25 hours
|
||||
197387f1b436 ubuntu:12.04 /bin/bash 7 days ago Up 25 hours
|
||||
$ docker commit c3f279d17e0a SvenDowideit/testimage:version3
|
||||
f5283438590d
|
||||
$ docker images | head
|
||||
REPOSITORY TAG ID CREATED SIZE
|
||||
SvenDowideit/testimage version3 f5283438590d 16 seconds ago 204.2 MB (virtual 335.7 MB)
|
||||
REPOSITORY TAG ID CREATED VIRTUAL SIZE
|
||||
SvenDowideit/testimage version3 f5283438590d 16 seconds ago 335.7 MB
|
||||
|
||||
Change the command that a container runs
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Sometimes you have an application container running just a service and you need
|
||||
to make a quick change and then change it back.
|
||||
|
||||
In this example, we run a container with ``ls`` and then change the image to
|
||||
run ``ls /etc``.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ docker run -t -name test ubuntu ls
|
||||
bin boot dev etc home lib lib64 media mnt opt proc root run sbin selinux srv sys tmp usr var
|
||||
$ docker commit -run='{"Cmd": ["ls","/etc"]}' test test2
|
||||
933d16de9e70005304c1717b5c6f2f39d6fd50752834c6f34a155c70790011eb
|
||||
$ docker run -t test2
|
||||
adduser.conf gshadow login.defs rc0.d
|
||||
alternatives gshadow- logrotate.d rc1.d
|
||||
apt host.conf lsb-base rc2.d
|
||||
...
|
||||
|
||||
Full -run example
|
||||
.................
|
||||
|
||||
(multiline is ok within a single quote ``'``)
|
||||
The ``-run`` JSON hash changes the ``Config`` section when running ``docker inspect CONTAINERID``
|
||||
or ``config`` when running ``docker inspect IMAGEID``.
|
||||
|
||||
::
|
||||
(Multiline is okay within a single quote ``'``)
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ sudo docker commit -run='
|
||||
{
|
||||
@@ -256,7 +327,7 @@ Full -run example
|
||||
|
||||
Copy files/folders from the containers filesystem to the host
|
||||
path. Paths are relative to the root of the filesystem.
|
||||
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ sudo docker cp 7bb0e258aefe:/etc/debian_version .
|
||||
@@ -270,7 +341,7 @@ Full -run example
|
||||
::
|
||||
|
||||
Usage: docker diff CONTAINER
|
||||
|
||||
|
||||
List the changed files and directories in a container's filesystem
|
||||
|
||||
There are 3 events that are listed in the 'diff':
|
||||
@@ -279,7 +350,7 @@ There are 3 events that are listed in the 'diff':
|
||||
2. ```D``` - Delete
|
||||
3. ```C``` - Change
|
||||
|
||||
for example:
|
||||
For example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
@@ -307,7 +378,7 @@ for example:
|
||||
Usage: docker events
|
||||
|
||||
Get real time events from the server
|
||||
|
||||
|
||||
-since="": Show previously created events and then stream.
|
||||
(either seconds since epoch, or date string as below)
|
||||
|
||||
@@ -369,7 +440,13 @@ Show events in the past from a specified time
|
||||
|
||||
Usage: docker export CONTAINER
|
||||
|
||||
Export the contents of a filesystem as a tar archive
|
||||
Export the contents of a filesystem as a tar archive to STDOUT
|
||||
|
||||
For example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ sudo docker export red_panda > latest.tar
|
||||
|
||||
.. _cli_history:
|
||||
|
||||
@@ -385,7 +462,7 @@ Show events in the past from a specified time
|
||||
-notrunc=false: Don't truncate output
|
||||
-q=false: only show numeric IDs
|
||||
|
||||
To see how the docker:latest image was built:
|
||||
To see how the ``docker:latest`` image was built:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
@@ -417,7 +494,7 @@ To see how the docker:latest image was built:
|
||||
d5e85dc5b1d8 2 weeks ago /bin/sh -c apt-get update
|
||||
13e642467c11 2 weeks ago /bin/sh -c echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' > /etc/apt/sources.list
|
||||
ae6dde92a94e 2 weeks ago /bin/sh -c #(nop) MAINTAINER Solomon Hykes <solomon@dotcloud.com>
|
||||
ubuntu:12.04 6 months ago
|
||||
ubuntu:12.04 6 months ago
|
||||
|
||||
.. _cli_images:
|
||||
|
||||
@@ -435,23 +512,23 @@ To see how the docker:latest image was built:
|
||||
-q=false: only show numeric IDs
|
||||
-tree=false: output graph in tree format
|
||||
-viz=false: output graph in graphviz format
|
||||
|
||||
|
||||
Listing the most recently created images
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ sudo docker images | head
|
||||
REPOSITORY TAG IMAGE ID CREATED SIZE
|
||||
<none> <none> 77af4d6b9913 19 hours ago 30.53 MB (virtual 1.089 GB)
|
||||
committest latest b6fa739cedf5 19 hours ago 30.53 MB (virtual 1.089 GB)
|
||||
<none> <none> 78a85c484f71 19 hours ago 30.53 MB (virtual 1.089 GB)
|
||||
docker latest 30557a29d5ab 20 hours ago 30.53 MB (virtual 1.089 GB)
|
||||
<none> <none> 0124422dd9f9 20 hours ago 30.53 MB (virtual 1.089 GB)
|
||||
<none> <none> 18ad6fad3402 22 hours ago 23.68 MB (virtual 1.082 GB)
|
||||
<none> <none> f9f1e26352f0 23 hours ago 30.46 MB (virtual 1.089 GB)
|
||||
tryout latest 2629d1fa0b81 23 hours ago 16.4 kB (virtual 131.5 MB)
|
||||
<none> <none> 5ed6274db6ce 24 hours ago 30.44 MB (virtual 1.089 GB)
|
||||
REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE
|
||||
<none> <none> 77af4d6b9913 19 hours ago 1.089 GB
|
||||
committest latest b6fa739cedf5 19 hours ago 1.089 GB
|
||||
<none> <none> 78a85c484f71 19 hours ago 1.089 GB
|
||||
docker latest 30557a29d5ab 20 hours ago 1.089 GB
|
||||
<none> <none> 0124422dd9f9 20 hours ago 1.089 GB
|
||||
<none> <none> 18ad6fad3402 22 hours ago 1.082 GB
|
||||
<none> <none> f9f1e26352f0 23 hours ago 1.089 GB
|
||||
tryout latest 2629d1fa0b81 23 hours ago 131.5 MB
|
||||
<none> <none> 5ed6274db6ce 24 hours ago 1.089 GB
|
||||
|
||||
Listing the full length image IDs
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
@@ -459,16 +536,16 @@ Listing the full length image IDs
|
||||
.. code-block:: bash
|
||||
|
||||
$ sudo docker images -notrunc | head
|
||||
REPOSITORY TAG IMAGE ID CREATED SIZE
|
||||
<none> <none> 77af4d6b9913e693e8d0b4b294fa62ade6054e6b2f1ffb617ac955dd63fb0182 19 hours ago 30.53 MB (virtual 1.089 GB)
|
||||
committest latest b6fa739cedf5ea12a620a439402b6004d057da800f91c7524b5086a5e4749c9f 19 hours ago 30.53 MB (virtual 1.089 GB)
|
||||
<none> <none> 78a85c484f71509adeaace20e72e941f6bdd2b25b4c75da8693efd9f61a37921 19 hours ago 30.53 MB (virtual 1.089 GB)
|
||||
docker latest 30557a29d5abc51e5f1d5b472e79b7e296f595abcf19fe6b9199dbbc809c6ff4 20 hours ago 30.53 MB (virtual 1.089 GB)
|
||||
<none> <none> 0124422dd9f9cf7ef15c0617cda3931ee68346455441d66ab8bdc5b05e9fdce5 20 hours ago 30.53 MB (virtual 1.089 GB)
|
||||
<none> <none> 18ad6fad340262ac2a636efd98a6d1f0ea775ae3d45240d3418466495a19a81b 22 hours ago 23.68 MB (virtual 1.082 GB)
|
||||
<none> <none> f9f1e26352f0a3ba6a0ff68167559f64f3e21ff7ada60366e2d44a04befd1d3a 23 hours ago 30.46 MB (virtual 1.089 GB)
|
||||
tryout latest 2629d1fa0b81b222fca63371ca16cbf6a0772d07759ff80e8d1369b926940074 23 hours ago 16.4 kB (virtual 131.5 MB)
|
||||
<none> <none> 5ed6274db6ceb2397844896966ea239290555e74ef307030ebb01ff91b1914df 24 hours ago 30.44 MB (virtual 1.089 GB)
|
||||
REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE
|
||||
<none> <none> 77af4d6b9913e693e8d0b4b294fa62ade6054e6b2f1ffb617ac955dd63fb0182 19 hours ago 1.089 GB
|
||||
committest latest b6fa739cedf5ea12a620a439402b6004d057da800f91c7524b5086a5e4749c9f 19 hours ago 1.089 GB
|
||||
<none> <none> 78a85c484f71509adeaace20e72e941f6bdd2b25b4c75da8693efd9f61a37921 19 hours ago 1.089 GB
|
||||
docker latest 30557a29d5abc51e5f1d5b472e79b7e296f595abcf19fe6b9199dbbc809c6ff4 20 hours ago 1.089 GB
|
||||
<none> <none> 0124422dd9f9cf7ef15c0617cda3931ee68346455441d66ab8bdc5b05e9fdce5 20 hours ago 1.089 GB
|
||||
<none> <none> 18ad6fad340262ac2a636efd98a6d1f0ea775ae3d45240d3418466495a19a81b 22 hours ago 1.082 GB
|
||||
<none> <none> f9f1e26352f0a3ba6a0ff68167559f64f3e21ff7ada60366e2d44a04befd1d3a 23 hours ago 1.089 GB
|
||||
tryout latest 2629d1fa0b81b222fca63371ca16cbf6a0772d07759ff80e8d1369b926940074 23 hours ago 131.5 MB
|
||||
<none> <none> 5ed6274db6ceb2397844896966ea239290555e74ef307030ebb01ff91b1914df 24 hours ago 1.089 GB
|
||||
|
||||
Displaying images visually
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
@@ -488,15 +565,15 @@ Displaying image hierarchy
|
||||
|
||||
$ sudo docker images -tree
|
||||
|
||||
|─8dbd9e392a96 Size: 131.5 MB (virtual 131.5 MB) Tags: ubuntu:12.04,ubuntu:latest,ubuntu:precise
|
||||
├─8dbd9e392a96 Size: 131.5 MB (virtual 131.5 MB) Tags: ubuntu:12.04,ubuntu:latest,ubuntu:precise
|
||||
└─27cf78414709 Size: 180.1 MB (virtual 180.1 MB)
|
||||
└─b750fe79269d Size: 24.65 kB (virtual 180.1 MB) Tags: ubuntu:12.10,ubuntu:quantal
|
||||
|─f98de3b610d5 Size: 12.29 kB (virtual 180.1 MB)
|
||||
| └─7da80deb7dbf Size: 16.38 kB (virtual 180.1 MB)
|
||||
| └─65ed2fee0a34 Size: 20.66 kB (virtual 180.2 MB)
|
||||
| └─a2b9ea53dddc Size: 819.7 MB (virtual 999.8 MB)
|
||||
| └─a29b932eaba8 Size: 28.67 kB (virtual 999.9 MB)
|
||||
| └─e270a44f124d Size: 12.29 kB (virtual 999.9 MB) Tags: progrium/buildstep:latest
|
||||
├─f98de3b610d5 Size: 12.29 kB (virtual 180.1 MB)
|
||||
│ └─7da80deb7dbf Size: 16.38 kB (virtual 180.1 MB)
|
||||
│ └─65ed2fee0a34 Size: 20.66 kB (virtual 180.2 MB)
|
||||
│ └─a2b9ea53dddc Size: 819.7 MB (virtual 999.8 MB)
|
||||
│ └─a29b932eaba8 Size: 28.67 kB (virtual 999.9 MB)
|
||||
│ └─e270a44f124d Size: 12.29 kB (virtual 999.9 MB) Tags: progrium/buildstep:latest
|
||||
└─17e74ac162d8 Size: 53.93 kB (virtual 180.2 MB)
|
||||
└─339a3f56b760 Size: 24.65 kB (virtual 180.2 MB)
|
||||
└─904fcc40e34d Size: 96.7 MB (virtual 276.9 MB)
|
||||
@@ -519,13 +596,13 @@ Displaying image hierarchy
|
||||
|
||||
Usage: docker import URL|- [REPOSITORY[:TAG]]
|
||||
|
||||
Create a new filesystem image from the contents of a tarball
|
||||
Create an empty filesystem image and import the contents of the tarball
|
||||
(.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) into it, then optionally tag it.
|
||||
|
||||
At this time, the URL must start with ``http`` and point to a single
|
||||
file archive (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) containing a
|
||||
file archive (.tar, .tar.gz, .tgz, .bzip, .tar.xz, or .txz) containing a
|
||||
root filesystem. If you would like to import from a local directory or
|
||||
archive, you can use the ``-`` parameter to take the data from
|
||||
standard in.
|
||||
archive, you can use the ``-`` parameter to take the data from *stdin*.
|
||||
|
||||
Examples
|
||||
~~~~~~~~
|
||||
@@ -535,24 +612,30 @@ Import from a remote location
|
||||
|
||||
This will create a new untagged image.
|
||||
|
||||
``$ sudo docker import http://example.com/exampleimage.tgz``
|
||||
.. code-block:: bash
|
||||
|
||||
$ sudo docker import http://example.com/exampleimage.tgz
|
||||
|
||||
Import from a local file
|
||||
........................
|
||||
|
||||
Import to docker via pipe and standard in
|
||||
Import to docker via pipe and *stdin*.
|
||||
|
||||
``$ cat exampleimage.tgz | sudo docker import - exampleimagelocal:new``
|
||||
.. code-block:: bash
|
||||
|
||||
$ cat exampleimage.tgz | sudo docker import - exampleimagelocal:new
|
||||
|
||||
Import from a local directory
|
||||
.............................
|
||||
|
||||
``$ sudo tar -c . | docker import - exampleimagedir``
|
||||
.. code-block:: bash
|
||||
|
||||
Note the ``sudo`` in this example -- you must preserve the ownership
|
||||
of the files (especially root ownership) during the archiving with
|
||||
tar. If you are not root (or sudo) when you tar, then the ownerships
|
||||
might not get preserved.
|
||||
$ sudo tar -c . | docker import - exampleimagedir
|
||||
|
||||
Note the ``sudo`` in this example -- you must preserve the ownership of the
|
||||
files (especially root ownership) during the archiving with tar. If you are not
|
||||
root (or the sudo command) when you tar, then the ownerships might not get
|
||||
preserved.
|
||||
|
||||
.. _cli_info:
|
||||
|
||||
@@ -591,15 +674,22 @@ might not get preserved.
|
||||
|
||||
Insert a file from URL in the IMAGE at PATH
|
||||
|
||||
Use the specified ``IMAGE`` as the parent for a new image which adds a
|
||||
:ref:`layer <layer_def>` containing the new file. The ``insert`` command does
|
||||
not modify the original image, and the new image has the contents of the parent
|
||||
image, plus the new file.
|
||||
|
||||
|
||||
Examples
|
||||
~~~~~~~~
|
||||
|
||||
Insert file from github
|
||||
Insert file from GitHub
|
||||
.......................
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ sudo docker insert 8283e18b24bc https://raw.github.com/metalivedev/django/master/postinstall /tmp/postinstall.sh
|
||||
06fd35556d7b
|
||||
|
||||
.. _cli_inspect:
|
||||
|
||||
@@ -608,9 +698,55 @@ Insert file from github
|
||||
|
||||
::
|
||||
|
||||
Usage: docker inspect [OPTIONS] CONTAINER
|
||||
Usage: docker inspect CONTAINER|IMAGE [CONTAINER|IMAGE...]
|
||||
|
||||
Return low-level information on a container
|
||||
Return low-level information on a container/image
|
||||
|
||||
-format="": Format the output using the given go template.
|
||||
|
||||
By default, this will render all results in a JSON array. If a format
|
||||
is specified, the given template will be executed for each result.
|
||||
|
||||
Go's `text/template <http://golang.org/pkg/text/template/>`_ package
|
||||
describes all the details of the format.
|
||||
|
||||
Examples
|
||||
~~~~~~~~
|
||||
|
||||
Get an instance's IP Address
|
||||
............................
|
||||
|
||||
For the most part, you can pick out any field from the JSON in a
|
||||
fairly straightforward manner.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ sudo docker inspect -format='{{.NetworkSettings.IPAddress}}' $INSTANCE_ID
|
||||
|
||||
List All Port Bindings
|
||||
......................
|
||||
|
||||
One can loop over arrays and maps in the results to produce simple
|
||||
text output:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ sudo docker inspect -format='{{range $p, $conf := .NetworkSettings.Ports}} {{$p}} -> {{(index $conf 0).HostPort}} {{end}}' $INSTANCE_ID
|
||||
|
||||
Find a Specific Port Mapping
|
||||
............................
|
||||
|
||||
The ``.Field`` syntax doesn't work when the field name begins with a
|
||||
number, but the template language's ``index`` function does. The
|
||||
``.NetworkSettings.Ports`` section contains a map of the internal port
|
||||
mappings to a list of external address/port objects, so to grab just
|
||||
the numeric public port, you use ``index`` to find the specific port
|
||||
map, and then ``index`` 0 contains first object inside of that. Then
|
||||
we ask for the ``HostPort`` field to get the public address.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ sudo docker inspect -format='{{(index (index .NetworkSettings.Ports "8787/tcp") 0).HostPort}}' $INSTANCE_ID
|
||||
|
||||
.. _cli_kill:
|
||||
|
||||
@@ -676,6 +812,15 @@ Known Issues (kill)
|
||||
|
||||
Fetch the logs of a container
|
||||
|
||||
The ``docker logs`` command is a convenience which batch-retrieves whatever
|
||||
logs are present at the time of execution. This does not guarantee execution
|
||||
order when combined with a ``docker run`` (i.e. your run may not have generated
|
||||
any logs at the time you execute ``docker logs``).
|
||||
|
||||
The ``docker logs -f`` command combines ``docker logs`` and ``docker attach``:
|
||||
it will first return all logs from the beginning and then continue streaming
|
||||
new output from the container's stdout and stderr.
|
||||
|
||||
|
||||
.. _cli_port:
|
||||
|
||||
@@ -704,6 +849,15 @@ Known Issues (kill)
|
||||
-notrunc=false: Don't truncate output
|
||||
-q=false: Only display numeric IDs
|
||||
|
||||
Running ``docker ps`` showing 2 linked containers.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ docker ps
|
||||
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
|
||||
4c01db0b339c ubuntu:12.04 bash 17 seconds ago Up 16 seconds webapp
|
||||
d7886598dbe2 crosbymichael/redis:latest /redis-server --dir 33 minutes ago Up 33 minutes 6379/tcp redis,webapp/db
|
||||
|
||||
.. _cli_pull:
|
||||
|
||||
``pull``
|
||||
@@ -752,7 +906,7 @@ Known Issues (kill)
|
||||
-link="": Remove the link instead of the actual container
|
||||
|
||||
Known Issues (rm)
|
||||
~~~~~~~~~~~~~~~~~~~
|
||||
~~~~~~~~~~~~~~~~~
|
||||
|
||||
* :issue:`197` indicates that ``docker kill`` may leave directories
|
||||
behind and make it difficult to remove the container.
|
||||
@@ -763,7 +917,7 @@ Examples:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ docker rm /redis
|
||||
$ sudo docker rm /redis
|
||||
/redis
|
||||
|
||||
|
||||
@@ -772,7 +926,7 @@ This will remove the container referenced under the link ``/redis``.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ docker rm -link /webapp/redis
|
||||
$ sudo docker rm -link /webapp/redis
|
||||
/webapp/redis
|
||||
|
||||
|
||||
@@ -781,7 +935,7 @@ network communication.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ docker rm `docker ps -a -q`
|
||||
$ sudo docker rm `docker ps -a -q`
|
||||
|
||||
|
||||
This command will delete all stopped containers. The command ``docker ps -a -q`` will return all
|
||||
@@ -798,6 +952,38 @@ containers will not be deleted.
|
||||
Usage: docker rmi IMAGE [IMAGE...]
|
||||
|
||||
Remove one or more images
|
||||
|
||||
Removing tagged images
|
||||
~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Images can be removed either by their short or long ID's, or their image names.
|
||||
If an image has more than one name, each of them needs to be removed before the
|
||||
image is removed.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ sudo docker images
|
||||
REPOSITORY TAG IMAGE ID CREATED SIZE
|
||||
test1 latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB)
|
||||
test latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB)
|
||||
test2 latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB)
|
||||
|
||||
$ sudo docker rmi fd484f19954f
|
||||
Error: Conflict, cannot delete image fd484f19954f because it is tagged in multiple repositories
|
||||
2013/12/11 05:47:16 Error: failed to remove one or more images
|
||||
|
||||
$ sudo docker rmi test1
|
||||
Untagged: fd484f19954f4920da7ff372b5067f5b7ddb2fd3830cecd17b96ea9e286ba5b8
|
||||
$ sudo docker rmi test2
|
||||
Untagged: fd484f19954f4920da7ff372b5067f5b7ddb2fd3830cecd17b96ea9e286ba5b8
|
||||
|
||||
$ sudo docker images
|
||||
REPOSITORY TAG IMAGE ID CREATED SIZE
|
||||
test1 latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB)
|
||||
$ sudo docker rmi test
|
||||
Untagged: fd484f19954f4920da7ff372b5067f5b7ddb2fd3830cecd17b96ea9e286ba5b8
|
||||
Deleted: fd484f19954f4920da7ff372b5067f5b7ddb2fd3830cecd17b96ea9e286ba5b8
|
||||
|
||||
|
||||
.. _cli_run:
|
||||
|
||||
@@ -836,21 +1022,39 @@ containers will not be deleted.
|
||||
-name="": Assign the specified name to the container. If no name is specific docker will generate a random name
|
||||
-P=false: Publish all exposed ports to the host interfaces
|
||||
|
||||
Examples
|
||||
--------
|
||||
The ``docker run`` command first ``creates`` a writeable container layer over
|
||||
the specified image, and then ``starts`` it using the specified command. That
|
||||
is, ``docker run`` is equivalent to the API ``/containers/create`` then
|
||||
``/containers/(id)/start``.
|
||||
|
||||
The ``docker run`` command can be used in combination with ``docker commit`` to
|
||||
:ref:`change the command that a container runs <cli_commit_examples>`.
|
||||
|
||||
Known Issues (run -volumes-from)
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
* :issue:`2702`: "lxc-start: Permission denied - failed to mount"
|
||||
could indicate a permissions problem with AppArmor. Please see the
|
||||
issue for a workaround.
|
||||
|
||||
Examples:
|
||||
~~~~~~~~~
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo docker run -cidfile /tmp/docker_test.cid ubuntu echo "test"
|
||||
$ sudo docker run -cidfile /tmp/docker_test.cid ubuntu echo "test"
|
||||
|
||||
This will create a container and print "test" to the console. The
|
||||
``cidfile`` flag makes docker attempt to create a new file and write the
|
||||
container ID to it. If the file exists already, docker will return an
|
||||
error. Docker will close this file when docker run exits.
|
||||
This will create a container and print ``test`` to the console. The
|
||||
``cidfile`` flag makes Docker attempt to create a new file and write the
|
||||
container ID to it. If the file exists already, Docker will return an
|
||||
error. Docker will close this file when ``docker run`` exits.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
docker run mount -t tmpfs none /var/spool/squid
|
||||
$ sudo docker run -t -i -rm ubuntu bash
|
||||
root@bc338942ef20:/# mount -t tmpfs none /mnt
|
||||
mount: permission denied
|
||||
|
||||
|
||||
This will *not* work, because by default, most potentially dangerous
|
||||
kernel capabilities are dropped; including ``cap_sys_admin`` (which is
|
||||
@@ -859,7 +1063,12 @@ allow it to run:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
docker run -privileged mount -t tmpfs none /var/spool/squid
|
||||
$ sudo docker run -privileged ubuntu bash
|
||||
root@50e3f57e16e6:/# mount -t tmpfs none /mnt
|
||||
root@50e3f57e16e6:/# df -h
|
||||
Filesystem Size Used Avail Use% Mounted on
|
||||
none 1.9G 0 1.9G 0% /mnt
|
||||
|
||||
|
||||
The ``-privileged`` flag gives *all* capabilities to the container,
|
||||
and it also lifts all the limitations enforced by the ``device``
|
||||
@@ -869,15 +1078,15 @@ use-cases, like running Docker within Docker.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
docker run -w /path/to/dir/ -i -t ubuntu pwd
|
||||
$ sudo docker run -w /path/to/dir/ -i -t ubuntu pwd
|
||||
|
||||
The ``-w`` lets the command being executed inside directory given,
|
||||
here /path/to/dir/. If the path does not exists it is created inside the
|
||||
here ``/path/to/dir/``. If the path does not exists it is created inside the
|
||||
container.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
docker run -v `pwd`:`pwd` -w `pwd` -i -t ubuntu pwd
|
||||
$ sudo docker run -v `pwd`:`pwd` -w `pwd` -i -t ubuntu pwd
|
||||
|
||||
The ``-v`` flag mounts the current working directory into the container.
|
||||
The ``-w`` lets the command being executed inside the current
|
||||
@@ -887,15 +1096,15 @@ using the container, but inside the current working directory.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
docker run -p 127.0.0.1:80:8080 ubuntu bash
|
||||
$ sudo docker run -p 127.0.0.1:80:8080 ubuntu bash
|
||||
|
||||
This binds port ``8080`` of the container to port ``80`` on 127.0.0.1 of the
|
||||
This binds port ``8080`` of the container to port ``80`` on ``127.0.0.1`` of the
|
||||
host machine. :ref:`port_redirection` explains in detail how to manipulate ports
|
||||
in Docker.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
docker run -expose 80 ubuntu bash
|
||||
$ sudo docker run -expose 80 ubuntu bash
|
||||
|
||||
This exposes port ``80`` of the container for use within a link without
|
||||
publishing the port to the host system's interfaces. :ref:`port_redirection`
|
||||
@@ -903,14 +1112,14 @@ explains in detail how to manipulate ports in Docker.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
docker run -name console -t -i ubuntu bash
|
||||
$ sudo docker run -name console -t -i ubuntu bash
|
||||
|
||||
This will create and run a new container with the container name
|
||||
being ``console``.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
docker run -link /redis:redis -name console ubuntu bash
|
||||
$ sudo docker run -link /redis:redis -name console ubuntu bash
|
||||
|
||||
The ``-link`` flag will link the container named ``/redis`` into the
|
||||
newly created container with the alias ``redis``. The new container
|
||||
@@ -920,25 +1129,39 @@ to the newly created container.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
docker run -volumes-from 777f7dc92da7,ba8c0c54f0f2:ro -i -t ubuntu pwd
|
||||
$ sudo docker run -volumes-from 777f7dc92da7,ba8c0c54f0f2:ro -i -t ubuntu pwd
|
||||
|
||||
The ``-volumes-from`` flag mounts all the defined volumes from the
|
||||
refrence containers. Containers can be specified by a comma seperated
|
||||
referenced containers. Containers can be specified by a comma seperated
|
||||
list or by repetitions of the ``-volumes-from`` argument. The container
|
||||
id may be optionally suffixed with ``:ro`` or ``:rw`` to mount the volumes in
|
||||
ID may be optionally suffixed with ``:ro`` or ``:rw`` to mount the volumes in
|
||||
read-only or read-write mode, respectively. By default, the volumes are mounted
|
||||
in the same mode (rw or ro) as the reference container.
|
||||
in the same mode (read write or read only) as the reference container.
|
||||
|
||||
Known Issues (run -volumes-from)
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
A complete example
|
||||
..................
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ sudo docker run -d -name static static-web-files sh
|
||||
$ sudo docker run -d -expose=8098 -name riak riakserver
|
||||
$ sudo docker run -d -m 100m -e DEVELOPMENT=1 -e BRANCH=example-code -v $(pwd):/app/bin:ro -name app appserver
|
||||
$ sudo docker run -d -p 1443:443 -dns=dns.dev.org -v /var/log/httpd -volumes-from static -link riak -link app -h www.sven.dev.org -name web webserver
|
||||
$ sudo docker run -t -i -rm -volumes-from web -w /var/log/httpd busybox tail -f access.log
|
||||
|
||||
This example shows 5 containers that might be set up to test a web application change:
|
||||
|
||||
1. Start a pre-prepared volume image ``static-web-files`` (in the background) that has CSS, image and static HTML in it, (with a ``VOLUME`` instruction in the ``Dockerfile`` to allow the web server to use those files);
|
||||
2. Start a pre-prepared ``riakserver`` image, give the container name ``riak`` and expose port ``8098`` to any containers that link to it;
|
||||
3. Start the ``appserver`` image, restricting its memory usage to 100MB, setting two environment variables ``DEVELOPMENT`` and ``BRANCH`` and bind-mounting the current directory (``$(pwd)``) in the container in read-only mode as ``/app/bin``;
|
||||
4. Start the ``webserver``, mapping port ``443`` in the container to port ``1443`` on the Docker server, setting the DNS server to ``dns.dev.org``, creating a volume to put the log files into (so we can access it from another container), then importing the files from the volume exposed by the ``static`` container, and linking to all exposed ports from ``riak`` and ``app``. Lastly, we set the hostname to ``web.sven.dev.org`` so its consistent with the pre-generated SSL certificate;
|
||||
5. Finally, we create a container that runs ``tail -f access.log`` using the logs volume from the ``web`` container, setting the workdir to ``/var/log/httpd``. The ``-rm`` option means that when the container exits, the container's layer is removed.
|
||||
|
||||
* :issue:`2702`: "lxc-start: Permission denied - failed to mount"
|
||||
could indicate a permissions problem with AppArmor. Please see the
|
||||
issue for a workaround.
|
||||
|
||||
.. _cli_save:
|
||||
|
||||
``save``
|
||||
---------
|
||||
|
||||
::
|
||||
|
||||
@@ -969,7 +1192,7 @@ Known Issues (run -volumes-from)
|
||||
|
||||
::
|
||||
|
||||
Usage: docker start [OPTIONS] NAME
|
||||
Usage: docker start [OPTIONS] CONTAINER
|
||||
|
||||
Start a stopped container
|
||||
|
||||
@@ -1020,7 +1243,7 @@ The main process inside the container will receive SIGTERM, and after a grace pe
|
||||
``version``
|
||||
-----------
|
||||
|
||||
Show the version of the docker client, daemon, and latest released version.
|
||||
Show the version of the Docker client, daemon, and latest released version.
|
||||
|
||||
|
||||
.. _cli_wait:
|
||||
|
||||
@@ -42,11 +42,10 @@ This following command will build a development environment using the Dockerfile
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo docker build -t docker .
|
||||
sudo make build
|
||||
|
||||
|
||||
|
||||
If the build is successful, congratulations! You have produced a clean build of docker, neatly encapsulated in a standard build environment.
|
||||
If the build is successful, congratulations! You have produced a clean build of
|
||||
docker, neatly encapsulated in a standard build environment.
|
||||
|
||||
|
||||
Step 4: Build the Docker Binary
|
||||
@@ -56,10 +55,23 @@ To create the Docker binary, run this command:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo docker run -privileged -v `pwd`:/go/src/github.com/dotcloud/docker docker hack/make.sh binary
|
||||
sudo make binary
|
||||
|
||||
This will create the Docker binary in ``./bundles/<version>-dev/binary/``
|
||||
|
||||
Using your built Docker binary
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The binary is available outside the container in the directory
|
||||
``./bundles/<version>-dev/binary/``. You can swap your host docker executable
|
||||
with this binary for live testing - for example, on ubuntu:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo service docker stop ; sudo cp $(which docker) $(which docker)_ ; sudo cp ./bundles/<version>-dev/binary/docker-<version>-dev $(which docker);sudo service docker start
|
||||
|
||||
.. note:: Its safer to run the tests below before swapping your hosts docker binary.
|
||||
|
||||
|
||||
Step 5: Run the Tests
|
||||
---------------------
|
||||
@@ -68,10 +80,15 @@ To execute the test cases, run this command:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo docker run -privileged -v `pwd`:/go/src/github.com/dotcloud/docker docker hack/make.sh test
|
||||
sudo make test
|
||||
|
||||
|
||||
Note: if you're running the tests in vagrant, you need to specify a dns entry in the command: `-dns 8.8.8.8`
|
||||
Note: if you're running the tests in vagrant, you need to specify a dns entry in
|
||||
the command (either edit the Makefile, or run the step manually):
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo docker run -dns 8.8.8.8 -privileged -v `pwd`:/go/src/github.com/dotcloud/docker docker hack/make.sh test
|
||||
|
||||
If the test are successful then the tail of the output should look something like this
|
||||
|
||||
@@ -113,15 +130,24 @@ You can run an interactive session in the newly built container:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo docker run -privileged -i -t docker bash
|
||||
sudo make shell
|
||||
|
||||
# type 'exit' to exit
|
||||
# type 'exit' or Ctrl-D to exit
|
||||
|
||||
|
||||
Extra Step: Build and view the Documentation
|
||||
--------------------------------------------
|
||||
|
||||
.. note:: The binary is available outside the container in the directory ``./bundles/<version>-dev/binary/``. You can swap your host docker executable with this binary for live testing - for example, on ubuntu: ``sudo service docker stop ; sudo cp $(which docker) $(which docker)_ ; sudo cp ./bundles/<version>-dev/binary/docker-<version>-dev $(which docker);sudo service docker start``.
|
||||
If you want to read the documentation from a local website, or are making changes
|
||||
to it, you can build the documentation and then serve it by:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo make docs
|
||||
# when its done, you can point your browser to http://yourdockerhost:8000
|
||||
# type Ctrl-C to exit
|
||||
|
||||
|
||||
**Need More Help?**
|
||||
|
||||
If you need more help then hop on to the `#docker-dev IRC channel <irc://chat.freenode.net#docker-dev>`_ or post a message on the `Docker developer mailinglist <https://groups.google.com/d/forum/docker-dev>`_.
|
||||
If you need more help then hop on to the `#docker-dev IRC channel <irc://chat.freenode.net#docker-dev>`_ or post a message on the `Docker developer mailing list <https://groups.google.com/d/forum/docker-dev>`_.
|
||||
|
||||
137
docs/sources/examples/cfengine_process_management.rst
Normal file
137
docs/sources/examples/cfengine_process_management.rst
Normal file
@@ -0,0 +1,137 @@
|
||||
:title: Process Management with CFEngine
|
||||
:description: Managing containerized processes with CFEngine
|
||||
:keywords: cfengine, process, management, usage, docker, documentation
|
||||
|
||||
Process Management with CFEngine
|
||||
================================
|
||||
|
||||
Create Docker containers with managed processes.
|
||||
|
||||
Docker monitors one process in each running container and the container lives or dies with that process.
|
||||
By introducing CFEngine inside Docker containers, we can alleviate a few of the issues that may arise:
|
||||
|
||||
* It is possible to easily start multiple processes within a container, all of which will be managed automatically, with the normal ``docker run`` command.
|
||||
* If a managed process dies or crashes, CFEngine will start it again within 1 minute.
|
||||
* The container itself will live as long as the CFEngine scheduling daemon (cf-execd) lives. With CFEngine, we are able to decouple the life of the container from the uptime of the service it provides.
|
||||
|
||||
|
||||
How it works
|
||||
------------
|
||||
|
||||
CFEngine, together with the cfe-docker integration policies, are installed as part of the Dockerfile. This builds CFEngine into our Docker image.
|
||||
|
||||
The Dockerfile's ``ENTRYPOINT`` takes an arbitrary amount of commands (with any desired arguments) as parameters.
|
||||
When we run the Docker container these parameters get written to CFEngine policies and CFEngine takes over to ensure that the desired processes are running in the container.
|
||||
|
||||
CFEngine scans the process table for the ``basename`` of the commands given to the ``ENTRYPOINT`` and runs the command to start the process if the ``basename`` is not found.
|
||||
For example, if we start the container with ``docker run "/path/to/my/application parameters"``, CFEngine will look for a process named ``application`` and run the command.
|
||||
If an entry for ``application`` is not found in the process table at any point in time, CFEngine will execute ``/path/to/my/application parameters`` to start the application once again.
|
||||
The check on the process table happens every minute.
|
||||
|
||||
Note that it is therefore important that the command to start your application leaves a process with the basename of the command.
|
||||
This can be made more flexible by making some minor adjustments to the CFEngine policies, if desired.
|
||||
|
||||
|
||||
Usage
|
||||
-----
|
||||
|
||||
This example assumes you have Docker installed and working.
|
||||
We will install and manage ``apache2`` and ``sshd`` in a single container.
|
||||
|
||||
There are three steps:
|
||||
|
||||
1. Install CFEngine into the container.
|
||||
2. Copy the CFEngine Docker process management policy into the containerized CFEngine installation.
|
||||
3. Start your application processes as part of the ``docker run`` command.
|
||||
|
||||
|
||||
Building the container image
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The first two steps can be done as part of a Dockerfile, as follows.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
FROM ubuntu
|
||||
MAINTAINER Eystein Måløy Stenberg <eytein.stenberg@gmail.com>
|
||||
|
||||
RUN apt-get -y install wget lsb-release unzip
|
||||
|
||||
# install latest CFEngine
|
||||
RUN wget -qO- http://cfengine.com/pub/gpg.key | apt-key add -
|
||||
RUN echo "deb http://cfengine.com/pub/apt $(lsb_release -cs) main" > /etc/apt/sources.list.d/cfengine-community.list
|
||||
RUN apt-get update
|
||||
RUN apt-get install cfengine-community
|
||||
|
||||
# install cfe-docker process management policy
|
||||
RUN wget --no-check-certificate https://github.com/estenberg/cfe-docker/archive/master.zip -P /tmp/ && unzip /tmp/master.zip -d /tmp/
|
||||
RUN cp /tmp/cfe-docker-master/cfengine/bin/* /var/cfengine/bin/
|
||||
RUN cp /tmp/cfe-docker-master/cfengine/inputs/* /var/cfengine/inputs/
|
||||
RUN rm -rf /tmp/cfe-docker-master /tmp/master.zip
|
||||
|
||||
# apache2 and openssh are just for testing purposes, install your own apps here
|
||||
RUN apt-get -y install openssh-server apache2
|
||||
RUN mkdir -p /var/run/sshd
|
||||
RUN echo "root:password" | chpasswd # need a password for ssh
|
||||
|
||||
ENTRYPOINT ["/var/cfengine/bin/docker_processes_run.sh"]
|
||||
|
||||
|
||||
By saving this file as ``Dockerfile`` to a working directory, you can then build your container with the docker build command,
|
||||
e.g. ``docker build -t managed_image``.
|
||||
|
||||
Testing the container
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Start the container with ``apache2`` and ``sshd`` running and managed, forwarding a port to our SSH instance:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
docker run -p 127.0.0.1:222:22 -d managed_image "/usr/sbin/sshd" "/etc/init.d/apache2 start"
|
||||
|
||||
We now clearly see one of the benefits of the cfe-docker integration: it allows us to start several processes
|
||||
as part of a normal ``docker run`` command.
|
||||
|
||||
We can now log in to our new container and see that both ``apache2`` and ``sshd`` are running. We have set the root password to
|
||||
"password" in the Dockerfile above and can use that to log in with ssh:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
ssh -p222 root@127.0.0.1
|
||||
|
||||
ps -ef
|
||||
UID PID PPID C STIME TTY TIME CMD
|
||||
root 1 0 0 07:48 ? 00:00:00 /bin/bash /var/cfengine/bin/docker_processes_run.sh /usr/sbin/sshd /etc/init.d/apache2 start
|
||||
root 18 1 0 07:48 ? 00:00:00 /var/cfengine/bin/cf-execd -F
|
||||
root 20 1 0 07:48 ? 00:00:00 /usr/sbin/sshd
|
||||
root 32 1 0 07:48 ? 00:00:00 /usr/sbin/apache2 -k start
|
||||
www-data 34 32 0 07:48 ? 00:00:00 /usr/sbin/apache2 -k start
|
||||
www-data 35 32 0 07:48 ? 00:00:00 /usr/sbin/apache2 -k start
|
||||
www-data 36 32 0 07:48 ? 00:00:00 /usr/sbin/apache2 -k start
|
||||
root 93 20 0 07:48 ? 00:00:00 sshd: root@pts/0
|
||||
root 105 93 0 07:48 pts/0 00:00:00 -bash
|
||||
root 112 105 0 07:49 pts/0 00:00:00 ps -ef
|
||||
|
||||
|
||||
If we stop apache2, it will be started again within a minute by CFEngine.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
service apache2 status
|
||||
Apache2 is running (pid 32).
|
||||
service apache2 stop
|
||||
* Stopping web server apache2 ... waiting [ OK ]
|
||||
service apache2 status
|
||||
Apache2 is NOT running.
|
||||
# ... wait up to 1 minute...
|
||||
service apache2 status
|
||||
Apache2 is running (pid 173).
|
||||
|
||||
|
||||
Adapting to your applications
|
||||
-----------------------------
|
||||
|
||||
To make sure your applications get managed in the same manner, there are just two things you need to adjust from the above example:
|
||||
|
||||
* In the Dockerfile used above, install your applications instead of ``apache2`` and ``sshd``.
|
||||
* When you start the container with ``docker run``, specify the command line arguments to your applications rather than ``apache2`` and ``sshd``.
|
||||
@@ -131,8 +131,6 @@ Attach to the container to see the results in real-time.
|
||||
|
||||
- **"docker attach**" This will allow us to attach to a background
|
||||
process to see what is going on.
|
||||
- **"-sig-proxy=true"** Proxify all received signal to the process
|
||||
(even in non-tty mode)
|
||||
- **$CONTAINER_ID** The Id of the container we want to attach too.
|
||||
|
||||
Exit from the container attachment by pressing Control-C.
|
||||
|
||||
@@ -24,3 +24,5 @@ to more substantial services like those which you might find in production.
|
||||
postgresql_service
|
||||
mongodb
|
||||
running_riak_service
|
||||
using_supervisord
|
||||
cfengine_process_management
|
||||
|
||||
@@ -7,26 +7,18 @@
|
||||
PostgreSQL Service
|
||||
==================
|
||||
|
||||
.. include:: example_header.inc
|
||||
|
||||
.. note::
|
||||
|
||||
A shorter version of `this blog post`_.
|
||||
|
||||
.. note::
|
||||
|
||||
As of version 0.5.2, Docker requires root privileges to run.
|
||||
You have to either manually adjust your system configuration (permissions on
|
||||
/var/run/docker.sock or sudo config), or prefix `docker` with `sudo`. Check
|
||||
`this thread`_ for details.
|
||||
|
||||
.. _this blog post: http://zaiste.net/2013/08/docker_postgresql_how_to/
|
||||
.. _this thread: https://groups.google.com/forum/?fromgroups#!topic/docker-club/P3xDLqmLp0E
|
||||
|
||||
Installing PostgreSQL on Docker
|
||||
-------------------------------
|
||||
|
||||
For clarity I won't be showing command output.
|
||||
|
||||
Run an interactive shell in Docker container.
|
||||
Run an interactive shell in a Docker container.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
@@ -38,26 +30,26 @@ Update its dependencies.
|
||||
|
||||
apt-get update
|
||||
|
||||
Install ``python-software-properties``.
|
||||
Install ``python-software-properties``, ``software-properties-common``, ``wget`` and ``vim``.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
apt-get -y install python-software-properties
|
||||
apt-get -y install software-properties-common
|
||||
apt-get -y install python-software-properties software-properties-common wget vim
|
||||
|
||||
Add Pitti's PostgreSQL repository. It contains the most recent stable release
|
||||
of PostgreSQL i.e. ``9.2``.
|
||||
Add PostgreSQL's repository. It contains the most recent stable release
|
||||
of PostgreSQL, ``9.3``.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
add-apt-repository ppa:pitti/postgresql
|
||||
wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add -
|
||||
echo "deb http://apt.postgresql.org/pub/repos/apt/ precise-pgdg main" > /etc/apt/sources.list.d/pgdg.list
|
||||
apt-get update
|
||||
|
||||
Finally, install PostgreSQL 9.2
|
||||
Finally, install PostgreSQL 9.3
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
apt-get -y install postgresql-9.2 postgresql-client-9.2 postgresql-contrib-9.2
|
||||
apt-get -y install postgresql-9.3 postgresql-client-9.3 postgresql-contrib-9.3
|
||||
|
||||
Now, create a PostgreSQL superuser role that can create databases and
|
||||
other roles. Following Vagrant's convention the role will be named
|
||||
@@ -76,15 +68,14 @@ role.
|
||||
|
||||
Adjust PostgreSQL configuration so that remote connections to the
|
||||
database are possible. Make sure that inside
|
||||
``/etc/postgresql/9.2/main/pg_hba.conf`` you have following line (you will need
|
||||
to install an editor, e.g. ``apt-get install vim``):
|
||||
``/etc/postgresql/9.3/main/pg_hba.conf`` you have the following line:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
host all all 0.0.0.0/0 md5
|
||||
|
||||
Additionally, inside ``/etc/postgresql/9.2/main/postgresql.conf``
|
||||
uncomment ``listen_addresses`` so it is as follows:
|
||||
Additionally, inside ``/etc/postgresql/9.3/main/postgresql.conf``
|
||||
uncomment ``listen_addresses`` like so:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
@@ -94,7 +85,7 @@ uncomment ``listen_addresses`` so it is as follows:
|
||||
|
||||
This PostgreSQL setup is for development only purposes. Refer
|
||||
to the PostgreSQL documentation on how to fine-tune these settings so that it
|
||||
is enough secure.
|
||||
is secure enough.
|
||||
|
||||
Exit.
|
||||
|
||||
@@ -102,43 +93,43 @@ Exit.
|
||||
|
||||
exit
|
||||
|
||||
Create an image and assign it a name. ``<container_id>`` is in the
|
||||
Bash prompt; you can also locate it using ``docker ps -a``.
|
||||
Create an image from our container and assign it a name. The ``<container_id>``
|
||||
is in the Bash prompt; you can also locate it using ``docker ps -a``.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo docker commit <container_id> <your username>/postgresql
|
||||
|
||||
Finally, run PostgreSQL server via ``docker``.
|
||||
Finally, run the PostgreSQL server via ``docker``.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
CONTAINER=$(sudo docker run -d -p 5432 \
|
||||
-t <your username>/postgresql \
|
||||
/bin/su postgres -c '/usr/lib/postgresql/9.2/bin/postgres \
|
||||
-D /var/lib/postgresql/9.2/main \
|
||||
-c config_file=/etc/postgresql/9.2/main/postgresql.conf')
|
||||
/bin/su postgres -c '/usr/lib/postgresql/9.3/bin/postgres \
|
||||
-D /var/lib/postgresql/9.3/main \
|
||||
-c config_file=/etc/postgresql/9.3/main/postgresql.conf')
|
||||
|
||||
Connect the PostgreSQL server using ``psql`` (You will need postgres installed
|
||||
on the machine. For ubuntu, use something like
|
||||
``sudo apt-get install postgresql``).
|
||||
Connect to the PostgreSQL server using ``psql`` (You will need the
|
||||
postgresql client installed on the machine. For ubuntu, use something
|
||||
like ``sudo apt-get install postgresql-client``).
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
CONTAINER_IP=$(sudo docker inspect $CONTAINER | grep IPAddress | awk '{ print $2 }' | tr -d ',"')
|
||||
CONTAINER_IP=$(sudo docker inspect -format='{{.NetworkSettings.IPAddress}}' $CONTAINER)
|
||||
psql -h $CONTAINER_IP -p 5432 -d docker -U docker -W
|
||||
|
||||
As before, create roles or databases if needed.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
psql (9.2.4)
|
||||
psql (9.3.1)
|
||||
Type "help" for help.
|
||||
|
||||
docker=# CREATE DATABASE foo OWNER=docker;
|
||||
CREATE DATABASE
|
||||
|
||||
Additionally, publish your newly created image on Docker Index.
|
||||
Additionally, publish your newly created image on the Docker Index.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
@@ -160,9 +151,9 @@ container starts.
|
||||
.. code-block:: bash
|
||||
|
||||
sudo docker commit -run='{"Cmd": \
|
||||
["/bin/su", "postgres", "-c", "/usr/lib/postgresql/9.2/bin/postgres -D \
|
||||
/var/lib/postgresql/9.2/main -c \
|
||||
config_file=/etc/postgresql/9.2/main/postgresql.conf"], "PortSpecs": ["5432"]}' \
|
||||
["/bin/su", "postgres", "-c", "/usr/lib/postgresql/9.3/bin/postgres -D \
|
||||
/var/lib/postgresql/9.3/main -c \
|
||||
config_file=/etc/postgresql/9.3/main/postgresql.conf"], "PortSpecs": ["5432"]}' \
|
||||
<container_id> <your username>/postgresql
|
||||
|
||||
From now on, just type ``docker run <your username>/postgresql`` and
|
||||
|
||||
@@ -94,5 +94,13 @@ The password is ``screencast``.
|
||||
$ ifconfig
|
||||
$ ssh root@192.168.33.10 -p 49154
|
||||
# Thanks for watching, Thatcher thatcher@dotcloud.com
|
||||
|
||||
Update:
|
||||
-------
|
||||
|
||||
For Ubuntu 13.10 using stackbrew/ubuntu, you may need to do these additional steps:
|
||||
|
||||
1. change /etc/pam.d/sshd, pam_loginuid line 'required' to 'optional'
|
||||
2. echo LANG=\"en_US.UTF-8\" > /etc/default/locale
|
||||
|
||||
|
||||
|
||||
128
docs/sources/examples/using_supervisord.rst
Normal file
128
docs/sources/examples/using_supervisord.rst
Normal file
@@ -0,0 +1,128 @@
|
||||
:title: Using Supervisor with Docker
|
||||
:description: How to use Supervisor process management with Docker
|
||||
:keywords: docker, supervisor, process management
|
||||
|
||||
.. _using_supervisord:
|
||||
|
||||
Using Supervisor with Docker
|
||||
============================
|
||||
|
||||
.. include:: example_header.inc
|
||||
|
||||
Traditionally a Docker container runs a single process when it is launched, for
|
||||
example an Apache daemon or an SSH server daemon. Often though you want to run
|
||||
more than one process in a container. There are a number of ways you can
|
||||
achieve this ranging from using a simple Bash script as the value of your
|
||||
container's ``CMD`` instruction to installing a process management tool.
|
||||
|
||||
In this example we're going to make use of the process management tool,
|
||||
`Supervisor <http://supervisord.org/>`_, to manage multiple processes in our
|
||||
container. Using Supervisor allows us to better control, manage, and restart the
|
||||
processes we want to run. To demonstrate this we're going to install and manage both an
|
||||
SSH daemon and an Apache daemon.
|
||||
|
||||
Creating a Dockerfile
|
||||
---------------------
|
||||
|
||||
Let's start by creating a basic ``Dockerfile`` for our new image.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
FROM ubuntu:latest
|
||||
MAINTAINER examples@docker.io
|
||||
RUN echo "deb http://archive.ubuntu.com/ubuntu precise main universe" > /etc/apt/sources.list
|
||||
RUN apt-get update
|
||||
RUN apt-get upgrade -y
|
||||
|
||||
Installing Supervisor
|
||||
---------------------
|
||||
|
||||
We can now install our SSH and Apache daemons as well as Supervisor in our container.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
RUN apt-get install -y openssh-server apache2 supervisor
|
||||
RUN mkdir -p /var/run/sshd
|
||||
RUN mkdir -p /var/log/supervisor
|
||||
|
||||
Here we're installing the ``openssh-server``, ``apache2`` and ``supervisor``
|
||||
(which provides the Supervisor daemon) packages. We're also creating two new
|
||||
directories that are needed to run our SSH daemon and Supervisor.
|
||||
|
||||
Adding Supervisor's configuration file
|
||||
--------------------------------------
|
||||
|
||||
Now let's add a configuration file for Supervisor. The default file is called
|
||||
``supervisord.conf`` and is located in ``/etc/supervisor/conf.d/``.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
ADD supervisord.conf /etc/supervisor/conf.d/supervisord.conf
|
||||
|
||||
Let's see what is inside our ``supervisord.conf`` file.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
[supervisord]
|
||||
nodaemon=true
|
||||
|
||||
[program:sshd]
|
||||
command=/usr/sbin/sshd -D
|
||||
|
||||
[program:apache2]
|
||||
command=/bin/bash -c "source /etc/apache2/envvars && /usr/sbin/apache2 -DFOREGROUND"
|
||||
|
||||
The ``supervisord.conf`` configuration file contains directives that configure
|
||||
Supervisor and the processes it manages. The first block ``[supervisord]``
|
||||
provides configuration for Supervisor itself. We're using one directive,
|
||||
``nodaemon`` which tells Supervisor to run interactively rather than daemonize.
|
||||
|
||||
The next two blocks manage the services we wish to control. Each block controls
|
||||
a separate process. The blocks contain a single directive, ``command``, which
|
||||
specifies what command to run to start each process.
|
||||
|
||||
Exposing ports and running Supervisor
|
||||
-------------------------------------
|
||||
|
||||
Now let's finish our ``Dockerfile`` by exposing some required ports and
|
||||
specifying the ``CMD`` instruction to start Supervisor when our container
|
||||
launches.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
EXPOSE 22 80
|
||||
CMD ["/usr/bin/supervisord"]
|
||||
|
||||
Here we've exposed ports 22 and 80 on the container and we're running the
|
||||
``/usr/bin/supervisord`` binary when the container launches.
|
||||
|
||||
Building our container
|
||||
----------------------
|
||||
|
||||
We can now build our new container.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo docker build -t <yourname>/supervisord .
|
||||
|
||||
Running our Supervisor container
|
||||
--------------------------------
|
||||
|
||||
Once we've got a built image we can launch a container from it.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo docker run -p 22 -p 80 -t -i <yourname>/supervisor
|
||||
2013-11-25 18:53:22,312 CRIT Supervisor running as root (no user in config file)
|
||||
2013-11-25 18:53:22,312 WARN Included extra file "/etc/supervisor/conf.d/supervisord.conf" during parsing
|
||||
2013-11-25 18:53:22,342 INFO supervisord started with pid 1
|
||||
2013-11-25 18:53:23,346 INFO spawned: 'sshd' with pid 6
|
||||
2013-11-25 18:53:23,349 INFO spawned: 'apache2' with pid 7
|
||||
. . .
|
||||
|
||||
We've launched a new container interactively using the ``docker run`` command.
|
||||
That container has run Supervisor and launched the SSH and Apache daemons with
|
||||
it. We've specified the ``-p`` flag to expose ports 22 and 80. From here we can
|
||||
now identify the exposed ports and connect to one or both of the SSH and Apache
|
||||
daemons.
|
||||
|
||||
@@ -111,7 +111,7 @@ What does Docker add to just plain LXC?
|
||||
registry to store and transfer private containers, for internal
|
||||
server deployments for example.
|
||||
|
||||
* *Tool ecosystem.*
|
||||
* *Tool ecosystem.*
|
||||
Docker defines an API for automating and customizing the
|
||||
creation and deployment of containers. There are a huge number
|
||||
of tools integrating with Docker to extend its
|
||||
@@ -122,6 +122,11 @@ What does Docker add to just plain LXC?
|
||||
(Jenkins, Strider, Travis), etc. Docker is rapidly establishing
|
||||
itself as the standard for container-based tooling.
|
||||
|
||||
What is different between a Docker container and a VM?
|
||||
......................................................
|
||||
|
||||
There's a great StackOverflow answer `showing the differences <http://stackoverflow.com/questions/16047306/how-is-docker-io-different-from-a-normal-virtual-machine>`_.
|
||||
|
||||
Do I lose my data when the container exits?
|
||||
...........................................
|
||||
|
||||
@@ -129,6 +134,53 @@ Not at all! Any data that your application writes to disk gets preserved
|
||||
in its container until you explicitly delete the container. The file
|
||||
system for the container persists even after the container halts.
|
||||
|
||||
How far do Docker containers scale?
|
||||
...................................
|
||||
|
||||
Some of the largest server farms in the world today are based on containers.
|
||||
Large web deployments like Google and Twitter, and platform providers such as
|
||||
Heroku and dotCloud all run on container technology, at a scale of hundreds of
|
||||
thousands or even millions of containers running in parallel.
|
||||
|
||||
How do I connect Docker containers?
|
||||
...................................
|
||||
|
||||
Currently the recommended way to link containers is via the `link` primitive.
|
||||
You can see details of how to `work with links here
|
||||
<http://docs.docker.io/en/latest/use/working_with_links_names/>`_.
|
||||
|
||||
Also useful when enabling more flexible service portability is the
|
||||
`Ambassador linking pattern
|
||||
<http://docs.docker.io/en/latest/use/ambassador_pattern_linking/>`_.
|
||||
|
||||
How do I run more than one process in a Docker container?
|
||||
.........................................................
|
||||
|
||||
Any capable process supervisor such as http://supervisord.org/, runit, s6, or
|
||||
daemontools can do the trick. Docker will start up the process management
|
||||
daemon which will then fork to run additional processes. As long as the
|
||||
process manager daemon continues to run, the container will continue to as
|
||||
well. You can see a more substantial example `that uses supervisord here
|
||||
<http://docs.docker.io/en/latest/examples/using_supervisord/>`_.
|
||||
|
||||
What platforms does Docker run on?
|
||||
..................................
|
||||
|
||||
Linux:
|
||||
|
||||
- Ubuntu 12.04, 13.04 et al
|
||||
- Fedora 19/20+
|
||||
- RHEL 6.5+
|
||||
- Centos 6+
|
||||
- Gentoo
|
||||
- ArchLinux
|
||||
|
||||
Cloud:
|
||||
|
||||
- Amazon EC2
|
||||
- Google Compute Engine
|
||||
- Rackspace
|
||||
|
||||
Can I help by adding some questions and answers?
|
||||
................................................
|
||||
|
||||
|
||||
@@ -25,7 +25,7 @@ currently in active development, so this documentation will change
|
||||
frequently.
|
||||
|
||||
For an overview of Docker, please see the `Introduction
|
||||
<http://www.docker.io>`_. When you're ready to start working with
|
||||
<http://www.docker.io/learn_more/>`_. When you're ready to start working with
|
||||
Docker, we have a `quick start <http://www.docker.io/gettingstarted>`_
|
||||
and a more in-depth guide to :ref:`ubuntu_linux` and other
|
||||
:ref:`installation_list` paths including prebuilt binaries,
|
||||
|
||||
@@ -22,20 +22,32 @@ Amazon QuickStart
|
||||
|
||||
1. **Choose an image:**
|
||||
|
||||
* Launch the `Create Instance Wizard` <https://console.aws.amazon.com/ec2/v2/home?#LaunchInstanceWizard:> menu on your AWS Console
|
||||
* Select the "Community AMIs" option and search for ``amd64 precise`` (press enter to search)
|
||||
* If you choose a EBS enabled AMI you will be able to launch a `t1.micro` instance (more info on `pricing` <http://aws.amazon.com/en/ec2/pricing/> )
|
||||
* When you click select you'll be taken to the instance setup, and you're one click away from having your Ubuntu VM up and running.
|
||||
* Launch the `Create Instance Wizard
|
||||
<https://console.aws.amazon.com/ec2/v2/home?#LaunchInstanceWizard:>`_ menu
|
||||
on your AWS Console.
|
||||
|
||||
* Click the ``Select`` button for a 64Bit Ubuntu image. For example: Ubuntu Server 12.04.3 LTS
|
||||
|
||||
* For testing you can use the default (possibly free)
|
||||
``t1.micro`` instance (more info on `pricing
|
||||
<http://aws.amazon.com/en/ec2/pricing/>`_).
|
||||
|
||||
* Click the ``Next: Configure Instance Details`` button at the bottom right.
|
||||
|
||||
2. **Tell CloudInit to install Docker:**
|
||||
|
||||
* Enter ``#include https://get.docker.io`` into the instance *User
|
||||
Data*. `CloudInit <https://help.ubuntu.com/community/CloudInit>`_
|
||||
is part of the Ubuntu image you chose and it bootstraps from this
|
||||
*User Data*.
|
||||
* When you're on the "Configure Instance Details" step, expand the "Advanced
|
||||
Details" section.
|
||||
|
||||
3. After a few more standard choices where defaults are probably ok, your
|
||||
AWS Ubuntu instance with Docker should be running!
|
||||
* Under "User data", select "As text".
|
||||
|
||||
* Enter ``#include https://get.docker.io`` into the instance *User Data*.
|
||||
`CloudInit <https://help.ubuntu.com/community/CloudInit>`_ is part of the
|
||||
Ubuntu image you chose; it will bootstrap Docker by running the shell
|
||||
script located at this URL.
|
||||
|
||||
3. After a few more standard choices where defaults are probably ok, your AWS
|
||||
Ubuntu instance with Docker should be running!
|
||||
|
||||
**If this is your first AWS instance, you may need to set up your
|
||||
Security Group to allow SSH.** By default all incoming ports to your
|
||||
@@ -152,7 +164,7 @@ Docker that way too. Vagrant 1.1 or higher is required.
|
||||
includes rights to SSH (port 22) to your container.
|
||||
|
||||
If you have an advanced AWS setup, you might want to have a look at
|
||||
https://github.com/mitchellh/vagrant-aws
|
||||
`vagrant-aws <https://github.com/mitchellh/vagrant-aws>`_.
|
||||
|
||||
7. Connect to your machine
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
:title: Installation on Arch Linux
|
||||
:description: Docker installation on Arch Linux.
|
||||
:description: Docker installation on Arch Linux.
|
||||
:keywords: arch linux, virtualization, docker, documentation, installation
|
||||
|
||||
.. _arch_linux:
|
||||
@@ -7,54 +7,58 @@
|
||||
Arch Linux
|
||||
==========
|
||||
|
||||
Installing on Arch Linux is not officially supported but can be handled via
|
||||
either of the following AUR packages:
|
||||
.. include:: install_header.inc
|
||||
|
||||
* `lxc-docker <https://aur.archlinux.org/packages/lxc-docker/>`_
|
||||
* `lxc-docker-git <https://aur.archlinux.org/packages/lxc-docker-git/>`_
|
||||
.. include:: install_unofficial.inc
|
||||
|
||||
The lxc-docker package will install the latest tagged version of docker.
|
||||
The lxc-docker-git package will build from the current master branch.
|
||||
Installing on Arch Linux can be handled via the package in community:
|
||||
|
||||
* `docker <https://www.archlinux.org/packages/community/x86_64/docker/>`_
|
||||
|
||||
or the following AUR package:
|
||||
|
||||
* `docker-git <https://aur.archlinux.org/packages/docker-git/>`_
|
||||
|
||||
The docker package will install the latest tagged version of docker.
|
||||
The docker-git package will build from the current master branch.
|
||||
|
||||
Dependencies
|
||||
------------
|
||||
|
||||
Docker depends on several packages which are specified as dependencies in
|
||||
either AUR package.
|
||||
the packages. The core dependencies are:
|
||||
|
||||
* aufs3
|
||||
* bridge-utils
|
||||
* go
|
||||
* device-mapper
|
||||
* iproute2
|
||||
* linux-aufs_friendly
|
||||
* lxc
|
||||
* sqlite
|
||||
|
||||
|
||||
Installation
|
||||
------------
|
||||
|
||||
.. include:: install_header.inc
|
||||
For the normal package a simple
|
||||
::
|
||||
|
||||
.. include:: install_unofficial.inc
|
||||
pacman -S docker
|
||||
|
||||
is all that is needed.
|
||||
|
||||
For the AUR package execute:
|
||||
::
|
||||
|
||||
yaourt -S docker-git
|
||||
|
||||
The instructions here assume **yaourt** is installed. See
|
||||
`Arch User Repository <https://wiki.archlinux.org/index.php/Arch_User_Repository#Installing_packages>`_
|
||||
for information on building and installing packages from the AUR if you have not
|
||||
done so before.
|
||||
|
||||
Keep in mind that if **linux-aufs_friendly** is not already installed, a
|
||||
new kernel will be compiled and this can take quite a while.
|
||||
|
||||
::
|
||||
|
||||
yaourt -S lxc-docker-git
|
||||
|
||||
|
||||
Starting Docker
|
||||
---------------
|
||||
|
||||
Prior to starting docker modify your bootloader to use the
|
||||
**linux-aufs_friendly** kernel and reboot your system.
|
||||
|
||||
There is a systemd service unit created for docker. To start the docker service:
|
||||
|
||||
::
|
||||
|
||||
@@ -12,29 +12,26 @@ Binaries
|
||||
**This instruction set is meant for hackers who want to try out Docker
|
||||
on a variety of environments.**
|
||||
|
||||
Right now, the officially supported distributions are:
|
||||
|
||||
- :ref:`ubuntu_precise`
|
||||
- :ref:`ubuntu_raring`
|
||||
|
||||
|
||||
But we know people have had success running it under
|
||||
|
||||
- Debian
|
||||
- Suse
|
||||
- :ref:`arch_linux`
|
||||
Before following these directions, you should really check if a packaged version
|
||||
of Docker is already available for your distribution. We have packages for many
|
||||
distributions, and more keep showing up all the time!
|
||||
|
||||
Check Your Kernel
|
||||
-----------------
|
||||
|
||||
Your host's Linux kernel must meet the Docker :ref:`kernel`
|
||||
|
||||
Check for User Space Tools
|
||||
--------------------------
|
||||
|
||||
You must have a working installation of the `lxc <http://linuxcontainers.org>`_ utilities and library.
|
||||
|
||||
Get the docker binary:
|
||||
----------------------
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
wget --output-document=docker https://get.docker.io/builds/Linux/x86_64/docker-latest
|
||||
wget https://get.docker.io/builds/Linux/x86_64/docker-latest -O docker
|
||||
chmod +x docker
|
||||
|
||||
|
||||
|
||||
75
docs/sources/installation/fedora.rst
Normal file
75
docs/sources/installation/fedora.rst
Normal file
@@ -0,0 +1,75 @@
|
||||
:title: Requirements and Installation on Fedora
|
||||
:description: Please note this project is currently under heavy development. It should not be used in production.
|
||||
:keywords: Docker, Docker documentation, Fedora, requirements, virtualbox, vagrant, git, ssh, putty, cygwin, linux
|
||||
|
||||
.. _fedora:
|
||||
|
||||
Fedora
|
||||
======
|
||||
|
||||
.. include:: install_header.inc
|
||||
|
||||
.. include:: install_unofficial.inc
|
||||
|
||||
Docker is available in **Fedora 19 and later**. Please note that due to the
|
||||
current Docker limitations Docker is able to run only on the **64 bit**
|
||||
architecture.
|
||||
|
||||
Installation
|
||||
------------
|
||||
|
||||
The ``docker-io`` package provides Docker on Fedora.
|
||||
|
||||
|
||||
If you have the (unrelated) ``docker`` package installed already, it will
|
||||
conflict with ``docker-io``. There's a `bug report`_ filed for it.
|
||||
To proceed with ``docker-io`` installation on Fedora 19, please remove
|
||||
``docker`` first.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo yum -y remove docker
|
||||
|
||||
For Fedora 20 and later, the ``wmdocker`` package will provide the same
|
||||
functionality as ``docker`` and will also not conflict with ``docker-io``.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo yum -y install wmdocker
|
||||
sudo yum -y remove docker
|
||||
|
||||
Install the ``docker-io`` package which will install Docker on our host.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo yum -y install docker-io
|
||||
|
||||
|
||||
To update the ``docker-io`` package:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo yum -y update docker-io
|
||||
|
||||
Now that it's installed, let's start the Docker daemon.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo systemctl start docker
|
||||
|
||||
If we want Docker to start at boot, we should also:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo systemctl enable docker
|
||||
|
||||
Now let's verify that Docker is working.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo docker run -i -t fedora /bin/bash
|
||||
|
||||
**Done!**, now continue with the :ref:`hello_world` example.
|
||||
|
||||
.. _bug report: https://bugzilla.redhat.com/show_bug.cgi?id=1043676
|
||||
|
||||
80
docs/sources/installation/frugalware.rst
Normal file
80
docs/sources/installation/frugalware.rst
Normal file
@@ -0,0 +1,80 @@
|
||||
:title: Installation on FrugalWare
|
||||
:description: Docker installation on FrugalWare.
|
||||
:keywords: frugalware linux, virtualization, docker, documentation, installation
|
||||
|
||||
.. _frugalware:
|
||||
|
||||
FrugalWare
|
||||
==========
|
||||
|
||||
.. include:: install_header.inc
|
||||
|
||||
.. include:: install_unofficial.inc
|
||||
|
||||
Installing on FrugalWare is handled via the official packages:
|
||||
|
||||
* `lxc-docker i686 <http://www.frugalware.org/packages/200141>`_
|
||||
|
||||
* `lxc-docker x86_64 <http://www.frugalware.org/packages/200130>`_
|
||||
|
||||
The `lxc-docker` package will install the latest tagged version of Docker.
|
||||
|
||||
Dependencies
|
||||
------------
|
||||
|
||||
Docker depends on several packages which are specified as dependencies in
|
||||
the packages. The core dependencies are:
|
||||
|
||||
* systemd
|
||||
* lvm2
|
||||
* sqlite3
|
||||
* libguestfs
|
||||
* lxc
|
||||
* iproute2
|
||||
* bridge-utils
|
||||
|
||||
|
||||
Installation
|
||||
------------
|
||||
|
||||
A simple
|
||||
::
|
||||
|
||||
pacman -S lxc-docker
|
||||
|
||||
is all that is needed.
|
||||
|
||||
|
||||
Starting Docker
|
||||
---------------
|
||||
|
||||
There is a systemd service unit created for Docker. To start Docker as service:
|
||||
|
||||
::
|
||||
|
||||
sudo systemctl start lxc-docker
|
||||
|
||||
|
||||
To start on system boot:
|
||||
|
||||
::
|
||||
|
||||
sudo systemctl enable lxc-docker
|
||||
|
||||
Network Configuration
|
||||
---------------------
|
||||
|
||||
IPv4 packet forwarding is disabled by default on FrugalWare, so Internet access from inside
|
||||
the container may not work.
|
||||
|
||||
To enable packet forwarding, run the following command as the ``root`` user on the host system:
|
||||
|
||||
::
|
||||
|
||||
sysctl net.ipv4.ip_forward=1
|
||||
|
||||
And, to make it persistent across reboots, add the following to a file named **/etc/sysctl.d/docker.conf**:
|
||||
|
||||
::
|
||||
|
||||
net.ipv4.ip_forward=1
|
||||
@@ -4,8 +4,8 @@
|
||||
|
||||
.. _gentoo_linux:
|
||||
|
||||
Gentoo Linux
|
||||
============
|
||||
Gentoo
|
||||
======
|
||||
|
||||
.. include:: install_header.inc
|
||||
|
||||
@@ -22,17 +22,19 @@ provided at https://github.com/tianon/docker-overlay which can be added using
|
||||
properly installing and using the overlay can be found in `the overlay README
|
||||
<https://github.com/tianon/docker-overlay/blob/master/README.md#using-this-overlay>`_.
|
||||
|
||||
Note that sometimes there is a disparity between the latest version and what's
|
||||
in the overlay, and between the latest version in the overlay and what's in the
|
||||
portage tree. Please be patient, and the latest version should propagate
|
||||
shortly.
|
||||
|
||||
Installation
|
||||
^^^^^^^^^^^^
|
||||
|
||||
The package should properly pull in all the necessary dependencies and prompt
|
||||
for all necessary kernel options. For the most straightforward installation
|
||||
experience, use ``sys-kernel/aufs-sources`` as your kernel sources. If you
|
||||
prefer not to use ``sys-kernel/aufs-sources``, the portage tree also contains
|
||||
``sys-fs/aufs3``, which includes the patches necessary for adding AUFS support
|
||||
to other kernel source packages such as ``sys-kernel/gentoo-sources`` (and a
|
||||
``kernel-patch`` USE flag to perform the patching to ``/usr/src/linux``
|
||||
automatically).
|
||||
for all necessary kernel options. The ebuilds for 0.7+ include use flags to
|
||||
pull in the proper dependencies of the major storage drivers, with the
|
||||
"device-mapper" use flag being enabled by default, since that is the simplest
|
||||
installation path.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
@@ -47,9 +49,9 @@ the #docker IRC channel on the freenode network.
|
||||
Starting Docker
|
||||
^^^^^^^^^^^^^^^
|
||||
|
||||
Ensure that you are running a kernel that includes the necessary AUFS
|
||||
patches/support and includes all the necessary modules and/or configuration for
|
||||
LXC.
|
||||
Ensure that you are running a kernel that includes all the necessary modules
|
||||
and/or configuration for LXC (and optionally for device-mapper and/or AUFS,
|
||||
depending on the storage driver you've decided to use).
|
||||
|
||||
OpenRC
|
||||
------
|
||||
|
||||
74
docs/sources/installation/google.rst
Normal file
74
docs/sources/installation/google.rst
Normal file
@@ -0,0 +1,74 @@
|
||||
:title: Installation on Google Cloud Platform
|
||||
:description: Please note this project is currently under heavy development. It should not be used in production.
|
||||
:keywords: Docker, Docker documentation, installation, google, Google Compute Engine, Google Cloud Platform
|
||||
|
||||
`Google Cloud Platform <https://cloud.google.com/>`_
|
||||
====================================================
|
||||
|
||||
.. include:: install_header.inc
|
||||
|
||||
.. _googlequickstart:
|
||||
|
||||
`Compute Engine <https://developers.google.com/compute>`_ QuickStart for `Debian <https://www.debian.org>`_
|
||||
-----------------------------------------------------------------------------------------------------------
|
||||
|
||||
1. Go to `Google Cloud Console <https://cloud.google.com/console>`_ and create a new Cloud Project with `Compute Engine enabled <https://developers.google.com/compute/docs/signup>`_.
|
||||
|
||||
2. Download and configure the `Google Cloud SDK <https://developers.google.com/cloud/sdk/>`_ to use your project with the following commands:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ curl https://dl.google.com/dl/cloudsdk/release/install_google_cloud_sdk.bash | bash
|
||||
$ gcloud auth login
|
||||
Enter a cloud project id (or leave blank to not set): <google-cloud-project-id>
|
||||
|
||||
3. Start a new instance, select a zone close to you and the desired instance size:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ gcutil addinstance docker-playground --image=backports-debian-7
|
||||
1: europe-west1-a
|
||||
...
|
||||
4: us-central1-b
|
||||
>>> <zone-index>
|
||||
1: machineTypes/n1-standard-1
|
||||
...
|
||||
12: machineTypes/g1-small
|
||||
>>> <machine-type-index>
|
||||
|
||||
4. Connect to the instance using SSH:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ gcutil ssh docker-playground
|
||||
docker-playground:~$
|
||||
|
||||
5. Enable IP forwarding:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
docker-playground:~$ echo net.ipv4.ip_forward=1 | sudo tee /etc/sysctl.d/99-docker.conf
|
||||
docker-playground:~$ sudo sysctl --system
|
||||
|
||||
6. Install the latest Docker release and configure it to start when the instance boots:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
docker-playground:~$ curl get.docker.io | bash
|
||||
docker-playground:~$ sudo update-rc.d docker defaults
|
||||
|
||||
7. If running in zones: ``us-central1-a``, ``europe-west1-a``, and ``europe-west1-b``, the docker daemon must be started with the ``-mtu`` flag. Without the flag, you may experience intermittent network pauses.
|
||||
`See this issue <https://code.google.com/p/google-compute-engine/issues/detail?id=57>`_ for more details.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
docker-playground:~$ echo 'DOCKER_OPTS="$DOCKER_OPTS -mtu 1460"' | sudo tee -a /etc/default/docker
|
||||
docker-playground:~$ sudo service docker restart
|
||||
|
||||
8. Start a new container:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
docker-playground:~$ sudo docker run busybox echo 'docker on GCE \o/'
|
||||
docker on GCE \o/
|
||||
|
||||
@@ -9,7 +9,7 @@ Installation
|
||||
|
||||
There are a number of ways to install Docker, depending on where you
|
||||
want to run the daemon. The :ref:`ubuntu_linux` installation is the
|
||||
officially-tested version, and the community adds more techniques for
|
||||
officially-tested version. The community adds more techniques for
|
||||
installing Docker all the time.
|
||||
|
||||
Contents:
|
||||
@@ -18,13 +18,17 @@ Contents:
|
||||
:maxdepth: 1
|
||||
|
||||
ubuntulinux
|
||||
binaries
|
||||
security
|
||||
upgrading
|
||||
kernel
|
||||
rhel
|
||||
fedora
|
||||
archlinux
|
||||
gentoolinux
|
||||
frugalware
|
||||
vagrant
|
||||
windows
|
||||
amazon
|
||||
rackspace
|
||||
archlinux
|
||||
gentoolinux
|
||||
google
|
||||
kernel
|
||||
binaries
|
||||
security
|
||||
upgrading
|
||||
|
||||
@@ -11,10 +11,10 @@ In short, Docker has the following kernel requirements:
|
||||
|
||||
- Linux version 3.8 or above.
|
||||
|
||||
- `AUFS support <http://aufs.sourceforge.net/>`_.
|
||||
|
||||
- Cgroups and namespaces must be enabled.
|
||||
|
||||
*Note: as of 0.7 docker no longer requires aufs. AUFS support is still available as an optional driver.*
|
||||
|
||||
The officially supported kernel is the one recommended by the
|
||||
:ref:`ubuntu_linux` installation path. It is the one that most developers
|
||||
will use, and the one that receives the most attention from the core
|
||||
@@ -58,17 +58,6 @@ detects something older than 3.8.
|
||||
See issue `#407 <https://github.com/dotcloud/docker/issues/407>`_ for details.
|
||||
|
||||
|
||||
AUFS support
|
||||
------------
|
||||
|
||||
Docker currently relies on AUFS, a unioning filesystem.
|
||||
While AUFS is included in the kernels built by the Debian and Ubuntu
|
||||
distributions, it is not part of the standard kernel. This means that if
|
||||
you decide to roll your own kernel, you will have to patch your
|
||||
kernel tree to add AUFS. The process is documented on
|
||||
`AUFS webpage <http://aufs.sourceforge.net/>`_.
|
||||
|
||||
|
||||
Cgroups and namespaces
|
||||
----------------------
|
||||
|
||||
@@ -122,3 +111,42 @@ And replace it by the following one::
|
||||
GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"
|
||||
|
||||
Then run ``update-grub``, and reboot.
|
||||
|
||||
Details
|
||||
-------
|
||||
|
||||
To automatically check some of the requirements below, you can run `lxc-checkconfig`.
|
||||
|
||||
Networking:
|
||||
|
||||
- CONFIG_BRIDGE
|
||||
- CONFIG_NETFILTER_XT_MATCH_ADDRTYPE
|
||||
- CONFIG_NF_NAT
|
||||
- CONFIG_NF_NAT_IPV4
|
||||
- CONFIG_NF_NAT_NEEDED
|
||||
|
||||
LVM:
|
||||
|
||||
- CONFIG_BLK_DEV_DM
|
||||
- CONFIG_DM_THIN_PROVISIONING
|
||||
- CONFIG_EXT4_FS
|
||||
|
||||
Namespaces:
|
||||
|
||||
- CONFIG_NAMESPACES
|
||||
- CONFIG_UTS_NS
|
||||
- CONFIG_IPC_NS
|
||||
- CONFIG_USER_NS
|
||||
- CONFIG_PID_NS
|
||||
- CONFIG_NET_NS
|
||||
|
||||
Cgroups:
|
||||
|
||||
- CONFIG_CGROUPS
|
||||
|
||||
Cgroup controllers (optional but highly recommended):
|
||||
|
||||
- CONFIG_CGROUP_CPUACCT
|
||||
- CONFIG_BLK_CGROUP
|
||||
- CONFIG_MEMCG
|
||||
- CONFIG_MEMCG_SWAP
|
||||
|
||||
@@ -2,7 +2,6 @@
|
||||
:description: Installing Docker on Ubuntu provided by Rackspace
|
||||
:keywords: Rackspace Cloud, installation, docker, linux, ubuntu
|
||||
|
||||
===============
|
||||
Rackspace Cloud
|
||||
===============
|
||||
|
||||
@@ -14,14 +13,14 @@ straightforward, and you should mostly be able to follow the
|
||||
|
||||
**However, there is one caveat:**
|
||||
|
||||
If you are using any linux not already shipping with the 3.8 kernel
|
||||
If you are using any Linux not already shipping with the 3.8 kernel
|
||||
you will need to install it. And this is a little more difficult on
|
||||
Rackspace.
|
||||
|
||||
Rackspace boots their servers using grub's ``menu.lst`` and does not
|
||||
like non 'virtual' packages (e.g. xen compatible) kernels there,
|
||||
although they do work. This makes ``update-grub`` to not have the
|
||||
expected result, and you need to set the kernel manually.
|
||||
like non 'virtual' packages (e.g. Xen compatible) kernels there,
|
||||
although they do work. This results in ``update-grub`` not having the
|
||||
expected result, and you will need to set the kernel manually.
|
||||
|
||||
**Do not attempt this on a production machine!**
|
||||
|
||||
@@ -34,7 +33,7 @@ expected result, and you need to set the kernel manually.
|
||||
apt-get install linux-generic-lts-raring
|
||||
|
||||
|
||||
Great, now you have kernel installed in ``/boot/``, next is to make it
|
||||
Great, now you have the kernel installed in ``/boot/``, next you need to make it
|
||||
boot next time.
|
||||
|
||||
.. code-block:: bash
|
||||
@@ -48,9 +47,9 @@ boot next time.
|
||||
Now you need to manually edit ``/boot/grub/menu.lst``, you will find a
|
||||
section at the bottom with the existing options. Copy the top one and
|
||||
substitute the new kernel into that. Make sure the new kernel is on
|
||||
top, and double check kernel and initrd point to the right files.
|
||||
top, and double check the kernel and initrd lines point to the right files.
|
||||
|
||||
Make special care to double check the kernel and initrd entries.
|
||||
Take special care to double check the kernel and initrd entries.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
@@ -79,7 +78,7 @@ It will probably look something like this:
|
||||
initrd /boot/initrd.img-3.2.0-38-virtual
|
||||
|
||||
|
||||
Reboot server (either via command line or console)
|
||||
Reboot the server (either via command line or console)
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
|
||||
81
docs/sources/installation/rhel.rst
Normal file
81
docs/sources/installation/rhel.rst
Normal file
@@ -0,0 +1,81 @@
|
||||
:title: Requirements and Installation on Red Hat Enterprise Linux
|
||||
:description: Please note this project is currently under heavy development. It should not be used in production.
|
||||
:keywords: Docker, Docker documentation, requirements, linux, rhel, centos
|
||||
|
||||
.. _rhel:
|
||||
|
||||
Red Hat Enterprise Linux
|
||||
========================
|
||||
|
||||
.. include:: install_header.inc
|
||||
|
||||
.. include:: install_unofficial.inc
|
||||
|
||||
Docker is available for **RHEL** on EPEL. These instructions should work for
|
||||
both RHEL and CentOS. They will likely work for other binary compatible EL6
|
||||
distributions as well, but they haven't been tested.
|
||||
|
||||
Please note that this package is part of `Extra Packages for Enterprise
|
||||
Linux (EPEL)`_, a community effort to create and maintain additional packages
|
||||
for the RHEL distribution.
|
||||
|
||||
Also note that due to the current Docker limitations, Docker is able to run
|
||||
only on the **64 bit** architecture.
|
||||
|
||||
Installation
|
||||
------------
|
||||
|
||||
Firstly, you need to install the EPEL repository. Please follow the `EPEL installation instructions`_.
|
||||
|
||||
|
||||
The ``docker-io`` package provides Docker on EPEL.
|
||||
|
||||
|
||||
If you already have the (unrelated) ``docker`` package installed, it will
|
||||
conflict with ``docker-io``. There's a `bug report`_ filed for it.
|
||||
To proceed with ``docker-io`` installation, please remove
|
||||
``docker`` first.
|
||||
|
||||
|
||||
Next, let's install the ``docker-io`` package which will install Docker on our host.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo yum -y install docker-io
|
||||
|
||||
To update the ``docker-io`` package:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo yum -y update docker-io
|
||||
|
||||
Now that it's installed, let's start the Docker daemon.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo service docker start
|
||||
|
||||
If we want Docker to start at boot, we should also:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo chkconfig docker on
|
||||
|
||||
Now let's verify that Docker is working.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo docker run -i -t fedora /bin/bash
|
||||
|
||||
**Done!**, now continue with the :ref:`hello_world` example.
|
||||
|
||||
Issues?
|
||||
-------
|
||||
|
||||
If you have any issues - please report them directly in the `Red Hat Bugzilla for docker-io component`_.
|
||||
|
||||
.. _Extra Packages for Enterprise Linux (EPEL): https://fedoraproject.org/wiki/EPEL
|
||||
.. _EPEL installation instructions: https://fedoraproject.org/wiki/EPEL#How_can_I_use_these_extra_packages.3F
|
||||
.. _Red Hat Bugzilla for docker-io component : https://bugzilla.redhat.com/enter_bug.cgi?product=Fedora%20EPEL&component=docker-io
|
||||
.. _bug report: https://bugzilla.redhat.com/show_bug.cgi?id=1043676
|
||||
|
||||
@@ -4,8 +4,8 @@
|
||||
|
||||
.. _ubuntu_linux:
|
||||
|
||||
Ubuntu Linux
|
||||
============
|
||||
Ubuntu
|
||||
======
|
||||
|
||||
.. warning::
|
||||
|
||||
@@ -14,15 +14,10 @@ Ubuntu Linux
|
||||
|
||||
.. include:: install_header.inc
|
||||
|
||||
Right now, the officially supported distribution are:
|
||||
Docker is supported on the following versions of Ubuntu:
|
||||
|
||||
- :ref:`ubuntu_precise`
|
||||
- :ref:`ubuntu_raring`
|
||||
|
||||
Docker has the following dependencies
|
||||
|
||||
* Linux kernel 3.8 (read more about :ref:`kernel`)
|
||||
* AUFS file system support (we are working on BTRFS support as an alternative)
|
||||
- :ref:`ubuntu_raring_saucy`
|
||||
|
||||
Please read :ref:`ufw`, if you plan to use `UFW (Uncomplicated
|
||||
Firewall) <https://help.ubuntu.com/community/UFW>`_
|
||||
@@ -68,49 +63,68 @@ Installation
|
||||
These instructions have changed for 0.6. If you are upgrading from
|
||||
an earlier version, you will need to follow them again.
|
||||
|
||||
Docker is available as a Debian package, which makes installation easy.
|
||||
Docker is available as a Debian package, which makes installation
|
||||
easy. **See the :ref:`installmirrors` section below if you are not in
|
||||
the United States.** Other sources of the Debian packages may be
|
||||
faster for you to install.
|
||||
|
||||
First add the Docker repository key to your local keychain. You can use the
|
||||
``apt-key`` command to check the fingerprint matches: ``36A1 D786 9245 C895 0F96
|
||||
6E92 D857 6A8B A88D 21E9``
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
# Add the Docker repository key to your local keychain
|
||||
# using apt-key finger you can check the fingerprint matches 36A1 D786 9245 C895 0F96 6E92 D857 6A8B A88D 21E9
|
||||
sudo sh -c "wget -qO- https://get.docker.io/gpg | apt-key add -"
|
||||
|
||||
# Add the Docker repository to your apt sources list.
|
||||
sudo sh -c "echo deb http://get.docker.io/ubuntu docker main\
|
||||
> /etc/apt/sources.list.d/docker.list"
|
||||
Add the Docker repository to your apt sources list, update and install the
|
||||
``lxc-docker`` package.
|
||||
|
||||
# Update your sources
|
||||
sudo apt-get update
|
||||
|
||||
# Install, you will see another warning that the package cannot be authenticated. Confirm install.
|
||||
sudo apt-get install lxc-docker
|
||||
|
||||
Verify it worked
|
||||
*You may receive a warning that the package isn't trusted. Answer yes to
|
||||
continue installation.*
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo sh -c "echo deb http://get.docker.io/ubuntu docker main\
|
||||
> /etc/apt/sources.list.d/docker.list"
|
||||
sudo apt-get update
|
||||
sudo apt-get install lxc-docker
|
||||
|
||||
.. note::
|
||||
|
||||
There is also a simple ``curl`` script available to help with this process.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
curl -s https://get.docker.io/ubuntu/ | sudo sh
|
||||
|
||||
Now verify that the installation has worked by downloading the ``ubuntu`` image
|
||||
and launching a container.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
# download the base 'ubuntu' container and run bash inside it while setting up an interactive shell
|
||||
sudo docker run -i -t ubuntu /bin/bash
|
||||
|
||||
# type 'exit' to exit
|
||||
|
||||
Type ``exit`` to exit
|
||||
|
||||
**Done!**, now continue with the :ref:`hello_world` example.
|
||||
|
||||
.. _ubuntu_raring:
|
||||
.. _ubuntu_raring_saucy:
|
||||
|
||||
Ubuntu Raring 13.04 (64 bit)
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Ubuntu Raring 13.04 and Saucy 13.10 (64 bit)
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
These instructions cover both Ubuntu Raring 13.04 and Saucy 13.10.
|
||||
|
||||
Dependencies
|
||||
------------
|
||||
|
||||
**AUFS filesystem support**
|
||||
**Optional AUFS filesystem support**
|
||||
|
||||
Ubuntu Raring already comes with the 3.8 kernel, so we don't need to install it. However, not all systems
|
||||
have AUFS filesystem support enabled, so we need to install it.
|
||||
have AUFS filesystem support enabled. AUFS support is optional as of version 0.7, but it's still available as
|
||||
a driver and we recommend using it if you can.
|
||||
|
||||
To make sure AUFS is installed, run the following commands:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
@@ -123,47 +137,47 @@ Installation
|
||||
|
||||
Docker is available as a Debian package, which makes installation easy.
|
||||
|
||||
*Please note that these instructions have changed for 0.6. If you are upgrading from an earlier version, you will need
|
||||
to follow them again.*
|
||||
.. warning::
|
||||
|
||||
Please note that these instructions have changed for 0.6. If you are upgrading from an earlier version, you will need
|
||||
to follow them again.
|
||||
|
||||
First add the Docker repository key to your local keychain. You can use the
|
||||
``apt-key`` command to check the fingerprint matches: ``36A1 D786 9245 C895 0F96
|
||||
6E92 D857 6A8B A88D 21E9``
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
# Add the Docker repository key to your local keychain
|
||||
# using apt-key finger you can check the fingerprint matches 36A1 D786 9245 C895 0F96 6E92 D857 6A8B A88D 21E9
|
||||
sudo sh -c "wget -qO- https://get.docker.io/gpg | apt-key add -"
|
||||
|
||||
# Add the Docker repository to your apt sources list.
|
||||
sudo sh -c "echo deb http://get.docker.io/ubuntu docker main\
|
||||
> /etc/apt/sources.list.d/docker.list"
|
||||
|
||||
# update
|
||||
sudo apt-get update
|
||||
|
||||
# install
|
||||
sudo apt-get install lxc-docker
|
||||
|
||||
|
||||
Verify it worked
|
||||
Add the Docker repository to your apt sources list, update and install the
|
||||
``lxc-docker`` package.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo sh -c "echo deb http://get.docker.io/ubuntu docker main\
|
||||
> /etc/apt/sources.list.d/docker.list"
|
||||
sudo apt-get update
|
||||
sudo apt-get install lxc-docker
|
||||
|
||||
Now verify that the installation has worked by downloading the ``ubuntu`` image
|
||||
and launching a container.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
# download the base 'ubuntu' container
|
||||
# and run bash inside it while setting up an interactive shell
|
||||
sudo docker run -i -t ubuntu /bin/bash
|
||||
|
||||
# type exit to exit
|
||||
|
||||
Type ``exit`` to exit
|
||||
|
||||
**Done!**, now continue with the :ref:`hello_world` example.
|
||||
|
||||
|
||||
.. _ufw:
|
||||
|
||||
Docker and UFW
|
||||
^^^^^^^^^^^^^^
|
||||
|
||||
Docker uses a bridge to manage container networking. By default, UFW
|
||||
drops all `forwarding`, thus a first step is to enable UFW forwarding:
|
||||
Docker uses a bridge to manage container networking. By default, UFW drops all
|
||||
`forwarding` traffic. As a result you will need to enable UFW forwarding:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
@@ -181,11 +195,33 @@ Then reload UFW:
|
||||
sudo ufw reload
|
||||
|
||||
|
||||
UFW's default set of rules denied all `incoming`, so if you want to be
|
||||
able to reach your containers from another host, you should allow
|
||||
incoming connections on the docker port (default 4243):
|
||||
UFW's default set of rules denies all `incoming` traffic. If you want to be
|
||||
able to reach your containers from another host then you should allow
|
||||
incoming connections on the Docker port (default 4243):
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo ufw allow 4243/tcp
|
||||
|
||||
.. _installmirrors:
|
||||
|
||||
Mirrors
|
||||
^^^^^^^
|
||||
|
||||
You should ``ping get.docker.io`` and compare the latency to the
|
||||
following mirrors, and pick whichever one is best for you.
|
||||
|
||||
Yandex
|
||||
------
|
||||
|
||||
`Yandex <http://yandex.ru/>`_ in Russia is mirroring the Docker Debian
|
||||
packages, updating every 6 hours. Substitute
|
||||
``http://mirror.yandex.ru/mirrors/docker/`` for
|
||||
``http://get.docker.io/ubuntu`` in the instructions above. For example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo sh -c "echo deb http://mirror.yandex.ru/mirrors/docker/ docker main\
|
||||
> /etc/apt/sources.list.d/docker.list"
|
||||
sudo apt-get update
|
||||
sudo apt-get install lxc-docker
|
||||
|
||||
183
docs/sources/use/ambassador_pattern_linking.rst
Normal file
183
docs/sources/use/ambassador_pattern_linking.rst
Normal file
@@ -0,0 +1,183 @@
|
||||
:title: Link via an Ambassador Container
|
||||
:description: Using the Ambassador pattern to abstract (network) services
|
||||
:keywords: Examples, Usage, links, docker, documentation, examples, names, name, container naming
|
||||
|
||||
.. _ambassador_pattern_linking:
|
||||
|
||||
Link via an Ambassador Container
|
||||
================================
|
||||
|
||||
Rather than hardcoding network links between a service consumer and provider, Docker
|
||||
encourages service portability.
|
||||
|
||||
e.g., instead of
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
(consumer) --> (redis)
|
||||
|
||||
requiring you to restart the ``consumer`` to attach it to a different ``redis`` service,
|
||||
you can add ambassadors
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
(consumer) --> (redis-ambassador) --> (redis)
|
||||
|
||||
or
|
||||
|
||||
(consumer) --> (redis-ambassador) ---network---> (redis-ambassador) --> (redis)
|
||||
|
||||
When you need to rewire your consumer to talk to a different redis server, you
|
||||
can just restart the ``redis-ambassador`` container that the consumer is connected to.
|
||||
|
||||
This pattern also allows you to transparently move the redis server to a different
|
||||
docker host from the consumer.
|
||||
|
||||
Using the ``svendowideit/ambassador`` container, the link wiring is controlled entirely
|
||||
from the ``docker run`` parameters.
|
||||
|
||||
Two host Example
|
||||
----------------
|
||||
|
||||
Start actual redis server on one Docker host
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
big-server $ docker run -d -name redis crosbymichael/redis
|
||||
|
||||
Then add an ambassador linked to the redis server, mapping a port to the outside world
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
big-server $ docker run -d -link redis:redis -name redis_ambassador -p 6379:6379 svendowideit/ambassador
|
||||
|
||||
On the other host, you can set up another ambassador setting environment variables for each remote port we want to proxy to the ``big-server``
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
client-server $ docker run -d -name redis_ambassador -expose 6379 -e REDIS_PORT_6379_TCP=tcp://192.168.1.52:6379 svendowideit/ambassador
|
||||
|
||||
Then on the ``client-server`` host, you can use a redis client container to talk
|
||||
to the remote redis server, just by linking to the local redis ambassador.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
client-server $ docker run -i -t -rm -link redis_ambassador:redis relateiq/redis-cli
|
||||
redis 172.17.0.160:6379> ping
|
||||
PONG
|
||||
|
||||
|
||||
|
||||
How it works
|
||||
------------
|
||||
|
||||
The following example shows what the ``svendowideit/ambassador`` container does
|
||||
automatically (with a tiny amount of ``sed``)
|
||||
|
||||
On the docker host (192.168.1.52) that redis will run on:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
# start actual redis server
|
||||
$ docker run -d -name redis crosbymichael/redis
|
||||
|
||||
# get a redis-cli container for connection testing
|
||||
$ docker pull relateiq/redis-cli
|
||||
|
||||
# test the redis server by talking to it directly
|
||||
$ docker run -t -i -rm -link redis:redis relateiq/redis-cli
|
||||
redis 172.17.0.136:6379> ping
|
||||
PONG
|
||||
^D
|
||||
|
||||
# add redis ambassador
|
||||
$ docker run -t -i -link redis:redis -name redis_ambassador -p 6379:6379 busybox sh
|
||||
|
||||
in the redis_ambassador container, you can see the linked redis container's env
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ env
|
||||
REDIS_PORT=tcp://172.17.0.136:6379
|
||||
REDIS_PORT_6379_TCP_ADDR=172.17.0.136
|
||||
REDIS_NAME=/redis_ambassador/redis
|
||||
HOSTNAME=19d7adf4705e
|
||||
REDIS_PORT_6379_TCP_PORT=6379
|
||||
HOME=/
|
||||
REDIS_PORT_6379_TCP_PROTO=tcp
|
||||
container=lxc
|
||||
REDIS_PORT_6379_TCP=tcp://172.17.0.136:6379
|
||||
TERM=xterm
|
||||
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
|
||||
PWD=/
|
||||
|
||||
|
||||
This environment is used by the ambassador socat script to expose redis to the world
|
||||
(via the -p 6379:6379 port mapping)
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ docker rm redis_ambassador
|
||||
$ sudo ./contrib/mkimage-unittest.sh
|
||||
$ docker run -t -i -link redis:redis -name redis_ambassador -p 6379:6379 docker-ut sh
|
||||
|
||||
$ socat TCP4-LISTEN:6379,fork,reuseaddr TCP4:172.17.0.136:6379
|
||||
|
||||
then ping the redis server via the ambassador
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ docker run -i -t -rm -link redis_ambassador:redis relateiq/redis-cli
|
||||
redis 172.17.0.160:6379> ping
|
||||
PONG
|
||||
|
||||
Now go to a different server
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ sudo ./contrib/mkimage-unittest.sh
|
||||
$ docker run -t -i -expose 6379 -name redis_ambassador docker-ut sh
|
||||
|
||||
$ socat TCP4-LISTEN:6379,fork,reuseaddr TCP4:192.168.1.52:6379
|
||||
|
||||
and get the redis-cli image so we can talk over the ambassador bridge
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ docker pull relateiq/redis-cli
|
||||
$ docker run -i -t -rm -link redis_ambassador:redis relateiq/redis-cli
|
||||
redis 172.17.0.160:6379> ping
|
||||
PONG
|
||||
|
||||
The svendowideit/ambassador Dockerfile
|
||||
--------------------------------------
|
||||
|
||||
The ``svendowideit/ambassador`` image is a small busybox image with ``socat`` built in.
|
||||
When you start the container, it uses a small ``sed`` script to parse out the (possibly multiple)
|
||||
link environment variables to set up the port forwarding. On the remote host, you need to set the
|
||||
variable using the ``-e`` command line option.
|
||||
|
||||
``-expose 1234 -e REDIS_PORT_1234_TCP=tcp://192.168.1.52:6379`` will forward the
|
||||
local ``1234`` port to the remote IP and port - in this case ``192.168.1.52:6379``.
|
||||
|
||||
|
||||
::
|
||||
|
||||
#
|
||||
#
|
||||
# first you need to build the docker-ut image
|
||||
# using ./contrib/mkimage-unittest.sh
|
||||
# then
|
||||
# docker build -t SvenDowideit/ambassador .
|
||||
# docker tag SvenDowideit/ambassador ambassador
|
||||
# then to run it (on the host that has the real backend on it)
|
||||
# docker run -t -i -link redis:redis -name redis_ambassador -p 6379:6379 ambassador
|
||||
# on the remote host, you can set up another ambassador
|
||||
# docker run -t -i -name redis_ambassador -expose 6379 sh
|
||||
|
||||
FROM docker-ut
|
||||
MAINTAINER SvenDowideit@home.org.au
|
||||
|
||||
|
||||
CMD env | grep _TCP= | sed 's/.*_PORT_\([0-9]*\)_TCP=tcp:\/\/\(.*\):\(.*\)/socat TCP4-LISTEN:\1,fork,reuseaddr TCP4:\2:\3 \&/' | sh && top
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
:title: Base Image Creation
|
||||
:title: Create a Base Image
|
||||
:description: How to create base images
|
||||
:keywords: Examples, Usage, base image, docker, documentation, examples
|
||||
|
||||
.. _base_image_creation:
|
||||
|
||||
Base Image Creation
|
||||
Create a Base Image
|
||||
===================
|
||||
|
||||
So you want to create your own :ref:`base_image_def`? Great!
|
||||
@@ -37,7 +37,7 @@ There are more example scripts for creating base images in the
|
||||
Docker Github Repo:
|
||||
|
||||
* `BusyBox <https://github.com/dotcloud/docker/blob/master/contrib/mkimage-busybox.sh>`_
|
||||
* `CentOS
|
||||
<https://github.com/dotcloud/docker/blob/master/contrib/mkimage-centos.sh>`_
|
||||
* `Debian/Ubuntu
|
||||
* `CentOS / Scientific Linux CERN (SLC)
|
||||
<https://github.com/dotcloud/docker/blob/master/contrib/mkimage-rinse.sh>`_
|
||||
* `Debian / Ubuntu
|
||||
<https://github.com/dotcloud/docker/blob/master/contrib/mkimage-debootstrap.sh>`_
|
||||
|
||||
@@ -1,15 +1,15 @@
|
||||
:title: Basic Commands
|
||||
:title: Learn Basic Commands
|
||||
:description: Common usage and commands
|
||||
:keywords: Examples, Usage, basic commands, docker, documentation, examples
|
||||
|
||||
|
||||
The Basics
|
||||
==========
|
||||
Learn Basic Commands
|
||||
====================
|
||||
|
||||
Starting Docker
|
||||
---------------
|
||||
|
||||
If you have used one of the quick install paths', Docker may have been
|
||||
If you have used one of the quick install paths, Docker may have been
|
||||
installed with upstart, Ubuntu's system for starting processes at boot
|
||||
time. You should be able to run ``sudo docker help`` and get output.
|
||||
|
||||
@@ -30,8 +30,8 @@ Download a pre-built image
|
||||
# Download an ubuntu image
|
||||
sudo docker pull ubuntu
|
||||
|
||||
This will find the ``ubuntu`` image by name in the :ref:`Central Index
|
||||
<searching_central_index>` and download it from the top-level Central
|
||||
This will find the ``ubuntu`` image by name in the :ref:`Central Index
|
||||
<searching_central_index>` and download it from the top-level Central
|
||||
Repository to a local image cache.
|
||||
|
||||
.. NOTE:: When the image has successfully downloaded, you will see a
|
||||
@@ -53,21 +53,23 @@ Running an interactive shell
|
||||
|
||||
.. _dockergroup:
|
||||
|
||||
sudo and the docker Group
|
||||
-------------------------
|
||||
The sudo command and the docker Group
|
||||
-------------------------------------
|
||||
|
||||
The ``docker`` daemon always runs as root, and since ``docker``
|
||||
version 0.5.2, ``docker`` binds to a Unix socket instead of a TCP
|
||||
port. By default that Unix socket is owned by the user *root*, and so,
|
||||
by default, you can access it with ``sudo``.
|
||||
The ``docker`` daemon always runs as the root user, and since Docker version
|
||||
0.5.2, the ``docker`` daemon binds to a Unix socket instead of a TCP port. By
|
||||
default that Unix socket is owned by the user *root*, and so, by default, you
|
||||
can access it with ``sudo``.
|
||||
|
||||
Starting in version 0.5.3, if you (or your Docker installer) create a
|
||||
Unix group called *docker* and add users to it, then the ``docker``
|
||||
daemon will make the ownership of the Unix socket read/writable by the
|
||||
*docker* group when the daemon starts. The ``docker`` daemon must
|
||||
always run as root, but if you run the ``docker`` client as a user in
|
||||
always run as the root user, but if you run the ``docker`` client as a user in
|
||||
the *docker* group then you don't need to add ``sudo`` to all the
|
||||
client commands.
|
||||
client commands.
|
||||
|
||||
.. warning:: The *docker* group is root-equivalent.
|
||||
|
||||
**Example:**
|
||||
|
||||
@@ -76,11 +78,11 @@ client commands.
|
||||
# Add the docker group if it doesn't already exist.
|
||||
sudo groupadd docker
|
||||
|
||||
# Add the user "ubuntu" to the docker group.
|
||||
# Add the connected user "${USERNAME}" to the docker group.
|
||||
# Change the user name to match your preferred user.
|
||||
# You may have to logout and log back in again for
|
||||
# this to take effect.
|
||||
sudo gpasswd -a ubuntu docker
|
||||
sudo gpasswd -a ${USERNAME} docker
|
||||
|
||||
# Restart the docker daemon.
|
||||
sudo service docker restart
|
||||
@@ -97,10 +99,10 @@ Bind Docker to another host/port or a Unix socket
|
||||
<https://github.com/dotcloud/docker/issues/1369>`_). Make sure you
|
||||
control access to ``docker``.
|
||||
|
||||
With -H it is possible to make the Docker daemon to listen on a
|
||||
specific ip and port. By default, it will listen on
|
||||
With ``-H`` it is possible to make the Docker daemon to listen on a
|
||||
specific IP and port. By default, it will listen on
|
||||
``unix:///var/run/docker.sock`` to allow only local connections by the
|
||||
*root* user. You *could* set it to 0.0.0.0:4243 or a specific host ip to
|
||||
*root* user. You *could* set it to ``0.0.0.0:4243`` or a specific host IP to
|
||||
give access to everybody, but that is **not recommended** because then
|
||||
it is trivial for someone to gain root access to the host where the
|
||||
daemon is running.
|
||||
@@ -115,6 +117,11 @@ For example:
|
||||
* ``tcp://host:4243`` -> tcp connection on host:4243
|
||||
* ``unix://path/to/socket`` -> unix socket located at ``path/to/socket``
|
||||
|
||||
``-H``, when empty, will default to the same value as when no ``-H`` was passed in.
|
||||
|
||||
``-H`` also accepts short form for TCP bindings:
|
||||
``host[:port]`` or ``:port``
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
# Run docker in daemon mode
|
||||
@@ -179,10 +186,10 @@ Committing (saving) a container state
|
||||
|
||||
Save your containers state to a container image, so the state can be re-used.
|
||||
|
||||
When you commit your container only the differences between the image
|
||||
the container was created from and the current state of the container
|
||||
will be stored (as a diff). See which images you already have using
|
||||
``sudo docker images``
|
||||
When you commit your container only the differences between the image the
|
||||
container was created from and the current state of the container will be
|
||||
stored (as a diff). See which images you already have using the ``docker
|
||||
images`` command.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
@@ -194,7 +201,5 @@ will be stored (as a diff). See which images you already have using
|
||||
|
||||
You now have a image state from which you can create new instances.
|
||||
|
||||
|
||||
|
||||
Read more about :ref:`working_with_the_repository` or continue to the
|
||||
complete :ref:`cli`
|
||||
|
||||
@@ -1,12 +1,12 @@
|
||||
:title: Dockerfiles for Images
|
||||
:title: Build Images (Dockerfile Reference)
|
||||
:description: Dockerfiles use a simple DSL which allows you to automate the steps you would normally manually take to create an image.
|
||||
:keywords: builder, docker, Dockerfile, automation, image creation
|
||||
|
||||
.. _dockerbuilder:
|
||||
|
||||
======================
|
||||
Dockerfiles for Images
|
||||
======================
|
||||
===================================
|
||||
Build Images (Dockerfile Reference)
|
||||
===================================
|
||||
|
||||
**Docker can act as a builder** and read instructions from a text
|
||||
``Dockerfile`` to automate the steps you would otherwise take manually
|
||||
@@ -251,6 +251,11 @@ All new files and directories are created with mode 0755, uid and gid
|
||||
if you build using STDIN (``docker build - < somefile``), there is no build
|
||||
context, so the Dockerfile can only contain an URL based ADD statement.
|
||||
|
||||
.. note::
|
||||
if your URL files are protected using authentication, you will need to use
|
||||
an ``RUN wget`` , ``RUN curl`` or other tool from within the container as
|
||||
ADD does not support authentication.
|
||||
|
||||
The copy obeys the following rules:
|
||||
|
||||
* The ``<src>`` path must be inside the *context* of the build; you cannot
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
:title: Host Integration
|
||||
:title: Automatically Start Containers
|
||||
:description: How to generate scripts for upstart, systemd, etc.
|
||||
:keywords: systemd, upstart, supervisor, docker, documentation, host integration
|
||||
|
||||
|
||||
|
||||
Host Integration
|
||||
================
|
||||
Automatically Start Containers
|
||||
==============================
|
||||
|
||||
You can use your Docker containers with process managers like ``upstart``,
|
||||
``systemd`` and ``supervisor``.
|
||||
|
||||
@@ -17,7 +17,9 @@ Contents:
|
||||
workingwithrepository
|
||||
baseimages
|
||||
port_redirection
|
||||
puppet
|
||||
networking
|
||||
host_integration
|
||||
working_with_volumes
|
||||
working_with_links_names
|
||||
ambassador_pattern_linking
|
||||
puppet
|
||||
|
||||
153
docs/sources/use/networking.rst
Normal file
153
docs/sources/use/networking.rst
Normal file
@@ -0,0 +1,153 @@
|
||||
:title: Configure Networking
|
||||
:description: Docker networking
|
||||
:keywords: network, networking, bridge, docker, documentation
|
||||
|
||||
|
||||
Configure Networking
|
||||
====================
|
||||
|
||||
Docker uses Linux bridge capabilities to provide network connectivity
|
||||
to containers. The ``docker0`` bridge interface is managed by Docker
|
||||
itself for this purpose. Thus, when the Docker daemon starts it :
|
||||
|
||||
- creates the ``docker0`` bridge if not present
|
||||
- searches for an IP address range which doesn't overlap with an existing route
|
||||
- picks an IP in the selected range
|
||||
- assigns this IP to the ``docker0`` bridge
|
||||
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
# List host bridges
|
||||
$ sudo brctl show
|
||||
bridge name bridge id STP enabled interfaces
|
||||
docker0 8000.000000000000 no
|
||||
|
||||
# Show docker0 IP address
|
||||
$ sudo ifconfig docker0
|
||||
docker0 Link encap:Ethernet HWaddr xx:xx:xx:xx:xx:xx
|
||||
inet addr:172.17.42.1 Bcast:0.0.0.0 Mask:255.255.0.0
|
||||
|
||||
|
||||
|
||||
At runtime, a :ref:`specific kind of virtual
|
||||
interface<vethxxxx-device>` is given to each containers which is then
|
||||
bonded to the ``docker0`` bridge. Each containers also receives a
|
||||
dedicated IP address from the same range as ``docker0``. The
|
||||
``docker0`` IP address is then used as the default gateway for the
|
||||
containers.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
# Run a container
|
||||
$ sudo docker run -t -i -d base /bin/bash
|
||||
52f811c5d3d69edddefc75aff5a4525fc8ba8bcfa1818132f9dc7d4f7c7e78b4
|
||||
|
||||
$ sudo brctl show
|
||||
bridge name bridge id STP enabled interfaces
|
||||
docker0 8000.fef213db5a66 no vethQCDY1N
|
||||
|
||||
|
||||
Above, ``docker0`` acts as a bridge for the ``vethQCDY1N`` interface
|
||||
which is dedicated to the 52f811c5d3d6 container.
|
||||
|
||||
|
||||
How to use a specific IP address range
|
||||
---------------------------------------
|
||||
|
||||
Docker will try hard to find an IP range which is not used by the
|
||||
host. Even if it works for most cases, it's not bullet-proof and
|
||||
sometimes you need to have more control over the IP addressing scheme.
|
||||
|
||||
For this purpose, Docker allows you to manage the ``docker0`` bridge
|
||||
or your own one using the ``-b=<bridgename>`` parameter.
|
||||
|
||||
In this scenario:
|
||||
|
||||
- ensure Docker is stopped
|
||||
- create your own bridge (``bridge0`` for example)
|
||||
- assign a specific IP to this bridge
|
||||
- start Docker with the ``-b=bridge0`` parameter
|
||||
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
# Stop Docker
|
||||
$ sudo service docker stop
|
||||
|
||||
# Clean docker0 bridge and
|
||||
# add your very own bridge0
|
||||
$ sudo ifconfig docker0 down
|
||||
$ sudo brctl addbr bridge0
|
||||
$ sudo ifconfig bridge0 192.168.227.1 netmask 255.255.255.0
|
||||
|
||||
# Edit your Docker startup file
|
||||
$ echo "DOCKER_OPTS=\"-b=bridge0\"" >> /etc/default/docker
|
||||
|
||||
# Start Docker
|
||||
$ sudo service docker start
|
||||
|
||||
# Ensure bridge0 IP is not changed by Docker
|
||||
$ sudo ifconfig bridge0
|
||||
bridge0 Link encap:Ethernet HWaddr xx:xx:xx:xx:xx:xx
|
||||
inet addr:192.168.227.1 Bcast:192.168.227.255 Mask:255.255.255.0
|
||||
|
||||
# Run a container
|
||||
$ docker run -i -t base /bin/bash
|
||||
|
||||
# Container IP in the 192.168.227/24 range
|
||||
root@261c272cd7d5:/# ifconfig eth0
|
||||
eth0 Link encap:Ethernet HWaddr xx:xx:xx:xx:xx:xx
|
||||
inet addr:192.168.227.5 Bcast:192.168.227.255 Mask:255.255.255.0
|
||||
|
||||
# bridge0 IP as the default gateway
|
||||
root@261c272cd7d5:/# route -n
|
||||
Kernel IP routing table
|
||||
Destination Gateway Genmask Flags Metric Ref Use Iface
|
||||
0.0.0.0 192.168.227.1 0.0.0.0 UG 0 0 0 eth0
|
||||
192.168.227.0 0.0.0.0 255.255.255.0 U 0 0 0 eth0
|
||||
|
||||
# hits CTRL+P then CTRL+Q to detach
|
||||
|
||||
# Display bridge info
|
||||
$ sudo brctl show
|
||||
bridge name bridge id STP enabled interfaces
|
||||
bridge0 8000.fe7c2e0faebd no vethAQI2QT
|
||||
|
||||
|
||||
Container intercommunication
|
||||
-------------------------------
|
||||
|
||||
Containers can communicate with each other according to the ``icc``
|
||||
parameter value of the Docker daemon.
|
||||
|
||||
- The default, ``-icc=true`` allows containers to communicate with each other.
|
||||
- ``-icc=false`` means containers are isolated from each other.
|
||||
|
||||
Under the hood, ``iptables`` is used by Docker to either accept or
|
||||
drop communication between containers.
|
||||
|
||||
|
||||
.. _vethxxxx-device:
|
||||
|
||||
What's about the vethXXXX device?
|
||||
-----------------------------------
|
||||
Well. Things get complicated here.
|
||||
|
||||
The ``vethXXXX`` interface is the host side of a point-to-point link
|
||||
between the host and the corresponding container, the other side of
|
||||
the link being materialized by the container's ``eth0``
|
||||
interface. This pair (host ``vethXXX`` and container ``eth0``) are
|
||||
connected like a tube. Everything that comes in one side will come out
|
||||
the other side.
|
||||
|
||||
All the plumbing is delegated to Linux network capabilities (check the
|
||||
ip link command) and the namespaces infrastructure.
|
||||
|
||||
|
||||
I want more
|
||||
------------
|
||||
|
||||
Jérôme Petazzoni has create ``pipework`` to connect together
|
||||
containers in arbitrarily complex scenarios :
|
||||
https://github.com/jpetazzo/pipework
|
||||
@@ -1,12 +1,12 @@
|
||||
:title: Port redirection
|
||||
:title: Redirect Ports
|
||||
:description: usage about port redirection
|
||||
:keywords: Usage, basic port, docker, documentation, examples
|
||||
|
||||
|
||||
.. _port_redirection:
|
||||
|
||||
Port redirection
|
||||
================
|
||||
Redirect Ports
|
||||
==============
|
||||
|
||||
Interacting with a service is commonly done through a connection to a
|
||||
port. When this service runs inside a container, one can connect to
|
||||
@@ -31,7 +31,7 @@ container, Docker provide ways to bind the container port to an
|
||||
interface of the host system. To simplify communication between
|
||||
containers, Docker provides the linking mechanism.
|
||||
|
||||
Binding a port to an host interface
|
||||
Binding a port to a host interface
|
||||
-----------------------------------
|
||||
|
||||
To bind a port of the container to a specific interface of the host
|
||||
|
||||
@@ -1,15 +1,16 @@
|
||||
:title: Working with Links and Names
|
||||
:description: How to create and use links and names
|
||||
:keywords: Examples, Usage, links, docker, documentation, examples, names, name, container naming
|
||||
:title: Link Containers
|
||||
:description: How to create and use both links and names
|
||||
:keywords: Examples, Usage, links, linking, docker, documentation, examples, names, name, container naming
|
||||
|
||||
.. _working_with_links_names:
|
||||
|
||||
Working with Links and Names
|
||||
============================
|
||||
Link Containers
|
||||
===============
|
||||
|
||||
From version 0.6.5 you are now able to ``name`` a container and ``link`` it to another
|
||||
container by referring to its name. This will create a parent -> child relationship
|
||||
where the parent container can see selected information about its child.
|
||||
From version 0.6.5 you are now able to ``name`` a container and
|
||||
``link`` it to another container by referring to its name. This will
|
||||
create a parent -> child relationship where the parent container can
|
||||
see selected information about its child.
|
||||
|
||||
.. _run_name:
|
||||
|
||||
@@ -18,8 +19,9 @@ Container Naming
|
||||
|
||||
.. versionadded:: v0.6.5
|
||||
|
||||
You can now name your container by using the ``-name`` flag. If no name is provided, Docker
|
||||
will automatically generate a name. You can see this name using the ``docker ps`` command.
|
||||
You can now name your container by using the ``-name`` flag. If no
|
||||
name is provided, Docker will automatically generate a name. You can
|
||||
see this name using the ``docker ps`` command.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
@@ -38,47 +40,53 @@ Links: service discovery for docker
|
||||
|
||||
.. versionadded:: v0.6.5
|
||||
|
||||
Links allow containers to discover and securely communicate with each other by using the
|
||||
flag ``-link name:alias``. Inter-container communication can be disabled with the daemon
|
||||
flag ``-icc=false``. With this flag set to false, Container A cannot access Container B
|
||||
unless explicitly allowed via a link. This is a huge win for securing your containers.
|
||||
When two containers are linked together Docker creates a parent child relationship
|
||||
between the containers. The parent container will be able to access information via
|
||||
environment variables of the child such as name, exposed ports, IP and other selected
|
||||
environment variables.
|
||||
Links allow containers to discover and securely communicate with each
|
||||
other by using the flag ``-link name:alias``. Inter-container
|
||||
communication can be disabled with the daemon flag
|
||||
``-icc=false``. With this flag set to ``false``, Container A cannot
|
||||
access Container B unless explicitly allowed via a link. This is a
|
||||
huge win for securing your containers. When two containers are linked
|
||||
together Docker creates a parent child relationship between the
|
||||
containers. The parent container will be able to access information
|
||||
via environment variables of the child such as name, exposed ports, IP
|
||||
and other selected environment variables.
|
||||
|
||||
When linking two containers Docker will use the exposed ports of the container to create
|
||||
a secure tunnel for the parent to access. If a database container only exposes port 8080
|
||||
then the linked container will only be allowed to access port 8080 and nothing else if
|
||||
When linking two containers Docker will use the exposed ports of the
|
||||
container to create a secure tunnel for the parent to access. If a
|
||||
database container only exposes port 8080 then the linked container
|
||||
will only be allowed to access port 8080 and nothing else if
|
||||
inter-container communication is set to false.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
# Example: there is an image called redis-2.6 that exposes the port 6379 and starts redis-server.
|
||||
# Let's name the container as "redis" based on that image and run it as daemon.
|
||||
$ sudo docker run -d -name redis redis-2.6
|
||||
|
||||
We can issue all the commands that you would expect using the name "redis"; start, stop,
|
||||
attach, using the name for our container. The name also allows us to link other containers
|
||||
into this one.
|
||||
|
||||
Next, we can start a new web application that has a dependency on Redis and apply a link
|
||||
to connect both containers. If you noticed when running our Redis server we did not use
|
||||
the -p flag to publish the Redis port to the host system. Redis exposed port 6379 and
|
||||
this is all we need to establish a link.
|
||||
For example, there is an image called ``crosbymichael/redis`` that exposes the
|
||||
port 6379 and starts the Redis server. Let's name the container as ``redis``
|
||||
based on that image and run it as daemon.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ sudo docker run -d -name redis crosbymichael/redis
|
||||
|
||||
We can issue all the commands that you would expect using the name
|
||||
``redis``; start, stop, attach, using the name for our container. The
|
||||
name also allows us to link other containers into this one.
|
||||
|
||||
Next, we can start a new web application that has a dependency on
|
||||
Redis and apply a link to connect both containers. If you noticed when
|
||||
running our Redis server we did not use the ``-p`` flag to publish the
|
||||
Redis port to the host system. Redis exposed port 6379 and this is all
|
||||
we need to establish a link.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
# Linking the redis container as a child
|
||||
$ sudo docker run -t -i -link redis:db -name webapp ubuntu bash
|
||||
|
||||
When you specified -link redis:db you are telling docker to link the container named redis
|
||||
into this new container with the alias db. Environment variables are prefixed with the alias
|
||||
so that the parent container can access network and environment information from the containers
|
||||
that are linked into it.
|
||||
When you specified ``-link redis:db`` you are telling Docker to link
|
||||
the container named ``redis`` into this new container with the alias
|
||||
``db``. Environment variables are prefixed with the alias so that the
|
||||
parent container can access network and environment information from
|
||||
the containers that are linked into it.
|
||||
|
||||
If we inspect the environment variables of the second container, we would see all the information
|
||||
about the child container.
|
||||
If we inspect the environment variables of the second container, we
|
||||
would see all the information about the child container.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
@@ -100,5 +108,17 @@ about the child container.
|
||||
_=/usr/bin/env
|
||||
root@4c01db0b339c:/#
|
||||
|
||||
Accessing the network information along with the environment of the child container allows
|
||||
us to easily connect to the Redis service on the specific IP and port in the environment.
|
||||
Accessing the network information along with the environment of the
|
||||
child container allows us to easily connect to the Redis service on
|
||||
the specific IP and port in the environment.
|
||||
|
||||
Running ``docker ps`` shows the 2 containers, and the ``webapp/db``
|
||||
alias name for the redis container.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ docker ps
|
||||
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
|
||||
4c01db0b339c ubuntu:12.04 bash 17 seconds ago Up 16 seconds webapp
|
||||
d7886598dbe2 crosbymichael/redis:latest /redis-server --dir 33 minutes ago Up 33 minutes 6379/tcp redis,webapp/db
|
||||
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
:title: Working with Volumes
|
||||
:title: Share Directories via Volumes
|
||||
:description: How to create and share volumes
|
||||
:keywords: Examples, Usage, volume, docker, documentation, examples
|
||||
|
||||
.. _volume_def:
|
||||
|
||||
Data Volume
|
||||
===========
|
||||
Share Directories via Volumes
|
||||
=============================
|
||||
|
||||
.. versionadded:: v0.3.0
|
||||
Data volumes have been available since version 1 of the
|
||||
@@ -13,7 +13,7 @@ Data Volume
|
||||
|
||||
A *data volume* is a specially-designated directory within one or more
|
||||
containers that bypasses the :ref:`ufs_def` to provide several useful
|
||||
features for persistant or shared data:
|
||||
features for persistent or shared data:
|
||||
|
||||
* **Data volumes can be shared and reused between containers.** This
|
||||
is the feature that makes data volumes so powerful. You can use it
|
||||
@@ -30,35 +30,58 @@ Each container can have zero or more data volumes.
|
||||
Getting Started
|
||||
...............
|
||||
|
||||
Using data volumes is as simple as adding a new flag: ``-v``. The
|
||||
parameter ``-v`` can be used more than once in order to create more
|
||||
volumes within the new container. The example below shows the
|
||||
instruction to create a container with two new volumes::
|
||||
Using data volumes is as simple as adding a ``-v`` parameter to the ``docker run``
|
||||
command. The ``-v`` parameter can be used more than once in order to
|
||||
create more volumes within the new container. To create a new container with
|
||||
two new volumes::
|
||||
|
||||
docker run -v /var/volume1 -v /var/volume2 shykes/couchdb
|
||||
$ docker run -v /var/volume1 -v /var/volume2 busybox true
|
||||
|
||||
For a Dockerfile, the VOLUME instruction will add one or more new
|
||||
volumes to any container created from the image::
|
||||
This command will create the new container with two new volumes that
|
||||
exits instantly (``true`` is pretty much the smallest, simplest program
|
||||
that you can run). Once created you can mount its volumes in any other
|
||||
container using the ``-volumes-from`` option; irrespecive of whether the
|
||||
container is running or not.
|
||||
|
||||
VOLUME ["/var/volume1", "/var/volume2"]
|
||||
Or, you can use the VOLUME instruction in a Dockerfile to add one or more new
|
||||
volumes to any container created from that image::
|
||||
|
||||
# BUILD-USING: docker build -t data .
|
||||
# RUN-USING: docker run -name DATA data
|
||||
FROM busybox
|
||||
VOLUME ["/var/volume1", "/var/volume2"]
|
||||
CMD ["/usr/bin/true"]
|
||||
|
||||
Mount Volumes from an Existing Container:
|
||||
-----------------------------------------
|
||||
Creating and mounting a Data Volume Container
|
||||
---------------------------------------------
|
||||
|
||||
The command below creates a new container which is runnning as daemon
|
||||
``-d`` and with one volume ``/var/lib/couchdb``::
|
||||
If you have some persistent data that you want to share between containers,
|
||||
or want to use from non-persistent containers, its best to create a named
|
||||
Data Volume Container, and then to mount the data from it.
|
||||
|
||||
COUCH1=$(sudo docker run -d -v /var/lib/couchdb shykes/couchdb:2013-05-03)
|
||||
Create a named container with volumes to share (``/var/volume1`` and ``/var/volume2``)::
|
||||
|
||||
From the container id of that previous container ``$COUCH1`` it's
|
||||
possible to create new container sharing the same volume using the
|
||||
parameter ``-volumes-from container_id``::
|
||||
$ docker run -v /var/volume1 -v /var/volume2 -name DATA busybox true
|
||||
|
||||
COUCH2=$(sudo docker run -d -volumes-from $COUCH1 shykes/couchdb:2013-05-03)
|
||||
Then mount those data volumes into your application containers::
|
||||
|
||||
Now, the second container has the all the information from the first volume.
|
||||
$ docker run -t -i -rm -volumes-from DATA -name client1 ubuntu bash
|
||||
|
||||
You can use multiple ``-volumes-from`` parameters to bring together multiple
|
||||
data volumes from multiple containers.
|
||||
|
||||
Interestingly, you can mount the volumes that came from the ``DATA`` container in
|
||||
yet another container via the ``client1`` middleman container::
|
||||
|
||||
$ docker run -t -i -rm -volumes-from client1 ubuntu -name client2 bash
|
||||
|
||||
This allows you to abstract the actual data source from users of that data,
|
||||
similar to :ref:`ambassador_pattern_linking <ambassador_pattern_linking>`.
|
||||
|
||||
If you remove containers that mount volumes, including the initial DATA container,
|
||||
or the middleman, the volumes will not be deleted until there are no containers still
|
||||
referencing those volumes. This allows you to upgrade, or effectivly migrate data volumes
|
||||
between containers.
|
||||
|
||||
Mount a Host Directory as a Container Volume:
|
||||
---------------------------------------------
|
||||
@@ -68,13 +91,13 @@ Mount a Host Directory as a Container Volume:
|
||||
-v=[]: Create a bind mount with: [host-dir]:[container-dir]:[rw|ro].
|
||||
If "host-dir" is missing, then docker creates a new volume.
|
||||
|
||||
This is not available for a Dockerfile due the portability and sharing
|
||||
purpose of it. The [host-dir] volumes is something 100% host dependent
|
||||
and will break on any other machine.
|
||||
This is not available from a Dockerfile as it makes the built image less portable
|
||||
or shareable. [host-dir] volumes are 100% host dependent and will break on any
|
||||
other machine.
|
||||
|
||||
For example::
|
||||
|
||||
sudo docker run -v /var/logs:/var/host_logs:ro shykes/couchdb:2013-05-03
|
||||
sudo docker run -v /var/logs:/var/host_logs:ro ubuntu bash
|
||||
|
||||
The command above mounts the host directory ``/var/logs`` into the
|
||||
container with read only permissions as ``/var/host_logs``.
|
||||
@@ -87,3 +110,6 @@ Known Issues
|
||||
* :issue:`2702`: "lxc-start: Permission denied - failed to mount"
|
||||
could indicate a permissions problem with AppArmor. Please see the
|
||||
issue for a workaround.
|
||||
* :issue:`2528`: the busybox container is used to make the resulting container as small and
|
||||
simple as possible - whenever you need to interact with the data in the volume
|
||||
you mount it into another container.
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
:title: Working With Repositories
|
||||
:title: Share Images via Repositories
|
||||
:description: Repositories allow users to share images.
|
||||
:keywords: repo, repositories, usage, pull image, push image, image, documentation
|
||||
|
||||
.. _working_with_the_repository:
|
||||
|
||||
Working with Repositories
|
||||
=========================
|
||||
Share Images via Repositories
|
||||
=============================
|
||||
|
||||
A *repository* is a hosted collection of tagged :ref:`images
|
||||
<image_def>` that together create the file system for a container. The
|
||||
@@ -152,6 +152,41 @@ or tag.
|
||||
|
||||
.. _using_private_repositories:
|
||||
|
||||
Trusted Builds
|
||||
--------------
|
||||
|
||||
Trusted Builds automate the building and updating of images from GitHub, directly
|
||||
on docker.io servers. It works by adding a commit hook to your selected repository,
|
||||
triggering a build and update when you push a commit.
|
||||
|
||||
To setup a trusted build
|
||||
++++++++++++++++++++++++
|
||||
|
||||
#. Create a `Docker Index account <https://index.docker.io/>`_ and login.
|
||||
#. Link your GitHub account through the ``Link Accounts`` menu.
|
||||
#. `Configure a Trusted build <https://index.docker.io/builds/>`_.
|
||||
#. Pick a GitHub project that has a ``Dockerfile`` that you want to build.
|
||||
#. Pick the branch you want to build (the default is the ``master`` branch).
|
||||
#. Give the Trusted Build a name.
|
||||
#. Assign an optional Docker tag to the Build.
|
||||
#. Specify where the ``Dockerfile`` is located. The default is ``/``.
|
||||
|
||||
Once the Trusted Build is configured it will automatically trigger a build, and
|
||||
in a few minutes, if there are no errors, you will see your new trusted build
|
||||
on the Docker Index. It will will stay in sync with your GitHub repo until you
|
||||
deactivate the Trusted Build.
|
||||
|
||||
If you want to see the status of your Trusted Builds you can go to your
|
||||
`Trusted Builds page <https://index.docker.io/builds/>`_ on the Docker index,
|
||||
and it will show you the status of your builds, and the build history.
|
||||
|
||||
Once you've created a Trusted Build you can deactive or delete it. You cannot
|
||||
however push to a Trusted Build with the ``docker push`` command. You can only
|
||||
manage it by committing code to your GitHub repository.
|
||||
|
||||
You can create multiple Trusted Builds per repository and configure them to
|
||||
point to specific ``Dockerfile``'s or Git branches.
|
||||
|
||||
Private Repositories
|
||||
--------------------
|
||||
|
||||
|
||||
28
docs/theme/docker/layout.html
vendored
28
docs/theme/docker/layout.html
vendored
@@ -35,7 +35,7 @@
|
||||
%}
|
||||
|
||||
{#
|
||||
This part is hopefully complex because things like |cut '/index/' are not available in spinx jinja
|
||||
This part is hopefully complex because things like |cut '/index/' are not available in Sphinx jinja
|
||||
and will make it crash. (and we need index/ out.
|
||||
#}
|
||||
<link rel="canonical" href="http://docs.docker.io/en/latest/
|
||||
@@ -86,26 +86,26 @@
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="container">
|
||||
<div class="container-fluid">
|
||||
|
||||
<!-- Docs nav
|
||||
================================================== -->
|
||||
<div class="row main-row">
|
||||
<div class="row-fluid main-row">
|
||||
|
||||
<div class="span3 sidebar bs-docs-sidebar">
|
||||
<div class="sidebar bs-docs-sidebar">
|
||||
<div class="page-title" >
|
||||
<h4>DOCUMENTATION</h4>
|
||||
</div>
|
||||
|
||||
{{ toctree(collapse=False, maxdepth=3) }}
|
||||
<form>
|
||||
<input type="text" id="st-search-input" class="st-search-input span3" style="width:160px;" />
|
||||
<input type="text" id="st-search-input" class="st-search-input span3" placeholder="search in documentation" style="width:210px;" />
|
||||
<div id="st-results-container"></div>
|
||||
</form>
|
||||
</div>
|
||||
|
||||
<!-- body block -->
|
||||
<div class="span9 main-content">
|
||||
<div class="main-content">
|
||||
|
||||
<!-- Main section
|
||||
================================================== -->
|
||||
@@ -134,13 +134,22 @@
|
||||
</div>
|
||||
|
||||
<div class="social links">
|
||||
<a class="twitter" href="http://twitter.com/docker">Twitter</a>
|
||||
<a class="github" href="https://github.com/dotcloud/docker/">GitHub</a>
|
||||
<a title="Docker on Twitter" class="twitter" href="http://twitter.com/docker">Twitter</a>
|
||||
<a title="Docker on GitHub" class="github" href="https://github.com/dotcloud/docker/">GitHub</a>
|
||||
<a title="Docker on Reddit" class="reddit" href="http://www.reddit.com/r/Docker/">Reddit</a>
|
||||
<a title="Docker on Google+" class="googleplus" href="https://plus.google.com/u/0/b/100381662757235514581/communities/108146856671494713993">Google+</a>
|
||||
<a title="Docker on Facebook" class="facebook" href="https://www.facebook.com/docker.run">Facebook</a>
|
||||
<a title="Docker on SlideShare" class="slideshare" href="http://www.slideshare.net/dotCloud">Slideshare</a>
|
||||
<a title="Docker on Youtube" class="youtube" href="http://www.youtube.com/user/dockerrun/">Youtube</a>
|
||||
<a title="Docker on Flickr" class="flickr" href="http://www.flickr.com/photos/99741659@N08/">Flickr</a>
|
||||
<a title="Docker on LinkedIn" class="linkedin" href="http://www.linkedin.com/company/dotcloud">LinkedIn</a>
|
||||
</div>
|
||||
|
||||
<div class="tbox version-flyer ">
|
||||
<div class="content">
|
||||
<small>Current version:</small>
|
||||
<p class="version-note">Note: You are currently browsing the development documentation. The current release may work differently.</p>
|
||||
|
||||
<small>Available versions:</small>
|
||||
<ul class="inline">
|
||||
{% for slug, url in versions %}
|
||||
<li class="alternative"><a href="{{ url }}{%- for word in pagename.split('/') -%}
|
||||
@@ -163,6 +172,7 @@
|
||||
</div>
|
||||
<!-- end of footer -->
|
||||
</div>
|
||||
|
||||
</div>
|
||||
|
||||
|
||||
|
||||
88
docs/theme/docker/static/css/main.css
vendored
88
docs/theme/docker/static/css/main.css
vendored
@@ -62,9 +62,12 @@ p a.btn {
|
||||
-moz-box-shadow: 0 1px 4px rgba(0, 0, 0, 0.065);
|
||||
box-shadow: 0 1px 4px rgba(0, 0, 0, 0.065);
|
||||
}
|
||||
.brand.logo a {
|
||||
.brand-logo a {
|
||||
color: white;
|
||||
}
|
||||
.brand-logo a img {
|
||||
width: auto;
|
||||
}
|
||||
.inline-icon {
|
||||
margin-bottom: 6px;
|
||||
}
|
||||
@@ -186,8 +189,15 @@ body {
|
||||
.main-row {
|
||||
margin-top: 40px;
|
||||
}
|
||||
.sidebar {
|
||||
width: 215px;
|
||||
float: left;
|
||||
}
|
||||
.main-content {
|
||||
padding: 16px 18px inherit;
|
||||
margin-left: 230px;
|
||||
/* space for sidebar */
|
||||
|
||||
}
|
||||
/* =======================
|
||||
Social footer
|
||||
@@ -198,20 +208,54 @@ body {
|
||||
}
|
||||
.social .twitter,
|
||||
.social .github,
|
||||
.social .googleplus {
|
||||
background: url("https://www.docker.io/static/img/footer-links.png") no-repeat transparent;
|
||||
.social .googleplus,
|
||||
.social .facebook,
|
||||
.social .slideshare,
|
||||
.social .linkedin,
|
||||
.social .flickr,
|
||||
.social .youtube,
|
||||
.social .reddit {
|
||||
background: url("../img/social/docker_social_logos.png") no-repeat transparent;
|
||||
display: inline-block;
|
||||
height: 35px;
|
||||
height: 32px;
|
||||
overflow: hidden;
|
||||
text-indent: 9999px;
|
||||
width: 35px;
|
||||
margin-right: 10px;
|
||||
width: 32px;
|
||||
margin-right: 5px;
|
||||
}
|
||||
.social :hover {
|
||||
-webkit-transform: rotate(-10deg);
|
||||
-moz-transform: rotate(-10deg);
|
||||
-o-transform: rotate(-10deg);
|
||||
-ms-transform: rotate(-10deg);
|
||||
transform: rotate(-10deg);
|
||||
}
|
||||
.social .twitter {
|
||||
background-position: 0px 2px;
|
||||
background-position: -160px 0px;
|
||||
}
|
||||
.social .reddit {
|
||||
background-position: -256px 0px;
|
||||
}
|
||||
.social .github {
|
||||
background-position: -59px 2px;
|
||||
background-position: -64px 0px;
|
||||
}
|
||||
.social .googleplus {
|
||||
background-position: -96px 0px;
|
||||
}
|
||||
.social .facebook {
|
||||
background-position: 0px 0px;
|
||||
}
|
||||
.social .slideshare {
|
||||
background-position: -128px 0px;
|
||||
}
|
||||
.social .youtube {
|
||||
background-position: -192px 0px;
|
||||
}
|
||||
.social .flickr {
|
||||
background-position: -32px 0px;
|
||||
}
|
||||
.social .linkedin {
|
||||
background-position: -224px 0px;
|
||||
}
|
||||
form table th {
|
||||
vertical-align: top;
|
||||
@@ -342,6 +386,7 @@ div.alert.alert-block {
|
||||
border: 1px solid #88BABC;
|
||||
padding: 5px;
|
||||
font-size: larger;
|
||||
max-width: 300px;
|
||||
}
|
||||
.version-flyer .content {
|
||||
padding-right: 45px;
|
||||
@@ -351,18 +396,18 @@ div.alert.alert-block {
|
||||
background-position: right center;
|
||||
background-repeat: no-repeat;
|
||||
}
|
||||
.version-flyer .alternative {
|
||||
visibility: hidden;
|
||||
display: none;
|
||||
}
|
||||
.version-flyer .active-slug {
|
||||
visibility: visible;
|
||||
display: inline-block;
|
||||
font-weight: bolder;
|
||||
}
|
||||
.version-flyer:hover .alternative {
|
||||
animation-duration: 1s;
|
||||
display: inline-block;
|
||||
visibility: visible;
|
||||
}
|
||||
.version-flyer .version-note {
|
||||
font-size: 16px;
|
||||
color: black;
|
||||
}
|
||||
/* =====================================
|
||||
Styles for
|
||||
@@ -410,3 +455,20 @@ dt:hover > a.headerlink {
|
||||
.admonition.seealso {
|
||||
border-color: #23cb1f;
|
||||
}
|
||||
/* Add styles for other types of comments */
|
||||
.versionchanged,
|
||||
.versionadded,
|
||||
.versionmodified,
|
||||
.deprecated {
|
||||
font-size: larger;
|
||||
font-weight: bold;
|
||||
}
|
||||
.versionchanged {
|
||||
color: lightseagreen;
|
||||
}
|
||||
.versionadded {
|
||||
color: mediumblue;
|
||||
}
|
||||
.deprecated {
|
||||
color: orangered;
|
||||
}
|
||||
|
||||
107
docs/theme/docker/static/css/main.less
vendored
107
docs/theme/docker/static/css/main.less
vendored
@@ -98,7 +98,6 @@ p a {
|
||||
}
|
||||
|
||||
|
||||
|
||||
.navbar .brand {
|
||||
margin-left: 0px;
|
||||
float: left;
|
||||
@@ -126,9 +125,11 @@ p a {
|
||||
box-shadow: 0 1px 4px rgba(0, 0, 0, 0.065);
|
||||
}
|
||||
|
||||
.brand.logo a {
|
||||
.brand-logo a {
|
||||
color: white;
|
||||
|
||||
img {
|
||||
width: auto;
|
||||
}
|
||||
}
|
||||
|
||||
.logo {
|
||||
@@ -317,10 +318,18 @@ body {
|
||||
margin-top: 40px;
|
||||
}
|
||||
|
||||
.sidebar {
|
||||
width: 215px;
|
||||
float: left;
|
||||
}
|
||||
|
||||
.main-content {
|
||||
padding: 16px 18px inherit;
|
||||
margin-left: 230px; /* space for sidebar */
|
||||
}
|
||||
|
||||
|
||||
|
||||
/* =======================
|
||||
Social footer
|
||||
======================= */
|
||||
@@ -330,24 +339,64 @@ body {
|
||||
margin-top: 15px;
|
||||
}
|
||||
|
||||
.social .twitter, .social .github, .social .googleplus {
|
||||
background: url("https://www.docker.io/static/img/footer-links.png") no-repeat transparent;
|
||||
display: inline-block;
|
||||
height: 35px;
|
||||
overflow: hidden;
|
||||
text-indent: 9999px;
|
||||
width: 35px;
|
||||
margin-right: 10px;
|
||||
.social {
|
||||
.twitter, .github, .googleplus, .facebook, .slideshare, .linkedin, .flickr, .youtube, .reddit {
|
||||
background: url("../img/social/docker_social_logos.png") no-repeat transparent;
|
||||
display: inline-block;
|
||||
height: 32px;
|
||||
overflow: hidden;
|
||||
text-indent: 9999px;
|
||||
width: 32px;
|
||||
margin-right: 5px;
|
||||
}
|
||||
}
|
||||
|
||||
.social :hover {
|
||||
-webkit-transform: rotate(-10deg);
|
||||
-moz-transform: rotate(-10deg);
|
||||
-o-transform: rotate(-10deg);
|
||||
-ms-transform: rotate(-10deg);
|
||||
transform: rotate(-10deg);
|
||||
}
|
||||
|
||||
.social .twitter {
|
||||
background-position: 0px 2px;
|
||||
background-position: -160px 0px;
|
||||
}
|
||||
|
||||
.social .reddit {
|
||||
background-position: -256px 0px;
|
||||
}
|
||||
|
||||
.social .github {
|
||||
background-position: -59px 2px;
|
||||
background-position: -64px 0px;
|
||||
}
|
||||
|
||||
.social .googleplus {
|
||||
background-position: -96px 0px;
|
||||
}
|
||||
|
||||
.social .facebook {
|
||||
background-position: -0px 0px;
|
||||
}
|
||||
|
||||
.social .slideshare {
|
||||
background-position: -128px 0px;
|
||||
}
|
||||
|
||||
.social .youtube {
|
||||
background-position: -192px 0px;
|
||||
}
|
||||
|
||||
.social .flickr {
|
||||
background-position: -32px 0px;
|
||||
}
|
||||
|
||||
.social .linkedin {
|
||||
background-position: -224px 0px;
|
||||
}
|
||||
|
||||
|
||||
|
||||
// Styles on the forms
|
||||
// ----------------------------------
|
||||
|
||||
@@ -528,31 +577,34 @@ div.alert.alert-block {
|
||||
border: 1px solid #88BABC;
|
||||
padding: 5px;
|
||||
font-size: larger;
|
||||
max-width: 300px;
|
||||
|
||||
.content {
|
||||
padding-right: 45px;
|
||||
margin-top: 7px;
|
||||
margin-left: 7px;
|
||||
// display: inline-block;
|
||||
background-image: url('../img/container3.png');
|
||||
background-position: right center;
|
||||
background-repeat: no-repeat;
|
||||
}
|
||||
|
||||
.alternative {
|
||||
visibility: hidden;
|
||||
display: none;
|
||||
}
|
||||
|
||||
.active-slug {
|
||||
visibility: visible;
|
||||
display: inline-block;
|
||||
font-weight: bolder;
|
||||
}
|
||||
|
||||
&:hover .alternative {
|
||||
animation-duration: 1s;
|
||||
display: inline-block;
|
||||
visibility: visible;
|
||||
}
|
||||
|
||||
.version-note {
|
||||
font-size: 16px;
|
||||
color: black;
|
||||
}
|
||||
|
||||
}
|
||||
@@ -612,3 +664,24 @@ dt:hover > a.headerlink {
|
||||
|
||||
}
|
||||
|
||||
/* Add styles for other types of comments */
|
||||
|
||||
.versionchanged,
|
||||
.versionadded,
|
||||
.versionmodified,
|
||||
.deprecated {
|
||||
font-size: larger;
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
.versionchanged {
|
||||
color: lightseagreen;
|
||||
}
|
||||
|
||||
.versionadded {
|
||||
color: mediumblue;
|
||||
}
|
||||
|
||||
.deprecated {
|
||||
color: orangered;
|
||||
}
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user