Mirror of https://github.com/moby/moby.git (synced 2026-01-11 18:51:37 +00:00)

Compare commits: v0.7.0-rc6...v0.7.2 — 643 commits
[Commit table: only abbreviated SHA1s were preserved in this capture, from 28b162eeb4 down to 6d420407ca; the Author, Date, and message columns are empty. See the compare view on GitHub for the full list.]
.gitignore (vendored) — 3 changes

@@ -1,3 +1,6 @@
+# Docker project generated files to ignore
+# if you want to ignore files created by your editor/tools,
+# please consider a global .gitignore https://help.github.com/articles/ignoring-files
 .vagrant*
 bin
 docker/docker
CHANGELOG.md — 145 changes

@@ -1,5 +1,149 @@
 # Changelog
 
+## 0.7.2 (2013-12-16)
+
+#### Runtime
+
++ Validate container names on creation with standard regex
+* Increase maximum image depth to 127 from 42
+* Continue to move api endpoints to the job api
++ Add -bip flag to allow specification of dynamic bridge IP via CIDR
+- Allow bridge creation when ipv6 is not enabled on certain systems
+* Set hostname and IP address from within dockerinit
+* Drop capabilities from within dockerinit
+- Fix volumes on host when symlink is present the image
+- Prevent deletion of image if ANY container is depending on it even if the container is not running
+* Update docker push to use new progress display
+* Use os.Lstat to allow mounting unix sockets when inspecting volumes
+- Adjusted handling of inactive user login
+- Add missing defines in devicemapper for older kernels
+- Allow untag operations with no container validation
+- Add auth config to docker build
+
+#### Documentation
+
+* Add more information about Docker logging
++ Add RHEL documentation
+* Add a direct example for changing the CMD that is run in a container
+* Update Arch installation documentation
++ Add section on Trusted Builds
++ Add Network documentation page
+
+#### Other
+
++ Add new cover bundle for providing code coverage reporting
+* Separate integration tests in bundles
+* Make Tianon the hack maintainer
+* Update mkimage-debootstrap with more tweaks for keeping images small
+* Use https to get the install script
+* Remove vendored dotcloud/tar now that Go 1.2 has been released
+
+## 0.7.1 (2013-12-05)
+
+#### Documentation
+
++ Add @SvenDowideit as documentation maintainer
++ Add links example
++ Add documentation regarding ambassador pattern
++ Add Google Cloud Platform docs
++ Add dockerfile best practices
+* Update doc for RHEL
+* Update doc for registry
+* Update Postgres examples
+* Update doc for Ubuntu install
+* Improve remote api doc
+
+#### Runtime
+
++ Add hostconfig to docker inspect
++ Implement `docker log -f` to stream logs
++ Add env variable to disable kernel version warning
++ Add -format to `docker inspect`
++ Support bind-mount for files
+- Fix bridge creation on RHEL
+- Fix image size calculation
+- Make sure iptables are called even if the bridge already exists
+- Fix issue with stderr only attach
+- Remove init layer when destroying a container
+- Fix same port binding on different interfaces
+- `docker build` now returns the correct exit code
+- Fix `docker port` to display correct port
+- `docker build` now check that the dockerfile exists client side
+- `docker attach` now returns the correct exit code
+- Remove the name entry when the container does not exist
+
+#### Registry
+
+* Improve progress bars, add ETA for downloads
+* Simultaneous pulls now waits for the first to finish instead of failing
+- Tag only the top-layer image when pushing to registry
+- Fix issue with offline image transfer
+- Fix issue preventing using ':' in password for registry
+
+#### Other
+
++ Add pprof handler for debug
++ Create a Makefile
+* Use stdlib tar that now includes fix
+* Improve make.sh test script
+* Handle SIGQUIT on the daemon
+* Disable verbose during tests
+* Upgrade to go1.2 for official build
+* Improve unit tests
+* The test suite now runs all tests even if one fails
+* Refactor C in Go (Devmapper)
+- Fix OSX compilation
+
+## 0.7.0 (2013-11-25)
+
+#### Notable features since 0.6.0
+
+* Storage drivers: choose from aufs, device-mapper, or vfs.
+* Standard Linux support: docker now runs on unmodified Linux kernels and all major distributions.
+* Links: compose complex software stacks by connecting containers to each other.
+* Container naming: organize your containers by giving them memorable names.
+* Advanced port redirects: specify port redirects per interface, or keep sensitive ports private.
+* Offline transfer: push and pull images to the filesystem without losing information.
+* Quality: numerous bugfixes and small usability improvements. Significant increase in test coverage.
+
+## 0.6.7 (2013-11-21)
+
+#### Runtime
+
+* Improved stability, fixes some race conditons
+* Skip the volumes mounted when deleting the volumes of container.
+* Fix layer size computation: handle hard links correctly
+* Use the work Path for docker cp CONTAINER:PATH
+* Fix tmp dir never cleanup
+* Speedup docker ps
+* More informative error message on name collisions
+* Fix nameserver regex
+* Always return long id's
+* Fix container restart race condition
+* Keep published ports on docker stop;docker start
+* Fix container networking on Fedora
+* Correctly express "any address" to iptables
+* Fix network setup when reconnecting to ghost container
+* Prevent deletion if image is used by a running container
+* Lock around read operations in graph
+
+#### RemoteAPI
+
+* Return full ID on docker rmi
+
+#### Client
+
++ Add -tree option to images
++ Offline image transfer
+* Exit with status 2 on usage error and display usage on stderr
+* Do not forward SIGCHLD to container
+* Use string timestamp for docker events -since
+
+#### Other
+
+* Update to go 1.2rc5
++ Add /etc/default/docker support to upstart
+
 ## 0.6.6 (2013-11-06)
 
 #### Runtime
@@ -17,6 +161,7 @@
  + Prevent DNS server conflicts in CreateBridgeIface
  + Validate bind mounts on the server side
  + Use parent image config in docker build
+* Fix regression in /etc/hosts
 
 #### Client
 
CONTRIBUTING.md [file header not preserved in this capture; the content matches CONTRIBUTING.md]

@@ -4,6 +4,13 @@ Want to hack on Docker? Awesome! Here are instructions to get you
 started. They are probably not perfect, please let us know if anything
 feels wrong or incomplete.
 
+## Reporting Issues
+
+When reporting [issues](https://github.com/dotcloud/docker/issues)
+on Github please include your host OS ( Ubuntu 12.04, Fedora 19, etc... )
+and the output of `docker version` along with the output of `docker info` if possible.
+This information will help us review and fix your issue faster.
+
 ## Build Environment
 
 For instructions on setting up your development environment, please
@@ -64,7 +71,7 @@ your branch before submitting a pull request.
 
 Update the documentation when creating or modifying features. Test
 your documentation changes for clarity, concision, and correctness, as
-well as a clean docmuent build. See ``docs/README.md`` for more
+well as a clean documentation build. See ``docs/README.md`` for more
 information on building the docs and how docs get released.
 
 Write clean code. Universally formatted code promotes ease of writing, reading,
@@ -115,6 +122,7 @@ For more details see [MAINTAINERS.md](hack/MAINTAINERS.md)
 * Step 1: learn the component inside out
 * Step 2: make yourself useful by contributing code, bugfixes, support etc.
 * Step 3: volunteer on the irc channel (#docker@freenode)
+* Step 4: propose yourself at a scheduled #docker-meeting
 
 Don't forget: being a maintainer is a time investment. Make sure you will have time to make yourself available.
 You don't have to be a maintainer to make a difference on the project!
Dockerfile — 60 changes

@@ -24,53 +24,55 @@
 #
 
 docker-version 0.6.1
-from ubuntu:12.04
-maintainer Solomon Hykes <solomon@dotcloud.com>
+FROM ubuntu:12.04
+MAINTAINER Solomon Hykes <solomon@dotcloud.com>
 
 # Build dependencies
-run echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' > /etc/apt/sources.list
-run apt-get update
-run apt-get install -y -q curl
-run apt-get install -y -q git
-run apt-get install -y -q mercurial
-run apt-get install -y -q build-essential libsqlite3-dev
+RUN echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' > /etc/apt/sources.list
+RUN apt-get update
+RUN apt-get install -y -q curl
+RUN apt-get install -y -q git
+RUN apt-get install -y -q mercurial
+RUN apt-get install -y -q build-essential libsqlite3-dev
 
 # Install Go
-run curl -s https://go.googlecode.com/files/go1.2rc5.src.tar.gz | tar -v -C /usr/local -xz
-env PATH /usr/local/go/bin:/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin
-env GOPATH /go:/go/src/github.com/dotcloud/docker/vendor
-run cd /usr/local/go/src && ./make.bash && go install -ldflags '-w -linkmode external -extldflags "-static -Wl,--unresolved-symbols=ignore-in-shared-libs"' -tags netgo -a std
+RUN curl -s https://go.googlecode.com/files/go1.2.src.tar.gz | tar -v -C /usr/local -xz
+ENV PATH /usr/local/go/bin:/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin
+ENV GOPATH /go:/go/src/github.com/dotcloud/docker/vendor
+RUN cd /usr/local/go/src && ./make.bash && go install -ldflags '-w -linkmode external -extldflags "-static -Wl,--unresolved-symbols=ignore-in-shared-libs"' -tags netgo -a std
 
 # Ubuntu stuff
-run apt-get install -y -q ruby1.9.3 rubygems libffi-dev
-run gem install --no-rdoc --no-ri fpm
-run apt-get install -y -q reprepro dpkg-sig
+RUN apt-get install -y -q ruby1.9.3 rubygems libffi-dev
+RUN gem install --no-rdoc --no-ri fpm
+RUN apt-get install -y -q reprepro dpkg-sig
 
 # Install s3cmd 1.0.1 (earlier versions don't support env variables in the config)
-run apt-get install -y -q python-pip
-run pip install s3cmd
-run pip install python-magic
-run /bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEY\n' > /.s3cfg
+RUN apt-get install -y -q python-pip
+RUN pip install s3cmd==1.1.0-beta3
+RUN pip install python-magic==0.4.6
+RUN /bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEY\n' > /.s3cfg
 
 # Runtime dependencies
-run apt-get install -y -q iptables
-run apt-get install -y -q lxc
-run apt-get install -y -q aufs-tools
+RUN apt-get install -y -q iptables
+RUN apt-get install -y -q lxc
+RUN apt-get install -y -q aufs-tools
 
 # Get lvm2 source for compiling statically
-run git clone git://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout v2_02_103
+RUN git clone https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout v2_02_103
 # see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags
 # note: we can't use "git clone -b" above because it requires at least git 1.7.10 to be able to use that on a tag instead of a branch and we only have 1.7.9.5
 
 # Compile and install lvm2
-run cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper
+RUN cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper
 # see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL
 
-volume /var/lib/docker
-workdir /go/src/github.com/dotcloud/docker
+# Grab Go's cover tool for dead-simple code coverage testing
+RUN go get code.google.com/p/go.tools/cmd/cover
+
+VOLUME /var/lib/docker
+WORKDIR /go/src/github.com/dotcloud/docker
 
 # Wrap all commands in the "docker-in-docker" script to allow nested containers
-entrypoint ["hack/dind"]
+ENTRYPOINT ["hack/dind"]
 
 # Upload docker source
-add . /go/src/github.com/dotcloud/docker
+ADD . /go/src/github.com/dotcloud/docker
Makefile — new file, 26 lines

@@ -0,0 +1,26 @@
+.PHONY: all binary build default docs shell test
+
+DOCKER_RUN_DOCKER := docker run -rm -i -t -privileged -e TESTFLAGS -v $(CURDIR)/bundles:/go/src/github.com/dotcloud/docker/bundles docker
+
+default: binary
+
+all: build
+	$(DOCKER_RUN_DOCKER) hack/make.sh
+
+binary: build
+	$(DOCKER_RUN_DOCKER) hack/make.sh binary
+
+docs:
+	docker build -t docker-docs docs && docker run -p 8000:8000 docker-docs
+
+test: build
+	$(DOCKER_RUN_DOCKER) hack/make.sh test test-integration
+
+shell: build
+	$(DOCKER_RUN_DOCKER) bash
+
+build: bundles
+	docker build -t docker .
+
+bundles:
+	mkdir bundles
Vagrantfile (vendored) — 7 changes

@@ -70,7 +70,7 @@ SCRIPT
 # trigger dkms to build the virtualbox guest module install.
 $vbox_script = <<VBOX_SCRIPT + $script
 # Install the VirtualBox guest additions if they aren't already installed.
-if [ ! -d /opt/VBoxGuestAdditions-4.3.2/ ]; then
+if [ ! -d /opt/VBoxGuestAdditions-4.3.4/ ]; then
 # Update remote package metadata. 'apt-get update' is idempotent.
 apt-get update -q
 
@@ -79,9 +79,10 @@ if [ ! -d /opt/VBoxGuestAdditions-4.3.2/ ]; then
 apt-get install -q -y linux-headers-generic-lts-raring dkms
 
 echo 'Downloading VBox Guest Additions...'
-wget -cq http://dlc.sun.com.edgesuite.net/virtualbox/4.3.2/VBoxGuestAdditions_4.3.2.iso
+wget -cq http://dlc.sun.com.edgesuite.net/virtualbox/4.3.4/VBoxGuestAdditions_4.3.4.iso
+echo "f120793fa35050a8280eacf9c930cf8d9b88795161520f6515c0cc5edda2fe8a VBoxGuestAdditions_4.3.4.iso" | sha256sum --check || exit 1
 
-mount -o loop,ro /home/vagrant/VBoxGuestAdditions_4.3.2.iso /mnt
+mount -o loop,ro /home/vagrant/VBoxGuestAdditions_4.3.4.iso /mnt
 /mnt/VBoxLinuxAdditions.run --nox11
 umount /mnt
 fi
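Alongside the version bump, the Vagrantfile now pins the guest-additions ISO to a SHA-256 digest and aborts on mismatch. For readers following along in the repository's own language, a minimal Go sketch of the same check — the digest and filename are the ones from the diff above; verifySHA256 is a hypothetical helper, not part of the repo:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"log"
	"os"
)

// verifySHA256 mirrors what `sha256sum --check` does for one file:
// hash the file's contents and compare against the expected hex digest.
func verifySHA256(path, expectedHex string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	h := sha256.New()
	if _, err := io.Copy(h, f); err != nil {
		return err
	}
	if got := hex.EncodeToString(h.Sum(nil)); got != expectedHex {
		return fmt.Errorf("checksum mismatch: got %s, want %s", got, expectedHex)
	}
	return nil
}

func main() {
	// Digest and filename taken from the Vagrantfile diff.
	const want = "f120793fa35050a8280eacf9c930cf8d9b88795161520f6515c0cc5edda2fe8a"
	if err := verifySHA256("VBoxGuestAdditions_4.3.4.iso", want); err != nil {
		log.Fatal(err) // the shell version does `|| exit 1` here
	}
	fmt.Println("VBoxGuestAdditions_4.3.4.iso: OK")
}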
api.go — 218 changes

@@ -1,12 +1,16 @@
 package docker
 
 import (
+	"bufio"
+	"bytes"
 	"code.google.com/p/go.net/websocket"
 	"encoding/base64"
 	"encoding/json"
+	"expvar"
 	"fmt"
 	"github.com/dotcloud/docker/archive"
 	"github.com/dotcloud/docker/auth"
+	"github.com/dotcloud/docker/systemd"
 	"github.com/dotcloud/docker/utils"
 	"github.com/gorilla/mux"
 	"io"
@@ -15,6 +19,7 @@ import (
 	"mime"
 	"net"
 	"net/http"
+	"net/http/pprof"
 	"os"
 	"os/exec"
 	"regexp"
@@ -23,7 +28,7 @@ import (
 )
 
 const (
-	APIVERSION        = 1.7
+	APIVERSION        = 1.8
 	DEFAULTHTTPHOST   = "127.0.0.1"
 	DEFAULTHTTPPORT   = 4243
 	DEFAULTUNIXSOCKET = "/var/run/docker.sock"
@@ -135,7 +140,8 @@ func postAuth
 }
 
 func getVersion(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
-	return writeJSON(w, http.StatusOK, srv.DockerVersion())
+	srv.Eng.ServeHTTP(w, r)
+	return nil
 }
 
 func postContainersKill(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
@@ -145,19 +151,11 @@ func postContainersKill
 	if err := parseForm(r); err != nil {
 		return err
 	}
-	name := vars["name"]
-
-	signal := 0
-	if r != nil {
-		if s := r.Form.Get("signal"); s != "" {
-			s, err := strconv.Atoi(s)
-			if err != nil {
-				return err
-			}
-			signal = s
-		}
+	job := srv.Eng.Job("kill", vars["name"])
+	if sig := r.Form.Get("signal"); sig != "" {
+		job.Args = append(job.Args, sig)
 	}
-	if err := srv.ContainerKill(name, signal); err != nil {
+	if err := job.Run(); err != nil {
 		return err
 	}
 	w.WriteHeader(http.StatusNoContent)
@@ -168,10 +166,11 @@ func getContainersExport
 	if vars == nil {
 		return fmt.Errorf("Missing parameter")
 	}
-	name := vars["name"]
-
-	if err := srv.ContainerExport(name, w); err != nil {
-		utils.Errorf("%s", err)
+	job := srv.Eng.Job("export", vars["name"])
+	if err := job.Stdout.Add(w); err != nil {
+		return err
+	}
+	if err := job.Run(); err != nil {
 		return err
 	}
 	return nil
@@ -217,7 +216,8 @@ func getImagesViz
 }
 
 func getInfo(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
-	return writeJSON(w, http.StatusOK, srv.DockerInfo())
+	srv.Eng.ServeHTTP(w, r)
+	return nil
 }
 
 func getEvents(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
@@ -254,7 +254,7 @@ func getEvents
 	wf.Flush()
 	if since != 0 {
 		// If since, send previous events that happened after the timestamp
-		for _, event := range srv.events {
+		for _, event := range srv.GetEvents() {
 			if event.Time >= since {
 				err := sendEvent(wf, &event)
 				if err != nil && err.Error() == "JSON error" {
@@ -357,18 +357,13 @@ func postImagesTag
 	if err := parseForm(r); err != nil {
 		return err
 	}
-	repo := r.Form.Get("repo")
-	tag := r.Form.Get("tag")
 	if vars == nil {
 		return fmt.Errorf("Missing parameter")
 	}
-	name := vars["name"]
-	force, err := getBoolParam(r.Form.Get("force"))
-	if err != nil {
-		return err
-	}
-
-	if err := srv.ContainerTag(name, repo, tag, force); err != nil {
+	job := srv.Eng.Job("tag", vars["name"], r.Form.Get("repo"), r.Form.Get("tag"))
+	job.Setenv("force", r.Form.Get("force"))
+	if err := job.Run(); err != nil {
 		return err
 	}
 	w.WriteHeader(http.StatusCreated)
@@ -383,13 +378,17 @@ func postCommit
 	if err := json.NewDecoder(r.Body).Decode(config); err != nil && err != io.EOF {
 		utils.Errorf("%s", err)
 	}
-	repo := r.Form.Get("repo")
-	tag := r.Form.Get("tag")
-	container := r.Form.Get("container")
-	author := r.Form.Get("author")
-	comment := r.Form.Get("comment")
-	id, err := srv.ContainerCommit(container, repo, tag, author, comment, config)
-	if err != nil {
+
+	job := srv.Eng.Job("commit", r.Form.Get("container"))
+	job.Setenv("repo", r.Form.Get("repo"))
+	job.Setenv("tag", r.Form.Get("tag"))
+	job.Setenv("author", r.Form.Get("author"))
+	job.Setenv("comment", r.Form.Get("comment"))
+	job.SetenvJson("config", config)
+
+	var id string
+	job.Stdout.AddString(&id)
+	if err := job.Run(); err != nil {
 		return err
 	}
 
@@ -534,6 +533,18 @@ func postImagesPush
 	return nil
 }
 
+func getImagesGet(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	name := vars["name"]
+	if version > 1.0 {
+		w.Header().Set("Content-Type", "application/x-tar")
+	}
+	return srv.ImageExport(name, w)
+}
+
+func postImagesLoad(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	return srv.ImageLoad(r.Body)
+}
+
 func postContainersCreate(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	if err := parseForm(r); err != nil {
 		return nil
@@ -552,12 +563,18 @@ func postContainersCreate
 		job.SetenvList("Dns", defaultDns)
 	}
 	// Read container ID from the first line of stdout
-	job.StdoutParseString(&out.ID)
+	job.Stdout.AddString(&out.ID)
 	// Read warnings from stderr
-	job.StderrParseLines(&out.Warnings, 0)
+	warnings := &bytes.Buffer{}
+	job.Stderr.Add(warnings)
 	if err := job.Run(); err != nil {
 		return err
 	}
+	// Parse warnings from stderr
+	scanner := bufio.NewScanner(warnings)
+	for scanner.Scan() {
+		out.Warnings = append(out.Warnings, scanner.Text())
+	}
 	if job.GetenvInt("Memory") > 0 && !srv.runtime.capabilities.MemoryLimit {
 		log.Println("WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.")
 		out.Warnings = append(out.Warnings, "Your kernel does not support memory limit capabilities. Limitation discarded.")
@@ -666,17 +683,12 @@ func postContainersStop
 	if err := parseForm(r); err != nil {
 		return err
 	}
-	t, err := strconv.Atoi(r.Form.Get("t"))
-	if err != nil || t < 0 {
-		t = 10
-	}
-
 	if vars == nil {
 		return fmt.Errorf("Missing parameter")
 	}
-	name := vars["name"]
-
-	if err := srv.ContainerStop(name, t); err != nil {
+	job := srv.Eng.Job("stop", vars["name"])
+	job.Setenv("t", r.Form.Get("t"))
+	if err := job.Run(); err != nil {
 		return err
 	}
 	w.WriteHeader(http.StatusNoContent)
@@ -687,33 +699,28 @@ func postContainersWait
 	if vars == nil {
 		return fmt.Errorf("Missing parameter")
 	}
-	name := vars["name"]
-
-	status, err := srv.ContainerWait(name)
+	job := srv.Eng.Job("wait", vars["name"])
+	var statusStr string
+	job.Stdout.AddString(&statusStr)
+	if err := job.Run(); err != nil {
+		return err
+	}
+	// Parse a 16-bit encoded integer to map typical unix exit status.
+	status, err := strconv.ParseInt(statusStr, 10, 16)
 	if err != nil {
 		return err
 	}
-
-	return writeJSON(w, http.StatusOK, &APIWait{StatusCode: status})
+	return writeJSON(w, http.StatusOK, &APIWait{StatusCode: int(status)})
 }
 
 func postContainersResize(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	if err := parseForm(r); err != nil {
 		return err
 	}
-	height, err := strconv.Atoi(r.Form.Get("h"))
-	if err != nil {
-		return err
-	}
-	width, err := strconv.Atoi(r.Form.Get("w"))
-	if err != nil {
-		return err
-	}
 	if vars == nil {
 		return fmt.Errorf("Missing parameter")
 	}
-	name := vars["name"]
-	if err := srv.ContainerResize(name, height, width); err != nil {
+	if err := srv.Eng.Job("resize", vars["name"], r.Form.Get("h"), r.Form.Get("w")).Run(); err != nil {
 		return err
 	}
 	return nil
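The rewritten wait endpoint above no longer receives an int from ContainerWait; it reads the status the "wait" job printed to stdout and parses it with a 16-bit bound, as the new in-diff comment notes. A standalone Go illustration of just that parse step (the sample value is invented for the example):

package main

import (
	"fmt"
	"log"
	"strconv"
)

func main() {
	// The job API hands back the container's exit status as a decimal
	// string on stdout; bitSize 16 bounds the parse to the typical
	// unix exit-status range.
	statusStr := "137" // e.g. 128+9: process killed by SIGKILL
	status, err := strconv.ParseInt(statusStr, 10, 16)
	if err != nil {
		log.Fatal(err)
	}
	// APIWait.StatusCode is a plain int, hence the int(status) conversion
	// seen in the diff.
	fmt.Printf("StatusCode: %d\n", int(status))
}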
@@ -853,7 +860,10 @@ func getContainersByName
 		return fmt.Errorf("Conflict between containers and images")
 	}
 
-	return writeJSON(w, http.StatusOK, container)
+	container.readHostConfig()
+	c := APIContainer{container, container.hostConfig}
+
+	return writeJSON(w, http.StatusOK, c)
 }
 
 func getImagesByName(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
@@ -879,12 +889,25 @@ func postBuild
 	if version < 1.3 {
 		return fmt.Errorf("Multipart upload for build is no longer supported. Please upgrade your docker client.")
 	}
-	remoteURL := r.FormValue("remote")
-	repoName := r.FormValue("t")
-	rawSuppressOutput := r.FormValue("q")
-	rawNoCache := r.FormValue("nocache")
-	rawRm := r.FormValue("rm")
-	repoName, tag := utils.ParseRepositoryTag(repoName)
+	var (
+		remoteURL         = r.FormValue("remote")
+		repoName          = r.FormValue("t")
+		rawSuppressOutput = r.FormValue("q")
+		rawNoCache        = r.FormValue("nocache")
+		rawRm             = r.FormValue("rm")
+		authEncoded       = r.Header.Get("X-Registry-Auth")
+		authConfig        = &auth.AuthConfig{}
+		tag               string
+	)
+	repoName, tag = utils.ParseRepositoryTag(repoName)
+	if authEncoded != "" {
+		authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
+		if err := json.NewDecoder(authJson).Decode(authConfig); err != nil {
+			// for a pull it is not an error if no auth was given
+			// to increase compatibility with the existing api it is defaulting to be empty
+			authConfig = &auth.AuthConfig{}
+		}
+	}
 
 	var context io.Reader
 
@@ -910,7 +933,7 @@ func postBuild
 		}
 		context = c
 	} else if utils.IsURL(remoteURL) {
-		f, err := utils.Download(remoteURL, ioutil.Discard)
+		f, err := utils.Download(remoteURL)
 		if err != nil {
 			return err
 		}
@@ -939,9 +962,26 @@ func postBuild
 		return err
 	}
 
-	b := NewBuildFile(srv, utils.NewWriteFlusher(w), !suppressOutput, !noCache, rm)
+	if version >= 1.8 {
+		w.Header().Set("Content-Type", "application/json")
+	}
+	sf := utils.NewStreamFormatter(version >= 1.8)
+	b := NewBuildFile(srv,
+		&StdoutFormater{
+			Writer:          utils.NewWriteFlusher(w),
+			StreamFormatter: sf,
+		},
+		&StderrFormater{
+			Writer:          utils.NewWriteFlusher(w),
+			StreamFormatter: sf,
+		},
+		!suppressOutput, !noCache, rm, utils.NewWriteFlusher(w), sf, authConfig)
 	id, err := b.Build(context)
 	if err != nil {
+		if sf.Used() {
+			w.Write(sf.FormatError(err))
+			return nil
+		}
 		return fmt.Errorf("Error build: %s", err)
 	}
 	if repoName != "" {
@@ -967,7 +1007,7 @@ func postContainersCopy
 	}
 
 	if copyData.Resource == "" {
-		return fmt.Errorf("Resource cannot be empty")
+		return fmt.Errorf("Path cannot be empty")
 	}
 	if copyData.Resource[0] == '/' {
 		copyData.Resource = copyData.Resource[1:]
@@ -1025,9 +1065,37 @@ func makeHttpHandler
 	}
 }
 
+// Replicated from expvar.go as not public.
+func expvarHandler(w http.ResponseWriter, r *http.Request) {
+	w.Header().Set("Content-Type", "application/json; charset=utf-8")
+	fmt.Fprintf(w, "{\n")
+	first := true
+	expvar.Do(func(kv expvar.KeyValue) {
+		if !first {
+			fmt.Fprintf(w, ",\n")
+		}
+		first = false
+		fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value)
+	})
+	fmt.Fprintf(w, "\n}\n")
+}
+
+func AttachProfiler(router *mux.Router) {
+	router.HandleFunc("/debug/vars", expvarHandler)
+	router.HandleFunc("/debug/pprof/", pprof.Index)
+	router.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
+	router.HandleFunc("/debug/pprof/profile", pprof.Profile)
+	router.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
+	router.HandleFunc("/debug/pprof/heap", pprof.Handler("heap").ServeHTTP)
+	router.HandleFunc("/debug/pprof/goroutine", pprof.Handler("goroutine").ServeHTTP)
+	router.HandleFunc("/debug/pprof/threadcreate", pprof.Handler("threadcreate").ServeHTTP)
+}
+
 func createRouter(srv *Server, logging bool) (*mux.Router, error) {
 	r := mux.NewRouter()
 
+	if os.Getenv("DEBUG") != "" {
+		AttachProfiler(r)
+	}
 	m := map[string]map[string]HttpApiFunc{
 		"GET": {
 			"/events": getEvents,
@@ -1036,6 +1104,7 @@ func createRouter
 			"/images/json": getImagesJSON,
 			"/images/viz": getImagesViz,
 			"/images/search": getImagesSearch,
+			"/images/{name:.*}/get": getImagesGet,
 			"/images/{name:.*}/history": getImagesHistory,
 			"/images/{name:.*}/json": getImagesByName,
 			"/containers/ps": getContainersJSON,
@@ -1052,6 +1121,7 @@ func createRouter
 			"/build": postBuild,
 			"/images/create": postImagesCreate,
 			"/images/{name:.*}/insert": postImagesInsert,
+			"/images/load": postImagesLoad,
 			"/images/{name:.*}/push": postImagesPush,
 			"/images/{name:.*}/tag": postImagesTag,
 			"/containers/create": postContainersCreate,
@@ -1112,8 +1182,6 @@ func ServeRequest
 }
 
 func ListenAndServe(proto, addr string, srv *Server, logging bool) error {
-	log.Printf("Listening for HTTP on %s (%s)\n", addr, proto)
-
 	r, err := createRouter(srv, logging)
 	if err != nil {
 		return err
@@ -1144,5 +1212,9 @@ func ListenAndServe
 	}
 	httpSrv := http.Server{Addr: addr, Handler: r}
 
+	log.Printf("Listening for HTTP on %s (%s)\n", addr, proto)
 	// Tell the init daemon we are accepting requests
+	go systemd.SdNotify("READY=1")
 	return httpSrv.Serve(l)
 }
[file header not preserved — the following hunks match the API type declarations in api_params.go]

@@ -29,23 +29,6 @@ type (
 		VirtualSize int64
 	}
 
-	APIInfo struct {
-		Debug              bool
-		Containers         int
-		Images             int
-		Driver             string      `json:",omitempty"`
-		DriverStatus       [][2]string `json:",omitempty"`
-		NFd                int         `json:",omitempty"`
-		NGoroutines        int         `json:",omitempty"`
-		MemoryLimit        bool        `json:",omitempty"`
-		SwapLimit          bool        `json:",omitempty"`
-		IPv4Forwarding     bool        `json:",omitempty"`
-		LXCVersion         string      `json:",omitempty"`
-		NEventsListener    int         `json:",omitempty"`
-		KernelVersion      string      `json:",omitempty"`
-		IndexServerAddress string      `json:",omitempty"`
-	}
-
 	APITop struct {
 		Titles    []string
 		Processes [][]string
@@ -95,12 +78,6 @@ type (
 		IP string
 	}
 
-	APIVersion struct {
-		Version   string
-		GitCommit string `json:",omitempty"`
-		GoVersion string `json:",omitempty"`
-	}
-
 	APIWait struct {
 		StatusCode int
 	}
@@ -118,6 +95,10 @@ type (
 		Resource string
 		HostPath string
 	}
+	APIContainer struct {
+		*Container
+		HostConfig *HostConfig
+	}
 )
 
 func (api APIImages) ToLegacy() []APIImagesOld {
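The new APIContainer type at the end of the hunk above embeds *Container so that the inspect endpoint can serialize the container's own fields and its HostConfig as one JSON object (this is what the getContainersByName change relies on). A self-contained sketch of the embedding trick, using stand-in types rather than Docker's real ones:

package main

import (
	"encoding/json"
	"fmt"
)

// Stand-ins for the real Container and HostConfig types.
type Container struct {
	ID   string
	Name string
}

type HostConfig struct {
	Privileged bool
}

// Embedding *Container promotes its fields to the top level of the
// marshalled JSON; HostConfig appears alongside them.
type APIContainer struct {
	*Container
	HostConfig *HostConfig
}

func main() {
	c := &Container{ID: "4fa6e0f0c678", Name: "/web"}
	out, _ := json.Marshal(APIContainer{c, &HostConfig{Privileged: true}})
	fmt.Println(string(out))
	// {"ID":"4fa6e0f0c678","Name":"/web","HostConfig":{"Privileged":true}}
}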
[file header not preserved — hunk matches archive/changes.go]

@@ -181,7 +181,7 @@ func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
 		oldStat.Rdev != newStat.Rdev ||
 		// Don't look at size for dirs, its not a good measure of change
 		(oldStat.Size != newStat.Size && oldStat.Mode&syscall.S_IFDIR != syscall.S_IFDIR) ||
-		oldStat.Mtim != newStat.Mtim {
+		getLastModification(oldStat) != getLastModification(newStat) {
 		change := Change{
 			Path: newChild.path(),
 			Kind: ChangeModify,

[file header not preserved — hunks match archive/changes_test.go]

@@ -71,17 +71,27 @@ func createSampleDir(t *testing.T, root string) {
 		{Symlink, "symlink1", "target1", 0666},
 		{Symlink, "symlink2", "target2", 0666},
 	}
 
+	now := time.Now()
 	for _, info := range files {
+		p := path.Join(root, info.path)
 		if info.filetype == Dir {
-			if err := os.MkdirAll(path.Join(root, info.path), info.permissions); err != nil {
+			if err := os.MkdirAll(p, info.permissions); err != nil {
 				t.Fatal(err)
 			}
 		} else if info.filetype == Regular {
-			if err := ioutil.WriteFile(path.Join(root, info.path), []byte(info.contents), info.permissions); err != nil {
+			if err := ioutil.WriteFile(p, []byte(info.contents), info.permissions); err != nil {
 				t.Fatal(err)
 			}
 		} else if info.filetype == Symlink {
-			if err := os.Symlink(info.contents, path.Join(root, info.path)); err != nil {
+			if err := os.Symlink(info.contents, p); err != nil {
 				t.Fatal(err)
 			}
 		}
+
+		if info.filetype != Symlink {
+			// Set a consistent ctime, atime for all files and dirs
+			if err := os.Chtimes(p, now, now); err != nil {
+				t.Fatal(err)
+			}
+		}
@@ -200,6 +210,9 @@ func TestChangesDirsMutated(t *testing.T) {
 	if err := copyDir(src, dst); err != nil {
 		t.Fatal(err)
 	}
+	defer os.RemoveAll(src)
+	defer os.RemoveAll(dst)
+
 	mutateSampleDir(t, dst)
 
 	changes, err := ChangesDirs(dst, src)
@@ -225,8 +238,7 @@ func TestChangesDirsMutated(t *testing.T) {
 		{"/symlinknew", ChangeAdd},
 	}
 
-	i := 0
-	for ; i < max(len(changes), len(expectedChanges)); i++ {
+	for i := 0; i < max(len(changes), len(expectedChanges)); i++ {
 		if i >= len(expectedChanges) {
 			t.Fatalf("unexpected change %s\n", changes[i].String())
 		}
@@ -235,64 +247,59 @@ func TestChangesDirsMutated(t *testing.T) {
 		}
 		if changes[i].Path == expectedChanges[i].Path {
 			if changes[i] != expectedChanges[i] {
-				t.Fatalf("Wrong change for %s, expected %s, got %d\n", changes[i].Path, changes[i].String(), expectedChanges[i].String())
+				t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, changes[i].String(), expectedChanges[i].String())
 			}
 		} else if changes[i].Path < expectedChanges[i].Path {
 			t.Fatalf("unexpected change %s\n", changes[i].String())
 		} else {
-			t.Fatalf("no change for expected change %s\n", expectedChanges[i].String())
+			t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String())
 		}
 	}
-	for ; i < len(expectedChanges); i++ {
-	}
-
-	os.RemoveAll(src)
-	os.RemoveAll(dst)
 }
 
 func TestApplyLayer(t *testing.T) {
+	t.Skip("Skipping TestApplyLayer due to known failures") // Disable this for now as it is broken
+	return
 
-	src, err := ioutil.TempDir("", "docker-changes-test")
-	if err != nil {
-		t.Fatal(err)
-	}
-	createSampleDir(t, src)
-	dst := src + "-copy"
-	if err := copyDir(src, dst); err != nil {
-		t.Fatal(err)
-	}
-	mutateSampleDir(t, dst)
+	// src, err := ioutil.TempDir("", "docker-changes-test")
+	// if err != nil {
+	// 	t.Fatal(err)
+	// }
+	// createSampleDir(t, src)
+	// dst := src + "-copy"
+	// if err := copyDir(src, dst); err != nil {
+	// 	t.Fatal(err)
+	// }
+	// mutateSampleDir(t, dst)
 
-	changes, err := ChangesDirs(dst, src)
-	if err != nil {
-		t.Fatal(err)
-	}
+	// changes, err := ChangesDirs(dst, src)
+	// if err != nil {
+	// 	t.Fatal(err)
+	// }
 
-	layer, err := ExportChanges(dst, changes)
-	if err != nil {
-		t.Fatal(err)
-	}
+	// layer, err := ExportChanges(dst, changes)
+	// if err != nil {
+	// 	t.Fatal(err)
+	// }
 
-	layerCopy, err := NewTempArchive(layer, "")
-	if err != nil {
-		t.Fatal(err)
-	}
+	// layerCopy, err := NewTempArchive(layer, "")
+	// if err != nil {
+	// 	t.Fatal(err)
+	// }
 
-	if err := ApplyLayer(src, layerCopy); err != nil {
-		t.Fatal(err)
-	}
+	// if err := ApplyLayer(src, layerCopy); err != nil {
+	// 	t.Fatal(err)
+	// }
 
-	changes2, err := ChangesDirs(src, dst)
-	if err != nil {
-		t.Fatal(err)
-	}
+	// changes2, err := ChangesDirs(src, dst)
+	// if err != nil {
+	// 	t.Fatal(err)
+	// }
 
-	if len(changes2) != 0 {
-		t.Fatalf("Unexpected differences after re applying mutation: %v", changes)
-	}
+	// if len(changes2) != 0 {
+	// 	t.Fatalf("Unexpected differences after re applying mutation: %v", changes)
+	// }
 
-	os.RemoveAll(src)
-	os.RemoveAll(dst)
+	// os.RemoveAll(src)
+	// os.RemoveAll(dst)
 }

[file header not preserved — hunk matches archive/diff.go]

@@ -83,8 +83,10 @@ func ApplyLayer(dest string, layer Archive) error {
 	}
 
 	for k, v := range modifiedDirs {
-		aTime := time.Unix(v.Atim.Unix())
-		mTime := time.Unix(v.Mtim.Unix())
+		lastAccess := getLastAccess(v)
+		lastModification := getLastModification(v)
+		aTime := time.Unix(lastAccess.Unix())
+		mTime := time.Unix(lastModification.Unix())
 
 		if err := os.Chtimes(k, aTime, mTime); err != nil {
 			return err
archive/stat_darwin.go — new file, 11 lines

@@ -0,0 +1,11 @@
+package archive
+
+import "syscall"
+
+func getLastAccess(stat *syscall.Stat_t) syscall.Timespec {
+	return stat.Atimespec
+}
+
+func getLastModification(stat *syscall.Stat_t) syscall.Timespec {
+	return stat.Mtimespec
+}

archive/stat_linux.go — new file, 11 lines

@@ -0,0 +1,11 @@
+package archive
+
+import "syscall"
+
+func getLastAccess(stat *syscall.Stat_t) syscall.Timespec {
+	return stat.Atim
+}
+
+func getLastModification(stat *syscall.Stat_t) syscall.Timespec {
+	return stat.Mtim
+}
auth/auth.go — 39 changes

@@ -63,7 +63,7 @@ func decodeAuth(authStr string) (string, string, error) {
 	if n > decLen {
 		return "", "", fmt.Errorf("Something went wrong decoding auth config")
 	}
-	arr := strings.Split(string(decoded), ":")
+	arr := strings.SplitN(string(decoded), ":", 2)
 	if len(arr) != 2 {
 		return "", "", fmt.Errorf("Invalid auth configuration file")
 	}
@@ -192,13 +192,6 @@ func Login(authConfig *AuthConfig, factory *utils.HTTPRequestFactory) (string, error) {
 			} else {
 				status = "Account created. Please see the documentation of the registry " + serverAddress + " for instructions how to activate it."
 			}
-		} else if reqStatusCode == 403 {
-			if loginAgainstOfficialIndex {
-				return "", fmt.Errorf("Login: Your account hasn't been activated. " +
-					"Please check your e-mail for a confirmation link.")
-			}
-			return "", fmt.Errorf("Login: Your account hasn't been activated. " +
-				"Please see the documentation of the registry " + serverAddress + " for instructions how to activate it.")
 		} else if reqStatusCode == 400 {
 			if string(reqBody) == "\"Username or email already exists\"" {
 				req, err := factory.NewRequest("GET", serverAddress+"users/", nil)
@@ -216,13 +209,39 @@ func Login(authConfig *AuthConfig, factory *utils.HTTPRequestFactory) (string, error) {
 				status = "Login Succeeded"
 			} else if resp.StatusCode == 401 {
 				return "", fmt.Errorf("Wrong login/password, please try again")
+			} else if resp.StatusCode == 403 {
+				if loginAgainstOfficialIndex {
+					return "", fmt.Errorf("Login: Account is not Active. Please check your e-mail for a confirmation link.")
+				}
+				return "", fmt.Errorf("Login: Account is not Active. Please see the documentation of the registry %s for instructions how to activate it.", serverAddress)
 			} else {
-				return "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body,
-					resp.StatusCode, resp.Header)
+				return "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body, resp.StatusCode, resp.Header)
 			}
 		} else {
 			return "", fmt.Errorf("Registration: %s", reqBody)
 		}
+	} else if reqStatusCode == 401 {
+		// This case would happen with private registries where /v1/users is
+		// protected, so people can use `docker login` as an auth check.
+		req, err := factory.NewRequest("GET", serverAddress+"users/", nil)
+		req.SetBasicAuth(authConfig.Username, authConfig.Password)
+		resp, err := client.Do(req)
+		if err != nil {
+			return "", err
+		}
+		defer resp.Body.Close()
+		body, err := ioutil.ReadAll(resp.Body)
+		if err != nil {
+			return "", err
+		}
+		if resp.StatusCode == 200 {
+			status = "Login Succeeded"
+		} else if resp.StatusCode == 401 {
+			return "", fmt.Errorf("Wrong login/password, please try again")
+		} else {
+			return "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body,
+				resp.StatusCode, resp.Header)
+		}
 	} else {
 		return "", fmt.Errorf("Unexpected status code [%d] : %s", reqStatusCode, reqBody)
 	}
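The decodeAuth change above is the fix behind the 0.7.1 changelog entry about ':' in registry passwords: strings.Split breaks a "username:password" pair into extra fields when the password itself contains a colon, while strings.SplitN with a limit of 2 splits only at the first colon. A quick illustration:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Decoded "username:password" pair where the password contains a colon.
	decoded := "jdoe:s3c:ret"

	// Old behavior: three parts, so the len(arr) != 2 check fails login.
	fmt.Println(len(strings.Split(decoded, ":"))) // 3

	// New behavior: split at the first colon only.
	arr := strings.SplitN(decoded, ":", 2)
	fmt.Printf("user=%q pass=%q\n", arr[0], arr[1]) // user="jdoe" pass="s3c:ret"
}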
82
buildfile.go
82
buildfile.go
@@ -4,6 +4,7 @@ import (
    "encoding/json"
    "fmt"
    "github.com/dotcloud/docker/archive"
    "github.com/dotcloud/docker/auth"
    "github.com/dotcloud/docker/utils"
    "io"
    "io/ioutil"
@@ -33,17 +34,24 @@ type buildFile struct {
    utilizeCache bool
    rm           bool

    authConfig *auth.AuthConfig

    tmpContainers map[string]struct{}
    tmpImages     map[string]struct{}

    out io.Writer
    outStream io.Writer
    errStream io.Writer

    // Deprecated, original writer used for ImagePull. To be removed.
    outOld io.Writer
    sf     *utils.StreamFormatter
}

func (b *buildFile) clearTmp(containers map[string]struct{}) {
    for c := range containers {
        tmp := b.runtime.Get(c)
        b.runtime.Destroy(tmp)
        fmt.Fprintf(b.out, "Removing intermediate container %s\n", utils.TruncateID(c))
        fmt.Fprintf(b.outStream, "Removing intermediate container %s\n", utils.TruncateID(c))
    }
}

@@ -52,7 +60,7 @@ func (b *buildFile) CmdFrom(name string) error {
    if err != nil {
        if b.runtime.graph.IsNotExist(err) {
            remote, tag := utils.ParseRepositoryTag(name)
            if err := b.srv.ImagePull(remote, tag, b.out, utils.NewStreamFormatter(false), nil, nil, true); err != nil {
            if err := b.srv.ImagePull(remote, tag, b.outOld, b.sf, b.authConfig, nil, true); err != nil {
                return err
            }
            image, err = b.runtime.repositories.LookupImage(name)
@@ -100,7 +108,7 @@ func (b *buildFile) CmdRun(args string) error {
    if cache, err := b.srv.ImageGetCached(b.image, b.config); err != nil {
        return err
    } else if cache != nil {
        fmt.Fprintf(b.out, " ---> Using cache\n")
        fmt.Fprintf(b.outStream, " ---> Using cache\n")
        utils.Debugf("[BUILDER] Use cached version")
        b.image = cache.ID
        return nil
@@ -241,7 +249,7 @@ func (b *buildFile) CmdVolume(args string) error {
        volume = []string{args}
    }
    if b.config.Volumes == nil {
        b.config.Volumes = NewPathOpts()
        b.config.Volumes = map[string]struct{}{}
    }
    for _, v := range volume {
        b.config.Volumes[v] = struct{}{}
@@ -253,7 +261,7 @@
}

func (b *buildFile) addRemote(container *Container, orig, dest string) error {
    file, err := utils.Download(orig, ioutil.Discard)
    file, err := utils.Download(orig)
    if err != nil {
        return err
    }
@@ -288,7 +296,7 @@ func (b *buildFile) addContext(container *Container, orig, dest string) error {
        destPath = destPath + "/"
    }
    if !strings.HasPrefix(origPath, b.context) {
        return fmt.Errorf("Forbidden path: %s", origPath)
        return fmt.Errorf("Forbidden path outside the build context: %s (%s)", orig, origPath)
    }
    fi, err := os.Stat(origPath)
    if err != nil {
@@ -364,6 +372,34 @@ func (b *buildFile) CmdAdd(args string) error {
    return nil
}

type StdoutFormater struct {
    io.Writer
    *utils.StreamFormatter
}

func (sf *StdoutFormater) Write(buf []byte) (int, error) {
    formattedBuf := sf.StreamFormatter.FormatStream(string(buf))
    n, err := sf.Writer.Write(formattedBuf)
    if n != len(formattedBuf) {
        return n, io.ErrShortWrite
    }
    return len(buf), err
}

type StderrFormater struct {
    io.Writer
    *utils.StreamFormatter
}

func (sf *StderrFormater) Write(buf []byte) (int, error) {
    formattedBuf := sf.StreamFormatter.FormatStream("\033[91m" + string(buf) + "\033[0m")
    n, err := sf.Writer.Write(formattedBuf)
    if n != len(formattedBuf) {
        return n, io.ErrShortWrite
    }
    return len(buf), err
}
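The two formatter types above wrap an io.Writer so that build output and build errors can be framed independently, with stderr additionally wrapped in ANSI red (\033[91m ... \033[0m) before framing. The subtle part is the return value: Write reports len(buf), the caller's byte count, even though the formatted payload it actually wrote is longer; a size-changing wrapper that reported len(formattedBuf) instead would make callers such as io.Copy mis-detect short writes. A minimal self-contained sketch of the same pattern, with a trivial prefixing writer standing in for utils.StreamFormatter (all names here are hypothetical):

    package streamwrap

    import "io"

    // prefixWriter mimics the StdoutFormater pattern above: it rewrites each
    // payload before writing, but still reports the caller's own byte count.
    type prefixWriter struct {
        w      io.Writer
        prefix string
    }

    func (p *prefixWriter) Write(buf []byte) (int, error) {
        formatted := []byte(p.prefix + string(buf))
        n, err := p.w.Write(formatted)
        if n != len(formatted) {
            return n, io.ErrShortWrite
        }
        // Report len(buf), not len(formatted), to satisfy the io.Writer contract.
        return len(buf), err
    }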
func (b *buildFile) run() (string, error) {
    if b.image == "" {
        return "", fmt.Errorf("Please provide a source image with `from` prior to run")
@@ -376,7 +412,7 @@ func (b *buildFile) run() (string, error) {
        return "", err
    }
    b.tmpContainers[c.ID] = struct{}{}
    fmt.Fprintf(b.out, " ---> Running in %s\n", utils.TruncateID(c.ID))
    fmt.Fprintf(b.outStream, " ---> Running in %s\n", utils.TruncateID(c.ID))

    // override the entry point that may have been picked up from the base image
    c.Path = b.config.Cmd[0]
@@ -386,7 +422,7 @@ func (b *buildFile) run() (string, error) {

    if b.verbose {
        errCh = utils.Go(func() error {
            return <-c.Attach(nil, nil, b.out, b.out)
            return <-c.Attach(nil, nil, b.outStream, b.errStream)
        })
    }

@@ -403,7 +439,11 @@ func (b *buildFile) run() (string, error) {

    // Wait for it to finish
    if ret := c.Wait(); ret != 0 {
        return "", fmt.Errorf("The command %v returned a non-zero code: %d", b.config.Cmd, ret)
        err := &utils.JSONError{
            Message: fmt.Sprintf("The command %v returned a non-zero code: %d", b.config.Cmd, ret),
            Code:    ret,
        }
        return "", err
    }

    return c.ID, nil
@@ -424,7 +464,7 @@ func (b *buildFile) commit(id string, autoCmd []string, comment string) error {
    if cache, err := b.srv.ImageGetCached(b.image, b.config); err != nil {
        return err
    } else if cache != nil {
        fmt.Fprintf(b.out, " ---> Using cache\n")
        fmt.Fprintf(b.outStream, " ---> Using cache\n")
        utils.Debugf("[BUILDER] Use cached version")
        b.image = cache.ID
        return nil
@@ -438,10 +478,10 @@ func (b *buildFile) commit(id string, autoCmd []string, comment string) error {
        return err
    }
    for _, warning := range warnings {
        fmt.Fprintf(b.out, " ---> [Warning] %s\n", warning)
        fmt.Fprintf(b.outStream, " ---> [Warning] %s\n", warning)
    }
    b.tmpContainers[container.ID] = struct{}{}
    fmt.Fprintf(b.out, " ---> Running in %s\n", utils.TruncateID(container.ID))
    fmt.Fprintf(b.outStream, " ---> Running in %s\n", utils.TruncateID(container.ID))
    id = container.ID
    if err := container.EnsureMounted(); err != nil {
        return err
@@ -507,22 +547,22 @@ func (b *buildFile) Build(context io.Reader) (string, error) {

        method, exists := reflect.TypeOf(b).MethodByName("Cmd" + strings.ToUpper(instruction[:1]) + strings.ToLower(instruction[1:]))
        if !exists {
            fmt.Fprintf(b.out, "# Skipping unknown instruction %s\n", strings.ToUpper(instruction))
            fmt.Fprintf(b.errStream, "# Skipping unknown instruction %s\n", strings.ToUpper(instruction))
            continue
        }

        stepN += 1
        fmt.Fprintf(b.out, "Step %d : %s %s\n", stepN, strings.ToUpper(instruction), arguments)
        fmt.Fprintf(b.outStream, "Step %d : %s %s\n", stepN, strings.ToUpper(instruction), arguments)

        ret := method.Func.Call([]reflect.Value{reflect.ValueOf(b), reflect.ValueOf(arguments)})[0].Interface()
        if ret != nil {
            return "", ret.(error)
        }

        fmt.Fprintf(b.out, " ---> %v\n", utils.TruncateID(b.image))
        fmt.Fprintf(b.outStream, " ---> %s\n", utils.TruncateID(b.image))
    }
    if b.image != "" {
        fmt.Fprintf(b.out, "Successfully built %s\n", utils.TruncateID(b.image))
        fmt.Fprintf(b.outStream, "Successfully built %s\n", utils.TruncateID(b.image))
        if b.rm {
            b.clearTmp(b.tmpContainers)
        }
@@ -531,16 +571,20 @@ func (b *buildFile) Build(context io.Reader) (string, error) {
    return "", fmt.Errorf("An error occurred during the build\n")
}

func NewBuildFile(srv *Server, out io.Writer, verbose, utilizeCache, rm bool) BuildFile {
func NewBuildFile(srv *Server, outStream, errStream io.Writer, verbose, utilizeCache, rm bool, outOld io.Writer, sf *utils.StreamFormatter, auth *auth.AuthConfig) BuildFile {
    return &buildFile{
        runtime:       srv.runtime,
        srv:           srv,
        config:        &Config{},
        out:           out,
        outStream:     outStream,
        errStream:     errStream,
        tmpContainers: make(map[string]struct{}),
        tmpImages:     make(map[string]struct{}),
        verbose:       verbose,
        utilizeCache:  utilizeCache,
        rm:            rm,
        sf:            sf,
        authConfig:    auth,
        outOld:        outOld,
    }
}
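The widened NewBuildFile signature threads the split stdout/stderr streams, the shared StreamFormatter, the registry credentials needed by ImagePull, and the deprecated outOld writer through in one call. A sketch of what a call site might look like under those assumptions (srv and the credentials are placeholders; this fragment depends on the docker package and is illustrative only):

    sf := utils.NewStreamFormatter(true) // JSON-framed output
    b := NewBuildFile(srv,
        &StdoutFormater{Writer: os.Stdout, StreamFormatter: sf},
        &StderrFormater{Writer: os.Stderr, StreamFormatter: sf},
        true,      // verbose
        true,      // utilizeCache
        false,     // rm
        os.Stdout, // outOld: deprecated writer still used by ImagePull
        sf,
        &auth.AuthConfig{Username: "user", Password: "secret"}, // placeholder credentials
    )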
887  commands.go
File diff suppressed because it is too large
157  commands_unit_test.go  (new file)
@@ -0,0 +1,157 @@
package docker

import (
    "strings"
    "testing"
)

func parse(t *testing.T, args string) (*Config, *HostConfig, error) {
    config, hostConfig, _, err := ParseRun(strings.Split(args+" ubuntu bash", " "), nil)
    return config, hostConfig, err
}

func mustParse(t *testing.T, args string) (*Config, *HostConfig) {
    config, hostConfig, err := parse(t, args)
    if err != nil {
        t.Fatal(err)
    }
    return config, hostConfig
}

func TestParseRunLinks(t *testing.T) {
    if _, hostConfig := mustParse(t, "-link a:b"); len(hostConfig.Links) == 0 || hostConfig.Links[0] != "a:b" {
        t.Fatalf("Error parsing links. Expected []string{\"a:b\"}, received: %v", hostConfig.Links)
    }
    if _, hostConfig := mustParse(t, "-link a:b -link c:d"); len(hostConfig.Links) < 2 || hostConfig.Links[0] != "a:b" || hostConfig.Links[1] != "c:d" {
        t.Fatalf("Error parsing links. Expected []string{\"a:b\", \"c:d\"}, received: %v", hostConfig.Links)
    }
    if _, hostConfig := mustParse(t, ""); len(hostConfig.Links) != 0 {
        t.Fatalf("Error parsing links. No link expected, received: %v", hostConfig.Links)
    }

    if _, _, err := parse(t, "-link a"); err == nil {
        t.Fatalf("Error parsing links. `-link a` should be an error but is not")
    }
    if _, _, err := parse(t, "-link"); err == nil {
        t.Fatalf("Error parsing links. `-link` should be an error but is not")
    }
}

func TestParseRunAttach(t *testing.T) {
    if config, _ := mustParse(t, "-a stdin"); !config.AttachStdin || config.AttachStdout || config.AttachStderr {
        t.Fatalf("Error parsing attach flags. Expect only Stdin enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr)
    }
    if config, _ := mustParse(t, "-a stdin -a stdout"); !config.AttachStdin || !config.AttachStdout || config.AttachStderr {
        t.Fatalf("Error parsing attach flags. Expect only Stdin and Stdout enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr)
    }
    if config, _ := mustParse(t, "-a stdin -a stdout -a stderr"); !config.AttachStdin || !config.AttachStdout || !config.AttachStderr {
        t.Fatalf("Error parsing attach flags. Expect all attach enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr)
    }
    if config, _ := mustParse(t, ""); config.AttachStdin || !config.AttachStdout || !config.AttachStderr {
        t.Fatalf("Error parsing attach flags. Expect Stdin disabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr)
    }

    if _, _, err := parse(t, "-a"); err == nil {
        t.Fatalf("Error parsing attach flags, `-a` should be an error but is not")
    }
    if _, _, err := parse(t, "-a invalid"); err == nil {
        t.Fatalf("Error parsing attach flags, `-a invalid` should be an error but is not")
    }
    if _, _, err := parse(t, "-a invalid -a stdout"); err == nil {
        t.Fatalf("Error parsing attach flags, `-a invalid -a stdout` should be an error but is not")
    }
    if _, _, err := parse(t, "-a stdout -a stderr -d"); err == nil {
        t.Fatalf("Error parsing attach flags, `-a stdout -a stderr -d` should be an error but is not")
    }
    if _, _, err := parse(t, "-a stdin -d"); err == nil {
        t.Fatalf("Error parsing attach flags, `-a stdin -d` should be an error but is not")
    }
    if _, _, err := parse(t, "-a stdout -d"); err == nil {
        t.Fatalf("Error parsing attach flags, `-a stdout -d` should be an error but is not")
    }
    if _, _, err := parse(t, "-a stderr -d"); err == nil {
        t.Fatalf("Error parsing attach flags, `-a stderr -d` should be an error but is not")
    }
    if _, _, err := parse(t, "-d -rm"); err == nil {
        t.Fatalf("Error parsing attach flags, `-d -rm` should be an error but is not")
    }
}

func TestParseRunVolumes(t *testing.T) {
    if config, hostConfig := mustParse(t, "-v /tmp"); hostConfig.Binds != nil {
        t.Fatalf("Error parsing volume flags, `-v /tmp` should not mount-bind anything. Received %v", hostConfig.Binds)
    } else if _, exists := config.Volumes["/tmp"]; !exists {
        t.Fatalf("Error parsing volume flags, `-v /tmp` is missing from volumes. Received %v", config.Volumes)
    }

    if config, hostConfig := mustParse(t, "-v /tmp -v /var"); hostConfig.Binds != nil {
        t.Fatalf("Error parsing volume flags, `-v /tmp -v /var` should not mount-bind anything. Received %v", hostConfig.Binds)
    } else if _, exists := config.Volumes["/tmp"]; !exists {
        t.Fatalf("Error parsing volume flags, `-v /tmp` is missing from volumes. Received %v", config.Volumes)
    } else if _, exists := config.Volumes["/var"]; !exists {
        t.Fatalf("Error parsing volume flags, `-v /var` is missing from volumes. Received %v", config.Volumes)
    }

    if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp"); hostConfig.Binds == nil || hostConfig.Binds[0] != "/hostTmp:/containerTmp" {
        t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp` should mount-bind /hostTmp into /containerTmp. Received %v", hostConfig.Binds)
    } else if _, exists := config.Volumes["/containerTmp"]; !exists {
        t.Fatalf("Error parsing volume flags, `-v /containerTmp` is missing from volumes. Received %v", config.Volumes)
    }

    if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp -v /hostVar:/containerVar"); hostConfig.Binds == nil || hostConfig.Binds[0] != "/hostTmp:/containerTmp" || hostConfig.Binds[1] != "/hostVar:/containerVar" {
        t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp -v /hostVar:/containerVar` should mount-bind /hostTmp into /containerTmp and /hostVar into /containerVar. Received %v", hostConfig.Binds)
    } else if _, exists := config.Volumes["/containerTmp"]; !exists {
        t.Fatalf("Error parsing volume flags, `-v /containerTmp` is missing from volumes. Received %v", config.Volumes)
    } else if _, exists := config.Volumes["/containerVar"]; !exists {
        t.Fatalf("Error parsing volume flags, `-v /containerVar` is missing from volumes. Received %v", config.Volumes)
    }

    if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp:ro -v /hostVar:/containerVar:rw"); hostConfig.Binds == nil || hostConfig.Binds[0] != "/hostTmp:/containerTmp:ro" || hostConfig.Binds[1] != "/hostVar:/containerVar:rw" {
        t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp:ro -v /hostVar:/containerVar:rw` should mount-bind /hostTmp into /containerTmp and /hostVar into /containerVar. Received %v", hostConfig.Binds)
    } else if _, exists := config.Volumes["/containerTmp"]; !exists {
        t.Fatalf("Error parsing volume flags, `-v /containerTmp` is missing from volumes. Received %v", config.Volumes)
    } else if _, exists := config.Volumes["/containerVar"]; !exists {
        t.Fatalf("Error parsing volume flags, `-v /containerVar` is missing from volumes. Received %v", config.Volumes)
    }

    if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp -v /containerVar"); hostConfig.Binds == nil || len(hostConfig.Binds) > 1 || hostConfig.Binds[0] != "/hostTmp:/containerTmp" {
        t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp -v /containerVar` should mount-bind only /hostTmp into /containerTmp. Received %v", hostConfig.Binds)
    } else if _, exists := config.Volumes["/containerTmp"]; !exists {
        t.Fatalf("Error parsing volume flags, `-v /containerTmp` is missing from volumes. Received %v", config.Volumes)
    } else if _, exists := config.Volumes["/containerVar"]; !exists {
        t.Fatalf("Error parsing volume flags, `-v /containerVar` is missing from volumes. Received %v", config.Volumes)
    }

    if config, hostConfig := mustParse(t, ""); hostConfig.Binds != nil {
        t.Fatalf("Error parsing volume flags, without volume, nothing should be mount-binded. Received %v", hostConfig.Binds)
    } else if len(config.Volumes) != 0 {
        t.Fatalf("Error parsing volume flags, without volume, no volume should be present. Received %v", config.Volumes)
    }

    mustParse(t, "-v /")

    if _, _, err := parse(t, "-v /:/"); err == nil {
        t.Fatalf("Error parsing volume flags, `-v /:/` should fail but didn't")
    }
    if _, _, err := parse(t, "-v"); err == nil {
        t.Fatalf("Error parsing volume flags, `-v` should fail but didn't")
    }
    if _, _, err := parse(t, "-v /tmp:"); err == nil {
        t.Fatalf("Error parsing volume flags, `-v /tmp:` should fail but didn't")
    }
    if _, _, err := parse(t, "-v /tmp:ro"); err == nil {
        t.Fatalf("Error parsing volume flags, `-v /tmp:ro` should fail but didn't")
    }
    if _, _, err := parse(t, "-v /tmp::"); err == nil {
        t.Fatalf("Error parsing volume flags, `-v /tmp::` should fail but didn't")
    }
    if _, _, err := parse(t, "-v :"); err == nil {
        t.Fatalf("Error parsing volume flags, `-v :` should fail but didn't")
    }
    if _, _, err := parse(t, "-v ::"); err == nil {
        t.Fatalf("Error parsing volume flags, `-v ::` should fail but didn't")
    }
    if _, _, err := parse(t, "-v /tmp:/tmp:/tmp:/tmp"); err == nil {
        t.Fatalf("Error parsing volume flags, `-v /tmp:/tmp:/tmp:/tmp` should fail but didn't")
    }
}
@@ -14,6 +14,7 @@ type DaemonConfig struct {
    Dns                         []string
    EnableIptables              bool
    BridgeIface                 string
    BridgeIp                    string
    DefaultIp                   net.IP
    InterContainerCommunication bool
    GraphDriver                 string
@@ -27,8 +28,8 @@ func ConfigFromJob(job *engine.Job) *DaemonConfig {
    config.Root = job.Getenv("Root")
    config.AutoRestart = job.GetenvBool("AutoRestart")
    config.EnableCors = job.GetenvBool("EnableCors")
    if dns := job.Getenv("Dns"); dns != "" {
        config.Dns = []string{dns}
    if dns := job.GetenvList("Dns"); dns != nil {
        config.Dns = dns
    }
    config.EnableIptables = job.GetenvBool("EnableIptables")
    if br := job.Getenv("BridgeIface"); br != "" {
@@ -36,6 +37,7 @@ func ConfigFromJob(job *engine.Job) *DaemonConfig {
    } else {
        config.BridgeIface = DefaultNetworkBridge
    }
    config.BridgeIp = job.Getenv("BridgeIp")
    config.DefaultIp = net.ParseIP(job.Getenv("DefaultIp"))
    config.InterContainerCommunication = job.GetenvBool("InterContainerCommunication")
    config.GraphDriver = job.Getenv("GraphDriver")
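ConfigFromJob pulls every daemon setting out of an engine.Job environment, and the change above moves Dns from a single Getenv string to GetenvList so that multiple DNS servers survive the trip through the job env. The list variants plausibly round-trip []string values through an encoded entry; a minimal self-contained stand-in for that idea (the JSON encoding and the type names here are assumptions, not taken from the diff):

    package jobenv

    import "encoding/json"

    // Env is a tiny stand-in for engine.Job's environment: list-valued
    // entries are stored encoded under a key, so GetenvList can recover
    // the original []string.
    type Env map[string]string

    func (e Env) SetenvList(key string, value []string) error {
        b, err := json.Marshal(value)
        if err != nil {
            return err
        }
        e[key] = string(b)
        return nil
    }

    func (e Env) GetenvList(key string) []string {
        s, ok := e[key]
        if !ok || s == "" {
            return nil
        }
        var out []string
        if err := json.Unmarshal([]byte(s), &out); err != nil {
            return nil
        }
        return out
    }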
648  container.go
@@ -4,7 +4,6 @@ import (
    "bytes"
    "encoding/json"
    "errors"
    "flag"
    "fmt"
    "github.com/dotcloud/docker/archive"
    "github.com/dotcloud/docker/graphdriver"
@@ -18,14 +17,20 @@ import (
    "os"
    "os/exec"
    "path"
    "path/filepath"
    "strconv"
    "strings"
    "sync"
    "syscall"
    "time"
)

var (
    ErrNotATTY = errors.New("The PTY is not a file")
    ErrNoTTY   = errors.New("No PTY found")
)

type Container struct {
    sync.Mutex
    root   string // Path to the "home" of the container, including metadata.
    rootfs string // Path to the root filesystem of the container.

@@ -159,218 +164,6 @@ func NewPort(proto, port string) Port {
    return Port(fmt.Sprintf("%s/%s", port, proto))
}

func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig, *flag.FlagSet, error) {
    cmd := Subcmd("run", "[OPTIONS] IMAGE [COMMAND] [ARG...]", "Run a command in a new container")
    if os.Getenv("TEST") != "" {
        cmd.SetOutput(ioutil.Discard)
        cmd.Usage = nil
    }

    flHostname := cmd.String("h", "", "Container host name")
    flWorkingDir := cmd.String("w", "", "Working directory inside the container")
    flUser := cmd.String("u", "", "Username or UID")
    flDetach := cmd.Bool("d", false, "Detached mode: Run container in the background, print new container id")
    flAttach := NewAttachOpts()
    cmd.Var(flAttach, "a", "Attach to stdin, stdout or stderr.")
    flStdin := cmd.Bool("i", false, "Keep stdin open even if not attached")
    flTty := cmd.Bool("t", false, "Allocate a pseudo-tty")
    flMemoryString := cmd.String("m", "", "Memory limit (format: <number><optional unit>, where unit = b, k, m or g)")
    flContainerIDFile := cmd.String("cidfile", "", "Write the container ID to the file")
    flNetwork := cmd.Bool("n", true, "Enable networking for this container")
    flPrivileged := cmd.Bool("privileged", false, "Give extended privileges to this container")
    flAutoRemove := cmd.Bool("rm", false, "Automatically remove the container when it exits (incompatible with -d)")
    cmd.Bool("sig-proxy", true, "Proxy all received signals to the process (even in non-tty mode)")
    cmd.String("name", "", "Assign a name to the container")
    flPublishAll := cmd.Bool("P", false, "Publish all exposed ports to the host interfaces")

    if capabilities != nil && *flMemoryString != "" && !capabilities.MemoryLimit {
        //fmt.Fprintf(stdout, "WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\n")
        *flMemoryString = ""
    }

    flCpuShares := cmd.Int64("c", 0, "CPU shares (relative weight)")

    var flPublish utils.ListOpts
    cmd.Var(&flPublish, "p", "Publish a container's port to the host (use 'docker port' to see the actual mapping)")

    var flExpose utils.ListOpts
    cmd.Var(&flExpose, "expose", "Expose a port from the container without publishing it to your host")

    var flEnv utils.ListOpts
    cmd.Var(&flEnv, "e", "Set environment variables")

    var flDns utils.ListOpts
    cmd.Var(&flDns, "dns", "Set custom dns servers")

    flVolumes := NewPathOpts()
    cmd.Var(flVolumes, "v", "Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container)")

    var flVolumesFrom utils.ListOpts
    cmd.Var(&flVolumesFrom, "volumes-from", "Mount volumes from the specified container(s)")

    flEntrypoint := cmd.String("entrypoint", "", "Overwrite the default entrypoint of the image")

    var flLxcOpts utils.ListOpts
    cmd.Var(&flLxcOpts, "lxc-conf", "Add custom lxc options -lxc-conf=\"lxc.cgroup.cpuset.cpus = 0,1\"")

    var flLinks utils.ListOpts
    cmd.Var(&flLinks, "link", "Add link to another container (name:alias)")

    if err := cmd.Parse(args); err != nil {
        return nil, nil, cmd, err
    }
    if *flDetach && len(flAttach) > 0 {
        return nil, nil, cmd, ErrConflictAttachDetach
    }
    if *flWorkingDir != "" && !path.IsAbs(*flWorkingDir) {
        return nil, nil, cmd, ErrInvalidWorikingDirectory
    }
    if *flDetach && *flAutoRemove {
        return nil, nil, cmd, ErrConflictDetachAutoRemove
    }

    // If neither -d or -a are set, attach to everything by default
    if len(flAttach) == 0 && !*flDetach {
        if !*flDetach {
            flAttach.Set("stdout")
            flAttach.Set("stderr")
            if *flStdin {
                flAttach.Set("stdin")
            }
        }
    }

    envs := []string{}

    for _, env := range flEnv {
        arr := strings.Split(env, "=")
        if len(arr) > 1 {
            envs = append(envs, env)
        } else {
            v := os.Getenv(env)
            envs = append(envs, env+"="+v)
        }
    }

    var flMemory int64

    if *flMemoryString != "" {
        parsedMemory, err := utils.RAMInBytes(*flMemoryString)

        if err != nil {
            return nil, nil, cmd, err
        }

        flMemory = parsedMemory
    }

    var binds []string

    // add any bind targets to the list of container volumes
    for bind := range flVolumes {
        arr := strings.Split(bind, ":")
        if len(arr) > 1 {
            if arr[0] == "/" {
                return nil, nil, cmd, fmt.Errorf("Invalid bind mount: source can't be '/'")
            }
            dstDir := arr[1]
            flVolumes[dstDir] = struct{}{}
            binds = append(binds, bind)
            delete(flVolumes, bind)
        }
    }

    parsedArgs := cmd.Args()
    runCmd := []string{}
    entrypoint := []string{}
    image := ""
    if len(parsedArgs) >= 1 {
        image = cmd.Arg(0)
    }
    if len(parsedArgs) > 1 {
        runCmd = parsedArgs[1:]
    }
    if *flEntrypoint != "" {
        entrypoint = []string{*flEntrypoint}
    }

    var lxcConf []KeyValuePair
    lxcConf, err := parseLxcConfOpts(flLxcOpts)
    if err != nil {
        return nil, nil, cmd, err
    }

    hostname := *flHostname
    domainname := ""

    parts := strings.SplitN(hostname, ".", 2)
    if len(parts) > 1 {
        hostname = parts[0]
        domainname = parts[1]
    }

    ports, portBindings, err := parsePortSpecs(flPublish)
    if err != nil {
        return nil, nil, cmd, err
    }

    // Merge in exposed ports to the map of published ports
    for _, e := range flExpose {
        if strings.Contains(e, ":") {
            return nil, nil, cmd, fmt.Errorf("Invalid port format for -expose: %s", e)
        }
        p := NewPort(splitProtoPort(e))
        if _, exists := ports[p]; !exists {
            ports[p] = struct{}{}
        }
    }

    config := &Config{
        Hostname:        hostname,
        Domainname:      domainname,
        PortSpecs:       nil, // Deprecated
        ExposedPorts:    ports,
        User:            *flUser,
        Tty:             *flTty,
        NetworkDisabled: !*flNetwork,
        OpenStdin:       *flStdin,
        Memory:          flMemory,
        CpuShares:       *flCpuShares,
        AttachStdin:     flAttach.Get("stdin"),
        AttachStdout:    flAttach.Get("stdout"),
        AttachStderr:    flAttach.Get("stderr"),
        Env:             envs,
        Cmd:             runCmd,
        Dns:             flDns,
        Image:           image,
        Volumes:         flVolumes,
        VolumesFrom:     strings.Join(flVolumesFrom, ","),
        Entrypoint:      entrypoint,
        WorkingDir:      *flWorkingDir,
    }

    hostConfig := &HostConfig{
        Binds:           binds,
        ContainerIDFile: *flContainerIDFile,
        LxcConf:         lxcConf,
        Privileged:      *flPrivileged,
        PortBindings:    portBindings,
        Links:           flLinks,
        PublishAllPorts: *flPublishAll,
    }

    if capabilities != nil && flMemory > 0 && !capabilities.SwapLimit {
        //fmt.Fprintf(stdout, "WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.\n")
        config.MemorySwap = -1
    }

    // When allocating stdin in attached mode, close stdin at client disconnect
    if config.OpenStdin && config.AttachStdin {
        config.StdinOnce = true
    }
    return config, hostConfig, cmd, nil
}
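In the -expose handling above, each bare spec such as "8080/tcp" is split into protocol and port and then normalized through NewPort, whose "port/proto" formatting is shown earlier in the hunk. A self-contained sketch of that pair of helpers (splitProtoPort's exact behavior, including the tcp default, is an assumption; NewPort matches the diff):

    package ports

    import (
        "fmt"
        "strings"
    )

    // Port is "port/proto", as built by NewPort in the diff above.
    type Port string

    // splitProtoPort turns "8080/tcp" into ("tcp", "8080"); the tcp default
    // for bare ports is assumed. Its two results feed NewPort directly,
    // which is why NewPort(splitProtoPort(e)) compiles as a single call.
    func splitProtoPort(raw string) (proto, port string) {
        parts := strings.SplitN(raw, "/", 2)
        if len(parts) == 1 {
            return "tcp", parts[0]
        }
        return parts[1], parts[0]
    }

    func NewPort(proto, port string) Port {
        return Port(fmt.Sprintf("%s/%s", port, proto))
    }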
type PortMapping map[string]string // Deprecated

type NetworkSettings struct {
@@ -710,9 +503,10 @@ func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, s
}

func (container *Container) Start() (err error) {
    container.State.Lock()
    defer container.State.Unlock()
    if container.State.Running {
    container.Lock()
    defer container.Unlock()

    if container.State.IsRunning() {
        return fmt.Errorf("The container %s is already running.", container.ID)
    }
    defer func() {
@@ -747,162 +541,18 @@ func (container *Container) Start() (err error) {
        log.Printf("WARNING: IPv4 forwarding is disabled. Networking will not work")
    }

    // Create the requested bind mounts
    binds := make(map[string]BindMap)
    // Define illegal container destinations
    illegalDsts := []string{"/", "."}

    for _, bind := range container.hostConfig.Binds {
        // FIXME: factorize bind parsing in parseBind
        var src, dst, mode string
        arr := strings.Split(bind, ":")
        if len(arr) == 2 {
            src = arr[0]
            dst = arr[1]
            mode = "rw"
        } else if len(arr) == 3 {
            src = arr[0]
            dst = arr[1]
            mode = arr[2]
        } else {
            return fmt.Errorf("Invalid bind specification: %s", bind)
        }

        // Bail if trying to mount to an illegal destination
        for _, illegal := range illegalDsts {
            if dst == illegal {
                return fmt.Errorf("Illegal bind destination: %s", dst)
            }
        }

        bindMap := BindMap{
            SrcPath: src,
            DstPath: dst,
            Mode:    mode,
        }
        binds[path.Clean(dst)] = bindMap
    }

    if container.Volumes == nil || len(container.Volumes) == 0 {
        container.Volumes = make(map[string]string)
        container.VolumesRW = make(map[string]bool)
    }

    // Apply volumes from another container if requested
    if container.Config.VolumesFrom != "" {
        containerSpecs := strings.Split(container.Config.VolumesFrom, ",")
        for _, containerSpec := range containerSpecs {
            mountRW := true
            specParts := strings.SplitN(containerSpec, ":", 2)
            switch len(specParts) {
            case 0:
                return fmt.Errorf("Malformed volumes-from specification: %s", container.Config.VolumesFrom)
            case 2:
                switch specParts[1] {
                case "ro":
                    mountRW = false
                case "rw": // mountRW is already true
                default:
                    return fmt.Errorf("Malformed volumes-from specification: %s", containerSpec)
                }
            }
            c := container.runtime.Get(specParts[0])
            if c == nil {
                return fmt.Errorf("Container %s not found. Impossible to mount its volumes", container.ID)
            }
            for volPath, id := range c.Volumes {
                if _, exists := container.Volumes[volPath]; exists {
                    continue
                }
                if err := os.MkdirAll(path.Join(container.RootfsPath(), volPath), 0755); err != nil {
                    return err
                }
                container.Volumes[volPath] = id
                if isRW, exists := c.VolumesRW[volPath]; exists {
                    container.VolumesRW[volPath] = isRW && mountRW
                }
            }

        }
    }
    if err := container.applyExternalVolumes(); err != nil {
        return err
    }

    volumesDriver := container.runtime.volumes.driver
    // Create the requested volumes if they don't exist
    for volPath := range container.Config.Volumes {
        volPath = path.Clean(volPath)
        // Skip existing volumes
        if _, exists := container.Volumes[volPath]; exists {
            continue
        }
        var srcPath string
        var isBindMount bool
        srcRW := false
        // If an external bind is defined for this volume, use that as a source
        if bindMap, exists := binds[volPath]; exists {
            isBindMount = true
            srcPath = bindMap.SrcPath
            if strings.ToLower(bindMap.Mode) == "rw" {
                srcRW = true
            }
            // Otherwise create a directory in $ROOT/volumes/ and use that
        } else {

            // Do not pass a container as the parameter for the volume creation.
            // The graph driver uses the container's information (Image) to
            // create the parent.
            c, err := container.runtime.volumes.Create(nil, nil, "", "", nil)
            if err != nil {
                return err
            }
            srcPath, err = volumesDriver.Get(c.ID)
            if err != nil {
                return fmt.Errorf("Driver %s failed to get volume rootfs %s: %s", volumesDriver, c.ID, err)
            }
            srcRW = true // RW by default
        }
        container.Volumes[volPath] = srcPath
        container.VolumesRW[volPath] = srcRW
        // Create the mountpoint
        rootVolPath := path.Join(container.RootfsPath(), volPath)
        if err := os.MkdirAll(rootVolPath, 0755); err != nil {
            return err
        }

        // Do not copy or change permissions if we are mounting from the host
        if srcRW && !isBindMount {
            volList, err := ioutil.ReadDir(rootVolPath)
            if err != nil {
                return err
            }
            if len(volList) > 0 {
                srcList, err := ioutil.ReadDir(srcPath)
                if err != nil {
                    return err
                }
                if len(srcList) == 0 {
                    // If the source volume is empty copy files from the root into the volume
                    if err := archive.CopyWithTar(rootVolPath, srcPath); err != nil {
                        return err
                    }

                    var stat syscall.Stat_t
                    if err := syscall.Stat(rootVolPath, &stat); err != nil {
                        return err
                    }
                    var srcStat syscall.Stat_t
                    if err := syscall.Stat(srcPath, &srcStat); err != nil {
                        return err
                    }
                    // Change the source volume's ownership if it differs from the root
                    // files that were just copied
                    if stat.Uid != srcStat.Uid || stat.Gid != srcStat.Gid {
                        if err := os.Chown(srcPath, int(stat.Uid), int(stat.Gid)); err != nil {
                            return err
                        }
                    }
                }
            }
        }
    }
    if err := container.createVolumes(); err != nil {
        return err
    }

    if err := container.generateLXCConfig(); err != nil {
@@ -924,7 +574,11 @@ func (container *Container) Start() (err error) {

    // Networking
    if !container.Config.NetworkDisabled {
        params = append(params, "-g", container.network.Gateway.String())
        network := container.NetworkSettings
        params = append(params,
            "-g", network.Gateway,
            "-i", fmt.Sprintf("%s/%d", network.IPAddress, network.IPPrefixLen),
        )
    }

    // User
@@ -936,7 +590,6 @@ func (container *Container) Start() (err error) {
    env := []string{
        "HOME=/",
        "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
        "container=lxc",
        "HOSTNAME=" + container.Config.Hostname,
    }

@@ -944,6 +597,10 @@ func (container *Container) Start() (err error) {
        env = append(env, "TERM=xterm")
    }

    if container.hostConfig.Privileged {
        params = append(params, "-privileged")
    }

    // Init any links between the parent and children
    runtime := container.runtime

@@ -1046,7 +703,7 @@ func (container *Container) Start() (err error) {
    }
    // FIXME: save state on disk *first*, then converge
    // this way disk state is used as a journal, eg. we can restore after crash etc.
    container.State.setRunning(container.cmd.Process.Pid)
    container.State.SetRunning(container.cmd.Process.Pid)

    // Init the lock
    container.waitLock = make(chan struct{})
@@ -1054,14 +711,14 @@ func (container *Container) Start() (err error) {
    container.ToDisk()
    go container.monitor()

    defer utils.Debugf("Container running: %v", container.State.Running)
    defer utils.Debugf("Container running: %v", container.State.IsRunning())
    // We wait for the container to be fully running.
    // Timeout after 5 seconds. In case of broken pipe, just retry.
    // Note: The container can run and finish correctly before
    // the end of this loop
    for now := time.Now(); time.Since(now) < 5*time.Second; {
        // If the container dies while waiting for it, just return
        if !container.State.Running {
        if !container.State.IsRunning() {
            return nil
        }
        output, err := exec.Command("lxc-info", "-s", "-n", container.ID).CombinedOutput()
@@ -1078,16 +735,208 @@ func (container *Container) Start() (err error) {
        if strings.Contains(string(output), "RUNNING") {
            return nil
        }
        utils.Debugf("Waiting for the container to start (running: %v): %s", container.State.Running, bytes.TrimSpace(output))
        utils.Debugf("Waiting for the container to start (running: %v): %s", container.State.IsRunning(), bytes.TrimSpace(output))
        time.Sleep(50 * time.Millisecond)
    }

    if container.State.Running {
    if container.State.IsRunning() {
        return ErrContainerStartTimeout
    }
    return ErrContainerStart
}

func (container *Container) getBindMap() (map[string]BindMap, error) {
    // Create the requested bind mounts
    binds := make(map[string]BindMap)
    // Define illegal container destinations
    illegalDsts := []string{"/", "."}

    for _, bind := range container.hostConfig.Binds {
        // FIXME: factorize bind parsing in parseBind
        var src, dst, mode string
        arr := strings.Split(bind, ":")
        if len(arr) == 2 {
            src = arr[0]
            dst = arr[1]
            mode = "rw"
        } else if len(arr) == 3 {
            src = arr[0]
            dst = arr[1]
            mode = arr[2]
        } else {
            return nil, fmt.Errorf("Invalid bind specification: %s", bind)
        }

        // Bail if trying to mount to an illegal destination
        for _, illegal := range illegalDsts {
            if dst == illegal {
                return nil, fmt.Errorf("Illegal bind destination: %s", dst)
            }
        }

        bindMap := BindMap{
            SrcPath: src,
            DstPath: dst,
            Mode:    mode,
        }
        binds[path.Clean(dst)] = bindMap
    }
    return binds, nil
}

func (container *Container) createVolumes() error {
    binds, err := container.getBindMap()
    if err != nil {
        return err
    }
    volumesDriver := container.runtime.volumes.driver
    // Create the requested volumes if they don't exist
    for volPath := range container.Config.Volumes {
        volPath = path.Clean(volPath)
        volIsDir := true
        // Skip existing volumes
        if _, exists := container.Volumes[volPath]; exists {
            continue
        }
        var srcPath string
        var isBindMount bool
        srcRW := false
        // If an external bind is defined for this volume, use that as a source
        if bindMap, exists := binds[volPath]; exists {
            isBindMount = true
            srcPath = bindMap.SrcPath
            if strings.ToLower(bindMap.Mode) == "rw" {
                srcRW = true
            }
            if stat, err := os.Lstat(bindMap.SrcPath); err != nil {
                return err
            } else {
                volIsDir = stat.IsDir()
            }
            // Otherwise create a directory in $ROOT/volumes/ and use that
        } else {

            // Do not pass a container as the parameter for the volume creation.
            // The graph driver uses the container's information (Image) to
            // create the parent.
            c, err := container.runtime.volumes.Create(nil, nil, "", "", nil)
            if err != nil {
                return err
            }
            srcPath, err = volumesDriver.Get(c.ID)
            if err != nil {
                return fmt.Errorf("Driver %s failed to get volume rootfs %s: %s", volumesDriver, c.ID, err)
            }
            srcRW = true // RW by default
        }
        container.Volumes[volPath] = srcPath
        container.VolumesRW[volPath] = srcRW

        // Create the mountpoint
        volPath = path.Join(container.RootfsPath(), volPath)
        rootVolPath, err := utils.FollowSymlinkInScope(volPath, container.RootfsPath())
        if err != nil {
            panic(err)
        }

        if _, err := os.Stat(rootVolPath); err != nil {
            if os.IsNotExist(err) {
                if volIsDir {
                    if err := os.MkdirAll(rootVolPath, 0755); err != nil {
                        return err
                    }
                } else {
                    if err := os.MkdirAll(path.Dir(rootVolPath), 0755); err != nil {
                        return err
                    }
                    if f, err := os.OpenFile(rootVolPath, os.O_CREATE, 0755); err != nil {
                        return err
                    } else {
                        f.Close()
                    }
                }
            }
        }

        // Do not copy or change permissions if we are mounting from the host
        if srcRW && !isBindMount {
            volList, err := ioutil.ReadDir(rootVolPath)
            if err != nil {
                return err
            }
            if len(volList) > 0 {
                srcList, err := ioutil.ReadDir(srcPath)
                if err != nil {
                    return err
                }
                if len(srcList) == 0 {
                    // If the source volume is empty copy files from the root into the volume
                    if err := archive.CopyWithTar(rootVolPath, srcPath); err != nil {
                        return err
                    }

                    var stat syscall.Stat_t
                    if err := syscall.Stat(rootVolPath, &stat); err != nil {
                        return err
                    }
                    var srcStat syscall.Stat_t
                    if err := syscall.Stat(srcPath, &srcStat); err != nil {
                        return err
                    }
                    // Change the source volume's ownership if it differs from the root
                    // files that were just copied
                    if stat.Uid != srcStat.Uid || stat.Gid != srcStat.Gid {
                        if err := os.Chown(srcPath, int(stat.Uid), int(stat.Gid)); err != nil {
                            return err
                        }
                    }
                }
            }
        }
    }
    return nil
}

func (container *Container) applyExternalVolumes() error {
    if container.Config.VolumesFrom != "" {
        containerSpecs := strings.Split(container.Config.VolumesFrom, ",")
        for _, containerSpec := range containerSpecs {
            mountRW := true
            specParts := strings.SplitN(containerSpec, ":", 2)
            switch len(specParts) {
            case 0:
                return fmt.Errorf("Malformed volumes-from specification: %s", container.Config.VolumesFrom)
            case 2:
                switch specParts[1] {
                case "ro":
                    mountRW = false
                case "rw": // mountRW is already true
                default:
                    return fmt.Errorf("Malformed volumes-from specification: %s", containerSpec)
                }
            }
            c := container.runtime.Get(specParts[0])
            if c == nil {
                return fmt.Errorf("Container %s not found. Impossible to mount its volumes", container.ID)
            }
            for volPath, id := range c.Volumes {
                if _, exists := container.Volumes[volPath]; exists {
                    continue
                }
                if err := os.MkdirAll(path.Join(container.RootfsPath(), volPath), 0755); err != nil {
                    return err
                }
                container.Volumes[volPath] = id
                if isRW, exists := c.VolumesRW[volPath]; exists {
                    container.VolumesRW[volPath] = isRW && mountRW
                }
            }

        }
    }
    return nil
}

func (container *Container) Run() error {
    if err := container.Start(); err != nil {
        return err
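getBindMap above still carries the FIXME asking for the "src:dst[:mode]" parsing to be factored into a parseBind helper. A self-contained sketch of what that extraction could look like (the function name comes from the FIXME; the parsing rules mirror the hunk, with "rw" as the default mode):

    package binds

    import (
        "fmt"
        "strings"
    )

    // parseBind mirrors the bind parsing in getBindMap above:
    // "src:dst" defaults to rw, "src:dst:mode" is explicit, anything else fails.
    func parseBind(spec string) (src, dst, mode string, err error) {
        arr := strings.Split(spec, ":")
        switch len(arr) {
        case 2:
            return arr[0], arr[1], "rw", nil
        case 3:
            return arr[0], arr[1], arr[2], nil
        default:
            return "", "", "", fmt.Errorf("invalid bind specification: %s", spec)
        }
    }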
@@ -1163,11 +1012,12 @@ func (container *Container) allocateNetwork() error {
        return nil
    }

    var iface *NetworkInterface
    var err error
    if container.State.Ghost {
        manager := container.runtime.networkManager
        if manager.disabled {
    var (
        iface *NetworkInterface
        err   error
    )
    if container.State.IsGhost() {
        if manager := container.runtime.networkManager; manager.disabled {
            iface = &NetworkInterface{disabled: true}
        } else {
            iface = &NetworkInterface{
@@ -1203,10 +1053,12 @@ func (container *Container) allocateNetwork() error {
        }
    }

    portSpecs := make(map[Port]struct{})
    bindings := make(map[Port][]PortBinding)
    var (
        portSpecs = make(map[Port]struct{})
        bindings  = make(map[Port][]PortBinding)
    )

    if !container.State.Ghost {
    if !container.State.IsGhost() {
        if container.Config.ExposedPorts != nil {
            portSpecs = container.Config.ExposedPorts
        }
@@ -1315,7 +1167,7 @@ func (container *Container) monitor() {
    }

    // Report status back
    container.State.setStopped(exitCode)
    container.State.SetStopped(exitCode)

    // Release the lock
    close(container.waitLock)
@@ -1365,10 +1217,10 @@ func (container *Container) cleanup() {
}

func (container *Container) kill(sig int) error {
    container.State.Lock()
    defer container.State.Unlock()
    container.Lock()
    defer container.Unlock()

    if !container.State.Running {
    if !container.State.IsRunning() {
        return nil
    }

@@ -1381,7 +1233,7 @@ func (container *Container) kill(sig int) error {
}

func (container *Container) Kill() error {
    if !container.State.Running {
    if !container.State.IsRunning() {
        return nil
    }

@@ -1406,7 +1258,7 @@ func (container *Container) Kill() error {
}

func (container *Container) Stop(seconds int) error {
    if !container.State.Running {
    if !container.State.IsRunning() {
        return nil
    }

@@ -1440,7 +1292,7 @@ func (container *Container) Restart(seconds int) error {
// Wait blocks until the container stops running, then returns its exit code.
func (container *Container) Wait() int {
    <-container.waitLock
    return container.State.ExitCode
    return container.State.GetExitCode()
}
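Across these hunks every direct read of State.Running, State.Ghost, and State.ExitCode is replaced with accessor calls (IsRunning, IsGhost, GetExitCode, SetRunning, SetStopped), and the locking moves from container.State to the container itself. A minimal sketch of the accessor pattern the callers now rely on (only the method names come from the diff; the struct layout is an assumption):

    package state

    import "sync"

    // State guards its fields with an embedded mutex; callers go through
    // the accessors instead of reading fields directly.
    type State struct {
        sync.Mutex
        running  bool
        pid      int
        exitCode int
    }

    func (s *State) IsRunning() bool {
        s.Lock()
        defer s.Unlock()
        return s.running
    }

    func (s *State) SetRunning(pid int) {
        s.Lock()
        defer s.Unlock()
        s.running = true
        s.pid = pid
    }

    func (s *State) SetStopped(exitCode int) {
        s.Lock()
        defer s.Unlock()
        s.running = false
        s.exitCode = exitCode
    }

    func (s *State) GetExitCode() int {
        s.Lock()
        defer s.Unlock()
        return s.exitCode
    }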
func (container *Container) Resize(h, w int) error {
@@ -1575,14 +1427,10 @@ func (container *Container) GetSize() (int64, int64) {
        }
    }

    _, err = os.Stat(container.RootfsPath())
    if err == nil {
        filepath.Walk(container.RootfsPath(), func(path string, fileInfo os.FileInfo, err error) error {
            if fileInfo != nil {
                sizeRootfs += fileInfo.Size()
            }
            return nil
        })
    if _, err = os.Stat(container.RootfsPath()); err != nil {
        if sizeRootfs, err = utils.TreeSize(container.RootfsPath()); err != nil {
            sizeRootfs = -1
        }
    }
    return sizeRw, sizeRootfs
}
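The GetSize hunk above replaces an inline filepath.Walk loop with a call to utils.TreeSize. A self-contained sketch of what such a helper plausibly looks like, mirroring the removed loop (the real utils.TreeSize may treat errors, directories, and hard links differently):

    package treesize

    import (
        "os"
        "path/filepath"
    )

    // TreeSize sums the sizes reported for everything under root, like the
    // Walk loop removed from GetSize above.
    func TreeSize(root string) (int64, error) {
        var size int64
        err := filepath.Walk(root, func(_ string, fi os.FileInfo, err error) error {
            if err != nil {
                return err
            }
            if fi != nil {
                size += fi.Size()
            }
            return nil
        })
        return size, err
    }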
@@ -1617,3 +1465,13 @@ func (container *Container) Exposes(p Port) bool {
    _, exists := container.Config.ExposedPorts[p]
    return exists
}

func (container *Container) GetPtyMaster() (*os.File, error) {
    if container.ptyMaster == nil {
        return nil, ErrNoTTY
    }
    if pty, ok := container.ptyMaster.(*os.File); ok {
        return pty, nil
    }
    return nil, ErrNotATTY
}
@@ -1,11 +1,9 @@
[Unit]
Description=Docker Application Container Engine
Documentation=http://docs.docker.io
Requires=network.target
After=multi-user.target
After=network.target

[Service]
Type=simple
ExecStartPre=/bin/mount --make-rprivate /
ExecStart=/usr/bin/docker -d
@@ -1,15 +0,0 @@
|
||||
#!/bin/bash
|
||||
# Create a CentOS base image for Docker
|
||||
# From unclejack https://github.com/dotcloud/docker/issues/290
|
||||
set -e
|
||||
|
||||
MIRROR_URL="http://centos.netnitco.net/6.4/os/x86_64/"
|
||||
MIRROR_URL_UPDATES="http://centos.netnitco.net/6.4/updates/x86_64/"
|
||||
|
||||
yum install -y febootstrap xz
|
||||
|
||||
febootstrap -i bash -i coreutils -i tar -i bzip2 -i gzip -i vim-minimal -i wget -i patch -i diffutils -i iproute -i yum centos centos64 $MIRROR_URL -u $MIRROR_URL_UPDATES
|
||||
touch centos64/etc/resolv.conf
|
||||
touch centos64/sbin/init
|
||||
|
||||
tar --numeric-owner -Jcpf centos-64.tar.xz -C centos64 .
|
||||
@@ -142,14 +142,22 @@ if [ -z "$strictDebootstrap" ]; then
    # this forces dpkg not to call sync() after package extraction and speeds up install
    # the benefit is huge on spinning disks, and the penalty is nonexistent on SSD or decent server virtualization
    echo 'force-unsafe-io' | sudo tee etc/dpkg/dpkg.cfg.d/02apt-speedup > /dev/null
    # we want to effectively run "apt-get clean" after every install to keep images small
    echo 'DPkg::Post-Invoke {"/bin/rm -f /var/cache/apt/archives/*.deb || true";};' | sudo tee etc/apt/apt.conf.d/no-cache > /dev/null
    # we want to effectively run "apt-get clean" after every install to keep images small (see output of "apt-get clean -s" for context)
    {
        aptGetClean='rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true'
        echo 'DPkg::Post-Invoke { "'$aptGetClean'"; };'
        echo 'APT::Update::Post-Invoke { "'$aptGetClean'"; };'
        echo 'Dir::Cache::pkgcache ""; Dir::Cache::srcpkgcache "";'
    } | sudo tee etc/apt/apt.conf.d/no-cache > /dev/null
    # and remove the translations, too
    echo 'Acquire::Languages "none";' | sudo tee etc/apt/apt.conf.d/no-languages > /dev/null

    # helpful undo lines for each of the above tweaks (for lack of a better home to keep track of them):
    #  rm /usr/sbin/policy-rc.d
    #  rm /sbin/initctl; dpkg-divert --rename --remove /sbin/initctl
    #  rm /etc/dpkg/dpkg.cfg.d/02apt-speedup
    #  rm /etc/apt/apt.conf.d/no-cache
    #  rm /etc/apt/apt.conf.d/no-languages

if [ -z "$skipDetection" ]; then
    # see also rudimentary platform detection in hack/install.sh
112  contrib/mkimage-rinse.sh  (new executable file)
@@ -0,0 +1,112 @@
#!/bin/bash
set -e

repo="$1"
distro="$2"
mirror="$3"

if [ ! "$repo" ] || [ ! "$distro" ]; then
    self="$(basename $0)"
    echo >&2 "usage: $self repo distro [mirror]"
    echo >&2
    echo >&2 "   ie: $self username/centos centos-5"
    echo >&2 "       $self username/centos centos-6"
    echo >&2
    echo >&2 "   ie: $self username/slc slc-5"
    echo >&2 "       $self username/slc slc-6"
    echo >&2
    echo >&2 "   ie: $self username/centos centos-5 http://vault.centos.org/5.8/os/x86_64/CentOS/"
    echo >&2 "       $self username/centos centos-6 http://vault.centos.org/6.3/os/x86_64/Packages/"
    echo >&2
    echo >&2 'See /etc/rinse for supported values of "distro" and for examples of'
    echo >&2 '  expected values of "mirror".'
    echo >&2
    echo >&2 'This script is tested to work with the original upstream version of rinse,'
    echo >&2 '  found at http://www.steve.org.uk/Software/rinse/ and also in Debian at'
    echo >&2 '  http://packages.debian.org/wheezy/rinse -- as always, YMMV.'
    echo >&2
    exit 1
fi

target="/tmp/docker-rootfs-rinse-$distro-$$-$RANDOM"

cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
returnTo="$(pwd -P)"

rinseArgs=( --arch amd64 --distribution "$distro" --directory "$target" )
if [ "$mirror" ]; then
    rinseArgs+=( --mirror "$mirror" )
fi

set -x

mkdir -p "$target"

sudo rinse "${rinseArgs[@]}"

cd "$target"

# rinse fails a little at setting up /dev, so we'll just wipe it out and create our own
sudo rm -rf dev
sudo mkdir -m 755 dev
(
    cd dev
    sudo ln -sf /proc/self/fd ./
    sudo mkdir -m 755 pts
    sudo mkdir -m 1777 shm
    sudo mknod -m 600 console c 5 1
    sudo mknod -m 600 initctl p
    sudo mknod -m 666 full c 1 7
    sudo mknod -m 666 null c 1 3
    sudo mknod -m 666 ptmx c 5 2
    sudo mknod -m 666 random c 1 8
    sudo mknod -m 666 tty c 5 0
    sudo mknod -m 666 tty0 c 4 0
    sudo mknod -m 666 urandom c 1 9
    sudo mknod -m 666 zero c 1 5
)

# effectively: febootstrap-minimize --keep-zoneinfo --keep-rpmdb --keep-services "$target"
#  locales
sudo rm -rf usr/{{lib,share}/locale,{lib,lib64}/gconv,bin/localedef,sbin/build-locale-archive}
#  docs
sudo rm -rf usr/share/{man,doc,info,gnome/help}
#  cracklib
sudo rm -rf usr/share/cracklib
#  i18n
sudo rm -rf usr/share/i18n
#  yum cache
sudo rm -rf var/cache/yum
sudo mkdir -p --mode=0755 var/cache/yum
#  sln
sudo rm -rf sbin/sln
#  ldconfig
#sudo rm -rf sbin/ldconfig
sudo rm -rf etc/ld.so.cache var/cache/ldconfig
sudo mkdir -p --mode=0755 var/cache/ldconfig

# allow networking init scripts inside the container to work without extra steps
echo 'NETWORKING=yes' | sudo tee etc/sysconfig/network > /dev/null

# to restore locales later:
#  yum reinstall glibc-common

version=
if [ -r etc/redhat-release ]; then
    version="$(sed -E 's/^[^0-9.]*([0-9.]+).*$/\1/' etc/redhat-release)"
elif [ -r etc/SuSE-release ]; then
    version="$(awk '/^VERSION/ { print $3 }' etc/SuSE-release)"
fi

if [ -z "$version" ]; then
    echo >&2 "warning: cannot autodetect OS version, using $distro as tag"
    sleep 20
    version="$distro"
fi

sudo tar --numeric-owner -c . | docker import - $repo:$version

docker run -i -t $repo:$version echo success

cd "$returnTo"
sudo rm -rf "$target"
77  contrib/mkseccomp.pl  (new executable file)
@@ -0,0 +1,77 @@
#!/usr/bin/perl
#
# A simple helper script to help people build seccomp profiles for
# Docker/LXC. The goal is mostly to reduce the attack surface to the
# kernel, by restricting access to rarely used, recently added or not used
# syscalls.
#
# This script processes one or more files which contain the list of system
# calls to be allowed. See mkseccomp.sample for more information on how you
# can configure the list of syscalls. When run, this script produces output
# which, when stored in a file, can be passed to docker as follows:
#
# docker run -lxc-conf="lxc.seccomp=$file" <rest of arguments>
#
# The included sample file shows how to cut about a quarter of all syscalls,
# without affecting most applications.
#
# For specific situations it is possible to reduce the list further. By
# reducing the list to just those syscalls required by a certain application
# you can make it difficult for unknown/unexpected code to run.
#
# Run this script as follows:
#
# ./mkseccomp.pl < mkseccomp.sample >syscalls.list
# or
# ./mkseccomp.pl mkseccomp.sample >syscalls.list
#
# Multiple files can be specified, in which case the lists of syscalls are
# combined.
#
# By Martijn van Oosterhout <kleptog@svana.org> Nov 2013

# How it works:
#
# This program basically spawns two processes to form a chain like:
#
# <process data section to prefix __NR_> | cpp | <add header and filter unknown syscalls>

use strict;
use warnings;

if( -t ) {
    print STDERR "Helper script to make seccomp filters for Docker/LXC.\n";
    print STDERR "Usage: mkseccomp.pl [files...]\n";
    exit 1;
}

my $pid = open(my $in, "-|") // die "Couldn't fork1 ($!)\n";

if($pid == 0) { # Child
    $pid = open(my $out, "|-") // die "Couldn't fork2 ($!)\n";

    if($pid == 0) { # Child, which execs cpp
        exec "cpp" or die "Couldn't exec cpp ($!)\n";
        exit 1;
    }

    # Process the DATA section and output to cpp
    print $out "#include <sys/syscall.h>\n";
    while(<>) {
        if(/^\w/) {
            print $out "__NR_$_";
        }
    }
    close $out;
    exit 0;

}

# Print header and then process output from cpp.
print "1\n";
print "whitelist\n";

while(<$in>) {
    print if( /^[0-9]/ );
}
444  contrib/mkseccomp.sample  (new file)
@@ -0,0 +1,444 @@
/* This sample file is an example for mkseccomp.pl to produce a seccomp file
 * which restricts syscalls that are only useful for an admin but allows the
 * vast majority of normal userspace programs to run normally.
 *
 * The format of this file is one line per syscall. This is then processed
 * and passed to 'cpp' to convert the names to numbers using whatever is
 * correct for your platform. As such, C-style comments are permitted. Note
 * this also means that C preprocessor macros are allowed. So it is
 * possible to create groups surrounded by #ifdef/#endif and control their
 * inclusion via #define (not #include).
 *
 * Syscalls that don't exist on your architecture are silently filtered out.
 * Syscalls marked with (*) are required for a container to spawn a bash
 * shell successfully (not necessarily full featured). Listing the same
 * syscall multiple times is no problem.
 *
 * If you want to make a list specifically for one application, the easiest
 * way is to run the application under strace, like so:
 *
 * $ strace -f -q -c -o strace.out application args...
 *
 * Once you have a reasonable sample of the execution of the program, exit
 * it. The file strace.out will have a summary of the syscalls used. Copy
 * that list into this file, comment out everything else except the starred
 * syscalls (which you need for the container to start) and you're done.
 *
 * To get the list of syscalls from the strace output, this works well
 * for me:
 *
 * $ cut -c52 < strace.out
 *
 * This sample list was compiled as a combination of all the syscalls
 * available on i386 and amd64 on Ubuntu Precise; as such it may not contain
 * everything, and not everything may be relevant for your system. This
 * shouldn't be a problem.
 */

// Filesystem/File descriptor related
access // (*)
chdir // (*)
chmod
chown
chown32
close // (*)
creat
dup // (*)
dup2 // (*)
dup3
epoll_create
epoll_create1
epoll_ctl
epoll_ctl_old
epoll_pwait
epoll_wait
epoll_wait_old
eventfd
eventfd2
faccessat // (*)
fadvise64
fadvise64_64
fallocate
fanotify_init
fanotify_mark
ioctl // (*)
fchdir
fchmod
fchmodat
fchown
fchown32
fchownat
fcntl // (*)
fcntl64
fdatasync
fgetxattr
flistxattr
flock
fremovexattr
fsetxattr
fstat // (*)
fstat64
fstatat64
fstatfs
fstatfs64
fsync
ftruncate
ftruncate64
getcwd // (*)
getdents // (*)
getdents64
getxattr
inotify_add_watch
inotify_init
inotify_init1
inotify_rm_watch
io_cancel
io_destroy
io_getevents
io_setup
io_submit
lchown
lchown32
lgetxattr
link
linkat
listxattr
llistxattr
llseek
_llseek
lremovexattr
lseek // (*)
lsetxattr
lstat
lstat64
mkdir
mkdirat
mknod
mknodat
newfstatat
_newselect
oldfstat
oldlstat
oldolduname
oldstat
olduname
oldwait4
open // (*)
openat // (*)
pipe // (*)
pipe2
poll
ppoll
pread64
preadv
futimesat
pselect6
pwrite64
pwritev
read // (*)
readahead
readdir
readlink
readlinkat
readv
removexattr
rename
renameat
rmdir
select
sendfile
sendfile64
setxattr
splice
stat // (*)
stat64
statfs // (*)
statfs64
symlink
symlinkat
sync
sync_file_range
sync_file_range2
syncfs
tee
truncate
truncate64
umask
unlink
unlinkat
ustat
utime
utimensat
utimes
write // (*)
writev

// Network related
accept
accept4
bind // (*)
connect // (*)
getpeername
getsockname // (*)
getsockopt
listen
recv
recvfrom // (*)
recvmmsg
recvmsg
send
sendmmsg
sendmsg
sendto // (*)
setsockopt
shutdown
socket // (*)
socketcall
socketpair

// Signal related
pause
rt_sigaction // (*)
rt_sigpending
rt_sigprocmask // (*)
rt_sigqueueinfo
rt_sigreturn // (*)
rt_sigsuspend
rt_sigtimedwait
rt_tgsigqueueinfo
sigaction
sigaltstack // (*)
signal
signalfd
signalfd4
sigpending
sigprocmask
sigreturn
sigsuspend

// Other needed POSIX
alarm
brk // (*)
clock_adjtime
clock_getres
clock_gettime
clock_nanosleep
//clock_settime
gettimeofday
nanosleep
nice
sysinfo
syslog
time
timer_create
timer_delete
timerfd_create
timerfd_gettime
timerfd_settime
timer_getoverrun
timer_gettime
timer_settime
times
uname // (*)

// Memory control
madvise
mbind
mincore
mlock
mlockall
mmap // (*)
mmap2
mprotect // (*)
mremap
msync
munlock
munlockall
munmap // (*)
remap_file_pages
set_mempolicy
vmsplice

// Process control
capget
//capset
clone // (*)
execve // (*)
exit // (*)
exit_group // (*)
fork
getcpu
getpgid
getpgrp // (*)
getpid // (*)
getppid // (*)
getpriority
getresgid
getresgid32
getresuid
getresuid32
getrlimit // (*)
getrusage
getsid
getuid // (*)
getuid32
getegid // (*)
getegid32
geteuid // (*)
geteuid32
getgid // (*)
getgid32
getgroups
getgroups32
getitimer
get_mempolicy
kill
//personality
prctl
prlimit64
sched_getaffinity
sched_getparam
sched_get_priority_max
sched_get_priority_min
sched_getscheduler
sched_rr_get_interval
//sched_setaffinity
//sched_setparam
//sched_setscheduler
sched_yield
setfsgid
setfsgid32
setfsuid
setfsuid32
setgid
setgid32
setgroups
setgroups32
setitimer
setpgid // (*)
setpriority
setregid
setregid32
setresgid
setresgid32
setresuid
setresuid32
setreuid
setreuid32
setrlimit
setsid
setuid
setuid32
ugetrlimit
vfork
wait4 // (*)
waitid
waitpid

// IPC
ipc
mq_getsetattr
mq_notify
mq_open
mq_timedreceive
mq_timedsend
mq_unlink
msgctl
msgget
msgrcv
msgsnd
semctl
semget
semop
semtimedop
shmat
shmctl
shmdt
shmget

// Linux specific, mostly needed for thread-related stuff
arch_prctl // (*)
get_robust_list
get_thread_area
gettid
futex // (*)
restart_syscall // (*)
set_robust_list // (*)
set_thread_area
set_tid_address // (*)
tgkill
tkill

// Admin syscalls, these are blocked
//acct
//adjtimex
//bdflush
//chroot
//create_module
//delete_module
//get_kernel_syms // Obsolete
//idle // Obsolete
//init_module
//ioperm
//iopl
//ioprio_get
//ioprio_set
//kexec_load
//lookup_dcookie // oprofile only?
//migrate_pages // NUMA
//modify_ldt
//mount
//move_pages // NUMA
//name_to_handle_at // NFS server
//nfsservctl // NFS server
//open_by_handle_at // NFS server
//perf_event_open
//pivot_root
//process_vm_readv // For debugger
//process_vm_writev // For debugger
//ptrace // For debugger
//query_module
//quotactl
//reboot
//setdomainname
//sethostname
//setns
//settimeofday
//sgetmask // Obsolete
//ssetmask // Obsolete
//stime
//swapoff
//swapon
//_sysctl
//sysfs
//sys_setaltroot
//umount
//umount2
//unshare
//uselib
//vhangup
//vm86
//vm86old

// Kernel key management
//add_key
//keyctl
//request_key

// Unimplemented
//afs_syscall
//break
//ftime
//getpmsg
//gtty
//lock
//madvise1
//mpx
//prof
//profil
//putpmsg
//security
//stty
//tuxcall
//ulimit
//vserver
3
contrib/udev/80-docker.rules
Normal file
@@ -0,0 +1,3 @@
# hide docker's loopback devices from udisks, and thus from user desktops
SUBSYSTEM=="block", ENV{DM_NAME}=="docker-*", ENV{UDISKS_PRESENTATION_HIDE}="1", ENV{UDISKS_IGNORE}="1"
SUBSYSTEM=="block", DEVPATH=="/devices/virtual/block/loop*", ATTR{loop/backing_file}=="/var/lib/docker/*", ENV{UDISKS_PRESENTATION_HIDE}="1", ENV{UDISKS_IGNORE}="1"
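To try this rule out, one would typically install it and reload udev; a sketch, assuming a udev version that supports `udevadm control --reload-rules`:

```
sudo cp contrib/udev/80-docker.rules /etc/udev/rules.d/
sudo udevadm control --reload-rules
```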
@@ -17,3 +17,34 @@ meaning you can use Vagrant to control Docker containers.
* [docker-provider](https://github.com/fgrehm/docker-provider)
* [vagrant-shell](https://github.com/destructuring/vagrant-shell)

## Setting up Vagrant-docker with the Remote API

The initial Docker upstart script will not work because it listens on `127.0.0.1`, which is not accessible to the host machine. Instead, we need to change the script to listen on `0.0.0.0`. To do this, modify `/etc/init/docker.conf` to look like this:

```
description "Docker daemon"

start on filesystem and started lxc-net
stop on runlevel [!2345]

respawn

script
    /usr/bin/docker -d -H=tcp://0.0.0.0:4243/
end script
```

Once that's done, you need to set up an SSH tunnel between your host machine and the vagrant machine that's running Docker. This can be done by running the following command in a host terminal:

```
ssh -L 4243:localhost:4243 -p 2222 vagrant@localhost
```

(The first 4243 is the port your host can connect to, the second 4243 is the port Docker is running on in the vagrant machine, and 2222 is the port Vagrant is providing for SSH. If VirtualBox is the VM you're using, you can see what value "2222" should be by going to: Network > Adapter 1 > Advanced > Port Forwarding in the VirtualBox GUI.)

Note that because the port has been changed, to run docker commands from within the command line you must run them like this:

```
sudo docker -H 0.0.0.0:4243 < commands for docker >
```
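With the tunnel up, one quick way to verify connectivity (a sketch, assuming `curl` is installed on the host) is to hit the remote API's version endpoint:

```
curl http://localhost:4243/version
```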
1
contrib/zfs/MAINTAINERS
Normal file
@@ -0,0 +1 @@
Gurjeet Singh <gurjeet@singh.im> (gurjeet.singh.im)
23
contrib/zfs/README.md
Normal file
@@ -0,0 +1,23 @@
# ZFS Storage Driver

This is a placeholder to declare the presence and status of the ZFS storage
driver for containers.

Current development is done in Gurjeet Singh's fork of Docker, under the
branch named [zfs_driver].

[zfs_driver]: https://github.com/gurjeet/docker/tree/zfs_driver


# Status

Alpha: The code is now capable of creating, running and destroying containers
and images.

The code is under development. Contributions in the form of suggestions,
code reviews, and patches are welcome.

Please send communication to gurjeet@singh.im and CC at least one Docker
mailing list.
@@ -23,22 +23,26 @@ func main() {
		sysinit.SysInit()
		return
	}
	// FIXME: Switch d and D ? (to be more sshd like)
	flVersion := flag.Bool("v", false, "Print version information and quit")
	flDaemon := flag.Bool("d", false, "Daemon mode")
	flDebug := flag.Bool("D", false, "Debug mode")
	flAutoRestart := flag.Bool("r", true, "Restart previously running containers")
	bridgeName := flag.String("b", "", "Attach containers to a pre-existing network bridge. Use 'none' to disable container networking")
	pidfile := flag.String("p", "/var/run/docker.pid", "File containing process PID")
	flRoot := flag.String("g", "/var/lib/docker", "Path to use as the root of the docker runtime.")
	flEnableCors := flag.Bool("api-enable-cors", false, "Enable CORS requests in the remote api.")
	flDns := flag.String("dns", "", "Set custom dns servers")
	flHosts := utils.ListOpts{fmt.Sprintf("unix://%s", docker.DEFAULTUNIXSOCKET)}
	flag.Var(&flHosts, "H", "tcp://host:port to bind/connect to or unix://path/to/socket to use")
	flEnableIptables := flag.Bool("iptables", true, "Disable iptables within docker")
	flDefaultIp := flag.String("ip", "0.0.0.0", "Default ip address to use when binding a containers ports")
	flInterContainerComm := flag.Bool("icc", true, "Enable inter-container communication")
	flGraphDriver := flag.String("graph-driver", "", "For docker to use a specific graph driver")

	var (
		flVersion            = flag.Bool("v", false, "Print version information and quit")
		flDaemon             = flag.Bool("d", false, "Enable daemon mode")
		flDebug              = flag.Bool("D", false, "Enable debug mode")
		flAutoRestart        = flag.Bool("r", true, "Restart previously running containers")
		bridgeName           = flag.String("b", "", "Attach containers to a pre-existing network bridge; use 'none' to disable container networking")
		bridgeIp             = flag.String("bip", "", "Use this CIDR notation address for the network bridge's IP, not compatible with -b")
		pidfile              = flag.String("p", "/var/run/docker.pid", "Path to use for daemon PID file")
		flRoot               = flag.String("g", "/var/lib/docker", "Path to use as the root of the docker runtime")
		flEnableCors         = flag.Bool("api-enable-cors", false, "Enable CORS headers in the remote API")
		flDns                = docker.NewListOpts(docker.ValidateIp4Address)
		flEnableIptables     = flag.Bool("iptables", true, "Disable docker's addition of iptables rules")
		flDefaultIp          = flag.String("ip", "0.0.0.0", "Default IP address to use when binding container ports")
		flInterContainerComm = flag.Bool("icc", true, "Enable inter-container communication")
		flGraphDriver        = flag.String("s", "", "Force the docker runtime to use a specific storage driver")
		flHosts              = docker.NewListOpts(docker.ValidateHost)
	)
	flag.Var(&flDns, "dns", "Force docker to use specific DNS servers")
	flag.Var(&flHosts, "H", "Multiple tcp://host:port or unix://path/to/socket to bind in daemon mode, single connection otherwise")

	flag.Parse()
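The `ListOpts` pattern used above is a `flag.Value` that collects a flag given multiple times, optionally validating each value. A minimal, self-contained sketch of the idea (illustrative, not the actual docker implementation):

```
package main

import (
	"flag"
	"fmt"
	"strings"
)

// ListOpts collects repeated command-line flags, optionally validating
// each value as it is set.
type ListOpts struct {
	values    []string
	validator func(string) (string, error)
}

func NewListOpts(validator func(string) (string, error)) ListOpts {
	return ListOpts{validator: validator}
}

// String and Set make *ListOpts satisfy the flag.Value interface.
func (opts *ListOpts) String() string {
	return strings.Join(opts.values, ",")
}

func (opts *ListOpts) Set(value string) error {
	if opts.validator != nil {
		v, err := opts.validator(value)
		if err != nil {
			return err
		}
		value = v
	}
	opts.values = append(opts.values, value)
	return nil
}

func (opts *ListOpts) Len() int         { return len(opts.values) }
func (opts *ListOpts) GetAll() []string { return opts.values }

func main() {
	dns := NewListOpts(nil) // a validator such as an IPv4 check could be plugged in here
	flag.Var(&dns, "dns", "Force docker to use specific DNS servers")
	flag.Parse()
	fmt.Println("dns servers:", dns.GetAll())
}
```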
@@ -46,16 +50,13 @@ func main() {
		showVersion()
		return
	}
	if len(flHosts) > 1 {
		flHosts = flHosts[1:] //trick to display a nice default value in the usage
	if flHosts.Len() == 0 {
		// If we do not have a host, default to unix socket
		flHosts.Set(fmt.Sprintf("unix://%s", docker.DEFAULTUNIXSOCKET))
	}
	for i, flHost := range flHosts {
		host, err := utils.ParseHost(docker.DEFAULTHTTPHOST, docker.DEFAULTHTTPPORT, flHost)
		if err == nil {
			flHosts[i] = host
		} else {
			log.Fatal(err)
		}

	if *bridgeName != "" && *bridgeIp != "" {
		log.Fatal("You specified -b & -bip, mutually exclusive options. Please specify only one.")
	}

	if *flDebug {
@@ -78,9 +79,10 @@ func main() {
		job.Setenv("Root", *flRoot)
		job.SetenvBool("AutoRestart", *flAutoRestart)
		job.SetenvBool("EnableCors", *flEnableCors)
		job.Setenv("Dns", *flDns)
		job.SetenvList("Dns", flDns.GetAll())
		job.SetenvBool("EnableIptables", *flEnableIptables)
		job.Setenv("BridgeIface", *bridgeName)
		job.Setenv("BridgeIp", *bridgeIp)
		job.Setenv("DefaultIp", *flDefaultIp)
		job.SetenvBool("InterContainerCommunication", *flInterContainerComm)
		job.Setenv("GraphDriver", *flGraphDriver)
@@ -88,19 +90,22 @@ func main() {
			log.Fatal(err)
		}
		// Serve api
		job = eng.Job("serveapi", flHosts...)
		job = eng.Job("serveapi", flHosts.GetAll()...)
		job.SetenvBool("Logging", true)
		if err := job.Run(); err != nil {
			log.Fatal(err)
		}
	} else {
		if len(flHosts) > 1 {
		if flHosts.Len() > 1 {
			log.Fatal("Please specify only one -H")
		}
		protoAddrParts := strings.SplitN(flHosts[0], "://", 2)
		protoAddrParts := strings.SplitN(flHosts.GetAll()[0], "://", 2)
		if err := docker.ParseCommands(protoAddrParts[0], protoAddrParts[1], flag.Args()...); err != nil {
			if sterr, ok := err.(*utils.StatusError); ok {
				os.Exit(sterr.Status)
				if sterr.Status != "" {
					log.Println(sterr.Status)
				}
				os.Exit(sterr.StatusCode)
			}
			log.Fatal(err)
		}
@@ -9,7 +9,7 @@ run apt-get install -y python-setuptools make
run easy_install pip
#from docs/requirements.txt, but here to increase cacheability
run pip install Sphinx==1.1.3
run pip install sphinxcontrib-httpdomain==1.1.8
run pip install sphinxcontrib-httpdomain==1.1.9
add . /docs
run cd /docs; make docs
@@ -1,2 +1,4 @@
Andy Rothfusz <andy@dotcloud.com> (@metalivedev)
Ken Cochrane <ken@dotcloud.com> (@kencochrane)
James Turnbull <james@lovedthanlost.net> (@jamtur01)
Sven Dowideit <SvenDowideit@fosiki.com> (@SvenDowideit)
@@ -41,24 +41,25 @@ its dependencies. There are two main ways to install this tool:
###Native Installation

* Install sphinx: `pip install sphinx`
    * Mac OS X: `[sudo] pip-2.7 install sphinx`
* Install sphinx httpdomain contrib package: `pip install sphinxcontrib-httpdomain`
    * Mac OS X: `[sudo] pip-2.7 install sphinxcontrib-httpdomain`
* If pip is not available, you can probably install it using your favorite package manager as **python-pip**
Install dependencies from the `requirements.txt` file in your `docker/docs`
directory:

* Linux: `pip install -r docs/requirements.txt`

* Mac OS X: `[sudo] pip-2.7 -r docs/requirements.txt`

###Alternative Installation: Docker Container

If you're running ``docker`` on your development machine then you may
find it easier and cleaner to use the Dockerfile. This installs Sphinx
find it easier and cleaner to use the docs Dockerfile. This installs Sphinx
in a container, adds the local ``docs/`` directory and builds the HTML
docs inside the container, even starting a simple HTTP server on port
8000 so that you can connect and see your changes. Just run ``docker
build .`` and run the resulting image. This is equivalent to
``make clean server`` since each container starts clean.
8000 so that you can connect and see your changes.

In the ``docs/`` directory, run:
```docker build -t docker:docs . && docker run -p 8000:8000 docker:docs```
In the ``docker`` source directory, run:
```make docs```

This is equivalent to ``make clean server`` since each container starts clean.

Usage
-----
@@ -127,7 +128,8 @@ Guides on using sphinx
* Code examples

  * Start without $, so it's easy to copy and paste.
  * Start typed commands with ``$ `` (dollar space) so that they
    are easily differentiated from program output.
  * Use "sudo" with docker to ensure that your command is runnable
    even if they haven't [used the *docker*
    group](http://docs.docker.io/en/latest/use/basics/#why-sudo).
@@ -136,7 +138,7 @@ Manpages
--------

* To make the manpages, run ``make man``. Please note there is a bug
  in spinx 1.1.3 which makes this fail. Upgrade to the latest version
  in Sphinx 1.1.3 which makes this fail. Upgrade to the latest version
  of Sphinx.
* Then preview the manpage by running ``man _build/man/docker.1``,
  where ``_build/man/docker.1`` is the path to the generated manfile
@@ -34,6 +34,35 @@ Calling /images/<name>/insert is the same as calling
You can still call an old version of the api using
/v1.0/images/<name>/insert


v1.8
****

Full Documentation
------------------

:doc:`docker_remote_api_v1.8`

What's new
----------

.. http:post:: /build

   **New!** This endpoint now returns the build status as a JSON stream. In case
   of a build error, it returns the exit status of the failed command.

.. http:get:: /containers/(id)/json

   **New!** This endpoint now returns the host config for the container.

.. http:post:: /images/create
.. http:post:: /images/(name)/insert
.. http:post:: /images/(name)/push

   **New!** A progressDetail object was added to the JSON. It's now possible
   to get the current value and the total of the progress without having to
   parse the string.

v1.7
****

@@ -132,7 +132,9 @@ Create a container
           ],
           "Dns":null,
           "Image":"base",
           "Volumes":{},
           "Volumes":{
                   "/tmp": {}
           },
           "VolumesFrom":"",
           "WorkingDir":""
@@ -361,8 +363,12 @@ Start a container
       {
            "Binds":["/tmp:/tmp"],
            "LxcConf":{"lxc.utsname":"docker"}
            "LxcConf":{"lxc.utsname":"docker"},
            "PortBindings":null,
            "PublishAllPorts":false
       }

   Binds need to reference Volumes that were defined during container creation.

   **Example response**:

@@ -990,10 +996,10 @@ Build an image from Dockerfile via stdin
   .. sourcecode:: http

      HTTP/1.1 200 OK
      Content-Type: application/json

      {{ STREAM }}


   The stream must be a tar archive compressed with one of the
   following algorithms: identity (no compression), gzip, bzip2,
   xz.
@@ -1171,6 +1177,53 @@ Monitor Docker's events
   :statuscode 200: no error
   :statuscode 500: server error

Get a tarball containing all images and tags in a repository
************************************************************

.. http:get:: /images/(name)/get

   Get a tarball containing all images and metadata for the repository specified by ``name``.

   **Example request**:

   .. sourcecode:: http

      GET /images/ubuntu/get

   **Example response**:

   .. sourcecode:: http

      HTTP/1.1 200 OK
      Content-Type: application/x-tar

      Binary data stream

   :statuscode 200: no error
   :statuscode 500: server error

Load a tarball with a set of images and tags into docker
********************************************************

.. http:post:: /images/load

   Load a set of images and tags into the docker repository.

   **Example request**:

   .. sourcecode:: http

      POST /images/load

      Tarball in body

   **Example response**:

   .. sourcecode:: http

      HTTP/1.1 200 OK

   :statuscode 200: no error
   :statuscode 500: server error
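As a sketch of driving the save/load round trip from the shell (not part of the original docs; this assumes the daemon is listening on the TCP port used elsewhere in this document and that ``curl`` is available):

.. code-block:: bash

    $ sudo docker save ubuntu > repository.tar
    $ curl -X POST -H "Content-Type: application/x-tar" \
        --data-binary @repository.tar http://localhost:4243/v1.8/images/load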

3. Going further
================

1274
docs/sources/api/docker_remote_api_v1.8.rst
Normal file
File diff suppressed because it is too large
@@ -37,7 +37,7 @@ We expect that there will be only one instance of the index, run and managed by
- It delegates authentication and authorization to the Index Auth service using tokens
- It supports different storage backends (S3, cloud files, local FS)
- It doesn't have a local database
- It will be open-sourced at some point
- `Source Code <https://github.com/dotcloud/docker-registry>`_

We expect that there will be multiple registries out there. To help grasp the context, here are some examples of registries:

@@ -46,10 +46,6 @@ We expect that there will be multiple registries out there. To help to grasp the
- **vendor registry**: such a registry is provided by a software vendor, who wants to distribute docker images. It would be operated and managed by the vendor. Only users authorized by the vendor would be able to get write access. Some images would be public (accessible for anyone), others private (accessible only for authorized users). Authentication and authorization would be delegated to the Index. The goal of vendor registries is to let someone do “docker pull basho/riak1.3” and automatically push from the vendor registry (instead of a sponsor registry); i.e. get all the convenience of a sponsor registry, while retaining control on the asset distribution.
- **private registry**: such a registry is located behind a firewall, or protected by an additional security layer (HTTP authorization, SSL client-side certificates, IP address authorization...). The registry is operated by a private entity, outside of dotCloud’s control. It can optionally delegate additional authorization to the Index, but it is not mandatory.

.. note::

    Mirror registries and private registries which do not use the Index don’t even need to run the registry code. They can be implemented by any kind of transport implementing HTTP GET and PUT. Read-only registries can be powered by a simple static HTTP server.

.. note::

    The latter implies that while HTTP is the protocol of choice for a registry, multiple schemes are possible (and in some cases, trivial):

@@ -1,4 +1,4 @@
:title: Registry API
:title: Remote API Client Libraries
:description: Various client libraries available to use with the Docker remote API
:keywords: API, Docker, index, registry, REST, documentation, clients, Python, Ruby, Javascript, Erlang, Go

@@ -18,6 +18,39 @@ To list available commands, either run ``docker`` with no parameters or execute
    ...

.. _cli_daemon:

``daemon``
----------

::

    Usage of docker:
      -D=false: Enable debug mode
      -H=[unix:///var/run/docker.sock]: Multiple tcp://host:port or unix://path/to/socket to bind in daemon mode, single connection otherwise
      -api-enable-cors=false: Enable CORS headers in the remote API
      -b="": Attach containers to a pre-existing network bridge; use 'none' to disable container networking
      -bip="": Use the provided CIDR notation address for the dynamically created bridge (docker0); Mutually exclusive of -b
      -d=false: Enable daemon mode
      -dns="": Force docker to use specific DNS servers
      -g="/var/lib/docker": Path to use as the root of the docker runtime
      -icc=true: Enable inter-container communication
      -ip="0.0.0.0": Default IP address to use when binding container ports
      -iptables=true: Disable docker's addition of iptables rules
      -p="/var/run/docker.pid": Path to use for daemon PID file
      -r=true: Restart previously running containers
      -s="": Force the docker runtime to use a specific storage driver
      -v=false: Print version information and quit

The docker daemon is the persistent process that manages containers. Docker uses the same binary for both the
daemon and client. To run the daemon you provide the ``-d`` flag.

To force docker to use devicemapper as the storage driver, use ``docker -d -s devicemapper``

To set the DNS server for all docker containers, use ``docker -d -dns 8.8.8.8``

To run the daemon with debug output, use ``docker -d -D``
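These flags can be combined. A sketch (not from the original docs) that starts a debug-mode daemon with a custom DNS server and the devicemapper storage driver:

.. code-block:: bash

    $ sudo docker -d -D -dns 8.8.8.8 -s devicemapper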

.. _cli_attach:

``attach``
@@ -34,7 +67,8 @@ To list available commands, either run ``docker`` with no parameters or execute
You can detach from the container again (and leave it running) with
``CTRL-c`` (for a quiet exit) or ``CTRL-\`` to get a stacktrace of
the Docker client when it quits.
the Docker client when it quits. When you detach from the container's
process, the exit code will be returned to the client.

To stop a container, use ``docker stop``

@@ -88,35 +122,69 @@ Examples:

    Usage: docker build [OPTIONS] PATH | URL | -
    Build a new container image from the source code at PATH
      -t="": Repository name (and optionally a tag) to be applied to the resulting image in case of success.
      -t="": Repository name (and optionally a tag) to be applied
             to the resulting image in case of success.
      -q=false: Suppress verbose build output.
      -no-cache: Do not use the cache when building the image.
      -rm: Remove intermediate containers after a successful build
    When a single Dockerfile is given as URL, then no context is set. When a git repository is set as URL, the repository is used as context

The files at PATH or URL are called the "context" of the build. The
build process may refer to any of the files in the context, for
example when using an :ref:`ADD <dockerfile_add>` instruction. When a
single ``Dockerfile`` is given as URL, then no context is set. When a
git repository is set as URL, then the repository is used as the
context.

.. _cli_build_examples:

.. seealso:: :ref:`dockerbuilder`.

Examples:
~~~~~~~~~

.. code-block:: bash

    sudo docker build .
    $ sudo docker build .
    Uploading context 10240 bytes
    Step 1 : FROM busybox
    Pulling repository busybox
     ---> e9aa60c60128MB/2.284 MB (100%) endpoint: https://cdn-registry-1.docker.io/v1/
    Step 2 : RUN ls -lh /
     ---> Running in 9c9e81692ae9
    total 24
    drwxr-xr-x    2 root     root        4.0K Mar 12  2013 bin
    drwxr-xr-x    5 root     root        4.0K Oct 19 00:19 dev
    drwxr-xr-x    2 root     root        4.0K Oct 19 00:19 etc
    drwxr-xr-x    2 root     root        4.0K Nov 15 23:34 lib
    lrwxrwxrwx    1 root     root           3 Mar 12  2013 lib64 -> lib
    dr-xr-xr-x  116 root     root           0 Nov 15 23:34 proc
    lrwxrwxrwx    1 root     root           3 Mar 12  2013 sbin -> bin
    dr-xr-xr-x   13 root     root           0 Nov 15 23:34 sys
    drwxr-xr-x    2 root     root        4.0K Mar 12  2013 tmp
    drwxr-xr-x    2 root     root        4.0K Nov 15 23:34 usr
     ---> b35f4035db3f
    Step 3 : CMD echo Hello World
     ---> Running in 02071fceb21b
     ---> f52f38b7823e
    Successfully built f52f38b7823e

This will read the ``Dockerfile`` from the current directory. It will
also send any other files and directories found in the current
directory to the ``docker`` daemon.
This example specifies that the PATH is ``.``, and so all the files in
the local directory get tar'd and sent to the Docker daemon. The PATH
specifies where to find the files for the "context" of the build on
the Docker daemon. Remember that the daemon could be running on a
remote machine and that no parsing of the Dockerfile happens at the
client side (where you're running ``docker build``). That means that
*all* the files at PATH get sent, not just the ones listed to
:ref:`ADD <dockerfile_add>` in the ``Dockerfile``.

The transfer of context from the local machine to the Docker daemon is
what the ``docker`` client means when you see the "Uploading context"
message.

The contents of this directory would be used by ``ADD`` commands found
within the ``Dockerfile``. This will send a lot of data to the
``docker`` daemon if the current directory contains a lot of data. If
an absolute path is provided instead of ``.`` then only the files and
directories required by the ADD commands from the ``Dockerfile`` will be
added to the context and transferred to the ``docker`` daemon.

.. code-block:: bash

    sudo docker build -t vieux/apache:2.0 .
    $ sudo docker build -t vieux/apache:2.0 .

This will build like the previous example, but it will then tag the
resulting image. The repository name will be ``vieux/apache`` and the
@@ -125,20 +193,19 @@ tag will be ``2.0``

.. code-block:: bash

    sudo docker build - < Dockerfile
    $ sudo docker build - < Dockerfile

This will read a ``Dockerfile`` from *stdin* without context. Due to
the lack of a context, no contents of any local directory will be sent
to the ``docker`` daemon. ``ADD`` doesn't work when running in this
mode because the absence of the context provides no source files to
copy to the container.
to the ``docker`` daemon. Since there is no context, a Dockerfile
``ADD`` only works if it refers to a remote URL.

.. code-block:: bash

    sudo docker build github.com/creack/docker-firefox
    $ sudo docker build github.com/creack/docker-firefox

This will clone the Github repository and use it as context. The
``Dockerfile`` at the root of the repository is used as
This will clone the Github repository and use the cloned repository as
context. The ``Dockerfile`` at the root of the repository is used as
``Dockerfile``. Note that you can specify an arbitrary git repository
by using the ``git://`` schema.

@@ -157,27 +224,52 @@ by using the ``git://`` schema.
      -m="": Commit message
      -author="": Author (eg. "John Hannibal Smith <hannibal@a-team.com>")
      -run="": Configuration to be applied when the image is launched with `docker run`.
               (ex: '{"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}')
               (ex: -run='{"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}')

Simple commit of an existing container
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. _cli_commit_examples:

Commit an existing container
~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. code-block:: bash

    $ docker ps
    $ sudo docker ps
    ID            IMAGE         COMMAND    CREATED     STATUS       PORTS
    c3f279d17e0a  ubuntu:12.04  /bin/bash  7 days ago  Up 25 hours
    197387f1b436  ubuntu:12.04  /bin/bash  7 days ago  Up 25 hours
    $ docker commit c3f279d17e0a SvenDowideit/testimage:version3
    f5283438590d
    $ docker images | head
    REPOSITORY               TAG       ID            CREATED         SIZE
    SvenDowideit/testimage   version3  f5283438590d  16 seconds ago  204.2 MB (virtual 335.7 MB)
    REPOSITORY               TAG       ID            CREATED         VIRTUAL SIZE
    SvenDowideit/testimage   version3  f5283438590d  16 seconds ago  335.7 MB

Change the command that a container runs
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Sometimes you have an application container running just a service and you need
to make a quick change (run bash?) and then change it back.

In this example, we run a container with ``ls`` and then change the image to
run ``ls /etc``.

.. code-block:: bash

    $ docker run -t -name test ubuntu ls
    bin boot dev etc home lib lib64 media mnt opt proc root run sbin selinux srv sys tmp usr var
    $ docker commit -run='{"Cmd": ["ls","/etc"]}' test test2
    933d16de9e70005304c1717b5c6f2f39d6fd50752834c6f34a155c70790011eb
    $ docker run -t test2
    adduser.conf            gshadow          login.defs           rc0.d
    alternatives            gshadow-         logrotate.d          rc1.d
    apt                     host.conf        lsb-base             rc2.d
    ...

Full -run example
.................

The ``-run`` JSON hash changes the ``Config`` section when running ``docker inspect CONTAINERID``
or ``config`` when running ``docker inspect IMAGEID``.

(multiline is ok within a single quote ``'``)

::
@@ -219,10 +311,15 @@ Full -run example

::

    Usage: docker cp CONTAINER:RESOURCE HOSTPATH
    Usage: docker cp CONTAINER:PATH HOSTPATH

    Copy files/folders from the container's filesystem to the host
    path. Paths are relative to the root of the filesystem.

.. code-block:: bash

    $ sudo docker cp 7bb0e258aefe:/etc/debian_version .
    $ sudo docker cp blue_frog:/etc/hosts .

.. _cli_diff:

@@ -331,7 +428,13 @@ Show events in the past from a specified time

    Usage: docker export CONTAINER

    Export the contents of a filesystem as a tar archive
    Export the contents of a filesystem as a tar archive to STDOUT

For example:

.. code-block:: bash

    $ sudo docker export red_panda > latest.tar

.. _cli_history:

@@ -392,18 +495,52 @@ To see how the docker:latest image was built:

    List images

      -a=false: show all images
      -a=false: show all images (by default filter out the intermediate images used to build)
      -notrunc=false: Don't truncate output
      -q=false: only show numeric IDs
      -tree=false: output graph in tree format
      -viz=false: output graph in graphviz format

Listing the most recently created images
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. code-block:: bash

    $ sudo docker images | head
    REPOSITORY   TAG      IMAGE ID      CREATED       VIRTUAL SIZE
    <none>       <none>   77af4d6b9913  19 hours ago  1.089 GB
    committest   latest   b6fa739cedf5  19 hours ago  1.089 GB
    <none>       <none>   78a85c484f71  19 hours ago  1.089 GB
    docker       latest   30557a29d5ab  20 hours ago  1.089 GB
    <none>       <none>   0124422dd9f9  20 hours ago  1.089 GB
    <none>       <none>   18ad6fad3402  22 hours ago  1.082 GB
    <none>       <none>   f9f1e26352f0  23 hours ago  1.089 GB
    tryout       latest   2629d1fa0b81  23 hours ago  131.5 MB
    <none>       <none>   5ed6274db6ce  24 hours ago  1.089 GB

Listing the full length image IDs
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. code-block:: bash

    $ sudo docker images -notrunc | head
    REPOSITORY   TAG      IMAGE ID                                                          CREATED       VIRTUAL SIZE
    <none>       <none>   77af4d6b9913e693e8d0b4b294fa62ade6054e6b2f1ffb617ac955dd63fb0182  19 hours ago  1.089 GB
    committest   latest   b6fa739cedf5ea12a620a439402b6004d057da800f91c7524b5086a5e4749c9f  19 hours ago  1.089 GB
    <none>       <none>   78a85c484f71509adeaace20e72e941f6bdd2b25b4c75da8693efd9f61a37921  19 hours ago  1.089 GB
    docker       latest   30557a29d5abc51e5f1d5b472e79b7e296f595abcf19fe6b9199dbbc809c6ff4  20 hours ago  1.089 GB
    <none>       <none>   0124422dd9f9cf7ef15c0617cda3931ee68346455441d66ab8bdc5b05e9fdce5  20 hours ago  1.089 GB
    <none>       <none>   18ad6fad340262ac2a636efd98a6d1f0ea775ae3d45240d3418466495a19a81b  22 hours ago  1.082 GB
    <none>       <none>   f9f1e26352f0a3ba6a0ff68167559f64f3e21ff7ada60366e2d44a04befd1d3a  23 hours ago  1.089 GB
    tryout       latest   2629d1fa0b81b222fca63371ca16cbf6a0772d07759ff80e8d1369b926940074  23 hours ago  131.5 MB
    <none>       <none>   5ed6274db6ceb2397844896966ea239290555e74ef307030ebb01ff91b1914df  24 hours ago  1.089 GB

Displaying images visually
~~~~~~~~~~~~~~~~~~~~~~~~~~

::
.. code-block:: bash

    sudo docker images -viz | dot -Tpng -o docker.png
    $ sudo docker images -viz | dot -Tpng -o docker.png

.. image:: docker_images.gif
   :alt: Example inheritance graph of Docker images.
@@ -412,9 +549,9 @@ Displaying images visually

Displaying image hierarchy
~~~~~~~~~~~~~~~~~~~~~~~~~~

::
.. code-block:: bash

    sudo docker images -tree
    $ sudo docker images -tree

    |─8dbd9e392a96 Size: 131.5 MB (virtual 131.5 MB) Tags: ubuntu:12.04,ubuntu:latest,ubuntu:precise
    └─27cf78414709 Size: 180.1 MB (virtual 180.1 MB)
@@ -447,7 +584,8 @@ Displaying image hierarchy

    Usage: docker import URL|- [REPOSITORY[:TAG]]

    Create a new filesystem image from the contents of a tarball
    Create an empty filesystem image and import the contents of the tarball
    (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) into it, then optionally tag it.

At this time, the URL must start with ``http`` and point to a single
file archive (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) containing a
@@ -519,6 +657,12 @@ might not get preserved.

    Insert a file from URL in the IMAGE at PATH

Use the specified IMAGE as the parent for a new image which adds a
:ref:`layer <layer_def>` containing the new file. ``insert`` does not modify
the original image, and the new image has the contents of the parent image,
plus the new file.


Examples
~~~~~~~~

@@ -528,6 +672,7 @@ Insert file from github
.. code-block:: bash

    $ sudo docker insert 8283e18b24bc https://raw.github.com/metalivedev/django/master/postinstall /tmp/postinstall.sh
    06fd35556d7b

.. _cli_inspect:

@@ -540,6 +685,52 @@ Insert file from github

    Return low-level information on a container

      -format="": template to output results

By default, this will render all results in a JSON array. If a format
is specified, the given template will be executed for each result.

Go's `text/template <http://golang.org/pkg/text/template/>`_ package
describes all the details of the format.

Examples
~~~~~~~~

Get an instance's IP Address
............................

For the most part, you can pick out any field from the JSON in a
fairly straightforward manner.

.. code-block:: bash

    $ sudo docker inspect -format='{{.NetworkSettings.IPAddress}}' $INSTANCE_ID

List All Port Bindings
......................

One can loop over arrays and maps in the results to produce simple
text output:

.. code-block:: bash

    $ sudo docker inspect -format='{{range $p, $conf := .NetworkSettings.Ports}} {{$p}} -> {{(index $conf 0).HostPort}} {{end}}' $INSTANCE_ID

Find a Specific Port Mapping
............................

The ``.Field`` syntax doesn't work when the field name begins with a
number, but the template language's ``index`` function does. The
``.NetworkSettings.Ports`` section contains a map of the internal port
mappings to a list of external address/port objects, so to grab just
the numeric public port, you use ``index`` to find the specific port
map, and then ``index`` 0 contains the first object inside of that. Then
we ask for the ``HostPort`` field to get the public address.

.. code-block:: bash

    $ sudo docker inspect -format='{{(index (index .NetworkSettings.Ports "8787/tcp") 0).HostPort}}' $INSTANCE_ID

.. _cli_kill:

``kill``
@@ -559,6 +750,18 @@ Known Issues (kill)
* :issue:`197` indicates that ``docker kill`` may leave directories
  behind and make it difficult to remove the container.

.. _cli_load:

``load``
--------

::

    Usage: docker load < repository.tar

    Loads a tarred repository from the standard input stream.
    Restores both images and tags.

.. _cli_login:

``login``
@@ -592,6 +795,15 @@ Known Issues (kill)

    Fetch the logs of a container

``docker logs`` is a convenience which batch-retrieves whatever logs
are present at the time of execution. This does not guarantee
execution order when combined with a ``docker run`` (i.e. your run may
not have generated any logs at the time you execute ``docker logs``).

``docker logs -f`` combines ``docker logs`` and ``docker attach``: it
will first return all logs from the beginning and then continue
streaming new output from the container's stdout and stderr.
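For example, a sketch that follows the output of the ``console`` container created in the ``run`` examples later in this document:

.. code-block:: bash

    $ sudo docker logs -f console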

.. _cli_port:
@@ -620,6 +832,15 @@ Known Issues (kill)
      -notrunc=false: Don't truncate output
      -q=false: Only display numeric IDs

Running ``docker ps`` showing 2 linked containers.

.. code-block:: bash

    $ docker ps
    CONTAINER ID  IMAGE                       COMMAND              CREATED         STATUS         PORTS     NAMES
    4c01db0b339c  ubuntu:12.04                bash                 17 seconds ago  Up 16 seconds            webapp
    d7886598dbe2  crosbymichael/redis:latest  /redis-server --dir  33 minutes ago  Up 33 minutes  6379/tcp  redis,webapp/db

.. _cli_pull:

``pull``
@@ -668,7 +889,7 @@ Known Issues (kill)
      -link="": Remove the link instead of the actual container

Known Issues (rm)
~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~

* :issue:`197` indicates that ``docker kill`` may leave directories
  behind and make it difficult to remove the container.
@@ -679,7 +900,7 @@ Examples:

.. code-block:: bash

    $ docker rm /redis
    $ sudo docker rm /redis
    /redis

@@ -688,7 +909,7 @@ This will remove the container referenced under the link ``/redis``.

.. code-block:: bash

    $ docker rm -link /webapp/redis
    $ sudo docker rm -link /webapp/redis
    /webapp/redis

@@ -697,7 +918,7 @@ network communication.

.. code-block:: bash

    $ docker rm `docker ps -a -q`
    $ sudo docker rm `docker ps -a -q`

This command will delete all stopped containers. The command ``docker ps -a -q`` will return all
@@ -714,6 +935,38 @@ containers will not be deleted.
    Usage: docker rmi IMAGE [IMAGE...]

    Remove one or more images

Removing tagged images
~~~~~~~~~~~~~~~~~~~~~~

Images can be removed either by their short or long IDs, or their image names.
If an image has more than one name, each of them needs to be removed before the
image is removed.

.. code-block:: bash

    $ sudo docker images
    REPOSITORY   TAG      IMAGE ID      CREATED         SIZE
    test1        latest   fd484f19954f  23 seconds ago  7 B (virtual 4.964 MB)
    test         latest   fd484f19954f  23 seconds ago  7 B (virtual 4.964 MB)
    test2        latest   fd484f19954f  23 seconds ago  7 B (virtual 4.964 MB)

    $ sudo docker rmi fd484f19954f
    Error: Conflict, fd484f19954f wasn't deleted
    2013/12/11 05:47:16 Error: failed to remove one or more images

    $ sudo docker rmi test1
    Untagged: fd484f19954f4920da7ff372b5067f5b7ddb2fd3830cecd17b96ea9e286ba5b8
    $ sudo docker rmi test2
    Untagged: fd484f19954f4920da7ff372b5067f5b7ddb2fd3830cecd17b96ea9e286ba5b8

    $ sudo docker images
    REPOSITORY   TAG      IMAGE ID      CREATED         SIZE
    test1        latest   fd484f19954f  23 seconds ago  7 B (virtual 4.964 MB)
    $ sudo docker rmi test
    Untagged: fd484f19954f4920da7ff372b5067f5b7ddb2fd3830cecd17b96ea9e286ba5b8
    Deleted: fd484f19954f4920da7ff372b5067f5b7ddb2fd3830cecd17b96ea9e286ba5b8


.. _cli_run:

@@ -751,13 +1004,27 @@ containers will not be deleted.
      -link="": Add link to another container (name:alias)
      -name="": Assign the specified name to the container. If no name is specified, docker will generate a random name
      -P=false: Publish all exposed ports to the host interfaces

``'docker run'`` first ``'creates'`` a writeable container layer over
the specified image, and then ``'starts'`` it using the specified
command. That is, ``'docker run'`` is equivalent to the API
``/containers/create`` then ``/containers/(id)/start``.

Examples
--------
``docker run`` can be used in combination with ``docker commit`` to :ref:`change the command that a container runs <cli_commit_examples>`.

Known Issues (run -volumes-from)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

* :issue:`2702`: "lxc-start: Permission denied - failed to mount"
  could indicate a permissions problem with AppArmor. Please see the
  issue for a workaround.

Examples:
~~~~~~~~~

.. code-block:: bash

    sudo docker run -cidfile /tmp/docker_test.cid ubuntu echo "test"
    $ sudo docker run -cidfile /tmp/docker_test.cid ubuntu echo "test"

This will create a container and print "test" to the console. The
``cidfile`` flag makes docker attempt to create a new file and write the
@@ -766,7 +1033,10 @@ error. Docker will close this file when docker run exits.

.. code-block:: bash

    docker run mount -t tmpfs none /var/spool/squid
    $ sudo docker run -t -i -rm ubuntu bash
    root@bc338942ef20:/# mount -t tmpfs none /mnt
    mount: permission denied


This will *not* work, because by default, most potentially dangerous
kernel capabilities are dropped; including ``cap_sys_admin`` (which is
@@ -775,7 +1045,12 @@ allow it to run:

.. code-block:: bash

    docker run -privileged mount -t tmpfs none /var/spool/squid
    $ sudo docker run -privileged ubuntu bash
    root@50e3f57e16e6:/# mount -t tmpfs none /mnt
    root@50e3f57e16e6:/# df -h
    Filesystem      Size  Used Avail Use% Mounted on
    none            1.9G     0  1.9G   0% /mnt


The ``-privileged`` flag gives *all* capabilities to the container,
and it also lifts all the limitations enforced by the ``device``
@@ -785,7 +1060,7 @@ use-cases, like running Docker within Docker.

.. code-block:: bash

    docker run -w /path/to/dir/ -i -t ubuntu pwd
    $ sudo docker run -w /path/to/dir/ -i -t ubuntu pwd

The ``-w`` flag runs the command inside the given directory,
here ``/path/to/dir/``. If the path does not exist, it is created inside the
@@ -793,7 +1068,7 @@ container.

.. code-block:: bash

    docker run -v `pwd`:`pwd` -w `pwd` -i -t ubuntu pwd
    $ sudo docker run -v `pwd`:`pwd` -w `pwd` -i -t ubuntu pwd

The ``-v`` flag mounts the current working directory into the container.
The ``-w`` flag runs the command inside the current
@@ -803,7 +1078,7 @@ using the container, but inside the current working directory.

.. code-block:: bash

    docker run -p 127.0.0.1:80:8080 ubuntu bash
    $ sudo docker run -p 127.0.0.1:80:8080 ubuntu bash

This binds port ``8080`` of the container to port ``80`` on 127.0.0.1 of the
host machine. :ref:`port_redirection` explains in detail how to manipulate ports
@@ -811,7 +1086,7 @@ in Docker.

.. code-block:: bash

    docker run -expose 80 ubuntu bash
    $ sudo docker run -expose 80 ubuntu bash

This exposes port ``80`` of the container for use within a link without
publishing the port to the host system's interfaces. :ref:`port_redirection`
@@ -819,14 +1094,14 @@ explains in detail how to manipulate ports in Docker.

.. code-block:: bash

    docker run -name console -t -i ubuntu bash
    $ sudo docker run -name console -t -i ubuntu bash

This will create and run a new container with the container name
being ``console``.

.. code-block:: bash

    docker run -link /redis:redis -name console ubuntu bash
    $ sudo docker run -link /redis:redis -name console ubuntu bash

The ``-link`` flag will link the container named ``/redis`` into the
newly created container with the alias ``redis``. The new container
@@ -836,7 +1111,7 @@ to the newly created container.

.. code-block:: bash

    docker run -volumes-from 777f7dc92da7,ba8c0c54f0f2:ro -i -t ubuntu pwd
    $ sudo docker run -volumes-from 777f7dc92da7,ba8c0c54f0f2:ro -i -t ubuntu pwd

The ``-volumes-from`` flag mounts all the defined volumes from the
reference containers. Containers can be specified by a comma-separated
@@ -845,12 +1120,17 @@ id may be optionally suffixed with ``:ro`` or ``:rw`` to mount the volumes in
read-only or read-write mode, respectively. By default, the volumes are mounted
in the same mode (rw or ro) as the reference container.

Known Issues (run -volumes-from)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. _cli_save:

* :issue:`2702`: "lxc-start: Permission denied - failed to mount"
  could indicate a permissions problem with AppArmor. Please see the
  issue for a workaround.
``save``
---------

::

    Usage: docker save image > repository.tar

    Streams a tarred repository to the standard output stream.
    Contains all parent layers, and all tags + versions.
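A sketch of a full save/load round trip using the usage shown above (the image name is illustrative):

.. code-block:: bash

    $ sudo docker save ubuntu > ubuntu.tar
    $ sudo docker load < ubuntu.tar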

.. _cli_search:

@@ -874,7 +1154,7 @@ Known Issues (run -volumes-from)

::

    Usage: docker start [OPTIONS] NAME
    Usage: docker start [OPTIONS] CONTAINER

    Start a stopped container

@@ -42,11 +42,10 @@ This following command will build a development environment using the Dockerfile

.. code-block:: bash

    sudo docker build -t docker .
    sudo make build


If the build is successful, congratulations! You have produced a clean build of docker, neatly encapsulated in a standard build environment.
If the build is successful, congratulations! You have produced a clean build of
docker, neatly encapsulated in a standard build environment.


Step 4: Build the Docker Binary
@@ -56,10 +55,23 @@ To create the Docker binary, run this command:

.. code-block:: bash

    sudo docker run -privileged -v `pwd`:/go/src/github.com/dotcloud/docker docker hack/make.sh binary
    sudo make binary

This will create the Docker binary in ``./bundles/<version>-dev/binary/``

Using your built Docker binary
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The binary is available outside the container in the directory
``./bundles/<version>-dev/binary/``. You can swap your host docker executable
with this binary for live testing - for example, on ubuntu:

.. code-block:: bash

    sudo service docker stop ; sudo cp $(which docker) $(which docker)_ ; sudo cp ./bundles/<version>-dev/binary/docker-<version>-dev $(which docker);sudo service docker start

.. note:: It's safer to run the tests below before swapping your host's docker binary.


Step 5: Run the Tests
---------------------
@@ -68,10 +80,15 @@ To execute the test cases, run this command:

.. code-block:: bash

    sudo docker run -privileged -v `pwd`:/go/src/github.com/dotcloud/docker docker hack/make.sh test
    sudo make test


Note: if you're running the tests in vagrant, you need to specify a dns entry in the command: `-dns 8.8.8.8`
Note: if you're running the tests in vagrant, you need to specify a dns entry in
the command (either edit the Makefile, or run the step manually):

.. code-block:: bash

    sudo docker run -dns 8.8.8.8 -privileged -v `pwd`:/go/src/github.com/dotcloud/docker docker hack/make.sh test

If the tests are successful, the tail of the output should look something like this

@@ -113,15 +130,24 @@ You can run an interactive session in the newly built container:

.. code-block:: bash

    sudo docker run -privileged -i -t docker bash
    sudo make shell

    # type 'exit' to exit
    # type 'exit' or Ctrl-D to exit


Extra Step: Build and view the Documentation
--------------------------------------------

.. note:: The binary is available outside the container in the directory ``./bundles/<version>-dev/binary/``. You can swap your host docker executable with this binary for live testing - for example, on ubuntu: ``sudo service docker stop ; sudo cp $(which docker) $(which docker)_ ; sudo cp ./bundles/<version>-dev/binary/docker-<version>-dev $(which docker);sudo service docker start``.
If you want to read the documentation from a local website, or are making changes
to it, you can build the documentation and then serve it by:

.. code-block:: bash

    sudo make docs
    # when it's done, you can point your browser to http://yourdockerhost:8000
    # type Ctrl-C to exit


**Need More Help?**

If you need more help then hop on to the `#docker-dev IRC channel <irc://chat.freenode.net#docker-dev>`_ or post a message on the `Docker developer mailinglist <https://groups.google.com/d/forum/docker-dev>`_.
If you need more help then hop on to the `#docker-dev IRC channel <irc://chat.freenode.net#docker-dev>`_ or post a message on the `Docker developer mailing list <https://groups.google.com/d/forum/docker-dev>`_.

137
docs/sources/examples/cfengine_process_management.rst
Normal file
@@ -0,0 +1,137 @@

:title: Process Management with CFEngine
:description: Managing containerized processes with CFEngine
:keywords: cfengine, process, management, usage, docker, documentation

Process Management with CFEngine
================================

Create Docker containers with managed processes.

Docker monitors one process in each running container and the container lives or dies with that process.
By introducing CFEngine inside Docker containers, we can alleviate a few of the issues that may arise:

* It is possible to easily start multiple processes within a container, all of which will be managed automatically, with the normal ``docker run`` command.
* If a managed process dies or crashes, CFEngine will start it again within 1 minute.
* The container itself will live as long as the CFEngine scheduling daemon (cf-execd) lives. With CFEngine, we are able to decouple the life of the container from the uptime of the service it provides.


How it works
------------

CFEngine, together with the cfe-docker integration policies, is installed as part of the Dockerfile. This builds CFEngine into our Docker image.

The Dockerfile's ``ENTRYPOINT`` takes an arbitrary number of commands (with any desired arguments) as parameters.
When we run the Docker container these parameters get written to CFEngine policies and CFEngine takes over to ensure that the desired processes are running in the container.

CFEngine scans the process table for the ``basename`` of the commands given to the ``ENTRYPOINT`` and runs the command to start the process if the ``basename`` is not found.
For example, if we start the container with ``docker run "/path/to/my/application parameters"``, CFEngine will look for a process named ``application`` and run the command.
If an entry for ``application`` is not found in the process table at any point in time, CFEngine will execute ``/path/to/my/application parameters`` to start the application once again.
The check on the process table happens every minute.

Note that it is therefore important that the command to start your application leaves a process with the basename of the command.
This can be made more flexible by making some minor adjustments to the CFEngine policies, if desired.
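
For illustration (a hypothetical command; the image built below does not
actually contain nginx), starting a container like this would make CFEngine
watch for a process whose basename is ``nginx``:

.. code-block:: bash

# CFEngine extracts the basename "nginx" from the parameter, and re-runs
# the full command whenever no "nginx" process is in the process table
docker run managed_image "/usr/sbin/nginx"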


Usage
-----

This example assumes you have Docker installed and working.
We will install and manage ``apache2`` and ``sshd`` in a single container.

There are three steps:

1. Install CFEngine into the container.
2. Copy the CFEngine Docker process management policy into the containerized CFEngine installation.
3. Start your application processes as part of the ``docker run`` command.


Building the container image
~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The first two steps can be done as part of a Dockerfile, as follows.

.. code-block:: bash

FROM ubuntu
MAINTAINER Eystein Måløy Stenberg <eytein.stenberg@gmail.com>

RUN apt-get -y install wget lsb-release unzip

# install latest CFEngine
RUN wget -qO- http://cfengine.com/pub/gpg.key | apt-key add -
RUN echo "deb http://cfengine.com/pub/apt $(lsb_release -cs) main" > /etc/apt/sources.list.d/cfengine-community.list
RUN apt-get update
RUN apt-get -y install cfengine-community

# install cfe-docker process management policy
RUN wget --no-check-certificate https://github.com/estenberg/cfe-docker/archive/master.zip -P /tmp/ && unzip /tmp/master.zip -d /tmp/
RUN cp /tmp/cfe-docker-master/cfengine/bin/* /var/cfengine/bin/
RUN cp /tmp/cfe-docker-master/cfengine/inputs/* /var/cfengine/inputs/
RUN rm -rf /tmp/cfe-docker-master /tmp/master.zip

# apache2 and openssh are just for testing purposes, install your own apps here
RUN apt-get -y install openssh-server apache2
RUN mkdir -p /var/run/sshd
RUN echo "root:password" | chpasswd  # need a password for ssh

ENTRYPOINT ["/var/cfengine/bin/docker_processes_run.sh"]


By saving this file as ``Dockerfile`` in a working directory, you can then build
your image with the docker build command, e.g. ``docker build -t managed_image .``.

Testing the container
~~~~~~~~~~~~~~~~~~~~~

Start the container with ``apache2`` and ``sshd`` running and managed, forwarding a port to our SSH instance:

.. code-block:: bash

docker run -p 127.0.0.1:222:22 -d managed_image "/usr/sbin/sshd" "/etc/init.d/apache2 start"

We now clearly see one of the benefits of the cfe-docker integration: it allows us to start several processes
as part of a normal ``docker run`` command.

We can now log in to our new container and see that both ``apache2`` and ``sshd`` are running. We have set the root password to
"password" in the Dockerfile above and can use that to log in with ssh:

.. code-block:: bash

ssh -p222 root@127.0.0.1

ps -ef
UID        PID  PPID  C STIME TTY          TIME CMD
root         1     0  0 07:48 ?        00:00:00 /bin/bash /var/cfengine/bin/docker_processes_run.sh /usr/sbin/sshd /etc/init.d/apache2 start
root        18     1  0 07:48 ?        00:00:00 /var/cfengine/bin/cf-execd -F
root        20     1  0 07:48 ?        00:00:00 /usr/sbin/sshd
root        32     1  0 07:48 ?        00:00:00 /usr/sbin/apache2 -k start
www-data    34    32  0 07:48 ?        00:00:00 /usr/sbin/apache2 -k start
www-data    35    32  0 07:48 ?        00:00:00 /usr/sbin/apache2 -k start
www-data    36    32  0 07:48 ?        00:00:00 /usr/sbin/apache2 -k start
root        93    20  0 07:48 ?        00:00:00 sshd: root@pts/0
root       105    93  0 07:48 pts/0    00:00:00 -bash
root       112   105  0 07:49 pts/0    00:00:00 ps -ef


If we stop apache2, it will be started again within a minute by CFEngine.

.. code-block:: bash

service apache2 status
 Apache2 is running (pid 32).
service apache2 stop
 * Stopping web server apache2 ... waiting    [ OK ]
service apache2 status
 Apache2 is NOT running.
# ... wait up to 1 minute...
service apache2 status
 Apache2 is running (pid 173).


Adapting to your applications
-----------------------------

To make sure your applications get managed in the same manner, there are just two things you need to adjust from the above example (a sketch follows this list):

* In the Dockerfile used above, install your applications instead of ``apache2`` and ``sshd``.
* When you start the container with ``docker run``, specify the command line arguments to your applications rather than ``apache2`` and ``sshd``.
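
For instance, a minimal sketch with a hypothetical daemon (the image name and
command below are illustrative only, not part of cfe-docker):

.. code-block:: bash

# same pattern as above: each argument is one process for CFEngine to manage;
# "mydaemon" stands in for your own application's start command
docker run -d my_managed_image "/usr/sbin/mydaemon --config /etc/mydaemon.conf"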

@@ -131,8 +131,6 @@ Attach to the container to see the results in real-time.

- **"docker attach"** This will allow us to attach to a background
  process to see what is going on.
- **"-sig-proxy=true"** Proxy all received signals to the process
  (even in non-tty mode)
- **$CONTAINER_ID** The Id of the container we want to attach to.

Exit from the container attachment by pressing Control-C.

@@ -24,3 +24,5 @@ to more substantial services like those which you might find in production.

postgresql_service
mongodb
running_riak_service
using_supervisord
cfengine_process_management

@@ -7,26 +7,18 @@

PostgreSQL Service
==================

.. include:: example_header.inc

.. note::

A shorter version of `this blog post`_.

.. note::

As of version 0.5.2, Docker requires root privileges to run.
You have to either manually adjust your system configuration (permissions on
/var/run/docker.sock or sudo config), or prefix `docker` with `sudo`. Check
`this thread`_ for details.

.. _this blog post: http://zaiste.net/2013/08/docker_postgresql_how_to/
.. _this thread: https://groups.google.com/forum/?fromgroups#!topic/docker-club/P3xDLqmLp0E

Installing PostgreSQL on Docker
-------------------------------

For clarity, I won't be showing command output.

Run an interactive shell in Docker container.
Run an interactive shell in a Docker container.

.. code-block:: bash

@@ -38,26 +30,26 @@ Update its dependencies.

apt-get update

Install ``python-software-properties``.
Install ``python-software-properties``, ``software-properties-common``, ``wget`` and ``vim``.

.. code-block:: bash

apt-get -y install python-software-properties
apt-get -y install software-properties-common
apt-get -y install python-software-properties software-properties-common wget vim

Add Pitti's PostgreSQL repository. It contains the most recent stable release
of PostgreSQL i.e. ``9.2``.
Add PostgreSQL's repository. It contains the most recent stable release
of PostgreSQL, ``9.3``.

.. code-block:: bash

add-apt-repository ppa:pitti/postgresql
wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add -
echo "deb http://apt.postgresql.org/pub/repos/apt/ precise-pgdg main" > /etc/apt/sources.list.d/pgdg.list
apt-get update

Finally, install PostgreSQL 9.2
Finally, install PostgreSQL 9.3

.. code-block:: bash

apt-get -y install postgresql-9.2 postgresql-client-9.2 postgresql-contrib-9.2
apt-get -y install postgresql-9.3 postgresql-client-9.3 postgresql-contrib-9.3

Now, create a PostgreSQL superuser role that can create databases and
other roles. Following Vagrant's convention the role will be named

@@ -76,15 +68,14 @@ role.

Adjust PostgreSQL configuration so that remote connections to the
database are possible. Make sure that inside
``/etc/postgresql/9.2/main/pg_hba.conf`` you have following line (you will need
to install an editor, e.g. ``apt-get install vim``):
``/etc/postgresql/9.3/main/pg_hba.conf`` you have the following line:

.. code-block:: bash

host    all    all    0.0.0.0/0    md5

Additionaly, inside ``/etc/postgresql/9.2/main/postgresql.conf``
uncomment ``listen_addresses`` so it is as follows:
Additionally, inside ``/etc/postgresql/9.3/main/postgresql.conf``
uncomment ``listen_addresses`` like so:

.. code-block:: bash

@@ -94,7 +85,7 @@ uncomment ``listen_addresses`` so it is as follows:

This PostgreSQL setup is for development purposes only. Refer
to the PostgreSQL documentation for how to fine-tune these settings so that it
is enough secure.
is secure enough.

Exit.

@@ -102,43 +93,43 @@ Exit.

exit

Create an image and assign it a name. ``<container_id>`` is in the
Bash prompt; you can also locate it using ``docker ps -a``.
Create an image from our container and assign it a name. The ``<container_id>``
is in the Bash prompt; you can also locate it using ``docker ps -a``.

.. code-block:: bash

sudo docker commit <container_id> <your username>/postgresql

Finally, run PostgreSQL server via ``docker``.
Finally, run the PostgreSQL server via ``docker``.

.. code-block:: bash

CONTAINER=$(sudo docker run -d -p 5432 \
-t <your username>/postgresql \
/bin/su postgres -c '/usr/lib/postgresql/9.2/bin/postgres \
-D /var/lib/postgresql/9.2/main \
-c config_file=/etc/postgresql/9.2/main/postgresql.conf')
/bin/su postgres -c '/usr/lib/postgresql/9.3/bin/postgres \
-D /var/lib/postgresql/9.3/main \
-c config_file=/etc/postgresql/9.3/main/postgresql.conf')

Connect the PostgreSQL server using ``psql`` (You will need postgres installed
on the machine. For ubuntu, use something like
``sudo apt-get install postgresql``).
Connect to the PostgreSQL server using ``psql`` (You will need the
postgresql client installed on the machine. For ubuntu, use something
like ``sudo apt-get install postgresql-client``).

.. code-block:: bash

CONTAINER_IP=$(sudo docker inspect $CONTAINER | grep IPAddress | awk '{ print $2 }' | tr -d ',"')
CONTAINER_IP=$(sudo docker inspect -format='{{.NetworkSettings.IPAddress}}' $CONTAINER)
psql -h $CONTAINER_IP -p 5432 -d docker -U docker -W

As before, create roles or databases if needed.

.. code-block:: bash

psql (9.2.4)
psql (9.3.1)
Type "help" for help.

docker=# CREATE DATABASE foo OWNER=docker;
CREATE DATABASE

Additionally, publish your newly created image on Docker Index.
Additionally, publish your newly created image on the Docker Index.

.. code-block:: bash

@@ -160,9 +151,9 @@ container starts.

.. code-block:: bash

sudo docker commit -run='{"Cmd": \
["/bin/su", "postgres", "-c", "/usr/lib/postgresql/9.2/bin/postgres -D \
/var/lib/postgresql/9.2/main -c \
config_file=/etc/postgresql/9.2/main/postgresql.conf"], "PortSpecs": ["5432"]}' \
["/bin/su", "postgres", "-c", "/usr/lib/postgresql/9.3/bin/postgres -D \
/var/lib/postgresql/9.3/main -c \
config_file=/etc/postgresql/9.3/main/postgresql.conf"], "PortSpecs": ["5432"]}' \
<container_id> <your username>/postgresql

From now on, just type ``docker run <your username>/postgresql`` and

128
docs/sources/examples/using_supervisord.rst
Normal file
@@ -0,0 +1,128 @@

:title: Using Supervisor with Docker
:description: How to use Supervisor process management with Docker
:keywords: docker, supervisor, process management

.. _using_supervisord:

Using Supervisor with Docker
============================

.. include:: example_header.inc

Traditionally a Docker container runs a single process when it is launched, for
example an Apache daemon or an SSH server daemon. Often though you want to run
more than one process in a container. There are a number of ways you can
achieve this, ranging from using a simple Bash script as the value of your
container's ``CMD`` instruction to installing a process management tool.
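
A rough sketch of that first, script-based approach (the wrapper below is
illustrative only, and has none of the supervision this example adds) might be:

.. code-block:: bash

#!/bin/bash
# start_services.sh: start sshd in the background, then keep Apache in
# the foreground so the container stays alive; note that nothing here
# restarts either daemon if it dies
/usr/sbin/sshd
source /etc/apache2/envvars && exec /usr/sbin/apache2 -DFOREGROUND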

In this example we're going to make use of the process management tool,
`Supervisor <http://supervisord.org/>`_, to manage multiple processes in our
container. Using Supervisor allows us to better control, manage, and restart the
processes we want to run. To demonstrate this we're going to install and manage both an
SSH daemon and an Apache daemon.

Creating a Dockerfile
---------------------

Let's start by creating a basic ``Dockerfile`` for our new image.

.. code-block:: bash

FROM ubuntu:latest
MAINTAINER examples@docker.io
RUN echo "deb http://archive.ubuntu.com/ubuntu precise main universe" > /etc/apt/sources.list
RUN apt-get update
RUN apt-get upgrade -y

Installing Supervisor
---------------------

We can now install our SSH and Apache daemons as well as Supervisor in our container.

.. code-block:: bash

RUN apt-get install -y openssh-server apache2 supervisor
RUN mkdir -p /var/run/sshd
RUN mkdir -p /var/log/supervisor

Here we're installing the ``openssh-server``, ``apache2`` and ``supervisor``
(which provides the Supervisor daemon) packages. We're also creating two new
directories that are needed to run our SSH daemon and Supervisor.

Adding Supervisor's configuration file
--------------------------------------

Now let's add a configuration file for Supervisor. The default file is called
``supervisord.conf`` and is located in ``/etc/supervisor/conf.d/``.

.. code-block:: bash

ADD supervisord.conf /etc/supervisor/conf.d/supervisord.conf

Let's see what is inside our ``supervisord.conf`` file.

.. code-block:: bash

[supervisord]
nodaemon=true

[program:sshd]
command=/usr/sbin/sshd -D

[program:apache2]
command=/bin/bash -c "source /etc/apache2/envvars && /usr/sbin/apache2 -DFOREGROUND"

The ``supervisord.conf`` configuration file contains directives that configure
Supervisor and the processes it manages. The first block, ``[supervisord]``,
provides configuration for Supervisor itself. We're using one directive,
``nodaemon``, which tells Supervisor to run interactively rather than daemonize.

The next two blocks manage the services we wish to control. Each block controls
a separate process. The blocks contain a single directive, ``command``, which
specifies what command to run to start each process.
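
Adding another managed process later follows the same shape; for example, a
block for a hypothetical worker daemon (not part of this image) would look
like this:

.. code-block:: bash

; each additional [program:...] block is one more process Supervisor runs
[program:myworker]
command=/usr/local/bin/myworker --foreground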

Exposing ports and running Supervisor
-------------------------------------

Now let's finish our ``Dockerfile`` by exposing some required ports and
specifying the ``CMD`` instruction to start Supervisor when our container
launches.

.. code-block:: bash

EXPOSE 22 80
CMD ["/usr/bin/supervisord"]

Here we've exposed ports 22 and 80 on the container and we're running the
``/usr/bin/supervisord`` binary when the container launches.

Building our container
----------------------

We can now build our new container.

.. code-block:: bash

sudo docker build -t <yourname>/supervisord .

Running our Supervisor container
--------------------------------

Once we've got a built image we can launch a container from it.

.. code-block:: bash

sudo docker run -p 22 -p 80 -t -i <yourname>/supervisord
2013-11-25 18:53:22,312 CRIT Supervisor running as root (no user in config file)
2013-11-25 18:53:22,312 WARN Included extra file "/etc/supervisor/conf.d/supervisord.conf" during parsing
2013-11-25 18:53:22,342 INFO supervisord started with pid 1
2013-11-25 18:53:23,346 INFO spawned: 'sshd' with pid 6
2013-11-25 18:53:23,349 INFO spawned: 'apache2' with pid 7
. . .

We've launched a new container interactively using the ``docker run`` command.
That container has run Supervisor and launched the SSH and Apache daemons with
it. We've specified the ``-p`` flag to expose ports 22 and 80. From here we can
now identify the exposed ports and connect to one or both of the SSH and Apache
daemons.

@@ -22,22 +22,37 @@ Amazon QuickStart

1. **Choose an image:**

* Open http://cloud-images.ubuntu.com/locator/ec2/
* Enter ``amd64 precise`` in the search field (it will search as you
  type)
* Pick an image by clicking on the image name. *An EBS-enabled
  image will let you use a t1.micro instance.* Clicking on the image
  name will take you to your AWS Console.
* Launch the `Create Instance Wizard
  <https://console.aws.amazon.com/ec2/v2/home?#LaunchInstanceWizard:>`_ menu
  on your AWS Console.

* When picking the source AMI for your instance type, select "Community
  AMIs".

* Search for ``amd64 precise``. Pick one of the amd64 Ubuntu images.

* If you choose an EBS-enabled AMI, you'll also be able to launch a
  ``t1.micro`` instance (more info on `pricing
  <http://aws.amazon.com/en/ec2/pricing/>`_). ``t1.micro`` instances are
  eligible for Amazon's Free Usage Tier.

* When you click select you'll be taken to the instance setup, and you're one
  click away from having your Ubuntu VM up and running.

2. **Tell CloudInit to install Docker:**

* Enter ``#include https://get.docker.io`` into the instance *User
  Data*. `CloudInit <https://help.ubuntu.com/community/CloudInit>`_
  is part of the Ubuntu image you chose and it bootstraps from this
  *User Data*.
* When you're on the "Configure Instance Details" step, expand the "Advanced
  Details" section.

3. After a few more standard choices where defaults are probably ok, your
   AWS Ubuntu instance with Docker should be running!
* Under "User data", select "As text".

* Enter ``#include https://get.docker.io`` into the instance *User Data*.
  `CloudInit <https://help.ubuntu.com/community/CloudInit>`_ is part of the
  Ubuntu image you chose; it will bootstrap Docker by running the shell
  script located at this URL.

3. After a few more standard choices where defaults are probably ok, your AWS
   Ubuntu instance with Docker should be running!

**If this is your first AWS instance, you may need to set up your
Security Group to allow SSH.** By default all incoming ports to your

@@ -154,7 +169,7 @@ Docker that way too. Vagrant 1.1 or higher is required.

includes rights to SSH (port 22) to your container.

If you have an advanced AWS setup, you might want to have a look at
https://github.com/mitchellh/vagrant-aws
`vagrant-aws <https://github.com/mitchellh/vagrant-aws>`_.

7. Connect to your machine

@@ -1,5 +1,5 @@

:title: Installation on Arch Linux
:description: Docker installation on Arch Linux.
:keywords: arch linux, virtualization, docker, documentation, installation

.. _arch_linux:

@@ -7,54 +7,58 @@

Arch Linux
==========

Installing on Arch Linux is not officially supported but can be handled via
either of the following AUR packages:
.. include:: install_header.inc

* `lxc-docker <https://aur.archlinux.org/packages/lxc-docker/>`_
* `lxc-docker-git <https://aur.archlinux.org/packages/lxc-docker-git/>`_
.. include:: install_unofficial.inc

The lxc-docker package will install the latest tagged version of docker.
The lxc-docker-git package will build from the current master branch.
Installing on Arch Linux can be handled via the package in community:

* `docker <https://www.archlinux.org/packages/community/x86_64/docker/>`_

or the following AUR package:

* `docker-git <https://aur.archlinux.org/packages/docker-git/>`_

The docker package will install the latest tagged version of docker.
The docker-git package will build from the current master branch.

Dependencies
------------

Docker depends on several packages which are specified as dependencies in
either AUR package.
the packages. The core dependencies are:

* aufs3
* bridge-utils
* go
* device-mapper
* iproute2
* linux-aufs_friendly
* lxc
* sqlite


Installation
------------

.. include:: install_header.inc
For the normal package a simple
::

.. include:: install_unofficial.inc
pacman -S docker

is all that is needed.

For the AUR package execute:
::

yaourt -S docker-git

The instructions here assume **yaourt** is installed. See
`Arch User Repository <https://wiki.archlinux.org/index.php/Arch_User_Repository#Installing_packages>`_
for information on building and installing packages from the AUR if you have not
done so before.

Keep in mind that if **linux-aufs_friendly** is not already installed that a
new kernel will be compiled and this can take quite a while.

::

yaourt -S lxc-docker-git


Starting Docker
---------------

Prior to starting docker modify your bootloader to use the
**linux-aufs_friendly** kernel and reboot your system.

There is a systemd service unit created for docker. To start the docker service:

::

@@ -12,17 +12,9 @@ Binaries

**This instruction set is meant for hackers who want to try out Docker
on a variety of environments.**

Right now, the officially supported distributions are:

- :ref:`ubuntu_precise`
- :ref:`ubuntu_raring`


But we know people have had success running it under

- Debian
- Suse
- :ref:`arch_linux`
Before following these directions, you should really check if a packaged version
of Docker is already available for your distribution. We have packages for many
distributions, and more keep showing up all the time!

Check Your Kernel
-----------------

@@ -34,7 +26,7 @@ Get the docker binary:

.. code-block:: bash

wget --output-document=docker https://get.docker.io/builds/Linux/x86_64/docker-latest
wget https://get.docker.io/builds/Linux/x86_64/docker-latest -O docker
chmod +x docker


52
docs/sources/installation/fedora.rst
Normal file
@@ -0,0 +1,52 @@

:title: Requirements and Installation on Fedora
:description: Please note this project is currently under heavy development. It should not be used in production.
:keywords: Docker, Docker documentation, fedora, requirements, virtualbox, vagrant, git, ssh, putty, cygwin, linux

.. _fedora:

Fedora
======

.. include:: install_header.inc

.. include:: install_unofficial.inc

Docker is available in **Fedora 19 and later**. Please note that, due to
current limitations, Docker is able to run only on the **64 bit**
architecture.

Installation
------------

Install the ``docker-io`` package, which will install Docker on our host.

.. code-block:: bash

sudo yum -y install docker-io

To update the ``docker-io`` package:

.. code-block:: bash

sudo yum -y update docker-io

Now that it's installed, let's start the Docker daemon.

.. code-block:: bash

sudo systemctl start docker

If we want Docker to start at boot, we should also:

.. code-block:: bash

sudo systemctl enable docker

Now let's verify that Docker is working.

.. code-block:: bash

sudo docker run -i -t mattdm/fedora /bin/bash

**Done!** Now continue with the :ref:`hello_world` example.

@@ -4,8 +4,8 @@

.. _gentoo_linux:

Gentoo Linux
============
Gentoo
======

.. include:: install_header.inc

@@ -22,17 +22,19 @@ provided at https://github.com/tianon/docker-overlay which can be added using

properly installing and using the overlay can be found in `the overlay README
<https://github.com/tianon/docker-overlay/blob/master/README.md#using-this-overlay>`_.

Note that sometimes there is a disparity between the latest version and what's
in the overlay, and between the latest version in the overlay and what's in the
portage tree. Please be patient, and the latest version should propagate
shortly.

Installation
^^^^^^^^^^^^

The package should properly pull in all the necessary dependencies and prompt
for all necessary kernel options. For the most straightforward installation
experience, use ``sys-kernel/aufs-sources`` as your kernel sources. If you
prefer not to use ``sys-kernel/aufs-sources``, the portage tree also contains
``sys-fs/aufs3``, which includes the patches necessary for adding AUFS support
to other kernel source packages such as ``sys-kernel/gentoo-sources`` (and a
``kernel-patch`` USE flag to perform the patching to ``/usr/src/linux``
automatically).
for all necessary kernel options. The ebuilds for 0.7+ include use flags to
pull in the proper dependencies of the major storage drivers, with the
"device-mapper" use flag being enabled by default, since that is the simplest
installation path.

.. code-block:: bash

@@ -47,9 +49,9 @@ the #docker IRC channel on the freenode network.

Starting Docker
^^^^^^^^^^^^^^^

Ensure that you are running a kernel that includes the necessary AUFS
patches/support and includes all the necessary modules and/or configuration for
LXC.
Ensure that you are running a kernel that includes all the necessary modules
and/or configuration for LXC (and optionally for device-mapper and/or AUFS,
depending on the storage driver you've decided to use).

OpenRC
------

65
docs/sources/installation/google.rst
Normal file
@@ -0,0 +1,65 @@

:title: Installation on Google Cloud Platform
:description: Please note this project is currently under heavy development. It should not be used in production.
:keywords: Docker, Docker documentation, installation, google, Google Compute Engine, Google Cloud Platform

`Google Cloud Platform <https://cloud.google.com/>`_
====================================================

.. include:: install_header.inc

.. _googlequickstart:

`Compute Engine <https://developers.google.com/compute>`_ QuickStart for `Debian <https://www.debian.org>`_
-----------------------------------------------------------------------------------------------------------

1. Go to `Google Cloud Console <https://cloud.google.com/console>`_ and create a new Cloud Project with `Compute Engine enabled <https://developers.google.com/compute/docs/signup>`_.

2. Download and configure the `Google Cloud SDK <https://developers.google.com/cloud/sdk/>`_ to use your project with the following commands:

.. code-block:: bash

$ curl https://dl.google.com/dl/cloudsdk/release/install_google_cloud_sdk.bash | bash
$ gcloud auth login
Enter a cloud project id (or leave blank to not set): <google-cloud-project-id>

3. Start a new instance, select a zone close to you and the desired instance size:

.. code-block:: bash

$ gcutil addinstance docker-playground --image=backports-debian-7
1: europe-west1-a
...
4: us-central1-b
>>> <zone-index>
1: machineTypes/n1-standard-1
...
12: machineTypes/g1-small
>>> <machine-type-index>

4. Connect to the instance using SSH:

.. code-block:: bash

$ gcutil ssh docker-playground
docker-playground:~$

5. Enable IP forwarding:

.. code-block:: bash

docker-playground:~$ echo net.ipv4.ip_forward=1 | sudo tee /etc/sysctl.d/99-docker.conf
docker-playground:~$ sudo sysctl --system

6. Install the latest Docker release and configure it to start when the instance boots:

.. code-block:: bash

docker-playground:~$ curl get.docker.io | bash
docker-playground:~$ sudo update-rc.d docker defaults

7. Start a new container:

.. code-block:: bash

docker-playground:~$ sudo docker run busybox echo 'docker on GCE \o/'
docker on GCE \o/

@@ -9,7 +9,7 @@ Installation

There are a number of ways to install Docker, depending on where you
want to run the daemon. The :ref:`ubuntu_linux` installation is the
officially-tested version, and the community adds more techniques for
officially-tested version. The community adds more techniques for
installing Docker all the time.

Contents:

@@ -18,13 +18,16 @@ Contents:

:maxdepth: 1

ubuntulinux
binaries
security
upgrading
kernel
rhel
fedora
archlinux
gentoolinux
vagrant
windows
amazon
rackspace
archlinux
gentoolinux
google
kernel
binaries
security
upgrading

@@ -11,10 +11,10 @@ In short, Docker has the following kernel requirements:

- Linux version 3.8 or above.

- `AUFS support <http://aufs.sourceforge.net/>`_.

- Cgroups and namespaces must be enabled.

*Note: as of 0.7 docker no longer requires aufs. AUFS support is still available as an optional driver.*

The officially supported kernel is the one recommended by the
:ref:`ubuntu_linux` installation path. It is the one that most developers
will use, and the one that receives the most attention from the core

@@ -58,17 +58,6 @@ detects something older than 3.8.

See issue `#407 <https://github.com/dotcloud/docker/issues/407>`_ for details.


AUFS support
------------

Docker currently relies on AUFS, an unioning filesystem.
While AUFS is included in the kernels built by the Debian and Ubuntu
distributions, is not part of the standard kernel. This means that if
you decide to roll your own kernel, you will have to patch your
kernel tree to add AUFS. The process is documented on
`AUFS webpage <http://aufs.sourceforge.net/>`_.


Cgroups and namespaces
----------------------

@@ -122,3 +111,40 @@ And replace it by the following one::

GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"

Then run ``update-grub``, and reboot.

Details
-------

Networking:

- CONFIG_BRIDGE
- CONFIG_NETFILTER_XT_MATCH_ADDRTYPE
- CONFIG_NF_NAT
- CONFIG_NF_NAT_IPV4
- CONFIG_NF_NAT_NEEDED

LVM:

- CONFIG_BLK_DEV_DM
- CONFIG_DM_THIN_PROVISIONING
- CONFIG_EXT4_FS

Namespaces:

- CONFIG_NAMESPACES
- CONFIG_UTS_NS
- CONFIG_IPC_NS
- CONFIG_UID_NS
- CONFIG_PID_NS
- CONFIG_NET_NS

Cgroups:

- CONFIG_CGROUPS

Cgroup controllers (optional but highly recommended):

- CONFIG_CGROUP_CPUACCT
- CONFIG_BLK_CGROUP
- CONFIG_MEMCG
- CONFIG_MEMCG_SWAP
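
A quick way to verify these options against your running kernel, assuming your
distribution exposes the kernel config in one of the usual places, is:

.. code-block:: bash

# check an option against the installed kernel's config file
grep CONFIG_NAMESPACES /boot/config-$(uname -r)

# or, if your kernel exposes its config at /proc/config.gz
zgrep CONFIG_MEMCG /proc/config.gz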

@@ -2,7 +2,6 @@

:description: Installing Docker on Ubuntu provided by Rackspace
:keywords: Rackspace Cloud, installation, docker, linux, ubuntu

===============
Rackspace Cloud
===============

@@ -14,14 +13,14 @@ straightforward, and you should mostly be able to follow the

**However, there is one caveat:**

If you are using any linux not already shipping with the 3.8 kernel
If you are using any Linux not already shipping with the 3.8 kernel
you will need to install it. And this is a little more difficult on
Rackspace.

Rackspace boots their servers using grub's ``menu.lst`` and does not
like non 'virtual' packages (e.g. xen compatible) kernels there,
although they do work. This makes ``update-grub`` to not have the
expected result, and you need to set the kernel manually.
like non 'virtual' packages (e.g. Xen compatible) kernels there,
although they do work. This results in ``update-grub`` not having the
expected result, and you will need to set the kernel manually.

**Do not attempt this on a production machine!**

@@ -34,7 +33,7 @@ expected result, and you need to set the kernel manually.

apt-get install linux-generic-lts-raring


Great, now you have kernel installed in ``/boot/``, next is to make it
Great, now you have the kernel installed in ``/boot/``, next you need to make it
boot next time.

.. code-block:: bash

@@ -48,9 +47,9 @@ boot next time.

Now you need to manually edit ``/boot/grub/menu.lst``, you will find a
section at the bottom with the existing options. Copy the top one and
substitute the new kernel into that. Make sure the new kernel is on
top, and double check kernel and initrd point to the right files.
top, and double check the kernel and initrd lines point to the right files.

Make special care to double check the kernel and initrd entries.
Take special care to double check the kernel and initrd entries.

.. code-block:: bash

@@ -79,7 +78,7 @@ It will probably look something like this:

initrd /boot/initrd.img-3.2.0-38-virtual


Reboot server (either via command line or console)
Reboot the server (either via command line or console)

.. code-block:: bash

71
docs/sources/installation/rhel.rst
Normal file
@@ -0,0 +1,71 @@

:title: Requirements and Installation on Red Hat Enterprise Linux
:description: Please note this project is currently under heavy development. It should not be used in production.
:keywords: Docker, Docker documentation, requirements, linux, rhel, centos

.. _rhel:

Red Hat Enterprise Linux
========================

.. include:: install_header.inc

.. include:: install_unofficial.inc

Docker is available for **RHEL** on EPEL. These instructions should work for
both RHEL and CentOS. They will likely work for other binary compatible EL6
distributions as well, but they haven't been tested.

Please note that this package is part of `Extra Packages for Enterprise
Linux (EPEL)`_, a community effort to create and maintain additional packages
for the RHEL distribution.

Also note that, due to current limitations, Docker is able to run
only on the **64 bit** architecture.

Installation
------------

Firstly, you need to install the EPEL repository. Please follow the `EPEL installation instructions`_.

Next, let's install the ``docker-io`` package, which will install Docker on our host.

.. code-block:: bash

sudo yum -y install docker-io

To update the ``docker-io`` package:

.. code-block:: bash

sudo yum -y update docker-io

Now that it's installed, let's start the Docker daemon.

.. code-block:: bash

sudo service docker start

If we want Docker to start at boot, we should also:

.. code-block:: bash

sudo chkconfig docker on

Now let's verify that Docker is working.

.. code-block:: bash

sudo docker run -i -t mattdm/fedora /bin/bash

**Done!** Now continue with the :ref:`hello_world` example.

Issues?
-------

If you have any issues, please report them directly in the `Red Hat Bugzilla for docker-io component`_.

.. _Extra Packages for Enterprise Linux (EPEL): https://fedoraproject.org/wiki/EPEL
.. _EPEL installation instructions: https://fedoraproject.org/wiki/EPEL#How_can_I_use_these_extra_packages.3F
.. _Red Hat Bugzilla for docker-io component: https://bugzilla.redhat.com/enter_bug.cgi?product=Fedora%20EPEL&component=docker-io

@@ -4,8 +4,8 @@

.. _ubuntu_linux:

Ubuntu Linux
============
Ubuntu
======

.. warning::

@@ -14,16 +14,11 @@ Ubuntu Linux

.. include:: install_header.inc

Right now, the officially supported distribution are:
Docker is supported on the following versions of Ubuntu:

- :ref:`ubuntu_precise`
- :ref:`ubuntu_raring`

Docker has the following dependencies

* Linux kernel 3.8 (read more about :ref:`kernel`)
* AUFS file system support (we are working on BTRFS support as an alternative)

Please read :ref:`ufw`, if you plan to use `UFW (Uncomplicated
Firewall) <https://help.ubuntu.com/community/UFW>`_

@@ -68,34 +63,48 @@ Installation

These instructions have changed for 0.6. If you are upgrading from
an earlier version, you will need to follow them again.

Docker is available as a Debian package, which makes installation easy.
Docker is available as a Debian package, which makes installation
easy. **See the :ref:`installmirrors` section below if you are not in
the United States.** Other sources of the Debian packages may be
faster for you to install.

First add the Docker repository key to your local keychain. You can use the
``apt-key`` command to check the fingerprint matches: ``36A1 D786 9245 C895 0F96
6E92 D857 6A8B A88D 21E9``

.. code-block:: bash

# Add the Docker repository key to your local keychain
# using apt-key finger you can check the fingerprint matches 36A1 D786 9245 C895 0F96 6E92 D857 6A8B A88D 21E9
sudo sh -c "wget -qO- https://get.docker.io/gpg | apt-key add -"

# Add the Docker repository to your apt sources list.
sudo sh -c "echo deb http://get.docker.io/ubuntu docker main\
> /etc/apt/sources.list.d/docker.list"
Add the Docker repository to your apt sources list, update and install the
``lxc-docker`` package.

# Update your sources
sudo apt-get update

# Install, you will see another warning that the package cannot be authenticated. Confirm install.
sudo apt-get install lxc-docker

Verify it worked
*You may receive a warning that the package isn't trusted. Answer yes to
continue installation.*

.. code-block:: bash

sudo sh -c "echo deb http://get.docker.io/ubuntu docker main\
> /etc/apt/sources.list.d/docker.list"
sudo apt-get update
sudo apt-get install lxc-docker

.. note::

There is also a simple ``curl`` script available to help with this process.

.. code-block:: bash

curl -s https://get.docker.io/ubuntu/ | sudo sh

Now verify that the installation has worked by downloading the ``ubuntu`` image
and launching a container.

.. code-block:: bash

# download the base 'ubuntu' container and run bash inside it while setting up an interactive shell
sudo docker run -i -t ubuntu /bin/bash

# type 'exit' to exit

Type ``exit`` to exit

**Done!** Now continue with the :ref:`hello_world` example.

@@ -107,10 +116,13 @@ Ubuntu Raring 13.04 (64 bit)

Dependencies
------------

**AUFS filesystem support**
**Optional AUFS filesystem support**

Ubuntu Raring already comes with the 3.8 kernel, so we don't need to install it. However, not all systems
have AUFS filesystem support enabled, so we need to install it.
have AUFS filesystem support enabled. AUFS support is optional as of version 0.7, but it's still available as
a driver and we recommend using it if you can.

To make sure AUFS is installed, run the following commands:

.. code-block:: bash

@@ -123,36 +135,37 @@ Installation

Docker is available as a Debian package, which makes installation easy.

*Please note that these instructions have changed for 0.6. If you are upgrading from an earlier version, you will need
to follow them again.*
.. warning::

Please note that these instructions have changed for 0.6. If you are upgrading from an earlier version, you will need
to follow them again.

First add the Docker repository key to your local keychain. You can use the
``apt-key`` command to check the fingerprint matches: ``36A1 D786 9245 C895 0F96
6E92 D857 6A8B A88D 21E9``

.. code-block:: bash

# Add the Docker repository key to your local keychain
# using apt-key finger you can check the fingerprint matches 36A1 D786 9245 C895 0F96 6E92 D857 6A8B A88D 21E9
sudo sh -c "wget -qO- https://get.docker.io/gpg | apt-key add -"

# Add the Docker repository to your apt sources list.
sudo sh -c "echo deb http://get.docker.io/ubuntu docker main\
> /etc/apt/sources.list.d/docker.list"

# update
sudo apt-get update

# install
sudo apt-get install lxc-docker


Verify it worked
Add the Docker repository to your apt sources list, update and install the
``lxc-docker`` package.

.. code-block:: bash

sudo sh -c "echo deb http://get.docker.io/ubuntu docker main\
> /etc/apt/sources.list.d/docker.list"
sudo apt-get update
sudo apt-get install lxc-docker

Now verify that the installation has worked by downloading the ``ubuntu`` image
and launching a container.

.. code-block:: bash

# download the base 'ubuntu' container
# and run bash inside it while setting up an interactive shell
sudo docker run -i -t ubuntu /bin/bash

# type exit to exit

Type ``exit`` to exit

**Done!** Now continue with the :ref:`hello_world` example.

@@ -162,8 +175,8 @@ Verify it worked

Docker and UFW
^^^^^^^^^^^^^^

Docker uses a bridge to manage container networking. By default, UFW
drops all `forwarding`, thus a first step is to enable UFW forwarding:
Docker uses a bridge to manage container networking. By default, UFW drops all
`forwarding` traffic. As a result you will need to enable UFW forwarding:

.. code-block:: bash

@@ -181,11 +194,33 @@ Then reload UFW:

sudo ufw reload


UFW's default set of rules denied all `incoming`, so if you want to be
able to reach your containers from another host, you should allow
incoming connections on the docker port (default 4243):
UFW's default set of rules denies all `incoming` traffic. If you want to be
able to reach your containers from another host then you should allow
incoming connections on the Docker port (default 4243):

.. code-block:: bash

sudo ufw allow 4243/tcp

.. _installmirrors:

Mirrors
^^^^^^^

You should ``ping get.docker.io`` and compare the latency to the
following mirrors, and pick whichever one is best for you.

Yandex
------

`Yandex <http://yandex.ru/>`_ in Russia is mirroring the Docker Debian
packages, updating every 6 hours. Substitute
``http://mirror.yandex.ru/mirrors/docker/`` for
``http://get.docker.io/ubuntu`` in the instructions above. For example:

.. code-block:: bash

sudo sh -c "echo deb http://mirror.yandex.ru/mirrors/docker/ docker main\
> /etc/apt/sources.list.d/docker.list"
sudo apt-get update
sudo apt-get install lxc-docker

183
docs/sources/use/ambassador_pattern_linking.rst
Normal file
@@ -0,0 +1,183 @@

:title: Link via an Ambassador Container
:description: Using the Ambassador pattern to abstract (network) services
:keywords: Examples, Usage, links, docker, documentation, examples, names, name, container naming

.. _ambassador_pattern_linking:

Link via an Ambassador Container
================================

Rather than hardcoding network links between a service consumer and provider, Docker
encourages service portability.

e.g. instead of

.. code-block:: bash

(consumer) --> (redis)

requiring you to restart the ``consumer`` to attach it to a different ``redis`` service,
you can add ambassadors

.. code-block:: bash

(consumer) --> (redis-ambassador) --> (redis)

or

(consumer) --> (redis-ambassador) ---network---> (redis-ambassador) --> (redis)

When you need to rewire your consumer to talk to a different redis server, you
can just restart the ``redis-ambassador`` container that the consumer is connected to.

This pattern also allows you to transparently move the redis server to a different
docker host from the consumer.

Using the ``svendowideit/ambassador`` container, the link wiring is controlled entirely
from the ``docker run`` parameters.

Two host Example
----------------

Start the actual redis server on one Docker host

.. code-block:: bash

big-server $ docker run -d -name redis crosbymichael/redis

Then add an ambassador linked to the redis server, mapping a port to the outside world

.. code-block:: bash

big-server $ docker run -d -link redis:redis -name redis_ambassador -p 6379:6379 svendowideit/ambassador

On the other host, you can set up another ambassador, setting environment variables for each remote port we want to proxy to the ``big-server``

.. code-block:: bash

client-server $ docker run -d -name redis_ambassador -expose 6379 -e REDIS_PORT_6379_TCP=tcp://192.168.1.52:6379 svendowideit/ambassador

Then on the ``client-server`` host, you can use a redis client container to talk
to the remote redis server, just by linking to the local redis ambassador.

.. code-block:: bash

client-server $ docker run -i -t -rm -link redis_ambassador:redis relateiq/redis-cli
redis 172.17.0.160:6379> ping
PONG


How it works
------------

The following example shows what the ``svendowideit/ambassador`` container does
automatically (with a tiny amount of ``sed``)

On the docker host (192.168.1.52) that redis will run on:

.. code-block:: bash

# start actual redis server
$ docker run -d -name redis crosbymichael/redis

# get a redis-cli container for connection testing
$ docker pull relateiq/redis-cli

# test the redis server by talking to it directly
$ docker run -t -i -rm -link redis:redis relateiq/redis-cli
redis 172.17.0.136:6379> ping
PONG
^D

# add redis ambassador
$ docker run -t -i -link redis:redis -name redis_ambassador -p 6379:6379 busybox sh

In the redis_ambassador container, you can see the linked redis container's env

.. code-block:: bash

$ env
REDIS_PORT=tcp://172.17.0.136:6379
REDIS_PORT_6379_TCP_ADDR=172.17.0.136
REDIS_NAME=/redis_ambassador/redis
HOSTNAME=19d7adf4705e
REDIS_PORT_6379_TCP_PORT=6379
HOME=/
REDIS_PORT_6379_TCP_PROTO=tcp
container=lxc
REDIS_PORT_6379_TCP=tcp://172.17.0.136:6379
TERM=xterm
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
PWD=/


This environment is used by the ambassador socat script to expose redis to the world
(via the -p 6379:6379 port mapping)

.. code-block:: bash

$ docker rm redis_ambassador
$ sudo ./contrib/mkimage-unittest.sh
$ docker run -t -i -link redis:redis -name redis_ambassador -p 6379:6379 docker-ut sh

$ socat TCP4-LISTEN:6379,fork,reuseaddr TCP4:172.17.0.136:6379

then ping the redis server via the ambassador

.. code-block:: bash

$ docker run -i -t -rm -link redis_ambassador:redis relateiq/redis-cli
redis 172.17.0.160:6379> ping
PONG

Now go to a different server

.. code-block:: bash

$ sudo ./contrib/mkimage-unittest.sh
$ docker run -t -i -expose 6379 -name redis_ambassador docker-ut sh

$ socat TCP4-LISTEN:6379,fork,reuseaddr TCP4:192.168.1.52:6379

and get the redis-cli image so we can talk over the ambassador bridge

.. code-block:: bash

$ docker pull relateiq/redis-cli
$ docker run -i -t -rm -link redis_ambassador:redis relateiq/redis-cli
redis 172.17.0.160:6379> ping
PONG

The svendowideit/ambassador Dockerfile
--------------------------------------

The ``svendowideit/ambassador`` image is a small busybox image with ``socat`` built in.
When you start the container, it uses a small ``sed`` script to parse out the (possibly multiple)
link environment variables to set up the port forwarding. On the remote host, you need to set the
variable using the ``-e`` command line option.

``-expose 1234 -e REDIS_PORT_1234_TCP=tcp://192.168.1.52:6379`` will forward the
local ``1234`` port to the remote IP and port - in this case ``192.168.1.52:6379``.
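
For example, given that setting, the ``sed`` one-liner in the Dockerfile below
expands the environment variable into this background ``socat`` forwarder:

.. code-block:: bash

# input:  REDIS_PORT_1234_TCP=tcp://192.168.1.52:6379
# output (the command the ambassador's CMD then runs):
socat TCP4-LISTEN:1234,fork,reuseaddr TCP4:192.168.1.52:6379 &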


::

#
#
# first you need to build the docker-ut image
# using ./contrib/mkimage-unittest.sh
# then
# docker build -t SvenDowideit/ambassador .
# docker tag SvenDowideit/ambassador ambassador
# then to run it (on the host that has the real backend on it)
# docker run -t -i -link redis:redis -name redis_ambassador -p 6379:6379 ambassador
# on the remote host, you can set up another ambassador
# docker run -t -i -name redis_ambassador -expose 6379 sh

FROM docker-ut
MAINTAINER SvenDowideit@home.org.au


CMD env | grep _TCP= | sed 's/.*_PORT_\([0-9]*\)_TCP=tcp:\/\/\(.*\):\(.*\)/socat TCP4-LISTEN:\1,fork,reuseaddr TCP4:\2:\3 \&/' | sh && top

@@ -1,10 +1,10 @@

:title: Base Image Creation
:title: Create a Base Image
:description: How to create base images
:keywords: Examples, Usage, base image, docker, documentation, examples

.. _base_image_creation:

Base Image Creation
Create a Base Image
===================

So you want to create your own :ref:`base_image_def`? Great!

@@ -37,7 +37,7 @@ There are more example scripts for creating base images in the

Docker Github Repo:

* `BusyBox <https://github.com/dotcloud/docker/blob/master/contrib/mkimage-busybox.sh>`_
* `CentOS
<https://github.com/dotcloud/docker/blob/master/contrib/mkimage-centos.sh>`_
* `Debian/Ubuntu
* `CentOS / Scientific Linux CERN (SLC)
<https://github.com/dotcloud/docker/blob/master/contrib/mkimage-rinse.sh>`_
* `Debian / Ubuntu
<https://github.com/dotcloud/docker/blob/master/contrib/mkimage-debootstrap.sh>`_

@@ -1,10 +1,10 @@

:title: Basic Commands
:title: Learn Basic Commands
:description: Common usage and commands
:keywords: Examples, Usage, basic commands, docker, documentation, examples


The Basics
==========
Learn Basic Commands
====================

Starting Docker
---------------

@@ -67,7 +67,7 @@ daemon will make the ownership of the Unix socket read/writable by the

*docker* group when the daemon starts. The ``docker`` daemon must
always run as root, but if you run the ``docker`` client as a user in
the *docker* group then you don't need to add ``sudo`` to all the
client commands.
client commands. Warning: the *docker* group is root-equivalent.

**Example:**

@@ -76,11 +76,11 @@ client commands.

# Add the docker group if it doesn't already exist.
sudo groupadd docker

# Add the user "ubuntu" to the docker group.
# Add the connected user "${USERNAME}" to the docker group.
# Change the user name to match your preferred user.
# You may have to logout and log back in again for
# this to take effect.
sudo gpasswd -a ubuntu docker
sudo gpasswd -a ${USERNAME} docker

# Restart the docker daemon.
sudo service docker restart

@@ -1,12 +1,12 @@

:title: Dockerfiles for Images
:title: Build Images (Dockerfile Reference)
:description: Dockerfiles use a simple DSL which allows you to automate the steps you would normally manually take to create an image.
:keywords: builder, docker, Dockerfile, automation, image creation

.. _dockerbuilder:

======================
Dockerfiles for Images
======================
===================================
Build Images (Dockerfile Reference)
===================================

**Docker can act as a builder** and read instructions from a text
``Dockerfile`` to automate the steps you would otherwise take manually

@@ -15,27 +15,39 @@ commit them along the way, giving you a final image.

.. contents:: Table of Contents

.. _dockerfile_usage:

1. Usage
========

To build an image from a source repository, create a description file
called ``Dockerfile`` at the root of your repository. This file will
describe the steps to assemble the image.
To :ref:`build <cli_build>` an image from a source repository, create
a description file called ``Dockerfile`` at the root of your
repository. This file will describe the steps to assemble the image.

Then call ``docker build`` with the path of your source repository as
argument:
argument (for example, ``.``):

``sudo docker build .``

The path to the source repository defines where to find the *context*
of the build. The build is run by the Docker daemon, not by the CLI,
|
||||
so the whole context must be transferred to the daemon. The Docker CLI
|
||||
reports "Uploading context" when the context is sent to the daemon.
|
||||
|
||||
You can specify a repository and tag at which to save the new image if the
|
||||
build succeeds:
|
||||
|
||||
``sudo docker build -t shykes/myapp .``
|
||||
|
||||
Docker will run your steps one-by-one, committing the result if necessary,
|
||||
before finally outputting the ID of your new image.
|
||||
The Docker daemon will run your steps one-by-one, committing the
|
||||
result if necessary, before finally outputting the ID of your new
|
||||
image. The Docker daemon will automatically clean up the context you
|
||||
sent.
|
||||
|
||||
When you're done with your build, you're ready to look into :ref:`image_push`.
|
||||
When you're done with your build, you're ready to look into
|
||||
:ref:`image_push`.
|
||||
|
||||
.. _dockerfile_format:
|
||||
|
||||
2. Format
|
||||
=========
|
||||
@@ -63,12 +75,16 @@ allows statements like:
|
||||
# Comment
|
||||
RUN echo 'we are running some # of cool things'
|
||||
|
||||
.. _dockerfile_instructions:
|
||||
|
||||
3. Instructions
|
||||
===============
|
||||
|
||||
Here is the set of instructions you can use in a ``Dockerfile`` for
|
||||
building images.
|
||||
|
||||
.. _dockerfile_from:
|
||||
|
||||
3.1 FROM
|
||||
--------
|
||||
|
||||
@@ -94,6 +110,8 @@ output by the commit before each new ``FROM`` command.
|
||||
If no ``tag`` is given to the ``FROM`` instruction, ``latest`` is
|
||||
assumed. If the used tag does not exist, an error will be returned.
|
||||
|
||||
.. _dockerfile_maintainer:
|
||||
|
||||
3.2 MAINTAINER
|
||||
--------------
|
||||
|
||||
@@ -102,6 +120,8 @@ assumed. If the used tag does not exist, an error will be returned.
|
||||
The ``MAINTAINER`` instruction allows you to set the *Author* field of
|
||||
the generated images.
|
||||
|
||||
.. _dockerfile_run:
|
||||
|
||||
3.3 RUN
|
||||
-------
|
||||
|
||||
@@ -124,7 +144,7 @@ Known Issues (RUN)
|
||||
``rm`` a file, for example. The issue describes a workaround.
|
||||
* :issue:`2424` Locale will not be set automatically.
|
||||
|
||||
|
||||
.. _dockerfile_cmd:
|
||||
|
||||
3.4 CMD
|
||||
-------
|
||||
@@ -169,7 +189,7 @@ array:
|
||||
|
||||
If you would like your container to run the same executable every
|
||||
time, then you should consider using ``ENTRYPOINT`` in combination
|
||||
with ``CMD``. See :ref:`entrypoint_def`.
|
||||
with ``CMD``. See :ref:`dockerfile_entrypoint`.
|
||||
|
||||
If the user specifies arguments to ``docker run`` then they will
|
||||
override the default specified in CMD.
|
||||
@@ -179,6 +199,8 @@ override the default specified in CMD.
|
||||
command and commits the result; ``CMD`` does not execute anything at
|
||||
build time, but specifies the intended command for the image.
|
||||
|
||||
.. _dockerfile_expose:
|
||||
|
||||
3.5 EXPOSE
|
||||
----------
|
||||
|
||||
@@ -189,6 +211,8 @@ functionally equivalent to running ``docker commit -run '{"PortSpecs":
|
||||
["<port>", "<port2>"]}'`` outside the builder. Refer to
|
||||
:ref:`port_redirection` for detailed information.
|
||||
|
||||
.. _dockerfile_env:
|
||||
|
||||
3.6 ENV
|
||||
-------
|
||||
|
||||
@@ -203,6 +227,8 @@ with ``<key>=<value>``
|
||||
The environment variables will persist when a container is run
|
||||
from the resulting image.
|
||||
|
||||
.. _dockerfile_add:
|
||||
|
||||
3.7 ADD
|
||||
-------
|
||||
|
||||
@@ -263,7 +289,7 @@ The copy obeys the following rules:
|
||||
* If ``<dest>`` doesn't exist, it is created along with all missing
|
||||
directories in its path.
|
||||
|
||||
.. _entrypoint_def:
|
||||
.. _dockerfile_entrypoint:
|
||||
|
||||
3.8 ENTRYPOINT
|
||||
--------------
|
||||
@@ -312,6 +338,7 @@ this optional but default, you could use a CMD:
|
||||
CMD ["-l", "-"]
|
||||
ENTRYPOINT ["/usr/bin/wc"]
|
||||
|
||||
.. _dockerfile_volume:
|
||||
|
||||
3.9 VOLUME
|
||||
----------
|
||||
@@ -322,6 +349,8 @@ The ``VOLUME`` instruction will create a mount point with the specified name and
|
||||
as holding externally mounted volumes from native host or other containers. For more information/examples
|
||||
and mounting instructions via docker client, refer to :ref:`volume_def` documentation.
|
||||
|
||||
.. _dockerfile_user:
|
||||
|
||||
3.10 USER
|
||||
---------
|
||||
|
||||
@@ -330,6 +359,8 @@ and mounting instructions via docker client, refer to :ref:`volume_def` document
|
||||
The ``USER`` instruction sets the username or UID to use when running
|
||||
the image.
|
||||
|
||||
.. _dockerfile_workdir:
|
||||
|
||||
3.11 WORKDIR
|
||||
------------
|
||||
|
||||
@@ -338,6 +369,7 @@ the image.
|
||||
The ``WORKDIR`` instruction sets the working directory in which
|
||||
the command given by ``CMD`` is executed.
|
||||
|
||||
.. _dockerfile_examples:
|
||||
|
||||
4. Dockerfile Examples
|
||||
======================
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
:title: Host Integration
|
||||
:title: Automatically Start Containers
|
||||
:description: How to generate scripts for upstart, systemd, etc.
|
||||
:keywords: systemd, upstart, supervisor, docker, documentation, host integration
|
||||
|
||||
|
||||
|
||||
Host Integration
|
||||
================
|
||||
Automatically Start Containers
|
||||
==============================
|
||||
|
||||
You can use your Docker containers with process managers like ``upstart``,
|
||||
``systemd`` and ``supervisor``.
|
||||
|
||||
@@ -17,7 +17,9 @@ Contents:
|
||||
workingwithrepository
|
||||
baseimages
|
||||
port_redirection
|
||||
puppet
|
||||
networking
|
||||
host_integration
|
||||
working_with_volumes
|
||||
working_with_links_names
|
||||
ambassador_pattern_linking
|
||||
puppet
|
||||
|
||||
153
docs/sources/use/networking.rst
Normal file
@@ -0,0 +1,153 @@
:title: Configure Networking
:description: Docker networking
:keywords: network, networking, bridge, docker, documentation


Configure Networking
====================

Docker uses Linux bridge capabilities to provide network connectivity
to containers. The ``docker0`` bridge interface is managed by Docker
itself for this purpose. Thus, when the Docker daemon starts, it:

- creates the ``docker0`` bridge if not present
- searches for an IP address range which doesn't overlap with an existing route
- picks an IP in the selected range
- assigns this IP to the ``docker0`` bridge


.. code-block:: bash

    # List host bridges
    $ sudo brctl show
    bridge name     bridge id               STP enabled     interfaces
    docker0         8000.000000000000       no

    # Show docker0 IP address
    $ sudo ifconfig docker0
    docker0   Link encap:Ethernet  HWaddr xx:xx:xx:xx:xx:xx
              inet addr:172.17.42.1  Bcast:0.0.0.0  Mask:255.255.0.0


At runtime, a :ref:`specific kind of virtual
interface<vethxxxx-device>` is given to each container, which is then
bonded to the ``docker0`` bridge. Each container also receives a
dedicated IP address from the same range as ``docker0``. The
``docker0`` IP address is then used as the default gateway for the
containers.

.. code-block:: bash

    # Run a container
    $ sudo docker run -t -i -d base /bin/bash
    52f811c5d3d69edddefc75aff5a4525fc8ba8bcfa1818132f9dc7d4f7c7e78b4

    $ sudo brctl show
    bridge name     bridge id               STP enabled     interfaces
    docker0         8000.fef213db5a66       no              vethQCDY1N


Above, ``docker0`` acts as a bridge for the ``vethQCDY1N`` interface
which is dedicated to the 52f811c5d3d6 container.


How to use a specific IP address range
--------------------------------------

Docker will try hard to find an IP range which is not used by the
host. While this works in most cases, it's not bullet-proof and
sometimes you need more control over the IP addressing scheme.

For this purpose, Docker allows you to manage the ``docker0`` bridge,
or your own bridge, using the ``-b=<bridgename>`` parameter.

In this scenario:

- ensure Docker is stopped
- create your own bridge (``bridge0`` for example)
- assign a specific IP to this bridge
- start Docker with the ``-b=bridge0`` parameter


.. code-block:: bash

    # Stop Docker
    $ sudo service docker stop

    # Clean docker0 bridge and
    # add your very own bridge0
    $ sudo ifconfig docker0 down
    $ sudo brctl addbr bridge0
    $ sudo ifconfig bridge0 192.168.227.1 netmask 255.255.255.0

    # Edit your Docker startup file
    $ echo "DOCKER_OPTS=\"-b=bridge0\"" >> /etc/default/docker

    # Start Docker
    $ sudo service docker start

    # Ensure bridge0 IP is not changed by Docker
    $ sudo ifconfig bridge0
    bridge0   Link encap:Ethernet  HWaddr xx:xx:xx:xx:xx:xx
              inet addr:192.168.227.1  Bcast:192.168.227.255  Mask:255.255.255.0

    # Run a container
    $ docker run -i -t base /bin/bash

    # Container IP in the 192.168.227/24 range
    root@261c272cd7d5:/# ifconfig eth0
    eth0      Link encap:Ethernet  HWaddr xx:xx:xx:xx:xx:xx
              inet addr:192.168.227.5  Bcast:192.168.227.255  Mask:255.255.255.0

    # bridge0 IP as the default gateway
    root@261c272cd7d5:/# route -n
    Kernel IP routing table
    Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
    0.0.0.0         192.168.227.1   0.0.0.0         UG    0      0        0 eth0
    192.168.227.0   0.0.0.0         255.255.255.0   U     0      0        0 eth0

    # hit CTRL+P then CTRL+Q to detach

    # Display bridge info
    $ sudo brctl show
    bridge name     bridge id               STP enabled     interfaces
    bridge0         8000.fe7c2e0faebd       no              vethAQI2QT


Container intercommunication
----------------------------

Containers can communicate with each other according to the ``icc``
parameter value of the Docker daemon.

- The default, ``-icc=true``, allows containers to communicate with each other.
- ``-icc=false`` means containers are isolated from each other.

Under the hood, ``iptables`` is used by Docker to either accept or
drop communication between containers.


.. _vethxxxx-device:

What about the vethXXXX device?
-------------------------------

Well, things get complicated here.

The ``vethXXXX`` interface is the host side of a point-to-point link
between the host and the corresponding container; the other side of
the link is materialized by the container's ``eth0``
interface. This pair (host ``vethXXXX`` and container ``eth0``) is
connected like a tube: everything that comes in one side will come out
the other side.

All the plumbing is delegated to Linux network capabilities (check the
``ip link`` command) and the namespaces infrastructure.


I want more
-----------

Jérôme Petazzoni has created ``pipework`` to connect
containers in arbitrarily complex scenarios:
https://github.com/jpetazzo/pipework

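
The host-side ``veth`` devices are ordinary network interfaces, so you can
also inspect them programmatically; a minimal Go sketch using only the
standard library:

.. code-block:: go

    package main

    import (
        "fmt"
        "net"
        "strings"
    )

    func main() {
        ifaces, err := net.Interfaces()
        if err != nil {
            panic(err)
        }
        for _, iface := range ifaces {
            // Host-side ends of the point-to-point links show up as vethXXXX.
            if strings.HasPrefix(iface.Name, "veth") || iface.Name == "docker0" {
                fmt.Println(iface.Name, iface.HardwareAddr)
            }
        }
    }
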
@@ -1,12 +1,12 @@
|
||||
:title: Port redirection
|
||||
:title: Redirect Ports
|
||||
:description: usage about port redirection
|
||||
:keywords: Usage, basic port, docker, documentation, examples
|
||||
|
||||
|
||||
.. _port_redirection:
|
||||
|
||||
Port redirection
|
||||
================
|
||||
Redirect Ports
|
||||
==============
|
||||
|
||||
Interacting with a service is commonly done through a connection to a
|
||||
port. When this service runs inside a container, one can connect to
|
||||
|
||||
@@ -1,15 +1,16 @@
|
||||
:title: Working with Links and Names
|
||||
:description: How to create and use links and names
|
||||
:keywords: Examples, Usage, links, docker, documentation, examples, names, name, container naming
|
||||
:title: Link Containers
|
||||
:description: How to create and use both links and names
|
||||
:keywords: Examples, Usage, links, linking, docker, documentation, examples, names, name, container naming
|
||||
|
||||
.. _working_with_links_names:
|
||||
|
||||
Working with Links and Names
|
||||
============================
|
||||
Link Containers
|
||||
===============
|
||||
|
||||
From version 0.6.5 you are now able to ``name`` a container and ``link`` it to another
|
||||
container by referring to its name. This will create a parent -> child relationship
|
||||
where the parent container can see selected information about its child.
|
||||
From version 0.6.5 you are now able to ``name`` a container and
|
||||
``link`` it to another container by referring to its name. This will
|
||||
create a parent -> child relationship where the parent container can
|
||||
see selected information about its child.
|
||||
|
||||
.. _run_name:
|
||||
|
||||
@@ -18,8 +19,9 @@ Container Naming
|
||||
|
||||
.. versionadded:: v0.6.5
|
||||
|
||||
You can now name your container by using the ``-name`` flag. If no name is provided, Docker
|
||||
will automatically generate a name. You can see this name using the ``docker ps`` command.
|
||||
You can now name your container by using the ``-name`` flag. If no
|
||||
name is provided, Docker will automatically generate a name. You can
|
||||
see this name using the ``docker ps`` command.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
@@ -38,47 +40,53 @@ Links: service discovery for docker
|
||||
|
||||
.. versionadded:: v0.6.5
|
||||
|
||||
Links allow containers to discover and securely communicate with each other by using the
|
||||
flag ``-link name:alias``. Inter-container communication can be disabled with the daemon
|
||||
flag ``-icc=false``. With this flag set to false, Container A cannot access Container B
|
||||
unless explicitly allowed via a link. This is a huge win for securing your containers.
|
||||
When two containers are linked together Docker creates a parent child relationship
|
||||
between the containers. The parent container will be able to access information via
|
||||
environment variables of the child such as name, exposed ports, IP and other selected
|
||||
environment variables.
|
||||
Links allow containers to discover and securely communicate with each
|
||||
other by using the flag ``-link name:alias``. Inter-container
|
||||
communication can be disabled with the daemon flag
|
||||
``-icc=false``. With this flag set to ``false``, Container A cannot
|
||||
access Container B unless explicitly allowed via a link. This is a
|
||||
huge win for securing your containers. When two containers are linked
|
||||
together Docker creates a parent child relationship between the
|
||||
containers. The parent container will be able to access information
|
||||
via environment variables of the child such as name, exposed ports, IP
|
||||
and other selected environment variables.
|
||||
|
||||
When linking two containers Docker will use the exposed ports of the container to create
|
||||
a secure tunnel for the parent to access. If a database container only exposes port 8080
|
||||
then the linked container will only be allowed to access port 8080 and nothing else if
|
||||
When linking two containers Docker will use the exposed ports of the
|
||||
container to create a secure tunnel for the parent to access. If a
|
||||
database container only exposes port 8080 then the linked container
|
||||
will only be allowed to access port 8080 and nothing else if
|
||||
inter-container communication is set to false.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
# Example: there is an image called redis-2.6 that exposes the port 6379 and starts redis-server.
|
||||
# Let's name the container as "redis" based on that image and run it as daemon.
|
||||
$ sudo docker run -d -name redis redis-2.6
|
||||
|
||||
We can issue all the commands that you would expect using the name "redis"; start, stop,
|
||||
attach, using the name for our container. The name also allows us to link other containers
|
||||
into this one.
|
||||
|
||||
Next, we can start a new web application that has a dependency on Redis and apply a link
|
||||
to connect both containers. If you noticed when running our Redis server we did not use
|
||||
the -p flag to publish the Redis port to the host system. Redis exposed port 6379 and
|
||||
this is all we need to establish a link.
|
||||
For example, there is an image called ``crosbymichael/redis`` that exposes the
|
||||
port 6379 and starts the Redis server. Let's name the container as ``redis``
|
||||
based on that image and run it as daemon.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ sudo docker run -d -name redis crosbymichael/redis
|
||||
|
||||
We can issue all the commands that you would expect using the name
|
||||
``redis``; start, stop, attach, using the name for our container. The
|
||||
name also allows us to link other containers into this one.
|
||||
|
||||
Next, we can start a new web application that has a dependency on
|
||||
Redis and apply a link to connect both containers. If you noticed when
|
||||
running our Redis server we did not use the ``-p`` flag to publish the
|
||||
Redis port to the host system. Redis exposed port 6379 and this is all
|
||||
we need to establish a link.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
# Linking the redis container as a child
|
||||
$ sudo docker run -t -i -link redis:db -name webapp ubuntu bash
|
||||
|
||||
When you specified -link redis:db you are telling docker to link the container named redis
|
||||
into this new container with the alias db. Environment variables are prefixed with the alias
|
||||
so that the parent container can access network and environment information from the containers
|
||||
that are linked into it.
|
||||
When you specified ``-link redis:db`` you are telling Docker to link
|
||||
the container named ``redis`` into this new container with the alias
|
||||
``db``. Environment variables are prefixed with the alias so that the
|
||||
parent container can access network and environment information from
|
||||
the containers that are linked into it.
|
||||
|
||||
If we inspect the environment variables of the second container, we would see all the information
|
||||
about the child container.
|
||||
If we inspect the environment variables of the second container, we
|
||||
would see all the information about the child container.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
@@ -100,5 +108,17 @@ about the child container.
|
||||
_=/usr/bin/env
|
||||
root@4c01db0b339c:/#
|
||||
|
||||
Accessing the network information along with the environment of the child container allows
|
||||
us to easily connect to the Redis service on the specific IP and port in the environment.
|
||||
Accessing the network information along with the environment of the
|
||||
child container allows us to easily connect to the Redis service on
|
||||
the specific IP and port in the environment.
|
||||
|
||||
Running ``docker ps`` shows the 2 containers, and the ``webapp/db``
|
||||
alias name for the redis container.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ docker ps
|
||||
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
|
||||
4c01db0b339c ubuntu:12.04 bash 17 seconds ago Up 16 seconds webapp
|
||||
d7886598dbe2 crosbymichael/redis:latest /redis-server --dir 33 minutes ago Up 33 minutes 6379/tcp redis,webapp/db
|
||||
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
:title: Working with Volumes
|
||||
:title: Share Directories via Volumes
|
||||
:description: How to create and share volumes
|
||||
:keywords: Examples, Usage, volume, docker, documentation, examples
|
||||
|
||||
.. _volume_def:
|
||||
|
||||
Data Volume
|
||||
===========
|
||||
Share Directories via Volumes
|
||||
=============================
|
||||
|
||||
.. versionadded:: v0.3.0
|
||||
Data volumes have been available since version 1 of the
|
||||
@@ -46,7 +46,7 @@ volumes to any container created from the image::
|
||||
Mount Volumes from an Existing Container:
|
||||
-----------------------------------------
|
||||
|
||||
The command below creates a new container which is runnning as daemon
|
||||
The command below creates a new container which is running as daemon
|
||||
``-d`` and with one volume ``/var/lib/couchdb``::
|
||||
|
||||
COUCH1=$(sudo docker run -d -v /var/lib/couchdb shykes/couchdb:2013-05-03)
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
:title: Working With Repositories
|
||||
:title: Share Images via Repositories
|
||||
:description: Repositories allow users to share images.
|
||||
:keywords: repo, repositories, usage, pull image, push image, image, documentation
|
||||
|
||||
.. _working_with_the_repository:
|
||||
|
||||
Working with Repositories
|
||||
=========================
|
||||
Share Images via Repositories
|
||||
=============================
|
||||
|
||||
A *repository* is a hosted collection of tagged :ref:`images
|
||||
<image_def>` that together create the file system for a container. The
|
||||
@@ -152,6 +152,41 @@ or tag.
|
||||
|
||||
.. _using_private_repositories:
|
||||
|
||||
Trusted Builds
|
||||
--------------
|
||||
|
||||
Trusted Builds automate the building and updating of images from GitHub, directly
|
||||
on docker.io servers. It works by adding a commit hook to your selected repository,
|
||||
triggering a build and update when you push a commit.
|
||||
|
||||
To set up a Trusted Build
+++++++++++++++++++++++++
|
||||
|
||||
#. Create a `Docker Index account <https://index.docker.io/>`_ and log in.
|
||||
#. Link your GitHub account through the ``Link Accounts`` menu.
|
||||
#. `Configure a Trusted build <https://index.docker.io/builds/>`_.
|
||||
#. Pick a GitHub project that has a ``Dockerfile`` that you want to build.
|
||||
#. Pick the branch you want to build (the default is the ``master`` branch).
|
||||
#. Give the Trusted Build a name.
|
||||
#. Assign an optional Docker tag to the Build.
|
||||
#. Specify where the ``Dockerfile`` is located. The default is ``/``.
|
||||
|
||||
Once the Trusted Build is configured it will automatically trigger a build, and
|
||||
in a few minutes, if there are no errors, you will see your new trusted build
|
||||
on the Docker Index. It will stay in sync with your GitHub repo until you
|
||||
deactivate the Trusted Build.
|
||||
|
||||
If you want to see the status of your Trusted Builds you can go to your
|
||||
`Trusted Builds page <https://index.docker.io/builds/>`_ on the Docker index,
|
||||
and it will show you the status of your builds, and the build history.
|
||||
|
||||
Once you've created a Trusted Build you can deactivate or delete it. You cannot
|
||||
however push to a Trusted Build with the ``docker push`` command. You can only
|
||||
manage it by committing code to your GitHub repository.
|
||||
|
||||
You can create multiple Trusted Builds per repository and configure them to
|
||||
point to specific ``Dockerfile`` locations or Git branches.
|
||||
|
||||
Private Repositories
|
||||
--------------------
|
||||
|
||||
|
||||
2
docs/theme/docker/layout.html
vendored
@@ -35,7 +35,7 @@
|
||||
%}
|
||||
|
||||
{#
|
||||
This part is hopefully complex because things like |cut '/index/' are not available in spinx jinja
|
||||
This part is hopefully complex because things like |cut '/index/' are not available in Sphinx jinja
|
||||
and will make it crash (and we need index/ out).
|
||||
#}
|
||||
<link rel="canonical" href="http://docs.docker.io/en/latest/
|
||||
|
||||
20
docs/theme/docker/static/css/main.css
vendored
@@ -410,3 +410,23 @@ dt:hover > a.headerlink {
|
||||
.admonition.seealso {
|
||||
border-color: #23cb1f;
|
||||
}
|
||||
|
||||
.versionchanged,
|
||||
.versionadded,
|
||||
.versionmodified,
|
||||
.deprecated {
|
||||
font-size: larger;
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
.versionchanged {
|
||||
color: lightseagreen;
|
||||
}
|
||||
|
||||
.versionadded {
|
||||
color: mediumblue;
|
||||
}
|
||||
|
||||
.deprecated {
|
||||
color: orangered;
|
||||
}
|
||||
|
||||
@@ -1 +1 @@
|
||||
Solomon Hykes <solomon@dotcloud.com>
|
||||
#Solomon Hykes <solomon@dotcloud.com> Temporarily unavailable
|
||||
|
||||
@@ -3,13 +3,14 @@ package engine
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/dotcloud/docker/utils"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type Handler func(*Job) string
|
||||
type Handler func(*Job) Status
|
||||
|
||||
var globalHandlers map[string]Handler
|
||||
|
||||
@@ -34,6 +35,9 @@ type Engine struct {
|
||||
handlers map[string]Handler
|
||||
hack Hack // data for temporary hackery (see hack.go)
|
||||
id string
|
||||
Stdout io.Writer
|
||||
Stderr io.Writer
|
||||
Stdin io.Reader
|
||||
}
|
||||
|
||||
func (eng *Engine) Root() string {
|
||||
@@ -70,7 +74,9 @@ func New(root string) (*Engine, error) {
|
||||
log.Printf("WARNING: %s\n", err)
|
||||
} else {
|
||||
if utils.CompareKernelVersion(k, &utils.KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}) < 0 {
|
||||
log.Printf("WARNING: You are running linux kernel version %s, which might be unstable running docker. Please upgrade your kernel to 3.8.0.", k.String())
|
||||
if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" {
|
||||
log.Printf("WARNING: You are running linux kernel version %s, which might be unstable running docker. Please upgrade your kernel to 3.8.0.", k.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
if err := os.MkdirAll(root, 0700); err != nil && !os.IsExist(err) {
|
||||
@@ -80,6 +86,9 @@ func New(root string) (*Engine, error) {
|
||||
root: root,
|
||||
handlers: make(map[string]Handler),
|
||||
id: utils.RandomString(),
|
||||
Stdout: os.Stdout,
|
||||
Stderr: os.Stderr,
|
||||
Stdin: os.Stdin,
|
||||
}
|
||||
// Copy existing global handlers
|
||||
for k, v := range globalHandlers {
|
||||
@@ -99,10 +108,12 @@ func (eng *Engine) Job(name string, args ...string) *Job {
|
||||
Eng: eng,
|
||||
Name: name,
|
||||
Args: args,
|
||||
Stdin: os.Stdin,
|
||||
Stdout: os.Stdout,
|
||||
Stderr: os.Stderr,
|
||||
Stdin: NewInput(),
|
||||
Stdout: NewOutput(),
|
||||
Stderr: NewOutput(),
|
||||
env: &Env{},
|
||||
}
|
||||
job.Stderr.Add(utils.NopWriteCloser(eng.Stderr))
|
||||
handler, exists := eng.handlers[name]
|
||||
if exists {
|
||||
job.handler = handler
|
||||
@@ -112,5 +123,5 @@ func (eng *Engine) Job(name string, args ...string) *Job {
|
||||
|
||||
func (eng *Engine) Logf(format string, args ...interface{}) (n int, err error) {
|
||||
prefixedFormat := fmt.Sprintf("[%s] %s\n", eng, strings.TrimRight(format, "\n"))
|
||||
return fmt.Fprintf(os.Stderr, prefixedFormat, args...)
|
||||
return fmt.Fprintf(eng.Stderr, prefixedFormat, args...)
|
||||
}
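
Routing ``Logf`` through ``eng.Stderr`` instead of ``os.Stderr`` makes engine
log output capturable. A small sketch of the new behavior (the root directory
is made up; the import path is the one used in this repository):

.. code-block:: go

    package main

    import (
        "bytes"
        "fmt"
        "log"

        "github.com/dotcloud/docker/engine"
    )

    func main() {
        eng, err := engine.New("/tmp/engine-logf-demo") // hypothetical root
        if err != nil {
            log.Fatal(err)
        }
        var buf bytes.Buffer
        eng.Stderr = &buf // capture engine logs instead of writing to stderr
        eng.Logf("hello from %s", "the engine")
        fmt.Print(buf.String()) // "[<engine name>] hello from the engine"
    }
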
|
||||
|
||||
103
engine/engine_test.go
Normal file
@@ -0,0 +1,103 @@
|
||||
package engine
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestRegister(t *testing.T) {
|
||||
if err := Register("dummy1", nil); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := Register("dummy1", nil); err == nil {
|
||||
t.Fatalf("Expecting error, got none")
|
||||
}
|
||||
|
||||
eng := newTestEngine(t)
|
||||
|
||||
// Should fail because global handlers are copied
|
||||
// at engine creation
|
||||
if err := eng.Register("dummy1", nil); err == nil {
|
||||
t.Fatalf("Expecting error, got none")
|
||||
}
|
||||
|
||||
if err := eng.Register("dummy2", nil); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := eng.Register("dummy2", nil); err == nil {
|
||||
t.Fatalf("Expecting error, got none")
|
||||
}
|
||||
}
|
||||
|
||||
func TestJob(t *testing.T) {
|
||||
eng := newTestEngine(t)
|
||||
job1 := eng.Job("dummy1", "--level=awesome")
|
||||
|
||||
if job1.handler != nil {
|
||||
t.Fatalf("job1.handler should be empty")
|
||||
}
|
||||
|
||||
h := func(j *Job) Status {
|
||||
j.Printf("%s\n", j.Name)
|
||||
return 42
|
||||
}
|
||||
|
||||
eng.Register("dummy2", h)
|
||||
job2 := eng.Job("dummy2", "--level=awesome")
|
||||
|
||||
if job2.handler == nil {
|
||||
t.Fatalf("job2.handler shouldn't be nil")
|
||||
}
|
||||
|
||||
if job2.handler(job2) != 42 {
|
||||
t.Fatalf("handler dummy2 was not found in job2")
|
||||
}
|
||||
}
|
||||
|
||||
func TestEngineRoot(t *testing.T) {
|
||||
tmp, err := ioutil.TempDir("", "docker-test-TestEngineCreateDir")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(tmp)
|
||||
dir := path.Join(tmp, "dir")
|
||||
eng, err := New(dir)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if st, err := os.Stat(dir); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if !st.IsDir() {
|
||||
t.Fatalf("engine.New() created something other than a directory at %s", dir)
|
||||
}
|
||||
if r := eng.Root(); r != dir {
|
||||
t.Fatalf("Expected: %v\nReceived: %v", dir, r)
|
||||
}
|
||||
}
|
||||
|
||||
func TestEngineString(t *testing.T) {
|
||||
eng1 := newTestEngine(t)
|
||||
defer os.RemoveAll(eng1.Root())
|
||||
eng2 := newTestEngine(t)
|
||||
defer os.RemoveAll(eng2.Root())
|
||||
s1 := eng1.String()
|
||||
s2 := eng2.String()
|
||||
if eng1 == eng2 {
|
||||
t.Fatalf("Different engines should have different names (%v == %v)", s1, s2)
|
||||
}
|
||||
}
|
||||
|
||||
func TestEngineLogf(t *testing.T) {
|
||||
eng := newTestEngine(t)
|
||||
defer os.RemoveAll(eng.Root())
|
||||
input := "Test log line"
|
||||
if n, err := eng.Logf("%s\n", input); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if n < len(input) {
|
||||
t.Fatalf("Test: Logf() should print at least as much as the input\ninput=%d\nprinted=%d", len(input), n)
|
||||
}
|
||||
}
|
||||
234
engine/env.go
Normal file
@@ -0,0 +1,234 @@
|
||||
package engine
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type Env []string
|
||||
|
||||
func (env *Env) Get(key string) (value string) {
|
||||
// FIXME: use Map()
|
||||
for _, kv := range *env {
|
||||
if strings.Index(kv, "=") == -1 {
|
||||
continue
|
||||
}
|
||||
parts := strings.SplitN(kv, "=", 2)
|
||||
if parts[0] != key {
|
||||
continue
|
||||
}
|
||||
if len(parts) < 2 {
|
||||
value = ""
|
||||
} else {
|
||||
value = parts[1]
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (env *Env) Exists(key string) bool {
|
||||
_, exists := env.Map()[key]
|
||||
return exists
|
||||
}
|
||||
|
||||
func (env *Env) GetBool(key string) (value bool) {
|
||||
s := strings.ToLower(strings.Trim(env.Get(key), " \t"))
|
||||
if s == "" || s == "0" || s == "no" || s == "false" || s == "none" {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (env *Env) SetBool(key string, value bool) {
|
||||
if value {
|
||||
env.Set(key, "1")
|
||||
} else {
|
||||
env.Set(key, "0")
|
||||
}
|
||||
}
|
||||
|
||||
func (env *Env) GetInt(key string) int {
|
||||
return int(env.GetInt64(key))
|
||||
}
|
||||
|
||||
func (env *Env) GetInt64(key string) int64 {
|
||||
s := strings.Trim(env.Get(key), " \t")
|
||||
val, err := strconv.ParseInt(s, 10, 64)
|
||||
if err != nil {
|
||||
return -1
|
||||
}
|
||||
return val
|
||||
}
|
||||
|
||||
func (env *Env) SetInt(key string, value int) {
|
||||
env.Set(key, fmt.Sprintf("%d", value))
|
||||
}
|
||||
|
||||
func (env *Env) SetInt64(key string, value int64) {
|
||||
env.Set(key, fmt.Sprintf("%d", value))
|
||||
}
|
||||
|
||||
// Returns nil if key not found
|
||||
func (env *Env) GetList(key string) []string {
|
||||
sval := env.Get(key)
|
||||
if sval == "" {
|
||||
return nil
|
||||
}
|
||||
l := make([]string, 0, 1)
|
||||
if err := json.Unmarshal([]byte(sval), &l); err != nil {
|
||||
l = append(l, sval)
|
||||
}
|
||||
return l
|
||||
}
|
||||
|
||||
func (env *Env) GetJson(key string, iface interface{}) error {
|
||||
sval := env.Get(key)
|
||||
if sval == "" {
|
||||
return nil
|
||||
}
|
||||
return json.Unmarshal([]byte(sval), iface)
|
||||
}
|
||||
|
||||
func (env *Env) SetJson(key string, value interface{}) error {
|
||||
sval, err := json.Marshal(value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
env.Set(key, string(sval))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (env *Env) SetList(key string, value []string) error {
|
||||
return env.SetJson(key, value)
|
||||
}
|
||||
|
||||
func (env *Env) Set(key, value string) {
|
||||
*env = append(*env, key+"="+value)
|
||||
}
|
||||
|
||||
func NewDecoder(src io.Reader) *Decoder {
|
||||
return &Decoder{
|
||||
json.NewDecoder(src),
|
||||
}
|
||||
}
|
||||
|
||||
type Decoder struct {
|
||||
*json.Decoder
|
||||
}
|
||||
|
||||
func (decoder *Decoder) Decode() (*Env, error) {
|
||||
m := make(map[string]interface{})
|
||||
if err := decoder.Decoder.Decode(&m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
env := &Env{}
|
||||
for key, value := range m {
|
||||
env.SetAuto(key, value)
|
||||
}
|
||||
return env, nil
|
||||
}
|
||||
|
||||
// DecodeEnv decodes `src` as a json dictionary, and adds
|
||||
// each decoded key-value pair to the environment.
|
||||
//
|
||||
// If `src` cannot be decoded as a json dictionary, an error
|
||||
// is returned.
|
||||
func (env *Env) Decode(src io.Reader) error {
|
||||
m := make(map[string]interface{})
|
||||
if err := json.NewDecoder(src).Decode(&m); err != nil {
|
||||
return err
|
||||
}
|
||||
for k, v := range m {
|
||||
env.SetAuto(k, v)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (env *Env) SetAuto(k string, v interface{}) {
|
||||
// FIXME: we fix-convert float values to int, because
|
||||
// encoding/json decodes integers to float64, but cannot encode them back.
|
||||
// (See http://golang.org/src/pkg/encoding/json/decode.go#L46)
|
||||
if fval, ok := v.(float64); ok {
|
||||
env.SetInt64(k, int64(fval))
|
||||
} else if sval, ok := v.(string); ok {
|
||||
env.Set(k, sval)
|
||||
} else if val, err := json.Marshal(v); err == nil {
|
||||
env.Set(k, string(val))
|
||||
} else {
|
||||
env.Set(k, fmt.Sprintf("%v", v))
|
||||
}
|
||||
}
|
||||
|
||||
func (env *Env) Encode(dst io.Writer) error {
|
||||
m := make(map[string]interface{})
|
||||
for k, v := range env.Map() {
|
||||
var val interface{}
|
||||
if err := json.Unmarshal([]byte(v), &val); err == nil {
|
||||
// FIXME: we fix-convert float values to int, because
|
||||
// encoding/json decodes integers to float64, but cannot encode them back.
|
||||
// (See http://golang.org/src/pkg/encoding/json/decode.go#L46)
|
||||
if fval, isFloat := val.(float64); isFloat {
|
||||
val = int(fval)
|
||||
}
|
||||
m[k] = val
|
||||
} else {
|
||||
m[k] = v
|
||||
}
|
||||
}
|
||||
if err := json.NewEncoder(dst).Encode(&m); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (env *Env) WriteTo(dst io.Writer) (n int64, err error) {
|
||||
// FIXME: return the number of bytes written to respect io.WriterTo
|
||||
return 0, env.Encode(dst)
|
||||
}
|
||||
|
||||
func (env *Env) Export(dst interface{}) (err error) {
|
||||
defer func() {
|
||||
if err != nil {
|
||||
err = fmt.Errorf("ExportEnv %s", err)
|
||||
}
|
||||
}()
|
||||
var buf bytes.Buffer
|
||||
// step 1: encode/marshal the env to an intermediary json representation
|
||||
if err := env.Encode(&buf); err != nil {
|
||||
return err
|
||||
}
|
||||
// step 2: decode/unmarshal the intermediary json into the destination object
|
||||
if err := json.NewDecoder(&buf).Decode(dst); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (env *Env) Import(src interface{}) (err error) {
|
||||
defer func() {
|
||||
if err != nil {
|
||||
err = fmt.Errorf("ImportEnv: %s", err)
|
||||
}
|
||||
}()
|
||||
var buf bytes.Buffer
|
||||
if err := json.NewEncoder(&buf).Encode(src); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := env.Decode(&buf); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (env *Env) Map() map[string]string {
|
||||
m := make(map[string]string)
|
||||
for _, kv := range *env {
|
||||
parts := strings.SplitN(kv, "=", 2)
|
||||
m[parts[0]] = parts[1]
|
||||
}
|
||||
return m
|
||||
}
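
To make the new ``Env`` type concrete, here is a small usage sketch. It is
not part of the patch; it only calls methods defined above, via the import
path used in this repository:

.. code-block:: go

    package main

    import (
        "fmt"
        "os"

        "github.com/dotcloud/docker/engine"
    )

    func main() {
        env := &engine.Env{}
        env.Set("name", "redis")
        env.SetInt("port", 6379)
        env.SetList("tags", []string{"db", "cache"})

        fmt.Println(env.Get("name"))     // redis
        fmt.Println(env.GetInt("port"))  // 6379
        fmt.Println(env.GetList("tags")) // [db cache]

        // Encode serializes the whole environment as one json dictionary.
        if err := env.Encode(os.Stdout); err != nil {
            panic(err)
        }
    }
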
|
||||
@@ -23,7 +23,101 @@ func TestSetenv(t *testing.T) {
|
||||
if val := job.Getenv("foo"); val != "bar" {
|
||||
t.Fatalf("Getenv returns incorrect value: %s", val)
|
||||
}
|
||||
|
||||
job.Setenv("bar", "")
|
||||
if val := job.Getenv("bar"); val != "" {
|
||||
t.Fatalf("Getenv returns incorrect value: %s", val)
|
||||
}
|
||||
if val := job.Getenv("nonexistent"); val != "" {
|
||||
t.Fatalf("Getenv returns incorrect value: %s", val)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetenvBool(t *testing.T) {
|
||||
job := mkJob(t, "dummy")
|
||||
job.SetenvBool("foo", true)
|
||||
if val := job.GetenvBool("foo"); !val {
|
||||
t.Fatalf("GetenvBool returns incorrect value: %t", val)
|
||||
}
|
||||
|
||||
job.SetenvBool("bar", false)
|
||||
if val := job.GetenvBool("bar"); val {
|
||||
t.Fatalf("GetenvBool returns incorrect value: %t", val)
|
||||
}
|
||||
|
||||
if val := job.GetenvBool("nonexistent"); val {
|
||||
t.Fatalf("GetenvBool returns incorrect value: %t", val)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetenvInt(t *testing.T) {
|
||||
job := mkJob(t, "dummy")
|
||||
|
||||
job.SetenvInt("foo", -42)
|
||||
if val := job.GetenvInt("foo"); val != -42 {
|
||||
t.Fatalf("GetenvInt returns incorrect value: %d", val)
|
||||
}
|
||||
|
||||
job.SetenvInt("bar", 42)
|
||||
if val := job.GetenvInt("bar"); val != 42 {
|
||||
t.Fatalf("GetenvInt returns incorrect value: %d", val)
|
||||
}
|
||||
if val := job.GetenvInt("nonexistent"); val != -1 {
|
||||
t.Fatalf("GetenvInt returns incorrect value: %d", val)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetenvList(t *testing.T) {
|
||||
job := mkJob(t, "dummy")
|
||||
|
||||
job.SetenvList("foo", []string{"bar"})
|
||||
if val := job.GetenvList("foo"); len(val) != 1 || val[0] != "bar" {
|
||||
t.Fatalf("GetenvList returns incorrect value: %v", val)
|
||||
}
|
||||
|
||||
job.SetenvList("bar", nil)
|
||||
if val := job.GetenvList("bar"); val != nil {
|
||||
t.Fatalf("GetenvList returns incorrect value: %v", val)
|
||||
}
|
||||
if val := job.GetenvList("nonexistent"); val != nil {
|
||||
t.Fatalf("GetenvList returns incorrect value: %v", val)
|
||||
}
|
||||
}
|
||||
|
||||
func TestImportEnv(t *testing.T) {
|
||||
type dummy struct {
|
||||
DummyInt int
|
||||
DummyStringArray []string
|
||||
}
|
||||
|
||||
job := mkJob(t, "dummy")
|
||||
if err := job.ImportEnv(&dummy{42, []string{"foo", "bar"}}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
dmy := dummy{}
|
||||
if err := job.ExportEnv(&dmy); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if dmy.DummyInt != 42 {
|
||||
t.Fatalf("Expected 42, got %d", dmy.DummyInt)
|
||||
}
|
||||
|
||||
if len(dmy.DummyStringArray) != 2 || dmy.DummyStringArray[0] != "foo" || dmy.DummyStringArray[1] != "bar" {
|
||||
t.Fatalf("Expected {foo, bar}, got %v", dmy.DummyStringArray)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestEnviron(t *testing.T) {
|
||||
job := mkJob(t, "dummy")
|
||||
job.Setenv("foo", "bar")
|
||||
val, exists := job.Environ()["foo"]
|
||||
if !exists {
|
||||
t.Fatalf("foo not found in the environ")
|
||||
}
|
||||
if val != "bar" {
|
||||
t.Fatalf("bar not found in the environ")
|
||||
}
|
||||
}
|
||||
|
||||
24
engine/helpers_test.go
Normal file
@@ -0,0 +1,24 @@
|
||||
package engine
|
||||
|
||||
import (
|
||||
"github.com/dotcloud/docker/utils"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var globalTestID string
|
||||
|
||||
func newTestEngine(t *testing.T) *Engine {
|
||||
tmp, err := utils.TestDirectory("")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
eng, err := New(tmp)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return eng
|
||||
}
|
||||
|
||||
func mkJob(t *testing.T, name string, args ...string) *Job {
|
||||
return newTestEngine(t).Job(name, args...)
|
||||
}
|
||||
40
engine/http.go
Normal file
@@ -0,0 +1,40 @@
|
||||
package engine
|
||||
|
||||
import (
|
||||
"path"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// ServeHTTP executes a job as specified by the http request `r`, and sends the
|
||||
// result as an http response.
|
||||
// This method allows an Engine instance to be passed as a standard http.Handler interface.
|
||||
//
|
||||
// Note that the protocol used in this method is a convenience wrapper and is not the canonical
|
||||
// implementation of remote job execution. This is because HTTP/1 does not handle stream multiplexing,
|
||||
// and so cannot differentiate stdout from stderr. Additionally, headers cannot be added to a response
|
||||
// once data has been written to the body, which makes it inconvenient to return metadata such
|
||||
// as the exit status.
|
||||
//
|
||||
func (eng *Engine) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
jobName := path.Base(r.URL.Path)
|
||||
jobArgs, exists := r.URL.Query()["a"]
|
||||
if !exists {
|
||||
jobArgs = []string{}
|
||||
}
|
||||
w.Header().Set("Job-Name", jobName)
|
||||
for _, arg := range(jobArgs) {
|
||||
w.Header().Add("Job-Args", arg)
|
||||
}
|
||||
job := eng.Job(jobName, jobArgs...)
|
||||
job.Stdout.Add(w)
|
||||
job.Stderr.Add(w)
|
||||
// FIXME: distinguish job status from engine error in Run()
|
||||
// The former should be passed as a special header, the latter
|
||||
// should cause a 500 status
|
||||
w.WriteHeader(http.StatusOK)
|
||||
// The exit status cannot be sent reliably with HTTP1, because headers
|
||||
// can only be sent before the body.
|
||||
// (we could possibly use http footers via chunked encoding, but I couldn't find
|
||||
// how to use them in net/http)
|
||||
job.Run()
|
||||
}
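
Because ``ServeHTTP`` makes ``*Engine`` satisfy the standard ``http.Handler``
interface, exposing an engine over HTTP is a one-liner. A hedged sketch (the
listen address and root directory are assumptions, not part of the patch):

.. code-block:: go

    package main

    import (
        "log"
        "net/http"

        "github.com/dotcloud/docker/engine"
    )

    func main() {
        eng, err := engine.New("/tmp/engine-http-demo") // hypothetical root
        if err != nil {
            log.Fatal(err)
        }
        // e.g. GET /hello?a=world runs eng.Job("hello", "world") and streams
        // the job's stdout and stderr back as the response body.
        http.Handle("/", eng)
        log.Fatal(http.ListenAndServe(":8080", nil))
    }
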
|
||||
303
engine/job.go
@@ -1,16 +1,10 @@
|
||||
package engine
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// A job is the fundamental unit of work in the docker engine.
|
||||
@@ -30,127 +24,76 @@ type Job struct {
|
||||
Eng *Engine
|
||||
Name string
|
||||
Args []string
|
||||
env []string
|
||||
Stdin io.Reader
|
||||
Stdout io.Writer
|
||||
Stderr io.Writer
|
||||
handler func(*Job) string
|
||||
status string
|
||||
env *Env
|
||||
Stdout *Output
|
||||
Stderr *Output
|
||||
Stdin *Input
|
||||
handler Handler
|
||||
status Status
|
||||
end time.Time
|
||||
onExit []func()
|
||||
}
|
||||
|
||||
type Status int
|
||||
|
||||
const (
|
||||
StatusOK Status = 0
|
||||
StatusErr Status = 1
|
||||
StatusNotFound Status = 127
|
||||
)
|
||||
|
||||
// Run executes the job and blocks until the job completes.
|
||||
// If the job returns a failure status, an error is returned
|
||||
// which includes the status.
|
||||
func (job *Job) Run() error {
|
||||
defer func() {
|
||||
var wg sync.WaitGroup
|
||||
for _, f := range job.onExit {
|
||||
wg.Add(1)
|
||||
go func(f func()) {
|
||||
f()
|
||||
wg.Done()
|
||||
}(f)
|
||||
}
|
||||
wg.Wait()
|
||||
}()
|
||||
if job.Stdout != nil && job.Stdout != os.Stdout {
|
||||
job.Stdout = io.MultiWriter(job.Stdout, os.Stdout)
|
||||
}
|
||||
if job.Stderr != nil && job.Stderr != os.Stderr {
|
||||
job.Stderr = io.MultiWriter(job.Stderr, os.Stderr)
|
||||
// FIXME: make this thread-safe
|
||||
// FIXME: implement wait
|
||||
if !job.end.IsZero() {
|
||||
return fmt.Errorf("%s: job has already completed", job.Name)
|
||||
}
|
||||
// Log beginning and end of the job
|
||||
job.Eng.Logf("+job %s", job.CallString())
|
||||
defer func() {
|
||||
job.Eng.Logf("-job %s%s", job.CallString(), job.StatusString())
|
||||
}()
|
||||
var errorMessage string
|
||||
job.Stderr.AddString(&errorMessage)
|
||||
if job.handler == nil {
|
||||
job.status = "command not found"
|
||||
job.Errorf("%s: command not found", job.Name)
|
||||
job.status = 127
|
||||
} else {
|
||||
job.status = job.handler(job)
|
||||
job.end = time.Now()
|
||||
}
|
||||
if job.status != "0" {
|
||||
return fmt.Errorf("%s: %s", job.Name, job.status)
|
||||
// Wait for all background tasks to complete
|
||||
if err := job.Stdout.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := job.Stderr.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
if job.status != 0 {
|
||||
return fmt.Errorf("%s: %s", job.Name, errorMessage)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (job *Job) StdoutParseLines(dst *[]string, limit int) {
|
||||
job.parseLines(job.StdoutPipe(), dst, limit)
|
||||
}
|
||||
|
||||
func (job *Job) StderrParseLines(dst *[]string, limit int) {
|
||||
job.parseLines(job.StderrPipe(), dst, limit)
|
||||
}
|
||||
|
||||
func (job *Job) parseLines(src io.Reader, dst *[]string, limit int) {
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
scanner := bufio.NewScanner(src)
|
||||
for scanner.Scan() {
|
||||
// If the limit is reached, flush the rest of the source and return
|
||||
if limit > 0 && len(*dst) >= limit {
|
||||
io.Copy(ioutil.Discard, src)
|
||||
return
|
||||
}
|
||||
line := scanner.Text()
|
||||
// Append the line (with the delimiter removed)
|
||||
*dst = append(*dst, line)
|
||||
}
|
||||
}()
|
||||
job.onExit = append(job.onExit, wg.Wait)
|
||||
}
|
||||
|
||||
func (job *Job) StdoutParseString(dst *string) {
|
||||
lines := make([]string, 0, 1)
|
||||
job.StdoutParseLines(&lines, 1)
|
||||
job.onExit = append(job.onExit, func() {
|
||||
if len(lines) >= 1 {
|
||||
*dst = lines[0]
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func (job *Job) StderrParseString(dst *string) {
|
||||
lines := make([]string, 0, 1)
|
||||
job.StderrParseLines(&lines, 1)
|
||||
job.onExit = append(job.onExit, func() { *dst = lines[0] })
|
||||
}
|
||||
|
||||
func (job *Job) StdoutPipe() io.ReadCloser {
|
||||
r, w := io.Pipe()
|
||||
job.Stdout = w
|
||||
job.onExit = append(job.onExit, func() { w.Close() })
|
||||
return r
|
||||
}
|
||||
|
||||
func (job *Job) StderrPipe() io.ReadCloser {
|
||||
r, w := io.Pipe()
|
||||
job.Stderr = w
|
||||
job.onExit = append(job.onExit, func() { w.Close() })
|
||||
return r
|
||||
}
|
||||
|
||||
func (job *Job) CallString() string {
|
||||
return fmt.Sprintf("%s(%s)", job.Name, strings.Join(job.Args, ", "))
|
||||
}
|
||||
|
||||
func (job *Job) StatusString() string {
|
||||
// FIXME: if a job returns the empty string, it will be printed
|
||||
// as not having returned.
|
||||
// (this only affects String which is a convenience function).
|
||||
if job.status != "" {
|
||||
var okerr string
|
||||
if job.status == "0" {
|
||||
okerr = "OK"
|
||||
} else {
|
||||
okerr = "ERR"
|
||||
}
|
||||
return fmt.Sprintf(" = %s (%s)", okerr, job.status)
|
||||
// If the job hasn't completed, status string is empty
|
||||
if job.end.IsZero() {
|
||||
return ""
|
||||
}
|
||||
return ""
|
||||
var okerr string
|
||||
if job.status == StatusOK {
|
||||
okerr = "OK"
|
||||
} else {
|
||||
okerr = "ERR"
|
||||
}
|
||||
return fmt.Sprintf(" = %s (%d)", okerr, job.status)
|
||||
}
|
||||
|
||||
// String returns a human-readable description of `job`
|
||||
@@ -159,168 +102,77 @@ func (job *Job) String() string {
|
||||
}
|
||||
|
||||
func (job *Job) Getenv(key string) (value string) {
|
||||
for _, kv := range job.env {
|
||||
if strings.Index(kv, "=") == -1 {
|
||||
continue
|
||||
}
|
||||
parts := strings.SplitN(kv, "=", 2)
|
||||
if parts[0] != key {
|
||||
continue
|
||||
}
|
||||
if len(parts) < 2 {
|
||||
value = ""
|
||||
} else {
|
||||
value = parts[1]
|
||||
}
|
||||
}
|
||||
return
|
||||
return job.env.Get(key)
|
||||
}
|
||||
|
||||
func (job *Job) GetenvBool(key string) (value bool) {
|
||||
s := strings.ToLower(strings.Trim(job.Getenv(key), " \t"))
|
||||
if s == "" || s == "0" || s == "no" || s == "false" || s == "none" {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
return job.env.GetBool(key)
|
||||
}
|
||||
|
||||
func (job *Job) SetenvBool(key string, value bool) {
|
||||
if value {
|
||||
job.Setenv(key, "1")
|
||||
} else {
|
||||
job.Setenv(key, "0")
|
||||
}
|
||||
job.env.SetBool(key, value)
|
||||
}
|
||||
|
||||
func (job *Job) GetenvInt(key string) int64 {
|
||||
s := strings.Trim(job.Getenv(key), " \t")
|
||||
val, err := strconv.ParseInt(s, 10, 64)
|
||||
if err != nil {
|
||||
return -1
|
||||
}
|
||||
return val
|
||||
func (job *Job) GetenvInt64(key string) int64 {
|
||||
return job.env.GetInt64(key)
|
||||
}
|
||||
|
||||
func (job *Job) SetenvInt(key string, value int64) {
|
||||
job.Setenv(key, fmt.Sprintf("%d", value))
|
||||
func (job *Job) GetenvInt(key string) int {
|
||||
return job.env.GetInt(key)
|
||||
}
|
||||
|
||||
func (job *Job) SetenvInt64(key string, value int64) {
|
||||
job.env.SetInt64(key, value)
|
||||
}
|
||||
|
||||
func (job *Job) SetenvInt(key string, value int) {
|
||||
job.env.SetInt(key, value)
|
||||
}
|
||||
|
||||
// Returns nil if key not found
|
||||
func (job *Job) GetenvList(key string) []string {
|
||||
sval := job.Getenv(key)
|
||||
l := make([]string, 0, 1)
|
||||
if err := json.Unmarshal([]byte(sval), &l); err != nil {
|
||||
l = append(l, sval)
|
||||
}
|
||||
return l
|
||||
return job.env.GetList(key)
|
||||
}
|
||||
|
||||
func (job *Job) GetenvJson(key string, iface interface{}) error {
|
||||
return job.env.GetJson(key, iface)
|
||||
}
|
||||
|
||||
func (job *Job) SetenvJson(key string, value interface{}) error {
|
||||
sval, err := json.Marshal(value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
job.Setenv(key, string(sval))
|
||||
return nil
|
||||
return job.env.SetJson(key, value)
|
||||
}
|
||||
|
||||
func (job *Job) SetenvList(key string, value []string) error {
|
||||
return job.SetenvJson(key, value)
|
||||
return job.env.SetJson(key, value)
|
||||
}
|
||||
|
||||
func (job *Job) Setenv(key, value string) {
|
||||
job.env = append(job.env, key+"="+value)
|
||||
job.env.Set(key, value)
|
||||
}
|
||||
|
||||
// DecodeEnv decodes `src` as a json dictionary, and adds
|
||||
// each decoded key-value pair to the environment.
|
||||
//
|
||||
// If `text` cannot be decoded as a json dictionary, an error
|
||||
// If `src` cannot be decoded as a json dictionary, an error
|
||||
// is returned.
|
||||
func (job *Job) DecodeEnv(src io.Reader) error {
|
||||
m := make(map[string]interface{})
|
||||
if err := json.NewDecoder(src).Decode(&m); err != nil {
|
||||
return err
|
||||
}
|
||||
for k, v := range m {
|
||||
// FIXME: we fix-convert float values to int, because
|
||||
// encoding/json decodes integers to float64, but cannot encode them back.
|
||||
// (See http://golang.org/src/pkg/encoding/json/decode.go#L46)
|
||||
if fval, ok := v.(float64); ok {
|
||||
job.SetenvInt(k, int64(fval))
|
||||
} else if sval, ok := v.(string); ok {
|
||||
job.Setenv(k, sval)
|
||||
} else if val, err := json.Marshal(v); err == nil {
|
||||
job.Setenv(k, string(val))
|
||||
} else {
|
||||
job.Setenv(k, fmt.Sprintf("%v", v))
|
||||
}
|
||||
}
|
||||
return nil
|
||||
return job.env.Decode(src)
|
||||
}
|
||||
|
||||
func (job *Job) EncodeEnv(dst io.Writer) error {
|
||||
m := make(map[string]interface{})
|
||||
for k, v := range job.Environ() {
|
||||
var val interface{}
|
||||
if err := json.Unmarshal([]byte(v), &val); err == nil {
|
||||
// FIXME: we fix-convert float values to int, because
|
||||
// encoding/json decodes integers to float64, but cannot encode them back.
|
||||
// (See http://golang.org/src/pkg/encoding/json/decode.go#L46)
|
||||
if fval, isFloat := val.(float64); isFloat {
|
||||
val = int(fval)
|
||||
}
|
||||
m[k] = val
|
||||
} else {
|
||||
m[k] = v
|
||||
}
|
||||
}
|
||||
if err := json.NewEncoder(dst).Encode(&m); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
return job.env.Encode(dst)
|
||||
}
|
||||
|
||||
func (job *Job) ExportEnv(dst interface{}) (err error) {
|
||||
defer func() {
|
||||
if err != nil {
|
||||
err = fmt.Errorf("ExportEnv %s", err)
|
||||
}
|
||||
}()
|
||||
var buf bytes.Buffer
|
||||
// step 1: encode/marshal the env to an intermediary json representation
|
||||
if err := job.EncodeEnv(&buf); err != nil {
|
||||
return err
|
||||
}
|
||||
// step 2: decode/unmarshal the intermediary json into the destination object
|
||||
if err := json.NewDecoder(&buf).Decode(dst); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
return job.env.Export(dst)
|
||||
}
func (job *Job) ImportEnv(src interface{}) (err error) {
	return job.env.Import(src)
}

func (job *Job) Environ() map[string]string {
	return job.env.Map()
}

func (job *Job) Logf(format string, args ...interface{}) (n int, err error) {
@@ -334,5 +186,8 @@ func (job *Job) Printf(format string, args ...interface{}) (n int, err error) {

func (job *Job) Errorf(format string, args ...interface{}) (n int, err error) {
	return fmt.Fprintf(job.Stderr, format, args...)
}

func (job *Job) Error(err error) (int, error) {
	return fmt.Fprintf(job.Stderr, "%s", err)
}
80	engine/job_test.go	Normal file
@@ -0,0 +1,80 @@
package engine

import (
	"os"
	"testing"
)

func TestJobStatusOK(t *testing.T) {
	eng := newTestEngine(t)
	defer os.RemoveAll(eng.Root())
	eng.Register("return_ok", func(job *Job) Status { return StatusOK })
	err := eng.Job("return_ok").Run()
	if err != nil {
		t.Fatalf("Expected: err=%v\nReceived: err=%v", nil, err)
	}
}

func TestJobStatusErr(t *testing.T) {
	eng := newTestEngine(t)
	defer os.RemoveAll(eng.Root())
	eng.Register("return_err", func(job *Job) Status { return StatusErr })
	err := eng.Job("return_err").Run()
	if err == nil {
		t.Fatalf("When a job returns StatusErr, Run() should return an error")
	}
}

func TestJobStatusNotFound(t *testing.T) {
	eng := newTestEngine(t)
	defer os.RemoveAll(eng.Root())
	eng.Register("return_not_found", func(job *Job) Status { return StatusNotFound })
	err := eng.Job("return_not_found").Run()
	if err == nil {
		t.Fatalf("When a job returns StatusNotFound, Run() should return an error")
	}
}

func TestJobStdoutString(t *testing.T) {
	eng := newTestEngine(t)
	defer os.RemoveAll(eng.Root())
	// FIXME: test multiple combinations of output and status
	eng.Register("say_something_in_stdout", func(job *Job) Status {
		job.Printf("Hello world\n")
		return StatusOK
	})

	job := eng.Job("say_something_in_stdout")
	var output string
	if err := job.Stdout.AddString(&output); err != nil {
		t.Fatal(err)
	}
	if err := job.Run(); err != nil {
		t.Fatal(err)
	}
	if expectedOutput := "Hello world"; output != expectedOutput {
		t.Fatalf("Stdout last line:\nExpected: %v\nReceived: %v", expectedOutput, output)
	}
}

func TestJobStderrString(t *testing.T) {
	eng := newTestEngine(t)
	defer os.RemoveAll(eng.Root())
	// FIXME: test multiple combinations of output and status
	eng.Register("say_something_in_stderr", func(job *Job) Status {
		job.Errorf("Warning, something might happen\nHere it comes!\nOh no...\nSomething happened\n")
		return StatusOK
	})

	job := eng.Job("say_something_in_stderr")
	var output string
	if err := job.Stderr.AddString(&output); err != nil {
		t.Fatal(err)
	}
	if err := job.Run(); err != nil {
		t.Fatal(err)
	}
	if expectedOutput := "Something happened"; output != expectedOutput {
		t.Fatalf("Stderr last line:\nExpected: %v\nReceived: %v", expectedOutput, output)
	}
}
192	engine/streams.go	Normal file
@@ -0,0 +1,192 @@
package engine

import (
	"bufio"
	"container/ring"
	"fmt"
	"io"
	"sync"
)

type Output struct {
	sync.Mutex
	dests []io.Writer
	tasks sync.WaitGroup
}

// NewOutput returns a new Output object with no destinations attached.
// Writing to an empty Output will cause the written data to be discarded.
func NewOutput() *Output {
	return &Output{}
}

// Add attaches a new destination to the Output. Any data subsequently written
// to the output will be written to the new destination in addition to all the others.
// This method is thread-safe.
// FIXME: Add cannot fail
func (o *Output) Add(dst io.Writer) error {
	o.Mutex.Lock()
	defer o.Mutex.Unlock()
	o.dests = append(o.dests, dst)
	return nil
}

// AddPipe creates an in-memory pipe with io.Pipe(), adds its writing end as a destination,
// and returns its reading end for consumption by the caller.
// This is roughly equivalent to Cmd.StdoutPipe() in the standard os/exec package.
// This method is thread-safe.
func (o *Output) AddPipe() (io.Reader, error) {
	r, w := io.Pipe()
	o.Add(w)
	return r, nil
}

// AddTail starts a new goroutine which will read all subsequent data written to the output,
// line by line, and append the last `n` lines to `dst`.
func (o *Output) AddTail(dst *[]string, n int) error {
	src, err := o.AddPipe()
	if err != nil {
		return err
	}
	o.tasks.Add(1)
	go func() {
		defer o.tasks.Done()
		Tail(src, n, dst)
	}()
	return nil
}

// AddString starts a new goroutine which will read all subsequent data written to the output,
// line by line, and store the last line into `dst`.
func (o *Output) AddString(dst *string) error {
	src, err := o.AddPipe()
	if err != nil {
		return err
	}
	o.tasks.Add(1)
	go func() {
		defer o.tasks.Done()
		lines := make([]string, 0, 1)
		Tail(src, 1, &lines)
		if len(lines) == 0 {
			*dst = ""
		} else {
			*dst = lines[0]
		}
	}()
	return nil
}

// Write writes the same data to all registered destinations.
// This method is thread-safe.
func (o *Output) Write(p []byte) (n int, err error) {
	o.Mutex.Lock()
	defer o.Mutex.Unlock()
	var firstErr error
	for _, dst := range o.dests {
		_, err := dst.Write(p)
		if err != nil && firstErr == nil {
			firstErr = err
		}
	}
	return len(p), firstErr
}

// Close unregisters all destinations and waits for all background
// AddTail and AddString tasks to complete.
// The Close method of each destination is called if it exists.
func (o *Output) Close() error {
	o.Mutex.Lock()
	defer o.Mutex.Unlock()
	var firstErr error
	for _, dst := range o.dests {
		if closer, ok := dst.(io.WriteCloser); ok {
			err := closer.Close()
			if err != nil && firstErr == nil {
				firstErr = err
			}
		}
	}
	o.tasks.Wait()
	return firstErr
}
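A quick sketch of how these pieces compose (a hypothetical usage, assuming only the Output API above plus bytes from the standard library): a single Write fans out to a plain buffer and to a background last-line collector, and Close flushes the collector before returning.

o := NewOutput()
buf := &bytes.Buffer{}
o.Add(buf)                    // passive destination: receives the raw bytes
var last string
o.AddString(&last)            // background task: keeps only the last line
o.Write([]byte("one\ntwo\n")) // both destinations see the same data
o.Close()                     // closes the pipe and waits for the task
// buf.String() == "one\ntwo\n" and last == "two"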
type Input struct {
	src io.Reader
	sync.Mutex
}

// NewInput returns a new Input object with no source attached.
// Reading from an empty Input will return io.EOF.
func NewInput() *Input {
	return &Input{}
}

// Read reads from the input in a thread-safe way.
func (i *Input) Read(p []byte) (n int, err error) {
	i.Mutex.Lock()
	defer i.Mutex.Unlock()
	if i.src == nil {
		return 0, io.EOF
	}
	return i.src.Read(p)
}

// Add attaches a new source to the input.
// Add can only be called once per input. Subsequent calls will
// return an error.
func (i *Input) Add(src io.Reader) error {
	i.Mutex.Lock()
	defer i.Mutex.Unlock()
	if i.src != nil {
		return fmt.Errorf("Maximum number of sources reached: 1")
	}
	i.src = src
	return nil
}

// Tail reads from `src` line by line, and returns the last `n` lines as an array.
// A ring buffer is used to only store `n` lines at any time.
func Tail(src io.Reader, n int, dst *[]string) {
	scanner := bufio.NewScanner(src)
	r := ring.New(n)
	for scanner.Scan() {
		if n == 0 {
			continue
		}
		r.Value = scanner.Text()
		r = r.Next()
	}
	r.Do(func(v interface{}) {
		if v == nil {
			return
		}
		*dst = append(*dst, v.(string))
	})
}
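Tail also works standalone on any io.Reader; a small self-contained illustration (only the strings package is assumed):

var last []string
Tail(strings.NewReader("One\nTwo\nThree"), 2, &last)
// last == ["Two", "Three"]; with n == 0 the reader is still drained but last stays empty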
// AddEnv starts a new goroutine which will decode all subsequent data
// as a stream of json-encoded objects, and point `dst` to the last
// decoded object.
// The result `env` can be queried using the type-neutral Env interface.
// It is not safe to query `env` until the Output is closed.
func (o *Output) AddEnv() (dst *Env, err error) {
	src, err := o.AddPipe()
	if err != nil {
		return nil, err
	}
	dst = &Env{}
	o.tasks.Add(1)
	go func() {
		defer o.tasks.Done()
		decoder := NewDecoder(src)
		for {
			env, err := decoder.Decode()
			if err != nil {
				return
			}
			*dst = *env
		}
	}()
	return dst, nil
}
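The consuming side mirrors TestOutputAddEnv below; a sketch with hypothetical keys, assuming only the Get and GetInt accessors that the tests exercise:

o := NewOutput()
env, _ := o.AddEnv()
o.Write([]byte(`{"foo": "bar", "answer": 42}`))
o.Close()                         // env must not be queried before Close
fmt.Println(env.Get("foo"))       // "bar"
fmt.Println(env.GetInt("answer")) // 42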
294	engine/streams_test.go	Normal file
@@ -0,0 +1,294 @@
package engine

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
	"testing"
)

func TestOutputAddString(t *testing.T) {
	var testInputs = [][2]string{
		{
			"hello, world!",
			"hello, world!",
		},

		{
			"One\nTwo\nThree",
			"Three",
		},

		{
			"",
			"",
		},

		{
			"A line\nThen another nl-terminated line\n",
			"Then another nl-terminated line",
		},

		{
			"A line followed by an empty line\n\n",
			"",
		},
	}
	for _, testData := range testInputs {
		input := testData[0]
		expectedOutput := testData[1]
		o := NewOutput()
		var output string
		if err := o.AddString(&output); err != nil {
			t.Error(err)
		}
		if n, err := o.Write([]byte(input)); err != nil {
			t.Error(err)
		} else if n != len(input) {
			t.Errorf("Expected %d, got %d", len(input), n)
		}
		o.Close()
		if output != expectedOutput {
			t.Errorf("Last line is not stored as return string.\nInput: '%s'\nExpected: '%s'\nGot: '%s'", input, expectedOutput, output)
		}
	}
}

type sentinelWriteCloser struct {
	calledWrite bool
	calledClose bool
}

func (w *sentinelWriteCloser) Write(p []byte) (int, error) {
	w.calledWrite = true
	return len(p), nil
}

func (w *sentinelWriteCloser) Close() error {
	w.calledClose = true
	return nil
}

func TestOutputAddEnv(t *testing.T) {
	input := "{\"foo\": \"bar\", \"answer_to_life_the_universe_and_everything\": 42}"
	o := NewOutput()
	result, err := o.AddEnv()
	if err != nil {
		t.Fatal(err)
	}
	o.Write([]byte(input))
	o.Close()
	if v := result.Get("foo"); v != "bar" {
		t.Errorf("Expected %v, got %v", "bar", v)
	}
	if v := result.GetInt("answer_to_life_the_universe_and_everything"); v != 42 {
		t.Errorf("Expected %v, got %v", 42, v)
	}
	if v := result.Get("this-value-doesnt-exist"); v != "" {
		t.Errorf("Expected %v, got %v", "", v)
	}
}

func TestOutputAddClose(t *testing.T) {
	o := NewOutput()
	var s sentinelWriteCloser
	if err := o.Add(&s); err != nil {
		t.Fatal(err)
	}
	if err := o.Close(); err != nil {
		t.Fatal(err)
	}
	// Write data after the output is closed.
	// Write should succeed, but no destination should receive it.
	if _, err := o.Write([]byte("foo bar")); err != nil {
		t.Fatal(err)
	}
	if !s.calledClose {
		t.Fatal("Output.Close() didn't close the destination")
	}
}

func TestOutputAddPipe(t *testing.T) {
	var testInputs = []string{
		"hello, world!",
		"One\nTwo\nThree",
		"",
		"A line\nThen another nl-terminated line\n",
		"A line followed by an empty line\n\n",
	}
	for _, input := range testInputs {
		expectedOutput := input
		o := NewOutput()
		r, err := o.AddPipe()
		if err != nil {
			t.Fatal(err)
		}
		go func(o *Output) {
			if n, err := o.Write([]byte(input)); err != nil {
				t.Error(err)
			} else if n != len(input) {
				t.Errorf("Expected %d, got %d", len(input), n)
			}
			if err := o.Close(); err != nil {
				t.Error(err)
			}
		}(o)
		output, err := ioutil.ReadAll(r)
		if err != nil {
			t.Fatal(err)
		}
		if string(output) != expectedOutput {
			t.Errorf("Last line is not stored as return string.\nExpected: '%s'\nGot: '%s'", expectedOutput, output)
		}
	}
}

func TestTail(t *testing.T) {
	var tests = make(map[string][][]string)
	tests["hello, world!"] = [][]string{
		{},
		{"hello, world!"},
		{"hello, world!"},
		{"hello, world!"},
	}
	tests["One\nTwo\nThree"] = [][]string{
		{},
		{"Three"},
		{"Two", "Three"},
		{"One", "Two", "Three"},
	}
	for input, outputs := range tests {
		for n, expectedOutput := range outputs {
			var output []string
			Tail(strings.NewReader(input), n, &output)
			if fmt.Sprintf("%v", output) != fmt.Sprintf("%v", expectedOutput) {
				t.Errorf("Tail n=%d returned wrong result.\nExpected: '%s'\nGot: '%s'", n, expectedOutput, output)
			}
		}
	}
}

func TestOutputAddTail(t *testing.T) {
	var tests = make(map[string][][]string)
	tests["hello, world!"] = [][]string{
		{},
		{"hello, world!"},
		{"hello, world!"},
		{"hello, world!"},
	}
	tests["One\nTwo\nThree"] = [][]string{
		{},
		{"Three"},
		{"Two", "Three"},
		{"One", "Two", "Three"},
	}
	for input, outputs := range tests {
		for n, expectedOutput := range outputs {
			o := NewOutput()
			var output []string
			if err := o.AddTail(&output, n); err != nil {
				t.Error(err)
			}
			if n, err := o.Write([]byte(input)); err != nil {
				t.Error(err)
			} else if n != len(input) {
				t.Errorf("Expected %d, got %d", len(input), n)
			}
			o.Close()
			if fmt.Sprintf("%v", output) != fmt.Sprintf("%v", expectedOutput) {
				t.Errorf("Tail(%d) returned wrong result.\nExpected: %v\nGot: %v", n, expectedOutput, output)
			}
		}
	}
}

func lastLine(txt string) string {
	scanner := bufio.NewScanner(strings.NewReader(txt))
	var lastLine string
	for scanner.Scan() {
		lastLine = scanner.Text()
	}
	return lastLine
}

func TestOutputAdd(t *testing.T) {
	o := NewOutput()
	b := &bytes.Buffer{}
	o.Add(b)
	input := "hello, world!"
	if n, err := o.Write([]byte(input)); err != nil {
		t.Fatal(err)
	} else if n != len(input) {
		t.Fatalf("Expected %d, got %d", len(input), n)
	}
	if output := b.String(); output != input {
		t.Fatalf("Received wrong data from Add.\nExpected: '%s'\nGot: '%s'", input, output)
	}
}

func TestOutputWriteError(t *testing.T) {
	o := NewOutput()
	buf := &bytes.Buffer{}
	o.Add(buf)
	r, w := io.Pipe()
	input := "Hello there"
	expectedErr := fmt.Errorf("This is an error")
	r.CloseWithError(expectedErr)
	o.Add(w)
	n, err := o.Write([]byte(input))
	if err != expectedErr {
		t.Fatalf("Output.Write() should return the first error encountered, if any")
	}
	if buf.String() != input {
		t.Fatalf("Output.Write() should attempt write on all destinations, even after encountering an error")
	}
	if n != len(input) {
		t.Fatalf("Output.Write() should return the size of the input if it successfully writes to at least one destination")
	}
}

func TestInputAddEmpty(t *testing.T) {
	i := NewInput()
	var b bytes.Buffer
	if err := i.Add(&b); err != nil {
		t.Fatal(err)
	}
	data, err := ioutil.ReadAll(i)
	if err != nil {
		t.Fatal(err)
	}
	if len(data) > 0 {
		t.Fatalf("Read from empty input should yield no data")
	}
}

func TestInputAddTwo(t *testing.T) {
	i := NewInput()
	var b1 bytes.Buffer
	// First add should succeed
	if err := i.Add(&b1); err != nil {
		t.Fatal(err)
	}
	var b2 bytes.Buffer
	// Second add should fail
	if err := i.Add(&b2); err == nil {
		t.Fatalf("Adding a second source should return an error")
	}
}

func TestInputAddNotEmpty(t *testing.T) {
	i := NewInput()
	b := bytes.NewBufferString("hello world\nabc")
	expectedResult := b.String()
	i.Add(b)
	result, err := ioutil.ReadAll(i)
	if err != nil {
		t.Fatal(err)
	}
	if string(result) != expectedResult {
		t.Fatalf("Expected: %v\nReceived: %v", expectedResult, result)
	}
}
@@ -1,42 +0,0 @@
package engine

import (
	"fmt"
	"github.com/dotcloud/docker/utils"
	"io/ioutil"
	"runtime"
	"strings"
	"testing"
)

var globalTestID string

func init() {
	Register("dummy", func(job *Job) string { return "" })
}

func newTestEngine(t *testing.T) *Engine {
	// Use the caller function name as a prefix.
	// This helps trace temp directories back to their test.
	pc, _, _, _ := runtime.Caller(1)
	callerLongName := runtime.FuncForPC(pc).Name()
	parts := strings.Split(callerLongName, ".")
	callerShortName := parts[len(parts)-1]
	if globalTestID == "" {
		globalTestID = utils.RandomString()[:4]
	}
	prefix := fmt.Sprintf("docker-test%s-%s-", globalTestID, callerShortName)
	root, err := ioutil.TempDir("", prefix)
	if err != nil {
		t.Fatal(err)
	}
	eng, err := New(root)
	if err != nil {
		t.Fatal(err)
	}
	return eng
}

func mkJob(t *testing.T, name string, args ...string) *Job {
	return newTestEngine(t).Job(name, args...)
}
60	graph.go
@@ -52,7 +52,9 @@ func (graph *Graph) restore() error {
	}
	for _, v := range dir {
		id := v.Name()
		if graph.driver.Exists(id) {
			graph.idIndex.Add(id)
		}
	}
	return nil
}
@@ -92,11 +94,25 @@ func (graph *Graph) Get(name string) (*Image, error) {
		return nil, fmt.Errorf("Image stored at '%s' has wrong id '%s'", id, img.ID)
	}
	img.graph = graph

	if img.Size < 0 {
		var size int64
		if img.Parent == "" {
			if size, err = utils.TreeSize(rootfs); err != nil {
				return nil, err
			}
		} else {
			parentFs, err := graph.driver.Get(img.Parent)
			if err != nil {
				return nil, err
			}
			changes, err := archive.ChangesDirs(rootfs, parentFs)
			if err != nil {
				return nil, err
			}
			size = archive.ChangesSize(rootfs, changes)
		}

		img.Size = size
		if err := img.SaveSize(graph.imageRoot(id)); err != nil {
			return nil, err
@@ -110,7 +126,7 @@ func (graph *Graph) Create(layerData archive.Archive, container *Container, comm
	img := &Image{
		ID:            GenerateID(),
		Comment:       comment,
		Created:       time.Now().UTC(),
		DockerVersion: VERSION,
		Author:        author,
		Config:        config,
@@ -129,7 +145,15 @@ func (graph *Graph) Create(layerData archive.Archive, container *Container, comm

// Register imports a pre-existing image into the graph.
// FIXME: pass img as first argument
func (graph *Graph) Register(jsonData []byte, layerData archive.Archive, img *Image) (err error) {
	defer func() {
		// If any error occurs, remove the new dir from the driver.
		// Don't check for errors since the dir might not have been created.
		// FIXME: this leaves a possible race condition.
		if err != nil {
			graph.driver.Remove(img.ID)
		}
	}()
	if err := ValidateID(img.ID); err != nil {
		return err
	}
@@ -137,6 +161,20 @@ func (graph *Graph) Register(jsonData []byte, layerData archive.Archive, img *Im
	if graph.Exists(img.ID) {
		return fmt.Errorf("Image %s already exists", img.ID)
	}

	// Ensure that the image root does not exist on the filesystem
	// when it is not registered in the graph.
	// This is common when you switch from one graph driver to another
	if err := os.RemoveAll(graph.imageRoot(img.ID)); err != nil && !os.IsNotExist(err) {
		return err
	}

	// If the driver has this ID but the graph doesn't, remove it from the driver to start fresh.
	// (the graph is the source of truth).
	// Ignore errors, since we don't know if the driver correctly returns ErrNotExist.
	// (FIXME: make that mandatory for drivers).
	graph.driver.Remove(img.ID)

	tmp, err := graph.Mktemp("")
	defer os.RemoveAll(tmp)
	if err != nil {
@@ -177,11 +215,11 @@ func (graph *Graph) TempLayerArchive(id string, compression archive.Compression,
	if err != nil {
		return nil, err
	}
	a, err := image.TarLayer()
	if err != nil {
		return nil, err
	}
	return archive.NewTempArchive(utils.ProgressReader(ioutil.NopCloser(a), 0, output, sf, false, utils.TruncateID(id), "Buffering to disk"), tmp)
}

// Mktemp creates a temporary sub-directory inside the graph's filesystem.
@@ -353,3 +391,7 @@ func (graph *Graph) Heads() (map[string]*Image, error) {
func (graph *Graph) imageRoot(id string) string {
	return path.Join(graph.Root, id)
}

func (graph *Graph) Driver() graphdriver.Driver {
	return graph.driver
}
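The Register hunk above leans on a small Go idiom worth calling out: a named error return lets a single deferred hook undo side effects on every failure path. A generic sketch (createResource and its helpers are hypothetical, not part of this codebase):

func createResource(id string) (err error) {
	defer func() {
		if err != nil {
			cleanup(id) // best-effort rollback, like graph.driver.Remove above
		}
	}()
	if err = allocate(id); err != nil {
		return err
	}
	return commit(id)
}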
297	graph_test.go
@@ -1,297 +0,0 @@
package docker

import (
	"archive/tar"
	"bytes"
	"errors"
	"github.com/dotcloud/docker/archive"
	"github.com/dotcloud/docker/graphdriver"
	"github.com/dotcloud/docker/utils"
	"io"
	"io/ioutil"
	"os"
	"testing"
	"time"
)

func TestInit(t *testing.T) {
	graph := tempGraph(t)
	defer os.RemoveAll(graph.Root)
	// Root should exist
	if _, err := os.Stat(graph.Root); err != nil {
		t.Fatal(err)
	}
	// Map() should be empty
	if l, err := graph.Map(); err != nil {
		t.Fatal(err)
	} else if len(l) != 0 {
		t.Fatalf("len(Map()) should return %d, not %d", 0, len(l))
	}
}

// Test that Register can be interrupted cleanly without side effects
func TestInterruptedRegister(t *testing.T) {
	graph := tempGraph(t)
	defer os.RemoveAll(graph.Root)
	badArchive, w := io.Pipe() // Use a pipe reader as a fake archive which never yields data
	image := &Image{
		ID:      GenerateID(),
		Comment: "testing",
		Created: time.Now(),
	}
	go graph.Register(nil, badArchive, image)
	time.Sleep(200 * time.Millisecond)
	w.CloseWithError(errors.New("But I'm not a tarball!")) // (Nobody's perfect, darling)
	if _, err := graph.Get(image.ID); err == nil {
		t.Fatal("Image should not exist after Register is interrupted")
	}
	// Registering the same image again should succeed if the first register was interrupted
	goodArchive, err := fakeTar()
	if err != nil {
		t.Fatal(err)
	}
	if err := graph.Register(nil, goodArchive, image); err != nil {
		t.Fatal(err)
	}
}

// FIXME: Do more extensive tests (ex: create multiple, delete, recreate;
// create multiple, check the amount of images and paths, etc..)
func TestGraphCreate(t *testing.T) {
	graph := tempGraph(t)
	defer os.RemoveAll(graph.Root)
	archive, err := fakeTar()
	if err != nil {
		t.Fatal(err)
	}
	image, err := graph.Create(archive, nil, "Testing", "", nil)
	if err != nil {
		t.Fatal(err)
	}
	if err := ValidateID(image.ID); err != nil {
		t.Fatal(err)
	}
	if image.Comment != "Testing" {
		t.Fatalf("Wrong comment: should be '%s', not '%s'", "Testing", image.Comment)
	}
	if image.DockerVersion != VERSION {
		t.Fatalf("Wrong docker_version: should be '%s', not '%s'", VERSION, image.DockerVersion)
	}
	images, err := graph.Map()
	if err != nil {
		t.Fatal(err)
	} else if l := len(images); l != 1 {
		t.Fatalf("Wrong number of images. Should be %d, not %d", 1, l)
	}
	if images[image.ID] == nil {
		t.Fatalf("Could not find image with id %s", image.ID)
	}
}

func TestRegister(t *testing.T) {
	graph := tempGraph(t)
	defer os.RemoveAll(graph.Root)
	archive, err := fakeTar()
	if err != nil {
		t.Fatal(err)
	}
	image := &Image{
		ID:      GenerateID(),
		Comment: "testing",
		Created: time.Now(),
	}
	err = graph.Register(nil, archive, image)
	if err != nil {
		t.Fatal(err)
	}
	if images, err := graph.Map(); err != nil {
		t.Fatal(err)
	} else if l := len(images); l != 1 {
		t.Fatalf("Wrong number of images. Should be %d, not %d", 1, l)
	}
	if resultImg, err := graph.Get(image.ID); err != nil {
		t.Fatal(err)
	} else {
		if resultImg.ID != image.ID {
			t.Fatalf("Wrong image ID. Should be '%s', not '%s'", image.ID, resultImg.ID)
		}
		if resultImg.Comment != image.Comment {
			t.Fatalf("Wrong image comment. Should be '%s', not '%s'", image.Comment, resultImg.Comment)
		}
	}
}

// Test that an image can be deleted by its shorthand prefix
func TestDeletePrefix(t *testing.T) {
	graph := tempGraph(t)
	defer os.RemoveAll(graph.Root)
	img := createTestImage(graph, t)
	if err := graph.Delete(utils.TruncateID(img.ID)); err != nil {
		t.Fatal(err)
	}
	assertNImages(graph, t, 0)
}

func createTestImage(graph *Graph, t *testing.T) *Image {
	archive, err := fakeTar()
	if err != nil {
		t.Fatal(err)
	}
	img, err := graph.Create(archive, nil, "Test image", "", nil)
	if err != nil {
		t.Fatal(err)
	}
	return img
}

func TestDelete(t *testing.T) {
	graph := tempGraph(t)
	defer os.RemoveAll(graph.Root)
	archive, err := fakeTar()
	if err != nil {
		t.Fatal(err)
	}
	assertNImages(graph, t, 0)
	img, err := graph.Create(archive, nil, "Bla bla", "", nil)
	if err != nil {
		t.Fatal(err)
	}
	assertNImages(graph, t, 1)
	if err := graph.Delete(img.ID); err != nil {
		t.Fatal(err)
	}
	assertNImages(graph, t, 0)

	archive, err = fakeTar()
	if err != nil {
		t.Fatal(err)
	}
	// Test 2 create (same name) / 1 delete
	img1, err := graph.Create(archive, nil, "Testing", "", nil)
	if err != nil {
		t.Fatal(err)
	}
	archive, err = fakeTar()
	if err != nil {
		t.Fatal(err)
	}
	if _, err = graph.Create(archive, nil, "Testing", "", nil); err != nil {
		t.Fatal(err)
	}
	assertNImages(graph, t, 2)
	if err := graph.Delete(img1.ID); err != nil {
		t.Fatal(err)
	}
	assertNImages(graph, t, 1)

	// Test delete wrong name
	if err := graph.Delete("Not_foo"); err == nil {
		t.Fatalf("Deleting wrong ID should return an error")
	}
	assertNImages(graph, t, 1)

	archive, err = fakeTar()
	if err != nil {
		t.Fatal(err)
	}
	// Test delete twice (pull -> rm -> pull -> rm)
	if err := graph.Register(nil, archive, img1); err != nil {
		t.Fatal(err)
	}
	if err := graph.Delete(img1.ID); err != nil {
		t.Fatal(err)
	}
	assertNImages(graph, t, 1)
}

func TestByParent(t *testing.T) {
	archive1, _ := fakeTar()
	archive2, _ := fakeTar()
	archive3, _ := fakeTar()

	graph := tempGraph(t)
	defer os.RemoveAll(graph.Root)
	parentImage := &Image{
		ID:      GenerateID(),
		Comment: "parent",
		Created: time.Now(),
		Parent:  "",
	}
	childImage1 := &Image{
		ID:      GenerateID(),
		Comment: "child1",
		Created: time.Now(),
		Parent:  parentImage.ID,
	}
	childImage2 := &Image{
		ID:      GenerateID(),
		Comment: "child2",
		Created: time.Now(),
		Parent:  parentImage.ID,
	}
	_ = graph.Register(nil, archive1, parentImage)
	_ = graph.Register(nil, archive2, childImage1)
	_ = graph.Register(nil, archive3, childImage2)

	byParent, err := graph.ByParent()
	if err != nil {
		t.Fatal(err)
	}
	numChildren := len(byParent[parentImage.ID])
	if numChildren != 2 {
		t.Fatalf("Expected 2 children, found %d", numChildren)
	}
}

func assertNImages(graph *Graph, t *testing.T, n int) {
	if images, err := graph.Map(); err != nil {
		t.Fatal(err)
	} else if actualN := len(images); actualN != n {
		t.Fatalf("Expected %d images, found %d", n, actualN)
	}
}

/*
 * HELPER FUNCTIONS
 */

func tempGraph(t *testing.T) *Graph {
	tmp, err := ioutil.TempDir("", "docker-graph-")
	if err != nil {
		t.Fatal(err)
	}
	backend, err := graphdriver.New(tmp)
	if err != nil {
		t.Fatal(err)
	}
	graph, err := NewGraph(tmp, backend)
	if err != nil {
		t.Fatal(err)
	}
	return graph
}

func testArchive(t *testing.T) archive.Archive {
	archive, err := fakeTar()
	if err != nil {
		t.Fatal(err)
	}
	return archive
}

func fakeTar() (io.Reader, error) {
	content := []byte("Hello world!\n")
	buf := new(bytes.Buffer)
	tw := tar.NewWriter(buf)
	for _, name := range []string{"/etc/postgres/postgres.conf", "/etc/passwd", "/var/log/postgres/postgres.conf"} {
		hdr := new(tar.Header)
		hdr.Size = int64(len(content))
		hdr.Name = name
		if err := tw.WriteHeader(hdr); err != nil {
			return nil, err
		}
		tw.Write([]byte(content))
	}
	tw.Close()
	return buf, nil
}
@@ -26,11 +26,11 @@ import (
	"github.com/dotcloud/docker/archive"
	"github.com/dotcloud/docker/graphdriver"
	"github.com/dotcloud/docker/utils"
	"log"
	"os"
	"path"
	"strings"
	"syscall"
)

func init() {
@@ -103,8 +103,12 @@ func (Driver) String() string {
	return "aufs"
}

func (a Driver) Status() [][2]string {
	ids, _ := loadIds(path.Join(a.rootPath(), "layers"))
	return [][2]string{
		{"Root Dir", a.rootPath()},
		{"Dirs", fmt.Sprintf("%d", len(ids))},
	}
}

// Exists returns true if the given id is registered with
@@ -309,24 +313,44 @@ func (a *Driver) Cleanup() error {
	return nil
}

func (a *Driver) aufsMount(ro []string, rw, target string) (err error) {
	defer func() {
		if err != nil {
			Unmount(target)
		}
	}()

	if err = a.tryMount(ro, rw, target); err != nil {
		if err = a.mountRw(rw, target); err != nil {
			return
		}

		for _, layer := range ro {
			branch := fmt.Sprintf("append:%s=ro+wh", layer)
			if err = mount("none", target, "aufs", syscall.MS_REMOUNT, branch); err != nil {
				return
			}
		}
	}
	return
}

// Try to mount using the aufs fast path, if this fails then
// append ro layers.
func (a *Driver) tryMount(ro []string, rw, target string) (err error) {
	var (
		rwBranch   = fmt.Sprintf("%s=rw", rw)
		roBranches = fmt.Sprintf("%s=ro+wh:", strings.Join(ro, "=ro+wh:"))
	)
	return mount("none", target, "aufs", 0, fmt.Sprintf("br:%v:%v,xino=/dev/shm/aufs.xino", rwBranch, roBranches))
}

func (a *Driver) mountRw(rw, target string) error {
	return mount("none", target, "aufs", 0, fmt.Sprintf("br:%s,xino=/dev/shm/aufs.xino", rw))
}

func rollbackMount(target string, err error) {
	if err != nil {
		Unmount(target)
	}
}
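For reference, the mount data string that tryMount hands to the kernel is plain string assembly; a self-contained sketch with hypothetical layer paths:

ro := []string{"/var/lib/docker/aufs/diff/a", "/var/lib/docker/aufs/diff/b"}
rw := "/var/lib/docker/aufs/diff/top"
rwBranch := fmt.Sprintf("%s=rw", rw)
roBranches := fmt.Sprintf("%s=ro+wh:", strings.Join(ro, "=ro+wh:"))
data := fmt.Sprintf("br:%v:%v,xino=/dev/shm/aufs.xino", rwBranch, roBranches)
// data == "br:/var/lib/docker/aufs/diff/top=rw:/var/lib/docker/aufs/diff/a=ro+wh:/var/lib/docker/aufs/diff/b=ro+wh:,xino=/dev/shm/aufs.xino"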
@@ -1,7 +1,11 @@
package aufs

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"github.com/dotcloud/docker/archive"
	"io/ioutil"
	"os"
	"path"
	"testing"
@@ -446,7 +450,9 @@ func TestDiffSize(t *testing.T) {
	if err != nil {
		t.Fatal(err)
	}
	if err := f.Truncate(size); err != nil {
		t.Fatal(err)
	}
	s, err := f.Stat()
	if err != nil {
		t.Fatal(err)
@@ -465,6 +471,108 @@ func TestDiffSize(t *testing.T) {
	}
}

func TestChildDiffSize(t *testing.T) {
	d := newDriver(t)
	defer os.RemoveAll(tmp)
	defer d.Cleanup()

	if err := d.Create("1", ""); err != nil {
		t.Fatal(err)
	}

	diffPath, err := d.Get("1")
	if err != nil {
		t.Fatal(err)
	}

	// Add a file to the diff path with a fixed size
	size := int64(1024)

	f, err := os.Create(path.Join(diffPath, "test_file"))
	if err != nil {
		t.Fatal(err)
	}
	if err := f.Truncate(size); err != nil {
		t.Fatal(err)
	}
	s, err := f.Stat()
	if err != nil {
		t.Fatal(err)
	}
	size = s.Size()
	if err := f.Close(); err != nil {
		t.Fatal(err)
	}

	diffSize, err := d.DiffSize("1")
	if err != nil {
		t.Fatal(err)
	}
	if diffSize != size {
		t.Fatalf("Expected size to be %d got %d", size, diffSize)
	}

	if err := d.Create("2", "1"); err != nil {
		t.Fatal(err)
	}

	diffSize, err = d.DiffSize("2")
	if err != nil {
		t.Fatal(err)
	}
	// The diff size for the child should be zero
	if diffSize != 0 {
		t.Fatalf("Expected size to be %d got %d", 0, diffSize)
	}
}

func TestExists(t *testing.T) {
	d := newDriver(t)
	defer os.RemoveAll(tmp)
	defer d.Cleanup()

	if err := d.Create("1", ""); err != nil {
		t.Fatal(err)
	}

	if d.Exists("none") {
		t.Fatal("id name should not exist in the driver")
	}

	if !d.Exists("1") {
		t.Fatal("id 1 should exist in the driver")
	}
}

func TestStatus(t *testing.T) {
	d := newDriver(t)
	defer os.RemoveAll(tmp)
	defer d.Cleanup()

	if err := d.Create("1", ""); err != nil {
		t.Fatal(err)
	}

	status := d.Status()
	if status == nil || len(status) == 0 {
		t.Fatal("Status should not be nil or empty")
	}
	rootDir := status[0]
	dirs := status[1]
	if rootDir[0] != "Root Dir" {
		t.Fatalf("Expected Root Dir got %s", rootDir[0])
	}
	if rootDir[1] != d.rootPath() {
		t.Fatalf("Expected %s got %s", d.rootPath(), rootDir[1])
	}
	if dirs[0] != "Dirs" {
		t.Fatalf("Expected Dirs got %s", dirs[0])
	}
	if dirs[1] != "1" {
		t.Fatalf("Expected 1 got %s", dirs[1])
	}
}

func TestApplyDiff(t *testing.T) {
	d := newDriver(t)
	defer os.RemoveAll(tmp)
@@ -486,7 +594,9 @@ func TestApplyDiff(t *testing.T) {
	if err != nil {
		t.Fatal(err)
	}
	if err := f.Truncate(size); err != nil {
		t.Fatal(err)
	}
	f.Close()

	diff, err := d.Diff("1")
@@ -515,3 +625,70 @@ func TestApplyDiff(t *testing.T) {
		t.Fatal(err)
	}
}

func hash(c string) string {
	h := sha256.New()
	fmt.Fprint(h, c)
	return hex.EncodeToString(h.Sum(nil))
}

func TestMountMoreThan42Layers(t *testing.T) {
	d := newDriver(t)
	defer os.RemoveAll(tmp)
	defer d.Cleanup()
	var last string
	var expected int

	for i := 1; i < 127; i++ {
		expected++
		var (
			parent  = fmt.Sprintf("%d", i-1)
			current = fmt.Sprintf("%d", i)
		)

		if parent == "0" {
			parent = ""
		} else {
			parent = hash(parent)
		}
		current = hash(current)

		if err := d.Create(current, parent); err != nil {
			t.Logf("Current layer %d", i)
			t.Fatal(err)
		}
		point, err := d.Get(current)
		if err != nil {
			t.Logf("Current layer %d", i)
			t.Fatal(err)
		}
		f, err := os.Create(path.Join(point, current))
		if err != nil {
			t.Logf("Current layer %d", i)
			t.Fatal(err)
		}
		f.Close()

		if i%10 == 0 {
			if err := os.Remove(path.Join(point, parent)); err != nil {
				t.Logf("Current layer %d", i)
				t.Fatal(err)
			}
			expected--
		}
		last = current
	}

	// Perform the actual mount for the top most image
	point, err := d.Get(last)
	if err != nil {
		t.Fatal(err)
	}
	files, err := ioutil.ReadDir(point)
	if err != nil {
		t.Fatal(err)
	}
	if len(files) != expected {
		t.Fatalf("Expected %d got %d", expected, len(files))
	}
}
@@ -38,6 +38,9 @@ func pathExists(pth string) bool {
// symlink.
func (a *Driver) Migrate(pth string, setupInit func(p string) error) error {
	if pathExists(path.Join(pth, "graph")) {
		if err := a.migrateRepositories(pth); err != nil {
			return err
		}
		if err := a.migrateImages(path.Join(pth, "graph")); err != nil {
			return err
		}
@@ -46,6 +49,14 @@ func (a *Driver) Migrate(pth string, setupInit func(p string) error) error {
	return nil
}

func (a *Driver) migrateRepositories(pth string) error {
	name := path.Join(pth, "repositories")
	if err := os.Rename(name, name+"-aufs"); err != nil && !os.IsNotExist(err) {
		return err
	}
	return nil
}

func (a *Driver) migrateContainers(pth string, setupInit func(p string) error) error {
	fis, err := ioutil.ReadDir(pth)
	if err != nil {
@@ -2,6 +2,6 @@ package aufs

import "syscall"

func mount(source string, target string, fstype string, flags uintptr, data string) error {
	return syscall.Mount(source, target, fstype, flags, data)
}
126	graphdriver/devmapper/attach_loopback.go	Normal file
@@ -0,0 +1,126 @@
// +build linux

package devmapper

import (
	"fmt"
	"github.com/dotcloud/docker/utils"
)

func stringToLoopName(src string) [LoNameSize]uint8 {
	var dst [LoNameSize]uint8
	copy(dst[:], src[:])
	return dst
}

func getNextFreeLoopbackIndex() (int, error) {
	f, err := osOpenFile("/dev/loop-control", osORdOnly, 0644)
	if err != nil {
		return 0, err
	}
	defer f.Close()

	index, err := ioctlLoopCtlGetFree(f.Fd())
	if index < 0 {
		index = 0
	}
	return index, err
}

func openNextAvailableLoopback(index int, sparseFile *osFile) (loopFile *osFile, err error) {
	// Start looking for a free /dev/loop
	for {
		target := fmt.Sprintf("/dev/loop%d", index)
		index++

		fi, err := osStat(target)
		if err != nil {
			if osIsNotExist(err) {
				utils.Errorf("There are no more loopback devices available.")
			}
			return nil, ErrAttachLoopbackDevice
		}

		if fi.Mode()&osModeDevice != osModeDevice {
			utils.Errorf("Loopback device %s is not a block device.", target)
			continue
		}

		// OpenFile adds O_CLOEXEC
		loopFile, err = osOpenFile(target, osORdWr, 0644)
		if err != nil {
			utils.Errorf("Error opening loopback device: %s", err)
			return nil, ErrAttachLoopbackDevice
		}

		// Try to attach to the loop file
		if err := ioctlLoopSetFd(loopFile.Fd(), sparseFile.Fd()); err != nil {
			loopFile.Close()

			// If the error is EBUSY, then try the next loopback
			if err != sysEBusy {
				utils.Errorf("Cannot set up loopback device %s: %s", target, err)
				return nil, ErrAttachLoopbackDevice
			}

			// Otherwise, we keep going with the loop
			continue
		}
		// In case of success, we finished. Break the loop.
		break
	}

	// This can't happen, but let's be sure
	if loopFile == nil {
		utils.Errorf("Unreachable code reached! Error attaching %s to a loopback device.", sparseFile.Name())
		return nil, ErrAttachLoopbackDevice
	}

	return loopFile, nil
}

// attachLoopDevice attaches the given sparse file to the next
// available loopback device. It returns an opened *osFile.
func attachLoopDevice(sparseName string) (loop *osFile, err error) {

	// Try to retrieve the next available loopback device via syscall.
	// If it fails, we discard the error and start looking for a
	// loopback from index 0.
	startIndex, err := getNextFreeLoopbackIndex()
	if err != nil {
		utils.Debugf("Error retrieving the next available loopback: %s", err)
	}

	// OpenFile adds O_CLOEXEC
	sparseFile, err := osOpenFile(sparseName, osORdWr, 0644)
	if err != nil {
		utils.Errorf("Error opening sparse file %s: %s", sparseName, err)
		return nil, ErrAttachLoopbackDevice
	}
	defer sparseFile.Close()

	loopFile, err := openNextAvailableLoopback(startIndex, sparseFile)
	if err != nil {
		return nil, err
	}

	// Set the status of the loopback device
	loopInfo := &LoopInfo64{
		loFileName: stringToLoopName(loopFile.Name()),
		loOffset:   0,
		loFlags:    LoFlagsAutoClear,
	}

	if err := ioctlLoopSetStatus64(loopFile.Fd(), loopInfo); err != nil {
		utils.Errorf("Cannot set up loopback device info: %s", err)

		// If the call failed, then free the loopback device
		if err := ioctlLoopClrFd(loopFile.Fd()); err != nil {
			utils.Errorf("Error while cleaning up the loopback device")
		}
		loopFile.Close()
		return nil, ErrAttachLoopbackDevice
	}

	return loopFile, nil
}
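A short usage sketch for the helper above (the path is illustrative only; the backing file must already exist and be sized, and GetBlockDeviceSize is the wrapper shown further down in this diff):

loop, err := attachLoopDevice("/var/lib/docker/devicemapper/devicemapper/data")
if err != nil {
	return err // no free /dev/loopN, or the loop ioctls failed
}
defer loop.Close()
if size, err := GetBlockDeviceSize(loop); err == nil {
	utils.Debugf("loop device size: %d bytes", size)
}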
@@ -1,18 +1,18 @@
|
||||
// +build linux
|
||||
|
||||
package devmapper
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/dotcloud/docker/utils"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
@@ -105,7 +105,7 @@ func (devices *DeviceSet) hasImage(name string) bool {
|
||||
dirname := devices.loopbackDir()
|
||||
filename := path.Join(dirname, name)
|
||||
|
||||
_, err := os.Stat(filename)
|
||||
_, err := osStat(filename)
|
||||
return err == nil
|
||||
}
|
||||
|
||||
@@ -117,16 +117,16 @@ func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) {
|
||||
dirname := devices.loopbackDir()
|
||||
filename := path.Join(dirname, name)
|
||||
|
||||
if err := os.MkdirAll(dirname, 0700); err != nil && !os.IsExist(err) {
|
||||
if err := osMkdirAll(dirname, 0700); err != nil && !osIsExist(err) {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if _, err := os.Stat(filename); err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
if _, err := osStat(filename); err != nil {
|
||||
if !osIsNotExist(err) {
|
||||
return "", err
|
||||
}
|
||||
utils.Debugf("Creating loopback file %s for device-manage use", filename)
|
||||
file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600)
|
||||
file, err := osOpenFile(filename, osORdWr|osOCreate, 0600)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
@@ -174,8 +174,8 @@ func (devices *DeviceSet) saveMetadata() error {
|
||||
if err := tmpFile.Close(); err != nil {
|
||||
return fmt.Errorf("Error closing metadata file %s: %s", tmpFile.Name(), err)
|
||||
}
|
||||
if err := os.Rename(tmpFile.Name(), devices.jsonFile()); err != nil {
|
||||
return fmt.Errorf("Error committing metadata file", err)
|
||||
if err := osRename(tmpFile.Name(), devices.jsonFile()); err != nil {
|
||||
return fmt.Errorf("Error committing metadata file %s: %s", tmpFile.Name(), err)
|
||||
}
|
||||
|
||||
if devices.NewTransactionId != devices.TransactionId {
|
||||
@@ -225,9 +225,9 @@ func (devices *DeviceSet) activateDeviceIfNeeded(hash string) error {
|
||||
func (devices *DeviceSet) createFilesystem(info *DevInfo) error {
|
||||
devname := info.DevName()
|
||||
|
||||
err := exec.Command("mkfs.ext4", "-E", "discard,lazy_itable_init=0,lazy_journal_init=0", devname).Run()
|
||||
err := execRun("mkfs.ext4", "-E", "discard,lazy_itable_init=0,lazy_journal_init=0", devname)
|
||||
if err != nil {
|
||||
err = exec.Command("mkfs.ext4", "-E", "discard,lazy_itable_init=0", devname).Run()
|
||||
err = execRun("mkfs.ext4", "-E", "discard,lazy_itable_init=0", devname)
|
||||
}
|
||||
if err != nil {
|
||||
utils.Debugf("\n--->Err: %s\n", err)
|
||||
@@ -252,7 +252,7 @@ func (devices *DeviceSet) loadMetaData() error {
|
||||
devices.NewTransactionId = devices.TransactionId
|
||||
|
||||
jsonData, err := ioutil.ReadFile(devices.jsonFile())
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
if err != nil && !osIsNotExist(err) {
|
||||
utils.Debugf("\n--->Err: %s\n", err)
|
||||
return err
|
||||
}
|
||||
@@ -337,14 +337,13 @@ func (devices *DeviceSet) setupBaseImage() error {
|
||||
}
|
||||
|
||||
func setCloseOnExec(name string) {
|
||||
fileInfos, _ := ioutil.ReadDir("/proc/self/fd")
|
||||
if fileInfos != nil {
|
||||
if fileInfos, _ := ioutil.ReadDir("/proc/self/fd"); fileInfos != nil {
|
||||
for _, i := range fileInfos {
|
||||
link, _ := os.Readlink(filepath.Join("/proc/self/fd", i.Name()))
|
||||
link, _ := osReadlink(filepath.Join("/proc/self/fd", i.Name()))
|
||||
if link == name {
|
||||
fd, err := strconv.Atoi(i.Name())
|
||||
if err == nil {
|
||||
syscall.CloseOnExec(fd)
|
||||
sysCloseOnExec(fd)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -372,7 +371,7 @@ func (devices *DeviceSet) ResizePool(size int64) error {
|
||||
datafilename := path.Join(dirname, "data")
|
||||
metadatafilename := path.Join(dirname, "metadata")
|
||||
|
||||
datafile, err := os.OpenFile(datafilename, os.O_RDWR, 0)
|
||||
datafile, err := osOpenFile(datafilename, osORdWr, 0)
|
||||
if datafile == nil {
|
||||
return err
|
||||
}
|
||||
@@ -393,7 +392,7 @@ func (devices *DeviceSet) ResizePool(size int64) error {
|
||||
}
|
||||
defer dataloopback.Close()
|
||||
|
||||
metadatafile, err := os.OpenFile(metadatafilename, os.O_RDWR, 0)
|
||||
metadatafile, err := osOpenFile(metadatafilename, osORdWr, 0)
|
||||
if metadatafile == nil {
|
||||
return err
|
||||
}
|
||||
@@ -443,11 +442,11 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
|
||||
hasMetadata := devices.hasImage("metadata")
|
||||
|
||||
if !doInit && !hasData {
|
||||
return fmt.Errorf("Looback data file not found %s")
|
||||
return errors.New("Loopback data file not found")
|
||||
}
|
||||
|
||||
if !doInit && !hasMetadata {
|
||||
return fmt.Errorf("Looback metadata file not found %s")
|
||||
return errors.New("Loopback metadata file not found")
|
||||
}
|
||||
|
||||
createdLoopback := !hasData || !hasMetadata
|
||||
@@ -464,11 +463,11 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
|
||||
|
||||
// Set the device prefix from the device id and inode of the docker root dir
|
||||
|
||||
st, err := os.Stat(devices.root)
|
||||
st, err := osStat(devices.root)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error looking up dir %s: %s", devices.root, err)
|
||||
}
|
||||
sysSt := st.Sys().(*syscall.Stat_t)
|
||||
sysSt := toSysStatT(st.Sys())
|
||||
// "reg-" stands for "regular file".
|
||||
// In the future we might use "dev-" for "device file", etc.
|
||||
// docker-maj,min[-inode] stands for:
|
||||
@@ -495,14 +494,14 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
|
||||
if info.Exists == 0 {
|
||||
utils.Debugf("Pool doesn't exist. Creating it.")
|
||||
|
||||
dataFile, err := AttachLoopDevice(data)
|
||||
dataFile, err := attachLoopDevice(data)
|
||||
if err != nil {
|
||||
utils.Debugf("\n--->Err: %s\n", err)
|
||||
return err
|
||||
}
|
||||
defer dataFile.Close()
|
||||
|
||||
metadataFile, err := AttachLoopDevice(metadata)
|
||||
metadataFile, err := attachLoopDevice(metadata)
|
||||
if err != nil {
|
||||
utils.Debugf("\n--->Err: %s\n", err)
|
||||
return err
|
||||
@@ -641,7 +640,7 @@ func (devices *DeviceSet) deactivateDevice(hash string) error {
|
||||
// or b) the 1 second timeout expires.
|
||||
func (devices *DeviceSet) waitRemove(hash string) error {
|
||||
utils.Debugf("[deviceset %s] waitRemove(%s)", devices.devicePrefix, hash)
|
||||
defer utils.Debugf("[deviceset %s] waitRemove END", devices.devicePrefix, hash)
|
||||
defer utils.Debugf("[deviceset %s] waitRemove(%) END", devices.devicePrefix, hash)
|
||||
devname, err := devices.byHash(hash)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -654,10 +653,13 @@ func (devices *DeviceSet) waitRemove(hash string) error {
|
||||
// The error might actually be something else, but we can't differentiate.
|
||||
return nil
|
||||
}
|
||||
utils.Debugf("Waiting for removal of %s: exists=%d", devname, devinfo.Exists)
|
||||
if i%100 == 0 {
|
||||
utils.Debugf("Waiting for removal of %s: exists=%d", devname, devinfo.Exists)
|
||||
}
|
||||
if devinfo.Exists == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
time.Sleep(1 * time.Millisecond)
|
||||
}
|
||||
if i == 1000 {
|
||||
@@ -680,7 +682,9 @@ func (devices *DeviceSet) waitClose(hash string) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
utils.Debugf("Waiting for unmount of %s: opencount=%d", devname, devinfo.OpenCount)
|
||||
if i%100 == 0 {
|
||||
utils.Debugf("Waiting for unmount of %s: opencount=%d", devname, devinfo.OpenCount)
|
||||
}
|
||||
if devinfo.OpenCount == 0 {
|
||||
break
|
||||
}
|
||||
@@ -708,15 +712,16 @@ func (devices *DeviceSet) byHash(hash string) (devname string, err error) {
|
||||
}
|
||||
|
||||
func (devices *DeviceSet) Shutdown() error {
|
||||
utils.Debugf("[deviceset %s] shutdown()", devices.devicePrefix)
|
||||
defer utils.Debugf("[deviceset %s] shutdown END", devices.devicePrefix)
|
||||
devices.Lock()
|
||||
utils.Debugf("[devmapper] Shutting down DeviceSet: %s", devices.root)
|
||||
defer devices.Unlock()
|
||||
|
||||
utils.Debugf("[deviceset %s] shutdown()", devices.devicePrefix)
|
||||
utils.Debugf("[devmapper] Shutting down DeviceSet: %s", devices.root)
|
||||
defer utils.Debugf("[deviceset %s] shutdown END", devices.devicePrefix)
|
||||
|
||||
for path, count := range devices.activeMounts {
|
||||
for i := count; i > 0; i-- {
|
||||
if err := syscall.Unmount(path, 0); err != nil {
|
||||
if err := sysUnmount(path, 0); err != nil {
|
||||
utils.Debugf("Shutdown unmounting %s, error: %s\n", path, err)
|
||||
}
|
||||
}
|
||||
@@ -752,15 +757,15 @@ func (devices *DeviceSet) MountDevice(hash, path string, readOnly bool) error {
|
||||
|
||||
info := devices.Devices[hash]
|
||||
|
||||
var flags uintptr = syscall.MS_MGC_VAL
|
||||
var flags uintptr = sysMsMgcVal
|
||||
|
||||
if readOnly {
|
||||
flags = flags | syscall.MS_RDONLY
|
||||
flags = flags | sysMsRdOnly
|
||||
}
|
||||
|
||||
err := syscall.Mount(info.DevName(), path, "ext4", flags, "discard")
|
||||
if err != nil && err == syscall.EINVAL {
|
||||
err = syscall.Mount(info.DevName(), path, "ext4", flags, "")
|
||||
err := sysMount(info.DevName(), path, "ext4", flags, "discard")
|
||||
if err != nil && err == sysEInval {
|
||||
err = sysMount(info.DevName(), path, "ext4", flags, "")
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error mounting '%s' on '%s': %s", info.DevName(), path, err)
|
||||
@@ -779,7 +784,7 @@ func (devices *DeviceSet) UnmountDevice(hash, path string, deactivate bool) erro
|
||||
defer devices.Unlock()
|
||||
|
||||
utils.Debugf("[devmapper] Unmount(%s)", path)
|
||||
if err := syscall.Unmount(path, 0); err != nil {
|
||||
if err := sysUnmount(path, 0); err != nil {
|
||||
utils.Debugf("\n--->Err: %s\n", err)
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -1,12 +1,12 @@
// +build linux

package devmapper

import (
"errors"
"fmt"
"github.com/dotcloud/docker/utils"
"os"
"runtime"
"syscall"
)

type DevmapperLogger interface {
@@ -49,7 +49,6 @@ var (
ErrTaskAddTarget = errors.New("dm_task_add_target failed")
ErrTaskSetSector = errors.New("dm_task_set_sector failed")
ErrTaskGetInfo = errors.New("dm_task_get_info failed")
ErrTaskGetDriverVersion = errors.New("dm_task_get_driver_version failed")
ErrTaskSetCookie = errors.New("dm_task_set_cookie failed")
ErrNilCookie = errors.New("cookie ptr can't be nil")
ErrAttachLoopbackDevice = errors.New("loopback mounting failed")
@@ -86,7 +85,7 @@ type (

func (t *Task) destroy() {
if t != nil {
DmTaskDestory(t.unmanaged)
DmTaskDestroy(t.unmanaged)
runtime.SetFinalizer(t, nil)
}
}
@@ -180,45 +179,37 @@ func (t *Task) GetNextTarget(next uintptr) (nextPtr uintptr, start uint64,
start, length, targetType, params
}

func AttachLoopDevice(filename string) (*os.File, error) {
var fd int
res := DmAttachLoopDevice(filename, &fd)
if res == "" {
return nil, ErrAttachLoopbackDevice
}
return os.NewFile(uintptr(fd), res), nil
}

func getLoopbackBackingFile(file *os.File) (uint64, uint64, error) {
dev, inode, err := dmGetLoopbackBackingFile(file.Fd())
if err != 0 {
func getLoopbackBackingFile(file *osFile) (uint64, uint64, error) {
loopInfo, err := ioctlLoopGetStatus64(file.Fd())
if err != nil {
utils.Errorf("Error get loopback backing file: %s\n", err)
return 0, 0, ErrGetLoopbackBackingFile
}
return dev, inode, nil
return loopInfo.loDevice, loopInfo.loInode, nil
}

func LoopbackSetCapacity(file *os.File) error {
err := dmLoopbackSetCapacity(file.Fd())
if err != 0 {
func LoopbackSetCapacity(file *osFile) error {
if err := ioctlLoopSetCapacity(file.Fd(), 0); err != nil {
utils.Errorf("Error loopbackSetCapacity: %s", err)
return ErrLoopbackSetCapacity
}
return nil
}

func FindLoopDeviceFor(file *os.File) *os.File {
func FindLoopDeviceFor(file *osFile) *osFile {
stat, err := file.Stat()
if err != nil {
return nil
}
targetInode := stat.Sys().(*syscall.Stat_t).Ino
targetDevice := stat.Sys().(*syscall.Stat_t).Dev
targetInode := stat.Sys().(*sysStatT).Ino
targetDevice := stat.Sys().(*sysStatT).Dev

for i := 0; true; i++ {
path := fmt.Sprintf("/dev/loop%d", i)

file, err := os.OpenFile(path, os.O_RDWR, 0)
file, err := osOpenFile(path, osORdWr, 0)
if err != nil {
if os.IsNotExist(err) {
if osIsNotExist(err) {
return nil
}

@@ -231,7 +222,6 @@ func FindLoopDeviceFor(file *os.File) *os.File {
if err == nil && dev == targetDevice && inode == targetInode {
return file
}

file.Close()
}

@@ -289,16 +279,17 @@ func RemoveDevice(name string) error {
return nil
}

func GetBlockDeviceSize(file *os.File) (uint64, error) {
size, errno := DmGetBlockSize(file.Fd())
if size == -1 || errno != 0 {
func GetBlockDeviceSize(file *osFile) (uint64, error) {
size, err := ioctlBlkGetSize64(file.Fd())
if err != nil {
utils.Errorf("Error getblockdevicesize: %s", err)
return 0, ErrGetBlockSize
}
return uint64(size), nil
}

// This is the programmatic example of "dmsetup create"
func createPool(poolName string, dataFile *os.File, metadataFile *os.File) error {
func createPool(poolName string, dataFile, metadataFile *osFile) error {
task, err := createTask(DeviceCreate, poolName)
if task == nil {
return err
@@ -328,7 +319,7 @@ func createPool(poolName string, dataFile *os.File, metadataFile *os.File) error
return nil
}

func reloadPool(poolName string, dataFile *os.File, metadataFile *os.File) error {
func reloadPool(poolName string, dataFile, metadataFile *osFile) error {
task, err := createTask(DeviceReload, poolName)
if task == nil {
return err
@@ -394,8 +385,8 @@ func getStatus(name string) (uint64, uint64, string, string, error) {
return 0, 0, "", "", fmt.Errorf("Non existing device %s", name)
}

_, start, length, target_type, params := task.GetNextTarget(0)
return start, length, target_type, params, nil
_, start, length, targetType, params := task.GetNextTarget(0)
return start, length, targetType, params, nil
}

func setTransactionId(poolName string, oldId uint64, newId uint64) error {
@@ -424,7 +415,7 @@ func suspendDevice(name string) error {
return err
}
if err := task.Run(); err != nil {
return fmt.Errorf("Error running DeviceSuspend")
return fmt.Errorf("Error running DeviceSuspend: %s", err)
}
return nil
}
@@ -441,7 +432,7 @@ func resumeDevice(name string) error {
}

if err := task.Run(); err != nil {
return fmt.Errorf("Error running DeviceSuspend")
return fmt.Errorf("Error running DeviceResume")
}

UdevWait(cookie)

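The pool and device helpers above (createPool, reloadPool, suspendDevice, resumeDevice) all follow the same libdevmapper task protocol: create a task of the right type, set its parameters, run it, and wait on a udev cookie when the operation changes device nodes. A hedged sketch of that sequence, reusing createTask and UdevWait from this package; the Task method names are inferred from the Err* values above, and the target parameters are illustrative only:

    // Sketch of the dm task call sequence; not a real thin-pool table.
    func createExampleDevice(name string, sizeSectors uint64) error {
        task, err := createTask(DeviceCreate, name)
        if task == nil {
            return err
        }
        if err := task.AddTarget(0, sizeSectors, "thin", "pool params here"); err != nil {
            return err
        }
        var cookie uint = 0
        if err := task.SetCookie(&cookie, 0); err != nil {
            return err
        }
        if err := task.Run(); err != nil {
            return fmt.Errorf("Error running DeviceCreate: %s", err)
        }
        UdevWait(cookie) // wait for udev to create the /dev/mapper nodes
        return nil
    }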
graphdriver/devmapper/devmapper_doc.go (new file, 106 lines)
@@ -0,0 +1,106 @@
package devmapper

// Definition of struct dm_task and sub structures (from lvm2)
//
// struct dm_ioctl {
// /*
// * The version number is made up of three parts:
// * major - no backward or forward compatibility,
// * minor - only backwards compatible,
// * patch - both backwards and forwards compatible.
// *
// * All clients of the ioctl interface should fill in the
// * version number of the interface that they were
// * compiled with.
// *
// * All recognised ioctl commands (ie. those that don't
// * return -ENOTTY) fill out this field, even if the
// * command failed.
// */
// uint32_t version[3]; /* in/out */
// uint32_t data_size; /* total size of data passed in
// * including this struct */

// uint32_t data_start; /* offset to start of data
// * relative to start of this struct */

// uint32_t target_count; /* in/out */
// int32_t open_count; /* out */
// uint32_t flags; /* in/out */

// /*
// * event_nr holds either the event number (input and output) or the
// * udev cookie value (input only).
// * The DM_DEV_WAIT ioctl takes an event number as input.
// * The DM_SUSPEND, DM_DEV_REMOVE and DM_DEV_RENAME ioctls
// * use the field as a cookie to return in the DM_COOKIE
// * variable with the uevents they issue.
// * For output, the ioctls return the event number, not the cookie.
// */
// uint32_t event_nr; /* in/out */
// uint32_t padding;

// uint64_t dev; /* in/out */

// char name[DM_NAME_LEN]; /* device name */
// char uuid[DM_UUID_LEN]; /* unique identifier for
// * the block device */
// char data[7]; /* padding or data */
// };

// struct target {
// uint64_t start;
// uint64_t length;
// char *type;
// char *params;

// struct target *next;
// };

// typedef enum {
// DM_ADD_NODE_ON_RESUME, /* add /dev/mapper node with dmsetup resume */
// DM_ADD_NODE_ON_CREATE /* add /dev/mapper node with dmsetup create */
// } dm_add_node_t;

// struct dm_task {
// int type;
// char *dev_name;
// char *mangled_dev_name;

// struct target *head, *tail;

// int read_only;
// uint32_t event_nr;
// int major;
// int minor;
// int allow_default_major_fallback;
// uid_t uid;
// gid_t gid;
// mode_t mode;
// uint32_t read_ahead;
// uint32_t read_ahead_flags;
// union {
// struct dm_ioctl *v4;
// } dmi;
// char *newname;
// char *message;
// char *geometry;
// uint64_t sector;
// int no_flush;
// int no_open_count;
// int skip_lockfs;
// int query_inactive_table;
// int suppress_identical_reload;
// dm_add_node_t add_node;
// uint64_t existing_table_size;
// int cookie_set;
// int new_uuid;
// int secure_data;
// int retry_remove;
// int enable_checks;
// int expected_errno;

// char *uuid;
// char *mangled_uuid;
// };
//
@@ -1,3 +1,5 @@
// +build linux

package devmapper

import "C"

@@ -1,11 +1,13 @@
// +build linux

package devmapper

import (
"syscall"
"testing"
)

func TestTaskCreate(t *testing.T) {
t.Skip("FIXME: not a unit test")
// Test success
taskCreate(t, DeviceInfo)

@@ -18,6 +20,7 @@ func TestTaskCreate(t *testing.T) {
}

func TestTaskRun(t *testing.T) {
t.Skip("FIXME: not a unit test")
task := taskCreate(t, DeviceInfo)

// Test success
@@ -46,6 +49,7 @@ func TestTaskRun(t *testing.T) {
}

func TestTaskSetName(t *testing.T) {
t.Skip("FIXME: not a unit test")
task := taskCreate(t, DeviceInfo)

// Test success
@@ -63,6 +67,7 @@ func TestTaskSetName(t *testing.T) {
}

func TestTaskSetMessage(t *testing.T) {
t.Skip("FIXME: not a unit test")
task := taskCreate(t, DeviceInfo)

// Test success
@@ -80,6 +85,7 @@ func TestTaskSetMessage(t *testing.T) {
}

func TestTaskSetSector(t *testing.T) {
t.Skip("FIXME: not a unit test")
task := taskCreate(t, DeviceInfo)

// Test success
@@ -97,6 +103,7 @@ func TestTaskSetSector(t *testing.T) {
}

func TestTaskSetCookie(t *testing.T) {
t.Skip("FIXME: not a unit test")
var (
cookie uint = 0
task = taskCreate(t, DeviceInfo)
@@ -121,6 +128,7 @@ func TestTaskSetCookie(t *testing.T) {
}

func TestTaskSetAddNode(t *testing.T) {
t.Skip("FIXME: not a unit test")
task := taskCreate(t, DeviceInfo)

// Test success
@@ -142,6 +150,7 @@ func TestTaskSetAddNode(t *testing.T) {
}

func TestTaskSetRo(t *testing.T) {
t.Skip("FIXME: not a unit test")
task := taskCreate(t, DeviceInfo)

// Test success
@@ -159,6 +168,7 @@ func TestTaskSetRo(t *testing.T) {
}

func TestTaskAddTarget(t *testing.T) {
t.Skip("FIXME: not a unit test")
task := taskCreate(t, DeviceInfo)

// Test success
@@ -247,10 +257,6 @@ func dmTaskAddTargetFail(task *CDmTask,
return -1
}

func dmTaskGetDriverVersionFail(task *CDmTask, version *string) int {
return -1
}

func dmTaskGetInfoFail(task *CDmTask, info *Info) int {
return -1
}
@@ -264,14 +270,10 @@ func dmAttachLoopDeviceFail(filename string, fd *int) string {
return ""
}

func sysGetBlockSizeFail(fd uintptr, size *uint64) syscall.Errno {
func sysGetBlockSizeFail(fd uintptr, size *uint64) sysErrno {
return 1
}

func dmGetBlockSizeFail(fd uintptr) int64 {
return -1
}

func dmUdevWaitFail(cookie uint) int {
return -1
}

@@ -1,125 +1,25 @@
// +build linux

package devmapper

/*
#cgo LDFLAGS: -L. -ldevmapper
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <libdevmapper.h>
#include <linux/loop.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <errno.h>
#include <linux/loop.h> // FIXME: present only for defines, maybe we can remove it?
#include <linux/fs.h> // FIXME: present only for BLKGETSIZE64, maybe we can remove it?

#ifndef LOOP_CTL_GET_FREE
#define LOOP_CTL_GET_FREE 0x4C82
#define LOOP_CTL_GET_FREE 0x4C82
#endif

// FIXME: this could easily be rewritten in go
char* attach_loop_device(const char *filename, int *loop_fd_out)
{
struct loop_info64 loopinfo = {0};
struct stat st;
char buf[64];
int i, loop_fd, fd, start_index;
char* loopname;


*loop_fd_out = -1;

start_index = 0;
fd = open("/dev/loop-control", O_RDONLY);
if (fd >= 0) {
start_index = ioctl(fd, LOOP_CTL_GET_FREE);
close(fd);

if (start_index < 0)
start_index = 0;
}

fd = open(filename, O_RDWR);
if (fd < 0) {
perror("open");
return NULL;
}

loop_fd = -1;
for (i = start_index ; loop_fd < 0 ; i++ ) {
if (sprintf(buf, "/dev/loop%d", i) < 0) {
close(fd);
return NULL;
}

if (stat(buf, &st)) {
if (!S_ISBLK(st.st_mode)) {
fprintf(stderr, "[error] Loopback device %s is not a block device.\n", buf);
} else if (errno == ENOENT) {
fprintf(stderr, "[error] There are no more loopback device available.\n");
} else {
fprintf(stderr, "[error] Unkown error trying to stat the loopback device %s (errno: %d).\n", buf, errno);
}
close(fd);
return NULL;
}

loop_fd = open(buf, O_RDWR);
if (loop_fd < 0 && errno == ENOENT) {
fprintf(stderr, "[error] The loopback device %s does not exists.\n", buf);
close(fd);
return NULL;
} else if (loop_fd < 0) {
fprintf(stderr, "[error] Unkown error openning the loopback device %s. (errno: %d)\n", buf, errno);
continue;
}

if (ioctl(loop_fd, LOOP_SET_FD, (void *)(size_t)fd) < 0) {
int errsv = errno;
close(loop_fd);
loop_fd = -1;
if (errsv != EBUSY) {
close(fd);
fprintf(stderr, "cannot set up loopback device %s: %s", buf, strerror(errsv));
return NULL;
}
continue;
}

close(fd);

strncpy((char*)loopinfo.lo_file_name, buf, LO_NAME_SIZE);
loopinfo.lo_offset = 0;
loopinfo.lo_flags = LO_FLAGS_AUTOCLEAR;

if (ioctl(loop_fd, LOOP_SET_STATUS64, &loopinfo) < 0) {
perror("ioctl LOOP_SET_STATUS64");
if (ioctl(loop_fd, LOOP_CLR_FD, 0) < 0) {
perror("ioctl LOOP_CLR_FD");
}
close(loop_fd);
fprintf (stderr, "cannot set up loopback device info");
return (NULL);
}

loopname = strdup(buf);
if (loopname == NULL) {
close(loop_fd);
return (NULL);
}

*loop_fd_out = loop_fd;
return (loopname);
}

return (NULL);
}
#ifndef LO_FLAGS_PARTSCAN
#define LO_FLAGS_PARTSCAN 8
#endif

// FIXME: Can't we find a way to do the logging in pure Go?
extern void DevmapperLogCallback(int level, char *file, int line, int dm_errno_or_class, char *str);

static void log_cb(int level, const char *file, int line,
int dm_errno_or_class, const char *f, ...)
static void log_cb(int level, const char *file, int line, int dm_errno_or_class, const char *f, ...)
{
char buffer[256];
va_list ap;
@@ -135,40 +35,72 @@ static void log_with_errno_init()
{
dm_log_with_errno_init(log_cb);
}

*/
import "C"

import (
"syscall"
"unsafe"
)

type (
CDmTask C.struct_dm_task

CLoopInfo64 C.struct_loop_info64
LoopInfo64 struct {
loDevice uint64 /* ioctl r/o */
loInode uint64 /* ioctl r/o */
loRdevice uint64 /* ioctl r/o */
loOffset uint64
loSizelimit uint64 /* bytes, 0 == max available */
loNumber uint32 /* ioctl r/o */
loEncrypt_type uint32
loEncrypt_key_size uint32 /* ioctl w/o */
loFlags uint32 /* ioctl r/o */
loFileName [LoNameSize]uint8
loCryptName [LoNameSize]uint8
loEncryptKey [LoKeySize]uint8 /* ioctl w/o */
loInit [2]uint64
}
)

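LoopInfo64 is a field-for-field Go mirror of the kernel's struct loop_info64: its address is passed straight to the LOOP_GET_STATUS64/LOOP_SET_STATUS64 ioctls (see the new ioctl.go later in this diff), so the field order and widths must match the C layout exactly, down to the [LoNameSize] and [LoKeySize] array sizes. A small usage sketch of the read side, equivalent to getLoopbackBackingFile above (assumes an open *osFile on a /dev/loopN device; helper name hypothetical):

    // Sketch: query which file backs an attached loop device.
    func loopBackingFile(file *osFile) (dev, inode uint64, err error) {
        loopInfo, err := ioctlLoopGetStatus64(file.Fd())
        if err != nil {
            return 0, 0, err
        }
        // loDevice and loInode identify the backing file uniquely.
        return loopInfo.loDevice, loopInfo.loInode, nil
    }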
// IOCTL consts
const (
BlkGetSize64 = C.BLKGETSIZE64

LoopSetFd = C.LOOP_SET_FD
LoopCtlGetFree = C.LOOP_CTL_GET_FREE
LoopGetStatus64 = C.LOOP_GET_STATUS64
LoopSetStatus64 = C.LOOP_SET_STATUS64
LoopClrFd = C.LOOP_CLR_FD
LoopSetCapacity = C.LOOP_SET_CAPACITY
)

const (
LoFlagsAutoClear = C.LO_FLAGS_AUTOCLEAR
LoFlagsReadOnly = C.LO_FLAGS_READ_ONLY
LoFlagsPartScan = C.LO_FLAGS_PARTSCAN
LoKeySize = C.LO_KEY_SIZE
LoNameSize = C.LO_NAME_SIZE
)

var (
DmTaskDestory = dmTaskDestroyFct
DmTaskCreate = dmTaskCreateFct
DmTaskRun = dmTaskRunFct
DmTaskSetName = dmTaskSetNameFct
DmTaskSetMessage = dmTaskSetMessageFct
DmTaskSetSector = dmTaskSetSectorFct
DmTaskSetCookie = dmTaskSetCookieFct
DmTaskSetAddNode = dmTaskSetAddNodeFct
DmTaskSetRo = dmTaskSetRoFct
DmTaskAddTarget = dmTaskAddTargetFct
DmTaskGetInfo = dmTaskGetInfoFct
DmGetLibraryVersion = dmGetLibraryVersionFct
DmGetNextTarget = dmGetNextTargetFct
DmGetBlockSize = dmGetBlockSizeFct
DmAttachLoopDevice = dmAttachLoopDeviceFct
DmUdevWait = dmUdevWaitFct
DmLogInitVerbose = dmLogInitVerboseFct
DmSetDevDir = dmSetDevDirFct
DmGetLibraryVersion = dmGetLibraryVersionFct
DmTaskAddTarget = dmTaskAddTargetFct
DmTaskCreate = dmTaskCreateFct
DmTaskDestroy = dmTaskDestroyFct
DmTaskGetInfo = dmTaskGetInfoFct
DmTaskRun = dmTaskRunFct
DmTaskSetAddNode = dmTaskSetAddNodeFct
DmTaskSetCookie = dmTaskSetCookieFct
DmTaskSetMessage = dmTaskSetMessageFct
DmTaskSetName = dmTaskSetNameFct
DmTaskSetRo = dmTaskSetRoFct
DmTaskSetSector = dmTaskSetSectorFct
DmUdevWait = dmUdevWaitFct
LogWithErrnoInit = logWithErrnoInitFct
GetBlockSize = getBlockSizeFct
)

func free(p *C.char) {
@@ -184,28 +116,26 @@ func dmTaskCreateFct(taskType int) *CDmTask {
}

func dmTaskRunFct(task *CDmTask) int {
return int(C.dm_task_run((*C.struct_dm_task)(task)))
ret, _ := C.dm_task_run((*C.struct_dm_task)(task))
return int(ret)
}

func dmTaskSetNameFct(task *CDmTask, name string) int {
Cname := C.CString(name)
defer free(Cname)

return int(C.dm_task_set_name((*C.struct_dm_task)(task),
Cname))
return int(C.dm_task_set_name((*C.struct_dm_task)(task), Cname))
}

func dmTaskSetMessageFct(task *CDmTask, message string) int {
Cmessage := C.CString(message)
defer free(Cmessage)

return int(C.dm_task_set_message((*C.struct_dm_task)(task),
Cmessage))
return int(C.dm_task_set_message((*C.struct_dm_task)(task), Cmessage))
}

func dmTaskSetSectorFct(task *CDmTask, sector uint64) int {
return int(C.dm_task_set_sector((*C.struct_dm_task)(task),
C.uint64_t(sector)))
return int(C.dm_task_set_sector((*C.struct_dm_task)(task), C.uint64_t(sector)))
}

func dmTaskSetCookieFct(task *CDmTask, cookie *uint, flags uint16) int {
@@ -213,13 +143,11 @@ func dmTaskSetCookieFct(task *CDmTask, cookie *uint, flags uint16) int {
defer func() {
*cookie = uint(cCookie)
}()
return int(C.dm_task_set_cookie((*C.struct_dm_task)(task), &cCookie,
C.uint16_t(flags)))
return int(C.dm_task_set_cookie((*C.struct_dm_task)(task), &cCookie, C.uint16_t(flags)))
}

func dmTaskSetAddNodeFct(task *CDmTask, addNode AddNodeType) int {
return int(C.dm_task_set_add_node((*C.struct_dm_task)(task),
C.dm_add_node_t(addNode)))
return int(C.dm_task_set_add_node((*C.struct_dm_task)(task), C.dm_add_node_t(addNode)))
}

func dmTaskSetRoFct(task *CDmTask) int {
@@ -235,27 +163,7 @@ func dmTaskAddTargetFct(task *CDmTask,
Cparams := C.CString(params)
defer free(Cparams)

return int(C.dm_task_add_target((*C.struct_dm_task)(task),
C.uint64_t(start), C.uint64_t(size), Cttype, Cparams))
}

func dmGetLoopbackBackingFile(fd uintptr) (uint64, uint64, syscall.Errno) {
var lo64 C.struct_loop_info64
_, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, C.LOOP_GET_STATUS64,
uintptr(unsafe.Pointer(&lo64)))
return uint64(lo64.lo_device), uint64(lo64.lo_inode), err
}

func dmLoopbackSetCapacity(fd uintptr) syscall.Errno {
_, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, C.LOOP_SET_CAPACITY, 0)
return err
}

func dmGetBlockSizeFct(fd uintptr) (int64, syscall.Errno) {
var size int64
_, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, C.BLKGETSIZE64,
uintptr(unsafe.Pointer(&size)))
return size, err
return int(C.dm_task_add_target((*C.struct_dm_task)(task), C.uint64_t(start), C.uint64_t(size), Cttype, Cparams))
}

func dmTaskGetInfoFct(task *CDmTask, info *Info) int {
@@ -275,9 +183,7 @@ func dmTaskGetInfoFct(task *CDmTask, info *Info) int {
return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo))
}

func dmGetNextTargetFct(task *CDmTask, next uintptr, start, length *uint64,
target, params *string) uintptr {

func dmGetNextTargetFct(task *CDmTask, next uintptr, start, length *uint64, target, params *string) uintptr {
var (
Cstart, Clength C.uint64_t
CtargetType, Cparams *C.char
@@ -288,31 +194,11 @@ func dmGetNextTargetFct(task *CDmTask, next uintptr, start, length *uint64,
*target = C.GoString(CtargetType)
*params = C.GoString(Cparams)
}()
nextp := C.dm_get_next_target((*C.struct_dm_task)(task),
unsafe.Pointer(next), &Cstart, &Clength, &CtargetType, &Cparams)

nextp := C.dm_get_next_target((*C.struct_dm_task)(task), unsafe.Pointer(next), &Cstart, &Clength, &CtargetType, &Cparams)
return uintptr(nextp)
}

func dmAttachLoopDeviceFct(filename string, fd *int) string {
cFilename := C.CString(filename)
defer free(cFilename)

var cFd C.int
defer func() {
*fd = int(cFd)
}()

ret := C.attach_loop_device(cFilename, &cFd)
defer free(ret)
return C.GoString(ret)
}

func getBlockSizeFct(fd uintptr, size *uint64) syscall.Errno {
_, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, C.BLKGETSIZE64,
uintptr(unsafe.Pointer(&size)))
return err
}

func dmUdevWaitFct(cookie uint) int {
return int(C.dm_udev_wait(C.uint32_t(cookie)))
}

@@ -1,10 +1,11 @@
// +build linux

package devmapper

import (
"fmt"
"github.com/dotcloud/docker/graphdriver"
"io/ioutil"
"os"
"path"
)

@@ -22,7 +23,7 @@ type Driver struct {
home string
}

func Init(home string) (graphdriver.Driver, error) {
var Init = func(home string) (graphdriver.Driver, error) {
deviceSet, err := NewDeviceSet(home, true)
if err != nil {
return nil, err
@@ -57,7 +58,7 @@ func (d *Driver) Cleanup() error {
return d.DeviceSet.Shutdown()
}

func (d *Driver) Create(id string, parent string) error {
func (d *Driver) Create(id, parent string) error {
if err := d.DeviceSet.AddDevice(id, parent); err != nil {
return err
}
@@ -67,7 +68,7 @@ func (d *Driver) Create(id string, parent string) error {
return err
}

if err := os.MkdirAll(path.Join(mp, "rootfs"), 0755); err != nil && !os.IsExist(err) {
if err := osMkdirAll(path.Join(mp, "rootfs"), 0755); err != nil && !osIsExist(err) {
return err
}

@@ -98,7 +99,7 @@ func (d *Driver) Get(id string) (string, error) {

func (d *Driver) mount(id, mountPoint string) error {
// Create the target directories if they don't exist
if err := os.MkdirAll(mountPoint, 0755); err != nil && !os.IsExist(err) {
if err := osMkdirAll(mountPoint, 0755); err != nil && !osIsExist(err) {
return err
}
// If mountpoint is already mounted, do nothing
@@ -121,3 +122,7 @@ func (d *Driver) unmount(id, mountPoint string) error {
// Unmount the device
return d.DeviceSet.UnmountDevice(id, mountPoint, true)
}

func (d *Driver) Exists(id string) bool {
return d.Devices[id] != nil
}

@@ -1,9 +1,15 @@
// +build linux

package devmapper

import (
"fmt"
"github.com/dotcloud/docker/graphdriver"
"io/ioutil"
"os"
"path"
"runtime"
"strings"
"syscall"
"testing"
)

@@ -12,7 +18,96 @@ func init() {
DefaultDataLoopbackSize = 300 * 1024 * 1024
DefaultMetaDataLoopbackSize = 200 * 1024 * 1024
DefaultBaseFsSize = 300 * 1024 * 1024
}

// denyAllDevmapper mocks all calls to libdevmapper in the unit tests, and denies them by default
func denyAllDevmapper() {
// Hijack all calls to libdevmapper with default panics.
// Authorized calls are selectively hijacked in each test.
DmTaskCreate = func(t int) *CDmTask {
panic("DmTaskCreate: this method should not be called here")
}
DmTaskRun = func(task *CDmTask) int {
panic("DmTaskRun: this method should not be called here")
}
DmTaskSetName = func(task *CDmTask, name string) int {
panic("DmTaskSetName: this method should not be called here")
}
DmTaskSetMessage = func(task *CDmTask, message string) int {
panic("DmTaskSetMessage: this method should not be called here")
}
DmTaskSetSector = func(task *CDmTask, sector uint64) int {
panic("DmTaskSetSector: this method should not be called here")
}
DmTaskSetCookie = func(task *CDmTask, cookie *uint, flags uint16) int {
panic("DmTaskSetCookie: this method should not be called here")
}
DmTaskSetAddNode = func(task *CDmTask, addNode AddNodeType) int {
panic("DmTaskSetAddNode: this method should not be called here")
}
DmTaskSetRo = func(task *CDmTask) int {
panic("DmTaskSetRo: this method should not be called here")
}
DmTaskAddTarget = func(task *CDmTask, start, size uint64, ttype, params string) int {
panic("DmTaskAddTarget: this method should not be called here")
}
DmTaskGetInfo = func(task *CDmTask, info *Info) int {
panic("DmTaskGetInfo: this method should not be called here")
}
DmGetNextTarget = func(task *CDmTask, next uintptr, start, length *uint64, target, params *string) uintptr {
panic("DmGetNextTarget: this method should not be called here")
}
DmUdevWait = func(cookie uint) int {
panic("DmUdevWait: this method should not be called here")
}
DmSetDevDir = func(dir string) int {
panic("DmSetDevDir: this method should not be called here")
}
DmGetLibraryVersion = func(version *string) int {
panic("DmGetLibraryVersion: this method should not be called here")
}
DmLogInitVerbose = func(level int) {
panic("DmLogInitVerbose: this method should not be called here")
}
DmTaskDestroy = func(task *CDmTask) {
panic("DmTaskDestroy: this method should not be called here")
}
LogWithErrnoInit = func() {
panic("LogWithErrnoInit: this method should not be called here")
}
}

func denyAllSyscall() {
sysMount = func(source, target, fstype string, flags uintptr, data string) (err error) {
panic("sysMount: this method should not be called here")
}
sysUnmount = func(target string, flags int) (err error) {
panic("sysUnmount: this method should not be called here")
}
sysCloseOnExec = func(fd int) {
panic("sysCloseOnExec: this method should not be called here")
}
sysSyscall = func(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) {
panic("sysSyscall: this method should not be called here")
}
// Not a syscall, but forbidding it here anyway
Mounted = func(mnt string) (bool, error) {
panic("devmapper.Mounted: this method should not be called here")
}
// osOpenFile = os.OpenFile
// osNewFile = os.NewFile
// osCreate = os.Create
// osStat = os.Stat
// osIsNotExist = os.IsNotExist
// osIsExist = os.IsExist
// osMkdirAll = os.MkdirAll
// osRemoveAll = os.RemoveAll
// osRename = os.Rename
// osReadlink = os.Readlink

// execRun = func(name string, args ...string) error {
// return exec.Command(name, args...).Run()
// }
}

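The two deny* helpers above give every test a hostile baseline: each libdevmapper entry point and each raw syscall wrapper panics unless the test explicitly re-authorizes it, so an unexpected call fails loudly instead of touching the host system. A hedged sketch of the intended usage (test name hypothetical; the Set recording helper is defined just below):

    // Sketch: deny everything, then whitelist only the calls this test expects.
    func TestOnlySetName(t *testing.T) {
        denyAllDevmapper()
        denyAllSyscall()

        calls := make(Set)
        DmTaskSetName = func(task *CDmTask, name string) int {
            calls["DmTaskSetName"] = true
            return 1
        }
        // ... exercise code expected to call only DmTaskSetName ...
        calls.Assert(t, "DmTaskSetName")
    }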
func mkTestDirectory(t *testing.T) string {
@@ -34,72 +129,534 @@ func newDriver(t *testing.T) *Driver {

func cleanup(d *Driver) {
d.Cleanup()
os.RemoveAll(d.home)
osRemoveAll(d.home)
}

type Set map[string]bool

func (r Set) Assert(t *testing.T, names ...string) {
for _, key := range names {
if _, exists := r[key]; !exists {
t.Fatalf("Key not set: %s", key)
}
delete(r, key)
}
if len(r) != 0 {
t.Fatalf("Unexpected keys: %v", r)
}
}

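Set.Assert is an exact-match check, not a subset check: it deletes every expected key as it verifies it, then fails if anything is left over, so a test proves both that each expected call happened and that no extra call did. A short usage sketch (function name hypothetical):

    func ExampleAssert(t *testing.T) {
        calls := make(Set)
        calls["DmTaskCreate"] = true
        calls["DmTaskRun"] = true
        // Passes: both keys are present and nothing else was recorded.
        calls.Assert(t, "DmTaskCreate", "DmTaskRun")
        // calls.Assert(t, "DmTaskCreate") would fail: "DmTaskRun" left over.
    }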
func TestInit(t *testing.T) {
home := mkTestDirectory(t)
defer os.RemoveAll(home)
driver, err := Init(home)
if err != nil {
t.Fatal(err)
}
defer func() {
if err := driver.Cleanup(); err != nil {
var (
calls = make(Set)
taskMessages = make(Set)
taskTypes = make(Set)
home = mkTestDirectory(t)
)
defer osRemoveAll(home)

func() {
denyAllDevmapper()
DmSetDevDir = func(dir string) int {
calls["DmSetDevDir"] = true
expectedDir := "/dev"
if dir != expectedDir {
t.Fatalf("Wrong libdevmapper call\nExpected: DmSetDevDir(%v)\nReceived: DmSetDevDir(%v)\n", expectedDir, dir)
}
return 0
}
LogWithErrnoInit = func() {
calls["DmLogWithErrnoInit"] = true
}
var task1 CDmTask
DmTaskCreate = func(taskType int) *CDmTask {
calls["DmTaskCreate"] = true
taskTypes[fmt.Sprintf("%d", taskType)] = true
return &task1
}
DmTaskSetName = func(task *CDmTask, name string) int {
calls["DmTaskSetName"] = true
expectedTask := &task1
if task != expectedTask {
t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskSetName(%v)\nReceived: DmTaskSetName(%v)\n", expectedTask, task)
}
// FIXME: use Set.AssertRegexp()
if !strings.HasPrefix(name, "docker-") && !strings.HasPrefix(name, "/dev/mapper/docker-") ||
!strings.HasSuffix(name, "-pool") && !strings.HasSuffix(name, "-base") {
t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskSetName(%v)\nReceived: DmTaskSetName(%v)\n", "docker-...-pool", name)
}
return 1
}
DmTaskRun = func(task *CDmTask) int {
calls["DmTaskRun"] = true
expectedTask := &task1
if task != expectedTask {
t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskRun(%v)\nReceived: DmTaskRun(%v)\n", expectedTask, task)
}
return 1
}
DmTaskGetInfo = func(task *CDmTask, info *Info) int {
calls["DmTaskGetInfo"] = true
expectedTask := &task1
if task != expectedTask {
t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskGetInfo(%v)\nReceived: DmTaskGetInfo(%v)\n", expectedTask, task)
}
// This will crash if info is not dereferenceable
info.Exists = 0
return 1
}
DmTaskSetSector = func(task *CDmTask, sector uint64) int {
calls["DmTaskSetSector"] = true
expectedTask := &task1
if task != expectedTask {
t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskSetSector(%v)\nReceived: DmTaskSetSector(%v)\n", expectedTask, task)
}
if expectedSector := uint64(0); sector != expectedSector {
t.Fatalf("Wrong libdevmapper call to DmTaskSetSector\nExpected: %v\nReceived: %v\n", expectedSector, sector)
}
return 1
}
DmTaskSetMessage = func(task *CDmTask, message string) int {
calls["DmTaskSetMessage"] = true
expectedTask := &task1
if task != expectedTask {
t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskSetSector(%v)\nReceived: DmTaskSetSector(%v)\n", expectedTask, task)
}
taskMessages[message] = true
return 1
}
DmTaskDestroy = func(task *CDmTask) {
calls["DmTaskDestroy"] = true
expectedTask := &task1
if task != expectedTask {
t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskDestroy(%v)\nReceived: DmTaskDestroy(%v)\n", expectedTask, task)
}
}
DmTaskAddTarget = func(task *CDmTask, start, size uint64, ttype, params string) int {
calls["DmTaskSetTarget"] = true
expectedTask := &task1
if task != expectedTask {
t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskDestroy(%v)\nReceived: DmTaskDestroy(%v)\n", expectedTask, task)
}
if start != 0 {
t.Fatalf("Wrong start: %d != %d", start, 0)
}
if ttype != "thin" && ttype != "thin-pool" {
t.Fatalf("Wrong ttype: %s", ttype)
}
// Quick smoke test
if params == "" {
t.Fatalf("Params should not be empty")
}
return 1
}
fakeCookie := uint(4321)
DmTaskSetCookie = func(task *CDmTask, cookie *uint, flags uint16) int {
calls["DmTaskSetCookie"] = true
expectedTask := &task1
if task != expectedTask {
t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskDestroy(%v)\nReceived: DmTaskDestroy(%v)\n", expectedTask, task)
}
if flags != 0 {
t.Fatalf("Cookie flags should be 0 (not %x)", flags)
}
*cookie = fakeCookie
return 1
}
DmUdevWait = func(cookie uint) int {
calls["DmUdevWait"] = true
if cookie != fakeCookie {
t.Fatalf("Wrong cookie: %d != %d", cookie, fakeCookie)
}
return 1
}
DmTaskSetAddNode = func(task *CDmTask, addNode AddNodeType) int {
if addNode != AddNodeOnCreate {
t.Fatalf("Wrong AddNodeType: %v (expected %v)", addNode, AddNodeOnCreate)
}
calls["DmTaskSetAddNode"] = true
return 1
}
execRun = func(name string, args ...string) error {
calls["execRun"] = true
if name != "mkfs.ext4" {
t.Fatalf("Expected %s to be executed, not %s", "mkfs.ext4", name)
}
return nil
}
driver, err := Init(home)
if err != nil {
t.Fatal(err)
}
defer func() {
if err := driver.Cleanup(); err != nil {
t.Fatal(err)
}
}()
}()
// Put all tests in a function to make sure the garbage collection will
// occur.

id := "foo"
if err := driver.Create(id, ""); err != nil {
t.Fatal(err)
// Call GC to cleanup runtime.Finalizers
runtime.GC()

calls.Assert(t,
"DmSetDevDir",
"DmLogWithErrnoInit",
"DmTaskSetName",
"DmTaskRun",
"DmTaskGetInfo",
"DmTaskDestroy",
"execRun",
"DmTaskCreate",
"DmTaskSetTarget",
"DmTaskSetCookie",
"DmUdevWait",
"DmTaskSetSector",
"DmTaskSetMessage",
"DmTaskSetAddNode",
)
taskTypes.Assert(t, "0", "6", "17")
taskMessages.Assert(t, "create_thin 0", "set_transaction_id 0 1")
}

func fakeInit() func(home string) (graphdriver.Driver, error) {
oldInit := Init
Init = func(home string) (graphdriver.Driver, error) {
return &Driver{
home: home,
}, nil
}
dir, err := driver.Get(id)
if err != nil {
t.Fatal(err)
return oldInit
}

func restoreInit(init func(home string) (graphdriver.Driver, error)) {
Init = init
}

func mockAllDevmapper(calls Set) {
DmSetDevDir = func(dir string) int {
calls["DmSetDevDir"] = true
return 0
}
if st, err := os.Stat(dir); err != nil {
t.Fatal(err)
} else if !st.IsDir() {
t.Fatalf("Get(%V) did not return a directory", id)
LogWithErrnoInit = func() {
calls["DmLogWithErrnoInit"] = true
}
DmTaskCreate = func(taskType int) *CDmTask {
calls["DmTaskCreate"] = true
return &CDmTask{}
}
DmTaskSetName = func(task *CDmTask, name string) int {
calls["DmTaskSetName"] = true
return 1
}
DmTaskRun = func(task *CDmTask) int {
calls["DmTaskRun"] = true
return 1
}
DmTaskGetInfo = func(task *CDmTask, info *Info) int {
calls["DmTaskGetInfo"] = true
return 1
}
DmTaskSetSector = func(task *CDmTask, sector uint64) int {
calls["DmTaskSetSector"] = true
return 1
}
DmTaskSetMessage = func(task *CDmTask, message string) int {
calls["DmTaskSetMessage"] = true
return 1
}
DmTaskDestroy = func(task *CDmTask) {
calls["DmTaskDestroy"] = true
}
DmTaskAddTarget = func(task *CDmTask, start, size uint64, ttype, params string) int {
calls["DmTaskSetTarget"] = true
return 1
}
DmTaskSetCookie = func(task *CDmTask, cookie *uint, flags uint16) int {
calls["DmTaskSetCookie"] = true
return 1
}
DmUdevWait = func(cookie uint) int {
calls["DmUdevWait"] = true
return 1
}
DmTaskSetAddNode = func(task *CDmTask, addNode AddNodeType) int {
calls["DmTaskSetAddNode"] = true
return 1
}
execRun = func(name string, args ...string) error {
calls["execRun"] = true
return nil
}
}

func TestDriverName(t *testing.T) {
d := newDriver(t)
defer cleanup(d)
denyAllDevmapper()
defer denyAllDevmapper()

oldInit := fakeInit()
defer restoreInit(oldInit)

d := newDriver(t)
if d.String() != "devicemapper" {
t.Fatalf("Expected driver name to be devicemapper got %s", d.String())
}
}

func TestDriverCreate(t *testing.T) {
d := newDriver(t)
defer cleanup(d)
denyAllDevmapper()
denyAllSyscall()
defer denyAllSyscall()
defer denyAllDevmapper()

if err := d.Create("1", ""); err != nil {
t.Fatal(err)
calls := make(Set)
mockAllDevmapper(calls)

sysMount = func(source, target, fstype string, flags uintptr, data string) (err error) {
calls["sysMount"] = true
// FIXME: compare the exact source and target strings (inodes + devname)
if expectedSource := "/dev/mapper/docker-"; !strings.HasPrefix(source, expectedSource) {
t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedSource, source)
}
if expectedTarget := "/tmp/docker-test-devmapper-"; !strings.HasPrefix(target, expectedTarget) {
t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedTarget, target)
}
if expectedFstype := "ext4"; fstype != expectedFstype {
t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedFstype, fstype)
}
if expectedFlags := uintptr(3236757504); flags != expectedFlags {
t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedFlags, flags)
}
return nil
}

Mounted = func(mnt string) (bool, error) {
calls["Mounted"] = true
if !strings.HasPrefix(mnt, "/tmp/docker-test-devmapper-") || !strings.HasSuffix(mnt, "/mnt/1") {
t.Fatalf("Wrong mounted call\nExpected: Mounted(%v)\nReceived: Mounted(%v)\n", "/tmp/docker-test-devmapper-.../mnt/1", mnt)
}
return false, nil
}

sysSyscall = func(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) {
calls["sysSyscall"] = true
if trap != sysSysIoctl {
t.Fatalf("Unexpected syscall. Expecting SYS_IOCTL, received: %d", trap)
}
switch a2 {
case LoopSetFd:
calls["ioctl.loopsetfd"] = true
case LoopCtlGetFree:
calls["ioctl.loopctlgetfree"] = true
case LoopGetStatus64:
calls["ioctl.loopgetstatus"] = true
case LoopSetStatus64:
calls["ioctl.loopsetstatus"] = true
case LoopClrFd:
calls["ioctl.loopclrfd"] = true
case LoopSetCapacity:
calls["ioctl.loopsetcapacity"] = true
case BlkGetSize64:
calls["ioctl.blkgetsize"] = true
default:
t.Fatalf("Unexpected IOCTL. Received %d", a2)
}
return 0, 0, 0
}

func() {
d := newDriver(t)

calls.Assert(t,
"DmSetDevDir",
"DmLogWithErrnoInit",
"DmTaskSetName",
"DmTaskRun",
"DmTaskGetInfo",
"execRun",
"DmTaskCreate",
"DmTaskSetTarget",
"DmTaskSetCookie",
"DmUdevWait",
"DmTaskSetSector",
"DmTaskSetMessage",
"DmTaskSetAddNode",
"sysSyscall",
"ioctl.blkgetsize",
"ioctl.loopsetfd",
"ioctl.loopsetstatus",
)

if err := d.Create("1", ""); err != nil {
t.Fatal(err)
}
calls.Assert(t,
"DmTaskCreate",
"DmTaskGetInfo",
"sysMount",
"Mounted",
"DmTaskRun",
"DmTaskSetTarget",
"DmTaskSetSector",
"DmTaskSetCookie",
"DmUdevWait",
"DmTaskSetName",
"DmTaskSetMessage",
"DmTaskSetAddNode",
)

}()

runtime.GC()

calls.Assert(t,
"DmTaskDestroy",
)
}

func TestDriverRemove(t *testing.T) {
d := newDriver(t)
defer cleanup(d)
denyAllDevmapper()
denyAllSyscall()
defer denyAllSyscall()
defer denyAllDevmapper()

if err := d.Create("1", ""); err != nil {
t.Fatal(err)
calls := make(Set)
mockAllDevmapper(calls)

sysMount = func(source, target, fstype string, flags uintptr, data string) (err error) {
calls["sysMount"] = true
// FIXME: compare the exact source and target strings (inodes + devname)
if expectedSource := "/dev/mapper/docker-"; !strings.HasPrefix(source, expectedSource) {
t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedSource, source)
}
if expectedTarget := "/tmp/docker-test-devmapper-"; !strings.HasPrefix(target, expectedTarget) {
t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedTarget, target)
}
if expectedFstype := "ext4"; fstype != expectedFstype {
t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedFstype, fstype)
}
if expectedFlags := uintptr(3236757504); flags != expectedFlags {
t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedFlags, flags)
}
return nil
}
sysUnmount = func(target string, flags int) (err error) {
calls["sysUnmount"] = true
// FIXME: compare the exact source and target strings (inodes + devname)
if expectedTarget := "/tmp/docker-test-devmapper-"; !strings.HasPrefix(target, expectedTarget) {
t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedTarget, target)
}
if expectedFlags := 0; flags != expectedFlags {
t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedFlags, flags)
}
return nil
}
Mounted = func(mnt string) (bool, error) {
calls["Mounted"] = true
return false, nil
}

if err := d.Remove("1"); err != nil {
t.Fatal(err)
sysSyscall = func(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) {
calls["sysSyscall"] = true
if trap != sysSysIoctl {
t.Fatalf("Unexpected syscall. Expecting SYS_IOCTL, received: %d", trap)
}
switch a2 {
case LoopSetFd:
calls["ioctl.loopsetfd"] = true
case LoopCtlGetFree:
calls["ioctl.loopctlgetfree"] = true
case LoopGetStatus64:
calls["ioctl.loopgetstatus"] = true
case LoopSetStatus64:
calls["ioctl.loopsetstatus"] = true
case LoopClrFd:
calls["ioctl.loopclrfd"] = true
case LoopSetCapacity:
calls["ioctl.loopsetcapacity"] = true
case BlkGetSize64:
calls["ioctl.blkgetsize"] = true
default:
t.Fatalf("Unexpected IOCTL. Received %d", a2)
}
return 0, 0, 0
}

func() {
d := newDriver(t)

calls.Assert(t,
"DmSetDevDir",
"DmLogWithErrnoInit",
"DmTaskSetName",
"DmTaskRun",
"DmTaskGetInfo",
"execRun",
"DmTaskCreate",
"DmTaskSetTarget",
"DmTaskSetCookie",
"DmUdevWait",
"DmTaskSetSector",
"DmTaskSetMessage",
"DmTaskSetAddNode",
"sysSyscall",
"ioctl.blkgetsize",
"ioctl.loopsetfd",
"ioctl.loopsetstatus",
)

if err := d.Create("1", ""); err != nil {
t.Fatal(err)
}

calls.Assert(t,
"DmTaskCreate",
"DmTaskGetInfo",
"sysMount",
"Mounted",
"DmTaskRun",
"DmTaskSetTarget",
"DmTaskSetSector",
"DmTaskSetCookie",
"DmUdevWait",
"DmTaskSetName",
"DmTaskSetMessage",
"DmTaskSetAddNode",
)

Mounted = func(mnt string) (bool, error) {
calls["Mounted"] = true
return true, nil
}

if err := d.Remove("1"); err != nil {
t.Fatal(err)
}

calls.Assert(t,
"DmTaskRun",
"DmTaskSetSector",
"DmTaskSetName",
"DmTaskSetMessage",
"DmTaskCreate",
"DmTaskGetInfo",
"Mounted",
"sysUnmount",
)
}()
runtime.GC()

calls.Assert(t,
"DmTaskDestroy",
)
}

func TestCleanup(t *testing.T) {
t.Skip("FIXME: not a unit test")
t.Skip("Unimplemented")
d := newDriver(t)
defer os.RemoveAll(d.home)
defer osRemoveAll(d.home)

mountPoints := make([]string, 2)

@@ -161,6 +718,7 @@ func TestCleanup(t *testing.T) {
}

func TestNotMounted(t *testing.T) {
t.Skip("FIXME: not a unit test")
t.Skip("Not implemented")
d := newDriver(t)
defer cleanup(d)
@@ -179,6 +737,7 @@ func TestNotMounted(t *testing.T) {
}

func TestMounted(t *testing.T) {
t.Skip("FIXME: not a unit test")
d := newDriver(t)
defer cleanup(d)

@@ -199,6 +758,7 @@ func TestMounted(t *testing.T) {
}

func TestInitCleanedDriver(t *testing.T) {
t.Skip("FIXME: not a unit test")
d := newDriver(t)

if err := d.Create("1", ""); err != nil {
@@ -225,6 +785,7 @@ func TestInitCleanedDriver(t *testing.T) {
}

func TestMountMountedDriver(t *testing.T) {
t.Skip("FIXME: not a unit test")
d := newDriver(t)
defer cleanup(d)

@@ -243,6 +804,7 @@ func TestMountMountedDriver(t *testing.T) {
}

func TestGetReturnsValidDevice(t *testing.T) {
t.Skip("FIXME: not a unit test")
d := newDriver(t)
defer cleanup(d)

@@ -268,6 +830,7 @@ func TestGetReturnsValidDevice(t *testing.T) {
}

func TestDriverGetSize(t *testing.T) {
t.Skip("FIXME: not a unit test")
t.Skipf("Size is currently not implemented")

d := newDriver(t)
@@ -284,7 +847,7 @@ func TestDriverGetSize(t *testing.T) {

size := int64(1024)

f, err := os.Create(path.Join(mountPoint, "test_file"))
f, err := osCreate(path.Join(mountPoint, "test_file"))
if err != nil {
t.Fatal(err)
}
@@ -301,3 +864,15 @@ func TestDriverGetSize(t *testing.T) {
// t.Fatalf("Expected size %d got %d", size, diffSize)
// }
}

func assertMap(t *testing.T, m map[string]bool, keys ...string) {
for _, key := range keys {
if _, exists := m[key]; !exists {
t.Fatalf("Key not set: %s", key)
}
delete(m, key)
}
if len(m) != 0 {
t.Fatalf("Unexpected keys: %v", m)
}
}

graphdriver/devmapper/ioctl.go (new file, 60 lines)
@@ -0,0 +1,60 @@
// +build linux

package devmapper

import (
"unsafe"
)

func ioctlLoopCtlGetFree(fd uintptr) (int, error) {
index, _, err := sysSyscall(sysSysIoctl, fd, LoopCtlGetFree, 0)
if err != 0 {
return 0, err
}
return int(index), nil
}

func ioctlLoopSetFd(loopFd, sparseFd uintptr) error {
if _, _, err := sysSyscall(sysSysIoctl, loopFd, LoopSetFd, sparseFd); err != 0 {
return err
}
return nil
}

func ioctlLoopSetStatus64(loopFd uintptr, loopInfo *LoopInfo64) error {
if _, _, err := sysSyscall(sysSysIoctl, loopFd, LoopSetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 {
return err
}
return nil
}

func ioctlLoopClrFd(loopFd uintptr) error {
if _, _, err := sysSyscall(sysSysIoctl, loopFd, LoopClrFd, 0); err != 0 {
return err
}
return nil
}

func ioctlLoopGetStatus64(loopFd uintptr) (*LoopInfo64, error) {
loopInfo := &LoopInfo64{}

if _, _, err := sysSyscall(sysSysIoctl, loopFd, LoopGetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 {
return nil, err
}
return loopInfo, nil
}

func ioctlLoopSetCapacity(loopFd uintptr, value int) error {
if _, _, err := sysSyscall(sysSysIoctl, loopFd, LoopSetCapacity, uintptr(value)); err != 0 {
return err
}
return nil
}

func ioctlBlkGetSize64(fd uintptr) (int64, error) {
var size int64
if _, _, err := sysSyscall(sysSysIoctl, fd, BlkGetSize64, uintptr(unsafe.Pointer(&size))); err != 0 {
return 0, err
}
return size, nil
}
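These small wrappers are the pure-Go replacement for the C attach_loop_device helper deleted earlier in this diff: attaching a file to a loop device is just a sequence of ioctls. A hedged sketch of how they compose (hypothetical helper; a real attach path must also handle EBUSY races when two processes grab the same free device):

    // Sketch: bind sparseFile to the first free loop device.
    func attachLoopDeviceGo(sparseFile *osFile) (*osFile, error) {
        ctl, err := osOpenFile("/dev/loop-control", osORdWr, 0)
        if err != nil {
            return nil, err
        }
        defer ctl.Close()
        index, err := ioctlLoopCtlGetFree(ctl.Fd())
        if err != nil {
            return nil, err
        }
        loopFile, err := osOpenFile(fmt.Sprintf("/dev/loop%d", index), osORdWr, 0)
        if err != nil {
            return nil, err
        }
        if err := ioctlLoopSetFd(loopFile.Fd(), sparseFile.Fd()); err != nil {
            loopFile.Close()
            return nil, err
        }
        // Ask the kernel to detach the device automatically on last close.
        loopInfo := &LoopInfo64{loFlags: LoFlagsAutoClear}
        copy(loopInfo.loFileName[:], loopFile.Name())
        if err := ioctlLoopSetStatus64(loopFile.Fd(), loopInfo); err != nil {
            ioctlLoopClrFd(loopFile.Fd())
            loopFile.Close()
            return nil, err
        }
        return loopFile, nil
    }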
@@ -1,27 +1,27 @@
// +build linux

package devmapper

import (
"os"
"path/filepath"
"syscall"
)

// FIXME: this is copy-pasted from the aufs driver.
// It should be moved into the core.

func Mounted(mountpoint string) (bool, error) {
mntpoint, err := os.Stat(mountpoint)
var Mounted = func(mountpoint string) (bool, error) {
mntpoint, err := osStat(mountpoint)
if err != nil {
if os.IsNotExist(err) {
if osIsNotExist(err) {
return false, nil
}
return false, err
}
parent, err := os.Stat(filepath.Join(mountpoint, ".."))
parent, err := osStat(filepath.Join(mountpoint, ".."))
if err != nil {
return false, err
}
mntpointSt := mntpoint.Sys().(*syscall.Stat_t)
parentSt := parent.Sys().(*syscall.Stat_t)
mntpointSt := toSysStatT(mntpoint.Sys())
parentSt := toSysStatT(parent.Sys())
return mntpointSt.Dev != parentSt.Dev, nil
}

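Mounted detects a mount point without parsing /proc/mounts: on Linux a mounted directory lives on a different device than its parent, so comparing the st_dev of the path and of its ".." is sufficient. Declaring it as a function variable serves the same purpose as the sys* wrappers: tests can replace it with a stub. A small usage sketch (helper name and mount arguments hypothetical):

    // Sketch: mount only when the target is not already a mount point.
    func ensureMounted(dev, target string, flags uintptr) error {
        mounted, err := Mounted(target)
        if err != nil {
            return err
        }
        if mounted {
            return nil // already mounted, nothing to do
        }
        return sysMount(dev, target, "ext4", flags, "")
    }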
Some files were not shown because too many files have changed in this diff.