Mirror of https://github.com/moby/moby.git (synced 2026-01-13 11:42:02 +00:00)
Compare commits
622 Commits
.mailmap (4 lines changed)

@@ -2,7 +2,7 @@
<charles.hooper@dotcloud.com> <chooper@plumata.com>
<daniel.mizyrycki@dotcloud.com> <daniel@dotcloud.com>
<daniel.mizyrycki@dotcloud.com> <mzdaniel@glidelink.net>
Guillaume J. Charmes <guillaume.charmes@dotcloud.com> creack <charmes.guillaume@gmail.com>
Guillaume J. Charmes <guillaume.charmes@dotcloud.com> <charmes.guillaume@gmail.com>
<guillaume.charmes@dotcloud.com> <guillaume@dotcloud.com>
<kencochrane@gmail.com> <KenCochrane@gmail.com>
<sridharr@activestate.com> <github@srid.name>

@@ -16,4 +16,6 @@ Tim Terhorst <mynamewastaken+git@gmail.com>
Andy Smith <github@anarkystic.com>
<kalessin@kalessin.fr> <louis@dotcloud.com>
<victor.vieux@dotcloud.com> <victor@dotcloud.com>
<victor.vieux@dotcloud.com> <dev@vvieux.com>
<dominik@honnef.co> <dominikh@fork-bomb.org>
Thatcher Peskens <thatcher@dotcloud.com>
AUTHORS (22 lines changed)

@@ -1,24 +1,40 @@
# This file lists all individuals having contributed content to the repository.
# If you're submitting a patch, please add your name here in alphabetical order as part of the patch.
#
# For a list of active project maintainers, see the MAINTAINERS file.
#
Al Tobey <al@ooyala.com>
Alexey Shamrin <shamrin@gmail.com>
Andrea Luzzardi <aluzzardi@gmail.com>
Andy Rothfusz <github@metaliveblog.com>
Andy Smith <github@anarkystic.com>
Antony Messerli <amesserl@rackspace.com>
Barry Allard <barry.allard@gmail.com>
Brandon Liu <bdon@bdon.org>
Brian McCallister <brianm@skife.org>
Bruno Bigras <bigras.bruno@gmail.com>
Caleb Spare <cespare@gmail.com>
Charles Hooper <charles.hooper@dotcloud.com>
Daniel Gasienica <daniel@gasienica.ch>
Daniel Mizyrycki <daniel.mizyrycki@dotcloud.com>
Daniel Robinson <gottagetmac@gmail.com>
Daniel Von Fange <daniel@leancoder.com>
Dominik Honnef <dominik@honnef.co>
Don Spaulding <donspauldingii@gmail.com>
Dr Nic Williams <drnicwilliams@gmail.com>
Evan Wies <evan@neomantra.net>
ezbercih <cem.ezberci@gmail.com>
Flavio Castelli <fcastelli@suse.com>
Francisco Souza <f@souza.cc>
Frederick F. Kautz IV <fkautz@alumni.cmu.edu>
Guillaume J. Charmes <guillaume.charmes@dotcloud.com>
Harley Laue <losinggeneration@gmail.com>
Hunter Blanks <hunter@twilio.com>
Jeff Lindsay <progrium@gmail.com>
Jeremy Grosser <jeremy@synack.me>
Joffrey F <joffrey@dotcloud.com>
John Costa <john.costa@gmail.com>
Jonas Pfenniger <jonas@pfenniger.name>
Jonathan Rudenberg <jonathan@titanous.com>
Julien Barbier <write0@gmail.com>
Jérôme Petazzoni <jerome.petazzoni@dotcloud.com>

@@ -26,9 +42,13 @@ Ken Cochrane <kencochrane@gmail.com>
Kevin J. Lynagh <kevin@keminglabs.com>
Louis Opter <kalessin@kalessin.fr>
Maxim Treskin <zerthurd@gmail.com>
Michael Crosby <crosby.michael@gmail.com>
Mikhail Sobolev <mss@mawhrin.net>
Nate Jones <nate@endot.org>
Nelson Chen <crazysim@gmail.com>
Niall O'Higgins <niallo@unworkable.org>
odk- <github@odkurzacz.org>
Paul Bowsher <pbowsher@globalpersonals.co.uk>
Paul Hammond <paul@paulhammond.org>
Piotr Bogdan <ppbogdan@gmail.com>
Robert Obryk <robryk@gmail.com>

@@ -38,6 +58,8 @@ Silas Sewell <silas@sewell.org>
Solomon Hykes <solomon@dotcloud.com>
Sridhar Ratnakumar <sridharr@activestate.com>
Thatcher Peskens <thatcher@dotcloud.com>
Thomas Bikeev <thomas.bikeev@mac.com>
Tianon Gravi <admwiggin@gmail.com>
Tim Terhorst <mynamewastaken+git@gmail.com>
Troy Howard <thoward37@gmail.com>
unclejack <unclejacksons@gmail.com>
CHANGELOG.md (35 lines changed)

@@ -1,5 +1,40 @@
# Changelog

## 0.4.1 (2013-06-17)
+ Remote Api: Add flag to enable cross domain requests
+ Remote Api/Client: Add images and containers sizes in docker ps and docker images
+ Runtime: Configure dns configuration host-wide with 'docker -d -dns'
+ Runtime: Detect faulty DNS configuration and replace it with a public default
+ Runtime: allow docker run <name>:<id>
+ Runtime: you can now specify public port (ex: -p 80:4500)
* Client: allow multiple params in inspect
* Client: Print the container id before the hijack in `docker run`
* Registry: add regexp check on repo's name
* Registry: Move auth to the client
* Runtime: improved image removal to garbage-collect unreferenced parents
* Vagrantfile: Add the rest api port to vagrantfile's port_forward
* Upgrade to Go 1.1
- Builder: don't ignore last line in Dockerfile when it doesn't end with \n
- Registry: Remove login check on pull
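A quick sketch of the new 0.4.1 runtime flags listed above; the image name and the DNS address are placeholders:

```bash
# Host-wide DNS configuration for the daemon ('docker -d -dns')
docker -d -dns 8.8.8.8

# Publish container port 4500 on public port 80 ('-p public:container')
docker run -p 80:4500 <name>:<id>
```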
## 0.4.0 (2013-06-03)
+ Introducing Builder: 'docker build' builds a container, layer by layer, from a source repository containing a Dockerfile
+ Introducing Remote API: control Docker programmatically using a simple HTTP/json API
* Runtime: various reliability and usability improvements

## 0.3.4 (2013-05-30)
+ Builder: 'docker build' builds a container, layer by layer, from a source repository containing a Dockerfile
+ Builder: 'docker build -t FOO' applies the tag FOO to the newly built container.
+ Runtime: interactive TTYs correctly handle window resize
* Runtime: fix how configuration is merged between layers
+ Remote API: split stdout and stderr on 'docker run'
+ Remote API: optionally listen on a different IP and port (use at your own risk)
* Documentation: improved install instructions.

## 0.3.3 (2013-05-23)
- Registry: Fix push regression
- Various bugfixes

## 0.3.2 (2013-05-09)
* Runtime: Store the actual archive on commit
* Registry: Improve the checksum process
@@ -1,9 +1,6 @@
# Contributing to Docker

Want to hack on Docker? Awesome! There are instructions to get you
started on the website: http://docker.io/gettingstarted.html

They are probably not perfect, please let us know if anything feels
Want to hack on Docker? Awesome! Here are instructions to get you started. They are probably not perfect, please let us know if anything feels
wrong or incomplete.

## Contribution guidelines

@@ -91,3 +88,73 @@ Add your name to the AUTHORS file, but make sure the list is sorted and your
name and email address match your git configuration. The AUTHORS file is
regenerated occasionally from the git commit history, so a mismatch may result
in your changes being overwritten.
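A hedged sketch of checking that your commits will match your AUTHORS entry; `git shortlog` honours the .mailmap mappings changed above, though the actual regeneration script is not shown in this diff:

```bash
# The identity that will appear in the commit history
git config user.name
git config user.email

# Contributors as derived from history (respects .mailmap)
git shortlog -se
```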
## Decision process

### How are decisions made?

Short answer: with pull requests to the docker repository.

Docker is an open-source project with an open design philosophy. This means that the repository is the source of truth for EVERY aspect of the project,
including its philosophy, design, roadmap and APIs. *If it's part of the project, it's in the repo. It's in the repo, it's part of the project.*

As a result, all decisions can be expressed as changes to the repository. An implementation change is a change to the source code. An API change is a change to
the API specification. A philosophy change is a change to the philosophy manifesto. And so on.

All decisions affecting docker, big and small, follow the same 3 steps:

* Step 1: Open a pull request. Anyone can do this.

* Step 2: Discuss the pull request. Anyone can do this.

* Step 3: Accept or refuse a pull request. The relevant maintainer does this (see below "Who decides what?")

### Who decides what?

So all decisions are pull requests, and the relevant maintainer makes the decision by accepting or refusing the pull request.
But how do we identify the relevant maintainer for a given pull request?

Docker follows the timeless, highly efficient and totally unfair system known as [Benevolent dictator for life](http://en.wikipedia.org/wiki/Benevolent_Dictator_for_Life),
with yours truly, Solomon Hykes, in the role of BDFL.
This means that all decisions are made by default by me. Since making every decision myself would be highly unscalable, in practice decisions are spread across multiple maintainers.

The relevant maintainer for a pull request is assigned in 3 steps:

* Step 1: Determine the subdirectory affected by the pull request. This might be src/registry, docs/source/api, or any other part of the repo.

* Step 2: Find the MAINTAINERS file which affects this directory. If the directory itself does not have a MAINTAINERS file, work your way up the the repo hierarchy until you find one.

* Step 3: The first maintainer listed is the primary maintainer. The pull request is assigned to him. He may assign it to other listed maintainers, at his discretion.

### I'm a maintainer, should I make pull requests too?

Primary maintainers are not required to create pull requests when changing their own subdirectory, but secondary maintainers are.

### Who assigns maintainers?

Solomon.

### How can I become a maintainer?

* Step 1: learn the component inside out
* Step 2: make yourself useful by contributing code, bugfixes, support etc.
* Step 3: volunteer on the irc channel (#docker@freenode)

Don't forget: being a maintainer is a time investment. Make sure you will have time to make yourself available.
You don't have to be a maintainer to make a difference on the project!

### What are a maintainer's responsibility?

It is every maintainer's responsibility to:

* 1) Expose a clear roadmap for improving their component.
* 2) Deliver prompt feedback and decisions on pull requests.
* 3) Be available to anyone with questions, bug reports, criticism etc. on their component. This includes irc, github requests and the mailing list.
* 4) Make sure their component respects the philosophy, design and roadmap of the project.

### How is this process changed?

Just like everything else: by making a pull request :)
FIXME (36 lines, new file)

@@ -0,0 +1,36 @@
## FIXME

This file is a loose collection of things to improve in the codebase, for the internal
use of the maintainers.

They are not big enough to be in the roadmap, not user-facing enough to be github issues,
and not important enough to be discussed in the mailing list.

They are just like FIXME comments in the source code, except we're not sure where in the source
to put them - so we put them here :)

* Merge Runtime, Server and Builder into Runtime
* Run linter on codebase
* Unify build commands and regular commands
* Move source code into src/ subdir for clarity
* Clean up the Makefile, it's a mess
* docker buidl: show short IDs
* docker build: on non-existent local path for ADD, don't show full absolute path on the host
* mount into /dockerinit rather than /sbin/init
* docker tag foo REPO:TAG
* use size header for progress bar in pull
* Clean up context upload in build!!!
* Parallel pull
* Ensure /proc/sys/net/ipv4/ip_forward is 1
* Force DNS to public!
* Always generate a resolv.conf per container, to avoid changing resolv.conf under thne container's feet
* Save metadata with import/export
* Upgrade dockerd without stopping containers
* bring back git revision info, looks like it was lost
* Simple command to remove all untagged images
* Simple command to clean up containers for disk space
* Caching after an ADD
* entry point config
* bring back git revision info, looks like it was lost
MAINTAINERS (5 lines, new file)

@@ -0,0 +1,5 @@
Solomon Hykes <solomon@dotcloud.com>
Guillaume Charmes <guillaume@dotcloud.com>
Victor Vieux <victor@dotcloud.com>
api.go: Victor Vieux <victor@dotcloud.com>
Vagrantfile: Daniel Mizyrycki <daniel@dotcloud.com>
Makefile (2 lines changed)

@@ -46,6 +46,7 @@ whichrelease:

release: $(BINRELEASE)
	s3cmd -P put $(BINRELEASE) s3://get.docker.io/builds/`uname -s`/`uname -m`/docker-$(RELEASE_VERSION).tgz
	s3cmd -P put docker-latest.tgz s3://get.docker.io/builds/`uname -s`/`uname -m`/docker-latest.tgz

srcrelease: $(SRCRELEASE)
deps: $(DOCKER_DIR)

@@ -60,6 +61,7 @@ $(SRCRELEASE):
$(BINRELEASE): $(SRCRELEASE)
	rm -f $(BINRELEASE)
	cd $(SRCRELEASE); make; cp -R bin docker-$(RELEASE_VERSION); tar -f ../$(BINRELEASE) -zv -c docker-$(RELEASE_VERSION)
	cd $(SRCRELEASE); cp -R bin docker-latest; tar -f ../docker-latest.tgz -zv -c docker-latest

clean:
	@rm -rf $(dir $(DOCKER_BIN))
NOTICE (7 lines changed)

@@ -3,4 +3,9 @@ Copyright 2012-2013 dotCloud, inc.

This product includes software developed at dotCloud, inc. (http://www.dotcloud.com).

This product contains software (https://github.com/kr/pty) developed by Keith Rarick, licensed under the MIT License.

Transfers of Docker shall be in accordance with applicable export controls of any country and all other applicable
legal requirements. Docker shall not be distributed or downloaded to or in Cuba, Iran, North Korea, Sudan or Syria
and shall not be distributed or downloaded to any person on the Denied Persons List administered by the U.S.
Department of Commerce.
README.md (36 lines changed)

@@ -12,7 +12,7 @@ Docker is an open-source implementation of the deployment engine which powers [d
It benefits directly from the experience accumulated over several years of large-scale operation and support of hundreds of thousands
of applications and databases.

## Better than VMs

@@ -35,7 +35,7 @@ for containerization, including Linux with [openvz](http://openvz.org), [vserver

Docker builds on top of these low-level primitives to offer developers a portable format and runtime environment that solves
all 4 problems. Docker containers are small (and their transfer can be optimized with layers), they have basically zero memory and cpu overhead,
the are completely portable and are designed from the ground up with an application-centric design.
they are completely portable and are designed from the ground up with an application-centric design.

The best part: because docker operates at the OS level, it can still be run inside a VM!

@@ -46,7 +46,7 @@ Docker does not require that you buy into a particular programming language, fra
Is your application a unix process? Does it use files, tcp connections, environment variables, standard unix streams and command-line
arguments as inputs and outputs? Then docker can run it.

Can your application's build be expressed a sequence of such commands? Then docker can build it.
Can your application's build be expressed as a sequence of such commands? Then docker can build it.

## Escape dependency hell

@@ -70,21 +70,21 @@ Docker solves dependency hell by giving the developer a simple way to express *a
and streamline the process of assembling them. If this makes you think of [XKCD 927](http://xkcd.com/927/), don't worry. Docker doesn't
*replace* your favorite packaging systems. It simply orchestrates their use in a simple and repeatable way. How does it do that? With layers.

Docker defines a build as running a sequence unix commands, one after the other, in the same container. Build commands modify the contents of the container
Docker defines a build as running a sequence of unix commands, one after the other, in the same container. Build commands modify the contents of the container
(usually by installing new files on the filesystem), the next command modifies it some more, etc. Since each build command inherits the result of the previous
commands, the *order* in which the commands are executed expresses *dependencies*.

Here's a typical docker build process:

```bash
from ubuntu:12.10
run apt-get update
run apt-get install python
run apt-get install python-pip
run pip install django
run apt-get install curl
run curl http://github.com/shykes/helloflask/helloflask/master.tar.gz | tar -zxv
run cd master && pip install -r requirements.txt
from ubuntu:12.10
run apt-get update
run DEBIAN_FRONTEND=noninteractive apt-get install -q -y python
run DEBIAN_FRONTEND=noninteractive apt-get install -q -y python-pip
run pip install django
run DEBIAN_FRONTEND=noninteractive apt-get install -q -y curl
run curl -L https://github.com/shykes/helloflask/archive/master.tar.gz | tar -xzv
run cd helloflask-master && pip install -r requirements.txt
```
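A hedged sketch of driving that build: `docker build -t` comes from the changelog above, while treating the current directory as the source repository is an assumption for illustration.

```bash
# Build an image from a checkout containing the Dockerfile shown above
docker build -t helloflask .

# The new image (with its size, per the 0.4.1 notes) shows up in:
docker images
```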
Note that Docker doesn't care *how* dependencies are built - as long as they can be built by running a unix command in a container.

@@ -251,7 +251,7 @@ Note
----

We also keep the documentation in this repository. The website documentation is generated using sphinx using these sources.
Please find it under docs/sources/ and read more about it https://github.com/dotcloud/docker/master/docs/README.md
Please find it under docs/sources/ and read more about it https://github.com/dotcloud/docker/tree/master/docs/README.md

Please feel free to fix / update the documentation and send us pull requests. More tutorials are also welcome.

@@ -293,7 +293,7 @@ a format that is self-describing and portable, so that any compliant runtime can

The spec for Standard Containers is currently a work in progress, but it is very straightforward. It mostly defines 1) an image format, 2) a set of standard operations, and 3) an execution environment.

A great analogy for this is the shipping container. Just like Standard Containers are a fundamental unit of software delivery, shipping containers (http://bricks.argz.com/ins/7823-1/12) are a fundamental unit of physical delivery.
A great analogy for this is the shipping container. Just like how Standard Containers are a fundamental unit of software delivery, shipping containers (http://bricks.argz.com/ins/7823-1/12) are a fundamental unit of physical delivery.

### 1. STANDARD OPERATIONS

@@ -321,7 +321,7 @@ Similarly, before Standard Containers, by the time a software component ran in p

### 5. INDUSTRIAL-GRADE DELIVERY

There are 17 million shipping containers in existence, packed with every physical good imaginable. Every single one of them can be loaded on the same boats, by the same cranes, in the same facilities, and sent anywhere in the World with incredible efficiency. It is embarrassing to think that a 30 ton shipment of coffee can safely travel half-way across the World in *less time* than it takes a software team to deliver its code from one datacenter to another sitting 10 miles away.
There are 17 million shipping containers in existence, packed with every physical good imaginable. Every single one of them can be loaded onto the same boats, by the same cranes, in the same facilities, and sent anywhere in the World with incredible efficiency. It is embarrassing to think that a 30 ton shipment of coffee can safely travel half-way across the World in *less time* than it takes a software team to deliver its code from one datacenter to another sitting 10 miles away.

With Standard Containers we can put an end to that embarrassment, by making INDUSTRIAL-GRADE DELIVERY of software a reality.

@@ -371,4 +371,10 @@ Standard Container Specification

#### Security

### Legal

Transfers of Docker shall be in accordance with applicable export controls of any country and all other applicable
legal requirements. Docker shall not be distributed or downloaded to or in Cuba, Iran, North Korea, Sudan or Syria
and shall not be distributed or downloaded to any person on the Denied Persons List administered by the U.S.
Department of Commerce.
@@ -1,71 +0,0 @@

## Spec for data volumes

Spec owner: Solomon Hykes <solomon@dotcloud.com>

Data volumes (issue #111) are a much-requested feature which trigger much discussion and debate. Below is the current authoritative spec for implementing data volumes.
This spec will be deprecated once the feature is fully implemented.

Discussion, requests, trolls, demands, offerings, threats and other forms of supplications concerning this spec should be addressed to Solomon here: https://github.com/dotcloud/docker/issues/111

### 1. Creating data volumes

At container creation, parts of a container's filesystem can be mounted as separate data volumes. Volumes are defined with the -v flag.

For example:

```bash
$ docker run -v /var/lib/postgres -v /var/log postgres /usr/bin/postgres
```

In this example, a new container is created from the 'postgres' image. At the same time, docker creates 2 new data volumes: one will be mapped to the container at /var/lib/postgres, the other at /var/log.

2 important notes:

1) Volumes don't have top-level names. At no point does the user provide a name, or is a name given to him. Volumes are identified by the path at which they are mounted inside their container.

2) The user doesn't choose the source of the volume. Docker only mounts volumes it created itself, in the same way that it only runs containers that it created itself. That is by design.

### 2. Sharing data volumes

Instead of creating its own volumes, a container can share another container's volumes. For example:

```bash
$ docker run --volumes-from $OTHER_CONTAINER_ID postgres /usr/local/bin/postgres-backup
```

In this example, a new container is created from the 'postgres' example. At the same time, docker will *re-use* the 2 data volumes created in the previous example. One volume will be mounted on the /var/lib/postgres of *both* containers, and the other will be mounted on the /var/log of both containers.

### 3. Under the hood

Docker stores volumes in /var/lib/docker/volumes. Each volume receives a globally unique ID at creation, and is stored at /var/lib/docker/volumes/ID.

At creation, volumes are attached to a single container - the source of truth for this mapping will be the container's configuration.

Mounting a volume consists of calling "mount --bind" from the volume's directory to the appropriate sub-directory of the container mountpoint. This may be done by Docker itself, or farmed out to lxc (which supports mount-binding) if possible.
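A minimal sketch of the bind-mount mechanism described above; the volume ID, container ID and rootfs path are illustrative placeholders, not paths the spec guarantees.

```bash
# A volume lives under /var/lib/docker/volumes/<ID> (per the spec above)
VOLUME_DIR=/var/lib/docker/volumes/0123456789ab

# Attach it by bind-mounting onto the container's mountpoint (placeholder path)
CONTAINER_ROOT=/var/lib/docker/containers/CONTAINER_ID/rootfs
mount --bind "$VOLUME_DIR" "$CONTAINER_ROOT/var/lib/postgres"
```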
### 4. Backups, transfers and other volume operations

Volumes sometimes need to be backed up, transfered between hosts, synchronized, etc. These operations typically are application-specific or site-specific, eg. rsync vs. S3 upload vs. replication vs...

Rather than attempting to implement all these scenarios directly, Docker will allow for custom implementations using an extension mechanism.

### 5. Custom volume handlers

Docker allows for arbitrary code to be executed against a container's volumes, to implement any custom action: backup, transfer, synchronization across hosts, etc.

Here's an example:

```bash
$ DB=$(docker run -d -v /var/lib/postgres -v /var/log postgres /usr/bin/postgres)

$ BACKUP_JOB=$(docker run -d --volumes-from $DB shykes/backuper /usr/local/bin/backup-postgres --s3creds=$S3CREDS)

$ docker wait $BACKUP_JOB
```

Congratulations, you just implemented a custom volume handler, using Docker's built-in ability to 1) execute arbitrary code and 2) share volumes between containers.
Vagrantfile (65 lines changed, vendored)

@@ -3,41 +3,62 @@

BOX_NAME = ENV['BOX_NAME'] || "ubuntu"
BOX_URI = ENV['BOX_URI'] || "http://files.vagrantup.com/precise64.box"
PPA_KEY = "E61D797F63561DC6"
AWS_REGION = ENV['AWS_REGION'] || "us-east-1"
AWS_AMI = ENV['AWS_AMI'] || "ami-d0f89fb9"
FORWARD_DOCKER_PORTS = ENV['FORWARD_DOCKER_PORTS']

Vagrant::Config.run do |config|
# Setup virtual machine box. This VM configuration code is always executed.
config.vm.box = BOX_NAME
config.vm.box_url = BOX_URI
# Add docker PPA key to the local repository and install docker
pkg_cmd = "apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys #{PPA_KEY}; "
pkg_cmd << "echo 'deb http://ppa.launchpad.net/dotcloud/lxc-docker/ubuntu precise main' >/etc/apt/sources.list.d/lxc-docker.list; "
pkg_cmd << "apt-get update -qq; apt-get install -q -y lxc-docker"
if ARGV.include?("--provider=aws".downcase)
# Add AUFS dependency to amazon's VM
pkg_cmd << "; apt-get install linux-image-extra-3.2.0-40-virtual"
config.vm.forward_port 4243, 4243

# Provision docker and new kernel if deployment was not done
if Dir.glob("#{File.dirname(__FILE__)}/.vagrant/machines/default/*/id").empty?
# Add lxc-docker package
pkg_cmd = "apt-get update -qq; apt-get install -q -y python-software-properties; " \
"add-apt-repository -y ppa:dotcloud/lxc-docker; apt-get update -qq; " \
"apt-get install -q -y lxc-docker; "
# Add X.org Ubuntu backported 3.8 kernel
pkg_cmd << "add-apt-repository -y ppa:ubuntu-x-swat/r-lts-backport; " \
"apt-get update -qq; apt-get install -q -y linux-image-3.8.0-19-generic; "
# Add guest additions if local vbox VM
is_vbox = true
ARGV.each do |arg| is_vbox &&= !arg.downcase.start_with?("--provider") end
if is_vbox
pkg_cmd << "apt-get install -q -y linux-headers-3.8.0-19-generic dkms; " \
"echo 'Downloading VBox Guest Additions...'; " \
"wget -q http://dlc.sun.com.edgesuite.net/virtualbox/4.2.12/VBoxGuestAdditions_4.2.12.iso; "
# Prepare the VM to add guest additions after reboot
pkg_cmd << "echo -e 'mount -o loop,ro /home/vagrant/VBoxGuestAdditions_4.2.12.iso /mnt\n" \
"echo yes | /mnt/VBoxLinuxAdditions.run\numount /mnt\n" \
"rm /root/guest_additions.sh; ' > /root/guest_additions.sh; " \
"chmod 700 /root/guest_additions.sh; " \
"sed -i -E 's#^exit 0#[ -x /root/guest_additions.sh ] \\&\\& /root/guest_additions.sh#' /etc/rc.local; " \
"echo 'Installation of VBox Guest Additions is proceeding in the background.'; " \
"echo '\"vagrant reload\" can be used in about 2 minutes to activate the new guest additions.'; "
end
# Activate new kernel
pkg_cmd << "shutdown -r +1; "
config.vm.provision :shell, :inline => pkg_cmd
end
config.vm.provision :shell, :inline => pkg_cmd
end

# Providers were added on Vagrant >= 1.1.0
Vagrant::VERSION >= "1.1.0" and Vagrant.configure("2") do |config|
config.vm.provider :aws do |aws, override|
config.vm.box = "dummy"
config.vm.box_url = "https://github.com/mitchellh/vagrant-aws/raw/master/dummy.box"
aws.access_key_id = ENV["AWS_ACCESS_KEY_ID"]
aws.secret_access_key = ENV["AWS_SECRET_ACCESS_KEY"]
aws.keypair_name = ENV["AWS_KEYPAIR_NAME"]
override.ssh.private_key_path = ENV["AWS_SSH_PRIVKEY"]
override.ssh.username = "ubuntu"
aws.region = "us-east-1"
aws.ami = "ami-d0f89fb9"
aws.region = AWS_REGION
aws.ami = AWS_AMI
aws.instance_type = "t1.micro"
end

config.vm.provider :rackspace do |rs|
config.vm.box = "dummy"
config.vm.box_url = "https://github.com/mitchellh/vagrant-rackspace/raw/master/dummy.box"
config.ssh.private_key_path = ENV["RS_PRIVATE_KEY"]
rs.username = ENV["RS_USERNAME"]
rs.api_key = ENV["RS_API_KEY"]

@@ -51,3 +72,17 @@ Vagrant::VERSION >= "1.1.0" and Vagrant.configure("2") do |config|
config.vm.box_url = BOX_URI
end
end

if !FORWARD_DOCKER_PORTS.nil?
Vagrant::VERSION < "1.1.0" and Vagrant::Config.run do |config|
(49000..49900).each do |port|
config.vm.forward_port port, port
end
end

Vagrant::VERSION >= "1.1.0" and Vagrant.configure("2") do |config|
(49000..49900).each do |port|
config.vm.network :forwarded_port, :host => port, :guest => port
end
end
end
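The Vagrantfile above is driven entirely by environment variables; a hedged usage sketch (all AWS values are placeholders):

```bash
# Local VirtualBox VM; set FORWARD_DOCKER_PORTS to also forward the 49000-49900 container port range
FORWARD_DOCKER_PORTS=1 vagrant up

# Amazon EC2 provider instead of VirtualBox
export AWS_ACCESS_KEY_ID=YOUR_KEY
export AWS_SECRET_ACCESS_KEY=YOUR_SECRET
export AWS_KEYPAIR_NAME=YOUR_KEYPAIR
export AWS_SSH_PRIVKEY=/path/to/your-keypair.pem
vagrant up --provider=aws
```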
api.go (859 lines, new file)

@@ -0,0 +1,859 @@
|
||||
package docker
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/dotcloud/docker/auth"
|
||||
"github.com/dotcloud/docker/utils"
|
||||
"github.com/gorilla/mux"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const APIVERSION = 1.2
|
||||
|
||||
func hijackServer(w http.ResponseWriter) (io.ReadCloser, io.Writer, error) {
|
||||
conn, _, err := w.(http.Hijacker).Hijack()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
// Flush the options to make sure the client sets the raw mode
|
||||
conn.Write([]byte{})
|
||||
return conn, conn, nil
|
||||
}
|
||||
|
||||
//If we don't do this, POST method without Content-type (even with empty body) will fail
|
||||
func parseForm(r *http.Request) error {
|
||||
if err := r.ParseForm(); err != nil && !strings.HasPrefix(err.Error(), "mime:") {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func parseMultipartForm(r *http.Request) error {
|
||||
if err := r.ParseMultipartForm(4096); err != nil && !strings.HasPrefix(err.Error(), "mime:") {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func httpError(w http.ResponseWriter, err error) {
|
||||
if strings.HasPrefix(err.Error(), "No such") {
|
||||
http.Error(w, err.Error(), http.StatusNotFound)
|
||||
} else if strings.HasPrefix(err.Error(), "Bad parameter") {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
} else if strings.HasPrefix(err.Error(), "Conflict") {
|
||||
http.Error(w, err.Error(), http.StatusConflict)
|
||||
} else if strings.HasPrefix(err.Error(), "Impossible") {
|
||||
http.Error(w, err.Error(), http.StatusNotAcceptable)
|
||||
} else {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
}
|
||||
}
|
||||
|
||||
func writeJSON(w http.ResponseWriter, b []byte) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.Write(b)
|
||||
}
|
||||
|
||||
// FIXME: Use stvconv.ParseBool() instead?
|
||||
func getBoolParam(value string) (bool, error) {
|
||||
if value == "1" || strings.ToLower(value) == "true" {
|
||||
return true, nil
|
||||
}
|
||||
if value == "" || value == "0" || strings.ToLower(value) == "false" {
|
||||
return false, nil
|
||||
}
|
||||
return false, fmt.Errorf("Bad parameter")
|
||||
}
|
||||
|
||||
func getAuth(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
if version > 1.1 {
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
return nil
|
||||
}
|
||||
authConfig, err := auth.LoadConfig(srv.runtime.root)
|
||||
if err != nil {
|
||||
if err != auth.ErrConfigFileMissing {
|
||||
return err
|
||||
}
|
||||
authConfig = &auth.AuthConfig{}
|
||||
}
|
||||
b, err := json.Marshal(&auth.AuthConfig{Username: authConfig.Username, Email: authConfig.Email})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
writeJSON(w, b)
|
||||
return nil
|
||||
}
|
||||
|
||||
func postAuth(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
authConfig := &auth.AuthConfig{}
|
||||
err := json.NewDecoder(r.Body).Decode(authConfig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
status := ""
|
||||
if version > 1.1 {
|
||||
status, err = auth.Login(authConfig, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
localAuthConfig, err := auth.LoadConfig(srv.runtime.root)
|
||||
if err != nil {
|
||||
if err != auth.ErrConfigFileMissing {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if authConfig.Username == localAuthConfig.Username {
|
||||
authConfig.Password = localAuthConfig.Password
|
||||
}
|
||||
|
||||
newAuthConfig := auth.NewAuthConfig(authConfig.Username, authConfig.Password, authConfig.Email, srv.runtime.root)
|
||||
status, err = auth.Login(newAuthConfig, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if status != "" {
|
||||
b, err := json.Marshal(&APIAuth{Status: status})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
writeJSON(w, b)
|
||||
return nil
|
||||
}
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
return nil
|
||||
}
|
||||
|
||||
func getVersion(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
m := srv.DockerVersion()
|
||||
b, err := json.Marshal(m)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
writeJSON(w, b)
|
||||
return nil
|
||||
}
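A hedged sketch of calling this handler over the Remote API. The /version route and the 4243 port are assumptions: the route table is registered further down api.go, outside this excerpt, and 4243 is the port the Vagrantfile forwards.

```bash
# GET the daemon version; the handler above marshals srv.DockerVersion() to JSON
curl -s http://127.0.0.1:4243/version
```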
|
||||
|
||||
func postContainersKill(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
if vars == nil {
|
||||
return fmt.Errorf("Missing parameter")
|
||||
}
|
||||
name := vars["name"]
|
||||
if err := srv.ContainerKill(name); err != nil {
|
||||
return err
|
||||
}
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
return nil
|
||||
}
|
||||
|
||||
func getContainersExport(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
if vars == nil {
|
||||
return fmt.Errorf("Missing parameter")
|
||||
}
|
||||
name := vars["name"]
|
||||
|
||||
if err := srv.ContainerExport(name, w); err != nil {
|
||||
utils.Debugf("%s", err.Error())
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getImagesJSON(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
if err := parseForm(r); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
all, err := getBoolParam(r.Form.Get("all"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
filter := r.Form.Get("filter")
|
||||
|
||||
outs, err := srv.Images(all, filter)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b, err := json.Marshal(outs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
writeJSON(w, b)
|
||||
return nil
|
||||
}
|
||||
|
||||
func getImagesViz(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
if err := srv.ImagesViz(w); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getInfo(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
out := srv.DockerInfo()
|
||||
b, err := json.Marshal(out)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
writeJSON(w, b)
|
||||
return nil
|
||||
}
|
||||
|
||||
func getImagesHistory(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
if vars == nil {
|
||||
return fmt.Errorf("Missing parameter")
|
||||
}
|
||||
name := vars["name"]
|
||||
outs, err := srv.ImageHistory(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b, err := json.Marshal(outs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
writeJSON(w, b)
|
||||
return nil
|
||||
}
|
||||
|
||||
func getContainersChanges(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
if vars == nil {
|
||||
return fmt.Errorf("Missing parameter")
|
||||
}
|
||||
name := vars["name"]
|
||||
changesStr, err := srv.ContainerChanges(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b, err := json.Marshal(changesStr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
writeJSON(w, b)
|
||||
return nil
|
||||
}
|
||||
|
||||
func getContainersJSON(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
if err := parseForm(r); err != nil {
|
||||
return err
|
||||
}
|
||||
all, err := getBoolParam(r.Form.Get("all"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
since := r.Form.Get("since")
|
||||
before := r.Form.Get("before")
|
||||
n, err := strconv.Atoi(r.Form.Get("limit"))
|
||||
if err != nil {
|
||||
n = -1
|
||||
}
|
||||
|
||||
outs := srv.Containers(all, n, since, before)
|
||||
b, err := json.Marshal(outs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
writeJSON(w, b)
|
||||
return nil
|
||||
}
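The same pattern for the container list handler above, which reads all, limit, since and before from the query string; the /containers/json path is an assumption based on the handler name.

```bash
# List the 5 most recent containers, including stopped ones
curl -s 'http://127.0.0.1:4243/containers/json?all=1&limit=5'
```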
|
||||
|
||||
func postImagesTag(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
if err := parseForm(r); err != nil {
|
||||
return err
|
||||
}
|
||||
repo := r.Form.Get("repo")
|
||||
tag := r.Form.Get("tag")
|
||||
if vars == nil {
|
||||
return fmt.Errorf("Missing parameter")
|
||||
}
|
||||
name := vars["name"]
|
||||
force, err := getBoolParam(r.Form.Get("force"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := srv.ContainerTag(name, repo, tag, force); err != nil {
|
||||
return err
|
||||
}
|
||||
w.WriteHeader(http.StatusCreated)
|
||||
return nil
|
||||
}
|
||||
|
||||
func postCommit(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
if err := parseForm(r); err != nil {
|
||||
return err
|
||||
}
|
||||
config := &Config{}
|
||||
if err := json.NewDecoder(r.Body).Decode(config); err != nil {
|
||||
utils.Debugf("%s", err.Error())
|
||||
}
|
||||
repo := r.Form.Get("repo")
|
||||
tag := r.Form.Get("tag")
|
||||
container := r.Form.Get("container")
|
||||
author := r.Form.Get("author")
|
||||
comment := r.Form.Get("comment")
|
||||
id, err := srv.ContainerCommit(container, repo, tag, author, comment, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b, err := json.Marshal(&APIID{id})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w.WriteHeader(http.StatusCreated)
|
||||
writeJSON(w, b)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Creates an image from Pull or from Import
|
||||
func postImagesCreate(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
if err := parseForm(r); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
src := r.Form.Get("fromSrc")
|
||||
image := r.Form.Get("fromImage")
|
||||
tag := r.Form.Get("tag")
|
||||
repo := r.Form.Get("repo")
|
||||
|
||||
if version > 1.0 {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
}
|
||||
sf := utils.NewStreamFormatter(version > 1.0)
|
||||
if image != "" { //pull
|
||||
registry := r.Form.Get("registry")
|
||||
if err := srv.ImagePull(image, tag, registry, w, sf, &auth.AuthConfig{}); err != nil {
|
||||
if sf.Used() {
|
||||
w.Write(sf.FormatError(err))
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
} else { //import
|
||||
if err := srv.ImageImport(src, repo, tag, r.Body, w, sf); err != nil {
|
||||
if sf.Used() {
|
||||
w.Write(sf.FormatError(err))
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getImagesSearch(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
if err := parseForm(r); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
term := r.Form.Get("term")
|
||||
outs, err := srv.ImagesSearch(term)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b, err := json.Marshal(outs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
writeJSON(w, b)
|
||||
return nil
|
||||
}
|
||||
|
||||
func postImagesInsert(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
if err := parseForm(r); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
url := r.Form.Get("url")
|
||||
path := r.Form.Get("path")
|
||||
if vars == nil {
|
||||
return fmt.Errorf("Missing parameter")
|
||||
}
|
||||
name := vars["name"]
|
||||
if version > 1.0 {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
}
|
||||
sf := utils.NewStreamFormatter(version > 1.0)
|
||||
imgID, err := srv.ImageInsert(name, url, path, w, sf)
|
||||
if err != nil {
|
||||
if sf.Used() {
|
||||
w.Write(sf.FormatError(err))
|
||||
return nil
|
||||
}
|
||||
}
|
||||
b, err := json.Marshal(&APIID{ID: imgID})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
writeJSON(w, b)
|
||||
return nil
|
||||
}
|
||||
|
||||
func postImagesPush(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
authConfig := &auth.AuthConfig{}
|
||||
if version > 1.1 {
|
||||
if err := json.NewDecoder(r.Body).Decode(authConfig); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
localAuthConfig, err := auth.LoadConfig(srv.runtime.root)
|
||||
if err != nil && err != auth.ErrConfigFileMissing {
|
||||
return err
|
||||
}
|
||||
authConfig = localAuthConfig
|
||||
}
|
||||
if err := parseForm(r); err != nil {
|
||||
return err
|
||||
}
|
||||
registry := r.Form.Get("registry")
|
||||
|
||||
if vars == nil {
|
||||
return fmt.Errorf("Missing parameter")
|
||||
}
|
||||
name := vars["name"]
|
||||
if version > 1.0 {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
}
|
||||
sf := utils.NewStreamFormatter(version > 1.0)
|
||||
if err := srv.ImagePush(name, registry, w, sf, authConfig); err != nil {
|
||||
if sf.Used() {
|
||||
w.Write(sf.FormatError(err))
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func postContainersCreate(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
config := &Config{}
|
||||
out := &APIRun{}
|
||||
|
||||
if err := json.NewDecoder(r.Body).Decode(config); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(config.Dns) == 0 && len(srv.runtime.Dns) == 0 && utils.CheckLocalDns() {
|
||||
out.Warnings = append(out.Warnings, fmt.Sprintf("Docker detected local DNS server on resolv.conf. Using default external servers: %v", defaultDns))
|
||||
config.Dns = defaultDns
|
||||
}
|
||||
|
||||
id, err := srv.ContainerCreate(config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
out.ID = id
|
||||
|
||||
if config.Memory > 0 && !srv.runtime.capabilities.MemoryLimit {
|
||||
log.Println("WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.")
|
||||
out.Warnings = append(out.Warnings, "Your kernel does not support memory limit capabilities. Limitation discarded.")
|
||||
}
|
||||
if config.Memory > 0 && !srv.runtime.capabilities.SwapLimit {
|
||||
log.Println("WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.")
|
||||
out.Warnings = append(out.Warnings, "Your kernel does not support memory swap capabilities. Limitation discarded.")
|
||||
}
|
||||
|
||||
b, err := json.Marshal(out)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w.WriteHeader(http.StatusCreated)
|
||||
writeJSON(w, b)
|
||||
return nil
|
||||
}
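A corresponding client call posts a Config document and reads back the APIRun payload, including any kernel-capability warnings appended above. A minimal sketch, assuming the daemon address used in the other examples and a Config trimmed to the fields needed here:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// Config is trimmed down for the example; the real struct has many more fields.
type Config struct {
	Image string
	Cmd   []string
}

type APIRun struct {
	ID       string   `json:"Id"`
	Warnings []string `json:",omitempty"`
}

func main() {
	body, _ := json.Marshal(Config{Image: "base", Cmd: []string{"echo", "hello"}})
	resp, err := http.Post("http://127.0.0.1:4243/containers/create", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var out APIRun
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println("created container", out.ID)
	for _, warning := range out.Warnings {
		fmt.Println("WARNING:", warning)
	}
}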
|
||||
|
||||
func postContainersRestart(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
if err := parseForm(r); err != nil {
|
||||
return err
|
||||
}
|
||||
t, err := strconv.Atoi(r.Form.Get("t"))
|
||||
if err != nil || t < 0 {
|
||||
t = 10
|
||||
}
|
||||
if vars == nil {
|
||||
return fmt.Errorf("Missing parameter")
|
||||
}
|
||||
name := vars["name"]
|
||||
if err := srv.ContainerRestart(name, t); err != nil {
|
||||
return err
|
||||
}
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
return nil
|
||||
}
|
||||
|
||||
func deleteContainers(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
if err := parseForm(r); err != nil {
|
||||
return err
|
||||
}
|
||||
if vars == nil {
|
||||
return fmt.Errorf("Missing parameter")
|
||||
}
|
||||
name := vars["name"]
|
||||
removeVolume, err := getBoolParam(r.Form.Get("v"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := srv.ContainerDestroy(name, removeVolume); err != nil {
|
||||
return err
|
||||
}
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
return nil
|
||||
}
|
||||
|
||||
func deleteImages(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
if err := parseForm(r); err != nil {
|
||||
return err
|
||||
}
|
||||
if vars == nil {
|
||||
return fmt.Errorf("Missing parameter")
|
||||
}
|
||||
name := vars["name"]
|
||||
imgs, err := srv.ImageDelete(name, version > 1.1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if imgs != nil {
|
||||
if len(*imgs) != 0 {
|
||||
b, err := json.Marshal(imgs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
writeJSON(w, b)
|
||||
} else {
|
||||
return fmt.Errorf("Conflict, %s wasn't deleted", name)
|
||||
}
|
||||
} else {
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
}
|
||||
return nil
|
||||
}
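The handler has three outcomes: a JSON list of APIRmi entries when something was untagged or deleted (API versions newer than 1.1), a conflict error when nothing could be removed, and a bare 204 for older API versions. A client-side sketch handling the JSON case, under the same daemon-address assumption as the examples above:

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// APIRmi mirrors the per-image result entries returned by deleteImages.
type APIRmi struct {
	Deleted  string `json:",omitempty"`
	Untagged string `json:",omitempty"`
}

func main() {
	req, _ := http.NewRequest("DELETE", "http://127.0.0.1:4243/v1.2/images/busybox", nil)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	switch resp.StatusCode {
	case http.StatusNoContent:
		fmt.Println("deleted (no details returned)")
	case http.StatusOK:
		var results []APIRmi
		if err := json.NewDecoder(resp.Body).Decode(&results); err != nil {
			panic(err)
		}
		for _, r := range results {
			if r.Untagged != "" {
				fmt.Println("untagged:", r.Untagged)
			}
			if r.Deleted != "" {
				fmt.Println("deleted:", r.Deleted)
			}
		}
	default:
		fmt.Println("unexpected status:", resp.Status)
	}
}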
|
||||
|
||||
func postContainersStart(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
if vars == nil {
|
||||
return fmt.Errorf("Missing parameter")
|
||||
}
|
||||
name := vars["name"]
|
||||
if err := srv.ContainerStart(name); err != nil {
|
||||
return err
|
||||
}
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
return nil
|
||||
}
|
||||
|
||||
func postContainersStop(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
if err := parseForm(r); err != nil {
|
||||
return err
|
||||
}
|
||||
t, err := strconv.Atoi(r.Form.Get("t"))
|
||||
if err != nil || t < 0 {
|
||||
t = 10
|
||||
}
|
||||
|
||||
if vars == nil {
|
||||
return fmt.Errorf("Missing parameter")
|
||||
}
|
||||
name := vars["name"]
|
||||
|
||||
if err := srv.ContainerStop(name, t); err != nil {
|
||||
return err
|
||||
}
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
return nil
|
||||
}
|
||||
|
||||
func postContainersWait(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
if vars == nil {
|
||||
return fmt.Errorf("Missing parameter")
|
||||
}
|
||||
name := vars["name"]
|
||||
status, err := srv.ContainerWait(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b, err := json.Marshal(&APIWait{StatusCode: status})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
writeJSON(w, b)
|
||||
return nil
|
||||
}
|
||||
|
||||
func postContainersResize(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
if err := parseForm(r); err != nil {
|
||||
return err
|
||||
}
|
||||
height, err := strconv.Atoi(r.Form.Get("h"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
width, err := strconv.Atoi(r.Form.Get("w"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if vars == nil {
|
||||
return fmt.Errorf("Missing parameter")
|
||||
}
|
||||
name := vars["name"]
|
||||
if err := srv.ContainerResize(name, height, width); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func postContainersAttach(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
if err := parseForm(r); err != nil {
|
||||
return err
|
||||
}
|
||||
logs, err := getBoolParam(r.Form.Get("logs"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
stream, err := getBoolParam(r.Form.Get("stream"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
stdin, err := getBoolParam(r.Form.Get("stdin"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
stdout, err := getBoolParam(r.Form.Get("stdout"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
stderr, err := getBoolParam(r.Form.Get("stderr"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if vars == nil {
|
||||
return fmt.Errorf("Missing parameter")
|
||||
}
|
||||
name := vars["name"]
|
||||
|
||||
if _, err := srv.ContainerInspect(name); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
in, out, err := hijackServer(w)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer in.Close()
|
||||
|
||||
fmt.Fprintf(out, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n")
|
||||
if err := srv.ContainerAttach(name, logs, stream, stdin, stdout, stderr, in, out); err != nil {
|
||||
fmt.Fprintf(out, "Error: %s\n", err)
|
||||
}
|
||||
return nil
|
||||
}
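Because the handler hijacks the underlying TCP connection and answers with a raw-stream content type, a plain http.Client cannot be used for attach; the client has to dial the socket itself, send the request by hand, and then treat the connection as a bidirectional byte stream. A rough sketch of that dance, assuming a TCP daemon endpoint and a made-up container ID (in this simplified version the HTTP status line and headers also appear on stdout before the stream starts):

package main

import (
	"fmt"
	"io"
	"net"
	"os"
)

func main() {
	containerID := "abcdef123456" // hypothetical container ID

	conn, err := net.Dial("tcp", "127.0.0.1:4243")
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// Send the attach request manually; after the 200 response the connection
	// carries the raw container streams in both directions.
	fmt.Fprintf(conn, "POST /v1.2/containers/%s/attach?stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1\r\nHost: 127.0.0.1\r\n\r\n", containerID)

	// Pump stdin to the container and the container's output to stdout.
	go io.Copy(conn, os.Stdin)
	io.Copy(os.Stdout, conn)
}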
|
||||
|
||||
func getContainersByName(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
if vars == nil {
|
||||
return fmt.Errorf("Missing parameter")
|
||||
}
|
||||
name := vars["name"]
|
||||
|
||||
container, err := srv.ContainerInspect(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b, err := json.Marshal(container)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
writeJSON(w, b)
|
||||
return nil
|
||||
}
|
||||
|
||||
func getImagesByName(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
if vars == nil {
|
||||
return fmt.Errorf("Missing parameter")
|
||||
}
|
||||
name := vars["name"]
|
||||
|
||||
image, err := srv.ImageInspect(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b, err := json.Marshal(image)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
writeJSON(w, b)
|
||||
return nil
|
||||
}
|
||||
|
||||
func postImagesGetCache(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
apiConfig := &APIImageConfig{}
|
||||
if err := json.NewDecoder(r.Body).Decode(apiConfig); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
image, err := srv.ImageGetCached(apiConfig.ID, apiConfig.Config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if image == nil {
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
return nil
|
||||
}
|
||||
apiID := &APIID{ID: image.ID}
|
||||
b, err := json.Marshal(apiID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
writeJSON(w, b)
|
||||
return nil
|
||||
}
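This endpoint backs the build cache: the remote builder posts the parent image ID plus the candidate Config, and either gets back the ID of an existing child image built from the same config or a 404. A sketch of the request side; the daemon address, image ID, and command are placeholders:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// Config is trimmed down for the example.
type Config struct {
	Cmd []string
}

// APIImageConfig is the request body: the parent image ID plus the config to match.
type APIImageConfig struct {
	ID string `json:"Id"`
	*Config
}

func main() {
	payload, _ := json.Marshal(&APIImageConfig{
		ID:     "0123456789ab", // hypothetical parent image ID
		Config: &Config{Cmd: []string{"/bin/sh", "-c", "apt-get update"}},
	})
	resp, err := http.Post("http://127.0.0.1:4243/images/getCache", "application/json", bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	if resp.StatusCode == http.StatusNotFound {
		fmt.Println("cache miss")
		return
	}
	var cached struct {
		ID string `json:"Id"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&cached); err != nil {
		panic(err)
	}
	fmt.Println("cache hit:", cached.ID)
}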
|
||||
|
||||
func postBuild(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
if err := r.ParseMultipartForm(4096); err != nil {
|
||||
return err
|
||||
}
|
||||
remote := r.FormValue("t")
|
||||
tag := ""
|
||||
if strings.Contains(remote, ":") {
|
||||
remoteParts := strings.Split(remote, ":")
|
||||
tag = remoteParts[1]
|
||||
remote = remoteParts[0]
|
||||
}
|
||||
|
||||
dockerfile, _, err := r.FormFile("Dockerfile")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
context, _, err := r.FormFile("Context")
|
||||
if err != nil {
|
||||
if err != http.ErrMissingFile {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
b := NewBuildFile(srv, utils.NewWriteFlusher(w))
|
||||
if id, err := b.Build(dockerfile, context); err != nil {
|
||||
fmt.Fprintf(w, "Error build: %s\n", err)
|
||||
} else if remote != "" {
|
||||
srv.runtime.repositories.Set(remote, tag, id, false)
|
||||
}
|
||||
return nil
|
||||
}
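The build endpoint at this point expects a multipart form rather than a tar stream: a required "Dockerfile" file part, an optional "Context" part, and an optional "t" field giving the repository (and optional :tag) to assign on success. A client-side sketch under the same daemon-address assumption; the Dockerfile contents and tag are illustrative:

package main

import (
	"bytes"
	"io"
	"mime/multipart"
	"net/http"
	"os"
	"strings"
)

func main() {
	var body bytes.Buffer
	w := multipart.NewWriter(&body)

	// Repository (and optional :tag) to set on the resulting image.
	w.WriteField("t", "example/app:latest")

	// The Dockerfile goes in as a file part named "Dockerfile".
	part, err := w.CreateFormFile("Dockerfile", "Dockerfile")
	if err != nil {
		panic(err)
	}
	io.Copy(part, strings.NewReader("from base\nrun echo hello > /hello\n"))
	w.Close()

	resp, err := http.Post("http://127.0.0.1:4243/build", w.FormDataContentType(), &body)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	io.Copy(os.Stdout, resp.Body) // build output is streamed back line by line
}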
|
||||
|
||||
func optionsHandler(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
return nil
|
||||
}
|
||||
func writeCorsHeaders(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Add("Access-Control-Allow-Origin", "*")
|
||||
w.Header().Add("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept")
|
||||
w.Header().Add("Access-Control-Allow-Methods", "GET, POST, DELETE, PUT, OPTIONS")
|
||||
}
|
||||
|
||||
func createRouter(srv *Server, logging bool) (*mux.Router, error) {
|
||||
r := mux.NewRouter()
|
||||
|
||||
m := map[string]map[string]func(*Server, float64, http.ResponseWriter, *http.Request, map[string]string) error{
|
||||
"GET": {
|
||||
"/auth": getAuth,
|
||||
"/version": getVersion,
|
||||
"/info": getInfo,
|
||||
"/images/json": getImagesJSON,
|
||||
"/images/viz": getImagesViz,
|
||||
"/images/search": getImagesSearch,
|
||||
"/images/{name:.*}/history": getImagesHistory,
|
||||
"/images/{name:.*}/json": getImagesByName,
|
||||
"/containers/ps": getContainersJSON,
|
||||
"/containers/json": getContainersJSON,
|
||||
"/containers/{name:.*}/export": getContainersExport,
|
||||
"/containers/{name:.*}/changes": getContainersChanges,
|
||||
"/containers/{name:.*}/json": getContainersByName,
|
||||
},
|
||||
"POST": {
|
||||
"/auth": postAuth,
|
||||
"/commit": postCommit,
|
||||
"/build": postBuild,
|
||||
"/images/create": postImagesCreate,
|
||||
"/images/{name:.*}/insert": postImagesInsert,
|
||||
"/images/{name:.*}/push": postImagesPush,
|
||||
"/images/{name:.*}/tag": postImagesTag,
|
||||
"/images/getCache": postImagesGetCache,
|
||||
"/containers/create": postContainersCreate,
|
||||
"/containers/{name:.*}/kill": postContainersKill,
|
||||
"/containers/{name:.*}/restart": postContainersRestart,
|
||||
"/containers/{name:.*}/start": postContainersStart,
|
||||
"/containers/{name:.*}/stop": postContainersStop,
|
||||
"/containers/{name:.*}/wait": postContainersWait,
|
||||
"/containers/{name:.*}/resize": postContainersResize,
|
||||
"/containers/{name:.*}/attach": postContainersAttach,
|
||||
},
|
||||
"DELETE": {
|
||||
"/containers/{name:.*}": deleteContainers,
|
||||
"/images/{name:.*}": deleteImages,
|
||||
},
|
||||
"OPTIONS": {
|
||||
"": optionsHandler,
|
||||
},
|
||||
}
|
||||
|
||||
for method, routes := range m {
|
||||
for route, fct := range routes {
|
||||
utils.Debugf("Registering %s, %s", method, route)
|
||||
// NOTE: scope issue, make sure the variables are local and won't be changed
|
||||
localRoute := route
|
||||
localMethod := method
|
||||
localFct := fct
|
||||
f := func(w http.ResponseWriter, r *http.Request) {
|
||||
utils.Debugf("Calling %s %s", localMethod, localRoute)
|
||||
if logging {
|
||||
log.Println(r.Method, r.RequestURI)
|
||||
}
|
||||
if strings.Contains(r.Header.Get("User-Agent"), "Docker-Client/") {
|
||||
userAgent := strings.Split(r.Header.Get("User-Agent"), "/")
|
||||
if len(userAgent) == 2 && userAgent[1] != VERSION {
|
||||
utils.Debugf("Warning: client and server don't have the same version (client: %s, server: %s)", userAgent[1], VERSION)
|
||||
}
|
||||
}
|
||||
version, err := strconv.ParseFloat(mux.Vars(r)["version"], 64)
|
||||
if err != nil {
|
||||
version = APIVERSION
|
||||
}
|
||||
if srv.enableCors {
|
||||
writeCorsHeaders(w, r)
|
||||
}
|
||||
if version == 0 || version > APIVERSION {
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
if err := localFct(srv, version, w, r, mux.Vars(r)); err != nil {
|
||||
httpError(w, err)
|
||||
}
|
||||
}
|
||||
|
||||
if localRoute == "" {
|
||||
r.Methods(localMethod).HandlerFunc(f)
|
||||
} else {
|
||||
r.Path("/v{version:[0-9.]+}" + localRoute).Methods(localMethod).HandlerFunc(f)
|
||||
r.Path(localRoute).Methods(localMethod).HandlerFunc(f)
|
||||
}
|
||||
}
|
||||
}
|
||||
return r, nil
|
||||
}
|
||||
|
||||
func ListenAndServe(addr string, srv *Server, logging bool) error {
|
||||
log.Printf("Listening for HTTP on %s\n", addr)
|
||||
|
||||
r, err := createRouter(srv, logging)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return http.ListenAndServe(addr, r)
|
||||
}
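The double registration in createRouter (once under a /v{version} prefix, once without) is what lets clients call either /v1.2/containers/json or plain /containers/json; the handler falls back to APIVERSION when the path carries no parsable version. A standalone sketch of that pattern using gorilla/mux, with a toy handler standing in for the real ones:

package main

import (
	"fmt"
	"log"
	"net/http"
	"strconv"

	"github.com/gorilla/mux"
)

const APIVERSION = 1.2

func main() {
	r := mux.NewRouter()

	handler := func(w http.ResponseWriter, req *http.Request) {
		// A missing or unparsable version falls back to the current API version.
		version, err := strconv.ParseFloat(mux.Vars(req)["version"], 64)
		if err != nil {
			version = APIVERSION
		}
		fmt.Fprintf(w, "serving API version %g\n", version)
	}

	// Register the same handler on the versioned and unversioned paths,
	// mirroring what createRouter does for every route.
	r.Path("/v{version:[0-9.]+}/containers/json").Methods("GET").HandlerFunc(handler)
	r.Path("/containers/json").Methods("GET").HandlerFunc(handler)

	log.Fatal(http.ListenAndServe("127.0.0.1:8080", r))
}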
|
||||
79
api_params.go
Normal file
@@ -0,0 +1,79 @@
package docker

type APIHistory struct {
	ID        string `json:"Id"`
	Created   int64
	CreatedBy string `json:",omitempty"`
}

type APIImages struct {
	Repository  string `json:",omitempty"`
	Tag         string `json:",omitempty"`
	ID          string `json:"Id"`
	Created     int64
	Size        int64
	VirtualSize int64
}

type APIInfo struct {
	Debug       bool
	Containers  int
	Images      int
	NFd         int  `json:",omitempty"`
	NGoroutines int  `json:",omitempty"`
	MemoryLimit bool `json:",omitempty"`
	SwapLimit   bool `json:",omitempty"`
}

type APIRmi struct {
	Deleted  string `json:",omitempty"`
	Untagged string `json:",omitempty"`
}

type APIContainers struct {
	ID         string `json:"Id"`
	Image      string
	Command    string
	Created    int64
	Status     string
	Ports      string
	SizeRw     int64
	SizeRootFs int64
}

type APISearch struct {
	Name        string
	Description string
}

type APIID struct {
	ID string `json:"Id"`
}

type APIRun struct {
	ID       string   `json:"Id"`
	Warnings []string `json:",omitempty"`
}

type APIPort struct {
	Port string
}

type APIVersion struct {
	Version   string
	GitCommit string `json:",omitempty"`
	GoVersion string `json:",omitempty"`
}

type APIWait struct {
	StatusCode int
}

type APIAuth struct {
	Status string
}

type APIImageConfig struct {
	ID string `json:"Id"`
	*Config
}
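Most of these structs rename Go's exported ID field to "Id" on the wire via struct tags and omit optional fields when empty. A quick illustration of what that means for the JSON a client actually sees (standard library only, hypothetical ID value):

package main

import (
	"encoding/json"
	"fmt"
)

type APIRun struct {
	ID       string   `json:"Id"`
	Warnings []string `json:",omitempty"`
}

func main() {
	b, _ := json.Marshal(APIRun{ID: "0123456789ab"})
	fmt.Println(string(b)) // {"Id":"0123456789ab"}, with no Warnings key when the slice is empty
}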
1353
api_test.go
Normal file
File diff suppressed because it is too large
45
archive.go
@@ -2,6 +2,7 @@ package docker
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
@@ -31,21 +32,63 @@ func (compression *Compression) Flag() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (compression *Compression) Extension() string {
|
||||
switch *compression {
|
||||
case Uncompressed:
|
||||
return "tar"
|
||||
case Bzip2:
|
||||
return "tar.bz2"
|
||||
case Gzip:
|
||||
return "tar.gz"
|
||||
case Xz:
|
||||
return "tar.xz"
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func Tar(path string, compression Compression) (io.Reader, error) {
|
||||
cmd := exec.Command("bsdtar", "-f", "-", "-C", path, "-c"+compression.Flag(), ".")
|
||||
return CmdStream(cmd)
|
||||
}
|
||||
|
||||
// FIXME: specify behavior when target path exists vs. doesn't exist.
|
||||
func Untar(archive io.Reader, path string) error {
|
||||
cmd := exec.Command("bsdtar", "-f", "-", "-C", path, "-x")
|
||||
cmd.Stdin = archive
|
||||
// Hardcode locale environment for predictable outcome regardless of host configuration.
|
||||
// (see https://github.com/dotcloud/docker/issues/355)
|
||||
cmd.Env = []string{"LANG=en_US.utf-8", "LC_ALL=en_US.utf-8"}
|
||||
output, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return errors.New(err.Error() + ": " + string(output))
|
||||
return fmt.Errorf("%s: %s", err, output)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// UntarPath is a convenience function which looks for an archive
|
||||
// at filesystem path `src`, and unpacks it at `dst`.
|
||||
func UntarPath(src, dst string) error {
|
||||
if archive, err := os.Open(src); err != nil {
|
||||
return err
|
||||
} else if err := Untar(archive, dst); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CopyWithTar creates a tar archive of filesystem path `src`, and
|
||||
// unpacks it at filesystem path `dst`.
|
||||
// The archive is streamed directly with fixed buffering and no
|
||||
// intermediary disk IO.
|
||||
//
|
||||
func CopyWithTar(src, dst string) error {
|
||||
archive, err := Tar(src, Uncompressed)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return Untar(archive, dst)
|
||||
}
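These helpers shell out to bsdtar rather than using archive/tar, so they only work where bsdtar is installed. A usage sketch from inside the docker package, assuming /tmp/src, /tmp/dst, and /tmp/app.tar exist; it simply calls the package functions defined above:

package docker

// copyExample shows how CopyWithTar streams a directory from src to dst
// without writing an intermediate tarball to disk, and how UntarPath
// unpacks an archive that already exists on the filesystem.
func copyExample() error {
	if err := CopyWithTar("/tmp/src", "/tmp/dst"); err != nil {
		return err
	}
	return UntarPath("/tmp/app.tar", "/tmp/dst")
}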
|
||||
|
||||
// CmdStream executes a command, and returns its stdout as a stream.
|
||||
// If the command fails to run or doesn't complete successfully, an error
|
||||
// will be returned, including anything written on stderr.
|
||||
|
||||
1
auth/MAINTAINERS
Symbolic link
@@ -0,0 +1 @@
|
||||
../registry/MAINTAINERS
|
||||
53
auth/auth.go
@@ -3,6 +3,7 @@ package auth
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
@@ -15,13 +16,19 @@ import (
|
||||
const CONFIGFILE = ".dockercfg"
|
||||
|
||||
// the registry server we want to login against
|
||||
const INDEX_SERVER = "https://index.docker.io"
|
||||
const INDEXSERVER = "https://index.docker.io/v1"
|
||||
|
||||
//const INDEXSERVER = "http://indexstaging-docker.dotcloud.com/"
|
||||
|
||||
var (
|
||||
ErrConfigFileMissing = errors.New("The Auth config file is missing")
|
||||
)
|
||||
|
||||
type AuthConfig struct {
|
||||
Username string `json:"username"`
|
||||
Password string `json:"password"`
|
||||
Email string `json:"email"`
|
||||
rootPath string `json:-`
|
||||
rootPath string
|
||||
}
|
||||
|
||||
func NewAuthConfig(username, password, email, rootPath string) *AuthConfig {
|
||||
@@ -33,8 +40,15 @@ func NewAuthConfig(username, password, email, rootPath string) *AuthConfig {
|
||||
}
|
||||
}
|
||||
|
||||
func IndexServerAddress() string {
|
||||
if os.Getenv("DOCKER_INDEX_URL") != "" {
|
||||
return os.Getenv("DOCKER_INDEX_URL") + "/v1"
|
||||
}
|
||||
return INDEXSERVER
|
||||
}
|
||||
|
||||
// create a base64 encoded auth string to store in config
|
||||
func EncodeAuth(authConfig *AuthConfig) string {
|
||||
func encodeAuth(authConfig *AuthConfig) string {
|
||||
authStr := authConfig.Username + ":" + authConfig.Password
|
||||
msg := []byte(authStr)
|
||||
encoded := make([]byte, base64.StdEncoding.EncodedLen(len(msg)))
|
||||
@@ -43,7 +57,7 @@ func EncodeAuth(authConfig *AuthConfig) string {
|
||||
}
|
||||
|
||||
// decode the auth string
|
||||
func DecodeAuth(authStr string) (*AuthConfig, error) {
|
||||
func decodeAuth(authStr string) (*AuthConfig, error) {
|
||||
decLen := base64.StdEncoding.DecodedLen(len(authStr))
|
||||
decoded := make([]byte, decLen)
|
||||
authByte := []byte(authStr)
|
||||
@@ -68,7 +82,7 @@ func DecodeAuth(authStr string) (*AuthConfig, error) {
|
||||
func LoadConfig(rootPath string) (*AuthConfig, error) {
|
||||
confFile := path.Join(rootPath, CONFIGFILE)
|
||||
if _, err := os.Stat(confFile); err != nil {
|
||||
return &AuthConfig{}, fmt.Errorf("The Auth config file is missing")
|
||||
return &AuthConfig{rootPath:rootPath}, ErrConfigFileMissing
|
||||
}
|
||||
b, err := ioutil.ReadFile(confFile)
|
||||
if err != nil {
|
||||
@@ -80,7 +94,7 @@ func LoadConfig(rootPath string) (*AuthConfig, error) {
|
||||
}
|
||||
origAuth := strings.Split(arr[0], " = ")
|
||||
origEmail := strings.Split(arr[1], " = ")
|
||||
authConfig, err := DecodeAuth(origAuth[1])
|
||||
authConfig, err := decodeAuth(origAuth[1])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -90,13 +104,13 @@ func LoadConfig(rootPath string) (*AuthConfig, error) {
|
||||
}
|
||||
|
||||
// save the auth config
|
||||
func saveConfig(rootPath, authStr string, email string) error {
|
||||
confFile := path.Join(rootPath, CONFIGFILE)
|
||||
if len(email) == 0 {
|
||||
func SaveConfig(authConfig *AuthConfig) error {
|
||||
confFile := path.Join(authConfig.rootPath, CONFIGFILE)
|
||||
if len(authConfig.Email) == 0 {
|
||||
os.Remove(confFile)
|
||||
return nil
|
||||
}
|
||||
lines := "auth = " + authStr + "\n" + "email = " + email + "\n"
|
||||
lines := "auth = " + encodeAuth(authConfig) + "\n" + "email = " + authConfig.Email + "\n"
|
||||
b := []byte(lines)
|
||||
err := ioutil.WriteFile(confFile, b, 0600)
|
||||
if err != nil {
|
||||
@@ -106,7 +120,7 @@ func saveConfig(rootPath, authStr string, email string) error {
|
||||
}
|
||||
|
||||
// try to register/login to the registry server
|
||||
func Login(authConfig *AuthConfig) (string, error) {
|
||||
func Login(authConfig *AuthConfig, store bool) (string, error) {
|
||||
storeConfig := false
|
||||
client := &http.Client{}
|
||||
reqStatusCode := 0
|
||||
@@ -119,7 +133,7 @@ func Login(authConfig *AuthConfig) (string, error) {
|
||||
|
||||
// using `bytes.NewReader(jsonBody)` here causes the server to respond with a 411 status.
|
||||
b := strings.NewReader(string(jsonBody))
|
||||
req1, err := http.Post(INDEX_SERVER+"/v1/users/", "application/json; charset=utf-8", b)
|
||||
req1, err := http.Post(IndexServerAddress()+"/users/", "application/json; charset=utf-8", b)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Server Error: %s", err)
|
||||
}
|
||||
@@ -139,7 +153,7 @@ func Login(authConfig *AuthConfig) (string, error) {
|
||||
"Please check your e-mail for a confirmation link.")
|
||||
} else if reqStatusCode == 400 {
|
||||
if string(reqBody) == "\"Username or email already exists\"" {
|
||||
req, err := http.NewRequest("GET", INDEX_SERVER+"/v1/users/", nil)
|
||||
req, err := http.NewRequest("GET", IndexServerAddress()+"/users/", nil)
|
||||
req.SetBasicAuth(authConfig.Username, authConfig.Password)
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
@@ -154,7 +168,11 @@ func Login(authConfig *AuthConfig) (string, error) {
|
||||
status = "Login Succeeded\n"
|
||||
storeConfig = true
|
||||
} else if resp.StatusCode == 401 {
|
||||
saveConfig(authConfig.rootPath, "", "")
|
||||
if store {
|
||||
if err := SaveConfig(authConfig); err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
return "", fmt.Errorf("Wrong login/password, please try again")
|
||||
} else {
|
||||
return "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body,
|
||||
@@ -166,9 +184,10 @@ func Login(authConfig *AuthConfig) (string, error) {
|
||||
} else {
|
||||
return "", fmt.Errorf("Unexpected status code [%d] : %s", reqStatusCode, reqBody)
|
||||
}
|
||||
if storeConfig {
|
||||
authStr := EncodeAuth(authConfig)
|
||||
saveConfig(authConfig.rootPath, authStr, authConfig.Email)
|
||||
if storeConfig && store {
|
||||
if err := SaveConfig(authConfig); err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
return status, nil
|
||||
}
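After this change the caller decides whether credentials are persisted: Login takes a store flag and writes the config itself via SaveConfig on success. A sketch of the calling side, with made-up credentials and /tmp as the config root:

package main

import (
	"fmt"

	"github.com/dotcloud/docker/auth"
)

func main() {
	// rootPath is where .dockercfg will be written when store is true.
	cfg := auth.NewAuthConfig("jdoe", "secret", "jdoe@example.com", "/tmp")

	status, err := auth.Login(cfg, true) // true: persist credentials on success
	if err != nil {
		fmt.Println("login failed:", err)
		return
	}
	fmt.Print(status)
}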
|
||||
|
||||
@@ -1,6 +1,10 @@
|
||||
package auth
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
@@ -21,3 +25,49 @@ func TestEncodeAuth(t *testing.T) {
|
||||
t.Fatal("AuthString encoding isn't correct.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestLogin(t *testing.T) {
|
||||
os.Setenv("DOCKER_INDEX_URL", "https://indexstaging-docker.dotcloud.com")
|
||||
defer os.Setenv("DOCKER_INDEX_URL", "")
|
||||
authConfig := NewAuthConfig("unittester", "surlautrerivejetattendrai", "noise+unittester@dotcloud.com", "/tmp")
|
||||
status, err := Login(authConfig)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if status != "Login Succeeded\n" {
|
||||
t.Fatalf("Expected status \"Login Succeeded\", found \"%s\" instead", status)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCreateAccount(t *testing.T) {
|
||||
os.Setenv("DOCKER_INDEX_URL", "https://indexstaging-docker.dotcloud.com")
|
||||
defer os.Setenv("DOCKER_INDEX_URL", "")
|
||||
tokenBuffer := make([]byte, 16)
|
||||
_, err := rand.Read(tokenBuffer)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
token := hex.EncodeToString(tokenBuffer)[:12]
|
||||
username := "ut" + token
|
||||
authConfig := NewAuthConfig(username, "test42", "docker-ut+"+token+"@example.com", "/tmp")
|
||||
status, err := Login(authConfig)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
expectedStatus := "Account created. Please use the confirmation link we sent" +
|
||||
" to your e-mail to activate it.\n"
|
||||
if status != expectedStatus {
|
||||
t.Fatalf("Expected status: \"%s\", found \"%s\" instead.", expectedStatus, status)
|
||||
}
|
||||
|
||||
status, err = Login(authConfig)
|
||||
if err == nil {
|
||||
t.Fatalf("Expected error but found nil instead")
|
||||
}
|
||||
|
||||
expectedError := "Login: Account is not Active"
|
||||
|
||||
if !strings.Contains(err.Error(), expectedError) {
|
||||
t.Fatalf("Expected message \"%s\" but found \"%s\" instead", expectedError, err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,20 +0,0 @@
|
||||
Buildbot
|
||||
========
|
||||
|
||||
Buildbot is a continuous integration system designed to automate the
|
||||
build/test cycle. By automatically rebuilding and testing the tree each time
|
||||
something has changed, build problems are pinpointed quickly, before other
|
||||
developers are inconvenienced by the failure.
|
||||
|
||||
When running 'make hack' at the docker root directory, it spawns a virtual
|
||||
machine in the background running a buildbot instance and adds a git
|
||||
post-commit hook that automatically run docker tests for you.
|
||||
|
||||
You can check your buildbot instance at http://192.168.33.21:8010/waterfall
|
||||
|
||||
|
||||
Buildbot dependencies
|
||||
---------------------
|
||||
|
||||
vagrant, virtualbox packages and python package requests
|
||||
|
||||
28
buildbot/Vagrantfile
vendored
@@ -1,28 +0,0 @@
|
||||
# -*- mode: ruby -*-
|
||||
# vi: set ft=ruby :
|
||||
|
||||
$BUILDBOT_IP = '192.168.33.21'
|
||||
|
||||
def v10(config)
|
||||
config.vm.box = "quantal64_3.5.0-25"
|
||||
config.vm.box_url = "http://get.docker.io/vbox/ubuntu/12.10/quantal64_3.5.0-25.box"
|
||||
config.vm.share_folder 'v-data', '/data/docker', File.dirname(__FILE__) + '/..'
|
||||
config.vm.network :hostonly, $BUILDBOT_IP
|
||||
|
||||
# Ensure puppet is installed on the instance
|
||||
config.vm.provision :shell, :inline => 'apt-get -qq update; apt-get install -y puppet'
|
||||
|
||||
config.vm.provision :puppet do |puppet|
|
||||
puppet.manifests_path = '.'
|
||||
puppet.manifest_file = 'buildbot.pp'
|
||||
puppet.options = ['--templatedir','.']
|
||||
end
|
||||
end
|
||||
|
||||
Vagrant::VERSION < '1.1.0' and Vagrant::Config.run do |config|
|
||||
v10(config)
|
||||
end
|
||||
|
||||
Vagrant::VERSION >= '1.1.0' and Vagrant.configure('1') do |config|
|
||||
v10(config)
|
||||
end
|
||||
@@ -1,43 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Auto setup of buildbot configuration. Package installation is being done
|
||||
# on buildbot.pp
|
||||
# Dependencies: buildbot, buildbot-slave, supervisor
|
||||
|
||||
SLAVE_NAME='buildworker'
|
||||
SLAVE_SOCKET='localhost:9989'
|
||||
BUILDBOT_PWD='pass-docker'
|
||||
USER='vagrant'
|
||||
ROOT_PATH='/data/buildbot'
|
||||
DOCKER_PATH='/data/docker'
|
||||
BUILDBOT_CFG="$DOCKER_PATH/buildbot/buildbot-cfg"
|
||||
IP=$(grep BUILDBOT_IP /data/docker/buildbot/Vagrantfile | awk -F "'" '{ print $2; }')
|
||||
|
||||
function run { su $USER -c "$1"; }
|
||||
|
||||
export PATH=/bin:sbin:/usr/bin:/usr/sbin:/usr/local/bin
|
||||
|
||||
# Exit if buildbot has already been installed
|
||||
[ -d "$ROOT_PATH" ] && exit 0
|
||||
|
||||
# Setup buildbot
|
||||
run "mkdir -p ${ROOT_PATH}"
|
||||
cd ${ROOT_PATH}
|
||||
run "buildbot create-master master"
|
||||
run "cp $BUILDBOT_CFG/master.cfg master"
|
||||
run "sed -i 's/localhost/$IP/' master/master.cfg"
|
||||
run "buildslave create-slave slave $SLAVE_SOCKET $SLAVE_NAME $BUILDBOT_PWD"
|
||||
|
||||
# Allow buildbot subprocesses (docker tests) to properly run in containers,
|
||||
# in particular with docker -u
|
||||
run "sed -i 's/^umask = None/umask = 000/' ${ROOT_PATH}/slave/buildbot.tac"
|
||||
|
||||
# Setup supervisor
|
||||
cp $BUILDBOT_CFG/buildbot.conf /etc/supervisor/conf.d/buildbot.conf
|
||||
sed -i "s/^chmod=0700.*0700./chmod=0770\nchown=root:$USER/" /etc/supervisor/supervisord.conf
|
||||
kill -HUP `pgrep -f "/usr/bin/python /usr/bin/supervisord"`
|
||||
|
||||
# Add git hook
|
||||
cp $BUILDBOT_CFG/post-commit $DOCKER_PATH/.git/hooks
|
||||
sed -i "s/localhost/$IP/" $DOCKER_PATH/.git/hooks/post-commit
|
||||
|
||||
@@ -1,32 +0,0 @@
|
||||
node default {
|
||||
$USER = 'vagrant'
|
||||
$ROOT_PATH = '/data/buildbot'
|
||||
$DOCKER_PATH = '/data/docker'
|
||||
|
||||
exec {'apt_update': command => '/usr/bin/apt-get update' }
|
||||
Package { require => Exec['apt_update'] }
|
||||
group {'puppet': ensure => 'present'}
|
||||
|
||||
# Install dependencies
|
||||
Package { ensure => 'installed' }
|
||||
package { ['python-dev','python-pip','supervisor','lxc','bsdtar','git','golang']: }
|
||||
|
||||
file{[ '/data' ]:
|
||||
owner => $USER, group => $USER, ensure => 'directory' }
|
||||
|
||||
file {'/var/tmp/requirements.txt':
|
||||
content => template('requirements.txt') }
|
||||
|
||||
exec {'requirements':
|
||||
require => [ Package['python-dev'], Package['python-pip'],
|
||||
File['/var/tmp/requirements.txt'] ],
|
||||
cwd => '/var/tmp',
|
||||
command => "/bin/sh -c '(/usr/bin/pip install -r requirements.txt;
|
||||
rm /var/tmp/requirements.txt)'" }
|
||||
|
||||
exec {'buildbot-cfg-sh':
|
||||
require => [ Package['supervisor'], Exec['requirements']],
|
||||
path => '/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin',
|
||||
cwd => '/data',
|
||||
command => "$DOCKER_PATH/buildbot/buildbot-cfg/buildbot-cfg.sh" }
|
||||
}
|
||||
381
builder.go
@@ -1,20 +1,22 @@
|
||||
package docker
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"github.com/dotcloud/docker/utils"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
var defaultDns = []string{"8.8.8.8", "8.8.4.4"}
|
||||
|
||||
type Builder struct {
|
||||
runtime *Runtime
|
||||
repositories *TagStore
|
||||
graph *Graph
|
||||
|
||||
config *Config
|
||||
image *Image
|
||||
}
|
||||
|
||||
func NewBuilder(runtime *Runtime) *Builder {
|
||||
@@ -25,42 +27,6 @@ func NewBuilder(runtime *Runtime) *Builder {
|
||||
}
|
||||
}
|
||||
|
||||
func (builder *Builder) mergeConfig(userConf, imageConf *Config) {
|
||||
if userConf.Hostname != "" {
|
||||
userConf.Hostname = imageConf.Hostname
|
||||
}
|
||||
if userConf.User != "" {
|
||||
userConf.User = imageConf.User
|
||||
}
|
||||
if userConf.Memory == 0 {
|
||||
userConf.Memory = imageConf.Memory
|
||||
}
|
||||
if userConf.MemorySwap == 0 {
|
||||
userConf.MemorySwap = imageConf.MemorySwap
|
||||
}
|
||||
if userConf.PortSpecs == nil || len(userConf.PortSpecs) == 0 {
|
||||
userConf.PortSpecs = imageConf.PortSpecs
|
||||
}
|
||||
if !userConf.Tty {
|
||||
userConf.Tty = userConf.Tty
|
||||
}
|
||||
if !userConf.OpenStdin {
|
||||
userConf.OpenStdin = imageConf.OpenStdin
|
||||
}
|
||||
if !userConf.StdinOnce {
|
||||
userConf.StdinOnce = imageConf.StdinOnce
|
||||
}
|
||||
if userConf.Env == nil || len(userConf.Env) == 0 {
|
||||
userConf.Env = imageConf.Env
|
||||
}
|
||||
if userConf.Cmd == nil || len(userConf.Cmd) == 0 {
|
||||
userConf.Cmd = imageConf.Cmd
|
||||
}
|
||||
if userConf.Dns == nil || len(userConf.Dns) == 0 {
|
||||
userConf.Dns = imageConf.Dns
|
||||
}
|
||||
}
|
||||
|
||||
func (builder *Builder) Create(config *Config) (*Container, error) {
|
||||
// Lookup image
|
||||
img, err := builder.repositories.LookupImage(config.Image)
|
||||
@@ -69,7 +35,7 @@ func (builder *Builder) Create(config *Config) (*Container, error) {
|
||||
}
|
||||
|
||||
if img.Config != nil {
|
||||
builder.mergeConfig(config, img.Config)
|
||||
MergeConfig(config, img.Config)
|
||||
}
|
||||
|
||||
if config.Cmd == nil || len(config.Cmd) == 0 {
|
||||
@@ -77,7 +43,7 @@ func (builder *Builder) Create(config *Config) (*Container, error) {
|
||||
}
|
||||
|
||||
// Generate id
|
||||
id := GenerateId()
|
||||
id := GenerateID()
|
||||
// Generate default hostname
|
||||
// FIXME: the lxc template no longer needs to set a default hostname
|
||||
if config.Hostname == "" {
|
||||
@@ -86,32 +52,43 @@ func (builder *Builder) Create(config *Config) (*Container, error) {
|
||||
|
||||
container := &Container{
|
||||
// FIXME: we should generate the ID here instead of receiving it as an argument
|
||||
Id: id,
|
||||
ID: id,
|
||||
Created: time.Now(),
|
||||
Path: config.Cmd[0],
|
||||
Args: config.Cmd[1:], //FIXME: de-duplicate from config
|
||||
Config: config,
|
||||
Image: img.Id, // Always use the resolved image id
|
||||
Image: img.ID, // Always use the resolved image id
|
||||
NetworkSettings: &NetworkSettings{},
|
||||
// FIXME: do we need to store this in the container?
|
||||
SysInitPath: sysInitPath,
|
||||
}
|
||||
container.root = builder.runtime.containerRoot(container.Id)
|
||||
container.root = builder.runtime.containerRoot(container.ID)
|
||||
// Step 1: create the container directory.
|
||||
// This doubles as a barrier to avoid race conditions.
|
||||
if err := os.Mkdir(container.root, 0700); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(config.Dns) == 0 && len(builder.runtime.Dns) == 0 && utils.CheckLocalDns() {
|
||||
//"WARNING: Docker detected local DNS server on resolv.conf. Using default external servers: %v", defaultDns
|
||||
builder.runtime.Dns = defaultDns
|
||||
}
|
||||
|
||||
// If custom dns exists, then create a resolv.conf for the container
|
||||
if len(config.Dns) > 0 {
|
||||
if len(config.Dns) > 0 || len(builder.runtime.Dns) > 0 {
|
||||
var dns []string
|
||||
if len(config.Dns) > 0 {
|
||||
dns = config.Dns
|
||||
} else {
|
||||
dns = builder.runtime.Dns
|
||||
}
|
||||
container.ResolvConfPath = path.Join(container.root, "resolv.conf")
|
||||
f, err := os.Create(container.ResolvConfPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
for _, dns := range config.Dns {
|
||||
for _, dns := range dns {
|
||||
if _, err := f.Write([]byte("nameserver " + dns + "\n")); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -147,317 +124,9 @@ func (builder *Builder) Commit(container *Container, repository, tag, comment, a
|
||||
}
|
||||
// Register the image if needed
|
||||
if repository != "" {
|
||||
if err := builder.repositories.Set(repository, tag, img.Id, true); err != nil {
|
||||
if err := builder.repositories.Set(repository, tag, img.ID, true); err != nil {
|
||||
return img, err
|
||||
}
|
||||
}
|
||||
return img, nil
|
||||
}
|
||||
|
||||
func (builder *Builder) clearTmp(containers, images map[string]struct{}) {
|
||||
for c := range containers {
|
||||
tmp := builder.runtime.Get(c)
|
||||
builder.runtime.Destroy(tmp)
|
||||
Debugf("Removing container %s", c)
|
||||
}
|
||||
for i := range images {
|
||||
builder.runtime.graph.Delete(i)
|
||||
Debugf("Removing image %s", i)
|
||||
}
|
||||
}
|
||||
|
||||
func (builder *Builder) getCachedImage(image *Image, config *Config) (*Image, error) {
|
||||
// Retrieve all images
|
||||
images, err := builder.graph.All()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Store the tree in a map of map (map[parentId][childId])
|
||||
imageMap := make(map[string]map[string]struct{})
|
||||
for _, img := range images {
|
||||
if _, exists := imageMap[img.Parent]; !exists {
|
||||
imageMap[img.Parent] = make(map[string]struct{})
|
||||
}
|
||||
imageMap[img.Parent][img.Id] = struct{}{}
|
||||
}
|
||||
|
||||
// Loop on the children of the given image and check the config
|
||||
for elem := range imageMap[image.Id] {
|
||||
img, err := builder.graph.Get(elem)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if CompareConfig(&img.ContainerConfig, config) {
|
||||
return img, nil
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (builder *Builder) Build(dockerfile io.Reader, stdout io.Writer) (*Image, error) {
|
||||
var (
|
||||
image, base *Image
|
||||
config *Config
|
||||
maintainer string
|
||||
env map[string]string = make(map[string]string)
|
||||
tmpContainers map[string]struct{} = make(map[string]struct{})
|
||||
tmpImages map[string]struct{} = make(map[string]struct{})
|
||||
)
|
||||
defer builder.clearTmp(tmpContainers, tmpImages)
|
||||
|
||||
file := bufio.NewReader(dockerfile)
|
||||
for {
|
||||
line, err := file.ReadString('\n')
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
line = strings.Replace(strings.TrimSpace(line), " ", " ", 1)
|
||||
// Skip comments and empty line
|
||||
if len(line) == 0 || line[0] == '#' {
|
||||
continue
|
||||
}
|
||||
tmp := strings.SplitN(line, " ", 2)
|
||||
if len(tmp) != 2 {
|
||||
return nil, fmt.Errorf("Invalid Dockerfile format")
|
||||
}
|
||||
instruction := strings.Trim(tmp[0], " ")
|
||||
arguments := strings.Trim(tmp[1], " ")
|
||||
switch strings.ToLower(instruction) {
|
||||
case "from":
|
||||
fmt.Fprintf(stdout, "FROM %s\n", arguments)
|
||||
image, err = builder.runtime.repositories.LookupImage(arguments)
|
||||
if err != nil {
|
||||
if builder.runtime.graph.IsNotExist(err) {
|
||||
|
||||
var tag, remote string
|
||||
if strings.Contains(arguments, ":") {
|
||||
remoteParts := strings.Split(arguments, ":")
|
||||
tag = remoteParts[1]
|
||||
remote = remoteParts[0]
|
||||
} else {
|
||||
remote = arguments
|
||||
}
|
||||
|
||||
if err := builder.runtime.graph.PullRepository(stdout, remote, tag, builder.runtime.repositories, builder.runtime.authConfig); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
image, err = builder.runtime.repositories.LookupImage(arguments)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
config = &Config{}
|
||||
|
||||
break
|
||||
case "maintainer":
|
||||
fmt.Fprintf(stdout, "MAINTAINER %s\n", arguments)
|
||||
maintainer = arguments
|
||||
break
|
||||
case "run":
|
||||
fmt.Fprintf(stdout, "RUN %s\n", arguments)
|
||||
if image == nil {
|
||||
return nil, fmt.Errorf("Please provide a source image with `from` prior to run")
|
||||
}
|
||||
config, err := ParseRun([]string{image.Id, "/bin/sh", "-c", arguments}, nil, builder.runtime.capabilities)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for key, value := range env {
|
||||
config.Env = append(config.Env, fmt.Sprintf("%s=%s", key, value))
|
||||
}
|
||||
|
||||
if cache, err := builder.getCachedImage(image, config); err != nil {
|
||||
return nil, err
|
||||
} else if cache != nil {
|
||||
image = cache
|
||||
fmt.Fprintf(stdout, "===> %s\n", image.ShortId())
|
||||
break
|
||||
}
|
||||
|
||||
Debugf("Env -----> %v ------ %v\n", config.Env, env)
|
||||
|
||||
// Create the container and start it
|
||||
c, err := builder.Create(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if os.Getenv("DEBUG") != "" {
|
||||
out, _ := c.StdoutPipe()
|
||||
err2, _ := c.StderrPipe()
|
||||
go io.Copy(os.Stdout, out)
|
||||
go io.Copy(os.Stdout, err2)
|
||||
}
|
||||
|
||||
if err := c.Start(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tmpContainers[c.Id] = struct{}{}
|
||||
|
||||
// Wait for it to finish
|
||||
if result := c.Wait(); result != 0 {
|
||||
return nil, fmt.Errorf("!!! '%s' return non-zero exit code '%d'. Aborting.", arguments, result)
|
||||
}
|
||||
|
||||
// Commit the container
|
||||
base, err = builder.Commit(c, "", "", "", maintainer, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tmpImages[base.Id] = struct{}{}
|
||||
|
||||
fmt.Fprintf(stdout, "===> %s\n", base.ShortId())
|
||||
|
||||
// use the base as the new image
|
||||
image = base
|
||||
|
||||
break
|
||||
case "env":
|
||||
tmp := strings.SplitN(arguments, " ", 2)
|
||||
if len(tmp) != 2 {
|
||||
return nil, fmt.Errorf("Invalid ENV format")
|
||||
}
|
||||
key := strings.Trim(tmp[0], " ")
|
||||
value := strings.Trim(tmp[1], " ")
|
||||
fmt.Fprintf(stdout, "ENV %s %s\n", key, value)
|
||||
env[key] = value
|
||||
if image != nil {
|
||||
fmt.Fprintf(stdout, "===> %s\n", image.ShortId())
|
||||
} else {
|
||||
fmt.Fprintf(stdout, "===> <nil>\n")
|
||||
}
|
||||
break
|
||||
case "cmd":
|
||||
fmt.Fprintf(stdout, "CMD %s\n", arguments)
|
||||
|
||||
// Create the container and start it
|
||||
c, err := builder.Create(&Config{Image: image.Id, Cmd: []string{"", ""}})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := c.Start(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tmpContainers[c.Id] = struct{}{}
|
||||
|
||||
cmd := []string{}
|
||||
if err := json.Unmarshal([]byte(arguments), &cmd); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
config.Cmd = cmd
|
||||
|
||||
// Commit the container
|
||||
base, err = builder.Commit(c, "", "", "", maintainer, config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tmpImages[base.Id] = struct{}{}
|
||||
|
||||
fmt.Fprintf(stdout, "===> %s\n", base.ShortId())
|
||||
image = base
|
||||
break
|
||||
case "expose":
|
||||
ports := strings.Split(arguments, " ")
|
||||
|
||||
fmt.Fprintf(stdout, "EXPOSE %v\n", ports)
|
||||
if image == nil {
|
||||
return nil, fmt.Errorf("Please provide a source image with `from` prior to copy")
|
||||
}
|
||||
|
||||
// Create the container and start it
|
||||
c, err := builder.Create(&Config{Image: image.Id, Cmd: []string{"", ""}})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := c.Start(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tmpContainers[c.Id] = struct{}{}
|
||||
|
||||
config.PortSpecs = append(ports, config.PortSpecs...)
|
||||
|
||||
// Commit the container
|
||||
base, err = builder.Commit(c, "", "", "", maintainer, config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tmpImages[base.Id] = struct{}{}
|
||||
|
||||
fmt.Fprintf(stdout, "===> %s\n", base.ShortId())
|
||||
image = base
|
||||
break
|
||||
case "insert":
|
||||
if image == nil {
|
||||
return nil, fmt.Errorf("Please provide a source image with `from` prior to copy")
|
||||
}
|
||||
tmp = strings.SplitN(arguments, " ", 2)
|
||||
if len(tmp) != 2 {
|
||||
return nil, fmt.Errorf("Invalid INSERT format")
|
||||
}
|
||||
sourceUrl := strings.Trim(tmp[0], " ")
|
||||
destPath := strings.Trim(tmp[1], " ")
|
||||
fmt.Fprintf(stdout, "COPY %s to %s in %s\n", sourceUrl, destPath, base.ShortId())
|
||||
|
||||
file, err := Download(sourceUrl, stdout)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer file.Body.Close()
|
||||
|
||||
config, err := ParseRun([]string{base.Id, "echo", "insert", sourceUrl, destPath}, nil, builder.runtime.capabilities)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c, err := builder.Create(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := c.Start(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Wait for echo to finish
|
||||
if result := c.Wait(); result != 0 {
|
||||
return nil, fmt.Errorf("!!! '%s' return non-zero exit code '%d'. Aborting.", arguments, result)
|
||||
}
|
||||
|
||||
if err := c.Inject(file.Body, destPath); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
base, err = builder.Commit(c, "", "", "", maintainer, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fmt.Fprintf(stdout, "===> %s\n", base.ShortId())
|
||||
|
||||
image = base
|
||||
|
||||
break
|
||||
default:
|
||||
fmt.Fprintf(stdout, "Skipping unknown instruction %s\n", strings.ToUpper(instruction))
|
||||
}
|
||||
}
|
||||
if image != nil {
|
||||
// The build is successful, keep the temporary containers and images
|
||||
for i := range tmpImages {
|
||||
delete(tmpImages, i)
|
||||
}
|
||||
for i := range tmpContainers {
|
||||
delete(tmpContainers, i)
|
||||
}
|
||||
fmt.Fprintf(stdout, "Build finished. image id: %s\n", image.ShortId())
|
||||
return image, nil
|
||||
}
|
||||
return nil, fmt.Errorf("An error occured during the build\n")
|
||||
}
|
||||
|
||||
314
builder_client.go
Normal file
@@ -0,0 +1,314 @@
|
||||
package docker
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/dotcloud/docker/utils"
|
||||
"io"
|
||||
"net/url"
|
||||
"os"
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type builderClient struct {
|
||||
cli *DockerCli
|
||||
|
||||
image string
|
||||
maintainer string
|
||||
config *Config
|
||||
|
||||
tmpContainers map[string]struct{}
|
||||
tmpImages map[string]struct{}
|
||||
|
||||
needCommit bool
|
||||
}
|
||||
|
||||
func (b *builderClient) clearTmp(containers, images map[string]struct{}) {
|
||||
for i := range images {
|
||||
if _, _, err := b.cli.call("DELETE", "/images/"+i, nil); err != nil {
|
||||
utils.Debugf("%s", err)
|
||||
}
|
||||
utils.Debugf("Removing image %s", i)
|
||||
}
|
||||
}
|
||||
|
||||
func (b *builderClient) CmdFrom(name string) error {
|
||||
obj, statusCode, err := b.cli.call("GET", "/images/"+name+"/json", nil)
|
||||
if statusCode == 404 {
|
||||
|
||||
remote := name
|
||||
var tag string
|
||||
if strings.Contains(remote, ":") {
|
||||
remoteParts := strings.Split(remote, ":")
|
||||
tag = remoteParts[1]
|
||||
remote = remoteParts[0]
|
||||
}
|
||||
var out io.Writer
|
||||
if os.Getenv("DEBUG") != "" {
|
||||
out = os.Stdout
|
||||
} else {
|
||||
out = &utils.NopWriter{}
|
||||
}
|
||||
if err := b.cli.stream("POST", "/images/create?fromImage="+remote+"&tag="+tag, nil, out); err != nil {
|
||||
return err
|
||||
}
|
||||
obj, _, err = b.cli.call("GET", "/images/"+name+"/json", nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
img := &APIID{}
|
||||
if err := json.Unmarshal(obj, img); err != nil {
|
||||
return err
|
||||
}
|
||||
b.image = img.ID
|
||||
utils.Debugf("Using image %s", b.image)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *builderClient) CmdMaintainer(name string) error {
|
||||
b.needCommit = true
|
||||
b.maintainer = name
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *builderClient) CmdRun(args string) error {
|
||||
if b.image == "" {
|
||||
return fmt.Errorf("Please provide a source image with `from` prior to run")
|
||||
}
|
||||
config, _, err := ParseRun([]string{b.image, "/bin/sh", "-c", args}, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cmd, env := b.config.Cmd, b.config.Env
|
||||
b.config.Cmd = nil
|
||||
MergeConfig(b.config, config)
|
||||
|
||||
body, statusCode, err := b.cli.call("POST", "/images/getCache", &APIImageConfig{ID: b.image, Config: b.config})
|
||||
if err != nil {
|
||||
if statusCode != 404 {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if statusCode != 404 {
|
||||
apiID := &APIID{}
|
||||
if err := json.Unmarshal(body, apiID); err != nil {
|
||||
return err
|
||||
}
|
||||
utils.Debugf("Use cached version")
|
||||
b.image = apiID.ID
|
||||
return nil
|
||||
}
|
||||
cid, err := b.run()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b.config.Cmd, b.config.Env = cmd, env
|
||||
return b.commit(cid)
|
||||
}
|
||||
|
||||
func (b *builderClient) CmdEnv(args string) error {
|
||||
b.needCommit = true
|
||||
tmp := strings.SplitN(args, " ", 2)
|
||||
if len(tmp) != 2 {
|
||||
return fmt.Errorf("Invalid ENV format")
|
||||
}
|
||||
key := strings.Trim(tmp[0], " ")
|
||||
value := strings.Trim(tmp[1], " ")
|
||||
|
||||
for i, elem := range b.config.Env {
|
||||
if strings.HasPrefix(elem, key+"=") {
|
||||
b.config.Env[i] = key + "=" + value
|
||||
return nil
|
||||
}
|
||||
}
|
||||
b.config.Env = append(b.config.Env, key+"="+value)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *builderClient) CmdCmd(args string) error {
|
||||
b.needCommit = true
|
||||
var cmd []string
|
||||
if err := json.Unmarshal([]byte(args), &cmd); err != nil {
|
||||
utils.Debugf("Error unmarshalling: %s, using /bin/sh -c", err)
|
||||
b.config.Cmd = []string{"/bin/sh", "-c", args}
|
||||
} else {
|
||||
b.config.Cmd = cmd
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *builderClient) CmdExpose(args string) error {
|
||||
ports := strings.Split(args, " ")
|
||||
b.config.PortSpecs = append(ports, b.config.PortSpecs...)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *builderClient) CmdInsert(args string) error {
|
||||
// tmp := strings.SplitN(args, "\t ", 2)
|
||||
// sourceUrl, destPath := tmp[0], tmp[1]
|
||||
|
||||
// v := url.Values{}
|
||||
// v.Set("url", sourceUrl)
|
||||
// v.Set("path", destPath)
|
||||
// body, _, err := b.cli.call("POST", "/images/insert?"+v.Encode(), nil)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
|
||||
// apiId := &APIId{}
|
||||
// if err := json.Unmarshal(body, apiId); err != nil {
|
||||
// return err
|
||||
// }
|
||||
|
||||
// FIXME: Reimplement this, we need to retrieve the resulting Id
|
||||
return fmt.Errorf("INSERT not implemented")
|
||||
}
|
||||
|
||||
func (b *builderClient) run() (string, error) {
|
||||
if b.image == "" {
|
||||
return "", fmt.Errorf("Please provide a source image with `from` prior to run")
|
||||
}
|
||||
b.config.Image = b.image
|
||||
body, _, err := b.cli.call("POST", "/containers/create", b.config)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
apiRun := &APIRun{}
|
||||
if err := json.Unmarshal(body, apiRun); err != nil {
|
||||
return "", err
|
||||
}
|
||||
for _, warning := range apiRun.Warnings {
|
||||
fmt.Fprintln(os.Stderr, "WARNING: ", warning)
|
||||
}
|
||||
|
||||
//start the container
|
||||
_, _, err = b.cli.call("POST", "/containers/"+apiRun.ID+"/start", nil)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
b.tmpContainers[apiRun.ID] = struct{}{}
|
||||
|
||||
// Wait for it to finish
|
||||
body, _, err = b.cli.call("POST", "/containers/"+apiRun.ID+"/wait", nil)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
apiWait := &APIWait{}
|
||||
if err := json.Unmarshal(body, apiWait); err != nil {
|
||||
return "", err
|
||||
}
|
||||
if apiWait.StatusCode != 0 {
|
||||
return "", fmt.Errorf("The command %v returned a non-zero code: %d", b.config.Cmd, apiWait.StatusCode)
|
||||
}
|
||||
|
||||
return apiRun.ID, nil
|
||||
}
|
||||
|
||||
func (b *builderClient) commit(id string) error {
|
||||
if b.image == "" {
|
||||
return fmt.Errorf("Please provide a source image with `from` prior to run")
|
||||
}
|
||||
b.config.Image = b.image
|
||||
|
||||
if id == "" {
|
||||
cmd := b.config.Cmd
|
||||
b.config.Cmd = []string{"true"}
|
||||
cid, err := b.run()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
id = cid
|
||||
b.config.Cmd = cmd
|
||||
}
|
||||
|
||||
// Commit the container
|
||||
v := url.Values{}
|
||||
v.Set("container", id)
|
||||
v.Set("author", b.maintainer)
|
||||
|
||||
body, _, err := b.cli.call("POST", "/commit?"+v.Encode(), b.config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
apiID := &APIID{}
|
||||
if err := json.Unmarshal(body, apiID); err != nil {
|
||||
return err
|
||||
}
|
||||
b.tmpImages[apiID.ID] = struct{}{}
|
||||
b.image = apiID.ID
|
||||
b.needCommit = false
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *builderClient) Build(dockerfile, context io.Reader) (string, error) {
|
||||
defer b.clearTmp(b.tmpContainers, b.tmpImages)
|
||||
file := bufio.NewReader(dockerfile)
|
||||
for {
|
||||
line, err := file.ReadString('\n')
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
return "", err
|
||||
}
|
||||
line = strings.Replace(strings.TrimSpace(line), " ", " ", 1)
|
||||
// Skip comments and empty line
|
||||
if len(line) == 0 || line[0] == '#' {
|
||||
continue
|
||||
}
|
||||
tmp := strings.SplitN(line, " ", 2)
|
||||
if len(tmp) != 2 {
|
||||
return "", fmt.Errorf("Invalid Dockerfile format")
|
||||
}
|
||||
instruction := strings.ToLower(strings.Trim(tmp[0], " "))
|
||||
arguments := strings.Trim(tmp[1], " ")
|
||||
|
||||
fmt.Fprintf(os.Stderr, "%s %s (%s)\n", strings.ToUpper(instruction), arguments, b.image)
|
||||
|
||||
method, exists := reflect.TypeOf(b).MethodByName("Cmd" + strings.ToUpper(instruction[:1]) + strings.ToLower(instruction[1:]))
|
||||
if !exists {
|
||||
fmt.Fprintf(os.Stderr, "Skipping unknown instruction %s\n", strings.ToUpper(instruction))
|
||||
}
|
||||
ret := method.Func.Call([]reflect.Value{reflect.ValueOf(b), reflect.ValueOf(arguments)})[0].Interface()
|
||||
if ret != nil {
|
||||
return "", ret.(error)
|
||||
}
|
||||
|
||||
fmt.Fprintf(os.Stderr, "===> %v\n", b.image)
|
||||
}
|
||||
if b.needCommit {
|
||||
if err := b.commit(""); err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
if b.image != "" {
|
||||
// The build is successful, keep the temporary containers and images
|
||||
for i := range b.tmpImages {
|
||||
delete(b.tmpImages, i)
|
||||
}
|
||||
for i := range b.tmpContainers {
|
||||
delete(b.tmpContainers, i)
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, "Build finished. image id: %s\n", b.image)
|
||||
return b.image, nil
|
||||
}
|
||||
return "", fmt.Errorf("An error occured during the build\n")
|
||||
}
|
||||
|
||||
func NewBuilderClient(addr string, port int) BuildFile {
|
||||
return &builderClient{
|
||||
cli: NewDockerCli(addr, port),
|
||||
config: &Config{},
|
||||
tmpContainers: make(map[string]struct{}),
|
||||
tmpImages: make(map[string]struct{}),
|
||||
}
|
||||
}
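The client-side builder dispatches each Dockerfile instruction to a Cmd<Instruction> method by name via reflection, so "run apt-get update" lands in CmdRun and unknown instructions are skipped. A usage sketch, assuming a daemon on 127.0.0.1:4243 and the Dockerfile on stdin; the second Build argument (the context archive) is left nil here, and the import path reflects the repository layout of the time:

package main

import (
	"fmt"
	"os"

	"github.com/dotcloud/docker"
)

func main() {
	// Talk to a daemon over the remote API instead of building in-process.
	builder := docker.NewBuilderClient("127.0.0.1", 4243)

	// Dockerfile on stdin; no separate context archive in this sketch.
	imageID, err := builder.Build(os.Stdin, nil)
	if err != nil {
		fmt.Fprintln(os.Stderr, "build failed:", err)
		os.Exit(1)
	}
	fmt.Println("built image", imageID)
}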
|
||||
@@ -1,88 +0,0 @@
|
||||
package docker
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
const Dockerfile = `
|
||||
# VERSION 0.1
|
||||
# DOCKER-VERSION 0.2
|
||||
|
||||
from ` + unitTestImageName + `
|
||||
run sh -c 'echo root:testpass > /tmp/passwd'
|
||||
run mkdir -p /var/run/sshd
|
||||
insert https://raw.github.com/dotcloud/docker/master/CHANGELOG.md /tmp/CHANGELOG.md
|
||||
`
|
||||
|
||||
func TestBuild(t *testing.T) {
|
||||
runtime, err := newTestRuntime()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer nuke(runtime)
|
||||
|
||||
builder := NewBuilder(runtime)
|
||||
|
||||
img, err := builder.Build(strings.NewReader(Dockerfile), &nopWriter{})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
container, err := builder.Create(
|
||||
&Config{
|
||||
Image: img.Id,
|
||||
Cmd: []string{"cat", "/tmp/passwd"},
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer runtime.Destroy(container)
|
||||
|
||||
output, err := container.Output()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if string(output) != "root:testpass\n" {
|
||||
t.Fatalf("Unexpected output. Read '%s', expected '%s'", output, "root:testpass\n")
|
||||
}
|
||||
|
||||
container2, err := builder.Create(
|
||||
&Config{
|
||||
Image: img.Id,
|
||||
Cmd: []string{"ls", "-d", "/var/run/sshd"},
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer runtime.Destroy(container2)
|
||||
|
||||
output, err = container2.Output()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if string(output) != "/var/run/sshd\n" {
|
||||
t.Fatal("/var/run/sshd has not been created")
|
||||
}
|
||||
|
||||
container3, err := builder.Create(
|
||||
&Config{
|
||||
Image: img.Id,
|
||||
Cmd: []string{"cat", "/tmp/CHANGELOG.md"},
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer runtime.Destroy(container3)
|
||||
|
||||
output, err = container3.Output()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(output) == 0 {
|
||||
t.Fatal("/tmp/CHANGELOG.md has not been copied")
|
||||
}
|
||||
}
|
||||
369
buildfile.go
Normal file
@@ -0,0 +1,369 @@
|
||||
package docker
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/dotcloud/docker/utils"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type BuildFile interface {
|
||||
Build(io.Reader, io.Reader) (string, error)
|
||||
CmdFrom(string) error
|
||||
CmdRun(string) error
|
||||
}
|
||||
|
||||
type buildFile struct {
|
||||
runtime *Runtime
|
||||
builder *Builder
|
||||
srv *Server
|
||||
|
||||
image string
|
||||
maintainer string
|
||||
config *Config
|
||||
context string
|
||||
|
||||
tmpContainers map[string]struct{}
|
||||
tmpImages map[string]struct{}
|
||||
|
||||
out io.Writer
|
||||
}
|
||||
|
||||
func (b *buildFile) clearTmp(containers, images map[string]struct{}) {
|
||||
for c := range containers {
|
||||
tmp := b.runtime.Get(c)
|
||||
b.runtime.Destroy(tmp)
|
||||
utils.Debugf("Removing container %s", c)
|
||||
}
|
||||
for i := range images {
|
||||
b.runtime.graph.Delete(i)
|
||||
utils.Debugf("Removing image %s", i)
|
||||
}
|
||||
}
|
||||
|
||||
func (b *buildFile) CmdFrom(name string) error {
|
||||
image, err := b.runtime.repositories.LookupImage(name)
|
||||
if err != nil {
|
||||
if b.runtime.graph.IsNotExist(err) {
|
||||
|
||||
var tag, remote string
|
||||
if strings.Contains(name, ":") {
|
||||
remoteParts := strings.Split(name, ":")
|
||||
tag = remoteParts[1]
|
||||
remote = remoteParts[0]
|
||||
} else {
|
||||
remote = name
|
||||
}
|
||||
|
||||
if err := b.srv.ImagePull(remote, tag, "", b.out, utils.NewStreamFormatter(false), nil); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
image, err = b.runtime.repositories.LookupImage(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
}
|
||||
b.image = image.ID
|
||||
b.config = &Config{}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *buildFile) CmdMaintainer(name string) error {
|
||||
b.maintainer = name
|
||||
return b.commit("", b.config.Cmd, fmt.Sprintf("MAINTAINER %s", name))
|
||||
}
|
||||
|
||||
func (b *buildFile) CmdRun(args string) error {
|
||||
if b.image == "" {
|
||||
return fmt.Errorf("Please provide a source image with `from` prior to run")
|
||||
}
|
||||
config, _, err := ParseRun([]string{b.image, "/bin/sh", "-c", args}, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cmd := b.config.Cmd
|
||||
b.config.Cmd = nil
|
||||
MergeConfig(b.config, config)
|
||||
|
||||
utils.Debugf("Command to be executed: %v", b.config.Cmd)
|
||||
|
||||
if cache, err := b.srv.ImageGetCached(b.image, b.config); err != nil {
|
||||
return err
|
||||
} else if cache != nil {
|
||||
utils.Debugf("[BUILDER] Use cached version")
|
||||
b.image = cache.ID
|
||||
return nil
|
||||
} else {
|
||||
utils.Debugf("[BUILDER] Cache miss")
|
||||
}
|
||||
|
||||
cid, err := b.run()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := b.commit(cid, cmd, "run"); err != nil {
|
||||
return err
|
||||
}
|
||||
b.config.Cmd = cmd
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *buildFile) CmdEnv(args string) error {
|
||||
tmp := strings.SplitN(args, " ", 2)
|
||||
if len(tmp) != 2 {
|
||||
return fmt.Errorf("Invalid ENV format")
|
||||
}
|
||||
key := strings.Trim(tmp[0], " ")
|
||||
value := strings.Trim(tmp[1], " ")
|
||||
|
||||
for i, elem := range b.config.Env {
|
||||
if strings.HasPrefix(elem, key+"=") {
|
||||
b.config.Env[i] = key + "=" + value
|
||||
return nil
|
||||
}
|
||||
}
|
||||
b.config.Env = append(b.config.Env, key+"="+value)
|
||||
return b.commit("", b.config.Cmd, fmt.Sprintf("ENV %s=%s", key, value))
|
||||
}
|
||||
|
||||
func (b *buildFile) CmdCmd(args string) error {
|
||||
var cmd []string
|
||||
if err := json.Unmarshal([]byte(args), &cmd); err != nil {
|
||||
utils.Debugf("Error unmarshalling: %s, using /bin/sh -c", err)
|
||||
cmd = []string{"/bin/sh", "-c", args}
|
||||
}
|
||||
if err := b.commit("", cmd, fmt.Sprintf("CMD %v", cmd)); err != nil {
|
||||
return err
|
||||
}
|
||||
b.config.Cmd = cmd
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *buildFile) CmdExpose(args string) error {
|
||||
ports := strings.Split(args, " ")
|
||||
b.config.PortSpecs = append(ports, b.config.PortSpecs...)
|
||||
return b.commit("", b.config.Cmd, fmt.Sprintf("EXPOSE %v", ports))
|
||||
}
|
||||
|
||||
func (b *buildFile) CmdInsert(args string) error {
|
||||
return fmt.Errorf("INSERT has been deprecated. Please use ADD instead")
|
||||
}
|
||||
|
||||
func (b *buildFile) CmdCopy(args string) error {
|
||||
return fmt.Errorf("COPY has been deprecated. Please use ADD instead")
|
||||
}
|
||||
|
||||
func (b *buildFile) CmdAdd(args string) error {
|
||||
if b.context == "" {
|
||||
return fmt.Errorf("No context given. Impossible to use ADD")
|
||||
}
|
||||
tmp := strings.SplitN(args, " ", 2)
|
||||
if len(tmp) != 2 {
|
||||
return fmt.Errorf("Invalid ADD format")
|
||||
}
|
||||
orig := strings.Trim(tmp[0], " ")
|
||||
dest := strings.Trim(tmp[1], " ")
|
||||
|
||||
cmd := b.config.Cmd
|
||||
|
||||
// Create the container and start it
|
||||
b.config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) ADD %s in %s", orig, dest)}
|
||||
b.config.Image = b.image
|
||||
container, err := b.builder.Create(b.config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b.tmpContainers[container.ID] = struct{}{}
|
||||
|
||||
if err := container.EnsureMounted(); err != nil {
|
||||
return err
|
||||
}
|
||||
defer container.Unmount()
|
||||
|
||||
origPath := path.Join(b.context, orig)
|
||||
destPath := path.Join(container.RootfsPath(), dest)
|
||||
|
||||
fi, err := os.Stat(origPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if fi.IsDir() {
|
||||
if err := os.MkdirAll(destPath, 0700); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := CopyWithTar(origPath, destPath); err != nil {
|
||||
return err
|
||||
}
|
||||
// First try to unpack the source as an archive
|
||||
} else if err := UntarPath(origPath, destPath); err != nil {
|
||||
utils.Debugf("Couldn't untar %s to %s: %s", origPath, destPath, err)
|
||||
// If that fails, just copy it as a regular file
|
||||
if err := os.MkdirAll(path.Dir(destPath), 0700); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := CopyWithTar(origPath, destPath); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := b.commit(container.ID, cmd, fmt.Sprintf("ADD %s in %s", orig, dest)); err != nil {
|
||||
return err
|
||||
}
|
||||
b.config.Cmd = cmd
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *buildFile) run() (string, error) {
|
||||
if b.image == "" {
|
||||
return "", fmt.Errorf("Please provide a source image with `from` prior to run")
|
||||
}
|
||||
b.config.Image = b.image
|
||||
|
||||
// Create the container and start it
|
||||
c, err := b.builder.Create(b.config)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
b.tmpContainers[c.ID] = struct{}{}
|
||||
|
||||
//start the container
|
||||
if err := c.Start(); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Wait for it to finish
|
||||
if ret := c.Wait(); ret != 0 {
|
||||
return "", fmt.Errorf("The command %v returned a non-zero code: %d", b.config.Cmd, ret)
|
||||
}
|
||||
|
||||
return c.ID, nil
|
||||
}
|
||||
|
||||
// Commit the container <id> with the autorun command <autoCmd>
|
||||
func (b *buildFile) commit(id string, autoCmd []string, comment string) error {
|
||||
if b.image == "" {
|
||||
return fmt.Errorf("Please provide a source image with `from` prior to commit")
|
||||
}
|
||||
b.config.Image = b.image
|
||||
if id == "" {
|
||||
b.config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + comment}
|
||||
|
||||
if cache, err := b.srv.ImageGetCached(b.image, b.config); err != nil {
|
||||
return err
|
||||
} else if cache != nil {
|
||||
utils.Debugf("[BUILDER] Use cached version")
|
||||
b.image = cache.ID
|
||||
return nil
|
||||
} else {
|
||||
utils.Debugf("[BUILDER] Cache miss")
|
||||
}
|
||||
|
||||
// Create the container and start it
|
||||
container, err := b.builder.Create(b.config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b.tmpContainers[container.ID] = struct{}{}
|
||||
|
||||
if err := container.EnsureMounted(); err != nil {
|
||||
return err
|
||||
}
|
||||
defer container.Unmount()
|
||||
|
||||
id = container.ID
|
||||
}
|
||||
|
||||
container := b.runtime.Get(id)
|
||||
if container == nil {
|
||||
return fmt.Errorf("An error occured while creating the container")
|
||||
}
|
||||
|
||||
// Note: Actually copy the struct
|
||||
autoConfig := *b.config
|
||||
autoConfig.Cmd = autoCmd
|
||||
// Commit the container
|
||||
image, err := b.builder.Commit(container, "", "", "", b.maintainer, &autoConfig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b.tmpImages[image.ID] = struct{}{}
|
||||
b.image = image.ID
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *buildFile) Build(dockerfile, context io.Reader) (string, error) {
|
||||
if context != nil {
|
||||
name, err := ioutil.TempDir("/tmp", "docker-build")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if err := Untar(context, name); err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer os.RemoveAll(name)
|
||||
b.context = name
|
||||
}
|
||||
file := bufio.NewReader(dockerfile)
|
||||
for {
|
||||
line, err := file.ReadString('\n')
|
||||
if err != nil {
|
||||
if err == io.EOF && line == "" {
|
||||
break
|
||||
} else if err != io.EOF {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
line = strings.Replace(strings.TrimSpace(line), "\t", " ", 1)
|
||||
// Skip comments and empty lines
|
||||
if len(line) == 0 || line[0] == '#' {
|
||||
continue
|
||||
}
|
||||
tmp := strings.SplitN(line, " ", 2)
|
||||
if len(tmp) != 2 {
|
||||
return "", fmt.Errorf("Invalid Dockerfile format")
|
||||
}
|
||||
instruction := strings.ToLower(strings.Trim(tmp[0], " "))
|
||||
arguments := strings.Trim(tmp[1], " ")
|
||||
|
||||
fmt.Fprintf(b.out, "%s %s (%s)\n", strings.ToUpper(instruction), arguments, b.image)
|
||||
|
||||
method, exists := reflect.TypeOf(b).MethodByName("Cmd" + strings.ToUpper(instruction[:1]) + strings.ToLower(instruction[1:]))
|
||||
if !exists {
|
||||
fmt.Fprintf(b.out, "Skipping unknown instruction %s\n", strings.ToUpper(instruction))
|
||||
continue
|
||||
}
|
||||
ret := method.Func.Call([]reflect.Value{reflect.ValueOf(b), reflect.ValueOf(arguments)})[0].Interface()
|
||||
if ret != nil {
|
||||
return "", ret.(error)
|
||||
}
|
||||
|
||||
fmt.Fprintf(b.out, "===> %v\n", b.image)
|
||||
}
|
||||
if b.image != "" {
|
||||
fmt.Fprintf(b.out, "Build successful.\n===> %s\n", b.image)
|
||||
return b.image, nil
|
||||
}
|
||||
return "", fmt.Errorf("An error occured during the build\n")
|
||||
}
|
||||
|
||||
func NewBuildFile(srv *Server, out io.Writer) BuildFile {
|
||||
return &buildFile{
|
||||
builder: NewBuilder(srv.runtime),
|
||||
runtime: srv.runtime,
|
||||
srv: srv,
|
||||
config: &Config{},
|
||||
out: out,
|
||||
tmpContainers: make(map[string]struct{}),
|
||||
tmpImages: make(map[string]struct{}),
|
||||
}
|
||||
}
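The Build loop above dispatches each Dockerfile instruction through reflection: the keyword is lower-cased, its first letter is upper-cased again, the result is prefixed with "Cmd", and that name is looked up as a method on the builder, so `run` lands on CmdRun and unknown keywords are skipped. The standalone sketch below reproduces only that lookup pattern; the demoBuilder type and the dispatch helper are hypothetical names used for illustration and are not part of the code above.

package main

import (
	"fmt"
	"reflect"
	"strings"
)

// demoBuilder is a hypothetical stand-in for buildFile, with a single
// instruction handler so the reflection lookup has something to find.
type demoBuilder struct{}

func (d *demoBuilder) CmdRun(args string) error {
	fmt.Println("would run:", args)
	return nil
}

// dispatch mirrors the method lookup done in buildFile.Build:
// "run echo hello" becomes a call to CmdRun("echo hello"),
// and anything without a matching CmdXxx method is skipped.
func dispatch(b interface{}, line string) error {
	parts := strings.SplitN(strings.TrimSpace(line), " ", 2)
	if len(parts) != 2 {
		return fmt.Errorf("invalid line: %q", line)
	}
	instruction := strings.ToLower(strings.Trim(parts[0], " "))
	arguments := strings.Trim(parts[1], " ")
	name := "Cmd" + strings.ToUpper(instruction[:1]) + strings.ToLower(instruction[1:])
	method, exists := reflect.TypeOf(b).MethodByName(name)
	if !exists {
		fmt.Printf("Skipping unknown instruction %s\n", strings.ToUpper(instruction))
		return nil
	}
	ret := method.Func.Call([]reflect.Value{reflect.ValueOf(b), reflect.ValueOf(arguments)})[0].Interface()
	if ret != nil {
		return ret.(error)
	}
	return nil
}

func main() {
	_ = dispatch(&demoBuilder{}, "run echo hello")
	_ = dispatch(&demoBuilder{}, "expose 8080")
}

The original code additionally prints the instruction and the current image ID around each call; the sketch drops that output for brevity.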
|
||||
buildfile_test.go (new file, 89 lines)
@@ -0,0 +1,89 @@
|
||||
package docker
|
||||
|
||||
import (
|
||||
"github.com/dotcloud/docker/utils"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
const Dockerfile = `
|
||||
# VERSION 0.1
|
||||
# DOCKER-VERSION 0.2
|
||||
|
||||
from ` + unitTestImageName + `
|
||||
run sh -c 'echo root:testpass > /tmp/passwd'
|
||||
run mkdir -p /var/run/sshd
|
||||
`
|
||||
|
||||
const DockerfileNoNewLine = `
|
||||
# VERSION 0.1
|
||||
# DOCKER-VERSION 0.2
|
||||
|
||||
from ` + unitTestImageName + `
|
||||
run sh -c 'echo root:testpass > /tmp/passwd'
|
||||
run mkdir -p /var/run/sshd`
|
||||
|
||||
// FIXME: test building with a context
|
||||
|
||||
// FIXME: test building with a local ADD as first command
|
||||
|
||||
// FIXME: test building with 2 successive overlapping ADD commands
|
||||
|
||||
func TestBuild(t *testing.T) {
|
||||
dockerfiles := []string{Dockerfile, DockerfileNoNewLine}
|
||||
for _, Dockerfile := range dockerfiles {
|
||||
runtime, err := newTestRuntime()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer nuke(runtime)
|
||||
|
||||
srv := &Server{runtime: runtime}
|
||||
|
||||
buildfile := NewBuildFile(srv, &utils.NopWriter{})
|
||||
|
||||
imgID, err := buildfile.Build(strings.NewReader(Dockerfile), nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
builder := NewBuilder(runtime)
|
||||
container, err := builder.Create(
|
||||
&Config{
|
||||
Image: imgID,
|
||||
Cmd: []string{"cat", "/tmp/passwd"},
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer runtime.Destroy(container)
|
||||
|
||||
output, err := container.Output()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if string(output) != "root:testpass\n" {
|
||||
t.Fatalf("Unexpected output. Read '%s', expected '%s'", output, "root:testpass\n")
|
||||
}
|
||||
|
||||
container2, err := builder.Create(
|
||||
&Config{
|
||||
Image: imgID,
|
||||
Cmd: []string{"ls", "-d", "/var/run/sshd"},
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer runtime.Destroy(container2)
|
||||
|
||||
output, err = container2.Output()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if string(output) != "/var/run/sshd\n" {
|
||||
t.Fatal("/var/run/sshd has not been created")
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -65,7 +65,7 @@ func Changes(layers []string, rw string) ([]Change, error) {
file := filepath.Base(path)
// If there is a whiteout, then the file was removed
if strings.HasPrefix(file, ".wh.") {
originalFile := strings.TrimLeft(file, ".wh.")
originalFile := file[len(".wh."):]
change.Path = filepath.Join(filepath.Dir(path), originalFile)
change.Kind = ChangeDelete
} else {

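The one-line change in this hunk fixes a subtle bug: strings.TrimLeft treats its second argument as a set of characters rather than a prefix, so trimming ".wh." could also eat leading characters of the real file name. A minimal standalone sketch of the difference, not part of the diff itself:

package main

import (
	"fmt"
	"strings"
)

func main() {
	file := ".wh.hosts"
	// TrimLeft strips any leading '.', 'w' or 'h' characters, so the
	// recovered name loses its leading "h".
	fmt.Println(strings.TrimLeft(file, ".wh.")) // prints "osts"
	// Slicing off the fixed-length prefix keeps the name intact.
	fmt.Println(file[len(".wh."):]) // prints "hosts"
}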
commands.go (1737 lines): diff suppressed because it is too large
@@ -3,9 +3,8 @@ package docker
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"github.com/dotcloud/docker/rcli"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
_ "io/ioutil"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -59,6 +58,7 @@ func assertPipe(input, output string, r io.Reader, w io.Writer, count int) error
|
||||
return nil
|
||||
}
|
||||
|
||||
/*TODO
|
||||
func cmdWait(srv *Server, container *Container) error {
|
||||
stdout, stdoutPipe := io.Pipe()
|
||||
|
||||
@@ -413,6 +413,7 @@ func TestAttachDisconnect(t *testing.T) {
|
||||
container, err := NewBuilder(runtime).Create(
|
||||
&Config{
|
||||
Image: GetTestImage(runtime).Id,
|
||||
CpuShares: 1000,
|
||||
Memory: 33554432,
|
||||
Cmd: []string{"/bin/cat"},
|
||||
OpenStdin: true,
|
||||
@@ -467,3 +468,4 @@ func TestAttachDisconnect(t *testing.T) {
|
||||
cStdin.Close()
|
||||
container.Wait()
|
||||
}
|
||||
*/
|
||||
|
||||
container.go (230 lines changed)
@@ -2,8 +2,10 @@ package docker
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"github.com/dotcloud/docker/rcli"
|
||||
"github.com/dotcloud/docker/term"
|
||||
"github.com/dotcloud/docker/utils"
|
||||
"github.com/kr/pty"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
@@ -11,6 +13,7 @@ import (
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -21,7 +24,7 @@ import (
|
||||
type Container struct {
|
||||
root string
|
||||
|
||||
Id string
|
||||
ID string
|
||||
|
||||
Created time.Time
|
||||
|
||||
@@ -39,8 +42,8 @@ type Container struct {
|
||||
ResolvConfPath string
|
||||
|
||||
cmd *exec.Cmd
|
||||
stdout *writeBroadcaster
|
||||
stderr *writeBroadcaster
|
||||
stdout *utils.WriteBroadcaster
|
||||
stderr *utils.WriteBroadcaster
|
||||
stdin io.ReadCloser
|
||||
stdinPipe io.WriteCloser
|
||||
ptyMaster io.Closer
|
||||
@@ -56,6 +59,7 @@ type Config struct {
|
||||
User string
|
||||
Memory int64 // Memory limit (in bytes)
|
||||
MemorySwap int64 // Total memory usage (memory + swap); set `-1' to disable swap
|
||||
CpuShares int64 // CPU shares (relative weight vs. other containers)
|
||||
AttachStdin bool
|
||||
AttachStdout bool
|
||||
AttachStderr bool
|
||||
@@ -71,8 +75,8 @@ type Config struct {
|
||||
VolumesFrom string
|
||||
}
|
||||
|
||||
func ParseRun(args []string, stdout io.Writer, capabilities *Capabilities) (*Config, error) {
|
||||
cmd := rcli.Subcmd(stdout, "run", "[OPTIONS] IMAGE COMMAND [ARG...]", "Run a command in a new container")
|
||||
func ParseRun(args []string, capabilities *Capabilities) (*Config, *flag.FlagSet, error) {
|
||||
cmd := Subcmd("run", "[OPTIONS] IMAGE COMMAND [ARG...]", "Run a command in a new container")
|
||||
if len(args) > 0 && args[0] != "--help" {
|
||||
cmd.SetOutput(ioutil.Discard)
|
||||
}
|
||||
@@ -86,11 +90,13 @@ func ParseRun(args []string, stdout io.Writer, capabilities *Capabilities) (*Con
|
||||
flTty := cmd.Bool("t", false, "Allocate a pseudo-tty")
|
||||
flMemory := cmd.Int64("m", 0, "Memory limit (in bytes)")
|
||||
|
||||
if *flMemory > 0 && !capabilities.MemoryLimit {
|
||||
fmt.Fprintf(stdout, "WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\n")
|
||||
if capabilities != nil && *flMemory > 0 && !capabilities.MemoryLimit {
|
||||
//fmt.Fprintf(stdout, "WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\n")
|
||||
*flMemory = 0
|
||||
}
|
||||
|
||||
flCpuShares := cmd.Int64("c", 0, "CPU shares (relative weight)")
|
||||
|
||||
var flPorts ListOpts
|
||||
cmd.Var(&flPorts, "p", "Expose a container's port to the host (use 'docker port' to see the actual mapping)")
|
||||
|
||||
@@ -106,10 +112,10 @@ func ParseRun(args []string, stdout io.Writer, capabilities *Capabilities) (*Con
|
||||
flVolumesFrom := cmd.String("volumes-from", "", "Mount volumes from the specified container")
|
||||
|
||||
if err := cmd.Parse(args); err != nil {
|
||||
return nil, err
|
||||
return nil, cmd, err
|
||||
}
|
||||
if *flDetach && len(flAttach) > 0 {
|
||||
return nil, fmt.Errorf("Conflicting options: -a and -d")
|
||||
return nil, cmd, fmt.Errorf("Conflicting options: -a and -d")
|
||||
}
|
||||
// If neither -d or -a are set, attach to everything by default
|
||||
if len(flAttach) == 0 && !*flDetach {
|
||||
@@ -137,6 +143,7 @@ func ParseRun(args []string, stdout io.Writer, capabilities *Capabilities) (*Con
|
||||
Tty: *flTty,
|
||||
OpenStdin: *flStdin,
|
||||
Memory: *flMemory,
|
||||
CpuShares: *flCpuShares,
|
||||
AttachStdin: flAttach.Get("stdin"),
|
||||
AttachStdout: flAttach.Get("stdout"),
|
||||
AttachStderr: flAttach.Get("stderr"),
|
||||
@@ -148,8 +155,8 @@ func ParseRun(args []string, stdout io.Writer, capabilities *Capabilities) (*Con
|
||||
VolumesFrom: *flVolumesFrom,
|
||||
}
|
||||
|
||||
if *flMemory > 0 && !capabilities.SwapLimit {
|
||||
fmt.Fprintf(stdout, "WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.\n")
|
||||
if capabilities != nil && *flMemory > 0 && !capabilities.SwapLimit {
|
||||
//fmt.Fprintf(stdout, "WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.\n")
|
||||
config.MemorySwap = -1
|
||||
}
|
||||
|
||||
@@ -157,12 +164,12 @@ func ParseRun(args []string, stdout io.Writer, capabilities *Capabilities) (*Con
|
||||
if config.OpenStdin && config.AttachStdin {
|
||||
config.StdinOnce = true
|
||||
}
|
||||
return config, nil
|
||||
return config, cmd, nil
|
||||
}
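With the new signature above, ParseRun no longer writes warnings to a stream, tolerates a nil Capabilities value, and additionally returns the parsed *flag.FlagSet, which callers can use to print usage; callers that only need the configuration can discard it. A minimal sketch of the new call shape from inside the docker package (the argument slice is purely illustrative, and error handling is trimmed):

config, _, err := ParseRun([]string{"base", "/bin/echo", "hello"}, nil)
if err != nil {
	return err
}
// config holds the parsed container configuration; the discarded second
// return value is the *flag.FlagSet, kept around for printing usage on errors.
_ = config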
|
||||
|
||||
type NetworkSettings struct {
|
||||
IpAddress string
|
||||
IpPrefixLen int
|
||||
IPAddress string
|
||||
IPPrefixLen int
|
||||
Gateway string
|
||||
Bridge string
|
||||
PortMapping map[string]string
|
||||
@@ -247,9 +254,9 @@ func (container *Container) startPty() error {
|
||||
// Copy the PTYs to our broadcasters
|
||||
go func() {
|
||||
defer container.stdout.CloseWriters()
|
||||
Debugf("[startPty] Begin of stdout pipe")
|
||||
utils.Debugf("[startPty] Begin of stdout pipe")
|
||||
io.Copy(container.stdout, ptyMaster)
|
||||
Debugf("[startPty] End of stdout pipe")
|
||||
utils.Debugf("[startPty] End of stdout pipe")
|
||||
}()
|
||||
|
||||
// stdin
|
||||
@@ -258,9 +265,9 @@ func (container *Container) startPty() error {
|
||||
container.cmd.SysProcAttr = &syscall.SysProcAttr{Setctty: true, Setsid: true}
|
||||
go func() {
|
||||
defer container.stdin.Close()
|
||||
Debugf("[startPty] Begin of stdin pipe")
|
||||
utils.Debugf("[startPty] Begin of stdin pipe")
|
||||
io.Copy(ptyMaster, container.stdin)
|
||||
Debugf("[startPty] End of stdin pipe")
|
||||
utils.Debugf("[startPty] End of stdin pipe")
|
||||
}()
|
||||
}
|
||||
if err := container.cmd.Start(); err != nil {
|
||||
@@ -280,9 +287,9 @@ func (container *Container) start() error {
|
||||
}
|
||||
go func() {
|
||||
defer stdin.Close()
|
||||
Debugf("Begin of stdin pipe [start]")
|
||||
utils.Debugf("Begin of stdin pipe [start]")
|
||||
io.Copy(stdin, container.stdin)
|
||||
Debugf("End of stdin pipe [start]")
|
||||
utils.Debugf("End of stdin pipe [start]")
|
||||
}()
|
||||
}
|
||||
return container.cmd.Start()
|
||||
@@ -299,8 +306,8 @@ func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, s
|
||||
errors <- err
|
||||
} else {
|
||||
go func() {
|
||||
Debugf("[start] attach stdin\n")
|
||||
defer Debugf("[end] attach stdin\n")
|
||||
utils.Debugf("[start] attach stdin\n")
|
||||
defer utils.Debugf("[end] attach stdin\n")
|
||||
// No matter what, when stdin is closed (io.Copy unblocks), close stdout and stderr
|
||||
if cStdout != nil {
|
||||
defer cStdout.Close()
|
||||
@@ -312,12 +319,12 @@ func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, s
|
||||
defer cStdin.Close()
|
||||
}
|
||||
if container.Config.Tty {
|
||||
_, err = CopyEscapable(cStdin, stdin)
|
||||
_, err = utils.CopyEscapable(cStdin, stdin)
|
||||
} else {
|
||||
_, err = io.Copy(cStdin, stdin)
|
||||
}
|
||||
if err != nil {
|
||||
Debugf("[error] attach stdin: %s\n", err)
|
||||
utils.Debugf("[error] attach stdin: %s\n", err)
|
||||
}
|
||||
// Discard error, expecting pipe error
|
||||
errors <- nil
|
||||
@@ -331,8 +338,8 @@ func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, s
|
||||
} else {
|
||||
cStdout = p
|
||||
go func() {
|
||||
Debugf("[start] attach stdout\n")
|
||||
defer Debugf("[end] attach stdout\n")
|
||||
utils.Debugf("[start] attach stdout\n")
|
||||
defer utils.Debugf("[end] attach stdout\n")
|
||||
// If we are in StdinOnce mode, then close stdin
|
||||
if container.Config.StdinOnce {
|
||||
if stdin != nil {
|
||||
@@ -344,11 +351,23 @@ func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, s
|
||||
}
|
||||
_, err := io.Copy(stdout, cStdout)
|
||||
if err != nil {
|
||||
Debugf("[error] attach stdout: %s\n", err)
|
||||
utils.Debugf("[error] attach stdout: %s\n", err)
|
||||
}
|
||||
errors <- err
|
||||
}()
|
||||
}
|
||||
} else {
|
||||
go func() {
|
||||
if stdinCloser != nil {
|
||||
defer stdinCloser.Close()
|
||||
}
|
||||
|
||||
if cStdout, err := container.StdoutPipe(); err != nil {
|
||||
utils.Debugf("Error stdout pipe")
|
||||
} else {
|
||||
io.Copy(&utils.NopWriter{}, cStdout)
|
||||
}
|
||||
}()
|
||||
}
|
||||
if stderr != nil {
|
||||
nJobs += 1
|
||||
@@ -357,8 +376,8 @@ func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, s
|
||||
} else {
|
||||
cStderr = p
|
||||
go func() {
|
||||
Debugf("[start] attach stderr\n")
|
||||
defer Debugf("[end] attach stderr\n")
|
||||
utils.Debugf("[start] attach stderr\n")
|
||||
defer utils.Debugf("[end] attach stderr\n")
|
||||
// If we are in StdinOnce mode, then close stdin
|
||||
if container.Config.StdinOnce {
|
||||
if stdin != nil {
|
||||
@@ -370,13 +389,26 @@ func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, s
|
||||
}
|
||||
_, err := io.Copy(stderr, cStderr)
|
||||
if err != nil {
|
||||
Debugf("[error] attach stderr: %s\n", err)
|
||||
utils.Debugf("[error] attach stderr: %s\n", err)
|
||||
}
|
||||
errors <- err
|
||||
}()
|
||||
}
|
||||
} else {
|
||||
go func() {
|
||||
if stdinCloser != nil {
|
||||
defer stdinCloser.Close()
|
||||
}
|
||||
|
||||
if cStderr, err := container.StderrPipe(); err != nil {
|
||||
utils.Debugf("Error stdout pipe")
|
||||
} else {
|
||||
io.Copy(&utils.NopWriter{}, cStderr)
|
||||
}
|
||||
}()
|
||||
}
|
||||
return Go(func() error {
|
||||
|
||||
return utils.Go(func() error {
|
||||
if cStdout != nil {
|
||||
defer cStdout.Close()
|
||||
}
|
||||
@@ -386,14 +418,14 @@ func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, s
|
||||
// FIXME: how do we clean up the stdin goroutine without the unwanted side effect
|
||||
// of closing the passed stdin? Add an intermediary io.Pipe?
|
||||
for i := 0; i < nJobs; i += 1 {
|
||||
Debugf("Waiting for job %d/%d\n", i+1, nJobs)
|
||||
utils.Debugf("Waiting for job %d/%d\n", i+1, nJobs)
|
||||
if err := <-errors; err != nil {
|
||||
Debugf("Job %d returned error %s. Aborting all jobs\n", i+1, err)
|
||||
utils.Debugf("Job %d returned error %s. Aborting all jobs\n", i+1, err)
|
||||
return err
|
||||
}
|
||||
Debugf("Job %d completed successfully\n", i+1)
|
||||
utils.Debugf("Job %d completed successfully\n", i+1)
|
||||
}
|
||||
Debugf("All jobs completed successfully\n")
|
||||
utils.Debugf("All jobs completed successfully\n")
|
||||
return nil
|
||||
})
|
||||
}
|
||||
@@ -403,7 +435,7 @@ func (container *Container) Start() error {
|
||||
defer container.State.unlock()
|
||||
|
||||
if container.State.Running {
|
||||
return fmt.Errorf("The container %s is already running.", container.Id)
|
||||
return fmt.Errorf("The container %s is already running.", container.ID)
|
||||
}
|
||||
if err := container.EnsureMounted(); err != nil {
|
||||
return err
|
||||
@@ -425,24 +457,24 @@ func (container *Container) Start() error {
|
||||
|
||||
// Create the requested volumes volumes
|
||||
for volPath := range container.Config.Volumes {
|
||||
if c, err := container.runtime.volumes.Create(nil, container, "", "", nil); err != nil {
|
||||
c, err := container.runtime.volumes.Create(nil, container, "", "", nil)
|
||||
if err != nil {
|
||||
return err
|
||||
} else {
|
||||
if err := os.MkdirAll(path.Join(container.RootfsPath(), volPath), 0755); err != nil {
|
||||
return nil
|
||||
}
|
||||
container.Volumes[volPath] = c.Id
|
||||
}
|
||||
if err := os.MkdirAll(path.Join(container.RootfsPath(), volPath), 0755); err != nil {
|
||||
return nil
|
||||
}
|
||||
container.Volumes[volPath] = c.ID
|
||||
}
|
||||
|
||||
if container.Config.VolumesFrom != "" {
|
||||
c := container.runtime.Get(container.Config.VolumesFrom)
|
||||
if c == nil {
|
||||
return fmt.Errorf("Container %s not found. Impossible to mount its volumes", container.Id)
|
||||
return fmt.Errorf("Container %s not found. Impossible to mount its volumes", container.ID)
|
||||
}
|
||||
for volPath, id := range c.Volumes {
|
||||
if _, exists := container.Volumes[volPath]; exists {
|
||||
return fmt.Errorf("The requested volume %s overlap one of the volume of the container %s", volPath, c.Id)
|
||||
return fmt.Errorf("The requested volume %s overlap one of the volume of the container %s", volPath, c.ID)
|
||||
}
|
||||
if err := os.MkdirAll(path.Join(container.RootfsPath(), volPath), 0755); err != nil {
|
||||
return nil
|
||||
@@ -456,7 +488,7 @@ func (container *Container) Start() error {
|
||||
}
|
||||
|
||||
params := []string{
|
||||
"-n", container.Id,
|
||||
"-n", container.ID,
|
||||
"-f", container.lxcConfigPath(),
|
||||
"--",
|
||||
"/sbin/init",
|
||||
@@ -551,13 +583,13 @@ func (container *Container) StdinPipe() (io.WriteCloser, error) {
|
||||
func (container *Container) StdoutPipe() (io.ReadCloser, error) {
|
||||
reader, writer := io.Pipe()
|
||||
container.stdout.AddWriter(writer)
|
||||
return newBufReader(reader), nil
|
||||
return utils.NewBufReader(reader), nil
|
||||
}
|
||||
|
||||
func (container *Container) StderrPipe() (io.ReadCloser, error) {
|
||||
reader, writer := io.Pipe()
|
||||
container.stderr.AddWriter(writer)
|
||||
return newBufReader(reader), nil
|
||||
return utils.NewBufReader(reader), nil
|
||||
}
|
||||
|
||||
func (container *Container) allocateNetwork() error {
|
||||
@@ -567,17 +599,17 @@ func (container *Container) allocateNetwork() error {
|
||||
}
|
||||
container.NetworkSettings.PortMapping = make(map[string]string)
|
||||
for _, spec := range container.Config.PortSpecs {
|
||||
if nat, err := iface.AllocatePort(spec); err != nil {
|
||||
nat, err := iface.AllocatePort(spec)
|
||||
if err != nil {
|
||||
iface.Release()
|
||||
return err
|
||||
} else {
|
||||
container.NetworkSettings.PortMapping[strconv.Itoa(nat.Backend)] = strconv.Itoa(nat.Frontend)
|
||||
}
|
||||
container.NetworkSettings.PortMapping[strconv.Itoa(nat.Backend)] = strconv.Itoa(nat.Frontend)
|
||||
}
|
||||
container.network = iface
|
||||
container.NetworkSettings.Bridge = container.runtime.networkManager.bridgeIface
|
||||
container.NetworkSettings.IpAddress = iface.IPNet.IP.String()
|
||||
container.NetworkSettings.IpPrefixLen, _ = iface.IPNet.Mask.Size()
|
||||
container.NetworkSettings.IPAddress = iface.IPNet.IP.String()
|
||||
container.NetworkSettings.IPPrefixLen, _ = iface.IPNet.Mask.Size()
|
||||
container.NetworkSettings.Gateway = iface.Gateway.String()
|
||||
return nil
|
||||
}
|
||||
@@ -591,36 +623,36 @@ func (container *Container) releaseNetwork() {
|
||||
// FIXME: replace this with a control socket within docker-init
|
||||
func (container *Container) waitLxc() error {
|
||||
for {
|
||||
if output, err := exec.Command("lxc-info", "-n", container.Id).CombinedOutput(); err != nil {
|
||||
output, err := exec.Command("lxc-info", "-n", container.ID).CombinedOutput()
|
||||
if err != nil {
|
||||
return err
|
||||
} else {
|
||||
if !strings.Contains(string(output), "RUNNING") {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
if !strings.Contains(string(output), "RUNNING") {
|
||||
return nil
|
||||
}
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
}
|
||||
return nil
|
||||
panic("Unreachable")
|
||||
}
|
||||
|
||||
func (container *Container) monitor() {
|
||||
// Wait for the program to exit
|
||||
Debugf("Waiting for process")
|
||||
utils.Debugf("Waiting for process")
|
||||
|
||||
// If the command does not exist, try to wait via lxc
|
||||
if container.cmd == nil {
|
||||
if err := container.waitLxc(); err != nil {
|
||||
Debugf("%s: Process: %s", container.Id, err)
|
||||
utils.Debugf("%s: Process: %s", container.ID, err)
|
||||
}
|
||||
} else {
|
||||
if err := container.cmd.Wait(); err != nil {
|
||||
// Discard the error as any signals or non 0 returns will generate an error
|
||||
Debugf("%s: Process: %s", container.Id, err)
|
||||
utils.Debugf("%s: Process: %s", container.ID, err)
|
||||
}
|
||||
}
|
||||
Debugf("Process finished")
|
||||
utils.Debugf("Process finished")
|
||||
|
||||
var exitCode int = -1
|
||||
exitCode := -1
|
||||
if container.cmd != nil {
|
||||
exitCode = container.cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()
|
||||
}
|
||||
@@ -629,24 +661,24 @@ func (container *Container) monitor() {
|
||||
container.releaseNetwork()
|
||||
if container.Config.OpenStdin {
|
||||
if err := container.stdin.Close(); err != nil {
|
||||
Debugf("%s: Error close stdin: %s", container.Id, err)
|
||||
utils.Debugf("%s: Error close stdin: %s", container.ID, err)
|
||||
}
|
||||
}
|
||||
if err := container.stdout.CloseWriters(); err != nil {
|
||||
Debugf("%s: Error close stdout: %s", container.Id, err)
|
||||
utils.Debugf("%s: Error close stdout: %s", container.ID, err)
|
||||
}
|
||||
if err := container.stderr.CloseWriters(); err != nil {
|
||||
Debugf("%s: Error close stderr: %s", container.Id, err)
|
||||
utils.Debugf("%s: Error close stderr: %s", container.ID, err)
|
||||
}
|
||||
|
||||
if container.ptyMaster != nil {
|
||||
if err := container.ptyMaster.Close(); err != nil {
|
||||
Debugf("%s: Error closing Pty master: %s", container.Id, err)
|
||||
utils.Debugf("%s: Error closing Pty master: %s", container.ID, err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := container.Unmount(); err != nil {
|
||||
log.Printf("%v: Failed to umount filesystem: %v", container.Id, err)
|
||||
log.Printf("%v: Failed to umount filesystem: %v", container.ID, err)
|
||||
}
|
||||
|
||||
// Re-create a brand new stdin pipe once the container exited
|
||||
@@ -667,7 +699,7 @@ func (container *Container) monitor() {
|
||||
// This is because State.setStopped() has already been called, and has caused Wait()
|
||||
// to return.
|
||||
// FIXME: why are we serializing running state to disk in the first place?
|
||||
//log.Printf("%s: Failed to dump configuration to the disk: %s", container.Id, err)
|
||||
//log.Printf("%s: Failed to dump configuration to the disk: %s", container.ID, err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -677,17 +709,17 @@ func (container *Container) kill() error {
|
||||
}
|
||||
|
||||
// Sending SIGKILL to the process via lxc
|
||||
output, err := exec.Command("lxc-kill", "-n", container.Id, "9").CombinedOutput()
|
||||
output, err := exec.Command("lxc-kill", "-n", container.ID, "9").CombinedOutput()
|
||||
if err != nil {
|
||||
log.Printf("error killing container %s (%s, %s)", container.Id, output, err)
|
||||
log.Printf("error killing container %s (%s, %s)", container.ID, output, err)
|
||||
}
|
||||
|
||||
// 2. Wait for the process to die, in last resort, try to kill the process directly
|
||||
if err := container.WaitTimeout(10 * time.Second); err != nil {
|
||||
if container.cmd == nil {
|
||||
return fmt.Errorf("lxc-kill failed, impossible to kill the container %s", container.Id)
|
||||
return fmt.Errorf("lxc-kill failed, impossible to kill the container %s", container.ID)
|
||||
}
|
||||
log.Printf("Container %s failed to exit within 10 seconds of lxc SIGKILL - trying direct SIGKILL", container.Id)
|
||||
log.Printf("Container %s failed to exit within 10 seconds of lxc SIGKILL - trying direct SIGKILL", container.ID)
|
||||
if err := container.cmd.Process.Kill(); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -715,7 +747,7 @@ func (container *Container) Stop(seconds int) error {
|
||||
}
|
||||
|
||||
// 1. Send a SIGTERM
|
||||
if output, err := exec.Command("lxc-kill", "-n", container.Id, "15").CombinedOutput(); err != nil {
|
||||
if output, err := exec.Command("lxc-kill", "-n", container.ID, "15").CombinedOutput(); err != nil {
|
||||
log.Print(string(output))
|
||||
log.Print("Failed to send SIGTERM to the process, force killing")
|
||||
if err := container.kill(); err != nil {
|
||||
@@ -725,7 +757,7 @@ func (container *Container) Stop(seconds int) error {
|
||||
|
||||
// 2. Wait for the process to exit on its own
|
||||
if err := container.WaitTimeout(time.Duration(seconds) * time.Second); err != nil {
|
||||
log.Printf("Container %v failed to exit within %d seconds of SIGTERM - using the force", container.Id, seconds)
|
||||
log.Printf("Container %v failed to exit within %d seconds of SIGTERM - using the force", container.ID, seconds)
|
||||
if err := container.kill(); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -749,6 +781,14 @@ func (container *Container) Wait() int {
|
||||
return container.State.ExitCode
|
||||
}
|
||||
|
||||
func (container *Container) Resize(h, w int) error {
|
||||
pty, ok := container.ptyMaster.(*os.File)
|
||||
if !ok {
|
||||
return fmt.Errorf("ptyMaster does not have Fd() method")
|
||||
}
|
||||
return term.SetWinsize(pty.Fd(), &term.Winsize{Height: uint16(h), Width: uint16(w)})
|
||||
}
|
||||
|
||||
func (container *Container) ExportRw() (Archive, error) {
|
||||
return Tar(container.rwPath(), Uncompressed)
|
||||
}
|
||||
@@ -758,7 +798,7 @@ func (container *Container) RwChecksum() (string, error) {
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return HashData(rwData)
|
||||
return utils.HashData(rwData)
|
||||
}
|
||||
|
||||
func (container *Container) Export() (Archive, error) {
|
||||
@@ -781,7 +821,8 @@ func (container *Container) WaitTimeout(timeout time.Duration) error {
|
||||
case <-done:
|
||||
return nil
|
||||
}
|
||||
panic("unreachable")
|
||||
|
||||
panic("Unreachable")
|
||||
}
|
||||
|
||||
func (container *Container) EnsureMounted() error {
|
||||
@@ -824,16 +865,16 @@ func (container *Container) Unmount() error {
|
||||
return Unmount(container.RootfsPath())
|
||||
}
|
||||
|
||||
// ShortId returns a shorthand version of the container's id for convenience.
|
||||
// ShortID returns a shorthand version of the container's id for convenience.
|
||||
// A collision with other container shorthands is very unlikely, but possible.
|
||||
// In case of a collision a lookup with Runtime.Get() will fail, and the caller
|
||||
// will need to use a longer prefix, or the full-length container Id.
|
||||
func (container *Container) ShortId() string {
|
||||
return TruncateId(container.Id)
|
||||
func (container *Container) ShortID() string {
|
||||
return utils.TruncateID(container.ID)
|
||||
}
|
||||
|
||||
func (container *Container) logPath(name string) string {
|
||||
return path.Join(container.root, fmt.Sprintf("%s-%s.log", container.Id, name))
|
||||
return path.Join(container.root, fmt.Sprintf("%s-%s.log", container.ID, name))
|
||||
}
|
||||
|
||||
func (container *Container) ReadLog(name string) (io.Reader, error) {
|
||||
@@ -873,9 +914,32 @@ func (container *Container) rwPath() string {
|
||||
return path.Join(container.root, "rw")
|
||||
}
|
||||
|
||||
func validateId(id string) error {
|
||||
func validateID(id string) error {
|
||||
if id == "" {
|
||||
return fmt.Errorf("Invalid empty id")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetSize returns the container's real size and virtual size
|
||||
func (container *Container) GetSize() (int64, int64) {
|
||||
var sizeRw, sizeRootfs int64
|
||||
|
||||
filepath.Walk(container.rwPath(), func(path string, fileInfo os.FileInfo, err error) error {
|
||||
if fileInfo != nil {
|
||||
sizeRw += fileInfo.Size()
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
_, err := os.Stat(container.RootfsPath())
|
||||
if err == nil {
|
||||
filepath.Walk(container.RootfsPath(), func(path string, fileInfo os.FileInfo, err error) error {
|
||||
if fileInfo != nil {
|
||||
sizeRootfs += fileInfo.Size()
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
return sizeRw, sizeRootfs
|
||||
}
|
||||
|
||||
@@ -14,7 +14,7 @@ import (
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestIdFormat(t *testing.T) {
|
||||
func TestIDFormat(t *testing.T) {
|
||||
runtime, err := newTestRuntime()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
@@ -22,19 +22,19 @@ func TestIdFormat(t *testing.T) {
|
||||
defer nuke(runtime)
|
||||
container1, err := NewBuilder(runtime).Create(
|
||||
&Config{
|
||||
Image: GetTestImage(runtime).Id,
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Cmd: []string{"/bin/sh", "-c", "echo hello world"},
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
match, err := regexp.Match("^[0-9a-f]{64}$", []byte(container1.Id))
|
||||
match, err := regexp.Match("^[0-9a-f]{64}$", []byte(container1.ID))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !match {
|
||||
t.Fatalf("Invalid container ID: %s", container1.Id)
|
||||
t.Fatalf("Invalid container ID: %s", container1.ID)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -46,7 +46,7 @@ func TestMultipleAttachRestart(t *testing.T) {
|
||||
defer nuke(runtime)
|
||||
container, err := NewBuilder(runtime).Create(
|
||||
&Config{
|
||||
Image: GetTestImage(runtime).Id,
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Cmd: []string{"/bin/sh", "-c",
|
||||
"i=1; while [ $i -le 5 ]; do i=`expr $i + 1`; echo hello; done"},
|
||||
},
|
||||
@@ -153,7 +153,7 @@ func TestDiff(t *testing.T) {
|
||||
// Create a container and remove a file
|
||||
container1, err := builder.Create(
|
||||
&Config{
|
||||
Image: GetTestImage(runtime).Id,
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Cmd: []string{"/bin/rm", "/etc/passwd"},
|
||||
},
|
||||
)
|
||||
@@ -194,7 +194,7 @@ func TestDiff(t *testing.T) {
|
||||
// Create a new container from the committed image
|
||||
container2, err := builder.Create(
|
||||
&Config{
|
||||
Image: img.Id,
|
||||
Image: img.ID,
|
||||
Cmd: []string{"cat", "/etc/passwd"},
|
||||
},
|
||||
)
|
||||
@@ -217,6 +217,37 @@ func TestDiff(t *testing.T) {
|
||||
t.Fatalf("/etc/passwd should not be present in the diff after commit.")
|
||||
}
|
||||
}
|
||||
|
||||
// Create a new container
|
||||
container3, err := builder.Create(
|
||||
&Config{
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Cmd: []string{"rm", "/bin/httpd"},
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer runtime.Destroy(container3)
|
||||
|
||||
if err := container3.Run(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Check the changelog
|
||||
c, err = container3.Changes()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
success = false
|
||||
for _, elem := range c {
|
||||
if elem.Path == "/bin/httpd" && elem.Kind == 2 {
|
||||
success = true
|
||||
}
|
||||
}
|
||||
if !success {
|
||||
t.Fatalf("/bin/httpd should be present in the diff after commit.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCommitAutoRun(t *testing.T) {
|
||||
@@ -229,7 +260,7 @@ func TestCommitAutoRun(t *testing.T) {
|
||||
builder := NewBuilder(runtime)
|
||||
container1, err := builder.Create(
|
||||
&Config{
|
||||
Image: GetTestImage(runtime).Id,
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Cmd: []string{"/bin/sh", "-c", "echo hello > /world"},
|
||||
},
|
||||
)
|
||||
@@ -260,7 +291,7 @@ func TestCommitAutoRun(t *testing.T) {
|
||||
// FIXME: Make a TestCommit that stops here and check docker.root/layers/img.id/world
|
||||
container2, err := builder.Create(
|
||||
&Config{
|
||||
Image: img.Id,
|
||||
Image: img.ID,
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
@@ -309,7 +340,7 @@ func TestCommitRun(t *testing.T) {
|
||||
|
||||
container1, err := builder.Create(
|
||||
&Config{
|
||||
Image: GetTestImage(runtime).Id,
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Cmd: []string{"/bin/sh", "-c", "echo hello > /world"},
|
||||
},
|
||||
)
|
||||
@@ -341,7 +372,7 @@ func TestCommitRun(t *testing.T) {
|
||||
|
||||
container2, err := builder.Create(
|
||||
&Config{
|
||||
Image: img.Id,
|
||||
Image: img.ID,
|
||||
Cmd: []string{"cat", "/world"},
|
||||
},
|
||||
)
|
||||
@@ -388,8 +419,9 @@ func TestStart(t *testing.T) {
|
||||
defer nuke(runtime)
|
||||
container, err := NewBuilder(runtime).Create(
|
||||
&Config{
|
||||
Image: GetTestImage(runtime).Id,
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Memory: 33554432,
|
||||
CpuShares: 1000,
|
||||
Cmd: []string{"/bin/cat"},
|
||||
OpenStdin: true,
|
||||
},
|
||||
@@ -399,6 +431,11 @@ func TestStart(t *testing.T) {
|
||||
}
|
||||
defer runtime.Destroy(container)
|
||||
|
||||
cStdin, err := container.StdinPipe()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := container.Start(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -414,7 +451,6 @@ func TestStart(t *testing.T) {
|
||||
}
|
||||
|
||||
// Try to avoid the timeout in destroy. Best effort, don't check error
|
||||
cStdin, _ := container.StdinPipe()
|
||||
cStdin.Close()
|
||||
container.WaitTimeout(2 * time.Second)
|
||||
}
|
||||
@@ -427,7 +463,7 @@ func TestRun(t *testing.T) {
|
||||
defer nuke(runtime)
|
||||
container, err := NewBuilder(runtime).Create(
|
||||
&Config{
|
||||
Image: GetTestImage(runtime).Id,
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Cmd: []string{"ls", "-al"},
|
||||
},
|
||||
)
|
||||
@@ -455,7 +491,7 @@ func TestOutput(t *testing.T) {
|
||||
defer nuke(runtime)
|
||||
container, err := NewBuilder(runtime).Create(
|
||||
&Config{
|
||||
Image: GetTestImage(runtime).Id,
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Cmd: []string{"echo", "-n", "foobar"},
|
||||
},
|
||||
)
|
||||
@@ -479,7 +515,7 @@ func TestKillDifferentUser(t *testing.T) {
|
||||
}
|
||||
defer nuke(runtime)
|
||||
container, err := NewBuilder(runtime).Create(&Config{
|
||||
Image: GetTestImage(runtime).Id,
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Cmd: []string{"tail", "-f", "/etc/resolv.conf"},
|
||||
User: "daemon",
|
||||
},
|
||||
@@ -527,7 +563,7 @@ func TestKill(t *testing.T) {
|
||||
}
|
||||
defer nuke(runtime)
|
||||
container, err := NewBuilder(runtime).Create(&Config{
|
||||
Image: GetTestImage(runtime).Id,
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Cmd: []string{"cat", "/dev/zero"},
|
||||
},
|
||||
)
|
||||
@@ -575,7 +611,7 @@ func TestExitCode(t *testing.T) {
|
||||
builder := NewBuilder(runtime)
|
||||
|
||||
trueContainer, err := builder.Create(&Config{
|
||||
Image: GetTestImage(runtime).Id,
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Cmd: []string{"/bin/true", ""},
|
||||
})
|
||||
if err != nil {
|
||||
@@ -590,7 +626,7 @@ func TestExitCode(t *testing.T) {
|
||||
}
|
||||
|
||||
falseContainer, err := builder.Create(&Config{
|
||||
Image: GetTestImage(runtime).Id,
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Cmd: []string{"/bin/false", ""},
|
||||
})
|
||||
if err != nil {
|
||||
@@ -612,7 +648,7 @@ func TestRestart(t *testing.T) {
|
||||
}
|
||||
defer nuke(runtime)
|
||||
container, err := NewBuilder(runtime).Create(&Config{
|
||||
Image: GetTestImage(runtime).Id,
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Cmd: []string{"echo", "-n", "foobar"},
|
||||
},
|
||||
)
|
||||
@@ -645,7 +681,7 @@ func TestRestartStdin(t *testing.T) {
|
||||
}
|
||||
defer nuke(runtime)
|
||||
container, err := NewBuilder(runtime).Create(&Config{
|
||||
Image: GetTestImage(runtime).Id,
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Cmd: []string{"cat"},
|
||||
|
||||
OpenStdin: true,
|
||||
@@ -727,7 +763,7 @@ func TestUser(t *testing.T) {
|
||||
|
||||
// Default user must be root
|
||||
container, err := builder.Create(&Config{
|
||||
Image: GetTestImage(runtime).Id,
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Cmd: []string{"id"},
|
||||
},
|
||||
)
|
||||
@@ -745,7 +781,7 @@ func TestUser(t *testing.T) {
|
||||
|
||||
// Set a username
|
||||
container, err = builder.Create(&Config{
|
||||
Image: GetTestImage(runtime).Id,
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Cmd: []string{"id"},
|
||||
|
||||
User: "root",
|
||||
@@ -765,7 +801,7 @@ func TestUser(t *testing.T) {
|
||||
|
||||
// Set a UID
|
||||
container, err = builder.Create(&Config{
|
||||
Image: GetTestImage(runtime).Id,
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Cmd: []string{"id"},
|
||||
|
||||
User: "0",
|
||||
@@ -785,7 +821,7 @@ func TestUser(t *testing.T) {
|
||||
|
||||
// Set a different user by uid
|
||||
container, err = builder.Create(&Config{
|
||||
Image: GetTestImage(runtime).Id,
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Cmd: []string{"id"},
|
||||
|
||||
User: "1",
|
||||
@@ -807,7 +843,7 @@ func TestUser(t *testing.T) {
|
||||
|
||||
// Set a different user by username
|
||||
container, err = builder.Create(&Config{
|
||||
Image: GetTestImage(runtime).Id,
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Cmd: []string{"id"},
|
||||
|
||||
User: "daemon",
|
||||
@@ -836,7 +872,7 @@ func TestMultipleContainers(t *testing.T) {
|
||||
builder := NewBuilder(runtime)
|
||||
|
||||
container1, err := builder.Create(&Config{
|
||||
Image: GetTestImage(runtime).Id,
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Cmd: []string{"cat", "/dev/zero"},
|
||||
},
|
||||
)
|
||||
@@ -846,7 +882,7 @@ func TestMultipleContainers(t *testing.T) {
|
||||
defer runtime.Destroy(container1)
|
||||
|
||||
container2, err := builder.Create(&Config{
|
||||
Image: GetTestImage(runtime).Id,
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Cmd: []string{"cat", "/dev/zero"},
|
||||
},
|
||||
)
|
||||
@@ -892,7 +928,7 @@ func TestStdin(t *testing.T) {
|
||||
}
|
||||
defer nuke(runtime)
|
||||
container, err := NewBuilder(runtime).Create(&Config{
|
||||
Image: GetTestImage(runtime).Id,
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Cmd: []string{"cat"},
|
||||
|
||||
OpenStdin: true,
|
||||
@@ -939,7 +975,7 @@ func TestTty(t *testing.T) {
|
||||
}
|
||||
defer nuke(runtime)
|
||||
container, err := NewBuilder(runtime).Create(&Config{
|
||||
Image: GetTestImage(runtime).Id,
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Cmd: []string{"cat"},
|
||||
|
||||
OpenStdin: true,
|
||||
@@ -986,7 +1022,7 @@ func TestEnv(t *testing.T) {
|
||||
}
|
||||
defer nuke(runtime)
|
||||
container, err := NewBuilder(runtime).Create(&Config{
|
||||
Image: GetTestImage(runtime).Id,
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Cmd: []string{"/usr/bin/env"},
|
||||
},
|
||||
)
|
||||
@@ -1059,12 +1095,17 @@ func TestLXCConfig(t *testing.T) {
|
||||
memMin := 33554432
|
||||
memMax := 536870912
|
||||
mem := memMin + rand.Intn(memMax-memMin)
|
||||
// CPU shares as well
|
||||
cpuMin := 100
|
||||
cpuMax := 10000
|
||||
cpu := cpuMin + rand.Intn(cpuMax-cpuMin)
|
||||
container, err := NewBuilder(runtime).Create(&Config{
|
||||
Image: GetTestImage(runtime).Id,
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Cmd: []string{"/bin/true"},
|
||||
|
||||
Hostname: "foobar",
|
||||
Memory: int64(mem),
|
||||
Hostname: "foobar",
|
||||
Memory: int64(mem),
|
||||
CpuShares: int64(cpu),
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
@@ -1087,7 +1128,7 @@ func BenchmarkRunSequencial(b *testing.B) {
|
||||
defer nuke(runtime)
|
||||
for i := 0; i < b.N; i++ {
|
||||
container, err := NewBuilder(runtime).Create(&Config{
|
||||
Image: GetTestImage(runtime).Id,
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Cmd: []string{"echo", "-n", "foo"},
|
||||
},
|
||||
)
|
||||
@@ -1122,7 +1163,7 @@ func BenchmarkRunParallel(b *testing.B) {
|
||||
tasks = append(tasks, complete)
|
||||
go func(i int, complete chan error) {
|
||||
container, err := NewBuilder(runtime).Create(&Config{
|
||||
Image: GetTestImage(runtime).Id,
|
||||
Image: GetTestImage(runtime).ID,
|
||||
Cmd: []string{"echo", "-n", "foo"},
|
||||
},
|
||||
)
|
||||
|
||||
contrib/MAINTAINERS (new file, 1 line)
@@ -0,0 +1 @@
|
||||
# Maintainer wanted! Enroll on #docker@freenode
|
||||
@@ -11,13 +11,13 @@ import (
|
||||
"time"
|
||||
)
|
||||
|
||||
var DOCKER_PATH string = path.Join(os.Getenv("DOCKERPATH"), "docker")
|
||||
var DOCKERPATH = path.Join(os.Getenv("DOCKERPATH"), "docker")
|
||||
|
||||
// WARNING: this crashTest will 1) crash your host, 2) remove all containers
|
||||
func runDaemon() (*exec.Cmd, error) {
|
||||
os.Remove("/var/run/docker.pid")
|
||||
exec.Command("rm", "-rf", "/var/lib/docker/containers").Run()
|
||||
cmd := exec.Command(DOCKER_PATH, "-d")
|
||||
cmd := exec.Command(DOCKERPATH, "-d")
|
||||
outPipe, err := cmd.StdoutPipe()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -77,7 +77,7 @@ func crashTest() error {
|
||||
stop = false
|
||||
for i := 0; i < 100 && !stop; {
|
||||
func() error {
|
||||
cmd := exec.Command(DOCKER_PATH, "run", "base", "echo", fmt.Sprintf("%d", totalTestCount))
|
||||
cmd := exec.Command(DOCKERPATH, "run", "base", "echo", fmt.Sprintf("%d", totalTestCount))
|
||||
i++
|
||||
totalTestCount++
|
||||
outPipe, err := cmd.StdoutPipe()
|
||||
|
||||
@@ -1,68 +0,0 @@
|
||||
# docker-build: build your software with docker
|
||||
|
||||
## Description
|
||||
|
||||
docker-build is a script to build docker images from source. It will be deprecated once the 'build' feature is incorporated into docker itself (See https://github.com/dotcloud/docker/issues/278)
|
||||
|
||||
Author: Solomon Hykes <solomon@dotcloud.com>
|
||||
|
||||
|
||||
## Install
|
||||
|
||||
docker-build requires:
|
||||
|
||||
1) A reasonably recent Python setup (tested on 2.7.2).
|
||||
|
||||
2) A running docker daemon at version 0.1.4 or more recent (http://www.docker.io/gettingstarted)
|
||||
|
||||
|
||||
## Usage
|
||||
|
||||
First create a valid Changefile, which defines a sequence of changes to apply to a base image.
|
||||
|
||||
$ cat Changefile
|
||||
# Start build from a known base image
|
||||
from base:ubuntu-12.10
|
||||
# Update ubuntu sources
|
||||
run echo 'deb http://archive.ubuntu.com/ubuntu quantal main universe multiverse' > /etc/apt/sources.list
|
||||
run apt-get update
|
||||
# Install system packages
|
||||
run DEBIAN_FRONTEND=noninteractive apt-get install -y -q git
|
||||
run DEBIAN_FRONTEND=noninteractive apt-get install -y -q curl
|
||||
run DEBIAN_FRONTEND=noninteractive apt-get install -y -q golang
|
||||
# Insert files from the host (./myscript must be present in the current directory)
|
||||
copy myscript /usr/local/bin/myscript
|
||||
|
||||
|
||||
Run docker-build, and pass the contents of your Changefile as standard input.
|
||||
|
||||
$ IMG=$(./docker-build < Changefile)
|
||||
|
||||
This will take a while: for each line of the changefile, docker-build will:
|
||||
|
||||
1. Create a new container to execute the given command or insert the given file
|
||||
2. Wait for the container to complete execution
|
||||
3. Commit the resulting changes as a new image
|
||||
4. Use the resulting image as the input of the next step
|
||||
|
||||
|
||||
If all the steps succeed, the result will be an image containing the combined results of each build step.
|
||||
You can trace back those build steps by inspecting the image's history:
|
||||
|
||||
$ docker history $IMG
|
||||
ID CREATED CREATED BY
|
||||
1e9e2045de86 A few seconds ago /bin/sh -c cat > /usr/local/bin/myscript; chmod +x /usr/local/bin/git
|
||||
77db140aa62a A few seconds ago /bin/sh -c DEBIAN_FRONTEND=noninteractive apt-get install -y -q golang
|
||||
77db140aa62a A few seconds ago /bin/sh -c DEBIAN_FRONTEND=noninteractive apt-get install -y -q curl
|
||||
77db140aa62a A few seconds ago /bin/sh -c DEBIAN_FRONTEND=noninteractive apt-get install -y -q git
|
||||
83e85d155451 A few seconds ago /bin/sh -c apt-get update
|
||||
bfd53b36d9d3 A few seconds ago /bin/sh -c echo 'deb http://archive.ubuntu.com/ubuntu quantal main universe multiverse' > /etc/apt/sources.list
|
||||
base 2 weeks ago /bin/bash
|
||||
27cf78414709 2 weeks ago
|
||||
|
||||
|
||||
Note that your build started from 'base', as instructed by your Changefile. But that base image itself seems to have been built in 2 steps - hence the extra step in the history.
|
||||
|
||||
|
||||
You can use this build technique to create any image you want: a database, a web application, or anything else that can be built by a sequence of unix commands.
|
||||
|
||||
@@ -1,142 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# docker-build is a script to build docker images from source.
|
||||
# It will be deprecated once the 'build' feature is incorporated into docker itself.
|
||||
# (See https://github.com/dotcloud/docker/issues/278)
|
||||
#
|
||||
# Author: Solomon Hykes <solomon@dotcloud.com>
|
||||
|
||||
|
||||
|
||||
# First create a valid Changefile, which defines a sequence of changes to apply to a base image.
|
||||
#
|
||||
# $ cat Changefile
|
||||
# # Start build from a known base image
|
||||
# from base:ubuntu-12.10
|
||||
# # Update ubuntu sources
|
||||
# run echo 'deb http://archive.ubuntu.com/ubuntu quantal main universe multiverse' > /etc/apt/sources.list
|
||||
# run apt-get update
|
||||
# # Install system packages
|
||||
# run DEBIAN_FRONTEND=noninteractive apt-get install -y -q git
|
||||
# run DEBIAN_FRONTEND=noninteractive apt-get install -y -q curl
|
||||
# run DEBIAN_FRONTEND=noninteractive apt-get install -y -q golang
|
||||
# # Insert files from the host (./myscript must be present in the current directory)
|
||||
# copy myscript /usr/local/bin/myscript
|
||||
#
|
||||
#
|
||||
# Run docker-build, and pass the contents of your Changefile as standard input.
|
||||
#
|
||||
# $ IMG=$(./docker-build < Changefile)
|
||||
#
|
||||
# This will take a while: for each line of the changefile, docker-build will:
|
||||
#
|
||||
# 1. Create a new container to execute the given command or insert the given file
|
||||
# 2. Wait for the container to complete execution
|
||||
# 3. Commit the resulting changes as a new image
|
||||
# 4. Use the resulting image as the input of the next step
|
||||
|
||||
|
||||
import sys
|
||||
import subprocess
|
||||
import json
|
||||
import hashlib
|
||||
|
||||
def docker(args, stdin=None):
|
||||
print "# docker " + " ".join(args)
|
||||
p = subprocess.Popen(["docker"] + list(args), stdin=stdin, stdout=subprocess.PIPE)
|
||||
return p.stdout
|
||||
|
||||
def image_exists(img):
|
||||
return docker(["inspect", img]).read().strip() != ""
|
||||
|
||||
def image_config(img):
|
||||
return json.loads(docker(["inspect", img]).read()).get("config", {})
|
||||
|
||||
def run_and_commit(img_in, cmd, stdin=None, author=None, run=None):
|
||||
run_id = docker(["run"] + (["-i", "-a", "stdin"] if stdin else ["-d"]) + [img_in, "/bin/sh", "-c", cmd], stdin=stdin).read().rstrip()
|
||||
print "---> Waiting for " + run_id
|
||||
result=int(docker(["wait", run_id]).read().rstrip())
|
||||
if result != 0:
|
||||
print "!!! '{}' return non-zero exit code '{}'. Aborting.".format(cmd, result)
|
||||
sys.exit(1)
|
||||
return docker(["commit"] + (["-author", author] if author else []) + (["-run", json.dumps(run)] if run is not None else []) + [run_id]).read().rstrip()
|
||||
|
||||
def insert(base, src, dst, author=None):
|
||||
print "COPY {} to {} in {}".format(src, dst, base)
|
||||
if dst == "":
|
||||
raise Exception("Missing destination path")
|
||||
stdin = file(src)
|
||||
stdin.seek(0)
|
||||
return run_and_commit(base, "cat > {0}; chmod +x {0}".format(dst), stdin=stdin, author=author)
|
||||
|
||||
def add(base, src, dst, author=None):
|
||||
print "PUSH to {} in {}".format(dst, base)
|
||||
if src == ".":
|
||||
tar = subprocess.Popen(["tar", "-c", "."], stdout=subprocess.PIPE).stdout
|
||||
else:
|
||||
tar = subprocess.Popen(["curl", src], stdout=subprocess.PIPE).stdout
|
||||
if dst == "":
|
||||
raise Exception("Missing argument to push")
|
||||
return run_and_commit(base, "mkdir -p '{0}' && tar -C '{0}' -x".format(dst), stdin=tar, author=author)
|
||||
|
||||
def main():
|
||||
base=""
|
||||
maintainer=""
|
||||
steps = []
|
||||
try:
|
||||
for line in sys.stdin.readlines():
|
||||
line = line.strip()
|
||||
# Skip comments and empty lines
|
||||
if line == "" or line[0] == "#":
|
||||
continue
|
||||
op, param = line.split(None, 1)
|
||||
print op.upper() + " " + param
|
||||
if op == "from":
|
||||
base = param
|
||||
steps.append(base)
|
||||
elif op == "maintainer":
|
||||
maintainer = param
|
||||
elif op == "run":
|
||||
result = run_and_commit(base, param, author=maintainer)
|
||||
steps.append(result)
|
||||
base = result
|
||||
print "===> " + base
|
||||
elif op == "copy":
|
||||
src, dst = param.split(" ", 1)
|
||||
result = insert(base, src, dst, author=maintainer)
|
||||
steps.append(result)
|
||||
base = result
|
||||
print "===> " + base
|
||||
elif op == "add":
|
||||
src, dst = param.split(" ", 1)
|
||||
result = add(base, src, dst, author=maintainer)
|
||||
steps.append(result)
|
||||
base=result
|
||||
print "===> " + base
|
||||
elif op == "expose":
|
||||
config = image_config(base)
|
||||
if config.get("PortSpecs") is None:
|
||||
config["PortSpecs"] = []
|
||||
portspec = param.strip()
|
||||
config["PortSpecs"].append(portspec)
|
||||
result = run_and_commit(base, "# (nop) expose port {}".format(portspec), author=maintainer, run=config)
|
||||
steps.append(result)
|
||||
base=result
|
||||
print "===> " + base
|
||||
elif op == "cmd":
|
||||
config = image_config(base)
|
||||
cmd = list(json.loads(param))
|
||||
config["Cmd"] = cmd
|
||||
result = run_and_commit(base, "# (nop) set default command to '{}'".format(" ".join(cmd)), author=maintainer, run=config)
|
||||
steps.append(result)
|
||||
base=result
|
||||
print "===> " + base
|
||||
else:
|
||||
print "Skipping uknown op " + op
|
||||
except:
|
||||
docker(["rmi"] + steps[1:])
|
||||
raise
|
||||
print base
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -1,13 +0,0 @@
|
||||
# Start build from a known base image
|
||||
maintainer Solomon Hykes <solomon@dotcloud.com>
|
||||
from base:ubuntu-12.10
|
||||
# Update ubuntu sources
|
||||
run echo 'deb http://archive.ubuntu.com/ubuntu quantal main universe multiverse' > /etc/apt/sources.list
|
||||
run apt-get update
|
||||
# Install system packages
|
||||
run DEBIAN_FRONTEND=noninteractive apt-get install -y -q git
|
||||
run DEBIAN_FRONTEND=noninteractive apt-get install -y -q curl
|
||||
run DEBIAN_FRONTEND=noninteractive apt-get install -y -q golang
|
||||
# Insert files from the host (./myscript must be present in the current directory)
|
||||
copy myscript /usr/local/bin/myscript
|
||||
push /src
|
||||
@@ -1,3 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
echo hello, world!
|
||||
@@ -2,18 +2,15 @@
|
||||
set -e
|
||||
|
||||
# these should match the names found at http://www.debian.org/releases/
|
||||
stableSuite='squeeze'
|
||||
testingSuite='wheezy'
|
||||
stableSuite='wheezy'
|
||||
testingSuite='jessie'
|
||||
unstableSuite='sid'
|
||||
|
||||
# if suite is equal to this, it gets the "latest" tag
|
||||
latestSuite="$testingSuite"
|
||||
|
||||
variant='minbase'
|
||||
include='iproute,iputils-ping'
|
||||
|
||||
repo="$1"
|
||||
suite="${2:-$latestSuite}"
|
||||
suite="${2:-$stableSuite}"
|
||||
mirror="${3:-}" # stick to the default debootstrap mirror if one is not provided
|
||||
|
||||
if [ ! "$repo" ]; then
|
||||
@@ -41,17 +38,14 @@ img=$(sudo tar -c . | docker import -)
|
||||
# tag suite
|
||||
docker tag $img $repo $suite
|
||||
|
||||
if [ "$suite" = "$latestSuite" ]; then
|
||||
# tag latest
|
||||
docker tag $img $repo latest
|
||||
fi
|
||||
|
||||
# test the image
|
||||
docker run -i -t $repo:$suite echo success
|
||||
|
||||
# unstable's version numbers match testing (since it's mostly just a sandbox for testing), so it doesn't get a version number tag
|
||||
if [ "$suite" != "$unstableSuite" -a "$suite" != 'unstable' ]; then
|
||||
# tag the specific version
|
||||
if [ "$suite" = "$stableSuite" -o "$suite" = 'stable' ]; then
|
||||
# tag latest
|
||||
docker tag $img $repo latest
|
||||
|
||||
# tag the specific debian release version
|
||||
ver=$(docker run $repo:$suite cat /etc/debian_version)
|
||||
docker tag $img $repo $ver
|
||||
fi
|
||||
|
||||
@@ -4,54 +4,77 @@ import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"github.com/dotcloud/docker"
|
||||
"github.com/dotcloud/docker/rcli"
|
||||
"github.com/dotcloud/docker/term"
|
||||
"io"
|
||||
"github.com/dotcloud/docker/utils"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"os/signal"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
var (
|
||||
GIT_COMMIT string
|
||||
GITCOMMIT string
|
||||
)
|
||||
|
||||
func main() {
|
||||
if docker.SelfPath() == "/sbin/init" {
|
||||
if utils.SelfPath() == "/sbin/init" {
|
||||
// Running in init mode
|
||||
docker.SysInit()
|
||||
return
|
||||
}
|
||||
host := "127.0.0.1"
|
||||
port := 4243
|
||||
// FIXME: Switch d and D ? (to be more sshd like)
|
||||
flDaemon := flag.Bool("d", false, "Daemon mode")
|
||||
flDebug := flag.Bool("D", false, "Debug mode")
|
||||
flAutoRestart := flag.Bool("r", false, "Restart previously running containers")
|
||||
bridgeName := flag.String("b", "", "Attach containers to a pre-existing network bridge")
|
||||
pidfile := flag.String("p", "/var/run/docker.pid", "File containing process PID")
|
||||
flHost := flag.String("H", fmt.Sprintf("%s:%d", host, port), "Host:port to bind/connect to")
|
||||
flEnableCors := flag.Bool("api-enable-cors", false, "Enable CORS requests in the remote api.")
|
||||
flDns := flag.String("dns", "", "Set custom dns servers")
|
||||
flag.Parse()
|
||||
if *bridgeName != "" {
|
||||
docker.NetworkBridgeIface = *bridgeName
|
||||
} else {
|
||||
docker.NetworkBridgeIface = docker.DefaultNetworkBridge
|
||||
}
|
||||
|
||||
if strings.Contains(*flHost, ":") {
|
||||
hostParts := strings.Split(*flHost, ":")
|
||||
if len(hostParts) != 2 {
|
||||
log.Fatal("Invalid bind address format.")
|
||||
os.Exit(-1)
|
||||
}
|
||||
if hostParts[0] != "" {
|
||||
host = hostParts[0]
|
||||
}
|
||||
if p, err := strconv.Atoi(hostParts[1]); err == nil {
|
||||
port = p
|
||||
}
|
||||
} else {
|
||||
host = *flHost
|
||||
}
|
||||
|
||||
if *flDebug {
|
||||
os.Setenv("DEBUG", "1")
|
||||
}
|
||||
docker.GIT_COMMIT = GIT_COMMIT
|
||||
docker.GITCOMMIT = GITCOMMIT
|
||||
if *flDaemon {
|
||||
if flag.NArg() != 0 {
|
||||
flag.Usage()
|
||||
return
|
||||
}
|
||||
if err := daemon(*pidfile, *flAutoRestart); err != nil {
|
||||
if err := daemon(*pidfile, host, port, *flAutoRestart, *flEnableCors, *flDns); err != nil {
|
||||
log.Fatal(err)
|
||||
os.Exit(-1)
|
||||
}
|
||||
} else {
|
||||
if err := runCommand(flag.Args()); err != nil {
|
||||
if err := docker.ParseCommands(host, port, flag.Args()...); err != nil {
|
||||
log.Fatal(err)
|
||||
os.Exit(-1)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -83,7 +106,10 @@ func removePidFile(pidfile string) {
|
||||
}
|
||||
}
|
||||
|
||||
func daemon(pidfile string, autoRestart bool) error {
|
||||
func daemon(pidfile, addr string, port int, autoRestart, enableCors bool, flDns string) error {
|
||||
if addr != "127.0.0.1" {
|
||||
log.Println("/!\\ DON'T BIND ON ANOTHER IP ADDRESS THAN 127.0.0.1 IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\")
|
||||
}
|
||||
if err := createPidFile(pidfile); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
@@ -97,51 +123,14 @@ func daemon(pidfile string, autoRestart bool) error {
|
||||
removePidFile(pidfile)
|
||||
os.Exit(0)
|
||||
}()
|
||||
|
||||
service, err := docker.NewServer(autoRestart)
|
||||
var dns []string
|
||||
if flDns != "" {
|
||||
dns = []string{flDns}
|
||||
}
|
||||
server, err := docker.NewServer(autoRestart, enableCors, dns)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return rcli.ListenAndServe("tcp", "127.0.0.1:4242", service)
|
||||
}
|
||||
|
||||
func runCommand(args []string) error {
|
||||
// FIXME: we want to use unix sockets here, but net.UnixConn doesn't expose
|
||||
// CloseWrite(), which we need to cleanly signal that stdin is closed without
|
||||
// closing the connection.
|
||||
// See http://code.google.com/p/go/issues/detail?id=3345
|
||||
if conn, err := rcli.Call("tcp", "127.0.0.1:4242", args...); err == nil {
|
||||
options := conn.GetOptions()
|
||||
if options.RawTerminal &&
|
||||
term.IsTerminal(int(os.Stdin.Fd())) &&
|
||||
os.Getenv("NORAW") == "" {
|
||||
if oldState, err := rcli.SetRawTerminal(); err != nil {
|
||||
return err
|
||||
} else {
|
||||
defer rcli.RestoreTerminal(oldState)
|
||||
}
|
||||
}
|
||||
receiveStdout := docker.Go(func() error {
|
||||
_, err := io.Copy(os.Stdout, conn)
|
||||
return err
|
||||
})
|
||||
sendStdin := docker.Go(func() error {
|
||||
_, err := io.Copy(conn, os.Stdin)
|
||||
if err := conn.CloseWrite(); err != nil {
|
||||
log.Printf("Couldn't send EOF: " + err.Error())
|
||||
}
|
||||
return err
|
||||
})
|
||||
if err := <-receiveStdout; err != nil {
|
||||
return err
|
||||
}
|
||||
if !term.IsTerminal(int(os.Stdin.Fd())) {
|
||||
if err := <-sendStdin; err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("Can't connect to docker daemon. Is 'docker -d' running on this host?")
|
||||
}
|
||||
return nil
|
||||
return docker.ListenAndServe(fmt.Sprintf("%s:%d", addr, port), server, true)
|
||||
}
|
||||
|
||||
2 docs/MAINTAINERS Normal file
@@ -0,0 +1,2 @@
|
||||
Andy Rothfusz <andy@dotcloud.com>
|
||||
Ken Cochrane <ken@dotcloud.com>
|
||||
@@ -6,6 +6,7 @@ SPHINXOPTS =
|
||||
SPHINXBUILD = sphinx-build
|
||||
PAPER =
|
||||
BUILDDIR = _build
|
||||
PYTHON = python
|
||||
|
||||
# Internal variables.
|
||||
PAPEROPT_a4 = -D latex_paper_size=a4
|
||||
@@ -38,17 +39,19 @@ help:
|
||||
# @echo " linkcheck to check all external links for integrity"
|
||||
# @echo " doctest to run all doctests embedded in the documentation (if enabled)"
|
||||
@echo " docs to build the docs and copy the static files to the outputdir"
|
||||
@echo " server to serve the docs in your browser under \`http://localhost:8000\`"
|
||||
@echo " publish to publish the app to dotcloud"
|
||||
|
||||
clean:
|
||||
-rm -rf $(BUILDDIR)/*
|
||||
|
||||
docs:
|
||||
-rm -rf $(BUILDDIR)/*
|
||||
$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/html
|
||||
@echo
|
||||
@echo "Build finished. The documentation pages are now in $(BUILDDIR)/html."
|
||||
|
||||
server: docs
|
||||
@cd $(BUILDDIR)/html; $(PYTHON) -m SimpleHTTPServer 8000
|
||||
|
||||
site:
|
||||
cp -r website $(BUILDDIR)/
|
||||
@@ -58,19 +61,15 @@ site:
|
||||
|
||||
connect:
|
||||
@echo connecting dotcloud to www.docker.io website, make sure to use user 1
|
||||
@cd _build/website/ ; \
|
||||
dotcloud list ; \
|
||||
dotcloud connect dockerwebsite
|
||||
@echo or create your own "dockerwebsite" app
|
||||
@cd $(BUILDDIR)/website/ ; \
|
||||
dotcloud connect dockerwebsite ; \
|
||||
dotcloud list
|
||||
|
||||
push:
|
||||
@cd _build/website/ ; \
|
||||
@cd $(BUILDDIR)/website/ ; \
|
||||
dotcloud push
|
||||
|
||||
github-deploy: docs
|
||||
rm -fr github-deploy
|
||||
git clone ssh://git@github.com/dotcloud/docker github-deploy
|
||||
cd github-deploy && git checkout -f gh-pages && git rm -r * && rsync -avH ../_build/html/ ./ && touch .nojekyll && echo "docker.io" > CNAME && git add * && git commit -m "Updating docs"
|
||||
|
||||
$(VERSIONS):
|
||||
@echo "Hello world"
|
||||
|
||||
|
||||
@@ -14,20 +14,22 @@ Installation
|
||||
------------
|
||||
|
||||
* Work in your own fork of the code, we accept pull requests.
|
||||
* Install sphinx: ``pip install sphinx``
|
||||
* Install sphinx httpdomain contrib package ``sphinxcontrib-httpdomain``
|
||||
* Install sphinx: `pip install sphinx`
|
||||
* Mac OS X: `[sudo] pip-2.7 install sphinx`
|
||||
* Install sphinx httpdomain contrib package: `pip install sphinxcontrib-httpdomain`
|
||||
* Mac OS X: `[sudo] pip-2.7 install sphinxcontrib-httpdomain`
|
||||
* If pip is not available you can probably install it using your favorite package manager as **python-pip**
|
||||
|
||||
Usage
|
||||
-----
|
||||
* change the .rst files with your favorite editor to your liking
|
||||
* run *make docs* to clean up old files and generate new ones
|
||||
* your static website can now be found in the _build dir
|
||||
* to preview what you have generated, cd into _build/html and then run 'python -m SimpleHTTPServer 8000'
|
||||
* Change the `.rst` files with your favorite editor to your liking.
|
||||
* Run `make docs` to clean up old files and generate new ones.
|
||||
* Your static website can now be found in the `_build` directory.
|
||||
* To preview what you have generated run `make server` and open <http://localhost:8000/> in your favorite browser.
|
||||
|
||||
Working using github's file editor
|
||||
Working using GitHub's file editor
|
||||
----------------------------------
|
||||
Alternatively, for small changes and typo's you might want to use github's built in file editor. It allows
|
||||
Alternatively, for small changes and typos you might want to use GitHub's built-in file editor. It allows
|
||||
you to preview your changes right online. Just be careful not to create many commits.
|
||||
|
||||
Images
|
||||
@@ -72,4 +74,4 @@ Guides on using sphinx
|
||||
|
||||
* Code examples
|
||||
|
||||
Start without $, so it's easy to copy and paste.
|
||||
Start without $, so it's easy to copy and paste.
|
||||
|
||||
1 docs/sources/api/MAINTAINERS Normal file
@@ -0,0 +1 @@
|
||||
Solomon Hykes <solomon@dotcloud.com>
|
||||
5 docs/sources/api/README.md Normal file
@@ -0,0 +1,5 @@
|
||||
This directory holds the authoritative specifications of APIs defined and implemented by Docker. Currently this includes:
|
||||
|
||||
* The remote API by which a docker node can be queried over HTTP
|
||||
* The registry API by which a docker node can download and upload container images for storage and sharing
|
||||
* The index search API by which a docker node can search the public index for images to download
|
||||
100 docs/sources/api/docker_remote_api.rst Normal file
@@ -0,0 +1,100 @@
|
||||
:title: Remote API
|
||||
:description: API Documentation for Docker
|
||||
:keywords: API, Docker, rcli, REST, documentation
|
||||
|
||||
=================
|
||||
Docker Remote API
|
||||
=================
|
||||
|
||||
.. contents:: Table of Contents
|
||||
|
||||
1. Brief introduction
|
||||
=====================
|
||||
|
||||
- The Remote API is replacing rcli
|
||||
- The default port in the docker daemon is 4243
|
||||
- The API tends to be REST, but for some complex commands, like attach or pull, the HTTP connection is hijacked to transport stdout, stdin and stderr
|
||||
- Since API version 1.2, the auth configuration is now handled client side, so the client has to send the authConfig as POST in /images/(name)/push
|
||||
|
||||
2. Versions
|
||||
===========
|
||||
|
||||
The current version of the API is 1.2
|
||||
Calling /images/<name>/insert is the same as calling /v1.2/images/<name>/insert
|
||||
You can still call an old version of the api using /v1.0/images/<name>/insert
|
||||
|
||||
:doc:`docker_remote_api_v1.2`
|
||||
*****************************
|
||||
|
||||
What's new
|
||||
----------
|
||||
|
||||
The auth configuration is now handled by the client.
|
||||
The client should send its authConfig as POST on each call of /images/(name)/push
|
||||
|
||||
.. http:get:: /auth is now deprecated
|
||||
.. http:post:: /auth only checks the configuration but doesn't store it on the server
|
||||
|
||||
Deleting an image is now improved: it will only untag the image if it has children, and it will remove all the untagged parents if it has any.
|
||||
.. http:post:: /images/<name>/delete now returns a JSON with the list of images deleted/untagged
|
||||
|
||||
|
||||
:doc:`docker_remote_api_v1.1`
|
||||
*****************************
|
||||
|
||||
docker v0.4.0 a8ae398_
|
||||
|
||||
What's new
|
||||
----------
|
||||
|
||||
.. http:post:: /images/create
|
||||
.. http:post:: /images/(name)/insert
|
||||
.. http:post:: /images/(name)/push
|
||||
|
||||
Uses json stream instead of HTML hijack, it looks like this:
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
HTTP/1.1 200 OK
|
||||
Content-Type: application/json
|
||||
|
||||
{"status":"Pushing..."}
|
||||
{"status":"Pushing", "progress":"1/? (n/a)"}
|
||||
{"error":"Invalid..."}
|
||||
...
|
||||
|
||||
|
||||
docker v0.3.4 8d73740_
|
||||
|
||||
What's new
|
||||
----------
|
||||
|
||||
Initial version
|
||||
|
||||
|
||||
.. _a8ae398: https://github.com/dotcloud/docker/commit/a8ae398bf52e97148ee7bd0d5868de2e15bd297f
|
||||
.. _8d73740: https://github.com/dotcloud/docker/commit/8d73740343778651c09160cde9661f5f387b36f4
|
||||
|
||||
==================================
|
||||
Docker Remote API Client Libraries
|
||||
==================================
|
||||
|
||||
These libraries have not been tested by the Docker Maintainers for
|
||||
compatibility. Please file issues with the library owners. If you
|
||||
find more library implementations, please list them in Docker doc bugs
|
||||
and we will add the libraries here.
|
||||
|
||||
+----------------------+----------------+--------------------------------------------+
|
||||
| Language/Framework | Name | Repository |
|
||||
+======================+================+============================================+
|
||||
| Python | docker-py | https://github.com/dotcloud/docker-py |
|
||||
+----------------------+----------------+--------------------------------------------+
|
||||
| Ruby | docker-ruby | https://github.com/ActiveState/docker-ruby |
|
||||
+----------------------+----------------+--------------------------------------------+
|
||||
| Ruby | docker-client | https://github.com/geku/docker-client |
|
||||
+----------------------+----------------+--------------------------------------------+
|
||||
| Javascript | docker-js | https://github.com/dgoujard/docker-js |
|
||||
+----------------------+----------------+--------------------------------------------+
|
||||
| Javascript (Angular) | dockerui | https://github.com/crosbymichael/dockerui |
|
||||
| **WebUI** | | |
|
||||
+----------------------+----------------+--------------------------------------------+
|
||||
1015 docs/sources/api/docker_remote_api_v1.0.rst Normal file (file diff suppressed because it is too large)
1026 docs/sources/api/docker_remote_api_v1.1.rst Normal file (file diff suppressed because it is too large)
1030 docs/sources/api/docker_remote_api_v1.2.rst Normal file (file diff suppressed because it is too large)
18 docs/sources/api/index.rst Normal file
@@ -0,0 +1,18 @@
|
||||
:title: API Documentation
|
||||
:description: docker documentation
|
||||
:keywords: docker, api, documentation
|
||||
|
||||
APIs
|
||||
====
|
||||
|
||||
Your programs and scripts can access Docker's functionality via these interfaces:
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 3
|
||||
|
||||
registry_index_spec
|
||||
registry_api
|
||||
index_api
|
||||
docker_remote_api
|
||||
|
||||
|
||||
553 docs/sources/api/index_api.rst Normal file
@@ -0,0 +1,553 @@
|
||||
:title: Index API
|
||||
:description: API Documentation for Docker Index
|
||||
:keywords: API, Docker, index, REST, documentation
|
||||
|
||||
=================
|
||||
Docker Index API
|
||||
=================
|
||||
|
||||
.. contents:: Table of Contents
|
||||
|
||||
1. Brief introduction
|
||||
=====================
|
||||
|
||||
- This is the REST API for the Docker index
|
||||
- Authorization is done with basic auth over SSL
|
||||
- Not all commands require authentication, only those noted as such.
|
||||
|
||||
2. Endpoints
|
||||
============
|
||||
|
||||
2.1 Repository
|
||||
^^^^^^^^^^^^^^
|
||||
|
||||
Repositories
|
||||
*************
|
||||
|
||||
User Repo
|
||||
~~~~~~~~~
|
||||
|
||||
.. http:put:: /v1/repositories/(namespace)/(repo_name)/
|
||||
|
||||
Create a user repository with the given ``namespace`` and ``repo_name``.
|
||||
|
||||
**Example Request**:
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
PUT /v1/repositories/foo/bar/ HTTP/1.1
|
||||
Host: index.docker.io
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Basic akmklmasadalkm==
|
||||
X-Docker-Token: true
|
||||
|
||||
[{“id”: “9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f”}]
|
||||
|
||||
:parameter namespace: the namespace for the repo
|
||||
:parameter repo_name: the name for the repo
|
||||
|
||||
**Example Response**:
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
HTTP/1.1 200
|
||||
Vary: Accept
|
||||
Content-Type: application/json
|
||||
WWW-Authenticate: Token signature=123abc,repository=”foo/bar”,access=write
|
||||
X-Docker-Endpoints: registry-1.docker.io [, registry-2.docker.io]
|
||||
|
||||
""
|
||||
|
||||
:statuscode 200: Created
|
||||
:statuscode 400: Errors (invalid json, missing or invalid fields, etc)
|
||||
:statuscode 401: Unauthorized
|
||||
:statuscode 403: Account is not Active
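Putting the example request above into code, a hypothetical client call could look like this (credentials, repository name and image id are placeholders):

.. sourcecode:: python

    # Sketch: create a user repo on the Index and read back the write token
    # and registry endpoints, mirroring the example request/response above.
    import base64
    import json
    import urllib.request

    image_id = "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f"
    req = urllib.request.Request(
        "https://index.docker.io/v1/repositories/foo/bar/",
        data=json.dumps([{"id": image_id}]).encode(), method="PUT")
    req.add_header("Content-Type", "application/json")
    req.add_header("X-Docker-Token", "true")
    req.add_header("Authorization",
                   "Basic " + base64.b64encode(b"user:password").decode())
    with urllib.request.urlopen(req) as resp:
        print(resp.headers.get("WWW-Authenticate"))
        print(resp.headers.get("X-Docker-Endpoints"))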
|
||||
|
||||
|
||||
.. http:delete:: /v1/repositories/(namespace)/(repo_name)/
|
||||
|
||||
Delete a user repository with the given ``namespace`` and ``repo_name``.
|
||||
|
||||
**Example Request**:
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
DELETE /v1/repositories/foo/bar/ HTTP/1.1
|
||||
Host: index.docker.io
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Basic akmklmasadalkm==
|
||||
X-Docker-Token: true
|
||||
|
||||
""
|
||||
|
||||
:parameter namespace: the namespace for the repo
|
||||
:parameter repo_name: the name for the repo
|
||||
|
||||
**Example Response**:
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
HTTP/1.1 202
|
||||
Vary: Accept
|
||||
Content-Type: application/json
|
||||
WWW-Authenticate: Token signature=123abc,repository=”foo/bar”,access=delete
|
||||
X-Docker-Endpoints: registry-1.docker.io [, registry-2.docker.io]
|
||||
|
||||
""
|
||||
|
||||
:statuscode 200: Deleted
|
||||
:statuscode 202: Accepted
|
||||
:statuscode 400: Errors (invalid json, missing or invalid fields, etc)
|
||||
:statuscode 401: Unauthorized
|
||||
:statuscode 403: Account is not Active
|
||||
|
||||
Library Repo
|
||||
~~~~~~~~~~~~
|
||||
|
||||
.. http:put:: /v1/repositories/(repo_name)/
|
||||
|
||||
Create a library repository with the given ``repo_name``.
|
||||
This is a restricted feature only available to docker admins.
|
||||
|
||||
When namespace is missing, it is assumed to be ``library``
|
||||
|
||||
**Example Request**:
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
PUT /v1/repositories/foobar/ HTTP/1.1
|
||||
Host: index.docker.io
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Basic akmklmasadalkm==
|
||||
X-Docker-Token: true
|
||||
|
||||
[{“id”: “9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f”}]
|
||||
|
||||
:parameter repo_name: the library name for the repo
|
||||
|
||||
**Example Response**:
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
HTTP/1.1 200
|
||||
Vary: Accept
|
||||
Content-Type: application/json
|
||||
WWW-Authenticate: Token signature=123abc,repository=”library/foobar”,access=write
|
||||
X-Docker-Endpoints: registry-1.docker.io [, registry-2.docker.io]
|
||||
|
||||
""
|
||||
|
||||
:statuscode 200: Created
|
||||
:statuscode 400: Errors (invalid json, missing or invalid fields, etc)
|
||||
:statuscode 401: Unauthorized
|
||||
:statuscode 403: Account is not Active
|
||||
|
||||
.. http:delete:: /v1/repositories/(repo_name)/
|
||||
|
||||
Delete a library repository with the given ``repo_name``.
|
||||
This is a restricted feature only available to docker admins.
|
||||
|
||||
When namespace is missing, it is assumed to be ``library``
|
||||
|
||||
**Example Request**:
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
DELETE /v1/repositories/foobar/ HTTP/1.1
|
||||
Host: index.docker.io
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Basic akmklmasadalkm==
|
||||
X-Docker-Token: true
|
||||
|
||||
""
|
||||
|
||||
:parameter repo_name: the library name for the repo
|
||||
|
||||
**Example Response**:
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
HTTP/1.1 202
|
||||
Vary: Accept
|
||||
Content-Type: application/json
|
||||
WWW-Authenticate: Token signature=123abc,repository=”library/foobar”,access=delete
|
||||
X-Docker-Endpoints: registry-1.docker.io [, registry-2.docker.io]
|
||||
|
||||
""
|
||||
|
||||
:statuscode 200: Deleted
|
||||
:statuscode 202: Accepted
|
||||
:statuscode 400: Errors (invalid json, missing or invalid fields, etc)
|
||||
:statuscode 401: Unauthorized
|
||||
:statuscode 403: Account is not Active
|
||||
|
||||
Repository Images
|
||||
*****************
|
||||
|
||||
User Repo Images
|
||||
~~~~~~~~~~~~~~~~
|
||||
|
||||
.. http:put:: /v1/repositories/(namespace)/(repo_name)/images
|
||||
|
||||
Update the images for a user repo.
|
||||
|
||||
**Example Request**:
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
PUT /v1/repositories/foo/bar/images HTTP/1.1
|
||||
Host: index.docker.io
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Basic akmklmasadalkm==
|
||||
|
||||
[{“id”: “9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f”,
|
||||
“checksum”: “b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087”}]
|
||||
|
||||
:parameter namespace: the namespace for the repo
|
||||
:parameter repo_name: the name for the repo
|
||||
|
||||
**Example Response**:
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
HTTP/1.1 204
|
||||
Vary: Accept
|
||||
Content-Type: application/json
|
||||
|
||||
""
|
||||
|
||||
:statuscode 204: Created
|
||||
:statuscode 400: Errors (invalid json, missing or invalid fields, etc)
|
||||
:statuscode 401: Unauthorized
|
||||
:statuscode 403: Account is not Active or permission denied
|
||||
|
||||
|
||||
.. http:get:: /v1/repositories/(namespace)/(repo_name)/images
|
||||
|
||||
get the images for a user repo.
|
||||
|
||||
**Example Request**:
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
GET /v1/repositories/foo/bar/images HTTP/1.1
|
||||
Host: index.docker.io
|
||||
Accept: application/json
|
||||
|
||||
:parameter namespace: the namespace for the repo
|
||||
:parameter repo_name: the name for the repo
|
||||
|
||||
**Example Response**:
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
HTTP/1.1 200
|
||||
Vary: Accept
|
||||
Content-Type: application/json
|
||||
|
||||
[{“id”: “9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f”,
|
||||
“checksum”: “b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087”},
|
||||
{“id”: “ertwetewtwe38722009fe6857087b486531f9a779a0c1dfddgfgsdgdsgds”,
|
||||
“checksum”: “34t23f23fc17e3ed29dae8f12c4f9e89cc6f0bsdfgfsdgdsgdsgerwgew”}]
|
||||
|
||||
:statuscode 200: OK
|
||||
:statuscode 404: Not found
|
||||
|
||||
Library Repo Images
|
||||
~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. http:put:: /v1/repositories/(repo_name)/images
|
||||
|
||||
Update the images for a library repo.
|
||||
|
||||
**Example Request**:
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
PUT /v1/repositories/foobar/images HTTP/1.1
|
||||
Host: index.docker.io
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Basic akmklmasadalkm==
|
||||
|
||||
[{“id”: “9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f”,
|
||||
“checksum”: “b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087”}]
|
||||
|
||||
:parameter repo_name: the library name for the repo
|
||||
|
||||
**Example Response**:
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
HTTP/1.1 204
|
||||
Vary: Accept
|
||||
Content-Type: application/json
|
||||
|
||||
""
|
||||
|
||||
:statuscode 204: Created
|
||||
:statuscode 400: Errors (invalid json, missing or invalid fields, etc)
|
||||
:statuscode 401: Unauthorized
|
||||
:statuscode 403: Account is not Active or permission denied
|
||||
|
||||
|
||||
.. http:get:: /v1/repositories/(repo_name)/images
|
||||
|
||||
get the images for a library repo.
|
||||
|
||||
**Example Request**:
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
GET /v1/repositories/foobar/images HTTP/1.1
|
||||
Host: index.docker.io
|
||||
Accept: application/json
|
||||
|
||||
:parameter repo_name: the library name for the repo
|
||||
|
||||
**Example Response**:
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
HTTP/1.1 200
|
||||
Vary: Accept
|
||||
Content-Type: application/json
|
||||
|
||||
[{“id”: “9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f”,
|
||||
“checksum”: “b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087”},
|
||||
{“id”: “ertwetewtwe38722009fe6857087b486531f9a779a0c1dfddgfgsdgdsgds”,
|
||||
“checksum”: “34t23f23fc17e3ed29dae8f12c4f9e89cc6f0bsdfgfsdgdsgdsgerwgew”}]
|
||||
|
||||
:statuscode 200: OK
|
||||
:statuscode 404: Not found
|
||||
|
||||
|
||||
Repository Authorization
|
||||
************************
|
||||
|
||||
Library Repo
|
||||
~~~~~~~~~~~~
|
||||
|
||||
.. http:put:: /v1/repositories/(repo_name)/auth
|
||||
|
||||
authorize a token for a library repo
|
||||
|
||||
**Example Request**:
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
PUT /v1/repositories/foobar/auth HTTP/1.1
|
||||
Host: index.docker.io
|
||||
Accept: application/json
|
||||
Authorization: Token signature=123abc,repository="library/foobar",access=write
|
||||
|
||||
:parameter repo_name: the library name for the repo
|
||||
|
||||
**Example Response**:
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
HTTP/1.1 200
|
||||
Vary: Accept
|
||||
Content-Type: application/json
|
||||
|
||||
"OK"
|
||||
|
||||
:statuscode 200: OK
|
||||
:statuscode 403: Permission denied
|
||||
:statuscode 404: Not found
|
||||
|
||||
|
||||
User Repo
|
||||
~~~~~~~~~
|
||||
|
||||
.. http:put:: /v1/repositories/(namespace)/(repo_name)/auth
|
||||
|
||||
authorize a token for a user repo
|
||||
|
||||
**Example Request**:
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
PUT /v1/repositories/foo/bar/auth HTTP/1.1
|
||||
Host: index.docker.io
|
||||
Accept: application/json
|
||||
Authorization: Token signature=123abc,repository="foo/bar",access=write
|
||||
|
||||
:parameter namespace: the namespace for the repo
|
||||
:parameter repo_name: the name for the repo
|
||||
|
||||
**Example Response**:
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
HTTP/1.1 200
|
||||
Vary: Accept
|
||||
Content-Type: application/json
|
||||
|
||||
"OK"
|
||||
|
||||
:statuscode 200: OK
|
||||
:statuscode 403: Permission denied
|
||||
:statuscode 404: Not found
|
||||
|
||||
|
||||
2.2 Users
|
||||
^^^^^^^^^
|
||||
|
||||
User Login
|
||||
**********
|
||||
|
||||
.. http:get:: /v1/users
|
||||
|
||||
If you want to check your login, you can try this endpoint
|
||||
|
||||
**Example Request**:
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
GET /v1/users HTTP/1.1
|
||||
Host: index.docker.io
|
||||
Accept: application/json
|
||||
Authorization: Basic akmklmasadalkm==
|
||||
|
||||
**Example Response**:
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
HTTP/1.1 200 OK
|
||||
Vary: Accept
|
||||
Content-Type: application/json
|
||||
|
||||
OK
|
||||
|
||||
:statuscode 200: no error
|
||||
:statuscode 401: Unauthorized
|
||||
:statuscode 403: Account is not Active
|
||||
|
||||
|
||||
User Register
|
||||
*************
|
||||
|
||||
.. http:post:: /v1/users
|
||||
|
||||
Registering a new account.
|
||||
|
||||
**Example request**:
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
POST /v1/users HTTP/1.1
|
||||
Host: index.docker.io
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
|
||||
{"email": "sam@dotcloud.com",
|
||||
"password": "toto42",
|
||||
"username": "foobar"'}
|
||||
|
||||
:jsonparameter email: valid email address that needs to be confirmed
|
||||
:jsonparameter username: min 4 characters, max 30 characters, must match the regular expression [a-z0-9_].
|
||||
:jsonparameter password: min 5 characters
|
||||
|
||||
**Example Response**:
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
HTTP/1.1 201 OK
|
||||
Vary: Accept
|
||||
Content-Type: application/json
|
||||
|
||||
"User Created"
|
||||
|
||||
:statuscode 201: User Created
|
||||
:statuscode 400: Errors (invalid json, missing or invalid fields, etc)
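For illustration, the same registration request issued from Python (the values are placeholders and must satisfy the validation rules above):

.. sourcecode:: python

    # Sketch: register a new account against the Index, as documented above.
    import json
    import urllib.request

    payload = json.dumps({"email": "sam@dotcloud.com",
                          "password": "toto42",
                          "username": "foobar"}).encode()
    req = urllib.request.Request("https://index.docker.io/v1/users",
                                 data=payload, method="POST")
    req.add_header("Content-Type", "application/json")
    with urllib.request.urlopen(req) as resp:
        print(resp.status, resp.read().decode())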
|
||||
|
||||
Update User
|
||||
***********
|
||||
|
||||
.. http:put:: /v1/users/(username)/
|
||||
|
||||
Change a password or email address for a given user. If you pass in an email,
|
||||
it will be added to your account; the old one will not be removed. Passwords will
|
||||
be updated.
|
||||
|
||||
It is up to the client to verify that the password that is sent is the one that
|
||||
the user wants. A common approach is to have them type it twice.
|
||||
|
||||
**Example Request**:
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
PUT /v1/users/fakeuser/ HTTP/1.1
|
||||
Host: index.docker.io
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Basic akmklmasadalkm==
|
||||
|
||||
{"email": "sam@dotcloud.com",
|
||||
"password": "toto42"}
|
||||
|
||||
:parameter username: username for the person you want to update
|
||||
|
||||
**Example Response**:
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
HTTP/1.1 204
|
||||
Vary: Accept
|
||||
Content-Type: application/json
|
||||
|
||||
""
|
||||
|
||||
:statuscode 204: User Updated
|
||||
:statuscode 400: Errors (invalid json, missing or invalid fields, etc)
|
||||
:statuscode 401: Unauthorized
|
||||
:statuscode 403: Account is not Active
|
||||
:statuscode 404: User not found
|
||||
|
||||
|
||||
2.3 Search
|
||||
^^^^^^^^^^
|
||||
If you need to search the index, this is the endpoint you would use.
|
||||
|
||||
Search
|
||||
******
|
||||
|
||||
.. http:get:: /v1/search
|
||||
|
||||
Search the Index given a search term. It accepts :http:method:`get` only.
|
||||
|
||||
**Example request**:
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
GET /v1/search?q=search_term HTTP/1.1
|
||||
Host: example.com
|
||||
Accept: application/json
|
||||
|
||||
|
||||
**Example response**:
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
HTTP/1.1 200 OK
|
||||
Vary: Accept
|
||||
Content-Type: application/json
|
||||
|
||||
{"query":"search_term",
|
||||
"num_results": 2,
|
||||
"results" : [
|
||||
{"name": "dotcloud/base", "description": "A base ubuntu64 image..."},
|
||||
{"name": "base2", "description": "A base ubuntu64 image..."},
|
||||
]
|
||||
}
|
||||
|
||||
:query q: what you want to search for
|
||||
:statuscode 200: no error
|
||||
:statuscode 500: server error
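A quick sketch of querying the search endpoint and walking the results (the search term is arbitrary):

.. sourcecode:: python

    # Sketch: search the Index and print name/description pairs,
    # following the response shape shown above.
    import json
    import urllib.parse
    import urllib.request

    query = urllib.parse.urlencode({"q": "ubuntu"})
    url = "https://index.docker.io/v1/search?" + query
    with urllib.request.urlopen(url) as resp:
        data = json.load(resp)
        print("%d result(s) for %r" % (data["num_results"], data["query"]))
        for result in data["results"]:
            print(result["name"], "-", result["description"])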
|
||||
463 docs/sources/api/registry_api.rst Normal file
@@ -0,0 +1,463 @@
|
||||
:title: Registry API
|
||||
:description: API Documentation for Docker Registry
|
||||
:keywords: API, Docker, index, registry, REST, documentation
|
||||
|
||||
===================
|
||||
Docker Registry API
|
||||
===================
|
||||
|
||||
.. contents:: Table of Contents
|
||||
|
||||
1. Brief introduction
|
||||
=====================
|
||||
|
||||
- This is the REST API for the Docker Registry
|
||||
- It stores the images and the graph for a set of repositories
|
||||
- It does not have user accounts data
|
||||
- It has no notion of user accounts or authorization
|
||||
- It delegates authentication and authorization to the Index Auth service using tokens
|
||||
- It supports different storage backends (S3, cloud files, local FS)
|
||||
- It doesn’t have a local database
|
||||
- It will be open-sourced at some point
|
||||
|
||||
We expect that there will be multiple registries out there. To help grasp the context, here are some examples of registries:
|
||||
|
||||
- **sponsor registry**: such a registry is provided by a third-party hosting infrastructure as a convenience for their customers and the docker community as a whole. Its costs are supported by the third party, but the management and operation of the registry are supported by dotCloud. It features read/write access, and delegates authentication and authorization to the Index.
|
||||
- **mirror registry**: such a registry is provided by a third-party hosting infrastructure but is targeted at their customers only. Some mechanism (unspecified to date) ensures that public images are pulled from a sponsor registry to the mirror registry, to make sure that the customers of the third-party provider can “docker pull” those images locally.
|
||||
- **vendor registry**: such a registry is provided by a software vendor, who wants to distribute docker images. It would be operated and managed by the vendor. Only users authorized by the vendor would be able to get write access. Some images would be public (accessible for anyone), others private (accessible only for authorized users). Authentication and authorization would be delegated to the Index. The goal of vendor registries is to let someone do “docker pull basho/riak1.3” and automatically push from the vendor registry (instead of a sponsor registry); i.e. get all the convenience of a sponsor registry, while retaining control on the asset distribution.
|
||||
- **private registry**: such a registry is located behind a firewall, or protected by an additional security layer (HTTP authorization, SSL client-side certificates, IP address authorization...). The registry is operated by a private entity, outside of dotCloud’s control. It can optionally delegate additional authorization to the Index, but it is not mandatory.
|
||||
|
||||
.. note::
|
||||
|
||||
Mirror registries and private registries which do not use the Index don’t even need to run the registry code. They can be implemented by any kind of transport implementing HTTP GET and PUT. Read-only registries can be powered by a simple static HTTP server.
|
||||
|
||||
.. note::
|
||||
|
||||
The latter implies that while HTTP is the protocol of choice for a registry, multiple schemes are possible (and in some cases, trivial):
|
||||
- HTTP with GET (and PUT for read-write registries);
|
||||
- local mount point;
|
||||
- remote docker addressed through SSH.
|
||||
|
||||
The latter would only require two new commands in docker, e.g. “registryget” and “registryput”, wrapping access to the local filesystem (and optionally doing consistency checks). Authentication and authorization are then delegated to SSH (e.g. with public keys).
|
||||
|
||||
2. Endpoints
|
||||
============
|
||||
|
||||
2.1 Images
|
||||
----------
|
||||
|
||||
Layer
|
||||
*****
|
||||
|
||||
.. http:get:: /v1/images/(image_id)/layer
|
||||
|
||||
get image layer for a given ``image_id``
|
||||
|
||||
**Example Request**:
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
GET /v1/images/088b4505aa3adc3d35e79c031fa126b403200f02f51920fbd9b7c503e87c7a2c/layer HTTP/1.1
|
||||
Host: registry-1.docker.io
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Token akmklmasadalkmsdfgsdgdge33
|
||||
|
||||
:parameter image_id: the id for the layer you want to get
|
||||
|
||||
**Example Response**:
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
HTTP/1.1 200
|
||||
Vary: Accept
|
||||
Content-Type: application/json
|
||||
Cookie: (Cookie provided by the Registry)
|
||||
|
||||
{
|
||||
id: "088b4505aa3adc3d35e79c031fa126b403200f02f51920fbd9b7c503e87c7a2c",
|
||||
parent: "aeee6396d62273d180a49c96c62e45438d87c7da4a5cf5d2be6bee4e21bc226f",
|
||||
created: "2013-04-30T17:46:10.843673+03:00",
|
||||
container: "8305672a76cc5e3d168f97221106ced35a76ec7ddbb03209b0f0d96bf74f6ef7",
|
||||
container_config: {
|
||||
Hostname: "host-test",
|
||||
User: "",
|
||||
Memory: 0,
|
||||
MemorySwap: 0,
|
||||
AttachStdin: false,
|
||||
AttachStdout: false,
|
||||
AttachStderr: false,
|
||||
PortSpecs: null,
|
||||
Tty: false,
|
||||
OpenStdin: false,
|
||||
StdinOnce: false,
|
||||
Env: null,
|
||||
Cmd: [
|
||||
"/bin/bash",
|
||||
"-c",
|
||||
"apt-get -q -yy -f install libevent-dev"
|
||||
],
|
||||
Dns: null,
|
||||
Image: "imagename/blah",
|
||||
Volumes: { },
|
||||
VolumesFrom: ""
|
||||
},
|
||||
docker_version: "0.1.7"
|
||||
}
|
||||
|
||||
:statuscode 200: OK
|
||||
:statuscode 401: Requires authorization
|
||||
:statuscode 404: Image not found
|
||||
|
||||
|
||||
.. http:put:: /v1/images/(image_id)/layer
|
||||
|
||||
put image layer for a given ``image_id``
|
||||
|
||||
**Example Request**:
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
PUT /v1/images/088b4505aa3adc3d35e79c031fa126b403200f02f51920fbd9b7c503e87c7a2c/layer HTTP/1.1
|
||||
Host: registry-1.docker.io
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Token akmklmasadalkmsdfgsdgdge33
|
||||
|
||||
{
|
||||
id: "088b4505aa3adc3d35e79c031fa126b403200f02f51920fbd9b7c503e87c7a2c",
|
||||
parent: "aeee6396d62273d180a49c96c62e45438d87c7da4a5cf5d2be6bee4e21bc226f",
|
||||
created: "2013-04-30T17:46:10.843673+03:00",
|
||||
container: "8305672a76cc5e3d168f97221106ced35a76ec7ddbb03209b0f0d96bf74f6ef7",
|
||||
container_config: {
|
||||
Hostname: "host-test",
|
||||
User: "",
|
||||
Memory: 0,
|
||||
MemorySwap: 0,
|
||||
AttachStdin: false,
|
||||
AttachStdout: false,
|
||||
AttachStderr: false,
|
||||
PortSpecs: null,
|
||||
Tty: false,
|
||||
OpenStdin: false,
|
||||
StdinOnce: false,
|
||||
Env: null,
|
||||
Cmd: [
|
||||
"/bin/bash",
|
||||
"-c",
|
||||
"apt-get -q -yy -f install libevent-dev"
|
||||
],
|
||||
Dns: null,
|
||||
Image: "imagename/blah",
|
||||
Volumes: { },
|
||||
VolumesFrom: ""
|
||||
},
|
||||
docker_version: "0.1.7"
|
||||
}
|
||||
|
||||
:parameter image_id: the id for the layer you want to get
|
||||
|
||||
|
||||
**Example Response**:
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
HTTP/1.1 200
|
||||
Vary: Accept
|
||||
Content-Type: application/json
|
||||
|
||||
""
|
||||
|
||||
:statuscode 200: OK
|
||||
:statuscode 401: Requires authorization
|
||||
:statuscode 404: Image not found
|
||||
|
||||
|
||||
Image
|
||||
*****
|
||||
|
||||
.. http:put:: /v1/images/(image_id)/json
|
||||
|
||||
put image for a given ``image_id``
|
||||
|
||||
**Example Request**:
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
PUT /v1/images/088b4505aa3adc3d35e79c031fa126b403200f02f51920fbd9b7c503e87c7a2c/json HTTP/1.1
|
||||
Host: registry-1.docker.io
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Cookie: (Cookie provided by the Registry)
|
||||
|
||||
{
|
||||
“id”: “088b4505aa3adc3d35e79c031fa126b403200f02f51920fbd9b7c503e87c7a2c”,
|
||||
“checksum”: “sha256:b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087”
|
||||
}
|
||||
|
||||
:parameter image_id: the id for the layer you want to get
|
||||
|
||||
|
||||
**Example Response**:
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
HTTP/1.1 200
|
||||
Vary: Accept
|
||||
Content-Type: application/json
|
||||
|
||||
""
|
||||
|
||||
:statuscode 200: OK
|
||||
:statuscode 401: Requires authorization
|
||||
|
||||
.. http:get:: /v1/images/(image_id)/json
|
||||
|
||||
get image for a given ``image_id``
|
||||
|
||||
**Example Request**:
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
GET /v1/images/088b4505aa3adc3d35e79c031fa126b403200f02f51920fbd9b7c503e87c7a2c/json HTTP/1.1
|
||||
Host: registry-1.docker.io
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Cookie: (Cookie provided by the Registry)
|
||||
|
||||
:parameter image_id: the id for the layer you want to get
|
||||
|
||||
**Example Response**:
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
HTTP/1.1 200
|
||||
Vary: Accept
|
||||
Content-Type: application/json
|
||||
|
||||
{
|
||||
“id”: “088b4505aa3adc3d35e79c031fa126b403200f02f51920fbd9b7c503e87c7a2c”,
|
||||
“checksum”: “sha256:b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087”
|
||||
}
|
||||
|
||||
:statuscode 200: OK
|
||||
:statuscode 401: Requires authorization
|
||||
:statuscode 404: Image not found
|
||||
|
||||
|
||||
Ancestry
|
||||
********
|
||||
|
||||
.. http:get:: /v1/images/(image_id)/ancestry
|
||||
|
||||
get ancestry for an image given an ``image_id``
|
||||
|
||||
**Example Request**:
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
GET /v1/images/088b4505aa3adc3d35e79c031fa126b403200f02f51920fbd9b7c503e87c7a2c/ancestry HTTP/1.1
|
||||
Host: registry-1.docker.io
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Cookie: (Cookie provided by the Registry)
|
||||
|
||||
:parameter image_id: the id for the layer you want to get
|
||||
|
||||
**Example Response**:
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
HTTP/1.1 200
|
||||
Vary: Accept
|
||||
Content-Type: application/json
|
||||
|
||||
["088b4502f51920fbd9b7c503e87c7a2c05aa3adc3d35e79c031fa126b403200f",
|
||||
"aeee63968d87c7da4a5cf5d2be6bee4e21bc226fd62273d180a49c96c62e4543",
|
||||
"bfa4c5326bc764280b0863b46a4b20d940bc1897ef9c1dfec060604bdc383280",
|
||||
"6ab5893c6927c15a15665191f2c6cf751f5056d8b95ceee32e43c5e8a3648544"]
|
||||
|
||||
:statuscode 200: OK
|
||||
:statuscode 401: Requires authorization
|
||||
:statuscode 404: Image not found
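To tie these endpoints together, here is a hedged sketch that fetches an image's ancestry and then the JSON of each ancestor (the registry host, token and image id are placeholders taken from the examples above):

.. sourcecode:: python

    # Sketch: walk an image's ancestry and fetch each ancestor's metadata,
    # combining the /ancestry and /json endpoints described above.
    import json
    import urllib.request

    REGISTRY = "https://registry-1.docker.io"
    TOKEN = 'Token signature=123abc,repository="foo/bar",access=write'
    image_id = "088b4505aa3adc3d35e79c031fa126b403200f02f51920fbd9b7c503e87c7a2c"

    def registry_get(path):
        req = urllib.request.Request(REGISTRY + path)
        req.add_header("Authorization", TOKEN)
        with urllib.request.urlopen(req) as resp:
            return json.load(resp)

    for ancestor in registry_get("/v1/images/%s/ancestry" % image_id):
        meta = registry_get("/v1/images/%s/json" % ancestor)
        print(ancestor[:12], meta.get("created"))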
|
||||
|
||||
|
||||
2.2 Tags
|
||||
--------
|
||||
|
||||
.. http:get:: /v1/repositories/(namespace)/(repository)/tags
|
||||
|
||||
get all of the tags for the given repo.
|
||||
|
||||
**Example Request**:
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
GET /v1/repositories/foo/bar/tags HTTP/1.1
|
||||
Host: registry-1.docker.io
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Cookie: (Cookie provided by the Registry)
|
||||
|
||||
:parameter namespace: namespace for the repo
|
||||
:parameter repository: name for the repo
|
||||
|
||||
**Example Response**:
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
HTTP/1.1 200
|
||||
Vary: Accept
|
||||
Content-Type: application/json
|
||||
|
||||
{
|
||||
"latest": "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f",
|
||||
“0.1.1”: “b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087”
|
||||
}
|
||||
|
||||
:statuscode 200: OK
|
||||
:statuscode 401: Requires authorization
|
||||
:statuscode 404: Repository not found
|
||||
|
||||
|
||||
.. http:get:: /v1/repositories/(namespace)/(repository)/tags/(tag)
|
||||
|
||||
get a tag for the given repo.
|
||||
|
||||
**Example Request**:
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
GET /v1/repositories/foo/bar/tags/latest HTTP/1.1
|
||||
Host: registry-1.docker.io
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Cookie: (Cookie provided by the Registry)
|
||||
|
||||
:parameter namespace: namespace for the repo
|
||||
:parameter repository: name for the repo
|
||||
:parameter tag: name of tag you want to get
|
||||
|
||||
**Example Response**:
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
HTTP/1.1 200
|
||||
Vary: Accept
|
||||
Content-Type: application/json
|
||||
|
||||
"9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f"
|
||||
|
||||
:statuscode 200: OK
|
||||
:statuscode 401: Requires authorization
|
||||
:statuscode 404: Tag not found
|
||||
|
||||
.. http:delete:: /v1/repositories/(namespace)/(repository)/tags/(tag)
|
||||
|
||||
delete the tag for the repo
|
||||
|
||||
**Example Request**:
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
DELETE /v1/repositories/foo/bar/tags/latest HTTP/1.1
|
||||
Host: registry-1.docker.io
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Cookie: (Cookie provided by the Registry)
|
||||
|
||||
:parameter namespace: namespace for the repo
|
||||
:parameter repository: name for the repo
|
||||
:parameter tag: name of tag you want to delete
|
||||
|
||||
**Example Response**:
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
HTTP/1.1 200
|
||||
Vary: Accept
|
||||
Content-Type: application/json
|
||||
|
||||
""
|
||||
|
||||
:statuscode 200: OK
|
||||
:statuscode 401: Requires authorization
|
||||
:statuscode 404: Tag not found
|
||||
|
||||
|
||||
.. http:put:: /v1/repositories/(namespace)/(repository)/tags/(tag)
|
||||
|
||||
put a tag for the given repo.
|
||||
|
||||
**Example Request**:
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
PUT /v1/repositories/foo/bar/tags/latest HTTP/1.1
|
||||
Host: registry-1.docker.io
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Cookie: (Cookie provided by the Registry)
|
||||
|
||||
“9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f”
|
||||
|
||||
:parameter namespace: namespace for the repo
|
||||
:parameter repository: name for the repo
|
||||
:parameter tag: name of tag you want to add
|
||||
|
||||
**Example Response**:
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
HTTP/1.1 200
|
||||
Vary: Accept
|
||||
Content-Type: application/json
|
||||
|
||||
""
|
||||
|
||||
:statuscode 200: OK
|
||||
:statuscode 400: Invalid data
|
||||
:statuscode 401: Requires authorization
|
||||
:statuscode 404: Image not found
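As a small illustration of the two tag endpoints above (repository name, tag and image id are placeholders; authentication is elided):

.. sourcecode:: python

    # Sketch: list a repository's tags, then point a new tag at an image id,
    # per the GET and PUT tag endpoints described above.
    import json
    import urllib.request

    REGISTRY = "https://registry-1.docker.io"

    with urllib.request.urlopen(REGISTRY + "/v1/repositories/foo/bar/tags") as resp:
        for tag, image_id in json.load(resp).items():
            print(tag, "->", image_id)

    new_id = "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f"
    req = urllib.request.Request(
        REGISTRY + "/v1/repositories/foo/bar/tags/stable",
        data=json.dumps(new_id).encode(), method="PUT")
    req.add_header("Content-Type", "application/json")
    urllib.request.urlopen(req)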
|
||||
|
||||
2.3 Repositories
|
||||
----------------
|
||||
|
||||
.. http:delete:: /v1/repositories/(namespace)/(repository)/
|
||||
|
||||
delete a repository
|
||||
|
||||
**Example Request**:
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
DELETE /v1/repositories/foo/bar/ HTTP/1.1
|
||||
Host: registry-1.docker.io
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Cookie: (Cookie provided by the Registry)
|
||||
|
||||
""
|
||||
|
||||
:parameter namespace: namespace for the repo
|
||||
:parameter repository: name for the repo
|
||||
|
||||
**Example Response**:
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
HTTP/1.1 200
|
||||
Vary: Accept
|
||||
Content-Type: application/json
|
||||
|
||||
""
|
||||
|
||||
:statuscode 200: OK
|
||||
:statuscode 401: Requires authorization
|
||||
:statuscode 404: Repository not found
|
||||
|
||||
3.0 Authorization
|
||||
=================
|
||||
This is where we describe the authorization process, including the tokens and cookies.
|
||||
|
||||
TODO: add more info.
|
||||
@@ -1,6 +1,11 @@
|
||||
===================
|
||||
Docker Registry API
|
||||
===================
|
||||
:title: Registry Documentation
|
||||
:description: Documentation for docker Registry and Registry API
|
||||
:keywords: docker, registry, api, index
|
||||
|
||||
|
||||
=====================
|
||||
Registry & index Spec
|
||||
=====================
|
||||
|
||||
.. contents:: Table of Contents
|
||||
|
||||
@@ -44,7 +49,7 @@ We expect that there will be multiple registries out there. To help to grasp the
|
||||
|
||||
.. note::
|
||||
|
||||
Mirror registries and private registries which do not use the Index don’t even need to run the registry code. They can be implemented by any kind of transport implementing HTTP GET and PUT. Read-only registries can be powered by a simple static HTTP server.
|
||||
Mirror registries and private registries which do not use the Index don’t even need to run the registry code. They can be implemented by any kind of transport implementing HTTP GET and PUT. Read-only registries can be powered by a simple static HTTP server.
|
||||
|
||||
.. note::
|
||||
|
||||
@@ -80,7 +85,7 @@ On top of being a runtime for LXC, Docker is the Registry client. It supports:
|
||||
5. Index returns true/false lettings registry know if it should proceed or error out
|
||||
6. Get the payload for all layers
|
||||
|
||||
It’s possible to run docker pull https://<registry>/repositories/samalba/busybox. In this case, docker bypasses the Index. However the security is not guaranteed (in case Registry A is corrupted) because there won’t be any checksum checks.
|
||||
It’s possible to run docker pull \https://<registry>/repositories/samalba/busybox. In this case, docker bypasses the Index. However the security is not guaranteed (in case Registry A is corrupted) because there won’t be any checksum checks.
|
||||
|
||||
Currently registry redirects to s3 urls for downloads, going forward all downloads need to be streamed through the registry. The Registry will then abstract the calls to S3 by a top-level class which implements sub-classes for S3 and local storage.
|
||||
|
||||
@@ -107,7 +112,7 @@ API (pulling repository foo/bar):
|
||||
Jsonified checksums (see part 4.4.1)
|
||||
|
||||
3. (Docker -> Registry) GET /v1/repositories/foo/bar/tags/latest
|
||||
**Headers**:
|
||||
**Headers**:
|
||||
Authorization: Token signature=123abc,repository=”foo/bar”,access=write
|
||||
|
||||
4. (Registry -> Index) GET /v1/repositories/foo/bar/images
|
||||
@@ -121,10 +126,10 @@ API (pulling repository foo/bar):
|
||||
**Action**:
|
||||
( Lookup token see if they have access to pull.)
|
||||
|
||||
If good:
|
||||
If good:
|
||||
HTTP 200 OK
|
||||
Index will invalidate the token
|
||||
If bad:
|
||||
If bad:
|
||||
HTTP 401 Unauthorized
|
||||
|
||||
5. (Docker -> Registry) GET /v1/images/928374982374/ancestry
|
||||
@@ -186,9 +191,9 @@ API (pushing repos foo/bar):
|
||||
**Headers**:
|
||||
Authorization: Token signature=123abc,repository=”foo/bar”,access=write
|
||||
**Action**::
|
||||
- Index:
|
||||
- Index:
|
||||
will invalidate the token.
|
||||
- Registry:
|
||||
- Registry:
|
||||
grants a session (if token is approved) and fetches the images id
|
||||
|
||||
5. (Docker -> Registry) PUT /v1/images/98765432_parent/json
|
||||
@@ -223,7 +228,7 @@ API (pushing repos foo/bar):
|
||||
**Body**:
|
||||
(The image, id’s, tags and checksums)
|
||||
|
||||
[{“id”: “9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f”,
|
||||
[{“id”: “9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f”,
|
||||
“checksum”: “b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087”}]
|
||||
|
||||
**Return** HTTP 204
|
||||
@@ -234,14 +239,80 @@ API (pushing repos foo/bar):
|
||||
|
||||
If it's a retry on the Registry, Docker has a cookie (provided by the registry after token validation). So the Index won’t have to provide a new token.
|
||||
|
||||
2.3 Delete
|
||||
----------
|
||||
|
||||
If you need to delete something from the index or registry, we need a nice clean way to do that. Here is the workflow.
|
||||
|
||||
1. Docker contacts the index to request a delete of a repository “samalba/busybox” (authentication required with user credentials)
|
||||
2. If authentication works and repository is valid, “samalba/busybox” is marked as deleted and a temporary token is returned
|
||||
3. Send a delete request to the registry for the repository (along with the token)
|
||||
4. Registry A contacts the Index to verify the token (the token must correspond to the repository name)
|
||||
5. Index validates the token. Registry A deletes the repository and everything associated with it.
|
||||
6. Docker contacts the Index to let it know the repository was removed from the registry; the Index then removes all records from the database.
|
||||
|
||||
.. note::
|
||||
|
||||
The Docker client should present an "Are you sure?" prompt to confirm the deletion before starting the process. Once it starts it can't be undone.
|
||||
|
||||
API (deleting repository foo/bar):
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
1. (Docker -> Index) DELETE /v1/repositories/foo/bar/
|
||||
**Headers**:
|
||||
Authorization: Basic sdkjfskdjfhsdkjfh==
|
||||
X-Docker-Token: true
|
||||
|
||||
**Action**::
|
||||
- in the Index, we make sure it is a valid repository, and set it to deleted (logically)
|
||||
|
||||
**Body**::
|
||||
Empty
|
||||
|
||||
2. (Index -> Docker) 202 Accepted
|
||||
**Headers**:
|
||||
- WWW-Authenticate: Token signature=123abc,repository=”foo/bar”,access=delete
|
||||
- X-Docker-Endpoints: registry.docker.io [, registry2.docker.io] # list of endpoints where this repo lives.
|
||||
|
||||
3. (Docker -> Registry) DELETE /v1/repositories/foo/bar/
|
||||
**Headers**:
|
||||
Authorization: Token signature=123abc,repository="foo/bar",access=delete
|
||||
|
||||
4. (Registry->Index) PUT /v1/repositories/foo/bar/auth
|
||||
**Headers**:
|
||||
Authorization: Token signature=123abc,repository="foo/bar",access=delete
|
||||
**Action**::
|
||||
- Index:
|
||||
will invalidate the token.
|
||||
- Registry:
|
||||
deletes the repository (if token is approved)
|
||||
|
||||
5. (Registry -> Docker) 200 OK
|
||||
200 If success
|
||||
403 if forbidden
|
||||
400 if bad request
|
||||
404 if repository isn't found
|
||||
|
||||
6. (Docker -> Index) DELETE /v1/repositories/foo/bar/
|
||||
|
||||
**Headers**:
|
||||
Authorization: Basic 123oislifjsldfj==
|
||||
X-Docker-Endpoints: registry-1.docker.io (no validation on this right now)
|
||||
|
||||
**Body**:
|
||||
Empty
|
||||
|
||||
**Return** HTTP 200
|
||||
|
||||
|
||||
3. How to use the Registry in standalone mode
|
||||
=============================================
|
||||
|
||||
The Index has two main purposes (along with its fancy social features):
|
||||
|
||||
- Resolve short names (to avoid passing absolute URLs all the time)
|
||||
- username/projectname -> https://registry.docker.io/users/<username>/repositories/<projectname>/
|
||||
- team/projectname -> https://registry.docker.io/team/<team>/repositories/<projectname>/
|
||||
- username/projectname -> \https://registry.docker.io/users/<username>/repositories/<projectname>/
|
||||
- team/projectname -> \https://registry.docker.io/team/<team>/repositories/<projectname>/
|
||||
- Authenticate a user as a repos owner (for a central referenced repository)
|
||||
|
||||
3.1 Without an Index
|
||||
@@ -296,7 +367,7 @@ POST /v1/users
|
||||
{"email": "sam@dotcloud.com", "password": "toto42", "username": "foobar"'}
|
||||
|
||||
**Validation**:
|
||||
- **username** : min 4 characters, max 30 characters, all lowercase, no special characters.
|
||||
- **username** : min 4 characters, max 30 characters, must match the regular expression [a-z0-9_].
|
||||
- **password**: min 5 characters
|
||||
|
||||
**Valid**: return HTTP 200
|
||||
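As a rough sketch, the signup call above could be issued with curl as follows (the host is a placeholder):

.. code-block:: bash

    INDEX=https://index.example.com
    curl -X POST -H "Content-Type: application/json" \
         -d '{"email": "sam@dotcloud.com", "password": "toto42", "username": "foobar"}' \
         "$INDEX/v1/users"
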
@@ -340,6 +411,11 @@ GET /v1/users
|
||||
|
||||
The Registry does not know anything about users. Even though repositories are under usernames, it’s just a namespace for the Registry. This allows us to implement organizations or different namespaces per user later, without modifying the Registry’s API.
|
||||
|
||||
The following naming restrictions apply:
|
||||
|
||||
- Namespaces must match the same regular expression as usernames (See 4.2.1.)
|
||||
- Repository names must match the regular expression [a-zA-Z0-9-_.]
|
||||
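A quick local sanity check of candidate names against these patterns (illustrative only; the service performs its own validation):

.. code-block:: bash

    echo "foobar"     | grep -E '^[a-z0-9_]{4,30}$'    # namespace / username
    echo "my-app_1.0" | grep -E '^[a-zA-Z0-9_.-]+$'    # repository name
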
|
||||
4.3.1 Get all tags
|
||||
^^^^^^^^^^^^^^^^^^
|
||||
|
||||
@@ -387,9 +463,27 @@ PUT /v1/repositories/<namespace>/<repo_name>/images
|
||||
|
||||
**Body**:
|
||||
[ {"id": "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f", "checksum": "sha256:b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087"} ]
|
||||
|
||||
|
||||
**Return** 204
|
||||
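A hedged curl sketch of this call, assuming a placeholder host (the actual target follows the push workflow described in section 2):

.. code-block:: bash

    HOST=https://index.example.com
    curl -X PUT -H "Content-Type: application/json" \
         -d '[{"id": "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f", "checksum": "sha256:b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087"}]' \
         "$HOST/v1/repositories/foo/bar/images"
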
|
||||
4.5 Repositories
|
||||
----------------
|
||||
|
||||
4.5.1 Remove a Repository (Registry)
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
DELETE /v1/repositories/<namespace>/<repo_name>
|
||||
|
||||
Return 200 OK
|
||||
|
||||
4.5.2 Remove a Repository (Index)
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
This starts the delete process. See section 2.3 for more details.
|
||||
|
||||
DELETE /v1/repositories/<namespace>/<repo_name>
|
||||
|
||||
Return 202 Accepted
|
||||
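For illustration only, the two delete endpoints side by side with placeholder hosts (see 2.3 for the token workflow that ties them together):

.. code-block:: bash

    curl -X DELETE "https://registry.example.com/v1/repositories/foo/bar"   # Registry: returns 200
    curl -X DELETE "https://index.example.com/v1/repositories/foo/bar"      # Index: returns 202 and starts the workflow
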
|
||||
5. Chaining Registries
|
||||
======================
|
||||
|
||||
@@ -466,3 +560,10 @@ Next request::
|
||||
|
||||
GET /(...)
|
||||
Cookie: session="wD/J7LqL5ctqw8haL10vgfhrb2Q=?foo=UydiYXInCnAxCi4=×tamp=RjEzNjYzMTQ5NDcuNDc0NjQzCi4="
|
||||
|
||||
|
||||
7.0 Document Version
|
||||
---------------------
|
||||
|
||||
- 1.0 : May 6th 2013 : initial release
|
||||
- 1.1 : June 1st 2013 : Added Delete Repository and way to handle new source namespace.
|
||||
@@ -1,130 +0,0 @@
|
||||
==============
|
||||
Docker Builder
|
||||
==============
|
||||
|
||||
.. contents:: Table of Contents
|
||||
|
||||
1. Format
|
||||
=========
|
||||
|
||||
The Docker builder format is quite simple:
|
||||
|
||||
``instruction arguments``
|
||||
|
||||
The first instruction must be `FROM`
|
||||
|
||||
All instructions are to be placed in a file named `Dockerfile`.
|
||||
|
||||
In order to place comments within a Dockerfile, simply prefix the line with "`#`"
|
||||
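Putting these rules together, a minimal illustrative Dockerfile might look like this (the base image and command are examples only)::

    # a comment
    from ubuntu
    run echo "hello from the builder"
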
|
||||
2. Instructions
|
||||
===============
|
||||
|
||||
Docker builder comes with a set of instructions:
|
||||
|
||||
1. FROM: Set which image to build from
|
||||
2. RUN: Execute a command
|
||||
3. INSERT: Insert a remote file (http) into the image
|
||||
|
||||
2.1 FROM
|
||||
--------
|
||||
``FROM <image>``
|
||||
|
||||
The `FROM` instruction must be the first one, so that the Builder knows which image to run the commands from.
|
||||
|
||||
`FROM` can also be used in order to build multiple images within a single Dockerfile
|
||||
|
||||
2.2 MAINTAINER
|
||||
--------------
|
||||
``MAINTAINER <name>``
|
||||
|
||||
The `MAINTAINER` instruction allows you to set the Author field of the generated images.
|
||||
This instruction is never automatically reset.
|
||||
|
||||
2.3 RUN
|
||||
-------
|
||||
``RUN <command>``
|
||||
|
||||
The `RUN` instruction is the main one: it allows you to execute any command on the `FROM` image and to save the results.
|
||||
You can use as many `RUN` instructions as you want within a Dockerfile; each command is executed on the result of the previous one.
|
||||
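For example, each of the following commands runs on the result of the previous one (the package names are examples only)::

    from ubuntu
    run apt-get update
    run apt-get install -y curl
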
|
||||
|
||||
2.4 CMD
|
||||
-------
|
||||
``CMD <command>``
|
||||
|
||||
The `CMD` instruction sets the command to be executed when running the image.
|
||||
It is equivalent to running `docker commit -run '{"Cmd": <command>}'` outside the builder.
|
||||
|
||||
.. note::
|
||||
Do not confuse `RUN` with `CMD`. `RUN` actually runs a command and saves the result; `CMD` does not execute anything at build time.
|
||||
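A small sketch of the difference (the package and command are examples only)::

    # executed at build time; the result is saved into the image
    run apt-get install -y memcached
    # only recorded in the image; executed when a container is started from it
    cmd ["memcached", "-u", "daemon"]
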
|
||||
2.5 EXPOSE
|
||||
----------
|
||||
``EXPOSE <port> [<port>...]``
|
||||
|
||||
The `EXPOSE` instruction sets ports to be publicly exposed when running the image.
|
||||
This is equivalent to running `docker commit -run '{"PortSpecs": ["<port>", "<port2>"]}'` outside the builder.
|
||||
|
||||
2.6 ENV
|
||||
-------
|
||||
``ENV <key> <value>``
|
||||
|
||||
The `ENV` instruction sets the environment variable `<key>` to the value `<value>`. This value will be passed to all future ``RUN`` instructions.
|
||||
|
||||
.. note::
|
||||
The environment variables are local to the Dockerfile; they will not be set as autorun.
|
||||
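For example (the variable name and path are placeholders)::

    env APP_HOME /srv/app
    # the shell started by the next instruction sees $APP_HOME
    run mkdir -p $APP_HOME
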
|
||||
2.7 INSERT
|
||||
----------
|
||||
|
||||
``INSERT <file url> <path>``
|
||||
|
||||
The `INSERT` instruction will download the file at the given url and place it within the image at the given path.
|
||||
|
||||
.. note::
|
||||
The path must include the file name.
|
||||
|
||||
|
||||
3. Dockerfile Examples
|
||||
======================
|
||||
|
||||
::
|
||||
|
||||
# Nginx
|
||||
#
|
||||
# VERSION 0.0.1
|
||||
# DOCKER-VERSION 0.2
|
||||
|
||||
from ubuntu
|
||||
maintainer Guillaume J. Charmes "guillaume@dotcloud.com"
|
||||
|
||||
# make sure the package repository is up to date
|
||||
run echo "deb http://archive.ubuntu.com/ubuntu precise main universe" > /etc/apt/sources.list
|
||||
run apt-get update
|
||||
|
||||
run apt-get install -y inotify-tools nginx apache2 openssh-server
|
||||
insert https://raw.github.com/creack/docker-vps/master/nginx-wrapper.sh /usr/sbin/nginx-wrapper
|
||||
|
||||
::
|
||||
|
||||
# Firefox over VNC
|
||||
#
|
||||
# VERSION 0.3
|
||||
# DOCKER-VERSION 0.2
|
||||
|
||||
from ubuntu
|
||||
# make sure the package repository is up to date
|
||||
run echo "deb http://archive.ubuntu.com/ubuntu precise main universe" > /etc/apt/sources.list
|
||||
run apt-get update
|
||||
|
||||
# Install vnc, xvfb in order to create a 'fake' display and firefox
|
||||
run apt-get install -y x11vnc xvfb firefox
|
||||
run mkdir /.vnc
|
||||
# Setup a password
|
||||
run x11vnc -storepasswd 1234 ~/.vnc/passwd
|
||||
# Autostart firefox (might not be the best way to do it, but it does the trick)
|
||||
run bash -c 'echo "firefox" >> /.bashrc'
|
||||
|
||||
expose 5900
|
||||
cmd ["x11vnc", "-forever", "-usepw", "-create"]
|
||||
@@ -1,14 +0,0 @@
|
||||
:title: docker documentation
|
||||
:description: Documentation for docker builder
|
||||
:keywords: docker, builder, dockerfile
|
||||
|
||||
|
||||
Builder
|
||||
=======
|
||||
|
||||
Contents:
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
basics
|
||||
@@ -4,7 +4,7 @@
|
||||
|
||||
.. _cli:
|
||||
|
||||
Command Line Interface
|
||||
Overview
|
||||
======================
|
||||
|
||||
Docker Usage
|
||||
@@ -14,7 +14,8 @@ To list available commands, either run ``docker`` with no parameters or execute
|
||||
``docker help``::
|
||||
|
||||
$ docker
|
||||
Usage: docker COMMAND [arg...]
|
||||
Usage: docker [OPTIONS] COMMAND [arg...]
|
||||
-H="127.0.0.1:4243": Host:port to bind/connect to
|
||||
|
||||
A self-sufficient runtime for linux containers.
|
||||
|
||||
@@ -24,7 +25,7 @@ Available Commands
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
:maxdepth: 2
|
||||
|
||||
command/attach
|
||||
command/build
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
:title: Attach Command
|
||||
:description: Attach to a running container
|
||||
:keywords: attach, container, docker, documentation
|
||||
|
||||
===========================================
|
||||
``attach`` -- Attach to a running container
|
||||
===========================================
|
||||
|
||||
@@ -1,9 +1,33 @@
|
||||
========================================================
|
||||
``build`` -- Build a container from Dockerfile via stdin
|
||||
========================================================
|
||||
:title: Build Command
|
||||
:description: Build a new image from the Dockerfile passed via stdin
|
||||
:keywords: build, docker, container, documentation
|
||||
|
||||
================================================
|
||||
``build`` -- Build a container from a Dockerfile
|
||||
================================================
|
||||
|
||||
::
|
||||
|
||||
Usage: docker build -
|
||||
Example: cat Dockerfile | docker build -
|
||||
Build a new image from the Dockerfile passed via stdin
|
||||
Usage: docker build [OPTIONS] PATH | -
|
||||
Build a new container image from the source code at PATH
|
||||
-t="": Tag to be applied to the resulting image in case of success.
|
||||
|
||||
Examples
|
||||
--------
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
docker build .
|
||||
|
||||
| This will read the Dockerfile from the current directory. It will also send any other files and directories found in the current directory to the docker daemon.
|
||||
| The contents of this directory would be used by ADD commands found within the Dockerfile.
|
||||
| This will send a lot of data to the docker daemon if the current directory contains a lot of data.
|
||||
| If the absolute path is provided instead of '.', only the files and directories required by the ADD commands from the Dockerfile will be added to the context and transferred to the docker daemon.
|
||||
|
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
docker build -
|
||||
|
||||
| This will read a Dockerfile from Stdin without context. Due to the lack of a context, no contents of any local directory will be sent to the docker daemon.
|
||||
| ADD doesn't work when running in this mode due to the absence of the context, thus having no source files to copy to the container.
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
:title: Commit Command
|
||||
:description: Create a new image from a container's changes
|
||||
:keywords: commit, docker, container, documentation
|
||||
|
||||
===========================================================
|
||||
``commit`` -- Create a new image from a container's changes
|
||||
===========================================================
|
||||
@@ -16,6 +20,7 @@ Full -run example::
|
||||
|
||||
{"Hostname": "",
|
||||
"User": "",
|
||||
"CpuShares": 0,
|
||||
"Memory": 0,
|
||||
"MemorySwap": 0,
|
||||
"PortSpecs": ["22", "80", "443"],
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
:title: Diff Command
|
||||
:description: Inspect changes on a container's filesystem
|
||||
:keywords: diff, docker, container, documentation
|
||||
|
||||
=======================================================
|
||||
``diff`` -- Inspect changes on a container's filesystem
|
||||
=======================================================
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
:title: Export Command
|
||||
:description: Export the contents of a filesystem as a tar archive
|
||||
:keywords: export, docker, container, documentation
|
||||
|
||||
=================================================================
|
||||
``export`` -- Stream the contents of a container as a tar archive
|
||||
=================================================================
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
:title: History Command
|
||||
:description: Show the history of an image
|
||||
:keywords: history, docker, container, documentation
|
||||
|
||||
===========================================
|
||||
``history`` -- Show the history of an image
|
||||
===========================================
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
:title: Images Command
|
||||
:description: List images
|
||||
:keywords: images, docker, container, documentation
|
||||
|
||||
=========================
|
||||
``images`` -- List images
|
||||
=========================
|
||||
|
||||
@@ -1,9 +1,40 @@
|
||||
:title: Import Command
|
||||
:description: Create a new filesystem image from the contents of a tarball
|
||||
:keywords: import, tarball, docker, url, documentation
|
||||
|
||||
==========================================================================
|
||||
``import`` -- Create a new filesystem image from the contents of a tarball
|
||||
==========================================================================
|
||||
|
||||
::
|
||||
|
||||
Usage: docker import [OPTIONS] URL|- [REPOSITORY [TAG]]
|
||||
Usage: docker import URL|- [REPOSITORY [TAG]]
|
||||
|
||||
Create a new filesystem image from the contents of a tarball
|
||||
|
||||
At this time, the URL must start with ``http`` and point to a single file archive (.tar, .tar.gz, .bzip)
|
||||
containing a root filesystem. If you would like to import from a local directory or archive,
|
||||
you can use the ``-`` parameter to take the data from standard in.
|
||||
|
||||
Examples
|
||||
--------
|
||||
|
||||
Import from a remote location
|
||||
.............................
|
||||
|
||||
``$ docker import http://example.com/exampleimage.tgz exampleimagerepo``
|
||||
|
||||
Import from a local file
|
||||
........................
|
||||
|
||||
Import to docker via pipe and standard in
|
||||
|
||||
``$ cat exampleimage.tgz | docker import - exampleimagelocal``
|
||||
|
||||
Import from a local directory
|
||||
.............................
|
||||
|
||||
``$ sudo tar -c . | docker import - exampleimagedir``
|
||||
|
||||
Note the ``sudo`` in this example -- you must preserve the ownership of the files (especially root ownership)
|
||||
during the archiving with tar. If you are not root (or sudo) when you tar, then the ownerships might not get preserved.
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
:title: Info Command
|
||||
:description: Display system-wide information.
|
||||
:keywords: info, docker, information, documentation
|
||||
|
||||
===========================================
|
||||
``info`` -- Display system-wide information
|
||||
===========================================
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
:title: Inspect Command
|
||||
:description: Return low-level information on a container
|
||||
:keywords: inspect, container, docker, documentation
|
||||
|
||||
==========================================================
|
||||
``inspect`` -- Return low-level information on a container
|
||||
==========================================================
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
:title: Kill Command
|
||||
:description: Kill a running container
|
||||
:keywords: kill, container, docker, documentation
|
||||
|
||||
====================================
|
||||
``kill`` -- Kill a running container
|
||||
====================================
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
:title: Login Command
|
||||
:description: Register or Login to the docker registry server
|
||||
:keywords: login, docker, documentation
|
||||
|
||||
============================================================
|
||||
``login`` -- Register or Login to the docker registry server
|
||||
============================================================
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
:title: Logs Command
|
||||
:description: Fetch the logs of a container
|
||||
:keywords: logs, container, docker, documentation
|
||||
|
||||
=========================================
|
||||
``logs`` -- Fetch the logs of a container
|
||||
=========================================
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
:title: Port Command
|
||||
:description: Lookup the public-facing port which is NAT-ed to PRIVATE_PORT
|
||||
:keywords: port, docker, container, documentation
|
||||
|
||||
=========================================================================
|
||||
``port`` -- Lookup the public-facing port which is NAT-ed to PRIVATE_PORT
|
||||
=========================================================================
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
:title: Ps Command
|
||||
:description: List containers
|
||||
:keywords: ps, docker, documentation, container
|
||||
|
||||
=========================
|
||||
``ps`` -- List containers
|
||||
=========================
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
:title: Pull Command
|
||||
:description: Pull an image or a repository from the registry
|
||||
:keywords: pull, image, repo, repository, documentation, docker
|
||||
|
||||
=========================================================================
|
||||
``pull`` -- Pull an image or a repository from the docker registry server
|
||||
=========================================================================
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
:title: Push Command
|
||||
:description: Push an image or a repository to the registry
|
||||
:keywords: push, docker, image, repository, documentation, repo
|
||||
|
||||
=======================================================================
|
||||
``push`` -- Push an image or a repository to the docker registry server
|
||||
=======================================================================
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
:title: Restart Command
|
||||
:description: Restart a running container
|
||||
:keywords: restart, container, docker, documentation
|
||||
|
||||
==========================================
|
||||
``restart`` -- Restart a running container
|
||||
==========================================
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
:title: Rm Command
|
||||
:description: Remove a container
|
||||
:keywords: remove, container, docker, documentation, rm
|
||||
|
||||
============================
|
||||
``rm`` -- Remove a container
|
||||
============================
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
:title: Rmi Command
|
||||
:description: Remove an image
|
||||
:keywords: rmi, remove, image, docker, documentation
|
||||
|
||||
==========================
|
||||
``rmi`` -- Remove an image
|
||||
==========================
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
:title: Run Command
|
||||
:description: Run a command in a new container
|
||||
:keywords: run, container, docker, documentation
|
||||
|
||||
===========================================
|
||||
``run`` -- Run a command in a new container
|
||||
===========================================
|
||||
@@ -9,6 +13,7 @@
|
||||
Run a command in a new container
|
||||
|
||||
-a=map[]: Attach to stdin, stdout or stderr.
|
||||
-c=0: CPU shares (relative weight)
|
||||
-d=false: Detached mode: leave the container running in the background
|
||||
-e=[]: Set environment variables
|
||||
-h="": Container host name
|
||||
@@ -18,5 +23,5 @@
|
||||
-t=false: Allocate a pseudo-tty
|
||||
-u="": Username or UID
|
||||
-d=[]: Set custom dns servers for the container
|
||||
-v=[]: Creates a new volumes and mount it at the specified path.
|
||||
-v=[]: Creates a new volume and mounts it at the specified path.
|
||||
-volumes-from="": Mount all volumes from the given container.
|
||||
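A hedged example combining several of the flags above (image name and values are placeholders):

.. code-block:: bash

    # interactive shell with a custom hostname, an environment variable,
    # and a new volume mounted at /data
    docker run -i -t -h demo -e FOO=bar -v /data ubuntu /bin/bash
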
|
||||
@@ -1,3 +1,7 @@
|
||||
:title: Search Command
|
||||
:description: Searches for the TERM parameter on the Docker index and prints out a list of repositories that match.
|
||||
:keywords: search, docker, image, documentation
|
||||
|
||||
===================================================================
|
||||
``search`` -- Search for an image in the docker index
|
||||
===================================================================
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
:title: Start Command
|
||||
:description: Start a stopped container
|
||||
:keywords: start, docker, container, documentation
|
||||
|
||||
======================================
|
||||
``start`` -- Start a stopped container
|
||||
======================================
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
:title: Stop Command
|
||||
:description: Stop a running container
|
||||
:keywords: stop, container, docker, documentation
|
||||
|
||||
====================================
|
||||
``stop`` -- Stop a running container
|
||||
====================================
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
:title: Tag Command
|
||||
:description: Tag an image into a repository
|
||||
:keywords: tag, docker, image, repository, documentation, repo
|
||||
|
||||
=========================================
|
||||
``tag`` -- Tag an image into a repository
|
||||
=========================================
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
:title: Version Command
|
||||
:description:
|
||||
:keywords: version, docker, documentation
|
||||
|
||||
==================================================
|
||||
``version`` -- Show the docker version information
|
||||
==================================================
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
:title: Wait Command
|
||||
:description: Block until a container stops, then print its exit code.
|
||||
:keywords: wait, docker, container, documentation
|
||||
|
||||
===================================================================
|
||||
``wait`` -- Block until a container stops, then print its exit code
|
||||
===================================================================
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
:title: docker documentation
|
||||
:title: Commands
|
||||
:description: -- todo: change me
|
||||
:keywords: todo: change me
|
||||
:keywords: todo, commands, command line, help, docker, documentation
|
||||
|
||||
|
||||
Commands
|
||||
@@ -9,8 +9,33 @@ Commands
|
||||
Contents:
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 3
|
||||
:maxdepth: 1
|
||||
|
||||
basics
|
||||
workingwithrepository
|
||||
cli
|
||||
cli
|
||||
attach <command/attach>
|
||||
build <command/build>
|
||||
commit <command/commit>
|
||||
diff <command/diff>
|
||||
export <command/export>
|
||||
history <command/history>
|
||||
images <command/images>
|
||||
import <command/import>
|
||||
info <command/info>
|
||||
inspect <command/inspect>
|
||||
kill <command/kill>
|
||||
login <command/login>
|
||||
logs <command/logs>
|
||||
port <command/port>
|
||||
ps <command/ps>
|
||||
pull <command/pull>
|
||||
push <command/push>
|
||||
restart <command/restart>
|
||||
rm <command/rm>
|
||||
rmi <command/rmi>
|
||||
run <command/run>
|
||||
search <command/search>
|
||||
start <command/start>
|
||||
stop <command/stop>
|
||||
tag <command/tag>
|
||||
version <command/version>
|
||||
wait <command/wait>
|
||||
@@ -1,42 +0,0 @@
|
||||
.. _working_with_the_repository:
|
||||
|
||||
Working with the repository
|
||||
============================
|
||||
|
||||
Connecting to the repository
|
||||
----------------------------
|
||||
|
||||
You create a user on the central docker repository by running
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
docker login
|
||||
|
||||
|
||||
If your username does not exist it will prompt you to also enter a password and your e-mail address. It will then
|
||||
automatically log you in.
|
||||
|
||||
|
||||
Committing a container to a named image
|
||||
---------------------------------------
|
||||
|
||||
In order to push to the repository, you first need to commit your container to an image within your namespace.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
# for example docker commit $CONTAINER_ID dhrp/kickassapp
|
||||
docker commit <container_id> <your username>/<some_name>
|
||||
|
||||
|
||||
Pushing a container to the repository
|
||||
-----------------------------------------
|
||||
|
||||
In order to push an image to the repository you need to have committed your container to a named image (see above)
|
||||
|
||||
Now you can push this image to the repository
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
# for example docker push dhrp/kickassapp
|
||||
docker push <image-name>
|
||||
|
||||
@@ -1,25 +0,0 @@
|
||||
:title: Building blocks
|
||||
:description: An introduction to docker and standard containers?
|
||||
:keywords: containers, lxc, concepts, explanation
|
||||
|
||||
|
||||
Building blocks
|
||||
===============
|
||||
|
||||
.. _images:
|
||||
|
||||
Images
|
||||
------
|
||||
An original container image. These are stored on disk and are comparable with what you normally expect from a stopped virtual machine image. Images are stored in (and retrieved from) a repository.
|
||||
|
||||
Images are stored on your local file system under /var/lib/docker/graph
|
||||
|
||||
|
||||
.. _containers:
|
||||
|
||||
Containers
|
||||
----------
|
||||
A container is a local version of an image. It can be running or stopped; the equivalent would be a virtual machine instance.
|
||||
|
||||
Containers are stored on your local file system under /var/lib/docker/containers
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
:title: Introduction
|
||||
:description: An introduction to docker and standard containers?
|
||||
:keywords: containers, lxc, concepts, explanation
|
||||
:keywords: containers, lxc, concepts, explanation, docker, documentation
|
||||
|
||||
|
||||
:note: This version of the introduction is temporary, just to make sure we don't break the links from the website when the documentation is updated
|
||||
|
||||
|
(binary image file; 194 KiB before and after)
@@ -1,6 +1,6 @@
|
||||
:title: docker documentation
|
||||
:title: Concepts
|
||||
:description: -- todo: change me
|
||||
:keywords: todo: change me
|
||||
:keywords: concepts, documentation, docker, containers
|
||||
|
||||
|
||||
|
||||
@@ -12,6 +12,5 @@ Contents:
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
|
||||
introduction
|
||||
buildingblocks
|
||||
../index
|
||||
|
||||
|
||||
@@ -2,13 +2,11 @@
|
||||
:description: An introduction to docker and standard containers?
|
||||
:keywords: containers, lxc, concepts, explanation
|
||||
|
||||
.. _introduction:
|
||||
|
||||
Introduction
|
||||
============
|
||||
|
||||
Docker - The Linux container runtime
|
||||
------------------------------------
|
||||
Docker -- The Linux container runtime
|
||||
-------------------------------------
|
||||
|
||||
Docker complements LXC with a high-level API which operates at the process level. It runs unix processes with strong guarantees of isolation and repeatability across servers.
|
||||
|
||||
@@ -20,7 +18,7 @@ Docker is a great building block for automating distributed systems: large-scale
|
||||
- **Isolation** docker isolates processes from each other and from the underlying host, using lightweight containers.
|
||||
- **Repeatability** Because containers are isolated in their own filesystem, they behave the same regardless of where, when, and alongside what they run.
|
||||
|
||||
.. image:: http://www.docker.io/_static/lego_docker.jpg
|
||||
.. image:: images/lego_docker.jpg
|
||||
|
||||
|
||||
What is a Standard Container?
|
||||
|
||||
@@ -41,7 +41,7 @@ html_add_permalinks = None
|
||||
|
||||
|
||||
# The master toctree document.
|
||||
master_doc = 'index'
|
||||
master_doc = 'toctree'
|
||||
|
||||
# General information about the project.
|
||||
project = u'Docker'
|
||||
|
||||
@@ -1,101 +1,9 @@
|
||||
:title: Contribution Guidelines
|
||||
:description: Contribution guidelines: create issues, conventions, pull requests
|
||||
:keywords: contributing, docker, documentation, help, guideline
|
||||
|
||||
Contributing to Docker
|
||||
======================
|
||||
|
||||
Want to hack on Docker? Awesome! There are instructions to get you
|
||||
started on the website: http://docker.io/gettingstarted.html
|
||||
Want to hack on Docker? Awesome! The repository includes `all the instructions you need to get started <https://github.com/dotcloud/docker/blob/master/CONTRIBUTING.md>`_.
|
||||
|
||||
They are probably not perfect; please let us know if anything feels
|
||||
wrong or incomplete.
|
||||
|
||||
Contribution guidelines
|
||||
-----------------------
|
||||
|
||||
Pull requests are always welcome
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
We are always thrilled to receive pull requests, and do our best to
|
||||
process them as fast as possible. Not sure if that typo is worth a pull
|
||||
request? Do it! We will appreciate it.
|
||||
|
||||
If your pull request is not accepted on the first try, don't be
|
||||
discouraged! If there's a problem with the implementation, hopefully you
|
||||
received feedback on what to improve.
|
||||
|
||||
We're trying very hard to keep Docker lean and focused. We don't want it
|
||||
to do everything for everybody. This means that we might decide against
|
||||
incorporating a new feature. However, there might be a way to implement
|
||||
that feature *on top of* docker.
|
||||
|
||||
Discuss your design on the mailing list
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
We recommend discussing your plans `on the mailing
|
||||
list <https://groups.google.com/forum/?fromgroups#!forum/docker-club>`__
|
||||
before starting to code - especially for more ambitious contributions.
|
||||
This gives other contributors a chance to point you in the right
|
||||
direction, give feedback on your design, and maybe point out if someone
|
||||
else is working on the same thing.
|
||||
|
||||
Create issues...
|
||||
~~~~~~~~~~~~~~~~
|
||||
|
||||
Any significant improvement should be documented as `a github
|
||||
issue <https://github.com/dotcloud/docker/issues>`__ before anybody
|
||||
starts working on it.
|
||||
|
||||
...but check for existing issues first!
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Please take a moment to check that an issue doesn't already exist
|
||||
documenting your bug report or improvement proposal. If it does, it
|
||||
never hurts to add a quick "+1" or "I have this problem too". This will
|
||||
help prioritize the most common problems and requests.
|
||||
|
||||
Conventions
|
||||
~~~~~~~~~~~
|
||||
|
||||
Fork the repo and make changes on your fork in a feature branch:
|
||||
|
||||
- If it's a bugfix branch, name it XXX-something where XXX is the number of the
|
||||
issue
|
||||
- If it's a feature branch, create an enhancement issue to announce your
|
||||
intentions, and name it XXX-something where XXX is the number of the issue.
|
||||
|
||||
Submit unit tests for your changes. Go has a great test framework built in; use
|
||||
it! Take a look at existing tests for inspiration. Run the full test suite on
|
||||
your branch before submitting a pull request.
|
||||
|
||||
Make sure you include relevant updates or additions to documentation when
|
||||
creating or modifying features.
|
||||
|
||||
Write clean code. Universally formatted code promotes ease of writing, reading,
|
||||
and maintenance. Always run ``go fmt`` before committing your changes. Most
|
||||
editors have plugins that do this automatically, and there's also a git
|
||||
pre-commit hook:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
curl -o .git/hooks/pre-commit https://raw.github.com/edsrzf/gofmt-git-hook/master/fmt-check && chmod +x .git/hooks/pre-commit
|
||||
|
||||
|
||||
Pull requests descriptions should be as clear as possible and include a
|
||||
reference to all the issues that they address.
|
||||
|
||||
Code review comments may be added to your pull request. Discuss, then make the
|
||||
suggested modifications and push additional commits to your feature branch. Be
|
||||
sure to post a comment after pushing. The new commits will show up in the pull
|
||||
request automatically, but the reviewers will not be notified unless you
|
||||
comment.
|
||||
|
||||
Before the pull request is merged, make sure that you squash your commits into
|
||||
logical units of work using ``git rebase -i`` and ``git push -f``. After every
|
||||
commit the test suite should be passing. Include documentation changes in the
|
||||
same commit so that a revert would remove all traces of the feature or fix.
|
||||
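A typical sequence, assuming the hypothetical branch name from the conventions above:

.. code-block:: bash

    # squash work-in-progress commits into logical units, then update the pull request
    git rebase -i master
    git push -f origin 1234-fix-pull-timeout
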
|
||||
Commits that fix or close an issue should include a reference like ``Closes #XXX``
|
||||
or ``Fixes #XXX``, which will automatically close the issue when merged.
|
||||
|
||||
Add your name to the AUTHORS file, but make sure the list is sorted and your
|
||||
name and email address match your git configuration. The AUTHORS file is
|
||||
regenerated occasionally from the git commit history, so a mismatch may result
|
||||
in your changes being overwritten.
|
||||
|
||||
@@ -1,22 +1,46 @@
|
||||
:title: Setting up a dev environment
|
||||
:title: Setting Up a Dev Environment
|
||||
:description: Guides on how to contribute to docker
|
||||
:keywords: Docker, documentation, developers, contributing, dev environment
|
||||
|
||||
Setting up a dev environment
|
||||
Setting Up a Dev Environment
|
||||
============================
|
||||
|
||||
Instructions that have been verified to work on Ubuntu 12.10,
|
||||
Instructions that have been verified to work on Ubuntu Precise 12.04 (LTS) (64-bit),
|
||||
|
||||
|
||||
Dependencies
|
||||
------------
|
||||
|
||||
**Linux kernel 3.8**
|
||||
|
||||
Due to a bug in LXC, docker works best on the 3.8 kernel. Precise comes with a 3.2 kernel, so we need to upgrade it. The kernel we install comes with AUFS built in.
|
||||
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo apt-get -y install lxc wget bsdtar curl golang git
|
||||
# install the backported kernel
|
||||
sudo apt-get update && sudo apt-get install linux-image-generic-lts-raring
|
||||
|
||||
# reboot
|
||||
sudo reboot
|
||||
|
||||
|
||||
Installation
|
||||
------------
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo apt-get install python-software-properties
|
||||
sudo add-apt-repository ppa:gophers/go
|
||||
sudo apt-get update
|
||||
sudo apt-get -y install lxc wget bsdtar curl golang-stable git aufs-tools
|
||||
|
||||
export GOPATH=~/go/
|
||||
export PATH=$GOPATH/bin:$PATH
|
||||
|
||||
mkdir -p $GOPATH/src/github.com/dotcloud
|
||||
cd $GOPATH/src/github.com/dotcloud
|
||||
git clone git@github.com:dotcloud/docker.git
|
||||
git clone git://github.com/dotcloud/docker.git
|
||||
cd docker
|
||||
|
||||
go get -v github.com/dotcloud/docker/...
|
||||
|
||||
@@ -4,8 +4,8 @@
|
||||
|
||||
.. _running_couchdb_service:
|
||||
|
||||
Create a CouchDB service
|
||||
======================
|
||||
CouchDB Service
|
||||
===============
|
||||
|
||||
.. include:: example_header.inc
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
:title: Docker Examples
|
||||
:description: Examples on how to use Docker
|
||||
:keywords: docker, hello world, examples
|
||||
:keywords: docker, hello world, node, nodejs, python, couch, couchdb, redis, ssh, sshd, examples
|
||||
|
||||
|
||||
|
||||
@@ -16,6 +16,7 @@ Contents:
|
||||
hello_world
|
||||
hello_world_daemon
|
||||
python_web_app
|
||||
nodejs_web_app
|
||||
running_redis_service
|
||||
running_ssh_service
|
||||
couchdb_data_volumes
|
||||
|
||||
docs/sources/examples/nodejs_web_app.rst (new file, 236 lines)
@@ -0,0 +1,236 @@
|
||||
:title: Running a Node.js app on CentOS
|
||||
:description: Installing and running a Node.js app on CentOS
|
||||
:keywords: docker, example, package installation, node, centos
|
||||
|
||||
.. _nodejs_web_app:
|
||||
|
||||
Node.js Web App
|
||||
===============
|
||||
|
||||
.. include:: example_header.inc
|
||||
|
||||
The goal of this example is to show you how you can build your own docker images
|
||||
from a parent image using a ``Dockerfile`` . We will do that by making a simple
|
||||
Node.js hello world web application running on CentOS. You can get the full
|
||||
source code at https://github.com/gasi/docker-node-hello.
|
||||
|
||||
Create Node.js app
|
||||
++++++++++++++++++
|
||||
|
||||
First, create a ``package.json`` file that describes your app and its
|
||||
dependencies:
|
||||
|
||||
.. code-block:: json
|
||||
|
||||
{
|
||||
"name": "docker-centos-hello",
|
||||
"private": true,
|
||||
"version": "0.0.1",
|
||||
"description": "Node.js Hello World app on CentOS using docker",
|
||||
"author": "Daniel Gasienica <daniel@gasienica.ch>",
|
||||
"dependencies": {
|
||||
"express": "3.2.4"
|
||||
}
|
||||
}
|
||||
|
||||
Then, create an ``index.js`` file that defines a web app using the
|
||||
`Express.js <http://expressjs.com/>`_ framework:
|
||||
|
||||
.. code-block:: javascript
|
||||
|
||||
var express = require('express');
|
||||
|
||||
// Constants
|
||||
var PORT = 8080;
|
||||
|
||||
// App
|
||||
var app = express();
|
||||
app.get('/', function (req, res) {
|
||||
res.send('Hello World\n');
|
||||
});
|
||||
|
||||
app.listen(PORT)
|
||||
console.log('Running on http://localhost:' + PORT);
|
||||
|
||||
|
||||
In the next steps, we’ll look at how you can run this app inside a CentOS
|
||||
container using docker. First, you’ll need to build a docker image of your app.
|
||||
|
||||
Creating a ``Dockerfile``
|
||||
+++++++++++++++++++++++++
|
||||
|
||||
Create an empty file called ``Dockerfile``:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
touch Dockerfile
|
||||
|
||||
Open the ``Dockerfile`` in your favorite text editor and add the following line
|
||||
that defines the version of docker the image requires to build
|
||||
(this example uses docker 0.3.4):
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
# DOCKER-VERSION 0.3.4
|
||||
|
||||
Next, define the parent image you want to use to build your own image on top of.
|
||||
Here, we’ll use `CentOS <https://index.docker.io/_/centos/>`_ (tag: ``6.4``)
|
||||
available on the `docker index`_:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
FROM centos:6.4
|
||||
|
||||
Since we’re building a Node.js app, you’ll have to install Node.js as well as
|
||||
npm on your CentOS image. Node.js is required to run your app and npm to install
|
||||
your app’s dependencies defined in ``package.json``.
|
||||
To install the right package for CentOS, we’ll use the instructions from the
|
||||
`Node.js wiki`_:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
# Enable EPEL for Node.js
|
||||
RUN rpm -Uvh http://download.fedoraproject.org/pub/epel/6/i386/epel-release-6-8.noarch.rpm
|
||||
# Install Node.js and npm
|
||||
RUN yum install -y npm-1.2.17-5.el6
|
||||
|
||||
To bundle your app’s source code inside the docker image, use the ``ADD``
|
||||
command:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
# Bundle app source
|
||||
ADD . /src
|
||||
|
||||
Install your app dependencies using npm:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
# Install app dependencies
|
||||
RUN cd /src; npm install
|
||||
|
||||
Your app binds to port ``8080`` so you’ll use the ``EXPOSE`` command to have it
|
||||
mapped by the docker daemon:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
EXPOSE 8080
|
||||
|
||||
Last but not least, define the command to run your app using ``CMD`` which
|
||||
defines your runtime, i.e. ``node``, and the path to our app, i.e.
|
||||
``src/index.js`` (see the step where we added the source to the container):
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
CMD ["node", "/src/index.js"]
|
||||
|
||||
Your ``Dockerfile`` should now look like this:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
|
||||
# DOCKER-VERSION 0.3.4
|
||||
FROM centos:6.4
|
||||
|
||||
# Enable EPEL for Node.js
|
||||
RUN rpm -Uvh http://download.fedoraproject.org/pub/epel/6/i386/epel-release-6-8.noarch.rpm
|
||||
# Install Node.js and npm
|
||||
RUN yum install -y npm-1.2.17-5.el6
|
||||
|
||||
# Bundle app source
|
||||
ADD . /src
|
||||
# Install app dependencies
|
||||
RUN cd /src; npm install
|
||||
|
||||
EXPOSE 8080
|
||||
CMD ["node", "/src/index.js"]
|
||||
|
||||
|
||||
Building your image
|
||||
+++++++++++++++++++
|
||||
|
||||
Go to the directory that has your ``Dockerfile`` and run the following command
|
||||
to build a docker image. The ``-t`` flag lets you tag your image so it’s easier
|
||||
to find later using the ``docker images`` command:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
docker build -t <your username>/centos-node-hello .
|
||||
|
||||
Your image will now be listed by docker:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
docker images
|
||||
|
||||
> # Example
|
||||
> REPOSITORY TAG ID CREATED
|
||||
> centos 6.4 539c0211cd76 8 weeks ago
|
||||
> gasi/centos-node-hello latest d64d3505b0d2 2 hours ago
|
||||
|
||||
|
||||
Run the image
|
||||
+++++++++++++
|
||||
|
||||
Running your image with ``-d`` runs the container in detached mode, leaving the
|
||||
container running in the background. Run the image you previously built:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
docker run -d <your username>/centos-node-hello
|
||||
|
||||
Print the output of your app:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
# Get container ID
|
||||
docker ps
|
||||
|
||||
# Print app output
|
||||
docker logs <container id>
|
||||
|
||||
> # Example
|
||||
> Running on http://localhost:8080
|
||||
|
||||
|
||||
Test
|
||||
++++
|
||||
|
||||
To test your app, get the port of your app that docker mapped:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
docker ps
|
||||
|
||||
> # Example
|
||||
> ID IMAGE COMMAND ... PORTS
|
||||
> ecce33b30ebf gasi/centos-node-hello:latest node /src/index.js 49160->8080
|
||||
|
||||
In the example above, docker mapped the ``8080`` port of the container to
|
||||
``49160``.
|
||||
|
||||
Now you can call your app using ``curl`` (install if needed via:
|
||||
``sudo apt-get install curl``):
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
curl -i localhost:49160
|
||||
|
||||
> HTTP/1.1 200 OK
|
||||
> X-Powered-By: Express
|
||||
> Content-Type: text/html; charset=utf-8
|
||||
> Content-Length: 12
|
||||
> Date: Sun, 02 Jun 2013 03:53:22 GMT
|
||||
> Connection: keep-alive
|
||||
>
|
||||
> Hello World
|
||||
|
||||
We hope this tutorial helped you get up and running with Node.js and CentOS on
|
||||
docker. You can get the full source code at
|
||||
https://github.com/gasi/docker-node-hello.
|
||||
|
||||
Continue to :ref:`running_redis_service`.
|
||||
|
||||
|
||||
.. _Node.js wiki: https://github.com/joyent/node/wiki/Installing-Node.js-via-package-manager#rhelcentosscientific-linux-6
|
||||
.. _docker index: https://index.docker.io/
|
||||
@@ -4,8 +4,8 @@
|
||||
|
||||
.. _python_web_app:
|
||||
|
||||
Building a python web app
|
||||
=========================
|
||||
Python Web App
|
||||
==============
|
||||
|
||||
.. include:: example_header.inc
|
||||
|
||||
@@ -40,7 +40,7 @@ We attach to the new container to see what is going on. Ctrl-C to disconnect
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
BUILD_IMG=$(docker commit $BUILD_JOB _/builds/github.com/hykes/helloflask/master)
|
||||
BUILD_IMG=$(docker commit $BUILD_JOB _/builds/github.com/shykes/helloflask/master)
|
||||
|
||||
Save the changes we just made in the container to a new image called "_/builds/github.com/hykes/helloflask/master" and save the image id in the BUILD_IMG variable name.
|
||||
|
||||
@@ -70,7 +70,8 @@ lookup the public-facing port which is NAT-ed store the private port used by the
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
curl http://`hostname`:$WEB_PORT
|
||||
# install curl if necessary, then ...
|
||||
curl http://127.0.0.1:$WEB_PORT
|
||||
Hello world!
|
||||
|
||||
access the web app using curl. If everything worked as planned you should see the line "Hello world!" inside of your console.
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
|
||||
.. _running_examples:
|
||||
|
||||
Running The Examples
|
||||
Running the Examples
|
||||
--------------------
|
||||
|
||||
All the examples assume your machine is running the docker daemon. To run the docker daemon in the background, simply type:
|
||||
|
||||
@@ -4,8 +4,8 @@
|
||||
|
||||
.. _running_redis_service:
|
||||
|
||||
Create a redis service
|
||||
======================
|
||||
Redis Service
|
||||
=============
|
||||
|
||||
.. include:: example_header.inc
|
||||
|
||||
@@ -34,7 +34,7 @@ Snapshot the installation
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
docker ps -a # grab the container id (this will be the last one in the list)
|
||||
docker ps -a # grab the container id (this will be the first one in the list)
|
||||
docker commit <container_id> <your username>/redis
|
||||
|
||||
Run the service
|
||||
|
||||
@@ -4,8 +4,8 @@
|
||||
|
||||
.. _running_ssh_service:
|
||||
|
||||
Create an ssh daemon service
|
||||
============================
|
||||
SSH Daemon Service
|
||||
==================
|
||||
|
||||
.. include:: example_header.inc
|
||||
|
||||
@@ -20,8 +20,7 @@ minutes and not entirely smooth, but gives you a good idea.
|
||||
<div style="margin-top:10px;">
|
||||
<iframe width="800" height="400" src="http://ascii.io/a/2637/raw" frameborder="0"></iframe>
|
||||
</div>
|
||||
|
||||
|
||||
|
||||
You can also get this sshd container by using
|
||||
::
|
||||
|
||||
@@ -30,3 +29,49 @@ You can also get this sshd container by using
|
||||
|
||||
The password is 'screencast'
|
||||
|
||||
**Video's Transcription:**
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
# Hello! We are going to try and install openssh on a container and run it as a service
|
||||
# let's pull base to get a base ubuntu image.
|
||||
$ docker pull base
|
||||
# I had it so it was quick
|
||||
# now let's connect using -i for interactive and with -t for terminal
|
||||
# we execute /bin/bash to get a prompt.
|
||||
$ docker run -i -t base /bin/bash
|
||||
# now let's commit it
|
||||
# which container was it?
|
||||
$ docker ps -a |more
|
||||
$ docker commit a30a3a2f2b130749995f5902f079dc6ad31ea0621fac595128ec59c6da07feea dhrp/sshd
|
||||
# I gave the name dhrp/sshd for the container
|
||||
# now we can run it again
|
||||
$ docker run -d dhrp/sshd /usr/sbin/sshd -D # D for daemon mode
|
||||
# is it running?
|
||||
$ docker ps
|
||||
# yes!
|
||||
# let's stop it
|
||||
$ docker stop 0ebf7cec294755399d063f4b1627980d4cbff7d999f0bc82b59c300f8536a562
|
||||
$ docker ps
|
||||
# and reconnect, but now open a port to it
|
||||
$ docker run -d -p 22 dhrp/sshd /usr/sbin/sshd -D
|
||||
$ docker port b2b407cf22cf8e7fa3736fa8852713571074536b1d31def3fdfcd9fa4fd8c8c5 22
|
||||
# it has now given us a port to connect to
|
||||
# we have to connect using a public ip of our host
|
||||
$ hostname
|
||||
$ ifconfig
|
||||
$ ssh root@192.168.33.10 -p 49153
|
||||
# Ah! forgot to set root passwd
|
||||
$ docker commit b2b407cf22cf8e7fa3736fa8852713571074536b1d31def3fdfcd9fa4fd8c8c5 dhrp/sshd
|
||||
$ docker ps -a
|
||||
$ docker run -i -t dhrp/sshd /bin/bash
|
||||
$ passwd
|
||||
$ exit
|
||||
$ docker commit 9e863f0ca0af31c8b951048ba87641d67c382d08d655c2e4879c51410e0fedc1 dhrp/sshd
|
||||
$ docker run -d -p 22 dhrp/sshd /usr/sbin/sshd -D
|
||||
$ docker port a0aaa9558c90cf5c7782648df904a82365ebacce523e4acc085ac1213bfe2206 22
|
||||
$ ifconfig
|
||||
$ ssh root@192.168.33.10 -p 49154
|
||||
# Thanks for watching, Thatcher thatcher@dotcloud.com
|
||||
|
||||
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.