Compare commits
921 commits
Author | SHA1 | Date | |
---|---|---|---|
|
69f9c8c906 | ||
|
420b1d3625 | ||
|
5472f39022 | ||
|
b96fb8837b | ||
|
67de83e70b | ||
|
5eff67a2c2 | ||
|
1342c51d5e | ||
|
df6c53c924 | ||
|
7d75c1d40d | ||
|
d3c5506330 | ||
|
1babdf81e7 | ||
|
35968c420d | ||
|
f80f6304e2 | ||
|
837baebb74 | ||
|
4b181db52b | ||
|
619f1b54c6 | ||
|
7487dca8a5 | ||
|
bb69504a4a | ||
|
c7253a0e1a | ||
|
b27122246a | ||
|
88eec2e811 | ||
|
ecd3baca25 | ||
|
233a6379e5 | ||
|
74c0c5b7f1 | ||
|
88623e101c | ||
|
705762f23c | ||
|
5f32bd9ced | ||
|
bd33bbf049 | ||
|
426396f438 | ||
|
406dba269c | ||
|
50b33bd3cd | ||
|
519462f3df | ||
|
64fffefffa | ||
|
8cf9d50fc0 | ||
|
a4e96a486f | ||
|
9fe291827a | ||
|
a15a770e1b | ||
|
9380ec7397 | ||
|
80cef48453 | ||
|
fc8f88dc14 | ||
|
89a4208757 | ||
|
490c45b756 | ||
|
56d897347d | ||
|
d4c63720e9 | ||
|
ec14dc44d1 | ||
|
a958fc3e65 | ||
|
89da709cb7 | ||
|
88820a4793 | ||
|
83baeafc3c | ||
|
dae08c333e | ||
|
93cb737687 | ||
|
7d597ee2c9 | ||
|
22c458b67c | ||
|
8b97280f11 | ||
|
eda52d433e | ||
|
168254fcfa | ||
|
9dc455dffb | ||
|
c200868fa2 | ||
|
9eade7d03c | ||
|
4685e9ef72 | ||
|
d8f22d0307 | ||
|
32366de5f9 | ||
|
ad0278f002 | ||
|
cb8b7a282d | ||
|
e1ae07b7a0 | ||
|
d49278cc17 | ||
|
892c228219 | ||
|
a7e309944b | ||
|
765245d54b | ||
|
2d4bfdc789 | ||
|
b990b6c2b0 | ||
|
4d9397c268 | ||
|
51bd95dc95 | ||
|
d5ba93575c | ||
|
12b7746a84 | ||
|
0c6bdf5974 | ||
|
630185b4ae | ||
|
d7423180e7 | ||
|
c30ff6885e | ||
|
7005841048 | ||
|
1608292c09 | ||
|
1763b4e88b | ||
|
5e8ab898c7 | ||
|
23d47bd12e | ||
|
789bd1c67b | ||
|
0eaa22b95d | ||
|
9d6053eda2 | ||
|
589b07262c | ||
|
e7c2b106ec | ||
|
a40b877fbb | ||
|
7dd9fdcfbe | ||
|
9dc6525e61 | ||
|
abb5beffff | ||
|
b4ca19a992 | ||
|
d5a82971a4 | ||
|
5fce12cf25 | ||
|
058ea43c5c | ||
|
ae158b371c | ||
|
77e06fda0c | ||
|
b47e742558 | ||
|
b85d75e29a | ||
|
c104a50de4 | ||
|
9482566a5c | ||
|
d4e12315cd | ||
|
4c24512241 | ||
|
ad0f0b3970 | ||
|
29796375c9 | ||
|
c1cc6ec81a | ||
|
8f1ab4e612 | ||
|
811a247d06 | ||
|
4d1885fb94 | ||
|
0a3b2bda34 | ||
|
9057ddf37c | ||
|
ab567a4327 | ||
|
ee3f3ece72 | ||
|
a76633684b | ||
|
0803200be9 | ||
|
706008a1da | ||
|
57f6c9a0ef | ||
|
c4c6cf6b6a | ||
|
7d4dd91a52 | ||
|
edf2c49410 | ||
|
1adcc64f40 | ||
|
e7349349fd | ||
|
3677003554 | ||
|
63841af153 | ||
|
2fbb374ab7 | ||
|
946d0ff67e | ||
|
70e7d6fe4a | ||
|
f432f71595 | ||
|
47a6d9b54f | ||
|
6a0995e0d8 | ||
|
e4f239d68e | ||
|
25b82fa9b8 | ||
|
e149ff62fe | ||
|
90a31c4829 | ||
|
aa98b4f5d6 | ||
|
860e7e273d | ||
|
a58b52b037 | ||
|
a6beb24dc5 | ||
|
282567a58d | ||
|
b66813eb45 | ||
|
edbb1d9e95 | ||
|
9d8eccec8e | ||
|
4275aec641 | ||
|
4b040147cf | ||
|
08a2fe0d56 | ||
|
1e1caccb13 | ||
|
5ba2bf37a8 | ||
|
f432a04243 | ||
|
81458b3144 | ||
|
6e1d159680 | ||
|
4241093b63 | ||
|
162fd8b856 | ||
|
05a1ebd0fd | ||
|
f8d4276a89 | ||
|
7df2d881f3 | ||
|
fed832e224 | ||
|
4581499848 | ||
|
f34a5b5af0 | ||
|
0df114a8f8 | ||
|
9f5a5da4cb | ||
|
037d5a9e9a | ||
|
5ed8f9a203 | ||
|
284bbde996 | ||
|
43b0a73273 | ||
|
89f296a534 | ||
|
d12b6d24d1 | ||
|
359edd8cbf | ||
|
1454987253 | ||
|
021258661b | ||
|
1db5199ddc | ||
|
6ed0f6ab78 | ||
|
100d240d86 | ||
|
4a4b3ed37f | ||
|
57d5105759 | ||
|
68db0c1739 | ||
|
4aaf3ead97 | ||
|
a070874828 | ||
|
237843a059 | ||
|
400b2850ff | ||
|
ddfeaf32ff | ||
|
cb813faebf | ||
|
0499db23d1 | ||
|
c77e7cb3d0 | ||
|
c6511ee4db | ||
|
0fa8a0c575 | ||
|
f3009e2f51 | ||
|
92ca652fc9 | ||
|
fdad16840c | ||
|
075e057de5 | ||
|
aa6a9891b0 | ||
|
0d4f412ecd | ||
|
fe2a25a785 | ||
|
e6c9e2736f | ||
|
71373c6105 | ||
|
498fbecafd | ||
|
5101ce52ae | ||
|
1e3971d556 | ||
|
bb19f8cc90 | ||
|
a18dd2e48e | ||
|
eb7bd90a57 | ||
|
a2d887b6f5 | ||
|
0594484041 | ||
|
cb5a2beaff | ||
|
ad2c5440b5 | ||
|
a515a320f2 | ||
|
56399cdacf | ||
|
5e6469c088 | ||
|
679115602f | ||
|
ce1b8c8c93 | ||
|
911ecc3376 | ||
|
08420b1c95 | ||
|
fbb08f525f | ||
|
1a830501b7 | ||
|
dcae74c44a | ||
|
0349167554 | ||
|
b47f177f20 | ||
|
9ed0504592 | ||
|
1a7d601a15 | ||
|
eee88a2a23 | ||
|
c694d60364 | ||
|
0901d4ab31 | ||
|
75fa1145da | ||
|
d1cf6d1303 | ||
|
e145add0ef | ||
|
2b130c28ca | ||
|
c6afabf3b3 | ||
|
4ac62b478d | ||
|
55af290462 | ||
|
1b8e9a131c | ||
|
5e23653130 | ||
|
130ae89dab | ||
|
1d8da80dbf | ||
|
5b6f2e1c59 | ||
|
1b0edb155f | ||
|
5571ceb5ac | ||
|
5e4574526d | ||
|
9338d0a6b5 | ||
|
5f6d6f3f22 | ||
|
d3dab1f618 | ||
|
3bd1759f80 | ||
|
f8cfa7947c | ||
|
11665130f9 | ||
|
3ba45cef16 | ||
|
a836daf6c5 | ||
|
e686f468f7 | ||
|
0dd0af939f | ||
|
7b575f9813 | ||
|
acca30055a | ||
|
4076c57b50 | ||
|
68e1150357 | ||
|
b813c398bb | ||
|
69098f05cf | ||
|
6949793bb1 | ||
|
c030578fe4 | ||
|
ef7b19365e | ||
|
c3936abb67 | ||
|
78571e9049 | ||
|
d2693998a6 | ||
|
6def98ee7d | ||
|
60220a48b2 | ||
|
efe241644b | ||
|
b645c8c70e | ||
|
dda9b3eced | ||
|
8270df208b | ||
|
facfb9e1b0 | ||
|
abfed203eb | ||
|
e6ba13d3b9 | ||
|
bd9e7fca87 | ||
|
68b270b97c | ||
|
ec50d8f814 | ||
|
325e889ba3 | ||
|
1984d8064b | ||
|
2b05c146ef | ||
|
c7cd5d6726 | ||
|
1942d3a8b1 | ||
|
67ac9ab190 | ||
|
2c571d3a45 | ||
|
9da49d0b99 | ||
|
37851d8f5b | ||
|
e4e71dcf6b | ||
|
d2aa7e3b3f | ||
|
ea5f540fb6 | ||
|
834f0f19c5 | ||
|
062a45bfa4 | ||
|
cc0416d0eb | ||
|
bbd22fb5d9 | ||
|
71dec69ef5 | ||
|
e6ef2b0641 | ||
|
c0fd6556f2 | ||
|
b1798d895a | ||
|
d17c56f639 | ||
|
32206f17d0 | ||
|
b2694b459f | ||
|
10df1f55f1 | ||
|
f13b265b56 | ||
|
89c5fbacfd | ||
|
9077436e6e | ||
|
1a451ca6e0 | ||
|
cf14fa7a23 | ||
|
f6398c1f07 | ||
|
71e07f9130 | ||
|
c4dbf36951 | ||
|
077f093988 | ||
|
96582ab4ba | ||
|
16f503c048 | ||
|
8e57214487 | ||
|
b617355190 | ||
|
8dbc7420ed | ||
|
e2f226b5b4 | ||
|
5302429fff | ||
|
1f18c73c09 | ||
|
3fca5878d6 | ||
|
4d190af804 | ||
|
9ab162a73a | ||
|
fe00613d06 | ||
|
cfcf25bb54 | ||
|
efcd84e47c | ||
|
ba28377919 | ||
|
449b60fcd0 | ||
|
d3d724e45a | ||
|
d699e3de12 | ||
|
e1cae011e2 | ||
|
ea84732a77 | ||
|
33b2719488 | ||
|
075a0201b9 | ||
|
5d5083a57a | ||
|
25162d4a4e | ||
|
35913e58c2 | ||
|
12e7d99439 | ||
|
0c38d56a6d | ||
|
031ef2dc8e | ||
|
ddb60aa6d1 | ||
|
92a8618ddc | ||
|
370def6b30 | ||
|
e2e3abec71 | ||
|
0e8949a003 | ||
|
967aa3a9ef | ||
|
83bcde8f60 | ||
|
d91a85a9b5 | ||
|
e5a0bc6a50 | ||
|
dae4436d1c | ||
|
1e26b431c9 | ||
|
ce74774c09 | ||
|
645f559352 | ||
|
9c388fb119 | ||
|
a8b454a934 | ||
|
fd169c00bf | ||
|
e037bade8c | ||
|
adfac697dc | ||
|
54a58760b6 | ||
|
5787ef7e9c | ||
|
9a21cf7e55 | ||
|
abbc956ac8 | ||
|
646e7a5239 | ||
|
3e077fc866 | ||
|
fb0fca8607 | ||
|
5bd4233d7b | ||
|
2ae5cbcf05 | ||
|
3472e441c5 | ||
|
a2a4576c61 | ||
|
ac62fa7a61 | ||
|
d9fba87f5a | ||
|
ec0e20a9eb | ||
|
923e849f28 | ||
|
060997ca6b | ||
|
adcd369285 | ||
|
b6a7124855 | ||
|
7fe3abf887 | ||
|
3fec3d1f1c | ||
|
49e8f7451d | ||
|
3136dea250 | ||
|
8ddb4c4e95 | ||
|
b4c03dd633 | ||
|
65a6d9d9eb | ||
|
06f11abf43 | ||
|
da8cd68e4f | ||
|
9464d3cd68 | ||
|
50cee7c48d | ||
|
682a46189b | ||
|
e1c5cdf14d | ||
|
4cf69b995e | ||
|
419995682f | ||
|
7ae6aa420d | ||
|
4c3e2dc441 | ||
|
d98c74d38d | ||
|
cf50c5bba8 | ||
|
05933ab2d4 | ||
|
15aa73ea4c | ||
|
df569fd54c | ||
|
0fa81e50e3 | ||
|
a5282fa128 | ||
|
da96e5c27b | ||
|
fce03f9921 | ||
|
c3d8cb99a0 | ||
|
5f7621b01e | ||
|
37555cdeff | ||
|
be42af89f8 | ||
|
9266ff7893 | ||
|
d766dac3bf | ||
|
c30d52b829 | ||
|
be66788e3c | ||
|
dc044f26ea | ||
|
1b1fe4cc64 | ||
|
be28c05949 | ||
|
2f069fa3a5 | ||
|
673cf751ca | ||
|
ed9449a424 | ||
|
07b243656c | ||
|
99c1b63197 | ||
|
e25352a42a | ||
|
d523748a4f | ||
|
bbcad73a27 | ||
|
1da0c05e48 | ||
|
7696045c1d | ||
|
cfd0c55d99 | ||
|
e49237dc7d | ||
|
ef4c63acf6 | ||
|
7b91af803d | ||
|
17e04aa6c2 | ||
|
6dc7846d26 | ||
|
b3d02e7f3c | ||
|
819baeb430 | ||
|
fa8d7029a7 | ||
|
c8da7fbd25 | ||
|
99deded542 | ||
|
64a928a3d4 | ||
|
4a358d0763 | ||
|
a11079a449 | ||
|
e44c6dc109 | ||
|
59a9eda8b6 | ||
|
6abf32fd52 | ||
|
02545bf320 | ||
|
e71e7d8246 | ||
|
99799a9ab5 | ||
|
9f03b73dbd | ||
|
32685e9c2b | ||
|
d2b470142c | ||
|
8b328aa9b4 | ||
|
dc4884a9fb | ||
|
e64c635c31 | ||
|
9eec36e483 | ||
|
dfadf729d3 | ||
|
183cac25f9 | ||
|
168e23a2f5 | ||
|
35e9ee82a6 | ||
|
06cca53fa0 | ||
|
de3a04a65d | ||
|
c2b84fd0e8 | ||
|
55aadb3a8f | ||
|
eba485a3c6 | ||
|
0459d8c7a6 | ||
|
6fdd837110 | ||
|
884551acd1 | ||
|
d53f67be35 | ||
|
b9f2e88286 | ||
|
a1638563f7 | ||
|
5d74bd7ef9 | ||
|
e101935ae8 | ||
|
a365f0745d | ||
|
ff26a23314 | ||
|
4329550a74 | ||
|
d58829550e | ||
|
b116452a03 | ||
|
24181cd265 | ||
|
ae7858ff2c | ||
|
69da36f39e | ||
|
93b38b8008 | ||
|
e8e2666705 | ||
|
45f49fe5c3 | ||
|
1d91898ca6 | ||
|
79e5950b2f | ||
|
6d7d877c73 | ||
|
b8b8bcb8bf | ||
|
08573e2920 | ||
|
5d4f5db76c | ||
|
48e8f18495 | ||
|
517ebe626c | ||
|
14d561eb1c | ||
|
1da2e90b56 | ||
|
316390891c | ||
|
24395d55fc | ||
|
63f2e107b3 | ||
|
a768bf8673 | ||
|
2ebfdfd66c | ||
|
1204f3a77c | ||
|
1d795b53d3 | ||
|
7e76438537 | ||
|
bf212c5b33 | ||
|
eeeb2e941d | ||
|
05c096a1ac | ||
|
ad8327f2ce | ||
|
34418110ec | ||
|
e286096089 | ||
|
a63a02fefd | ||
|
f76cb3e6d5 | ||
|
edeff03134 | ||
|
8c8de170d2 | ||
|
1710bba5c3 | ||
|
0378afaf5f | ||
|
0c8bc0b57a | ||
|
c95330420c | ||
|
589f437b06 | ||
|
559be42fc2 | ||
|
dda2b4454f | ||
|
0ff52c285d | ||
|
09f8810272 | ||
|
f4f8feafe7 | ||
|
29db7cb98b | ||
|
cf298d3073 | ||
|
53b9d440b8 | ||
|
1e13f66fbb | ||
|
9a9ff44418 | ||
|
218af8c7bd | ||
|
a24fddc2ad | ||
|
6d9666c8a0 | ||
|
c5754a7329 | ||
|
c27f11fa2e | ||
|
09b72e0be4 | ||
|
b71e1008a5 | ||
|
0d6d5b392a | ||
|
51f390dd79 | ||
|
168132b632 | ||
|
3f4338cf04 | ||
|
3ade7ca12b | ||
|
9c49308cce | ||
|
91757722a9 | ||
|
235fa0eee8 | ||
|
08af35b250 | ||
|
ef4366ee89 | ||
|
3833f2a60b | ||
|
78f4d6b84f | ||
|
e489130717 | ||
|
34b31d0ee0 | ||
|
486953e2ff | ||
|
8306f1e31e | ||
|
879fba29d5 | ||
|
421a3aa737 | ||
|
23c7134bad | ||
|
2399b7a91b | ||
|
7cb08ca538 | ||
|
2aa5322638 | ||
|
3f7e68e894 | ||
|
4b5c535be9 | ||
|
80727e5a92 | ||
|
ff3dc8a7c4 | ||
|
b2823e4609 | ||
|
a18eea2702 | ||
|
36fc8f5809 | ||
|
ca22ec44ba | ||
|
02465c9f9d | ||
|
f9232e3f11 | ||
|
6de2bd28df | ||
|
2b10608f16 | ||
|
c416072ced | ||
|
5196dc65e7 | ||
|
8abb005598 | ||
|
44ca36c7cf | ||
|
b6190c2713 | ||
|
ca89db221f | ||
|
f3e1aff81d | ||
|
ad1e6bae4f | ||
|
b262d40daf | ||
|
8ab5e2a004 | ||
|
7b2d5556d5 | ||
|
776c2bd113 | ||
|
c67edc5d61 | ||
|
1920db0267 | ||
|
a6b8e81332 | ||
|
bc5df68698 | ||
|
3c63d7fd9b | ||
|
96af5bfbb5 | ||
|
665c5d0c5f | ||
|
9557c7be8e | ||
|
b13e995c78 | ||
|
c93da8ded9 | ||
|
cf05755e9d | ||
|
c502db4955 | ||
|
6ffb8e2b67 | ||
|
48282bea40 | ||
|
1176d6fa66 | ||
|
1242a39e8e | ||
|
3d678eb14a | ||
|
43e842cfd8 | ||
|
525e8ed3fe | ||
|
5772636dc6 | ||
|
0b12b76c28 | ||
|
40e3647f2f | ||
|
892dbfb87e | ||
|
c4d20760d4 | ||
|
768923199f | ||
|
f1a639bf53 | ||
|
f7dbee3eea | ||
|
1e0234ddc6 | ||
|
9cc467c3b3 | ||
|
04995b667b | ||
|
81fcfc67cd | ||
|
78198da34a | ||
|
16342ac1b1 | ||
|
239ac23799 | ||
|
c8ef549bf6 | ||
|
0485e53675 | ||
|
70ca64d736 | ||
|
cb9414bbb7 | ||
|
c5bfb0290e | ||
|
2d3475bac8 | ||
|
22f6cfd4df | ||
|
88301d8f6c | ||
|
5143f3a62c | ||
|
bb53ea71cb | ||
|
339261224f | ||
|
7f2d2e3cc3 | ||
|
9b836acdd0 | ||
|
f7a3fb8f5a | ||
|
d6ba2b6a68 | ||
|
78abff3e39 | ||
|
43919c2455 | ||
|
c5c73c2e1f | ||
|
7639c4bdeb | ||
|
ed20165a37 | ||
|
52cef4bbee | ||
|
278cb7aed5 | ||
|
613a32482f | ||
|
9ae801cfd1 | ||
|
df1d66e6ba | ||
|
0873c3b57d | ||
|
83e7de55aa | ||
|
fb471aab26 | ||
|
70303ded8e | ||
|
16639f549e | ||
|
34fa29e8d4 | ||
|
16d0807e7e | ||
|
75d217961e | ||
|
c243ffaa06 | ||
|
e6b49956d8 | ||
|
4593b400f9 | ||
|
b81a2581ff | ||
|
9ccba2faf1 | ||
|
55b938cb98 | ||
|
46fa5a43ff | ||
|
75c408cb6c | ||
|
23b7bdf785 | ||
|
10dd4a25ba | ||
|
eee3f67571 | ||
|
78207d5380 | ||
|
34a8dcae17 | ||
|
5c2e0c6f9b | ||
|
61e218a502 | ||
|
3dd11dd0b5 | ||
|
0d0b1e77c0 | ||
|
94d56428a6 | ||
|
52ec660936 | ||
|
30549cdb4b | ||
|
62af652a5a | ||
|
63966dec02 | ||
|
997037964e | ||
|
7a82829520 | ||
|
678121ef05 | ||
|
970fb3c1df | ||
|
04ca2e6b92 | ||
|
8dbb761fad | ||
|
0a5ea9d310 | ||
|
c94dca16fa | ||
|
cbbff33086 | ||
|
949708a745 | ||
|
b81c762493 | ||
|
002bf3806e | ||
|
337f0fc80d | ||
|
c6a4351edd | ||
|
b40359b551 | ||
|
1d0821eee2 | ||
|
37536cdfa4 | ||
|
3040f3fdbb | ||
|
ea508a8574 | ||
|
d974674126 | ||
|
31cb280682 | ||
|
de941c990e | ||
|
5da534d8db | ||
|
24144dbdc9 | ||
|
da9289fb54 | ||
|
0b274cf18f | ||
|
55fc016efc | ||
|
5bed812503 | ||
|
3e266baca4 | ||
|
191b03834a | ||
|
63e03b155f | ||
|
130c0746c1 | ||
|
624ff4fef4 | ||
|
19fa3ab213 | ||
|
ab68b5dd9a | ||
|
9ad75d26fc | ||
|
c3a7556f73 | ||
|
89e812a1e6 | ||
|
e092ff3f74 | ||
|
2b33fe3512 | ||
|
93c18c73a3 | ||
|
cad2cd71b7 | ||
|
96e086dc33 | ||
|
cddce2dfa7 | ||
|
65f964aa6b | ||
|
8fca769bd5 | ||
|
ef5dd6e46d | ||
|
8533594ad6 | ||
|
32802bc7d9 | ||
|
4bed01298c | ||
|
56ca630f27 | ||
|
a02539b3e8 | ||
|
0dc7bdc325 | ||
|
b61ee6e4af | ||
|
56784591bf | ||
|
6eeb9ec3d6 | ||
|
dd7ef76474 | ||
|
0375566412 | ||
|
3678438dd8 | ||
|
1cc7b3881d | ||
|
03b1b078f9 | ||
|
5067389c36 | ||
|
6d98ef8c69 | ||
|
d5088c1488 | ||
|
df3689f8d0 | ||
|
3fd0be03f0 | ||
|
37d9901e0f | ||
|
29fe4e58c6 | ||
|
685565ad18 | ||
|
305b2416ea | ||
|
f5b64c3ffe | ||
|
9f9dab03c1 | ||
|
c7139be62b | ||
|
b0ef7422b0 | ||
|
1fbed3ffc9 | ||
|
dd85af0e12 | ||
|
3bbf7b0d4d | ||
|
bc9183ba0e | ||
|
47517880ec | ||
|
7b0cf8b16d | ||
|
47a7f762d3 | ||
|
8ba31dccd1 | ||
|
80376f9e13 | ||
|
ee64eae903 | ||
|
ff0a0e364b | ||
|
791aa3c338 | ||
|
6e9aba883c | ||
|
2f1984c6df | ||
|
640193b2bb | ||
|
97ca6434e0 | ||
|
c364e5d1ba | ||
|
3bf3a1ae65 | ||
|
439ed140ee | ||
|
a50d77700e | ||
|
6b7330dcd4 | ||
|
8ecf5409e9 | ||
|
6efcd74c6b | ||
|
eaa83640fa | ||
|
cbdf487768 | ||
|
b0f01be33f | ||
|
80e2871d21 | ||
|
4ef8f6d323 | ||
|
56ff8ccc91 | ||
|
e01625bc70 | ||
|
fa8dd90ceb | ||
|
509a793378 | ||
|
705d9623b7 | ||
|
c687381870 | ||
|
1eadbf1bd0 | ||
|
685f13f3fd | ||
|
638cf86cbe | ||
|
d27a919cd2 | ||
|
a69cd8239f | ||
|
8a2f96096a | ||
|
b07f53d0a4 | ||
|
e61e107040 | ||
|
023166b530 | ||
|
884c9e268f | ||
|
99678a93ed | ||
|
99cd23cefd | ||
|
4d3dfd24ec | ||
|
21ae66c664 | ||
|
da6dddcd04 | ||
|
d1b0475d89 | ||
|
42757e8794 | ||
|
3452f743ab | ||
|
b9cd7b59b6 | ||
|
8f4b96f19e | ||
|
186afe3ce3 | ||
|
a0063c534a | ||
|
9b97965f22 | ||
|
e3f83e7aa7 | ||
|
44023afb7d | ||
|
29ff2800c3 | ||
|
d44a48835f | ||
|
275bf7ec03 | ||
|
de45ce73eb | ||
|
ceb773e1ff | ||
|
60013ba69b | ||
|
96df6d4d0b | ||
|
a33a82b42f | ||
|
367870a4d5 | ||
|
175013d0cb | ||
|
a6905fa2e5 | ||
|
510e79ebe9 | ||
|
a8d1b4a1ab | ||
|
88374fa982 | ||
|
049a1090c3 | ||
|
020bb75219 | ||
|
a24b9087ce | ||
|
48786ba842 | ||
|
dde48c6715 | ||
|
e7c02a0508 | ||
|
31722d3f5a | ||
|
a81278befe | ||
|
cad766f6c7 | ||
|
f0f7020b5d | ||
|
65ba452bb0 | ||
|
76d936ae76 | ||
|
d1eae89590 | ||
|
7d1414ec3e | ||
|
5fbc0a16e2 | ||
|
0678d71038 | ||
|
746dce1994 | ||
|
36f0fe6524 | ||
|
737d57bad6 | ||
|
287240a965 | ||
|
ca602fa7c6 | ||
|
36324c3bbd | ||
|
21c33eb7e3 | ||
|
feb373a216 | ||
|
d7080a7a2e | ||
|
b915ec1e7b | ||
|
2de4afdee5 | ||
|
26a35ddcd1 | ||
|
d575af39ac | ||
|
57b59f876e | ||
|
3e057d527d | ||
|
9781cceb09 | ||
|
d0f4f42bd4 | ||
|
d59fb97c5b | ||
|
e1e47d090d | ||
|
a62d9b9c21 | ||
|
a004854097 | ||
|
5925508b31 | ||
|
5051fe047c | ||
|
57a9697161 | ||
|
936432326a | ||
|
9eeb2b5ef0 | ||
|
eaa3e69d14 | ||
|
80a35e0bd4 | ||
|
cdeef06801 | ||
|
181a64a5aa | ||
|
63eecadf82 | ||
|
2b216674da | ||
|
868d87b08e | ||
|
a7e03f69be | ||
|
96daf37c83 | ||
|
3dec835d84 | ||
|
4da607559f | ||
|
8dd7bd9981 | ||
|
7cc3681ad6 | ||
|
e205cd89cd | ||
|
c56df1abf3 | ||
|
d8185417d9 | ||
|
7cb78b6259 | ||
|
79ac8f95af | ||
|
1c346f16a3 | ||
|
d347049802 | ||
|
939aa52465 | ||
|
29c50668b3 | ||
|
55c5381584 | ||
|
750e0ace06 | ||
|
29498693dd | ||
|
56e92239a6 | ||
|
11319732ab | ||
|
853816ae79 | ||
|
8f61032ec4 | ||
|
bff7e300e6 | ||
|
ff44133643 | ||
|
9fdccf6a47 | ||
|
3f4657f6db | ||
|
dcc05fcf3e | ||
|
03ce4080a4 | ||
|
61828453db | ||
|
d371b283c3 | ||
|
4784740273 | ||
|
31b0688de7 | ||
|
6896305b57 | ||
|
931c4c1023 | ||
|
6cc14f5854 | ||
|
1910607215 | ||
|
dfa1031015 | ||
|
790388a8c5 | ||
|
ea09008423 | ||
|
8d8904f02b | ||
|
2a7513a972 | ||
|
c47f2a4a1a | ||
|
526a72fd77 | ||
|
f76879dd64 | ||
|
e7a837120d | ||
|
04c51495da | ||
|
02baf07d77 | ||
|
6d0823af0a | ||
|
8493fb18ae | ||
|
e8b9a752d3 | ||
|
14bb71d508 | ||
|
2e95499142 | ||
|
8d428458a2 | ||
|
5f60a56544 | ||
|
a3b4e92d66 | ||
|
cedf201aef | ||
|
545bc6b4d8 | ||
|
620d9d3c75 | ||
|
e1b045c25e | ||
|
11e2802015 | ||
|
cb8d67505d | ||
|
7d3405b4ba | ||
|
d36c7de19e | ||
|
6605a26c75 | ||
|
ce9cabf0f0 | ||
|
dc6d1ac663 | ||
|
1fdd24579c | ||
|
3afbf83cc5 | ||
|
61a234d562 |
10277 changed files with 725259 additions and 1448855 deletions
21
.DEREK.yml
Normal file
21
.DEREK.yml
Normal file
|
@ -0,0 +1,21 @@
|
||||||
|
curators:
|
||||||
|
- aboch
|
||||||
|
- alexellis
|
||||||
|
- andrewhsu
|
||||||
|
- anonymuse
|
||||||
|
- chanwit
|
||||||
|
- ehazlett
|
||||||
|
- fntlnz
|
||||||
|
- gianarb
|
||||||
|
- kolyshkin
|
||||||
|
- mgoelzer
|
||||||
|
- olljanat
|
||||||
|
- programmerq
|
||||||
|
- rheinwein
|
||||||
|
- ripcurld0
|
||||||
|
- thajeztah
|
||||||
|
|
||||||
|
features:
|
||||||
|
- comments
|
||||||
|
- pr_description_required
|
||||||
|
|
|
@ -1,21 +0,0 @@
|
||||||
{
|
|
||||||
"name": "moby",
|
|
||||||
"build": {
|
|
||||||
"context": "..",
|
|
||||||
"dockerfile": "../Dockerfile",
|
|
||||||
"target": "devcontainer"
|
|
||||||
},
|
|
||||||
"workspaceFolder": "/go/src/github.com/docker/docker",
|
|
||||||
"workspaceMount": "source=${localWorkspaceFolder},target=/go/src/github.com/docker/docker,type=bind,consistency=cached",
|
|
||||||
|
|
||||||
"remoteUser": "root",
|
|
||||||
"runArgs": ["--privileged"],
|
|
||||||
|
|
||||||
"customizations": {
|
|
||||||
"vscode": {
|
|
||||||
"extensions": [
|
|
||||||
"golang.go"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,4 +1,6 @@
|
||||||
|
bundles
|
||||||
|
.gopath
|
||||||
|
vendor/pkg
|
||||||
|
.go-pkg-cache
|
||||||
.git
|
.git
|
||||||
bundles/
|
|
||||||
cli/winresources/**/winres.json
|
|
||||||
cli/winresources/**/*.syso
|
|
||||||
|
|
3
.gitattributes
vendored
3
.gitattributes
vendored
|
@ -1,3 +0,0 @@
|
||||||
Dockerfile* linguist-language=Dockerfile
|
|
||||||
vendor.mod linguist-language=Go-Module
|
|
||||||
vendor.sum linguist-language=Go-Checksums
|
|
5
.github/CODEOWNERS
vendored
5
.github/CODEOWNERS
vendored
|
@ -5,8 +5,11 @@
|
||||||
|
|
||||||
builder/** @tonistiigi
|
builder/** @tonistiigi
|
||||||
contrib/mkimage/** @tianon
|
contrib/mkimage/** @tianon
|
||||||
|
daemon/graphdriver/devmapper/** @rhvgoyal
|
||||||
|
daemon/graphdriver/lcow/** @johnstep @jhowardmsft
|
||||||
|
daemon/graphdriver/overlay/** @dmcgowan
|
||||||
daemon/graphdriver/overlay2/** @dmcgowan
|
daemon/graphdriver/overlay2/** @dmcgowan
|
||||||
daemon/graphdriver/windows/** @johnstep
|
daemon/graphdriver/windows/** @johnstep @jhowardmsft
|
||||||
daemon/logger/awslogs/** @samuelkarp
|
daemon/logger/awslogs/** @samuelkarp
|
||||||
hack/** @tianon
|
hack/** @tianon
|
||||||
plugin/** @cpuguy83
|
plugin/** @cpuguy83
|
||||||
|
|
70
.github/ISSUE_TEMPLATE.md
vendored
Normal file
70
.github/ISSUE_TEMPLATE.md
vendored
Normal file
|
@ -0,0 +1,70 @@
|
||||||
|
<!--
|
||||||
|
If you are reporting a new issue, make sure that we do not have any duplicates
|
||||||
|
already open. You can ensure this by searching the issue list for this
|
||||||
|
repository. If there is a duplicate, please close your issue and add a comment
|
||||||
|
to the existing issue instead.
|
||||||
|
|
||||||
|
If you suspect your issue is a bug, please edit your issue description to
|
||||||
|
include the BUG REPORT INFORMATION shown below. If you fail to provide this
|
||||||
|
information within 7 days, we cannot debug your issue and will close it. We
|
||||||
|
will, however, reopen it if you later provide the information.
|
||||||
|
|
||||||
|
For more information about reporting issues, see
|
||||||
|
https://github.com/moby/moby/blob/master/CONTRIBUTING.md#reporting-other-issues
|
||||||
|
|
||||||
|
---------------------------------------------------
|
||||||
|
GENERAL SUPPORT INFORMATION
|
||||||
|
---------------------------------------------------
|
||||||
|
|
||||||
|
The GitHub issue tracker is for bug reports and feature requests.
|
||||||
|
General support for **docker** can be found at the following locations:
|
||||||
|
|
||||||
|
- Docker Support Forums - https://forums.docker.com
|
||||||
|
- Slack - community.docker.com #general channel
|
||||||
|
- Post a question on StackOverflow, using the Docker tag
|
||||||
|
|
||||||
|
General support for **moby** can be found at the following locations:
|
||||||
|
|
||||||
|
- Moby Project Forums - https://forums.mobyproject.org
|
||||||
|
- Slack - community.docker.com #moby-project channel
|
||||||
|
- Post a question on StackOverflow, using the Moby tag
|
||||||
|
|
||||||
|
---------------------------------------------------
|
||||||
|
BUG REPORT INFORMATION
|
||||||
|
---------------------------------------------------
|
||||||
|
Use the commands below to provide key information from your environment:
|
||||||
|
You do NOT have to include this information if this is a FEATURE REQUEST
|
||||||
|
-->
|
||||||
|
|
||||||
|
**Description**
|
||||||
|
|
||||||
|
<!--
|
||||||
|
Briefly describe the problem you are having in a few paragraphs.
|
||||||
|
-->
|
||||||
|
|
||||||
|
**Steps to reproduce the issue:**
|
||||||
|
1.
|
||||||
|
2.
|
||||||
|
3.
|
||||||
|
|
||||||
|
**Describe the results you received:**
|
||||||
|
|
||||||
|
|
||||||
|
**Describe the results you expected:**
|
||||||
|
|
||||||
|
|
||||||
|
**Additional information you deem important (e.g. issue happens only occasionally):**
|
||||||
|
|
||||||
|
**Output of `docker version`:**
|
||||||
|
|
||||||
|
```
|
||||||
|
(paste your output here)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Output of `docker info`:**
|
||||||
|
|
||||||
|
```
|
||||||
|
(paste your output here)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Additional environment details (AWS, VirtualBox, physical, etc.):**
|
146
.github/ISSUE_TEMPLATE/bug_report.yml
vendored
146
.github/ISSUE_TEMPLATE/bug_report.yml
vendored
|
@ -1,146 +0,0 @@
|
||||||
name: Bug report
|
|
||||||
description: Create a report to help us improve
|
|
||||||
labels:
|
|
||||||
- kind/bug
|
|
||||||
- status/0-triage
|
|
||||||
body:
|
|
||||||
- type: markdown
|
|
||||||
attributes:
|
|
||||||
value: |
|
|
||||||
Thank you for taking the time to report a bug!
|
|
||||||
If this is a security issue please report it to the [Docker Security team](mailto:security@docker.com).
|
|
||||||
- type: textarea
|
|
||||||
id: description
|
|
||||||
attributes:
|
|
||||||
label: Description
|
|
||||||
description: Please give a clear and concise description of the bug
|
|
||||||
validations:
|
|
||||||
required: true
|
|
||||||
- type: textarea
|
|
||||||
id: repro
|
|
||||||
attributes:
|
|
||||||
label: Reproduce
|
|
||||||
description: Steps to reproduce the bug
|
|
||||||
placeholder: |
|
|
||||||
1. docker run ...
|
|
||||||
2. docker kill ...
|
|
||||||
3. docker rm ...
|
|
||||||
validations:
|
|
||||||
required: true
|
|
||||||
- type: textarea
|
|
||||||
id: expected
|
|
||||||
attributes:
|
|
||||||
label: Expected behavior
|
|
||||||
description: What is the expected behavior?
|
|
||||||
placeholder: |
|
|
||||||
E.g. "`docker rm` should remove the container and cleanup all associated data"
|
|
||||||
- type: textarea
|
|
||||||
id: version
|
|
||||||
attributes:
|
|
||||||
label: docker version
|
|
||||||
description: Output of `docker version`
|
|
||||||
render: bash
|
|
||||||
placeholder: |
|
|
||||||
Client:
|
|
||||||
Version: 20.10.17
|
|
||||||
API version: 1.41
|
|
||||||
Go version: go1.17.11
|
|
||||||
Git commit: 100c70180fde3601def79a59cc3e996aa553c9b9
|
|
||||||
Built: Mon Jun 6 21:36:39 UTC 2022
|
|
||||||
OS/Arch: linux/amd64
|
|
||||||
Context: default
|
|
||||||
Experimental: true
|
|
||||||
|
|
||||||
Server:
|
|
||||||
Engine:
|
|
||||||
Version: 20.10.17
|
|
||||||
API version: 1.41 (minimum version 1.12)
|
|
||||||
Go version: go1.17.11
|
|
||||||
Git commit: a89b84221c8560e7a3dee2a653353429e7628424
|
|
||||||
Built: Mon Jun 6 22:32:38 2022
|
|
||||||
OS/Arch: linux/amd64
|
|
||||||
Experimental: true
|
|
||||||
containerd:
|
|
||||||
Version: 1.6.6
|
|
||||||
GitCommit: 10c12954828e7c7c9b6e0ea9b0c02b01407d3ae1
|
|
||||||
runc:
|
|
||||||
Version: 1.1.2
|
|
||||||
GitCommit: a916309fff0f838eb94e928713dbc3c0d0ac7aa4
|
|
||||||
docker-init:
|
|
||||||
Version: 0.19.0
|
|
||||||
GitCommit:
|
|
||||||
validations:
|
|
||||||
required: true
|
|
||||||
- type: textarea
|
|
||||||
id: info
|
|
||||||
attributes:
|
|
||||||
label: docker info
|
|
||||||
description: Output of `docker info`
|
|
||||||
render: bash
|
|
||||||
placeholder: |
|
|
||||||
Client:
|
|
||||||
Context: default
|
|
||||||
Debug Mode: false
|
|
||||||
Plugins:
|
|
||||||
buildx: Docker Buildx (Docker Inc., 0.8.2)
|
|
||||||
compose: Docker Compose (Docker Inc., 2.6.0)
|
|
||||||
|
|
||||||
Server:
|
|
||||||
Containers: 4
|
|
||||||
Running: 2
|
|
||||||
Paused: 0
|
|
||||||
Stopped: 2
|
|
||||||
Images: 80
|
|
||||||
Server Version: 20.10.17
|
|
||||||
Storage Driver: overlay2
|
|
||||||
Backing Filesystem: xfs
|
|
||||||
Supports d_type: true
|
|
||||||
Native Overlay Diff: false
|
|
||||||
userxattr: false
|
|
||||||
Logging Driver: local
|
|
||||||
Cgroup Driver: cgroupfs
|
|
||||||
Cgroup Version: 1
|
|
||||||
Plugins:
|
|
||||||
Volume: local
|
|
||||||
Network: bridge host ipvlan macvlan null overlay
|
|
||||||
Log: awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog
|
|
||||||
Swarm: inactive
|
|
||||||
Runtimes: runc io.containerd.runc.v2 io.containerd.runtime.v1.linux
|
|
||||||
Default Runtime: runc
|
|
||||||
Init Binary: docker-init
|
|
||||||
containerd version: 10c12954828e7c7c9b6e0ea9b0c02b01407d3ae1
|
|
||||||
runc version: a916309fff0f838eb94e928713dbc3c0d0ac7aa4
|
|
||||||
init version:
|
|
||||||
Security Options:
|
|
||||||
apparmor
|
|
||||||
seccomp
|
|
||||||
Profile: default
|
|
||||||
Kernel Version: 5.13.0-1031-azure
|
|
||||||
Operating System: Ubuntu 20.04.4 LTS
|
|
||||||
OSType: linux
|
|
||||||
Architecture: x86_64
|
|
||||||
CPUs: 4
|
|
||||||
Total Memory: 15.63GiB
|
|
||||||
Name: dev
|
|
||||||
ID: UC44:2RFL:7NQ5:GGFW:34O5:DYRE:CLOH:VLGZ:64AZ:GFXC:PY6H:SAHY
|
|
||||||
Docker Root Dir: /var/lib/docker
|
|
||||||
Debug Mode: true
|
|
||||||
File Descriptors: 46
|
|
||||||
Goroutines: 134
|
|
||||||
System Time: 2022-07-06T18:07:54.812439392Z
|
|
||||||
EventsListeners: 0
|
|
||||||
Registry: https://index.docker.io/v1/
|
|
||||||
Labels:
|
|
||||||
Experimental: true
|
|
||||||
Insecure Registries:
|
|
||||||
127.0.0.0/8
|
|
||||||
Live Restore Enabled: true
|
|
||||||
validations:
|
|
||||||
required: true
|
|
||||||
- type: textarea
|
|
||||||
id: additional
|
|
||||||
attributes:
|
|
||||||
label: Additional Info
|
|
||||||
description: Additional info you want to provide such as logs, system info, environment, etc.
|
|
||||||
validations:
|
|
||||||
required: false
|
|
8
.github/ISSUE_TEMPLATE/config.yml
vendored
8
.github/ISSUE_TEMPLATE/config.yml
vendored
|
@ -1,8 +0,0 @@
|
||||||
blank_issues_enabled: false
|
|
||||||
contact_links:
|
|
||||||
- name: Security and Vulnerabilities
|
|
||||||
url: https://github.com/moby/moby/blob/master/SECURITY.md
|
|
||||||
about: Please report any security issues or vulnerabilities responsibly to the Docker security team. Please do not use the public issue tracker.
|
|
||||||
- name: Questions and Discussions
|
|
||||||
url: https://github.com/moby/moby/discussions/new
|
|
||||||
about: Use Github Discussions to ask questions and/or open discussion topics.
|
|
13
.github/ISSUE_TEMPLATE/feature_request.yml
vendored
13
.github/ISSUE_TEMPLATE/feature_request.yml
vendored
|
@ -1,13 +0,0 @@
|
||||||
name: Feature request
|
|
||||||
description: Missing functionality? Come tell us about it!
|
|
||||||
labels:
|
|
||||||
- kind/feature
|
|
||||||
- status/0-triage
|
|
||||||
body:
|
|
||||||
- type: textarea
|
|
||||||
id: description
|
|
||||||
attributes:
|
|
||||||
label: Description
|
|
||||||
description: What is the feature you want to see?
|
|
||||||
validations:
|
|
||||||
required: true
|
|
5
.github/PULL_REQUEST_TEMPLATE.md
vendored
5
.github/PULL_REQUEST_TEMPLATE.md
vendored
|
@ -22,12 +22,9 @@ Please provide the following information:
|
||||||
**- Description for the changelog**
|
**- Description for the changelog**
|
||||||
<!--
|
<!--
|
||||||
Write a short (one line) summary that describes the changes in this
|
Write a short (one line) summary that describes the changes in this
|
||||||
pull request for inclusion in the changelog.
|
pull request for inclusion in the changelog:
|
||||||
It must be placed inside the below triple backticks section:
|
|
||||||
-->
|
-->
|
||||||
```markdown changelog
|
|
||||||
|
|
||||||
```
|
|
||||||
|
|
||||||
**- A picture of a cute animal (not mandatory but encouraged)**
|
**- A picture of a cute animal (not mandatory but encouraged)**
|
||||||
|
|
||||||
|
|
27
.github/actions/setup-runner/action.yml
vendored
27
.github/actions/setup-runner/action.yml
vendored
|
@ -1,27 +0,0 @@
|
||||||
name: 'Setup Runner'
|
|
||||||
description: 'Composite action to set up the GitHub Runner for jobs in the test.yml workflow'
|
|
||||||
|
|
||||||
runs:
|
|
||||||
using: composite
|
|
||||||
steps:
|
|
||||||
- run: |
|
|
||||||
sudo modprobe ip_vs
|
|
||||||
sudo modprobe ipv6
|
|
||||||
sudo modprobe ip6table_filter
|
|
||||||
sudo modprobe -r overlay
|
|
||||||
sudo modprobe overlay redirect_dir=off
|
|
||||||
shell: bash
|
|
||||||
- run: |
|
|
||||||
if [ ! -e /etc/docker/daemon.json ]; then
|
|
||||||
echo '{}' | sudo tee /etc/docker/daemon.json >/dev/null
|
|
||||||
fi
|
|
||||||
DOCKERD_CONFIG=$(jq '.+{"experimental":true,"live-restore":true,"ipv6":true,"fixed-cidr-v6":"2001:db8:1::/64"}' /etc/docker/daemon.json)
|
|
||||||
sudo tee /etc/docker/daemon.json <<<"$DOCKERD_CONFIG" >/dev/null
|
|
||||||
sudo service docker restart
|
|
||||||
shell: bash
|
|
||||||
- run: |
|
|
||||||
./contrib/check-config.sh || true
|
|
||||||
shell: bash
|
|
||||||
- run: |
|
|
||||||
docker info
|
|
||||||
shell: bash
|
|
14
.github/actions/setup-tracing/action.yml
vendored
14
.github/actions/setup-tracing/action.yml
vendored
|
@ -1,14 +0,0 @@
|
||||||
name: 'Setup Tracing'
|
|
||||||
description: 'Composite action to set up the tracing for test jobs'
|
|
||||||
|
|
||||||
runs:
|
|
||||||
using: composite
|
|
||||||
steps:
|
|
||||||
- run: |
|
|
||||||
set -e
|
|
||||||
# Jaeger is set up on Windows through an inline run step. If you update Jaeger here, don't forget to update
|
|
||||||
# the version set in .github/workflows/.windows.yml.
|
|
||||||
docker run -d --net=host --name jaeger -e COLLECTOR_OTLP_ENABLED=true jaegertracing/all-in-one:1.46
|
|
||||||
docker0_ip="$(ip -f inet addr show docker0 | grep -Po 'inet \K[\d.]+')"
|
|
||||||
echo "OTEL_EXPORTER_OTLP_ENDPOINT=http://${docker0_ip}:4318" >> "${GITHUB_ENV}"
|
|
||||||
shell: bash
|
|
48
.github/workflows/.dco.yml
vendored
48
.github/workflows/.dco.yml
vendored
|
@ -1,48 +0,0 @@
|
||||||
# reusable workflow
|
|
||||||
name: .dco
|
|
||||||
|
|
||||||
# TODO: hide reusable workflow from the UI. Tracked in https://github.com/community/community/discussions/12025
|
|
||||||
|
|
||||||
on:
|
|
||||||
workflow_call:
|
|
||||||
|
|
||||||
env:
|
|
||||||
ALPINE_VERSION: 3.16
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
run:
|
|
||||||
runs-on: ubuntu-20.04
|
|
||||||
steps:
|
|
||||||
-
|
|
||||||
name: Checkout
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
with:
|
|
||||||
fetch-depth: 0
|
|
||||||
-
|
|
||||||
name: Dump context
|
|
||||||
uses: actions/github-script@v7
|
|
||||||
with:
|
|
||||||
script: |
|
|
||||||
console.log(JSON.stringify(context, null, 2));
|
|
||||||
-
|
|
||||||
name: Get base ref
|
|
||||||
id: base-ref
|
|
||||||
uses: actions/github-script@v7
|
|
||||||
with:
|
|
||||||
result-encoding: string
|
|
||||||
script: |
|
|
||||||
if (/^refs\/pull\//.test(context.ref) && context.payload?.pull_request?.base?.ref != undefined) {
|
|
||||||
return context.payload.pull_request.base.ref;
|
|
||||||
}
|
|
||||||
return context.ref.replace(/^refs\/heads\//g, '');
|
|
||||||
-
|
|
||||||
name: Validate
|
|
||||||
run: |
|
|
||||||
docker run --rm \
|
|
||||||
-v "$(pwd):/workspace" \
|
|
||||||
-e VALIDATE_REPO \
|
|
||||||
-e VALIDATE_BRANCH \
|
|
||||||
alpine:${{ env.ALPINE_VERSION }} sh -c 'apk add --no-cache -q bash git openssh-client && git config --system --add safe.directory /workspace && cd /workspace && hack/validate/dco'
|
|
||||||
env:
|
|
||||||
VALIDATE_REPO: ${{ github.server_url }}/${{ github.repository }}.git
|
|
||||||
VALIDATE_BRANCH: ${{ steps.base-ref.outputs.result }}
|
|
35
.github/workflows/.test-prepare.yml
vendored
35
.github/workflows/.test-prepare.yml
vendored
|
@ -1,35 +0,0 @@
|
||||||
# reusable workflow
|
|
||||||
name: .test-prepare
|
|
||||||
|
|
||||||
# TODO: hide reusable workflow from the UI. Tracked in https://github.com/community/community/discussions/12025
|
|
||||||
|
|
||||||
on:
|
|
||||||
workflow_call:
|
|
||||||
outputs:
|
|
||||||
matrix:
|
|
||||||
description: Test matrix
|
|
||||||
value: ${{ jobs.run.outputs.matrix }}
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
run:
|
|
||||||
runs-on: ubuntu-20.04
|
|
||||||
outputs:
|
|
||||||
matrix: ${{ steps.set.outputs.matrix }}
|
|
||||||
steps:
|
|
||||||
-
|
|
||||||
name: Checkout
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
-
|
|
||||||
name: Create matrix
|
|
||||||
id: set
|
|
||||||
uses: actions/github-script@v7
|
|
||||||
with:
|
|
||||||
script: |
|
|
||||||
let matrix = ['graphdriver'];
|
|
||||||
if ("${{ contains(github.event.pull_request.labels.*.name, 'containerd-integration') || github.event_name != 'pull_request' }}" == "true") {
|
|
||||||
matrix.push('snapshotter');
|
|
||||||
}
|
|
||||||
await core.group(`Set matrix`, async () => {
|
|
||||||
core.info(`matrix: ${JSON.stringify(matrix)}`);
|
|
||||||
core.setOutput('matrix', JSON.stringify(matrix));
|
|
||||||
});
|
|
445
.github/workflows/.test.yml
vendored
445
.github/workflows/.test.yml
vendored
|
@ -1,445 +0,0 @@
|
||||||
# reusable workflow
|
|
||||||
name: .test
|
|
||||||
|
|
||||||
# TODO: hide reusable workflow from the UI. Tracked in https://github.com/community/community/discussions/12025
|
|
||||||
|
|
||||||
on:
|
|
||||||
workflow_call:
|
|
||||||
inputs:
|
|
||||||
storage:
|
|
||||||
required: true
|
|
||||||
type: string
|
|
||||||
default: "graphdriver"
|
|
||||||
|
|
||||||
env:
|
|
||||||
GO_VERSION: "1.21.9"
|
|
||||||
GOTESTLIST_VERSION: v0.3.1
|
|
||||||
TESTSTAT_VERSION: v0.1.25
|
|
||||||
ITG_CLI_MATRIX_SIZE: 6
|
|
||||||
DOCKER_EXPERIMENTAL: 1
|
|
||||||
DOCKER_GRAPHDRIVER: ${{ inputs.storage == 'snapshotter' && 'overlayfs' || 'overlay2' }}
|
|
||||||
TEST_INTEGRATION_USE_SNAPSHOTTER: ${{ inputs.storage == 'snapshotter' && '1' || '' }}
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
unit:
|
|
||||||
runs-on: ubuntu-20.04
|
|
||||||
continue-on-error: ${{ github.event_name != 'pull_request' }}
|
|
||||||
timeout-minutes: 120
|
|
||||||
steps:
|
|
||||||
-
|
|
||||||
name: Checkout
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
-
|
|
||||||
name: Set up runner
|
|
||||||
uses: ./.github/actions/setup-runner
|
|
||||||
-
|
|
||||||
name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@v3
|
|
||||||
-
|
|
||||||
name: Build dev image
|
|
||||||
uses: docker/bake-action@v4
|
|
||||||
with:
|
|
||||||
targets: dev
|
|
||||||
set: |
|
|
||||||
dev.cache-from=type=gha,scope=dev
|
|
||||||
-
|
|
||||||
name: Test
|
|
||||||
run: |
|
|
||||||
make -o build test-unit
|
|
||||||
-
|
|
||||||
name: Prepare reports
|
|
||||||
if: always()
|
|
||||||
run: |
|
|
||||||
mkdir -p bundles /tmp/reports
|
|
||||||
find bundles -path '*/root/*overlay2' -prune -o -type f \( -name '*-report.json' -o -name '*.log' -o -name '*.out' -o -name '*.prof' -o -name '*-report.xml' \) -print | xargs sudo tar -czf /tmp/reports.tar.gz
|
|
||||||
tar -xzf /tmp/reports.tar.gz -C /tmp/reports
|
|
||||||
sudo chown -R $(id -u):$(id -g) /tmp/reports
|
|
||||||
tree -nh /tmp/reports
|
|
||||||
-
|
|
||||||
name: Send to Codecov
|
|
||||||
uses: codecov/codecov-action@v4
|
|
||||||
with:
|
|
||||||
directory: ./bundles
|
|
||||||
env_vars: RUNNER_OS
|
|
||||||
flags: unit
|
|
||||||
token: ${{ secrets.CODECOV_TOKEN }} # used to upload coverage reports: https://github.com/moby/buildkit/pull/4660#issue-2142122533
|
|
||||||
-
|
|
||||||
name: Upload reports
|
|
||||||
if: always()
|
|
||||||
uses: actions/upload-artifact@v4
|
|
||||||
with:
|
|
||||||
name: test-reports-unit-${{ inputs.storage }}
|
|
||||||
path: /tmp/reports/*
|
|
||||||
retention-days: 1
|
|
||||||
|
|
||||||
unit-report:
|
|
||||||
runs-on: ubuntu-20.04
|
|
||||||
continue-on-error: ${{ github.event_name != 'pull_request' }}
|
|
||||||
timeout-minutes: 10
|
|
||||||
if: always()
|
|
||||||
needs:
|
|
||||||
- unit
|
|
||||||
steps:
|
|
||||||
-
|
|
||||||
name: Set up Go
|
|
||||||
uses: actions/setup-go@v5
|
|
||||||
with:
|
|
||||||
go-version: ${{ env.GO_VERSION }}
|
|
||||||
-
|
|
||||||
name: Download reports
|
|
||||||
uses: actions/download-artifact@v4
|
|
||||||
with:
|
|
||||||
name: test-reports-unit-${{ inputs.storage }}
|
|
||||||
path: /tmp/reports
|
|
||||||
-
|
|
||||||
name: Install teststat
|
|
||||||
run: |
|
|
||||||
go install github.com/vearutop/teststat@${{ env.TESTSTAT_VERSION }}
|
|
||||||
-
|
|
||||||
name: Create summary
|
|
||||||
run: |
|
|
||||||
find /tmp/reports -type f -name '*-go-test-report.json' -exec teststat -markdown {} \+ >> $GITHUB_STEP_SUMMARY
|
|
||||||
|
|
||||||
docker-py:
|
|
||||||
runs-on: ubuntu-20.04
|
|
||||||
continue-on-error: ${{ github.event_name != 'pull_request' }}
|
|
||||||
timeout-minutes: 120
|
|
||||||
steps:
|
|
||||||
-
|
|
||||||
name: Checkout
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
-
|
|
||||||
name: Set up runner
|
|
||||||
uses: ./.github/actions/setup-runner
|
|
||||||
-
|
|
||||||
name: Set up tracing
|
|
||||||
uses: ./.github/actions/setup-tracing
|
|
||||||
-
|
|
||||||
name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@v3
|
|
||||||
-
|
|
||||||
name: Build dev image
|
|
||||||
uses: docker/bake-action@v4
|
|
||||||
with:
|
|
||||||
targets: dev
|
|
||||||
set: |
|
|
||||||
dev.cache-from=type=gha,scope=dev
|
|
||||||
-
|
|
||||||
name: Test
|
|
||||||
run: |
|
|
||||||
make -o build test-docker-py
|
|
||||||
-
|
|
||||||
name: Prepare reports
|
|
||||||
if: always()
|
|
||||||
run: |
|
|
||||||
mkdir -p bundles /tmp/reports
|
|
||||||
find bundles -path '*/root/*overlay2' -prune -o -type f \( -name '*-report.json' -o -name '*.log' -o -name '*.out' -o -name '*.prof' -o -name '*-report.xml' \) -print | xargs sudo tar -czf /tmp/reports.tar.gz
|
|
||||||
tar -xzf /tmp/reports.tar.gz -C /tmp/reports
|
|
||||||
sudo chown -R $(id -u):$(id -g) /tmp/reports
|
|
||||||
tree -nh /tmp/reports
|
|
||||||
|
|
||||||
curl -sSLf localhost:16686/api/traces?service=integration-test-client > /tmp/reports/jaeger-trace.json
|
|
||||||
-
|
|
||||||
name: Test daemon logs
|
|
||||||
if: always()
|
|
||||||
run: |
|
|
||||||
cat bundles/test-docker-py/docker.log
|
|
||||||
-
|
|
||||||
name: Upload reports
|
|
||||||
if: always()
|
|
||||||
uses: actions/upload-artifact@v4
|
|
||||||
with:
|
|
||||||
name: test-reports-docker-py-${{ inputs.storage }}
|
|
||||||
path: /tmp/reports/*
|
|
||||||
retention-days: 1
|
|
||||||
|
|
||||||
integration-flaky:
|
|
||||||
runs-on: ubuntu-20.04
|
|
||||||
continue-on-error: ${{ github.event_name != 'pull_request' }}
|
|
||||||
timeout-minutes: 120
|
|
||||||
steps:
|
|
||||||
-
|
|
||||||
name: Checkout
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
-
|
|
||||||
name: Set up runner
|
|
||||||
uses: ./.github/actions/setup-runner
|
|
||||||
-
|
|
||||||
name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@v3
|
|
||||||
-
|
|
||||||
name: Build dev image
|
|
||||||
uses: docker/bake-action@v4
|
|
||||||
with:
|
|
||||||
targets: dev
|
|
||||||
set: |
|
|
||||||
dev.cache-from=type=gha,scope=dev
|
|
||||||
-
|
|
||||||
name: Test
|
|
||||||
run: |
|
|
||||||
make -o build test-integration-flaky
|
|
||||||
env:
|
|
||||||
TEST_SKIP_INTEGRATION_CLI: 1
|
|
||||||
|
|
||||||
integration:
|
|
||||||
runs-on: ${{ matrix.os }}
|
|
||||||
continue-on-error: ${{ github.event_name != 'pull_request' }}
|
|
||||||
timeout-minutes: 120
|
|
||||||
strategy:
|
|
||||||
fail-fast: false
|
|
||||||
matrix:
|
|
||||||
os:
|
|
||||||
- ubuntu-20.04
|
|
||||||
- ubuntu-22.04
|
|
||||||
mode:
|
|
||||||
- ""
|
|
||||||
- rootless
|
|
||||||
- systemd
|
|
||||||
#- rootless-systemd FIXME: https://github.com/moby/moby/issues/44084
|
|
||||||
steps:
|
|
||||||
-
|
|
||||||
name: Checkout
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
-
|
|
||||||
name: Set up runner
|
|
||||||
uses: ./.github/actions/setup-runner
|
|
||||||
-
|
|
||||||
name: Set up tracing
|
|
||||||
uses: ./.github/actions/setup-tracing
|
|
||||||
-
|
|
||||||
name: Prepare
|
|
||||||
run: |
|
|
||||||
CACHE_DEV_SCOPE=dev
|
|
||||||
if [[ "${{ matrix.mode }}" == *"rootless"* ]]; then
|
|
||||||
echo "DOCKER_ROOTLESS=1" >> $GITHUB_ENV
|
|
||||||
fi
|
|
||||||
if [[ "${{ matrix.mode }}" == *"systemd"* ]]; then
|
|
||||||
echo "SYSTEMD=true" >> $GITHUB_ENV
|
|
||||||
CACHE_DEV_SCOPE="${CACHE_DEV_SCOPE}systemd"
|
|
||||||
fi
|
|
||||||
echo "CACHE_DEV_SCOPE=${CACHE_DEV_SCOPE}" >> $GITHUB_ENV
|
|
||||||
-
|
|
||||||
name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@v3
|
|
||||||
-
|
|
||||||
name: Build dev image
|
|
||||||
uses: docker/bake-action@v4
|
|
||||||
with:
|
|
||||||
targets: dev
|
|
||||||
set: |
|
|
||||||
dev.cache-from=type=gha,scope=${{ env.CACHE_DEV_SCOPE }}
|
|
||||||
-
|
|
||||||
name: Test
|
|
||||||
run: |
|
|
||||||
make -o build test-integration
|
|
||||||
env:
|
|
||||||
TEST_SKIP_INTEGRATION_CLI: 1
|
|
||||||
TESTCOVERAGE: 1
|
|
||||||
-
|
|
||||||
name: Prepare reports
|
|
||||||
if: always()
|
|
||||||
run: |
|
|
||||||
reportsName=${{ matrix.os }}
|
|
||||||
if [ -n "${{ matrix.mode }}" ]; then
|
|
||||||
reportsName="$reportsName-${{ matrix.mode }}"
|
|
||||||
fi
|
|
||||||
reportsPath="/tmp/reports/$reportsName"
|
|
||||||
echo "TESTREPORTS_NAME=$reportsName" >> $GITHUB_ENV
|
|
||||||
|
|
||||||
mkdir -p bundles $reportsPath
|
|
||||||
find bundles -path '*/root/*overlay2' -prune -o -type f \( -name '*-report.json' -o -name '*.log' -o -name '*.out' -o -name '*.prof' -o -name '*-report.xml' \) -print | xargs sudo tar -czf /tmp/reports.tar.gz
|
|
||||||
tar -xzf /tmp/reports.tar.gz -C $reportsPath
|
|
||||||
sudo chown -R $(id -u):$(id -g) $reportsPath
|
|
||||||
tree -nh $reportsPath
|
|
||||||
|
|
||||||
curl -sSLf localhost:16686/api/traces?service=integration-test-client > $reportsPath/jaeger-trace.json
|
|
||||||
-
|
|
||||||
name: Send to Codecov
|
|
||||||
uses: codecov/codecov-action@v4
|
|
||||||
with:
|
|
||||||
directory: ./bundles/test-integration
|
|
||||||
env_vars: RUNNER_OS
|
|
||||||
flags: integration,${{ matrix.mode }}
|
|
||||||
token: ${{ secrets.CODECOV_TOKEN }} # used to upload coverage reports: https://github.com/moby/buildkit/pull/4660#issue-2142122533
|
|
||||||
-
|
|
||||||
name: Test daemon logs
|
|
||||||
if: always()
|
|
||||||
run: |
|
|
||||||
cat bundles/test-integration/docker.log
|
|
||||||
-
|
|
||||||
name: Upload reports
|
|
||||||
if: always()
|
|
||||||
uses: actions/upload-artifact@v4
|
|
||||||
with:
|
|
||||||
name: test-reports-integration-${{ inputs.storage }}-${{ env.TESTREPORTS_NAME }}
|
|
||||||
path: /tmp/reports/*
|
|
||||||
retention-days: 1
|
|
||||||
|
|
||||||
integration-report:
|
|
||||||
runs-on: ubuntu-20.04
|
|
||||||
continue-on-error: ${{ github.event_name != 'pull_request' }}
|
|
||||||
timeout-minutes: 10
|
|
||||||
if: always()
|
|
||||||
needs:
|
|
||||||
- integration
|
|
||||||
steps:
|
|
||||||
-
|
|
||||||
name: Set up Go
|
|
||||||
uses: actions/setup-go@v5
|
|
||||||
with:
|
|
||||||
go-version: ${{ env.GO_VERSION }}
|
|
||||||
-
|
|
||||||
name: Download reports
|
|
||||||
uses: actions/download-artifact@v4
|
|
||||||
with:
|
|
||||||
path: /tmp/reports
|
|
||||||
pattern: test-reports-integration-${{ inputs.storage }}-*
|
|
||||||
merge-multiple: true
|
|
||||||
-
|
|
||||||
name: Install teststat
|
|
||||||
run: |
|
|
||||||
go install github.com/vearutop/teststat@${{ env.TESTSTAT_VERSION }}
|
|
||||||
-
|
|
||||||
name: Create summary
|
|
||||||
run: |
|
|
||||||
find /tmp/reports -type f -name '*-go-test-report.json' -exec teststat -markdown {} \+ >> $GITHUB_STEP_SUMMARY
|
|
||||||
|
|
||||||
integration-cli-prepare:
|
|
||||||
runs-on: ubuntu-20.04
|
|
||||||
continue-on-error: ${{ github.event_name != 'pull_request' }}
|
|
||||||
outputs:
|
|
||||||
matrix: ${{ steps.tests.outputs.matrix }}
|
|
||||||
steps:
|
|
||||||
-
|
|
||||||
name: Checkout
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
-
|
|
||||||
name: Set up Go
|
|
||||||
uses: actions/setup-go@v5
|
|
||||||
with:
|
|
||||||
go-version: ${{ env.GO_VERSION }}
|
|
||||||
-
|
|
||||||
name: Install gotestlist
|
|
||||||
run:
|
|
||||||
go install github.com/crazy-max/gotestlist/cmd/gotestlist@${{ env.GOTESTLIST_VERSION }}
|
|
||||||
-
|
|
||||||
name: Create matrix
|
|
||||||
id: tests
|
|
||||||
working-directory: ./integration-cli
|
|
||||||
run: |
|
|
||||||
# This step creates a matrix for integration-cli tests. Tests suites
|
|
||||||
# are distributed in integration-cli job through a matrix. There is
|
|
||||||
# also overrides being added to the matrix like "./..." to run
|
|
||||||
# "Test integration" step exclusively and specific tests suites that
|
|
||||||
# take a long time to run.
|
|
||||||
matrix="$(gotestlist -d ${{ env.ITG_CLI_MATRIX_SIZE }} -o "./..." -o "DockerSwarmSuite" -o "DockerNetworkSuite|DockerExternalVolumeSuite" ./...)"
|
|
||||||
echo "matrix=$matrix" >> $GITHUB_OUTPUT
|
|
||||||
-
|
|
||||||
name: Show matrix
|
|
||||||
run: |
|
|
||||||
echo ${{ steps.tests.outputs.matrix }}
|
|
||||||
|
|
||||||
integration-cli:
|
|
||||||
runs-on: ubuntu-20.04
|
|
||||||
continue-on-error: ${{ github.event_name != 'pull_request' }}
|
|
||||||
timeout-minutes: 120
|
|
||||||
needs:
|
|
||||||
- integration-cli-prepare
|
|
||||||
strategy:
|
|
||||||
fail-fast: false
|
|
||||||
matrix:
|
|
||||||
test: ${{ fromJson(needs.integration-cli-prepare.outputs.matrix) }}
|
|
||||||
steps:
|
|
||||||
-
|
|
||||||
name: Checkout
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
-
|
|
||||||
name: Set up runner
|
|
||||||
uses: ./.github/actions/setup-runner
|
|
||||||
-
|
|
||||||
name: Set up tracing
|
|
||||||
uses: ./.github/actions/setup-tracing
|
|
||||||
-
|
|
||||||
name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@v3
|
|
||||||
-
|
|
||||||
name: Build dev image
|
|
||||||
uses: docker/bake-action@v4
|
|
||||||
with:
|
|
||||||
targets: dev
|
|
||||||
set: |
|
|
||||||
dev.cache-from=type=gha,scope=dev
|
|
||||||
-
|
|
||||||
name: Test
|
|
||||||
run: |
|
|
||||||
make -o build test-integration
|
|
||||||
env:
|
|
||||||
TEST_SKIP_INTEGRATION: 1
|
|
||||||
TESTCOVERAGE: 1
|
|
||||||
TESTFLAGS: "-test.run (${{ matrix.test }})/"
|
|
||||||
-
|
|
||||||
name: Prepare reports
|
|
||||||
if: always()
|
|
||||||
run: |
|
|
||||||
reportsName=$(echo -n "${{ matrix.test }}" | sha256sum | cut -d " " -f 1)
|
|
||||||
reportsPath=/tmp/reports/$reportsName
|
|
||||||
echo "TESTREPORTS_NAME=$reportsName" >> $GITHUB_ENV
|
|
||||||
|
|
||||||
mkdir -p bundles $reportsPath
|
|
||||||
echo "${{ matrix.test }}" | tr -s '|' '\n' | tee -a "$reportsPath/tests.txt"
|
|
||||||
find bundles -path '*/root/*overlay2' -prune -o -type f \( -name '*-report.json' -o -name '*.log' -o -name '*.out' -o -name '*.prof' -o -name '*-report.xml' \) -print | xargs sudo tar -czf /tmp/reports.tar.gz
|
|
||||||
tar -xzf /tmp/reports.tar.gz -C $reportsPath
|
|
||||||
sudo chown -R $(id -u):$(id -g) $reportsPath
|
|
||||||
tree -nh $reportsPath
|
|
||||||
|
|
||||||
curl -sSLf localhost:16686/api/traces?service=integration-test-client > $reportsPath/jaeger-trace.json
|
|
||||||
-
|
|
||||||
name: Send to Codecov
|
|
||||||
uses: codecov/codecov-action@v4
|
|
||||||
with:
|
|
||||||
directory: ./bundles/test-integration
|
|
||||||
env_vars: RUNNER_OS
|
|
||||||
flags: integration-cli
|
|
||||||
token: ${{ secrets.CODECOV_TOKEN }} # used to upload coverage reports: https://github.com/moby/buildkit/pull/4660#issue-2142122533
|
|
||||||
-
|
|
||||||
name: Test daemon logs
|
|
||||||
if: always()
|
|
||||||
run: |
|
|
||||||
cat bundles/test-integration/docker.log
|
|
||||||
-
|
|
||||||
name: Upload reports
|
|
||||||
if: always()
|
|
||||||
uses: actions/upload-artifact@v4
|
|
||||||
with:
|
|
||||||
name: test-reports-integration-cli-${{ inputs.storage }}-${{ env.TESTREPORTS_NAME }}
|
|
||||||
path: /tmp/reports/*
|
|
||||||
retention-days: 1
|
|
||||||
|
|
||||||
integration-cli-report:
|
|
||||||
runs-on: ubuntu-20.04
|
|
||||||
continue-on-error: ${{ github.event_name != 'pull_request' }}
|
|
||||||
timeout-minutes: 10
|
|
||||||
if: always()
|
|
||||||
needs:
|
|
||||||
- integration-cli
|
|
||||||
steps:
|
|
||||||
-
|
|
||||||
name: Set up Go
|
|
||||||
uses: actions/setup-go@v5
|
|
||||||
with:
|
|
||||||
go-version: ${{ env.GO_VERSION }}
|
|
||||||
-
|
|
||||||
name: Download reports
|
|
||||||
uses: actions/download-artifact@v4
|
|
||||||
with:
|
|
||||||
path: /tmp/reports
|
|
||||||
pattern: test-reports-integration-cli-${{ inputs.storage }}-*
|
|
||||||
merge-multiple: true
|
|
||||||
-
|
|
||||||
name: Install teststat
|
|
||||||
run: |
|
|
||||||
go install github.com/vearutop/teststat@${{ env.TESTSTAT_VERSION }}
|
|
||||||
-
|
|
||||||
name: Create summary
|
|
||||||
run: |
|
|
||||||
find /tmp/reports -type f -name '*-go-test-report.json' -exec teststat -markdown {} \+ >> $GITHUB_STEP_SUMMARY
|
|
551
.github/workflows/.windows.yml
vendored
551
.github/workflows/.windows.yml
vendored
|
@ -1,551 +0,0 @@
|
||||||
# reusable workflow
|
|
||||||
name: .windows
|
|
||||||
|
|
||||||
# TODO: hide reusable workflow from the UI. Tracked in https://github.com/community/community/discussions/12025
|
|
||||||
|
|
||||||
on:
|
|
||||||
workflow_call:
|
|
||||||
inputs:
|
|
||||||
os:
|
|
||||||
required: true
|
|
||||||
type: string
|
|
||||||
storage:
|
|
||||||
required: true
|
|
||||||
type: string
|
|
||||||
default: "graphdriver"
|
|
||||||
send_coverage:
|
|
||||||
required: false
|
|
||||||
type: boolean
|
|
||||||
default: false
|
|
||||||
|
|
||||||
env:
|
|
||||||
GO_VERSION: "1.21.9"
|
|
||||||
GOTESTLIST_VERSION: v0.3.1
|
|
||||||
TESTSTAT_VERSION: v0.1.25
|
|
||||||
WINDOWS_BASE_IMAGE: mcr.microsoft.com/windows/servercore
|
|
||||||
WINDOWS_BASE_TAG_2019: ltsc2019
|
|
||||||
WINDOWS_BASE_TAG_2022: ltsc2022
|
|
||||||
TEST_IMAGE_NAME: moby:test
|
|
||||||
TEST_CTN_NAME: moby
|
|
||||||
DOCKER_BUILDKIT: 0
|
|
||||||
ITG_CLI_MATRIX_SIZE: 6
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
build:
|
|
||||||
runs-on: ${{ inputs.os }}
|
|
||||||
env:
|
|
||||||
GOPATH: ${{ github.workspace }}\go
|
|
||||||
GOBIN: ${{ github.workspace }}\go\bin
|
|
||||||
BIN_OUT: ${{ github.workspace }}\out
|
|
||||||
defaults:
|
|
||||||
run:
|
|
||||||
working-directory: ${{ env.GOPATH }}/src/github.com/docker/docker
|
|
||||||
steps:
|
|
||||||
-
|
|
||||||
name: Checkout
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
with:
|
|
||||||
path: ${{ env.GOPATH }}/src/github.com/docker/docker
|
|
||||||
-
|
|
||||||
name: Env
|
|
||||||
run: |
|
|
||||||
Get-ChildItem Env: | Out-String
|
|
||||||
-
|
|
||||||
name: Init
|
|
||||||
run: |
|
|
||||||
New-Item -ItemType "directory" -Path "${{ github.workspace }}\go-build"
|
|
||||||
New-Item -ItemType "directory" -Path "${{ github.workspace }}\go\pkg\mod"
|
|
||||||
If ("${{ inputs.os }}" -eq "windows-2019") {
|
|
||||||
echo "WINDOWS_BASE_IMAGE_TAG=${{ env.WINDOWS_BASE_TAG_2019 }}" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append
|
|
||||||
} ElseIf ("${{ inputs.os }}" -eq "windows-2022") {
|
|
||||||
echo "WINDOWS_BASE_IMAGE_TAG=${{ env.WINDOWS_BASE_TAG_2022 }}" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append
|
|
||||||
}
|
|
||||||
-
|
|
||||||
name: Cache
|
|
||||||
uses: actions/cache@v4
|
|
||||||
with:
|
|
||||||
path: |
|
|
||||||
~\AppData\Local\go-build
|
|
||||||
~\go\pkg\mod
|
|
||||||
${{ github.workspace }}\go-build
|
|
||||||
${{ env.GOPATH }}\pkg\mod
|
|
||||||
key: ${{ inputs.os }}-${{ github.job }}-${{ hashFiles('**/vendor.sum') }}
|
|
||||||
restore-keys: |
|
|
||||||
${{ inputs.os }}-${{ github.job }}-
|
|
||||||
-
|
|
||||||
name: Docker info
|
|
||||||
run: |
|
|
||||||
docker info
|
|
||||||
-
|
|
||||||
name: Build base image
|
|
||||||
run: |
|
|
||||||
& docker build `
|
|
||||||
--build-arg WINDOWS_BASE_IMAGE `
|
|
||||||
--build-arg WINDOWS_BASE_IMAGE_TAG `
|
|
||||||
--build-arg GO_VERSION `
|
|
||||||
-t ${{ env.TEST_IMAGE_NAME }} `
|
|
||||||
-f Dockerfile.windows .
|
|
||||||
-
|
|
||||||
name: Build binaries
|
|
||||||
run: |
|
|
||||||
& docker run --name ${{ env.TEST_CTN_NAME }} -e "DOCKER_GITCOMMIT=${{ github.sha }}" `
|
|
||||||
-v "${{ github.workspace }}\go-build:C:\Users\ContainerAdministrator\AppData\Local\go-build" `
|
|
||||||
-v "${{ github.workspace }}\go\pkg\mod:C:\gopath\pkg\mod" `
|
|
||||||
${{ env.TEST_IMAGE_NAME }} hack\make.ps1 -Daemon -Client
|
|
||||||
-
|
|
||||||
name: Copy artifacts
|
|
||||||
run: |
|
|
||||||
New-Item -ItemType "directory" -Path "${{ env.BIN_OUT }}"
|
|
||||||
docker cp "${{ env.TEST_CTN_NAME }}`:c`:\gopath\src\github.com\docker\docker\bundles\docker.exe" ${{ env.BIN_OUT }}\
|
|
||||||
docker cp "${{ env.TEST_CTN_NAME }}`:c`:\gopath\src\github.com\docker\docker\bundles\dockerd.exe" ${{ env.BIN_OUT }}\
|
|
||||||
docker cp "${{ env.TEST_CTN_NAME }}`:c`:\gopath\bin\gotestsum.exe" ${{ env.BIN_OUT }}\
|
|
||||||
docker cp "${{ env.TEST_CTN_NAME }}`:c`:\containerd\bin\containerd.exe" ${{ env.BIN_OUT }}\
|
|
||||||
docker cp "${{ env.TEST_CTN_NAME }}`:c`:\containerd\bin\containerd-shim-runhcs-v1.exe" ${{ env.BIN_OUT }}\
|
|
||||||
-
|
|
||||||
name: Upload artifacts
|
|
||||||
uses: actions/upload-artifact@v4
|
|
||||||
with:
|
|
||||||
name: build-${{ inputs.storage }}-${{ inputs.os }}
|
|
||||||
path: ${{ env.BIN_OUT }}/*
|
|
||||||
if-no-files-found: error
|
|
||||||
retention-days: 2
|
|
||||||
|
|
||||||
unit-test:
|
|
||||||
runs-on: ${{ inputs.os }}
|
|
||||||
timeout-minutes: 120
|
|
||||||
env:
|
|
||||||
GOPATH: ${{ github.workspace }}\go
|
|
||||||
GOBIN: ${{ github.workspace }}\go\bin
|
|
||||||
defaults:
|
|
||||||
run:
|
|
||||||
working-directory: ${{ env.GOPATH }}/src/github.com/docker/docker
|
|
||||||
steps:
|
|
||||||
-
|
|
||||||
name: Checkout
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
with:
|
|
||||||
path: ${{ env.GOPATH }}/src/github.com/docker/docker
|
|
||||||
-
|
|
||||||
name: Env
|
|
||||||
run: |
|
|
||||||
Get-ChildItem Env: | Out-String
|
|
||||||
-
|
|
||||||
name: Init
|
|
||||||
run: |
|
|
||||||
New-Item -ItemType "directory" -Path "${{ github.workspace }}\go-build"
|
|
||||||
New-Item -ItemType "directory" -Path "${{ github.workspace }}\go\pkg\mod"
|
|
||||||
New-Item -ItemType "directory" -Path "bundles"
|
|
||||||
If ("${{ inputs.os }}" -eq "windows-2019") {
|
|
||||||
echo "WINDOWS_BASE_IMAGE_TAG=${{ env.WINDOWS_BASE_TAG_2019 }}" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append
|
|
||||||
} ElseIf ("${{ inputs.os }}" -eq "windows-2022") {
|
|
||||||
echo "WINDOWS_BASE_IMAGE_TAG=${{ env.WINDOWS_BASE_TAG_2022 }}" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append
|
|
||||||
}
|
|
||||||
-
|
|
||||||
name: Cache
|
|
||||||
uses: actions/cache@v4
|
|
||||||
with:
|
|
||||||
path: |
|
|
||||||
~\AppData\Local\go-build
|
|
||||||
~\go\pkg\mod
|
|
||||||
${{ github.workspace }}\go-build
|
|
||||||
${{ env.GOPATH }}\pkg\mod
|
|
||||||
key: ${{ inputs.os }}-${{ github.job }}-${{ hashFiles('**/vendor.sum') }}
|
|
||||||
restore-keys: |
|
|
||||||
${{ inputs.os }}-${{ github.job }}-
|
|
||||||
-
|
|
||||||
name: Docker info
|
|
||||||
run: |
|
|
||||||
docker info
|
|
||||||
-
|
|
||||||
name: Build base image
|
|
||||||
run: |
|
|
||||||
& docker build `
|
|
||||||
--build-arg WINDOWS_BASE_IMAGE `
|
|
||||||
--build-arg WINDOWS_BASE_IMAGE_TAG `
|
|
||||||
--build-arg GO_VERSION `
|
|
||||||
-t ${{ env.TEST_IMAGE_NAME }} `
|
|
||||||
-f Dockerfile.windows .
|
|
||||||
-
|
|
||||||
name: Test
|
|
||||||
run: |
|
|
||||||
& docker run --name ${{ env.TEST_CTN_NAME }} -e "DOCKER_GITCOMMIT=${{ github.sha }}" `
|
|
||||||
-v "${{ github.workspace }}\go-build:C:\Users\ContainerAdministrator\AppData\Local\go-build" `
|
|
||||||
-v "${{ github.workspace }}\go\pkg\mod:C:\gopath\pkg\mod" `
|
|
||||||
-v "${{ env.GOPATH }}\src\github.com\docker\docker\bundles:C:\gopath\src\github.com\docker\docker\bundles" `
|
|
||||||
${{ env.TEST_IMAGE_NAME }} hack\make.ps1 -TestUnit
|
|
||||||
-
|
|
||||||
name: Send to Codecov
|
|
||||||
if: inputs.send_coverage
|
|
||||||
uses: codecov/codecov-action@v4
|
|
||||||
with:
|
|
||||||
working-directory: ${{ env.GOPATH }}\src\github.com\docker\docker
|
|
||||||
directory: bundles
|
|
||||||
env_vars: RUNNER_OS
|
|
||||||
flags: unit
|
|
||||||
token: ${{ secrets.CODECOV_TOKEN }} # used to upload coverage reports: https://github.com/moby/buildkit/pull/4660#issue-2142122533
|
|
||||||
-
|
|
||||||
name: Upload reports
|
|
||||||
if: always()
|
|
||||||
uses: actions/upload-artifact@v4
|
|
||||||
with:
|
|
||||||
name: ${{ inputs.os }}-${{ inputs.storage }}-unit-reports
|
|
||||||
path: ${{ env.GOPATH }}\src\github.com\docker\docker\bundles\*
|
|
||||||
retention-days: 1
|
|
||||||
|
|
||||||
unit-test-report:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
if: always()
|
|
||||||
needs:
|
|
||||||
- unit-test
|
|
||||||
steps:
|
|
||||||
-
|
|
||||||
name: Set up Go
|
|
||||||
uses: actions/setup-go@v5
|
|
||||||
with:
|
|
||||||
go-version: ${{ env.GO_VERSION }}
|
|
||||||
-
|
|
||||||
name: Download artifacts
|
|
||||||
uses: actions/download-artifact@v4
|
|
||||||
with:
|
|
||||||
name: ${{ inputs.os }}-${{ inputs.storage }}-unit-reports
|
|
||||||
path: /tmp/artifacts
|
|
||||||
-
|
|
||||||
name: Install teststat
|
|
||||||
run: |
|
|
||||||
go install github.com/vearutop/teststat@${{ env.TESTSTAT_VERSION }}
|
|
||||||
-
|
|
||||||
name: Create summary
|
|
||||||
run: |
|
|
||||||
find /tmp/artifacts -type f -name '*-go-test-report.json' -exec teststat -markdown {} \+ >> $GITHUB_STEP_SUMMARY
|
|
||||||
|
|
||||||
integration-test-prepare:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
outputs:
|
|
||||||
matrix: ${{ steps.tests.outputs.matrix }}
|
|
||||||
steps:
|
|
||||||
-
|
|
||||||
name: Checkout
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
-
|
|
||||||
name: Set up Go
|
|
||||||
uses: actions/setup-go@v5
|
|
||||||
with:
|
|
||||||
go-version: ${{ env.GO_VERSION }}
|
|
||||||
-
|
|
||||||
name: Install gotestlist
|
|
||||||
run:
|
|
||||||
go install github.com/crazy-max/gotestlist/cmd/gotestlist@${{ env.GOTESTLIST_VERSION }}
|
|
||||||
-
|
|
||||||
name: Create matrix
|
|
||||||
id: tests
|
|
||||||
working-directory: ./integration-cli
|
|
||||||
run: |
|
|
||||||
# This step creates a matrix for integration-cli tests. Tests suites
|
|
||||||
# are distributed in integration-test job through a matrix. There is
|
|
||||||
# also an override being added to the matrix like "./..." to run
|
|
||||||
# "Test integration" step exclusively.
|
|
||||||
matrix="$(gotestlist -d ${{ env.ITG_CLI_MATRIX_SIZE }} -o "./..." ./...)"
|
|
||||||
echo "matrix=$matrix" >> $GITHUB_OUTPUT
|
|
||||||
-
|
|
||||||
name: Show matrix
|
|
||||||
run: |
|
|
||||||
echo ${{ steps.tests.outputs.matrix }}
|
|
||||||
|
|
||||||
integration-test:
|
|
||||||
runs-on: ${{ inputs.os }}
|
|
||||||
continue-on-error: ${{ inputs.storage == 'snapshotter' && github.event_name != 'pull_request' }}
|
|
||||||
timeout-minutes: 120
|
|
||||||
needs:
|
|
||||||
- build
|
|
||||||
- integration-test-prepare
|
|
||||||
strategy:
|
|
||||||
fail-fast: false
|
|
||||||
matrix:
|
|
||||||
storage:
|
|
||||||
- ${{ inputs.storage }}
|
|
||||||
runtime:
|
|
||||||
- builtin
|
|
||||||
- containerd
|
|
||||||
test: ${{ fromJson(needs.integration-test-prepare.outputs.matrix) }}
|
|
||||||
exclude:
|
|
||||||
- storage: snapshotter
|
|
||||||
runtime: builtin
|
|
||||||
env:
|
|
||||||
GOPATH: ${{ github.workspace }}\go
|
|
||||||
GOBIN: ${{ github.workspace }}\go\bin
|
|
||||||
BIN_OUT: ${{ github.workspace }}\out
|
|
||||||
defaults:
|
|
||||||
run:
|
|
||||||
working-directory: ${{ env.GOPATH }}/src/github.com/docker/docker
|
|
||||||
steps:
|
|
||||||
-
|
|
||||||
name: Checkout
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
with:
|
|
||||||
path: ${{ env.GOPATH }}/src/github.com/docker/docker
|
|
||||||
-
|
|
||||||
name: Set up Jaeger
|
|
||||||
run: |
|
|
||||||
# Jaeger is set up on Linux through the setup-tracing action. If you update Jaeger here, don't forget to
|
|
||||||
# update the version set in .github/actions/setup-tracing/action.yml.
|
|
||||||
Invoke-WebRequest -Uri "https://github.com/jaegertracing/jaeger/releases/download/v1.46.0/jaeger-1.46.0-windows-amd64.tar.gz" -OutFile ".\jaeger-1.46.0-windows-amd64.tar.gz"
|
|
||||||
tar -zxvf ".\jaeger-1.46.0-windows-amd64.tar.gz"
|
|
||||||
Start-Process '.\jaeger-1.46.0-windows-amd64\jaeger-all-in-one.exe'
|
|
||||||
echo "OTEL_EXPORTER_OTLP_ENDPOINT=http://127.0.0.1:4318" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append
|
|
||||||
shell: pwsh
|
|
||||||
-
|
|
||||||
name: Env
|
|
||||||
run: |
|
|
||||||
Get-ChildItem Env: | Out-String
|
|
||||||
-
|
|
||||||
name: Download artifacts
|
|
||||||
uses: actions/download-artifact@v4
|
|
||||||
with:
|
|
||||||
name: build-${{ inputs.storage }}-${{ inputs.os }}
|
|
||||||
path: ${{ env.BIN_OUT }}
|
|
||||||
-
|
|
||||||
name: Init
|
|
||||||
run: |
|
|
||||||
New-Item -ItemType "directory" -Path "bundles"
|
|
||||||
If ("${{ inputs.os }}" -eq "windows-2019") {
|
|
||||||
echo "WINDOWS_BASE_IMAGE_TAG=${{ env.WINDOWS_BASE_TAG_2019 }}" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append
|
|
||||||
} ElseIf ("${{ inputs.os }}" -eq "windows-2022") {
|
|
||||||
echo "WINDOWS_BASE_IMAGE_TAG=${{ env.WINDOWS_BASE_TAG_2022 }}" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append
|
|
||||||
}
|
|
||||||
Write-Output "${{ env.BIN_OUT }}" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
|
|
||||||
|
|
||||||
$testName = ([System.BitConverter]::ToString((New-Object System.Security.Cryptography.SHA256Managed).ComputeHash([System.Text.Encoding]::UTF8.GetBytes("${{ matrix.test }}"))) -replace '-').ToLower()
|
|
||||||
echo "TESTREPORTS_NAME=$testName" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append
|
|
||||||
-
|
|
||||||
# removes docker service that is currently installed on the runner. we
|
|
||||||
# could use Uninstall-Package but not yet available on Windows runners.
|
|
||||||
# more info: https://github.com/actions/virtual-environments/blob/d3a5bad25f3b4326c5666bab0011ac7f1beec95e/images/win/scripts/Installers/Install-Docker.ps1#L11
|
|
||||||
name: Removing current daemon
|
|
||||||
run: |
|
|
||||||
if (Get-Service docker -ErrorAction SilentlyContinue) {
|
|
||||||
$dockerVersion = (docker version -f "{{.Server.Version}}")
|
|
||||||
Write-Host "Current installed Docker version: $dockerVersion"
|
|
||||||
# remove service
|
|
||||||
Stop-Service -Force -Name docker
|
|
||||||
Remove-Service -Name docker
|
|
||||||
# removes event log entry. we could use "Remove-EventLog -LogName -Source docker"
|
|
||||||
# but this cmd is not available atm
|
|
||||||
$ErrorActionPreference = "SilentlyContinue"
|
|
||||||
& reg delete "HKLM\SYSTEM\CurrentControlSet\Services\EventLog\Application\docker" /f 2>&1 | Out-Null
|
|
||||||
$ErrorActionPreference = "Stop"
|
|
||||||
Write-Host "Service removed"
|
|
||||||
}
|
|
||||||
-
|
|
||||||
name: Starting containerd
|
|
||||||
if: matrix.runtime == 'containerd'
|
|
||||||
run: |
|
|
||||||
Write-Host "Generating config"
|
|
||||||
& "${{ env.BIN_OUT }}\containerd.exe" config default | Out-File "$env:TEMP\ctn.toml" -Encoding ascii
|
|
||||||
Write-Host "Creating service"
|
|
||||||
New-Item -ItemType Directory "$env:TEMP\ctn-root" -ErrorAction SilentlyContinue | Out-Null
|
|
||||||
New-Item -ItemType Directory "$env:TEMP\ctn-state" -ErrorAction SilentlyContinue | Out-Null
|
|
||||||
Start-Process -Wait "${{ env.BIN_OUT }}\containerd.exe" `
|
|
||||||
-ArgumentList "--log-level=debug", `
|
|
||||||
"--config=$env:TEMP\ctn.toml", `
|
|
||||||
"--address=\\.\pipe\containerd-containerd", `
|
|
||||||
"--root=$env:TEMP\ctn-root", `
|
|
||||||
"--state=$env:TEMP\ctn-state", `
|
|
||||||
"--log-file=$env:TEMP\ctn.log", `
|
|
||||||
"--register-service"
|
|
||||||
Write-Host "Starting service"
|
|
||||||
Start-Service -Name containerd
|
|
||||||
Start-Sleep -Seconds 5
|
|
||||||
Write-Host "Service started successfully!"
|
|
||||||
-
|
|
||||||
name: Starting test daemon
|
|
||||||
run: |
|
|
||||||
Write-Host "Creating service"
|
|
||||||
If ("${{ matrix.runtime }}" -eq "containerd") {
|
|
||||||
$runtimeArg="--containerd=\\.\pipe\containerd-containerd"
|
|
||||||
echo "DOCKER_WINDOWS_CONTAINERD_RUNTIME=1" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append
|
|
||||||
}
|
|
||||||
New-Item -ItemType Directory "$env:TEMP\moby-root" -ErrorAction SilentlyContinue | Out-Null
|
|
||||||
New-Item -ItemType Directory "$env:TEMP\moby-exec" -ErrorAction SilentlyContinue | Out-Null
|
|
||||||
Start-Process -Wait -NoNewWindow "${{ env.BIN_OUT }}\dockerd" `
|
|
||||||
-ArgumentList $runtimeArg, "--debug", `
|
|
||||||
"--host=npipe:////./pipe/docker_engine", `
|
|
||||||
"--data-root=$env:TEMP\moby-root", `
|
|
||||||
"--exec-root=$env:TEMP\moby-exec", `
|
|
||||||
"--pidfile=$env:TEMP\docker.pid", `
|
|
||||||
"--register-service"
|
|
||||||
If ("${{ inputs.storage }}" -eq "snapshotter") {
|
|
||||||
# Make the env-var visible to the service-managed dockerd, as there's no CLI flag for this option.
|
|
||||||
& reg add "HKLM\SYSTEM\CurrentControlSet\Services\docker" /v Environment /t REG_MULTI_SZ /s '@' /d TEST_INTEGRATION_USE_SNAPSHOTTER=1
|
|
||||||
echo "TEST_INTEGRATION_USE_SNAPSHOTTER=1" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append
|
|
||||||
}
|
|
||||||
Write-Host "Starting service"
|
|
||||||
Start-Service -Name docker
|
|
||||||
Write-Host "Service started successfully!"
|
|
||||||
-
|
|
||||||
name: Waiting for test daemon to start
|
|
||||||
run: |
|
|
||||||
$tries=20
|
|
||||||
Write-Host "Waiting for the test daemon to start..."
|
|
||||||
While ($true) {
|
|
||||||
$ErrorActionPreference = "SilentlyContinue"
|
|
||||||
& "${{ env.BIN_OUT }}\docker" version
|
|
||||||
$ErrorActionPreference = "Stop"
|
|
||||||
If ($LastExitCode -eq 0) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
$tries--
|
|
||||||
If ($tries -le 0) {
|
|
||||||
Throw "Failed to get a response from the daemon"
|
|
||||||
}
|
|
||||||
Write-Host -NoNewline "."
|
|
||||||
Start-Sleep -Seconds 1
|
|
||||||
}
|
|
||||||
Write-Host "Test daemon started and replied!"
|
|
||||||
env:
|
|
||||||
DOCKER_HOST: npipe:////./pipe/docker_engine
|
|
||||||
-
|
|
||||||
name: Docker info
|
|
||||||
run: |
|
|
||||||
& "${{ env.BIN_OUT }}\docker" info
|
|
||||||
env:
|
|
||||||
DOCKER_HOST: npipe:////./pipe/docker_engine
|
|
||||||
-
|
|
||||||
name: Building contrib/busybox
|
|
||||||
run: |
|
|
||||||
& "${{ env.BIN_OUT }}\docker" build -t busybox `
|
|
||||||
--build-arg WINDOWS_BASE_IMAGE `
|
|
||||||
--build-arg WINDOWS_BASE_IMAGE_TAG `
|
|
||||||
.\contrib\busybox\
|
|
||||||
env:
|
|
||||||
DOCKER_HOST: npipe:////./pipe/docker_engine
|
|
||||||
-
|
|
||||||
name: List images
|
|
||||||
run: |
|
|
||||||
& "${{ env.BIN_OUT }}\docker" images
|
|
||||||
env:
|
|
||||||
DOCKER_HOST: npipe:////./pipe/docker_engine
|
|
||||||
-
|
|
||||||
name: Set up Go
|
|
||||||
uses: actions/setup-go@v5
|
|
||||||
with:
|
|
||||||
go-version: ${{ env.GO_VERSION }}
|
|
||||||
-
|
|
||||||
name: Test integration
|
|
||||||
if: matrix.test == './...'
|
|
||||||
run: |
|
|
||||||
.\hack\make.ps1 -TestIntegration
|
|
||||||
env:
|
|
||||||
DOCKER_HOST: npipe:////./pipe/docker_engine
|
|
||||||
GO111MODULE: "off"
|
|
||||||
TEST_CLIENT_BINARY: ${{ env.BIN_OUT }}\docker
|
|
||||||
-
|
|
||||||
name: Test integration-cli
|
|
||||||
if: matrix.test != './...'
|
|
||||||
run: |
|
|
||||||
.\hack\make.ps1 -TestIntegrationCli
|
|
||||||
env:
|
|
||||||
DOCKER_HOST: npipe:////./pipe/docker_engine
|
|
||||||
GO111MODULE: "off"
|
|
||||||
TEST_CLIENT_BINARY: ${{ env.BIN_OUT }}\docker
|
|
||||||
INTEGRATION_TESTRUN: ${{ matrix.test }}
|
|
||||||
-
|
|
||||||
name: Send to Codecov
|
|
||||||
if: inputs.send_coverage
|
|
||||||
uses: codecov/codecov-action@v4
|
|
||||||
with:
|
|
||||||
working-directory: ${{ env.GOPATH }}\src\github.com\docker\docker
|
|
||||||
directory: bundles
|
|
||||||
env_vars: RUNNER_OS
|
|
||||||
flags: integration,${{ matrix.runtime }}
|
|
||||||
token: ${{ secrets.CODECOV_TOKEN }} # used to upload coverage reports: https://github.com/moby/buildkit/pull/4660#issue-2142122533
|
|
||||||
-
|
|
||||||
name: Docker info
|
|
||||||
run: |
|
|
||||||
& "${{ env.BIN_OUT }}\docker" info
|
|
||||||
env:
|
|
||||||
DOCKER_HOST: npipe:////./pipe/docker_engine
|
|
||||||
-
|
|
||||||
name: Stop containerd
|
|
||||||
if: always() && matrix.runtime == 'containerd'
|
|
||||||
run: |
|
|
||||||
$ErrorActionPreference = "SilentlyContinue"
|
|
||||||
Stop-Service -Force -Name containerd
|
|
||||||
$ErrorActionPreference = "Stop"
|
|
||||||
-
|
|
||||||
name: Containerd logs
|
|
||||||
if: always() && matrix.runtime == 'containerd'
|
|
||||||
run: |
|
|
||||||
Copy-Item "$env:TEMP\ctn.log" -Destination ".\bundles\containerd.log"
|
|
||||||
Get-Content "$env:TEMP\ctn.log" | Out-Host
|
|
||||||
-
|
|
||||||
name: Stop daemon
|
|
||||||
if: always()
|
|
||||||
run: |
|
|
||||||
$ErrorActionPreference = "SilentlyContinue"
|
|
||||||
Stop-Service -Force -Name docker
|
|
||||||
$ErrorActionPreference = "Stop"
|
|
||||||
-
|
|
||||||
# as the daemon is registered as a service we have to check the event
|
|
||||||
# logs against the docker provider.
|
|
||||||
name: Daemon event logs
|
|
||||||
if: always()
|
|
||||||
run: |
|
|
||||||
Get-WinEvent -ea SilentlyContinue `
|
|
||||||
-FilterHashtable @{ProviderName= "docker"; LogName = "application"} |
|
|
||||||
Sort-Object @{Expression="TimeCreated";Descending=$false} |
|
|
||||||
ForEach-Object {"$($_.TimeCreated.ToUniversalTime().ToString("o")) [$($_.LevelDisplayName)] $($_.Message)"} |
|
|
||||||
Tee-Object -file ".\bundles\daemon.log"
|
|
||||||
-
|
|
||||||
name: Download Jaeger traces
|
|
||||||
if: always()
|
|
||||||
run: |
|
|
||||||
Invoke-WebRequest `
|
|
||||||
-Uri "http://127.0.0.1:16686/api/traces?service=integration-test-client" `
|
|
||||||
-OutFile ".\bundles\jaeger-trace.json"
|
|
||||||
-
|
|
||||||
name: Upload reports
|
|
||||||
if: always()
|
|
||||||
uses: actions/upload-artifact@v4
|
|
||||||
with:
|
|
||||||
name: ${{ inputs.os }}-${{ inputs.storage }}-integration-reports-${{ matrix.runtime }}-${{ env.TESTREPORTS_NAME }}
|
|
||||||
path: ${{ env.GOPATH }}\src\github.com\docker\docker\bundles\*
|
|
||||||
retention-days: 1
|
|
||||||
|
|
||||||
integration-test-report:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
continue-on-error: ${{ inputs.storage == 'snapshotter' && github.event_name != 'pull_request' }}
|
|
||||||
if: always()
|
|
||||||
needs:
|
|
||||||
- integration-test
|
|
||||||
strategy:
|
|
||||||
fail-fast: false
|
|
||||||
matrix:
|
|
||||||
storage:
|
|
||||||
- ${{ inputs.storage }}
|
|
||||||
runtime:
|
|
||||||
- builtin
|
|
||||||
- containerd
|
|
||||||
exclude:
|
|
||||||
- storage: snapshotter
|
|
||||||
runtime: builtin
|
|
||||||
steps:
|
|
||||||
-
|
|
||||||
name: Set up Go
|
|
||||||
uses: actions/setup-go@v5
|
|
||||||
with:
|
|
||||||
go-version: ${{ env.GO_VERSION }}
|
|
||||||
-
|
|
||||||
name: Download reports
|
|
||||||
uses: actions/download-artifact@v4
|
|
||||||
with:
|
|
||||||
path: /tmp/reports
|
|
||||||
pattern: ${{ inputs.os }}-${{ inputs.storage }}-integration-reports-${{ matrix.runtime }}-*
|
|
||||||
merge-multiple: true
|
|
||||||
-
|
|
||||||
name: Install teststat
|
|
||||||
run: |
|
|
||||||
go install github.com/vearutop/teststat@${{ env.TESTSTAT_VERSION }}
|
|
||||||
-
|
|
||||||
name: Create summary
|
|
||||||
run: |
|
|
||||||
find /tmp/reports -type f -name '*-go-test-report.json' -exec teststat -markdown {} \+ >> $GITHUB_STEP_SUMMARY
|
|
191
.github/workflows/bin-image.yml
vendored
191
.github/workflows/bin-image.yml
vendored
|
@ -1,191 +0,0 @@
|
||||||
name: bin-image
|
|
||||||
|
|
||||||
concurrency:
|
|
||||||
group: ${{ github.workflow }}-${{ github.ref }}
|
|
||||||
cancel-in-progress: true
|
|
||||||
|
|
||||||
on:
|
|
||||||
workflow_dispatch:
|
|
||||||
push:
|
|
||||||
branches:
|
|
||||||
- 'master'
|
|
||||||
- '[0-9]+.[0-9]+'
|
|
||||||
tags:
|
|
||||||
- 'v*'
|
|
||||||
pull_request:
|
|
||||||
|
|
||||||
env:
|
|
||||||
MOBYBIN_REPO_SLUG: moby/moby-bin
|
|
||||||
DOCKER_GITCOMMIT: ${{ github.sha }}
|
|
||||||
VERSION: ${{ github.ref }}
|
|
||||||
PLATFORM: Moby Engine - Nightly
|
|
||||||
PRODUCT: moby-bin
|
|
||||||
PACKAGER_NAME: The Moby Project
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
validate-dco:
|
|
||||||
if: ${{ !startsWith(github.ref, 'refs/tags/v') }}
|
|
||||||
uses: ./.github/workflows/.dco.yml
|
|
||||||
|
|
||||||
prepare:
|
|
||||||
runs-on: ubuntu-20.04
|
|
||||||
outputs:
|
|
||||||
platforms: ${{ steps.platforms.outputs.matrix }}
|
|
||||||
steps:
|
|
||||||
-
|
|
||||||
name: Checkout
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
-
|
|
||||||
name: Docker meta
|
|
||||||
id: meta
|
|
||||||
uses: docker/metadata-action@v5
|
|
||||||
with:
|
|
||||||
images: |
|
|
||||||
${{ env.MOBYBIN_REPO_SLUG }}
|
|
||||||
### versioning strategy
|
|
||||||
## push semver tag v23.0.0
|
|
||||||
# moby/moby-bin:23.0.0
|
|
||||||
# moby/moby-bin:latest
|
|
||||||
## push semver prelease tag v23.0.0-beta.1
|
|
||||||
# moby/moby-bin:23.0.0-beta.1
|
|
||||||
## push on master
|
|
||||||
# moby/moby-bin:master
|
|
||||||
## push on 23.0 branch
|
|
||||||
# moby/moby-bin:23.0
|
|
||||||
## any push
|
|
||||||
# moby/moby-bin:sha-ad132f5
|
|
||||||
tags: |
|
|
||||||
type=semver,pattern={{version}}
|
|
||||||
type=ref,event=branch
|
|
||||||
type=ref,event=pr
|
|
||||||
type=sha
|
|
||||||
-
|
|
||||||
name: Rename meta bake definition file
|
|
||||||
# see https://github.com/docker/metadata-action/issues/381#issuecomment-1918607161
|
|
||||||
run: |
|
|
||||||
bakeFile="${{ steps.meta.outputs.bake-file }}"
|
|
||||||
mv "${bakeFile#cwd://}" "/tmp/bake-meta.json"
|
|
||||||
-
|
|
||||||
name: Upload meta bake definition
|
|
||||||
uses: actions/upload-artifact@v4
|
|
||||||
with:
|
|
||||||
name: bake-meta
|
|
||||||
path: /tmp/bake-meta.json
|
|
||||||
if-no-files-found: error
|
|
||||||
retention-days: 1
|
|
||||||
-
|
|
||||||
name: Create platforms matrix
|
|
||||||
id: platforms
|
|
||||||
run: |
|
|
||||||
echo "matrix=$(docker buildx bake bin-image-cross --print | jq -cr '.target."bin-image-cross".platforms')" >>${GITHUB_OUTPUT}
|
|
||||||
|
|
||||||
build:
|
|
||||||
runs-on: ubuntu-20.04
|
|
||||||
needs:
|
|
||||||
- validate-dco
|
|
||||||
- prepare
|
|
||||||
if: always() && !contains(needs.*.result, 'failure') && !contains(needs.*.result, 'cancelled')
|
|
||||||
strategy:
|
|
||||||
fail-fast: false
|
|
||||||
matrix:
|
|
||||||
platform: ${{ fromJson(needs.prepare.outputs.platforms) }}
|
|
||||||
steps:
|
|
||||||
-
|
|
||||||
name: Prepare
|
|
||||||
run: |
|
|
||||||
platform=${{ matrix.platform }}
|
|
||||||
echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
|
|
||||||
-
|
|
||||||
name: Checkout
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
with:
|
|
||||||
fetch-depth: 0
|
|
||||||
-
|
|
||||||
name: Download meta bake definition
|
|
||||||
uses: actions/download-artifact@v4
|
|
||||||
with:
|
|
||||||
name: bake-meta
|
|
||||||
path: /tmp
|
|
||||||
-
|
|
||||||
name: Set up QEMU
|
|
||||||
uses: docker/setup-qemu-action@v3
|
|
||||||
-
|
|
||||||
name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@v3
|
|
||||||
-
|
|
||||||
name: Login to Docker Hub
|
|
||||||
if: github.event_name != 'pull_request' && github.repository == 'moby/moby'
|
|
||||||
uses: docker/login-action@v3
|
|
||||||
with:
|
|
||||||
username: ${{ secrets.DOCKERHUB_MOBYBIN_USERNAME }}
|
|
||||||
password: ${{ secrets.DOCKERHUB_MOBYBIN_TOKEN }}
|
|
||||||
-
|
|
||||||
name: Build
|
|
||||||
id: bake
|
|
||||||
uses: docker/bake-action@v4
|
|
||||||
with:
|
|
||||||
files: |
|
|
||||||
./docker-bake.hcl
|
|
||||||
/tmp/bake-meta.json
|
|
||||||
targets: bin-image
|
|
||||||
set: |
|
|
||||||
*.platform=${{ matrix.platform }}
|
|
||||||
*.output=type=image,name=${{ env.MOBYBIN_REPO_SLUG }},push-by-digest=true,name-canonical=true,push=${{ github.event_name != 'pull_request' && github.repository == 'moby/moby' }}
|
|
||||||
*.tags=
|
|
||||||
-
|
|
||||||
name: Export digest
|
|
||||||
if: github.event_name != 'pull_request' && github.repository == 'moby/moby'
|
|
||||||
run: |
|
|
||||||
mkdir -p /tmp/digests
|
|
||||||
digest="${{ fromJSON(steps.bake.outputs.metadata)['bin-image']['containerimage.digest'] }}"
|
|
||||||
touch "/tmp/digests/${digest#sha256:}"
|
|
||||||
-
|
|
||||||
name: Upload digest
|
|
||||||
if: github.event_name != 'pull_request' && github.repository == 'moby/moby'
|
|
||||||
uses: actions/upload-artifact@v4
|
|
||||||
with:
|
|
||||||
name: digests-${{ env.PLATFORM_PAIR }}
|
|
||||||
path: /tmp/digests/*
|
|
||||||
if-no-files-found: error
|
|
||||||
retention-days: 1
|
|
||||||
|
|
||||||
merge:
|
|
||||||
runs-on: ubuntu-20.04
|
|
||||||
needs:
|
|
||||||
- build
|
|
||||||
if: always() && !contains(needs.*.result, 'failure') && !contains(needs.*.result, 'cancelled') && github.event_name != 'pull_request' && github.repository == 'moby/moby'
|
|
||||||
steps:
|
|
||||||
-
|
|
||||||
name: Download meta bake definition
|
|
||||||
uses: actions/download-artifact@v4
|
|
||||||
with:
|
|
||||||
name: bake-meta
|
|
||||||
path: /tmp
|
|
||||||
-
|
|
||||||
name: Download digests
|
|
||||||
uses: actions/download-artifact@v4
|
|
||||||
with:
|
|
||||||
path: /tmp/digests
|
|
||||||
pattern: digests-*
|
|
||||||
merge-multiple: true
|
|
||||||
-
|
|
||||||
name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@v3
|
|
||||||
-
|
|
||||||
name: Login to Docker Hub
|
|
||||||
uses: docker/login-action@v3
|
|
||||||
with:
|
|
||||||
username: ${{ secrets.DOCKERHUB_MOBYBIN_USERNAME }}
|
|
||||||
password: ${{ secrets.DOCKERHUB_MOBYBIN_TOKEN }}
|
|
||||||
-
|
|
||||||
name: Create manifest list and push
|
|
||||||
working-directory: /tmp/digests
|
|
||||||
run: |
|
|
||||||
set -x
|
|
||||||
docker buildx imagetools create $(jq -cr '.target."docker-metadata-action".tags | map("-t " + .) | join(" ")' /tmp/bake-meta.json) \
|
|
||||||
$(printf '${{ env.MOBYBIN_REPO_SLUG }}@sha256:%s ' *)
|
|
||||||
-
|
|
||||||
name: Inspect image
|
|
||||||
run: |
|
|
||||||
set -x
|
|
||||||
docker buildx imagetools inspect ${{ env.MOBYBIN_REPO_SLUG }}:$(jq -cr '.target."docker-metadata-action".args.DOCKER_META_VERSION' /tmp/bake-meta.json)
|
|
139
.github/workflows/buildkit.yml
vendored
139
.github/workflows/buildkit.yml
vendored
|
@ -1,139 +0,0 @@
|
||||||
name: buildkit
|
|
||||||
|
|
||||||
concurrency:
|
|
||||||
group: ${{ github.workflow }}-${{ github.ref }}
|
|
||||||
cancel-in-progress: true
|
|
||||||
|
|
||||||
on:
|
|
||||||
workflow_dispatch:
|
|
||||||
push:
|
|
||||||
branches:
|
|
||||||
- 'master'
|
|
||||||
- '[0-9]+.[0-9]+'
|
|
||||||
pull_request:
|
|
||||||
|
|
||||||
env:
|
|
||||||
GO_VERSION: "1.21.9"
|
|
||||||
DESTDIR: ./build
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
validate-dco:
|
|
||||||
uses: ./.github/workflows/.dco.yml
|
|
||||||
|
|
||||||
build:
|
|
||||||
runs-on: ubuntu-20.04
|
|
||||||
needs:
|
|
||||||
- validate-dco
|
|
||||||
steps:
|
|
||||||
-
|
|
||||||
name: Checkout
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
-
|
|
||||||
name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@v3
|
|
||||||
-
|
|
||||||
name: Build
|
|
||||||
uses: docker/bake-action@v4
|
|
||||||
with:
|
|
||||||
targets: binary
|
|
||||||
-
|
|
||||||
name: Upload artifacts
|
|
||||||
uses: actions/upload-artifact@v4
|
|
||||||
with:
|
|
||||||
name: binary
|
|
||||||
path: ${{ env.DESTDIR }}
|
|
||||||
if-no-files-found: error
|
|
||||||
retention-days: 1
|
|
||||||
|
|
||||||
test:
|
|
||||||
runs-on: ubuntu-20.04
|
|
||||||
timeout-minutes: 120
|
|
||||||
needs:
|
|
||||||
- build
|
|
||||||
env:
|
|
||||||
TEST_IMAGE_BUILD: "0"
|
|
||||||
TEST_IMAGE_ID: "buildkit-tests"
|
|
||||||
strategy:
|
|
||||||
fail-fast: false
|
|
||||||
matrix:
|
|
||||||
worker:
|
|
||||||
- dockerd
|
|
||||||
- dockerd-containerd
|
|
||||||
pkg:
|
|
||||||
- client
|
|
||||||
- cmd/buildctl
|
|
||||||
- solver
|
|
||||||
- frontend
|
|
||||||
- frontend/dockerfile
|
|
||||||
typ:
|
|
||||||
- integration
|
|
||||||
steps:
|
|
||||||
-
|
|
||||||
name: Prepare
|
|
||||||
run: |
|
|
||||||
disabledFeatures="cache_backend_azblob,cache_backend_s3"
|
|
||||||
if [ "${{ matrix.worker }}" = "dockerd" ]; then
|
|
||||||
disabledFeatures="${disabledFeatures},merge_diff"
|
|
||||||
fi
|
|
||||||
echo "BUILDKIT_TEST_DISABLE_FEATURES=${disabledFeatures}" >> $GITHUB_ENV
|
|
||||||
# Expose `ACTIONS_RUNTIME_TOKEN` and `ACTIONS_CACHE_URL`, which is used
|
|
||||||
# in BuildKit's test suite to skip/unskip cache exporters:
|
|
||||||
# https://github.com/moby/buildkit/blob/567a99433ca23402d5e9b9f9124005d2e59b8861/client/client_test.go#L5407-L5411
|
|
||||||
-
|
|
||||||
name: Expose GitHub Runtime
|
|
||||||
uses: crazy-max/ghaction-github-runtime@v3
|
|
||||||
-
|
|
||||||
name: Checkout
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
with:
|
|
||||||
path: moby
|
|
||||||
-
|
|
||||||
name: BuildKit ref
|
|
||||||
run: |
|
|
||||||
echo "$(./hack/buildkit-ref)" >> $GITHUB_ENV
|
|
||||||
working-directory: moby
|
|
||||||
-
|
|
||||||
name: Checkout BuildKit ${{ env.BUILDKIT_REF }}
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
with:
|
|
||||||
repository: ${{ env.BUILDKIT_REPO }}
|
|
||||||
ref: ${{ env.BUILDKIT_REF }}
|
|
||||||
path: buildkit
|
|
||||||
-
|
|
||||||
name: Set up QEMU
|
|
||||||
uses: docker/setup-qemu-action@v3
|
|
||||||
-
|
|
||||||
name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@v3
|
|
||||||
-
|
|
||||||
name: Download binary artifacts
|
|
||||||
uses: actions/download-artifact@v4
|
|
||||||
with:
|
|
||||||
name: binary
|
|
||||||
path: ./buildkit/build/moby/
|
|
||||||
-
|
|
||||||
name: Update daemon.json
|
|
||||||
run: |
|
|
||||||
sudo rm -f /etc/docker/daemon.json
|
|
||||||
sudo service docker restart
|
|
||||||
docker version
|
|
||||||
docker info
|
|
||||||
-
|
|
||||||
name: Build test image
|
|
||||||
uses: docker/bake-action@v4
|
|
||||||
with:
|
|
||||||
workdir: ./buildkit
|
|
||||||
targets: integration-tests
|
|
||||||
set: |
|
|
||||||
*.output=type=docker,name=${{ env.TEST_IMAGE_ID }}
|
|
||||||
-
|
|
||||||
name: Test
|
|
||||||
run: |
|
|
||||||
./hack/test ${{ matrix.typ }}
|
|
||||||
env:
|
|
||||||
CONTEXT: "."
|
|
||||||
TEST_DOCKERD: "1"
|
|
||||||
TEST_DOCKERD_BINARY: "./build/moby/dockerd"
|
|
||||||
TESTPKGS: "./${{ matrix.pkg }}"
|
|
||||||
TESTFLAGS: "-v --parallel=1 --timeout=30m --run=//worker=${{ matrix.worker }}$"
|
|
||||||
working-directory: buildkit
|
|
113
.github/workflows/ci.yml
vendored
113
.github/workflows/ci.yml
vendored
|
@ -1,113 +0,0 @@
|
||||||
name: ci
|
|
||||||
|
|
||||||
concurrency:
|
|
||||||
group: ${{ github.workflow }}-${{ github.ref }}
|
|
||||||
cancel-in-progress: true
|
|
||||||
|
|
||||||
on:
|
|
||||||
workflow_dispatch:
|
|
||||||
push:
|
|
||||||
branches:
|
|
||||||
- 'master'
|
|
||||||
- '[0-9]+.[0-9]+'
|
|
||||||
pull_request:
|
|
||||||
|
|
||||||
env:
|
|
||||||
DESTDIR: ./build
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
validate-dco:
|
|
||||||
uses: ./.github/workflows/.dco.yml
|
|
||||||
|
|
||||||
build:
|
|
||||||
runs-on: ubuntu-20.04
|
|
||||||
needs:
|
|
||||||
- validate-dco
|
|
||||||
strategy:
|
|
||||||
fail-fast: false
|
|
||||||
matrix:
|
|
||||||
target:
|
|
||||||
- binary
|
|
||||||
- dynbinary
|
|
||||||
steps:
|
|
||||||
-
|
|
||||||
name: Checkout
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
with:
|
|
||||||
fetch-depth: 0
|
|
||||||
-
|
|
||||||
name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@v3
|
|
||||||
-
|
|
||||||
name: Build
|
|
||||||
uses: docker/bake-action@v4
|
|
||||||
with:
|
|
||||||
targets: ${{ matrix.target }}
|
|
||||||
-
|
|
||||||
name: List artifacts
|
|
||||||
run: |
|
|
||||||
tree -nh ${{ env.DESTDIR }}
|
|
||||||
-
|
|
||||||
name: Check artifacts
|
|
||||||
run: |
|
|
||||||
find ${{ env.DESTDIR }} -type f -exec file -e ascii -- {} +
|
|
||||||
|
|
||||||
prepare-cross:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
needs:
|
|
||||||
- validate-dco
|
|
||||||
outputs:
|
|
||||||
matrix: ${{ steps.platforms.outputs.matrix }}
|
|
||||||
steps:
|
|
||||||
-
|
|
||||||
name: Checkout
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
-
|
|
||||||
name: Create matrix
|
|
||||||
id: platforms
|
|
||||||
run: |
|
|
||||||
matrix="$(docker buildx bake binary-cross --print | jq -cr '.target."binary-cross".platforms')"
|
|
||||||
echo "matrix=$matrix" >> $GITHUB_OUTPUT
|
|
||||||
-
|
|
||||||
name: Show matrix
|
|
||||||
run: |
|
|
||||||
echo ${{ steps.platforms.outputs.matrix }}
|
|
||||||
|
|
||||||
cross:
|
|
||||||
runs-on: ubuntu-20.04
|
|
||||||
needs:
|
|
||||||
- validate-dco
|
|
||||||
- prepare-cross
|
|
||||||
strategy:
|
|
||||||
fail-fast: false
|
|
||||||
matrix:
|
|
||||||
platform: ${{ fromJson(needs.prepare-cross.outputs.matrix) }}
|
|
||||||
steps:
|
|
||||||
-
|
|
||||||
name: Checkout
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
with:
|
|
||||||
fetch-depth: 0
|
|
||||||
-
|
|
||||||
name: Prepare
|
|
||||||
run: |
|
|
||||||
platform=${{ matrix.platform }}
|
|
||||||
echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
|
|
||||||
-
|
|
||||||
name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@v3
|
|
||||||
-
|
|
||||||
name: Build
|
|
||||||
uses: docker/bake-action@v4
|
|
||||||
with:
|
|
||||||
targets: all
|
|
||||||
set: |
|
|
||||||
*.platform=${{ matrix.platform }}
|
|
||||||
-
|
|
||||||
name: List artifacts
|
|
||||||
run: |
|
|
||||||
tree -nh ${{ env.DESTDIR }}
|
|
||||||
-
|
|
||||||
name: Check artifacts
|
|
||||||
run: |
|
|
||||||
find ${{ env.DESTDIR }} -type f -exec file -e ascii -- {} +
|
|
177
.github/workflows/test.yml
vendored
177
.github/workflows/test.yml
vendored
|
@ -1,177 +0,0 @@
|
||||||
name: test
|
|
||||||
|
|
||||||
concurrency:
|
|
||||||
group: ${{ github.workflow }}-${{ github.ref }}
|
|
||||||
cancel-in-progress: true
|
|
||||||
|
|
||||||
on:
|
|
||||||
workflow_dispatch:
|
|
||||||
push:
|
|
||||||
branches:
|
|
||||||
- 'master'
|
|
||||||
- '[0-9]+.[0-9]+'
|
|
||||||
pull_request:
|
|
||||||
|
|
||||||
env:
|
|
||||||
GO_VERSION: "1.21.9"
|
|
||||||
GIT_PAGER: "cat"
|
|
||||||
PAGER: "cat"
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
validate-dco:
|
|
||||||
uses: ./.github/workflows/.dco.yml
|
|
||||||
|
|
||||||
build-dev:
|
|
||||||
runs-on: ubuntu-20.04
|
|
||||||
needs:
|
|
||||||
- validate-dco
|
|
||||||
strategy:
|
|
||||||
fail-fast: false
|
|
||||||
matrix:
|
|
||||||
mode:
|
|
||||||
- ""
|
|
||||||
- systemd
|
|
||||||
steps:
|
|
||||||
-
|
|
||||||
name: Prepare
|
|
||||||
run: |
|
|
||||||
if [ "${{ matrix.mode }}" = "systemd" ]; then
|
|
||||||
echo "SYSTEMD=true" >> $GITHUB_ENV
|
|
||||||
fi
|
|
||||||
-
|
|
||||||
name: Checkout
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
-
|
|
||||||
name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@v3
|
|
||||||
-
|
|
||||||
name: Build dev image
|
|
||||||
uses: docker/bake-action@v4
|
|
||||||
with:
|
|
||||||
targets: dev
|
|
||||||
set: |
|
|
||||||
*.cache-from=type=gha,scope=dev${{ matrix.mode }}
|
|
||||||
*.cache-to=type=gha,scope=dev${{ matrix.mode }},mode=max
|
|
||||||
*.output=type=cacheonly
|
|
||||||
|
|
||||||
test:
|
|
||||||
needs:
|
|
||||||
- build-dev
|
|
||||||
- validate-dco
|
|
||||||
uses: ./.github/workflows/.test.yml
|
|
||||||
secrets: inherit
|
|
||||||
strategy:
|
|
||||||
fail-fast: false
|
|
||||||
matrix:
|
|
||||||
storage:
|
|
||||||
- graphdriver
|
|
||||||
- snapshotter
|
|
||||||
with:
|
|
||||||
storage: ${{ matrix.storage }}
|
|
||||||
|
|
||||||
validate-prepare:
|
|
||||||
runs-on: ubuntu-20.04
|
|
||||||
needs:
|
|
||||||
- validate-dco
|
|
||||||
outputs:
|
|
||||||
matrix: ${{ steps.scripts.outputs.matrix }}
|
|
||||||
steps:
|
|
||||||
-
|
|
||||||
name: Checkout
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
-
|
|
||||||
name: Create matrix
|
|
||||||
id: scripts
|
|
||||||
run: |
|
|
||||||
scripts=$(cd ./hack/validate && jq -nc '$ARGS.positional - ["all", "default", "dco"] | map(select(test("[.]")|not)) + ["generate-files"]' --args *)
|
|
||||||
echo "matrix=$scripts" >> $GITHUB_OUTPUT
|
|
||||||
-
|
|
||||||
name: Show matrix
|
|
||||||
run: |
|
|
||||||
echo ${{ steps.scripts.outputs.matrix }}
|
|
||||||
|
|
||||||
validate:
|
|
||||||
runs-on: ubuntu-20.04
|
|
||||||
timeout-minutes: 120
|
|
||||||
needs:
|
|
||||||
- validate-prepare
|
|
||||||
- build-dev
|
|
||||||
strategy:
|
|
||||||
fail-fast: true
|
|
||||||
matrix:
|
|
||||||
script: ${{ fromJson(needs.validate-prepare.outputs.matrix) }}
|
|
||||||
steps:
|
|
||||||
-
|
|
||||||
name: Checkout
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
with:
|
|
||||||
fetch-depth: 0
|
|
||||||
-
|
|
||||||
name: Set up runner
|
|
||||||
uses: ./.github/actions/setup-runner
|
|
||||||
-
|
|
||||||
name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@v3
|
|
||||||
-
|
|
||||||
name: Build dev image
|
|
||||||
uses: docker/bake-action@v4
|
|
||||||
with:
|
|
||||||
targets: dev
|
|
||||||
set: |
|
|
||||||
dev.cache-from=type=gha,scope=dev
|
|
||||||
-
|
|
||||||
name: Validate
|
|
||||||
run: |
|
|
||||||
make -o build validate-${{ matrix.script }}
|
|
||||||
|
|
||||||
smoke-prepare:
|
|
||||||
runs-on: ubuntu-20.04
|
|
||||||
needs:
|
|
||||||
- validate-dco
|
|
||||||
outputs:
|
|
||||||
matrix: ${{ steps.platforms.outputs.matrix }}
|
|
||||||
steps:
|
|
||||||
-
|
|
||||||
name: Checkout
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
-
|
|
||||||
name: Create matrix
|
|
||||||
id: platforms
|
|
||||||
run: |
|
|
||||||
matrix="$(docker buildx bake binary-smoketest --print | jq -cr '.target."binary-smoketest".platforms')"
|
|
||||||
echo "matrix=$matrix" >> $GITHUB_OUTPUT
|
|
||||||
-
|
|
||||||
name: Show matrix
|
|
||||||
run: |
|
|
||||||
echo ${{ steps.platforms.outputs.matrix }}
|
|
||||||
|
|
||||||
smoke:
|
|
||||||
runs-on: ubuntu-20.04
|
|
||||||
needs:
|
|
||||||
- smoke-prepare
|
|
||||||
strategy:
|
|
||||||
fail-fast: false
|
|
||||||
matrix:
|
|
||||||
platform: ${{ fromJson(needs.smoke-prepare.outputs.matrix) }}
|
|
||||||
steps:
|
|
||||||
-
|
|
||||||
name: Checkout
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
-
|
|
||||||
name: Prepare
|
|
||||||
run: |
|
|
||||||
platform=${{ matrix.platform }}
|
|
||||||
echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
|
|
||||||
-
|
|
||||||
name: Set up QEMU
|
|
||||||
uses: docker/setup-qemu-action@v3
|
|
||||||
-
|
|
||||||
name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@v3
|
|
||||||
-
|
|
||||||
name: Test
|
|
||||||
uses: docker/bake-action@v4
|
|
||||||
with:
|
|
||||||
targets: binary-smoketest
|
|
||||||
set: |
|
|
||||||
*.platform=${{ matrix.platform }}
|
|
62
.github/workflows/validate-pr.yml
vendored
62
.github/workflows/validate-pr.yml
vendored
|
@ -1,62 +0,0 @@
|
||||||
name: validate-pr
|
|
||||||
|
|
||||||
on:
|
|
||||||
pull_request:
|
|
||||||
types: [opened, edited, labeled, unlabeled]
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
check-area-label:
|
|
||||||
runs-on: ubuntu-20.04
|
|
||||||
steps:
|
|
||||||
- name: Missing `area/` label
|
|
||||||
if: contains(join(github.event.pull_request.labels.*.name, ','), 'impact/') && !contains(join(github.event.pull_request.labels.*.name, ','), 'area/')
|
|
||||||
run: |
|
|
||||||
echo "::error::Every PR with an 'impact/*' label should also have an 'area/*' label"
|
|
||||||
exit 1
|
|
||||||
- name: OK
|
|
||||||
run: exit 0
|
|
||||||
|
|
||||||
check-changelog:
|
|
||||||
if: contains(join(github.event.pull_request.labels.*.name, ','), 'impact/')
|
|
||||||
runs-on: ubuntu-20.04
|
|
||||||
env:
|
|
||||||
PR_BODY: |
|
|
||||||
${{ github.event.pull_request.body }}
|
|
||||||
steps:
|
|
||||||
- name: Check changelog description
|
|
||||||
run: |
|
|
||||||
# Extract the `markdown changelog` note code block
|
|
||||||
block=$(echo -n "$PR_BODY" | tr -d '\r' | awk '/^```markdown changelog$/{flag=1;next}/^```$/{flag=0}flag')
|
|
||||||
|
|
||||||
# Strip empty lines
|
|
||||||
desc=$(echo "$block" | awk NF)
|
|
||||||
|
|
||||||
if [ -z "$desc" ]; then
|
|
||||||
echo "::error::Changelog section is empty. Please provide a description for the changelog."
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
len=$(echo -n "$desc" | wc -c)
|
|
||||||
if [[ $len -le 6 ]]; then
|
|
||||||
echo "::error::Description looks too short: $desc"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo "This PR will be included in the release notes with the following note:"
|
|
||||||
echo "$desc"
|
|
||||||
|
|
||||||
check-pr-branch:
|
|
||||||
runs-on: ubuntu-20.04
|
|
||||||
env:
|
|
||||||
PR_TITLE: ${{ github.event.pull_request.title }}
|
|
||||||
steps:
|
|
||||||
# Backports or PR that target a release branch directly should mention the target branch in the title, for example:
|
|
||||||
# [X.Y backport] Some change that needs backporting to X.Y
|
|
||||||
# [X.Y] Change directly targeting the X.Y branch
|
|
||||||
- name: Get branch from PR title
|
|
||||||
id: title_branch
|
|
||||||
run: echo "$PR_TITLE" | sed -n 's/^\[\([0-9]*\.[0-9]*\)[^]]*\].*/branch=\1/p' >> $GITHUB_OUTPUT
|
|
||||||
|
|
||||||
- name: Check release branch
|
|
||||||
if: github.event.pull_request.base.ref != steps.title_branch.outputs.branch && !(github.event.pull_request.base.ref == 'master' && steps.title_branch.outputs.branch == '')
|
|
||||||
run: echo "::error::PR title suggests targetting the ${{ steps.title_branch.outputs.branch }} branch, but is opened against ${{ github.event.pull_request.base.ref }}" && exit 1
|
|
33
.github/workflows/windows-2019.yml
vendored
33
.github/workflows/windows-2019.yml
vendored
|
@ -1,33 +0,0 @@
|
||||||
name: windows-2019
|
|
||||||
|
|
||||||
concurrency:
|
|
||||||
group: ${{ github.workflow }}-${{ github.ref }}
|
|
||||||
cancel-in-progress: true
|
|
||||||
|
|
||||||
on:
|
|
||||||
schedule:
|
|
||||||
- cron: '0 10 * * *'
|
|
||||||
workflow_dispatch:
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
validate-dco:
|
|
||||||
uses: ./.github/workflows/.dco.yml
|
|
||||||
|
|
||||||
test-prepare:
|
|
||||||
uses: ./.github/workflows/.test-prepare.yml
|
|
||||||
needs:
|
|
||||||
- validate-dco
|
|
||||||
|
|
||||||
run:
|
|
||||||
needs:
|
|
||||||
- test-prepare
|
|
||||||
uses: ./.github/workflows/.windows.yml
|
|
||||||
secrets: inherit
|
|
||||||
strategy:
|
|
||||||
fail-fast: false
|
|
||||||
matrix:
|
|
||||||
storage: ${{ fromJson(needs.test-prepare.outputs.matrix) }}
|
|
||||||
with:
|
|
||||||
os: windows-2019
|
|
||||||
storage: ${{ matrix.storage }}
|
|
||||||
send_coverage: false
|
|
36
.github/workflows/windows-2022.yml
vendored
36
.github/workflows/windows-2022.yml
vendored
|
@ -1,36 +0,0 @@
|
||||||
name: windows-2022
|
|
||||||
|
|
||||||
concurrency:
|
|
||||||
group: ${{ github.workflow }}-${{ github.ref }}
|
|
||||||
cancel-in-progress: true
|
|
||||||
|
|
||||||
on:
|
|
||||||
workflow_dispatch:
|
|
||||||
push:
|
|
||||||
branches:
|
|
||||||
- 'master'
|
|
||||||
- '[0-9]+.[0-9]+'
|
|
||||||
pull_request:
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
validate-dco:
|
|
||||||
uses: ./.github/workflows/.dco.yml
|
|
||||||
|
|
||||||
test-prepare:
|
|
||||||
uses: ./.github/workflows/.test-prepare.yml
|
|
||||||
needs:
|
|
||||||
- validate-dco
|
|
||||||
|
|
||||||
run:
|
|
||||||
needs:
|
|
||||||
- test-prepare
|
|
||||||
uses: ./.github/workflows/.windows.yml
|
|
||||||
secrets: inherit
|
|
||||||
strategy:
|
|
||||||
fail-fast: false
|
|
||||||
matrix:
|
|
||||||
storage: ${{ fromJson(needs.test-prepare.outputs.matrix) }}
|
|
||||||
with:
|
|
||||||
os: windows-2022
|
|
||||||
storage: ${{ matrix.storage }}
|
|
||||||
send_coverage: true
|
|
35
.gitignore
vendored
35
.gitignore
vendored
|
@ -1,28 +1,25 @@
|
||||||
# If you want to ignore files created by your editor/tools, please consider a
|
# Docker project generated files to ignore
|
||||||
# [global .gitignore](https://help.github.com/articles/ignoring-files).
|
# if you want to ignore files created by your editor/tools,
|
||||||
|
# please consider a global .gitignore https://help.github.com/articles/ignoring-files
|
||||||
*~
|
*.exe
|
||||||
*.bak
|
*.exe~
|
||||||
|
*.gz
|
||||||
*.orig
|
*.orig
|
||||||
|
test.main
|
||||||
.*.swp
|
.*.swp
|
||||||
.DS_Store
|
.DS_Store
|
||||||
thumbs.db
|
# a .bashrc may be added to customize the build environment
|
||||||
|
|
||||||
# local repository customization
|
|
||||||
.envrc
|
|
||||||
.bashrc
|
.bashrc
|
||||||
.editorconfig
|
.editorconfig
|
||||||
|
.gopath/
|
||||||
# build artifacts
|
.go-pkg-cache/
|
||||||
|
autogen/
|
||||||
bundles/
|
bundles/
|
||||||
cli/winresources/*/*.syso
|
cmd/dockerd/dockerd
|
||||||
cli/winresources/*/winres.json
|
|
||||||
contrib/builder/rpm/*/changelog
|
contrib/builder/rpm/*/changelog
|
||||||
|
dockerversion/version_autogen.go
|
||||||
# ci artifacts
|
dockerversion/version_autogen_unix.go
|
||||||
*.exe
|
vendor/pkg/
|
||||||
*.gz
|
|
||||||
go-test-report.json
|
go-test-report.json
|
||||||
junit-report.xml
|
|
||||||
profile.out
|
profile.out
|
||||||
test.main
|
junit-report.xml
|
||||||
|
|
137
.golangci.yml
137
.golangci.yml
|
@ -1,137 +0,0 @@
|
||||||
linters:
|
|
||||||
enable:
|
|
||||||
- depguard
|
|
||||||
- dupword # Checks for duplicate words in the source code.
|
|
||||||
- goimports
|
|
||||||
- gosec
|
|
||||||
- gosimple
|
|
||||||
- govet
|
|
||||||
- importas
|
|
||||||
- ineffassign
|
|
||||||
- misspell
|
|
||||||
- revive
|
|
||||||
- staticcheck
|
|
||||||
- typecheck
|
|
||||||
- unconvert
|
|
||||||
- unused
|
|
||||||
|
|
||||||
disable:
|
|
||||||
- errcheck
|
|
||||||
|
|
||||||
run:
|
|
||||||
concurrency: 2
|
|
||||||
modules-download-mode: vendor
|
|
||||||
|
|
||||||
skip-dirs:
|
|
||||||
- docs
|
|
||||||
|
|
||||||
linters-settings:
|
|
||||||
dupword:
|
|
||||||
ignore:
|
|
||||||
- "true" # some tests use this as expected output
|
|
||||||
- "false" # some tests use this as expected output
|
|
||||||
- "root" # for tests using "ls" output with files owned by "root:root"
|
|
||||||
importas:
|
|
||||||
# Do not allow unaliased imports of aliased packages.
|
|
||||||
no-unaliased: true
|
|
||||||
|
|
||||||
alias:
|
|
||||||
# Enforce alias to prevent it accidentally being used instead of our
|
|
||||||
# own errdefs package (or vice-versa).
|
|
||||||
- pkg: github.com/containerd/containerd/errdefs
|
|
||||||
alias: cerrdefs
|
|
||||||
- pkg: github.com/opencontainers/image-spec/specs-go/v1
|
|
||||||
alias: ocispec
|
|
||||||
|
|
||||||
govet:
|
|
||||||
check-shadowing: false
|
|
||||||
depguard:
|
|
||||||
rules:
|
|
||||||
main:
|
|
||||||
deny:
|
|
||||||
- pkg: io/ioutil
|
|
||||||
desc: The io/ioutil package has been deprecated, see https://go.dev/doc/go1.16#ioutil
|
|
||||||
- pkg: "github.com/stretchr/testify/assert"
|
|
||||||
desc: Use "gotest.tools/v3/assert" instead
|
|
||||||
- pkg: "github.com/stretchr/testify/require"
|
|
||||||
desc: Use "gotest.tools/v3/assert" instead
|
|
||||||
- pkg: "github.com/stretchr/testify/suite"
|
|
||||||
desc: Do not use
|
|
||||||
revive:
|
|
||||||
rules:
|
|
||||||
# FIXME make sure all packages have a description. Currently, there's many packages without.
|
|
||||||
- name: package-comments
|
|
||||||
disabled: true
|
|
||||||
issues:
|
|
||||||
# The default exclusion rules are a bit too permissive, so copying the relevant ones below
|
|
||||||
exclude-use-default: false
|
|
||||||
|
|
||||||
exclude-rules:
|
|
||||||
# We prefer to use an "exclude-list" so that new "default" exclusions are not
|
|
||||||
# automatically inherited. We can decide whether or not to follow upstream
|
|
||||||
# defaults when updating golang-ci-lint versions.
|
|
||||||
# Unfortunately, this means we have to copy the whole exclusion pattern, as
|
|
||||||
# (unlike the "include" option), the "exclude" option does not take exclusion
|
|
||||||
# ID's.
|
|
||||||
#
|
|
||||||
# These exclusion patterns are copied from the default excluses at:
|
|
||||||
# https://github.com/golangci/golangci-lint/blob/v1.46.2/pkg/config/issues.go#L10-L104
|
|
||||||
|
|
||||||
# EXC0001
|
|
||||||
- text: "Error return value of .((os\\.)?std(out|err)\\..*|.*Close|.*Flush|os\\.Remove(All)?|.*print(f|ln)?|os\\.(Un)?Setenv). is not checked"
|
|
||||||
linters:
|
|
||||||
- errcheck
|
|
||||||
# EXC0006
|
|
||||||
- text: "Use of unsafe calls should be audited"
|
|
||||||
linters:
|
|
||||||
- gosec
|
|
||||||
# EXC0007
|
|
||||||
- text: "Subprocess launch(ed with variable|ing should be audited)"
|
|
||||||
linters:
|
|
||||||
- gosec
|
|
||||||
# EXC0008
|
|
||||||
# TODO: evaluate these and fix where needed: G307: Deferring unsafe method "*os.File" on type "Close" (gosec)
|
|
||||||
- text: "(G104|G307)"
|
|
||||||
linters:
|
|
||||||
- gosec
|
|
||||||
# EXC0009
|
|
||||||
- text: "(Expect directory permissions to be 0750 or less|Expect file permissions to be 0600 or less)"
|
|
||||||
linters:
|
|
||||||
- gosec
|
|
||||||
# EXC0010
|
|
||||||
- text: "Potential file inclusion via variable"
|
|
||||||
linters:
|
|
||||||
- gosec
|
|
||||||
|
|
||||||
# Looks like the match in "EXC0007" above doesn't catch this one
|
|
||||||
# TODO: consider upstreaming this to golangci-lint's default exclusion rules
|
|
||||||
- text: "G204: Subprocess launched with a potential tainted input or cmd arguments"
|
|
||||||
linters:
|
|
||||||
- gosec
|
|
||||||
# Looks like the match in "EXC0009" above doesn't catch this one
|
|
||||||
# TODO: consider upstreaming this to golangci-lint's default exclusion rules
|
|
||||||
- text: "G306: Expect WriteFile permissions to be 0600 or less"
|
|
||||||
linters:
|
|
||||||
- gosec
|
|
||||||
|
|
||||||
# Exclude some linters from running on tests files.
|
|
||||||
- path: _test\.go
|
|
||||||
linters:
|
|
||||||
- errcheck
|
|
||||||
- gosec
|
|
||||||
|
|
||||||
# Suppress golint complaining about generated types in api/types/
|
|
||||||
- text: "type name will be used as (container|volume)\\.(Container|Volume).* by other packages, and that stutters; consider calling this"
|
|
||||||
path: "api/types/(volume|container)/"
|
|
||||||
linters:
|
|
||||||
- revive
|
|
||||||
# FIXME temporarily suppress these (see https://github.com/gotestyourself/gotest.tools/issues/272)
|
|
||||||
- text: "SA1019: (assert|cmp|is)\\.ErrorType is deprecated"
|
|
||||||
linters:
|
|
||||||
- staticcheck
|
|
||||||
|
|
||||||
# Maximum issues count per one linter. Set to 0 to disable. Default is 50.
|
|
||||||
max-issues-per-linter: 0
|
|
||||||
|
|
||||||
# Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
|
|
||||||
max-same-issues: 0
|
|
310
.mailmap
310
.mailmap
|
@ -1,24 +1,15 @@
|
||||||
# This file lists the canonical name and email of contributors, and is used to
|
# Generate AUTHORS: hack/generate-authors.sh
|
||||||
# generate AUTHORS (in hack/generate-authors.sh).
|
|
||||||
#
|
|
||||||
# To find new duplicates, regenerate AUTHORS and scan for name duplicates, or
|
|
||||||
# run the following to find email duplicates:
|
|
||||||
# git log --format='%aE - %aN' | sort -uf | awk -v IGNORECASE=1 '$1 in a {print a[$1]; print}; {a[$1]=$0}'
|
|
||||||
#
|
|
||||||
# For an explanation of this file format, consult gitmailmap(5).
|
|
||||||
|
|
||||||
|
# Tip for finding duplicates (besides scanning the output of AUTHORS for name
|
||||||
|
# duplicates that aren't also email duplicates): scan the output of:
|
||||||
|
# git log --format='%aE - %aN' | sort -uf
|
||||||
|
#
|
||||||
|
# For explanation on this file format: man git-shortlog
|
||||||
|
|
||||||
|
<21551195@zju.edu.cn> <hsinko@users.noreply.github.com>
|
||||||
|
<mr.wrfly@gmail.com> <wrfly@users.noreply.github.com>
|
||||||
Aaron L. Xu <liker.xu@foxmail.com>
|
Aaron L. Xu <liker.xu@foxmail.com>
|
||||||
Aaron L. Xu <liker.xu@foxmail.com> <likexu@harmonycloud.cn>
|
Abhinandan Prativadi <abhi@docker.com>
|
||||||
Aaron Lehmann <alehmann@netflix.com>
|
|
||||||
Aaron Lehmann <alehmann@netflix.com> <aaron.lehmann@docker.com>
|
|
||||||
Abhinandan Prativadi <aprativadi@gmail.com>
|
|
||||||
Abhinandan Prativadi <aprativadi@gmail.com> <abhi@docker.com>
|
|
||||||
Abhinandan Prativadi <aprativadi@gmail.com> abhi <user.email>
|
|
||||||
Abhishek Chanda <abhishek.becs@gmail.com>
|
|
||||||
Abhishek Chanda <abhishek.becs@gmail.com> <abhishek.chanda@emc.com>
|
|
||||||
Ada Mancini <ada@docker.com>
|
|
||||||
Adam Dobrawy <naczelnik@jawnosc.tk>
|
|
||||||
Adam Dobrawy <naczelnik@jawnosc.tk> <ad-m@users.noreply.github.com>
|
|
||||||
Adrien Gallouët <adrien@gallouet.fr> <angt@users.noreply.github.com>
|
Adrien Gallouët <adrien@gallouet.fr> <angt@users.noreply.github.com>
|
||||||
Ahmed Kamal <email.ahmedkamal@googlemail.com>
|
Ahmed Kamal <email.ahmedkamal@googlemail.com>
|
||||||
Ahmet Alp Balkan <ahmetb@microsoft.com> <ahmetalpbalkan@gmail.com>
|
Ahmet Alp Balkan <ahmetb@microsoft.com> <ahmetalpbalkan@gmail.com>
|
||||||
|
@ -27,40 +18,23 @@ AJ Bowen <aj@soulshake.net> <aj@gandi.net>
|
||||||
AJ Bowen <aj@soulshake.net> <amy@gandi.net>
|
AJ Bowen <aj@soulshake.net> <amy@gandi.net>
|
||||||
Akihiro Matsushima <amatsusbit@gmail.com> <amatsus@users.noreply.github.com>
|
Akihiro Matsushima <amatsusbit@gmail.com> <amatsus@users.noreply.github.com>
|
||||||
Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
|
Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
|
||||||
Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp> <suda.akihiro@lab.ntt.co.jp>
|
|
||||||
Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp> <suda.kyoto@gmail.com>
|
Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp> <suda.kyoto@gmail.com>
|
||||||
Akshay Moghe <akshay.moghe@gmail.com>
|
Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp> <suda.akihiro@lab.ntt.co.jp>
|
||||||
Albin Kerouanton <albinker@gmail.com>
|
|
||||||
Albin Kerouanton <albinker@gmail.com> <albin@akerouanton.name>
|
|
||||||
Albin Kerouanton <albinker@gmail.com> <557933+akerouanton@users.noreply.github.com>
|
|
||||||
Aleksa Sarai <asarai@suse.de>
|
Aleksa Sarai <asarai@suse.de>
|
||||||
Aleksa Sarai <asarai@suse.de> <asarai@suse.com>
|
Aleksa Sarai <asarai@suse.de> <asarai@suse.com>
|
||||||
Aleksa Sarai <asarai@suse.de> <cyphar@cyphar.com>
|
Aleksa Sarai <asarai@suse.de> <cyphar@cyphar.com>
|
||||||
Aleksandrs Fadins <aleks@s-ko.net>
|
Aleksandrs Fadins <aleks@s-ko.net>
|
||||||
Alessandro Boch <aboch@tetrationanalytics.com>
|
|
||||||
Alessandro Boch <aboch@tetrationanalytics.com> <aboch@docker.com>
|
Alessandro Boch <aboch@tetrationanalytics.com> <aboch@docker.com>
|
||||||
Alessandro Boch <aboch@tetrationanalytics.com> <aboch@socketplane.io>
|
|
||||||
Alessandro Boch <aboch@tetrationanalytics.com> <aboch@users.noreply.github.com>
|
|
||||||
Alex Chan <alex@alexwlchan.net>
|
|
||||||
Alex Chan <alex@alexwlchan.net> <alex.chan@metaswitch.com>
|
|
||||||
Alex Chen <alexchenunix@gmail.com> <root@localhost.localdomain>
|
Alex Chen <alexchenunix@gmail.com> <root@localhost.localdomain>
|
||||||
Alex Ellis <alexellis2@gmail.com>
|
Alex Ellis <alexellis2@gmail.com>
|
||||||
Alex Goodman <wagoodman@gmail.com> <wagoodman@users.noreply.github.com>
|
Alex Goodman <wagoodman@gmail.com> <wagoodman@users.noreply.github.com>
|
||||||
Alexander Larsson <alexl@redhat.com> <alexander.larsson@gmail.com>
|
Alexander Larsson <alexl@redhat.com> <alexander.larsson@gmail.com>
|
||||||
Alexander Morozov <lk4d4math@gmail.com>
|
Alexander Morozov <lk4d4@docker.com>
|
||||||
Alexander Morozov <lk4d4math@gmail.com> <lk4d4@docker.com>
|
Alexander Morozov <lk4d4@docker.com> <lk4d4math@gmail.com>
|
||||||
Alexandre Beslic <alexandre.beslic@gmail.com> <abronan@docker.com>
|
Alexandre Beslic <alexandre.beslic@gmail.com> <abronan@docker.com>
|
||||||
Alexandre González <agonzalezro@gmail.com>
|
|
||||||
Alexis Ries <ries.alexis@gmail.com>
|
|
||||||
Alexis Ries <ries.alexis@gmail.com> <alexis.ries.ext@orange.com>
|
|
||||||
Alexis Thomas <fr.alexisthomas@gmail.com>
|
|
||||||
Alicia Lauerman <alicia@eta.im> <allydevour@me.com>
|
Alicia Lauerman <alicia@eta.im> <allydevour@me.com>
|
||||||
Allen Sun <allensun.shl@alibaba-inc.com> <allen.sun@daocloud.io>
|
Allen Sun <allensun.shl@alibaba-inc.com> <allen.sun@daocloud.io>
|
||||||
Allen Sun <allensun.shl@alibaba-inc.com> <shlallen1990@gmail.com>
|
Allen Sun <allensun.shl@alibaba-inc.com> <shlallen1990@gmail.com>
|
||||||
Anca Iordache <anca.iordache@docker.com>
|
|
||||||
Andrea Denisse Gómez <crypto.andrea@protonmail.ch>
|
|
||||||
Andrew Kim <taeyeonkim90@gmail.com>
|
|
||||||
Andrew Kim <taeyeonkim90@gmail.com> <akim01@fortinet.com>
|
|
||||||
Andrew Weiss <andrew.weiss@docker.com> <andrew.weiss@microsoft.com>
|
Andrew Weiss <andrew.weiss@docker.com> <andrew.weiss@microsoft.com>
|
||||||
Andrew Weiss <andrew.weiss@docker.com> <andrew.weiss@outlook.com>
|
Andrew Weiss <andrew.weiss@docker.com> <andrew.weiss@outlook.com>
|
||||||
Andrey Kolomentsev <andrey.kolomentsev@docker.com>
|
Andrey Kolomentsev <andrey.kolomentsev@docker.com>
|
||||||
|
@ -68,8 +42,6 @@ Andrey Kolomentsev <andrey.kolomentsev@docker.com> <andrey.kolomentsev@gmail.com
|
||||||
André Martins <aanm90@gmail.com> <martins@noironetworks.com>
|
André Martins <aanm90@gmail.com> <martins@noironetworks.com>
|
||||||
Andy Rothfusz <github@developersupport.net> <github@metaliveblog.com>
|
Andy Rothfusz <github@developersupport.net> <github@metaliveblog.com>
|
||||||
Andy Smith <github@anarkystic.com>
|
Andy Smith <github@anarkystic.com>
|
||||||
Andy Zhang <andy.zhangtao@hotmail.com>
|
|
||||||
Andy Zhang <andy.zhangtao@hotmail.com> <ztao@tibco-support.com>
|
|
||||||
Ankush Agarwal <ankushagarwal11@gmail.com> <ankushagarwal@users.noreply.github.com>
|
Ankush Agarwal <ankushagarwal11@gmail.com> <ankushagarwal@users.noreply.github.com>
|
||||||
Antonio Murdaca <antonio.murdaca@gmail.com> <amurdaca@redhat.com>
|
Antonio Murdaca <antonio.murdaca@gmail.com> <amurdaca@redhat.com>
|
||||||
Antonio Murdaca <antonio.murdaca@gmail.com> <me@runcom.ninja>
|
Antonio Murdaca <antonio.murdaca@gmail.com> <me@runcom.ninja>
|
||||||
|
@ -79,48 +51,30 @@ Antonio Murdaca <antonio.murdaca@gmail.com> <runcom@users.noreply.github.com>
|
||||||
Anuj Bahuguna <anujbahuguna.dev@gmail.com>
|
Anuj Bahuguna <anujbahuguna.dev@gmail.com>
|
||||||
Anuj Bahuguna <anujbahuguna.dev@gmail.com> <abahuguna@fiberlink.com>
|
Anuj Bahuguna <anujbahuguna.dev@gmail.com> <abahuguna@fiberlink.com>
|
||||||
Anusha Ragunathan <anusha.ragunathan@docker.com> <anusha@docker.com>
|
Anusha Ragunathan <anusha.ragunathan@docker.com> <anusha@docker.com>
|
||||||
Anyu Wang <wanganyu@outlook.com>
|
Arnaud Porterie <arnaud.porterie@docker.com>
|
||||||
Arko Dasgupta <arko@tetrate.io>
|
Arnaud Porterie <arnaud.porterie@docker.com> <icecrime@gmail.com>
|
||||||
Arko Dasgupta <arko@tetrate.io> <arko.dasgupta@docker.com>
|
|
||||||
Arko Dasgupta <arko@tetrate.io> <arkodg@users.noreply.github.com>
|
|
||||||
Arnaud Porterie <icecrime@gmail.com>
|
|
||||||
Arnaud Porterie <icecrime@gmail.com> <arnaud.porterie@docker.com>
|
|
||||||
Arnaud Rebillout <arnaud.rebillout@collabora.com>
|
|
||||||
Arnaud Rebillout <arnaud.rebillout@collabora.com> <elboulangero@gmail.com>
|
|
||||||
Arthur Gautier <baloo@gandi.net> <superbaloo+registrations.github@superbaloo.net>
|
Arthur Gautier <baloo@gandi.net> <superbaloo+registrations.github@superbaloo.net>
|
||||||
Artur Meyster <arthurfbi@yahoo.com>
|
|
||||||
Avi Miller <avi.miller@oracle.com> <avi.miller@gmail.com>
|
Avi Miller <avi.miller@oracle.com> <avi.miller@gmail.com>
|
||||||
Ben Bonnefoy <frenchben@docker.com>
|
Ben Bonnefoy <frenchben@docker.com>
|
||||||
Ben Golub <ben.golub@dotcloud.com>
|
Ben Golub <ben.golub@dotcloud.com>
|
||||||
Ben Toews <mastahyeti@gmail.com> <mastahyeti@users.noreply.github.com>
|
Ben Toews <mastahyeti@gmail.com> <mastahyeti@users.noreply.github.com>
|
||||||
Benny Ng <benny.tpng@gmail.com>
|
|
||||||
Benoit Chesneau <bchesneau@gmail.com>
|
Benoit Chesneau <bchesneau@gmail.com>
|
||||||
Bevisy Zhang <binbin36520@gmail.com>
|
Bevisy Zhang <binbin36520@gmail.com>
|
||||||
Bhiraj Butala <abhiraj.butala@gmail.com>
|
Bhiraj Butala <abhiraj.butala@gmail.com>
|
||||||
Bhumika Bayani <bhumikabayani@gmail.com>
|
Bhumika Bayani <bhumikabayani@gmail.com>
|
||||||
Bilal Amarni <bilal.amarni@gmail.com> <bamarni@users.noreply.github.com>
|
Bilal Amarni <bilal.amarni@gmail.com> <bamarni@users.noreply.github.com>
|
||||||
Bill Wang <ozbillwang@gmail.com> <SydOps@users.noreply.github.com>
|
|
||||||
Bily Zhang <xcoder@tenxcloud.com>
|
Bily Zhang <xcoder@tenxcloud.com>
|
||||||
|
Bill Wang <ozbillwang@gmail.com> <SydOps@users.noreply.github.com>
|
||||||
Bin Liu <liubin0329@gmail.com>
|
Bin Liu <liubin0329@gmail.com>
|
||||||
Bin Liu <liubin0329@gmail.com> <liubin0329@users.noreply.github.com>
|
Bin Liu <liubin0329@gmail.com> <liubin0329@users.noreply.github.com>
|
||||||
Bingshen Wang <bingshen.wbs@alibaba-inc.com>
|
Bingshen Wang <bingshen.wbs@alibaba-inc.com>
|
||||||
Bjorn Neergaard <bjorn@neersighted.com>
|
|
||||||
Bjorn Neergaard <bjorn@neersighted.com> <bjorn.neergaard@docker.com>
|
|
||||||
Bjorn Neergaard <bjorn@neersighted.com> <bneergaard@mirantis.com>
|
|
||||||
Boaz Shuster <ripcurld.github@gmail.com>
|
Boaz Shuster <ripcurld.github@gmail.com>
|
||||||
Bojun Zhu <bojun.zhu@foxmail.com>
|
|
||||||
Boqin Qin <bobbqqin@gmail.com>
|
|
||||||
Boshi Lian <farmer1992@gmail.com>
|
|
||||||
Brandon Philips <brandon.philips@coreos.com> <brandon@ifup.co>
|
Brandon Philips <brandon.philips@coreos.com> <brandon@ifup.co>
|
||||||
Brandon Philips <brandon.philips@coreos.com> <brandon@ifup.org>
|
Brandon Philips <brandon.philips@coreos.com> <brandon@ifup.org>
|
||||||
Brent Salisbury <brent.salisbury@docker.com> <brent@docker.com>
|
Brent Salisbury <brent.salisbury@docker.com> <brent@docker.com>
|
||||||
Brian Goff <cpuguy83@gmail.com>
|
Brian Goff <cpuguy83@gmail.com>
|
||||||
Brian Goff <cpuguy83@gmail.com> <bgoff@cpuguy83-mbp.home>
|
Brian Goff <cpuguy83@gmail.com> <bgoff@cpuguy83-mbp.home>
|
||||||
Brian Goff <cpuguy83@gmail.com> <bgoff@cpuguy83-mbp.local>
|
Brian Goff <cpuguy83@gmail.com> <bgoff@cpuguy83-mbp.local>
|
||||||
Brian Goff <cpuguy83@gmail.com> <brian.goff@microsoft.com>
|
|
||||||
Brian Goff <cpuguy83@gmail.com> <cpuguy@hey.com>
|
|
||||||
Cameron Sparr <gh@sparr.email>
|
|
||||||
Carlos de Paula <me@carlosedp.com>
|
|
||||||
Chander Govindarajan <chandergovind@gmail.com>
|
Chander Govindarajan <chandergovind@gmail.com>
|
||||||
Chao Wang <wangchao.fnst@cn.fujitsu.com> <chaowang@localhost.localdomain>
|
Chao Wang <wangchao.fnst@cn.fujitsu.com> <chaowang@localhost.localdomain>
|
||||||
Charles Hooper <charles.hooper@dotcloud.com> <chooper@plumata.com>
|
Charles Hooper <charles.hooper@dotcloud.com> <chooper@plumata.com>
|
||||||
|
@ -132,21 +86,13 @@ Chen Qiu <cheney-90@hotmail.com> <21321229@zju.edu.cn>
|
||||||
Chengfei Shang <cfshang@alauda.io>
|
Chengfei Shang <cfshang@alauda.io>
|
||||||
Chris Dias <cdias@microsoft.com>
|
Chris Dias <cdias@microsoft.com>
|
||||||
Chris McKinnel <chris.mckinnel@tangentlabs.co.uk>
|
Chris McKinnel <chris.mckinnel@tangentlabs.co.uk>
|
||||||
Chris Price <cprice@mirantis.com>
|
|
||||||
Chris Price <cprice@mirantis.com> <chris.price@docker.com>
|
|
||||||
Chris Telfer <ctelfer@docker.com>
|
|
||||||
Chris Telfer <ctelfer@docker.com> <ctelfer@users.noreply.github.com>
|
|
||||||
Christopher Biscardi <biscarch@sketcht.com>
|
Christopher Biscardi <biscarch@sketcht.com>
|
||||||
Christopher Latham <sudosurootdev@gmail.com>
|
Christopher Latham <sudosurootdev@gmail.com>
|
||||||
Christy Norman <christy@linux.vnet.ibm.com>
|
|
||||||
Chun Chen <ramichen@tencent.com> <chenchun.feed@gmail.com>
|
Chun Chen <ramichen@tencent.com> <chenchun.feed@gmail.com>
|
||||||
Corbin Coleman <corbin.coleman@docker.com>
|
Corbin Coleman <corbin.coleman@docker.com>
|
||||||
Cristian Ariza <dev@cristianrz.com>
|
|
||||||
Cristian Staretu <cristian.staretu@gmail.com>
|
Cristian Staretu <cristian.staretu@gmail.com>
|
||||||
Cristian Staretu <cristian.staretu@gmail.com> <unclejack@users.noreply.github.com>
|
Cristian Staretu <cristian.staretu@gmail.com> <unclejack@users.noreply.github.com>
|
||||||
Cristian Staretu <cristian.staretu@gmail.com> <unclejacksons@gmail.com>
|
Cristian Staretu <cristian.staretu@gmail.com> <unclejacksons@gmail.com>
|
||||||
cui fliter <imcusg@gmail.com>
|
|
||||||
cui fliter <imcusg@gmail.com> cuishuang <imcusg@gmail.com>
|
|
||||||
CUI Wei <ghostplant@qq.com> cuiwei13 <cuiwei13@pku.edu.cn>
|
CUI Wei <ghostplant@qq.com> cuiwei13 <cuiwei13@pku.edu.cn>
|
||||||
Daehyeok Mun <daehyeok@gmail.com>
|
Daehyeok Mun <daehyeok@gmail.com>
|
||||||
Daehyeok Mun <daehyeok@gmail.com> <daehyeok@daehyeok-ui-MacBook-Air.local>
|
Daehyeok Mun <daehyeok@gmail.com> <daehyeok@daehyeok-ui-MacBook-Air.local>
|
||||||
|
@ -173,35 +119,22 @@ Dattatraya Kumbhar <dattatraya.kumbhar@gslab.com>
|
||||||
Dave Goodchild <buddhamagnet@gmail.com>
|
Dave Goodchild <buddhamagnet@gmail.com>
|
||||||
Dave Henderson <dhenderson@gmail.com> <Dave.Henderson@ca.ibm.com>
|
Dave Henderson <dhenderson@gmail.com> <Dave.Henderson@ca.ibm.com>
|
||||||
Dave Tucker <dt@docker.com> <dave@dtucker.co.uk>
|
Dave Tucker <dt@docker.com> <dave@dtucker.co.uk>
|
||||||
David Dooling <dooling@gmail.com>
|
|
||||||
David Dooling <dooling@gmail.com> <david.dooling@docker.com>
|
|
||||||
David M. Karr <davidmichaelkarr@gmail.com>
|
David M. Karr <davidmichaelkarr@gmail.com>
|
||||||
David Sheets <dsheets@docker.com> <sheets@alum.mit.edu>
|
David Sheets <dsheets@docker.com> <sheets@alum.mit.edu>
|
||||||
David Sissitka <me@dsissitka.com>
|
David Sissitka <me@dsissitka.com>
|
||||||
David Williamson <david.williamson@docker.com> <davidwilliamson@users.noreply.github.com>
|
David Williamson <david.williamson@docker.com> <davidwilliamson@users.noreply.github.com>
|
||||||
Derek Ch <denc716@gmail.com>
|
|
||||||
Derek McGowan <derek@mcg.dev>
|
|
||||||
Derek McGowan <derek@mcg.dev> <derek@mcgstyle.net>
|
|
||||||
Deshi Xiao <dxiao@redhat.com> <dsxiao@dataman-inc.com>
|
Deshi Xiao <dxiao@redhat.com> <dsxiao@dataman-inc.com>
|
||||||
Deshi Xiao <dxiao@redhat.com> <xiaods@gmail.com>
|
Deshi Xiao <dxiao@redhat.com> <xiaods@gmail.com>
|
||||||
Dhilip Kumars <dhilip.kumar.s@huawei.com>
|
|
||||||
Diego Siqueira <dieg0@live.com>
|
Diego Siqueira <dieg0@live.com>
|
||||||
Diogo Monica <diogo@docker.com> <diogo.monica@gmail.com>
|
Diogo Monica <diogo@docker.com> <diogo.monica@gmail.com>
|
||||||
Dmitry Sharshakov <d3dx12.xx@gmail.com>
|
Dmitry Sharshakov <d3dx12.xx@gmail.com>
|
||||||
Dmitry Sharshakov <d3dx12.xx@gmail.com> <sh7dm@outlook.com>
|
Dmitry Sharshakov <d3dx12.xx@gmail.com> <sh7dm@outlook.com>
|
||||||
Dmytro Iakovliev <dmytro.iakovliev@zodiacsystems.com>
|
|
||||||
Dominic Yin <yindongchao@inspur.com>
|
|
||||||
Dominik Honnef <dominik@honnef.co> <dominikh@fork-bomb.org>
|
Dominik Honnef <dominik@honnef.co> <dominikh@fork-bomb.org>
|
||||||
Doug Davis <dug@us.ibm.com> <duglin@users.noreply.github.com>
|
Doug Davis <dug@us.ibm.com> <duglin@users.noreply.github.com>
|
||||||
Doug Tangren <d.tangren@gmail.com>
|
Doug Tangren <d.tangren@gmail.com>
|
||||||
Drew Erny <derny@mirantis.com>
|
|
||||||
Drew Erny <derny@mirantis.com> <drew.erny@docker.com>
|
|
||||||
Elan Ruusamäe <glen@pld-linux.org>
|
Elan Ruusamäe <glen@pld-linux.org>
|
||||||
Elan Ruusamäe <glen@pld-linux.org> <glen@delfi.ee>
|
Elan Ruusamäe <glen@pld-linux.org> <glen@delfi.ee>
|
||||||
Elango Sivanandam <elango.siva@docker.com>
|
Elango Sivanandam <elango.siva@docker.com>
|
||||||
Elango Sivanandam <elango.siva@docker.com> <elango@docker.com>
|
|
||||||
Eli Uriegas <seemethere101@gmail.com>
|
|
||||||
Eli Uriegas <seemethere101@gmail.com> <eli.uriegas@docker.com>
|
|
||||||
Eric G. Noriega <enoriega@vizuri.com> <egnoriega@users.noreply.github.com>
|
Eric G. Noriega <enoriega@vizuri.com> <egnoriega@users.noreply.github.com>
|
||||||
Eric Hanchrow <ehanchrow@ine.com> <eric.hanchrow@gmail.com>
|
Eric Hanchrow <ehanchrow@ine.com> <eric.hanchrow@gmail.com>
|
||||||
Eric Rosenberg <ehaydenr@gmail.com> <ehaydenr@users.noreply.github.com>
|
Eric Rosenberg <ehaydenr@gmail.com> <ehaydenr@users.noreply.github.com>
|
||||||
|
@ -221,14 +154,10 @@ Felix Hupfeld <felix@quobyte.com> <quofelix@users.noreply.github.com>
|
||||||
Felix Ruess <felix.ruess@gmail.com> <felix.ruess@roboception.de>
|
Felix Ruess <felix.ruess@gmail.com> <felix.ruess@roboception.de>
|
||||||
Feng Yan <fy2462@gmail.com>
|
Feng Yan <fy2462@gmail.com>
|
||||||
Fengtu Wang <wangfengtu@huawei.com> <wangfengtu@huawei.com>
|
Fengtu Wang <wangfengtu@huawei.com> <wangfengtu@huawei.com>
|
||||||
Filipe Pina <hzlu1ot0@duck.com>
|
|
||||||
Filipe Pina <hzlu1ot0@duck.com> <636320+fopina@users.noreply.github.com>
|
|
||||||
Francisco Carriedo <fcarriedo@gmail.com>
|
Francisco Carriedo <fcarriedo@gmail.com>
|
||||||
Frank Rosquin <frank.rosquin+github@gmail.com> <frank.rosquin@gmail.com>
|
Frank Rosquin <frank.rosquin+github@gmail.com> <frank.rosquin@gmail.com>
|
||||||
Frank Yang <yyb196@gmail.com>
|
|
||||||
Frederick F. Kautz IV <fkautz@redhat.com> <fkautz@alumni.cmu.edu>
|
Frederick F. Kautz IV <fkautz@redhat.com> <fkautz@alumni.cmu.edu>
|
||||||
Fu JinLin <withlin@yeah.net>
|
Fu JinLin <withlin@yeah.net>
|
||||||
Gabriel Goller <gabrielgoller123@gmail.com>
|
|
||||||
Gabriel Nicolas Avellaneda <avellaneda.gabriel@gmail.com>
|
Gabriel Nicolas Avellaneda <avellaneda.gabriel@gmail.com>
|
||||||
Gaetan de Villele <gdevillele@gmail.com>
|
Gaetan de Villele <gdevillele@gmail.com>
|
||||||
Gang Qiao <qiaohai8866@gmail.com> <1373319223@qq.com>
|
Gang Qiao <qiaohai8866@gmail.com> <1373319223@qq.com>
|
||||||
|
@ -239,71 +168,43 @@ Giampaolo Mancini <giampaolo@trampolineup.com>
|
||||||
Giovan Isa Musthofa <giovanism@outlook.co.id>
|
Giovan Isa Musthofa <giovanism@outlook.co.id>
|
||||||
Gopikannan Venugopalsamy <gopikannan.venugopalsamy@gmail.com>
|
Gopikannan Venugopalsamy <gopikannan.venugopalsamy@gmail.com>
|
||||||
Gou Rao <gou@portworx.com> <gourao@users.noreply.github.com>
|
Gou Rao <gou@portworx.com> <gourao@users.noreply.github.com>
|
||||||
Grant Millar <rid@cylo.io>
|
|
||||||
Grant Millar <rid@cylo.io> <grant@cylo.io>
|
|
||||||
Grant Millar <rid@cylo.io> <grant@seednet.eu>
|
|
||||||
Greg Stephens <greg@udon.org>
|
Greg Stephens <greg@udon.org>
|
||||||
Guillaume J. Charmes <guillaume.charmes@docker.com> <charmes.guillaume@gmail.com>
|
Guillaume J. Charmes <guillaume.charmes@docker.com> <charmes.guillaume@gmail.com>
|
||||||
Guillaume J. Charmes <guillaume.charmes@docker.com> <guillaume.charmes@dotcloud.com>
|
Guillaume J. Charmes <guillaume.charmes@docker.com> <guillaume.charmes@dotcloud.com>
|
||||||
Guillaume J. Charmes <guillaume.charmes@docker.com> <guillaume@charmes.net>
|
Guillaume J. Charmes <guillaume.charmes@docker.com> <guillaume@charmes.net>
|
||||||
Guillaume J. Charmes <guillaume.charmes@docker.com> <guillaume@docker.com>
|
Guillaume J. Charmes <guillaume.charmes@docker.com> <guillaume@docker.com>
|
||||||
Guillaume J. Charmes <guillaume.charmes@docker.com> <guillaume@dotcloud.com>
|
Guillaume J. Charmes <guillaume.charmes@docker.com> <guillaume@dotcloud.com>
|
||||||
Gunadhya S. <6939749+gunadhya@users.noreply.github.com>
|
|
||||||
Guoqiang QI <guoqiang.qi1@gmail.com>
|
|
||||||
Guri <odg0318@gmail.com>
|
Guri <odg0318@gmail.com>
|
||||||
Gurjeet Singh <gurjeet@singh.im> <singh.gurjeet@gmail.com>
|
Gurjeet Singh <gurjeet@singh.im> <singh.gurjeet@gmail.com>
|
||||||
Gustav Sinder <gustav.sinder@gmail.com>
|
Gustav Sinder <gustav.sinder@gmail.com>
|
||||||
Günther Jungbluth <gunther@gameslabs.net>
|
Günther Jungbluth <gunther@gameslabs.net>
|
||||||
Hakan Özler <hakan.ozler@kodcu.com>
|
Hakan Özler <hakan.ozler@kodcu.com>
|
||||||
Hao Shu Wei <haoshuwei24@gmail.com>
|
Hao Shu Wei <haosw@cn.ibm.com>
|
||||||
Hao Shu Wei <haoshuwei24@gmail.com> <haoshuwei1989@163.com>
|
Hao Shu Wei <haosw@cn.ibm.com> <haoshuwei1989@163.com>
|
||||||
Hao Shu Wei <haoshuwei24@gmail.com> <haosw@cn.ibm.com>
|
|
||||||
Harald Albers <github@albersweb.de> <albers@users.noreply.github.com>
|
Harald Albers <github@albersweb.de> <albers@users.noreply.github.com>
|
||||||
Harald Niesche <harald@niesche.de>
|
|
||||||
Harold Cooper <hrldcpr@gmail.com>
|
Harold Cooper <hrldcpr@gmail.com>
|
||||||
Harry Zhang <harryz@hyper.sh>
|
|
||||||
Harry Zhang <harryz@hyper.sh> <harryzhang@zju.edu.cn>
|
Harry Zhang <harryz@hyper.sh> <harryzhang@zju.edu.cn>
|
||||||
Harry Zhang <harryz@hyper.sh> <resouer@163.com>
|
Harry Zhang <harryz@hyper.sh> <resouer@163.com>
|
||||||
Harry Zhang <harryz@hyper.sh> <resouer@gmail.com>
|
Harry Zhang <harryz@hyper.sh> <resouer@gmail.com>
|
||||||
|
Harry Zhang <resouer@163.com>
|
||||||
Harshal Patil <harshal.patil@in.ibm.com> <harche@users.noreply.github.com>
|
Harshal Patil <harshal.patil@in.ibm.com> <harche@users.noreply.github.com>
|
||||||
He Simei <hesimei@zju.edu.cn>
|
|
||||||
Helen Xie <chenjg@harmonycloud.cn>
|
Helen Xie <chenjg@harmonycloud.cn>
|
||||||
Hiroyuki Sasagawa <hs19870702@gmail.com>
|
Hiroyuki Sasagawa <hs19870702@gmail.com>
|
||||||
Hollie Teal <hollie@docker.com>
|
Hollie Teal <hollie@docker.com>
|
||||||
Hollie Teal <hollie@docker.com> <hollie.teal@docker.com>
|
Hollie Teal <hollie@docker.com> <hollie.teal@docker.com>
|
||||||
Hollie Teal <hollie@docker.com> <hollietealok@users.noreply.github.com>
|
Hollie Teal <hollie@docker.com> <hollietealok@users.noreply.github.com>
|
||||||
hsinko <21551195@zju.edu.cn> <hsinko@users.noreply.github.com>
|
|
||||||
Hu Keping <hukeping@huawei.com>
|
Hu Keping <hukeping@huawei.com>
|
||||||
Huajin Tong <fliterdashen@gmail.com>
|
|
||||||
Hui Kang <hkang.sunysb@gmail.com>
|
|
||||||
Hui Kang <hkang.sunysb@gmail.com> <kangh@us.ibm.com>
|
|
||||||
Huu Nguyen <huu@prismskylabs.com> <whoshuu@gmail.com>
|
Huu Nguyen <huu@prismskylabs.com> <whoshuu@gmail.com>
|
||||||
Hyeongkyu Lee <hyeongkyu.lee@navercorp.com>
|
|
||||||
Hyzhou Zhy <hyzhou.zhy@alibaba-inc.com>
|
Hyzhou Zhy <hyzhou.zhy@alibaba-inc.com>
|
||||||
Hyzhou Zhy <hyzhou.zhy@alibaba-inc.com> <1187766782@qq.com>
|
Hyzhou Zhy <hyzhou.zhy@alibaba-inc.com> <1187766782@qq.com>
|
||||||
Ian Campbell <ian.campbell@docker.com>
|
|
||||||
Ian Campbell <ian.campbell@docker.com> <ijc@docker.com>
|
|
||||||
Ilya Khlopotov <ilya.khlopotov@gmail.com>
|
Ilya Khlopotov <ilya.khlopotov@gmail.com>
|
||||||
Iskander Sharipov <quasilyte@gmail.com>
|
Iskander Sharipov <quasilyte@gmail.com>
|
||||||
Ivan Babrou <ibobrik@gmail.com>
|
|
||||||
Ivan Markin <sw@nogoegst.net> <twim@riseup.net>
|
Ivan Markin <sw@nogoegst.net> <twim@riseup.net>
|
||||||
Jack Laxson <jackjrabbit@gmail.com>
|
Jack Laxson <jackjrabbit@gmail.com>
|
||||||
Jacob Atzen <jacob@jacobatzen.dk> <jatzen@gmail.com>
|
Jacob Atzen <jacob@jacobatzen.dk> <jatzen@gmail.com>
|
||||||
Jacob Tomlinson <jacob@tom.linson.uk> <jacobtomlinson@users.noreply.github.com>
|
Jacob Tomlinson <jacob@tom.linson.uk> <jacobtomlinson@users.noreply.github.com>
|
||||||
Jaivish Kothari <janonymous.codevulture@gmail.com>
|
Jaivish Kothari <janonymous.codevulture@gmail.com>
|
||||||
Jake Moshenko <jake@devtable.com>
|
|
||||||
Jakub Drahos <jdrahos@pulsepoint.com>
|
|
||||||
Jakub Drahos <jdrahos@pulsepoint.com> <jack.drahos@gmail.com>
|
|
||||||
James Nesbitt <jnesbitt@mirantis.com>
|
|
||||||
James Nesbitt <jnesbitt@mirantis.com> <james.nesbitt@wunderkraut.com>
|
|
||||||
Jamie Hannaford <jamie@limetree.org> <jamie.hannaford@rackspace.com>
|
Jamie Hannaford <jamie@limetree.org> <jamie.hannaford@rackspace.com>
|
||||||
Jan Götte <jaseg@jaseg.net>
|
|
||||||
Jana Radhakrishnan <mrjana@docker.com>
|
|
||||||
Jana Radhakrishnan <mrjana@docker.com> <mrjana@socketplane.io>
|
|
||||||
Javier Bassi <javierbassi@gmail.com>
|
|
||||||
Javier Bassi <javierbassi@gmail.com> <CrimsonGlory@users.noreply.github.com>
|
|
||||||
Jay Lim <jay@imjching.com>
|
|
||||||
Jay Lim <jay@imjching.com> <imjching@hotmail.com>
|
|
||||||
Jean Rouge <rougej+github@gmail.com> <jer329@cornell.edu>
|
Jean Rouge <rougej+github@gmail.com> <jer329@cornell.edu>
|
||||||
Jean-Baptiste Barth <jeanbaptiste.barth@gmail.com>
|
Jean-Baptiste Barth <jeanbaptiste.barth@gmail.com>
|
||||||
Jean-Baptiste Dalido <jeanbaptiste@appgratis.com>
|
Jean-Baptiste Dalido <jeanbaptiste@appgratis.com>
|
||||||
|
@ -311,16 +212,15 @@ Jean-Tiare Le Bigot <jt@yadutaf.fr> <admin@jtlebi.fr>
|
||||||
Jeff Anderson <jeff@docker.com> <jefferya@programmerq.net>
|
Jeff Anderson <jeff@docker.com> <jefferya@programmerq.net>
|
||||||
Jeff Nickoloff <jeff.nickoloff@gmail.com> <jeff@allingeek.com>
|
Jeff Nickoloff <jeff.nickoloff@gmail.com> <jeff@allingeek.com>
|
||||||
Jeroen Franse <jeroenfranse@gmail.com>
|
Jeroen Franse <jeroenfranse@gmail.com>
|
||||||
Jessica Frazelle <jess@oxide.computer>
|
Jessica Frazelle <acidburn@microsoft.com>
|
||||||
Jessica Frazelle <jess@oxide.computer> <acidburn@docker.com>
|
Jessica Frazelle <acidburn@microsoft.com> <acidburn@docker.com>
|
||||||
Jessica Frazelle <jess@oxide.computer> <acidburn@google.com>
|
Jessica Frazelle <acidburn@microsoft.com> <acidburn@google.com>
|
||||||
Jessica Frazelle <jess@oxide.computer> <acidburn@microsoft.com>
|
Jessica Frazelle <acidburn@microsoft.com> <jess@docker.com>
|
||||||
Jessica Frazelle <jess@oxide.computer> <jess@docker.com>
|
Jessica Frazelle <acidburn@microsoft.com> <jess@mesosphere.com>
|
||||||
Jessica Frazelle <jess@oxide.computer> <jess@mesosphere.com>
|
Jessica Frazelle <acidburn@microsoft.com> <jessfraz@google.com>
|
||||||
Jessica Frazelle <jess@oxide.computer> <jessfraz@google.com>
|
Jessica Frazelle <acidburn@microsoft.com> <jfrazelle@users.noreply.github.com>
|
||||||
Jessica Frazelle <jess@oxide.computer> <jfrazelle@users.noreply.github.com>
|
Jessica Frazelle <acidburn@microsoft.com> <me@jessfraz.com>
|
||||||
Jessica Frazelle <jess@oxide.computer> <me@jessfraz.com>
|
Jessica Frazelle <acidburn@microsoft.com> <princess@docker.com>
|
||||||
Jessica Frazelle <jess@oxide.computer> <princess@docker.com>
|
|
||||||
Jian Liao <jliao@alauda.io>
|
Jian Liao <jliao@alauda.io>
|
||||||
Jiang Jinyang <jjyruby@gmail.com>
|
Jiang Jinyang <jjyruby@gmail.com>
|
||||||
Jiang Jinyang <jjyruby@gmail.com> <jiangjinyang@outlook.com>
|
Jiang Jinyang <jjyruby@gmail.com> <jiangjinyang@outlook.com>
|
||||||
|
@ -332,17 +232,15 @@ Joffrey F <joffrey@docker.com> <f.joffrey@gmail.com>
|
||||||
Joffrey F <joffrey@docker.com> <joffrey@dotcloud.com>
|
Joffrey F <joffrey@docker.com> <joffrey@dotcloud.com>
|
||||||
Johan Euphrosine <proppy@google.com> <proppy@aminche.com>
|
Johan Euphrosine <proppy@google.com> <proppy@aminche.com>
|
||||||
John Harris <john@johnharris.io>
|
John Harris <john@johnharris.io>
|
||||||
John Howard <github@lowenna.com>
|
John Howard (VM) <John.Howard@microsoft.com>
|
||||||
John Howard <github@lowenna.com> <10522484+lowenna@users.noreply.github.com>
|
John Howard (VM) <John.Howard@microsoft.com> <jhoward@microsoft.com>
|
||||||
John Howard <github@lowenna.com> <jhoward@microsoft.com>
|
John Howard (VM) <John.Howard@microsoft.com> <jhoward@ntdev.microsoft.com>
|
||||||
John Howard <github@lowenna.com> <jhoward@ntdev.microsoft.com>
|
John Howard (VM) <John.Howard@microsoft.com> <jhowardmsft@users.noreply.github.com>
|
||||||
John Howard <github@lowenna.com> <jhowardmsft@users.noreply.github.com>
|
John Howard (VM) <John.Howard@microsoft.com> <john.howard@microsoft.com>
|
||||||
John Howard <github@lowenna.com> <john.howard@microsoft.com>
|
|
||||||
John Howard <github@lowenna.com> <john@lowenna.com>
|
|
||||||
John Stephens <johnstep@docker.com> <johnstep@users.noreply.github.com>
|
John Stephens <johnstep@docker.com> <johnstep@users.noreply.github.com>
|
||||||
Jon Surrell <jon.surrell@gmail.com> <jon.surrell@automattic.com>
|
|
||||||
Jonathan Choy <jonathan.j.choy@gmail.com>
|
Jonathan Choy <jonathan.j.choy@gmail.com>
|
||||||
Jonathan Choy <jonathan.j.choy@gmail.com> <oni@tetsujinlabs.com>
|
Jonathan Choy <jonathan.j.choy@gmail.com> <oni@tetsujinlabs.com>
|
||||||
|
Jon Surrell <jon.surrell@gmail.com> <jon.surrell@automattic.com>
|
||||||
Jordan Arentsen <blissdev@gmail.com>
|
Jordan Arentsen <blissdev@gmail.com>
|
||||||
Jordan Jennings <jjn2009@gmail.com> <jjn2009@users.noreply.github.com>
|
Jordan Jennings <jjn2009@gmail.com> <jjn2009@users.noreply.github.com>
|
||||||
Jorit Kleine-Möllhoff <joppich@bricknet.de> <joppich@users.noreply.github.com>
|
Jorit Kleine-Möllhoff <joppich@bricknet.de> <joppich@users.noreply.github.com>
|
||||||
|
@ -358,12 +256,9 @@ Josh Wilson <josh.wilson@fivestars.com> <jcwilson@users.noreply.github.com>
|
||||||
Joyce Jang <mail@joycejang.com>
|
Joyce Jang <mail@joycejang.com>
|
||||||
Julien Bordellier <julienbordellier@gmail.com> <git@julienbordellier.com>
|
Julien Bordellier <julienbordellier@gmail.com> <git@julienbordellier.com>
|
||||||
Julien Bordellier <julienbordellier@gmail.com> <me@julienbordellier.com>
|
Julien Bordellier <julienbordellier@gmail.com> <me@julienbordellier.com>
|
||||||
Jun Du <dujun5@huawei.com>
|
|
||||||
Justin Cormack <justin.cormack@docker.com>
|
Justin Cormack <justin.cormack@docker.com>
|
||||||
Justin Cormack <justin.cormack@docker.com> <justin.cormack@unikernel.com>
|
Justin Cormack <justin.cormack@docker.com> <justin.cormack@unikernel.com>
|
||||||
Justin Cormack <justin.cormack@docker.com> <justin@specialbusservice.com>
|
Justin Cormack <justin.cormack@docker.com> <justin@specialbusservice.com>
|
||||||
Justin Keller <85903732+jk-vb@users.noreply.github.com>
|
|
||||||
Justin Keller <85903732+jk-vb@users.noreply.github.com> <jkeller@vb-jkeller-mbp.local>
|
|
||||||
Justin Simonelis <justin.p.simonelis@gmail.com> <justin.simonelis@PTS-JSIMON2.toronto.exclamation.com>
|
Justin Simonelis <justin.p.simonelis@gmail.com> <justin.simonelis@PTS-JSIMON2.toronto.exclamation.com>
|
||||||
Justin Terry <juterry@microsoft.com>
|
Justin Terry <juterry@microsoft.com>
|
||||||
Jérôme Petazzoni <jerome.petazzoni@docker.com> <jerome.petazzoni@dotcloud.com>
|
Jérôme Petazzoni <jerome.petazzoni@docker.com> <jerome.petazzoni@dotcloud.com>
|
||||||
|
@ -380,9 +275,6 @@ Ken Cochrane <kencochrane@gmail.com> <KenCochrane@gmail.com>
|
||||||
Ken Herner <kherner@progress.com> <chosenken@gmail.com>
|
Ken Herner <kherner@progress.com> <chosenken@gmail.com>
|
||||||
Ken Reese <krrgithub@gmail.com>
|
Ken Reese <krrgithub@gmail.com>
|
||||||
Kenfe-Mickaël Laventure <mickael.laventure@gmail.com>
|
Kenfe-Mickaël Laventure <mickael.laventure@gmail.com>
|
||||||
Kevin Alvarez <github@crazymax.dev>
|
|
||||||
Kevin Alvarez <github@crazymax.dev> <1951866+crazy-max@users.noreply.github.com>
|
|
||||||
Kevin Alvarez <github@crazymax.dev> <crazy-max@users.noreply.github.com>
|
|
||||||
Kevin Feyrer <kevin.feyrer@btinternet.com> <kevinfeyrer@users.noreply.github.com>
|
Kevin Feyrer <kevin.feyrer@btinternet.com> <kevinfeyrer@users.noreply.github.com>
|
||||||
Kevin Kern <kaiwentan@harmonycloud.cn>
|
Kevin Kern <kaiwentan@harmonycloud.cn>
|
||||||
Kevin Meredith <kevin.m.meredith@gmail.com>
|
Kevin Meredith <kevin.m.meredith@gmail.com>
|
||||||
|
@ -393,17 +285,11 @@ Konrad Kleine <konrad.wilhelm.kleine@gmail.com> <kwk@users.noreply.github.com>
|
||||||
Konstantin Gribov <grossws@gmail.com>
|
Konstantin Gribov <grossws@gmail.com>
|
||||||
Konstantin Pelykh <kpelykh@zettaset.com>
|
Konstantin Pelykh <kpelykh@zettaset.com>
|
||||||
Kotaro Yoshimatsu <kotaro.yoshimatsu@gmail.com>
|
Kotaro Yoshimatsu <kotaro.yoshimatsu@gmail.com>
|
||||||
Kunal Kushwaha <kushwaha_kunal_v7@lab.ntt.co.jp>
|
|
||||||
Kunal Kushwaha <kushwaha_kunal_v7@lab.ntt.co.jp> <btkushuwahak@KUNAL-PC.swh.swh.nttdata.co.jp>
|
|
||||||
Kunal Kushwaha <kushwaha_kunal_v7@lab.ntt.co.jp> <kunal.kushwaha@gmail.com>
|
Kunal Kushwaha <kushwaha_kunal_v7@lab.ntt.co.jp> <kunal.kushwaha@gmail.com>
|
||||||
Kyle Squizzato <ksquizz@gmail.com>
|
|
||||||
Kyle Squizzato <ksquizz@gmail.com> <kyle.squizzato@docker.com>
|
|
||||||
Lajos Papp <lajos.papp@sequenceiq.com> <lalyos@yahoo.com>
|
Lajos Papp <lajos.papp@sequenceiq.com> <lalyos@yahoo.com>
|
||||||
Lei Gong <lgong@alauda.io>
|
Lei Gong <lgong@alauda.io>
|
||||||
Lei Jitang <leijitang@huawei.com>
|
Lei Jitang <leijitang@huawei.com>
|
||||||
Lei Jitang <leijitang@huawei.com> <leijitang@gmail.com>
|
Lei Jitang <leijitang@huawei.com> <leijitang@gmail.com>
|
||||||
Lei Jitang <leijitang@huawei.com> <leijitang@outlook.com>
|
|
||||||
Leiiwang <u2takey@gmail.com>
|
|
||||||
Liang Mingqiang <mqliang.zju@gmail.com>
|
Liang Mingqiang <mqliang.zju@gmail.com>
|
||||||
Liang-Chi Hsieh <viirya@gmail.com>
|
Liang-Chi Hsieh <viirya@gmail.com>
|
||||||
Liao Qingwei <liaoqingwei@huawei.com>
|
Liao Qingwei <liaoqingwei@huawei.com>
|
||||||
|
@ -420,11 +306,8 @@ Lyn <energylyn@zju.edu.cn>
|
||||||
Lynda O'Leary <lyndaoleary29@gmail.com>
|
Lynda O'Leary <lyndaoleary29@gmail.com>
|
||||||
Lynda O'Leary <lyndaoleary29@gmail.com> <lyndaoleary@hotmail.com>
|
Lynda O'Leary <lyndaoleary29@gmail.com> <lyndaoleary@hotmail.com>
|
||||||
Ma Müller <mueller-ma@users.noreply.github.com>
|
Ma Müller <mueller-ma@users.noreply.github.com>
|
||||||
Madhan Raj Mookkandy <MadhanRaj.Mookkandy@microsoft.com>
|
|
||||||
Madhan Raj Mookkandy <MadhanRaj.Mookkandy@microsoft.com> <madhanm@corp.microsoft.com>
|
|
||||||
Madhan Raj Mookkandy <MadhanRaj.Mookkandy@microsoft.com> <madhanm@microsoft.com>
|
Madhan Raj Mookkandy <MadhanRaj.Mookkandy@microsoft.com> <madhanm@microsoft.com>
|
||||||
Madhu Venugopal <mavenugo@gmail.com> <madhu@docker.com>
|
Madhu Venugopal <madhu@socketplane.io> <madhu@docker.com>
|
||||||
Madhu Venugopal <mavenugo@gmail.com> <madhu@socketplane.io>
|
|
||||||
Mageee <fangpuyi@foxmail.com> <21521230.zju.edu.cn>
|
Mageee <fangpuyi@foxmail.com> <21521230.zju.edu.cn>
|
||||||
Mansi Nahar <mmn4185@rit.edu> <mansi.nahar@macbookpro-mansinahar.local>
|
Mansi Nahar <mmn4185@rit.edu> <mansi.nahar@macbookpro-mansinahar.local>
|
||||||
Mansi Nahar <mmn4185@rit.edu> <mansinahar@users.noreply.github.com>
|
Mansi Nahar <mmn4185@rit.edu> <mansinahar@users.noreply.github.com>
|
||||||
|
@ -437,12 +320,10 @@ Markan Patel <mpatel678@gmail.com>
|
||||||
Markus Kortlang <hyp3rdino@googlemail.com> <markus.kortlang@lhsystems.com>
|
Markus Kortlang <hyp3rdino@googlemail.com> <markus.kortlang@lhsystems.com>
|
||||||
Martin Redmond <redmond.martin@gmail.com> <martin@tinychat.com>
|
Martin Redmond <redmond.martin@gmail.com> <martin@tinychat.com>
|
||||||
Martin Redmond <redmond.martin@gmail.com> <xgithub@redmond5.com>
|
Martin Redmond <redmond.martin@gmail.com> <xgithub@redmond5.com>
|
||||||
Maru Newby <mnewby@thesprawl.net>
|
|
||||||
Mary Anthony <mary.anthony@docker.com> <mary@docker.com>
|
Mary Anthony <mary.anthony@docker.com> <mary@docker.com>
|
||||||
Mary Anthony <mary.anthony@docker.com> <moxieandmore@gmail.com>
|
Mary Anthony <mary.anthony@docker.com> <moxieandmore@gmail.com>
|
||||||
Mary Anthony <mary.anthony@docker.com> moxiegirl <mary@docker.com>
|
Mary Anthony <mary.anthony@docker.com> moxiegirl <mary@docker.com>
|
||||||
Masato Ohba <over.rye@gmail.com>
|
Masato Ohba <over.rye@gmail.com>
|
||||||
Mathieu Paturel <mathieu.paturel@gmail.com>
|
|
||||||
Matt Bentley <matt.bentley@docker.com> <mbentley@mbentley.net>
|
Matt Bentley <matt.bentley@docker.com> <mbentley@mbentley.net>
|
||||||
Matt Schurenko <matt.schurenko@gmail.com>
|
Matt Schurenko <matt.schurenko@gmail.com>
|
||||||
Matt Williams <mattyw@me.com>
|
Matt Williams <mattyw@me.com>
|
||||||
|
@ -454,48 +335,30 @@ Matthias Kühnle <git.nivoc@neverbox.com> <kuehnle@online.de>
|
||||||
Mauricio Garavaglia <mauricio@medallia.com> <mauriciogaravaglia@gmail.com>
|
Mauricio Garavaglia <mauricio@medallia.com> <mauriciogaravaglia@gmail.com>
|
||||||
Maxwell <csuhp007@gmail.com>
|
Maxwell <csuhp007@gmail.com>
|
||||||
Maxwell <csuhp007@gmail.com> <csuhqg@foxmail.com>
|
Maxwell <csuhp007@gmail.com> <csuhqg@foxmail.com>
|
||||||
Menghui Chen <menghui.chen@alibaba-inc.com>
|
Michael Crosby <michael@docker.com> <crosby.michael@gmail.com>
|
||||||
Michael Beskin <mrbeskin@gmail.com>
|
Michael Crosby <michael@docker.com> <crosbymichael@gmail.com>
|
||||||
Michael Crosby <crosbymichael@gmail.com>
|
Michael Crosby <michael@docker.com> <michael@crosbymichael.com>
|
||||||
Michael Crosby <crosbymichael@gmail.com> <crosby.michael@gmail.com>
|
Michał Gryko <github@odkurzacz.org>
|
||||||
Michael Crosby <crosbymichael@gmail.com> <michael@crosbymichael.com>
|
|
||||||
Michael Crosby <crosbymichael@gmail.com> <michael@docker.com>
|
|
||||||
Michael Crosby <crosbymichael@gmail.com> <michael@thepasture.io>
|
|
||||||
Michael Hudson-Doyle <michael.hudson@canonical.com> <michael.hudson@linaro.org>
|
Michael Hudson-Doyle <michael.hudson@canonical.com> <michael.hudson@linaro.org>
|
||||||
Michael Huettermann <michael@huettermann.net>
|
Michael Huettermann <michael@huettermann.net>
|
||||||
Michael Käufl <docker@c.michael-kaeufl.de> <michael-k@users.noreply.github.com>
|
Michael Käufl <docker@c.michael-kaeufl.de> <michael-k@users.noreply.github.com>
|
||||||
Michael Nussbaum <michael.nussbaum@getbraintree.com>
|
Michael Nussbaum <michael.nussbaum@getbraintree.com>
|
||||||
Michael Nussbaum <michael.nussbaum@getbraintree.com> <code@getbraintree.com>
|
Michael Nussbaum <michael.nussbaum@getbraintree.com> <code@getbraintree.com>
|
||||||
Michael Spetsiotis <michael_spets@hotmail.com>
|
Michael Spetsiotis <michael_spets@hotmail.com>
|
||||||
Michael Stapelberg <michael+gh@stapelberg.de>
|
|
||||||
Michael Stapelberg <michael+gh@stapelberg.de> <stapelberg@google.com>
|
|
||||||
Michal Kostrzewa <michal.kostrzewa@codilime.com>
|
|
||||||
Michal Kostrzewa <michal.kostrzewa@codilime.com> <kostrzewa.michal@o2.pl>
|
|
||||||
Michal Minář <miminar@redhat.com>
|
Michal Minář <miminar@redhat.com>
|
||||||
Michał Gryko <github@odkurzacz.org>
|
|
||||||
Michiel de Jong <michiel@unhosted.org>
|
Michiel de Jong <michiel@unhosted.org>
|
||||||
Mickaël Fortunato <morsi.morsicus@gmail.com>
|
Mickaël Fortunato <morsi.morsicus@gmail.com>
|
||||||
Miguel Angel Alvarez Cabrerizo <doncicuto@gmail.com> <30386061+doncicuto@users.noreply.github.com>
|
Miguel Angel Alvarez Cabrerizo <doncicuto@gmail.com> <30386061+doncicuto@users.noreply.github.com>
|
||||||
Miguel Angel Fernández <elmendalerenda@gmail.com>
|
Miguel Angel Fernández <elmendalerenda@gmail.com>
|
||||||
Mihai Borobocea <MihaiBorob@gmail.com> <MihaiBorobocea@gmail.com>
|
Mihai Borobocea <MihaiBorob@gmail.com> <MihaiBorobocea@gmail.com>
|
||||||
Mikael Davranche <mikael.davranche@corp.ovh.com>
|
|
||||||
Mikael Davranche <mikael.davranche@corp.ovh.com> <mikael.davranche@corp.ovh.net>
|
|
||||||
Mike Casas <mkcsas0@gmail.com> <mikecasas@users.noreply.github.com>
|
Mike Casas <mkcsas0@gmail.com> <mikecasas@users.noreply.github.com>
|
||||||
Mike Goelzer <mike.goelzer@docker.com> <mgoelzer@docker.com>
|
Mike Goelzer <mike.goelzer@docker.com> <mgoelzer@docker.com>
|
||||||
Milas Bowman <devnull@milas.dev>
|
|
||||||
Milas Bowman <devnull@milas.dev> <milasb@gmail.com>
|
|
||||||
Milas Bowman <devnull@milas.dev> <milas.bowman@docker.com>
|
|
||||||
Milind Chawre <milindchawre@gmail.com>
|
Milind Chawre <milindchawre@gmail.com>
|
||||||
Misty Stanley-Jones <misty@docker.com> <misty@apache.org>
|
Misty Stanley-Jones <misty@docker.com> <misty@apache.org>
|
||||||
Mohammad Banikazemi <MBanikazemi@gmail.com>
|
|
||||||
Mohammad Banikazemi <MBanikazemi@gmail.com> <mb@us.ibm.com>
|
|
||||||
Mohd Sadiq <mohdsadiq058@gmail.com> <mohdsadiq058@gmail.com>
|
|
||||||
Mohd Sadiq <mohdsadiq058@gmail.com> <42430865+msadiq058@users.noreply.github.com>
|
|
||||||
Mohit Soni <mosoni@ebay.com> <mohitsoni1989@gmail.com>
|
Mohit Soni <mosoni@ebay.com> <mohitsoni1989@gmail.com>
|
||||||
Moorthy RS <rsmoorthy@gmail.com> <rsmoorthy@users.noreply.github.com>
|
Moorthy RS <rsmoorthy@gmail.com> <rsmoorthy@users.noreply.github.com>
|
||||||
Moysés Borges <moysesb@gmail.com>
|
Moysés Borges <moysesb@gmail.com>
|
||||||
Moysés Borges <moysesb@gmail.com> <moyses.furtado@wplex.com.br>
|
Moysés Borges <moysesb@gmail.com> <moyses.furtado@wplex.com.br>
|
||||||
mrfly <mr.wrfly@gmail.com> <wrfly@users.noreply.github.com>
|
|
||||||
Nace Oroz <orkica@gmail.com>
|
Nace Oroz <orkica@gmail.com>
|
||||||
Natasha Jarus <linuxmercedes@gmail.com>
|
Natasha Jarus <linuxmercedes@gmail.com>
|
||||||
Nathan LeClaire <nathan.leclaire@docker.com> <nathan.leclaire@gmail.com>
|
Nathan LeClaire <nathan.leclaire@docker.com> <nathan.leclaire@gmail.com>
|
||||||
|
@ -512,8 +375,6 @@ Oh Jinkyun <tintypemolly@gmail.com> <tintypemolly@Ohui-MacBook-Pro.local>
|
||||||
Oliver Reason <oli@overrateddev.co>
|
Oliver Reason <oli@overrateddev.co>
|
||||||
Olli Janatuinen <olli.janatuinen@gmail.com>
|
Olli Janatuinen <olli.janatuinen@gmail.com>
|
||||||
Olli Janatuinen <olli.janatuinen@gmail.com> <olljanat@users.noreply.github.com>
|
Olli Janatuinen <olli.janatuinen@gmail.com> <olljanat@users.noreply.github.com>
|
||||||
Onur Filiz <onur.filiz@microsoft.com>
|
|
||||||
Onur Filiz <onur.filiz@microsoft.com> <ofiliz@users.noreply.github.com>
|
|
||||||
Ouyang Liduo <oyld0210@163.com>
|
Ouyang Liduo <oyld0210@163.com>
|
||||||
Patrick Stapleton <github@gdi2290.com>
|
Patrick Stapleton <github@gdi2290.com>
|
||||||
Paul Liljenberg <liljenberg.paul@gmail.com> <letters@paulnotcom.se>
|
Paul Liljenberg <liljenberg.paul@gmail.com> <letters@paulnotcom.se>
|
||||||
|
@ -524,63 +385,41 @@ Peter Dave Hello <hsu@peterdavehello.org> <PeterDaveHello@users.noreply.github.c
|
||||||
Peter Jaffe <pjaffe@nevo.com>
|
Peter Jaffe <pjaffe@nevo.com>
|
||||||
Peter Nagy <xificurC@gmail.com> <pnagy@gratex.com>
|
Peter Nagy <xificurC@gmail.com> <pnagy@gratex.com>
|
||||||
Peter Waller <p@pwaller.net> <peter@scraperwiki.com>
|
Peter Waller <p@pwaller.net> <peter@scraperwiki.com>
|
||||||
Phil Estes <estesp@gmail.com>
|
Phil Estes <estesp@linux.vnet.ibm.com> <estesp@gmail.com>
|
||||||
Phil Estes <estesp@gmail.com> <estesp@amazon.com>
|
|
||||||
Phil Estes <estesp@gmail.com> <estesp@linux.vnet.ibm.com>
|
|
||||||
Philip Alexander Etling <paetling@gmail.com>
|
Philip Alexander Etling <paetling@gmail.com>
|
||||||
Philipp Gillé <philipp.gille@gmail.com> <philippgille@users.noreply.github.com>
|
Philipp Gillé <philipp.gille@gmail.com> <philippgille@users.noreply.github.com>
|
||||||
Prasanna Gautam <prasannagautam@gmail.com>
|
|
||||||
Puneet Pruthi <puneet.pruthi@oracle.com>
|
|
||||||
Puneet Pruthi <puneet.pruthi@oracle.com> <puneetpruthi@gmail.com>
|
|
||||||
Qiang Huang <h.huangqiang@huawei.com>
|
Qiang Huang <h.huangqiang@huawei.com>
|
||||||
Qiang Huang <h.huangqiang@huawei.com> <qhuang@10.0.2.15>
|
Qiang Huang <h.huangqiang@huawei.com> <qhuang@10.0.2.15>
|
||||||
Qin TianHuan <tianhuan@bingotree.cn>
|
|
||||||
Ray Tsang <rayt@google.com> <saturnism@users.noreply.github.com>
|
Ray Tsang <rayt@google.com> <saturnism@users.noreply.github.com>
|
||||||
Renaud Gaubert <rgaubert@nvidia.com> <renaud.gaubert@gmail.com>
|
Renaud Gaubert <rgaubert@nvidia.com> <renaud.gaubert@gmail.com>
|
||||||
Richard Scothern <richard.scothern@gmail.com>
|
|
||||||
Robert Terhaar <rterhaar@atlanticdynamic.com> <robbyt@users.noreply.github.com>
|
Robert Terhaar <rterhaar@atlanticdynamic.com> <robbyt@users.noreply.github.com>
|
||||||
Roberto G. Hashioka <roberto.hashioka@docker.com> <roberto_hashioka@hotmail.com>
|
Roberto G. Hashioka <roberto.hashioka@docker.com> <roberto_hashioka@hotmail.com>
|
||||||
Roberto Muñoz Fernández <robertomf@gmail.com> <roberto.munoz.fernandez.contractor@bbva.com>
|
Roberto Muñoz Fernández <robertomf@gmail.com> <roberto.munoz.fernandez.contractor@bbva.com>
|
||||||
Robin Thoni <robin@rthoni.com>
|
|
||||||
Roman Dudin <katrmr@gmail.com> <decadent@users.noreply.github.com>
|
Roman Dudin <katrmr@gmail.com> <decadent@users.noreply.github.com>
|
||||||
Rong Zhang <rongzhang@alauda.io>
|
Rong Zhang <rongzhang@alauda.io>
|
||||||
Rongxiang Song <tinysong1226@gmail.com>
|
Rongxiang Song <tinysong1226@gmail.com>
|
||||||
Rony Weng <ronyweng@synology.com>
|
|
||||||
Ross Boucher <rboucher@gmail.com>
|
Ross Boucher <rboucher@gmail.com>
|
||||||
Rui Cao <ruicao@alauda.io>
|
Rui Cao <ruicao@alauda.io>
|
||||||
Runshen Zhu <runshen.zhu@gmail.com>
|
Runshen Zhu <runshen.zhu@gmail.com>
|
||||||
Ryan Stelly <ryan.stelly@live.com>
|
Ryan Stelly <ryan.stelly@live.com>
|
||||||
Ryoga Saito <contact@proelbtn.com>
|
|
||||||
Ryoga Saito <contact@proelbtn.com> <proelbtn@users.noreply.github.com>
|
|
||||||
Sainath Grandhi <sainath.grandhi@intel.com>
|
|
||||||
Sainath Grandhi <sainath.grandhi@intel.com> <saiallforums@gmail.com>
|
|
||||||
Sakeven Jiang <jc5930@sina.cn>
|
Sakeven Jiang <jc5930@sina.cn>
|
||||||
Samuel Karp <me@samuelkarp.com> <skarp@amazon.com>
|
|
||||||
Sandeep Bansal <sabansal@microsoft.com>
|
Sandeep Bansal <sabansal@microsoft.com>
|
||||||
Sandeep Bansal <sabansal@microsoft.com> <msabansal@microsoft.com>
|
Sandeep Bansal <sabansal@microsoft.com> <msabansal@microsoft.com>
|
||||||
Santhosh Manohar <santhosh@docker.com>
|
|
||||||
Sargun Dhillon <sargun@netflix.com> <sargun@sargun.me>
|
Sargun Dhillon <sargun@netflix.com> <sargun@sargun.me>
|
||||||
Satoshi Tagomori <tagomoris@gmail.com>
|
|
||||||
Sean Lee <seanlee@tw.ibm.com> <scaleoutsean@users.noreply.github.com>
|
Sean Lee <seanlee@tw.ibm.com> <scaleoutsean@users.noreply.github.com>
|
||||||
Sebastiaan van Stijn <github@gone.nl>
|
|
||||||
Sebastiaan van Stijn <github@gone.nl> <moby@example.com>
|
|
||||||
Sebastiaan van Stijn <github@gone.nl> <sebastiaan@ws-key-sebas3.dpi1.dpi>
|
Sebastiaan van Stijn <github@gone.nl> <sebastiaan@ws-key-sebas3.dpi1.dpi>
|
||||||
Sebastiaan van Stijn <github@gone.nl> <thaJeztah@users.noreply.github.com>
|
Sebastiaan van Stijn <github@gone.nl> <thaJeztah@users.noreply.github.com>
|
||||||
Sebastian Thomschke <sebthom@users.noreply.github.com>
|
|
||||||
Seongyeol Lim <seongyeol37@gmail.com>
|
|
||||||
Serhii Nakon <serhii.n@thescimus.com>
|
|
||||||
Shaun Kaasten <shaunk@gmail.com>
|
Shaun Kaasten <shaunk@gmail.com>
|
||||||
Shawn Landden <shawn@churchofgit.com> <shawnlandden@gmail.com>
|
Shawn Landden <shawn@churchofgit.com> <shawnlandden@gmail.com>
|
||||||
Shengbo Song <thomassong@tencent.com>
|
Shengbo Song <thomassong@tencent.com>
|
||||||
Shengbo Song <thomassong@tencent.com> <mymneo@163.com>
|
Shengbo Song <thomassong@tencent.com> <mymneo@163.com>
|
||||||
Shih-Yuan Lee <fourdollars@gmail.com>
|
Shih-Yuan Lee <fourdollars@gmail.com>
|
||||||
Shishir Mahajan <shishir.mahajan@redhat.com> <smahajan@redhat.com>
|
Shishir Mahajan <shishir.mahajan@redhat.com> <smahajan@redhat.com>
|
||||||
Shu-Wai Chow <shu-wai.chow@seattlechildrens.org>
|
|
||||||
Shukui Yang <yangshukui@huawei.com>
|
Shukui Yang <yangshukui@huawei.com>
|
||||||
|
Shuwei Hao <haosw@cn.ibm.com>
|
||||||
|
Shuwei Hao <haosw@cn.ibm.com> <haoshuwei24@gmail.com>
|
||||||
Sidhartha Mani <sidharthamn@gmail.com>
|
Sidhartha Mani <sidharthamn@gmail.com>
|
||||||
Sjoerd Langkemper <sjoerd-github@linuxonly.nl> <sjoerd@byte.nl>
|
Sjoerd Langkemper <sjoerd-github@linuxonly.nl> <sjoerd@byte.nl>
|
||||||
Smark Meng <smark@freecoop.net>
|
|
||||||
Smark Meng <smark@freecoop.net> <smarkm@users.noreply.github.com>
|
|
||||||
Solomon Hykes <solomon@docker.com> <s@docker.com>
|
Solomon Hykes <solomon@docker.com> <s@docker.com>
|
||||||
Solomon Hykes <solomon@docker.com> <solomon.hykes@dotcloud.com>
|
Solomon Hykes <solomon@docker.com> <solomon.hykes@dotcloud.com>
|
||||||
Solomon Hykes <solomon@docker.com> <solomon@dotcloud.com>
|
Solomon Hykes <solomon@docker.com> <solomon@dotcloud.com>
|
||||||
|
@ -594,12 +433,9 @@ Stefan Berger <stefanb@linux.vnet.ibm.com>
|
||||||
Stefan Berger <stefanb@linux.vnet.ibm.com> <stefanb@us.ibm.com>
|
Stefan Berger <stefanb@linux.vnet.ibm.com> <stefanb@us.ibm.com>
|
||||||
Stefan J. Wernli <swernli@microsoft.com> <swernli@ntdev.microsoft.com>
|
Stefan J. Wernli <swernli@microsoft.com> <swernli@ntdev.microsoft.com>
|
||||||
Stefan S. <tronicum@user.github.com>
|
Stefan S. <tronicum@user.github.com>
|
||||||
Stefan Scherer <stefan.scherer@docker.com>
|
|
||||||
Stefan Scherer <stefan.scherer@docker.com> <scherer_stefan@icloud.com>
|
|
||||||
Stephan Spindler <shutefan@gmail.com> <shutefan@users.noreply.github.com>
|
Stephan Spindler <shutefan@gmail.com> <shutefan@users.noreply.github.com>
|
||||||
Stephen Day <stevvooe@gmail.com>
|
Stephen Day <stephen.day@docker.com>
|
||||||
Stephen Day <stevvooe@gmail.com> <stephen.day@docker.com>
|
Stephen Day <stephen.day@docker.com> <stevvooe@users.noreply.github.com>
|
||||||
Stephen Day <stevvooe@gmail.com> <stevvooe@users.noreply.github.com>
|
|
||||||
Steve Desmond <steve@vtsv.ca> <stevedesmond-ca@users.noreply.github.com>
|
Steve Desmond <steve@vtsv.ca> <stevedesmond-ca@users.noreply.github.com>
|
||||||
Sun Gengze <690388648@qq.com>
|
Sun Gengze <690388648@qq.com>
|
||||||
Sun Jianbo <wonderflow.sun@gmail.com>
|
Sun Jianbo <wonderflow.sun@gmail.com>
|
||||||
|
@ -611,48 +447,28 @@ Sven Dowideit <SvenDowideit@home.org.au> <SvenDowideit@fosiki.com>
|
||||||
Sven Dowideit <SvenDowideit@home.org.au> <SvenDowideit@home.org.au>
|
Sven Dowideit <SvenDowideit@home.org.au> <SvenDowideit@home.org.au>
|
||||||
Sven Dowideit <SvenDowideit@home.org.au> <SvenDowideit@users.noreply.github.com>
|
Sven Dowideit <SvenDowideit@home.org.au> <SvenDowideit@users.noreply.github.com>
|
||||||
Sven Dowideit <SvenDowideit@home.org.au> <¨SvenDowideit@home.org.au¨>
|
Sven Dowideit <SvenDowideit@home.org.au> <¨SvenDowideit@home.org.au¨>
|
||||||
Sylvain Baubeau <lebauce@gmail.com>
|
|
||||||
Sylvain Baubeau <lebauce@gmail.com> <sbaubeau@redhat.com>
|
|
||||||
Sylvain Bellemare <sylvain@ascribe.io>
|
Sylvain Bellemare <sylvain@ascribe.io>
|
||||||
Sylvain Bellemare <sylvain@ascribe.io> <sylvain.bellemare@ezeep.com>
|
Sylvain Bellemare <sylvain@ascribe.io> <sylvain.bellemare@ezeep.com>
|
||||||
Takuto Sato <tockn.jp@gmail.com>
|
|
||||||
Tangi Colin <tangicolin@gmail.com>
|
Tangi Colin <tangicolin@gmail.com>
|
||||||
Tejesh Mehta <tejesh.mehta@gmail.com> <tj@init.me>
|
Tejesh Mehta <tejesh.mehta@gmail.com> <tj@init.me>
|
||||||
Terry Chu <zue.hterry@gmail.com>
|
|
||||||
Terry Chu <zue.hterry@gmail.com> <jubosh.tw@gmail.com>
|
|
||||||
Thatcher Peskens <thatcher@docker.com>
|
Thatcher Peskens <thatcher@docker.com>
|
||||||
Thatcher Peskens <thatcher@docker.com> <thatcher@dotcloud.com>
|
Thatcher Peskens <thatcher@docker.com> <thatcher@dotcloud.com>
|
||||||
Thatcher Peskens <thatcher@docker.com> <thatcher@gmx.net>
|
Thatcher Peskens <thatcher@docker.com> <thatcher@gmx.net>
|
||||||
Thiago Alves Silva <thiago.alves@aurea.com>
|
|
||||||
Thiago Alves Silva <thiago.alves@aurea.com> <thiagoalves@users.noreply.github.com>
|
|
||||||
Thomas Gazagnaire <thomas@gazagnaire.org> <thomas@gazagnaire.com>
|
Thomas Gazagnaire <thomas@gazagnaire.org> <thomas@gazagnaire.com>
|
||||||
Thomas Ledos <thomas.ledos92@gmail.com>
|
|
||||||
Thomas Léveil <thomasleveil@gmail.com>
|
Thomas Léveil <thomasleveil@gmail.com>
|
||||||
Thomas Léveil <thomasleveil@gmail.com> <thomasleveil@users.noreply.github.com>
|
Thomas Léveil <thomasleveil@gmail.com> <thomasleveil@users.noreply.github.com>
|
||||||
Tibor Vass <teabee89@gmail.com> <tibor@docker.com>
|
Tibor Vass <teabee89@gmail.com> <tibor@docker.com>
|
||||||
Tibor Vass <teabee89@gmail.com> <tiborvass@users.noreply.github.com>
|
Tibor Vass <teabee89@gmail.com> <tiborvass@users.noreply.github.com>
|
||||||
Till Claassen <pixelistik@users.noreply.github.com>
|
|
||||||
Tim Bart <tim@fewagainstmany.com>
|
Tim Bart <tim@fewagainstmany.com>
|
||||||
Tim Bosse <taim@bosboot.org> <maztaim@users.noreply.github.com>
|
Tim Bosse <taim@bosboot.org> <maztaim@users.noreply.github.com>
|
||||||
Tim Potter <tpot@hpe.com>
|
|
||||||
Tim Potter <tpot@hpe.com> <tpot@Tims-MacBook-Pro.local>
|
|
||||||
Tim Ruffles <oi@truffles.me.uk> <timruffles@googlemail.com>
|
Tim Ruffles <oi@truffles.me.uk> <timruffles@googlemail.com>
|
||||||
Tim Terhorst <mynamewastaken+git@gmail.com>
|
Tim Terhorst <mynamewastaken+git@gmail.com>
|
||||||
Tim Wagner <tim.wagner@freenet.ag>
|
|
||||||
Tim Wagner <tim.wagner@freenet.ag> <33624860+herrwagner@users.noreply.github.com>
|
|
||||||
Tim Zju <21651152@zju.edu.cn>
|
Tim Zju <21651152@zju.edu.cn>
|
||||||
Timothy Hobbs <timothyhobbs@seznam.cz>
|
Timothy Hobbs <timothyhobbs@seznam.cz>
|
||||||
Toli Kuznets <toli@docker.com>
|
Toli Kuznets <toli@docker.com>
|
||||||
Tom Barlow <tomwbarlow@gmail.com>
|
Tom Barlow <tomwbarlow@gmail.com>
|
||||||
Tom Denham <tom@tomdee.co.uk>
|
|
||||||
Tom Denham <tom@tomdee.co.uk> <tom.denham@metaswitch.com>
|
|
||||||
Tom Sweeney <tsweeney@redhat.com>
|
Tom Sweeney <tsweeney@redhat.com>
|
||||||
Tom Wilkie <tom.wilkie@gmail.com>
|
|
||||||
Tom Wilkie <tom.wilkie@gmail.com> <tom@weave.works>
|
|
||||||
Tõnis Tiigi <tonistiigi@gmail.com>
|
Tõnis Tiigi <tonistiigi@gmail.com>
|
||||||
Trace Andreason <tandreason@gmail.com>
|
|
||||||
Trapier Marshall <tmarshall@mirantis.com>
|
|
||||||
Trapier Marshall <tmarshall@mirantis.com> <trapier.marshall@docker.com>
|
|
||||||
Trishna Guha <trishnaguha17@gmail.com>
|
Trishna Guha <trishnaguha17@gmail.com>
|
||||||
Tristan Carel <tristan@cogniteev.com>
|
Tristan Carel <tristan@cogniteev.com>
|
||||||
Tristan Carel <tristan@cogniteev.com> <tristan.carel@gmail.com>
|
Tristan Carel <tristan@cogniteev.com> <tristan.carel@gmail.com>
|
||||||
|
@ -666,25 +482,15 @@ Victor Vieux <victor.vieux@docker.com> <victor@docker.com>
|
||||||
Victor Vieux <victor.vieux@docker.com> <victor@dotcloud.com>
|
Victor Vieux <victor.vieux@docker.com> <victor@dotcloud.com>
|
||||||
Victor Vieux <victor.vieux@docker.com> <victorvieux@gmail.com>
|
Victor Vieux <victor.vieux@docker.com> <victorvieux@gmail.com>
|
||||||
Victor Vieux <victor.vieux@docker.com> <vieux@docker.com>
|
Victor Vieux <victor.vieux@docker.com> <vieux@docker.com>
|
||||||
Vikas Choudhary <choudharyvikas16@gmail.com>
|
|
||||||
Vikram bir Singh <vsingh@mirantis.com>
|
|
||||||
Vikram bir Singh <vsingh@mirantis.com> <vikrambir.singh@docker.com>
|
|
||||||
Viktor Vojnovski <viktor.vojnovski@amadeus.com> <vojnovski@gmail.com>
|
Viktor Vojnovski <viktor.vojnovski@amadeus.com> <vojnovski@gmail.com>
|
||||||
Vincent Batts <vbatts@redhat.com> <vbatts@hashbangbash.com>
|
Vincent Batts <vbatts@redhat.com> <vbatts@hashbangbash.com>
|
||||||
Vincent Bernat <vincent@bernat.ch>
|
Vincent Bernat <Vincent.Bernat@exoscale.ch> <bernat@luffy.cx>
|
||||||
Vincent Bernat <vincent@bernat.ch> <bernat@luffy.cx>
|
Vincent Bernat <Vincent.Bernat@exoscale.ch> <vincent@bernat.im>
|
||||||
Vincent Bernat <vincent@bernat.ch> <Vincent.Bernat@exoscale.ch>
|
|
||||||
Vincent Bernat <vincent@bernat.ch> <vincent@bernat.im>
|
|
||||||
Vincent Boulineau <vincent.boulineau@datadoghq.com>
|
|
||||||
Vincent Demeester <vincent.demeester@docker.com> <vincent+github@demeester.fr>
|
Vincent Demeester <vincent.demeester@docker.com> <vincent+github@demeester.fr>
|
||||||
Vincent Demeester <vincent.demeester@docker.com> <vincent@demeester.fr>
|
Vincent Demeester <vincent.demeester@docker.com> <vincent@demeester.fr>
|
||||||
Vincent Demeester <vincent.demeester@docker.com> <vincent@sbr.pm>
|
Vincent Demeester <vincent.demeester@docker.com> <vincent@sbr.pm>
|
||||||
Vishnu Kannan <vishnuk@google.com>
|
Vishnu Kannan <vishnuk@google.com>
|
||||||
Vitaly Ostrosablin <vostrosablin@virtuozzo.com>
|
|
||||||
Vitaly Ostrosablin <vostrosablin@virtuozzo.com> <tmp6154@yandex.ru>
|
|
||||||
Vladimir Rutsky <altsysrq@gmail.com> <iamironbob@gmail.com>
|
Vladimir Rutsky <altsysrq@gmail.com> <iamironbob@gmail.com>
|
||||||
Vladislav Kolesnikov <vkolesnikov@beget.ru>
|
|
||||||
Vladislav Kolesnikov <vkolesnikov@beget.ru> <prime@vladqa.ru>
|
|
||||||
Walter Stanish <walter@pratyeka.org>
|
Walter Stanish <walter@pratyeka.org>
|
||||||
Wang Chao <chao.wang@ucloud.cn>
|
Wang Chao <chao.wang@ucloud.cn>
|
||||||
Wang Chao <chao.wang@ucloud.cn> <wcwxyz@gmail.com>
|
Wang Chao <chao.wang@ucloud.cn> <wcwxyz@gmail.com>
|
||||||
|
@ -696,27 +502,16 @@ Wang Yuexiao <wang.yuexiao@zte.com.cn>
|
||||||
Wayne Chang <wayne@neverfear.org>
|
Wayne Chang <wayne@neverfear.org>
|
||||||
Wayne Song <wsong@docker.com> <wsong@users.noreply.github.com>
|
Wayne Song <wsong@docker.com> <wsong@users.noreply.github.com>
|
||||||
Wei Wu <wuwei4455@gmail.com> cizixs <cizixs@163.com>
|
Wei Wu <wuwei4455@gmail.com> cizixs <cizixs@163.com>
|
||||||
Wei-Ting Kuo <waitingkuo0527@gmail.com>
|
|
||||||
Wen Cheng Ma <wenchma@cn.ibm.com>
|
|
||||||
Wenjun Tang <tangwj2@lenovo.com> <dodia@163.com>
|
Wenjun Tang <tangwj2@lenovo.com> <dodia@163.com>
|
||||||
Wewang Xiaorenfine <wang.xiaoren@zte.com.cn>
|
Wewang Xiaorenfine <wang.xiaoren@zte.com.cn>
|
||||||
Will Weaver <monkey@buildingbananas.com>
|
Will Weaver <monkey@buildingbananas.com>
|
||||||
Wing-Kam Wong <wingkwong.code@gmail.com>
|
|
||||||
WuLonghui <wlh6666@qq.com>
|
|
||||||
Xian Chaobo <xianchaobo@huawei.com>
|
Xian Chaobo <xianchaobo@huawei.com>
|
||||||
Xian Chaobo <xianchaobo@huawei.com> <jimmyxian2004@yahoo.com.cn>
|
Xian Chaobo <xianchaobo@huawei.com> <jimmyxian2004@yahoo.com.cn>
|
||||||
Xianglin Gao <xlgao@zju.edu.cn>
|
Xianglin Gao <xlgao@zju.edu.cn>
|
||||||
Xianjie <guxianjie@gmail.com>
|
|
||||||
Xianjie <guxianjie@gmail.com> <datastream@datastream-laptop.local>
|
|
||||||
Xianlu Bird <xianlubird@gmail.com>
|
Xianlu Bird <xianlubird@gmail.com>
|
||||||
Xiao YongBiao <xyb4638@gmail.com>
|
Xiao YongBiao <xyb4638@gmail.com>
|
||||||
Xiao Zhang <xiaozhang0210@hotmail.com>
|
|
||||||
Xiaodong Liu <liuxiaodong@loongson.cn>
|
|
||||||
Xiaodong Zhang <a4012017@sina.com>
|
Xiaodong Zhang <a4012017@sina.com>
|
||||||
Xiaohua Ding <xiao_hua_ding@sina.cn>
|
|
||||||
Xiaoyu Zhang <zhang.xiaoyu33@zte.com.cn>
|
Xiaoyu Zhang <zhang.xiaoyu33@zte.com.cn>
|
||||||
Xinfeng Liu <XinfengLiu@icloud.com>
|
|
||||||
Xinfeng Liu <XinfengLiu@icloud.com> <xinfeng.liu@gmail.com>
|
|
||||||
Xuecong Liao <satorulogic@gmail.com>
|
Xuecong Liao <satorulogic@gmail.com>
|
||||||
Yamasaki Masahide <masahide.y@gmail.com>
|
Yamasaki Masahide <masahide.y@gmail.com>
|
||||||
Yao Zaiyong <yaozaiyong@hotmail.com>
|
Yao Zaiyong <yaozaiyong@hotmail.com>
|
||||||
|
@ -733,23 +528,12 @@ Yu Changchun <yuchangchun1@huawei.com>
|
||||||
Yu Chengxia <yuchengxia@huawei.com>
|
Yu Chengxia <yuchengxia@huawei.com>
|
||||||
Yu Peng <yu.peng36@zte.com.cn>
|
Yu Peng <yu.peng36@zte.com.cn>
|
||||||
Yu Peng <yu.peng36@zte.com.cn> <yupeng36@zte.com.cn>
|
Yu Peng <yu.peng36@zte.com.cn> <yupeng36@zte.com.cn>
|
||||||
Yuan Sun <sunyuan3@huawei.com>
|
|
||||||
Yue Zhang <zy675793960@yeah.net>
|
Yue Zhang <zy675793960@yeah.net>
|
||||||
Yufei Xiong <yufei.xiong@qq.com>
|
|
||||||
Zach Gershman <zachgersh@gmail.com>
|
|
||||||
Zach Gershman <zachgersh@gmail.com> <zachgersh@users.noreply.github.com>
|
|
||||||
Zachary Jaffee <zjaffee@us.ibm.com> <zij@case.edu>
|
Zachary Jaffee <zjaffee@us.ibm.com> <zij@case.edu>
|
||||||
Zachary Jaffee <zjaffee@us.ibm.com> <zjaffee@apache.org>
|
Zachary Jaffee <zjaffee@us.ibm.com> <zjaffee@apache.org>
|
||||||
Zhang Kun <zkazure@gmail.com>
|
|
||||||
Zhang Wentao <zhangwentao234@huawei.com>
|
|
||||||
ZhangHang <stevezhang2014@gmail.com>
|
ZhangHang <stevezhang2014@gmail.com>
|
||||||
Zhenkun Bi <bi.zhenkun@zte.com.cn>
|
Zhenkun Bi <bi.zhenkun@zte.com.cn>
|
||||||
Zhou Hao <zhouhao@cn.fujitsu.com>
|
|
||||||
Zhoulin Xie <zhoulin.xie@daocloud.io>
|
Zhoulin Xie <zhoulin.xie@daocloud.io>
|
||||||
|
Zhou Hao <zhouhao@cn.fujitsu.com>
|
||||||
Zhu Kunjia <zhu.kunjia@zte.com.cn>
|
Zhu Kunjia <zhu.kunjia@zte.com.cn>
|
||||||
Ziheng Liu <lzhfromustc@gmail.com>
|
|
||||||
Zou Yu <zouyu7@huawei.com>
|
Zou Yu <zouyu7@huawei.com>
|
||||||
Zuhayr Elahi <zuhayr.elahi@docker.com>
|
|
||||||
Zuhayr Elahi <zuhayr.elahi@docker.com> <elahi.zuhayr@gmail.com>
|
|
||||||
정재영 <jjy600901@gmail.com>
|
|
||||||
정재영 <jjy600901@gmail.com> <43400316+J-jaeyoung@users.noreply.github.com>
|
|
||||||
|
|
3609
CHANGELOG.md
Normal file
3609
CHANGELOG.md
Normal file
File diff suppressed because it is too large
Load diff
|
@ -27,10 +27,10 @@ issue, please bring it to their attention right away!
|
||||||
Please **DO NOT** file a public issue, instead send your report privately to
|
Please **DO NOT** file a public issue, instead send your report privately to
|
||||||
[security@docker.com](mailto:security@docker.com).
|
[security@docker.com](mailto:security@docker.com).
|
||||||
|
|
||||||
Security reports are greatly appreciated and we will publicly thank you for it,
|
Security reports are greatly appreciated and we will publicly thank you for it.
|
||||||
although we keep your name confidential if you request it. We also like to send
|
We also like to send gifts—if you're into schwag, make sure to let
|
||||||
gifts—if you're into schwag, make sure to let us know. We currently do not
|
us know. We currently do not offer a paid security bounty program, but are not
|
||||||
offer a paid security bounty program, but are not ruling it out in the future.
|
ruling it out in the future.
|
||||||
|
|
||||||
|
|
||||||
## Reporting other issues
|
## Reporting other issues
|
||||||
|
@ -72,7 +72,7 @@ anybody starts working on it.
|
||||||
We are always thrilled to receive pull requests. We do our best to process them
|
We are always thrilled to receive pull requests. We do our best to process them
|
||||||
quickly. If your pull request is not accepted on the first try,
|
quickly. If your pull request is not accepted on the first try,
|
||||||
don't get discouraged! Our contributor's guide explains [the review process we
|
don't get discouraged! Our contributor's guide explains [the review process we
|
||||||
use for simple changes](https://docs.docker.com/contribute/overview/).
|
use for simple changes](https://docs.docker.com/opensource/workflow/make-a-contribution/).
|
||||||
|
|
||||||
### Design and cleanup proposals
|
### Design and cleanup proposals
|
||||||
|
|
||||||
|
@ -101,8 +101,9 @@ the contributors guide.
|
||||||
<td>
|
<td>
|
||||||
<p>
|
<p>
|
||||||
Register for the Docker Community Slack at
|
Register for the Docker Community Slack at
|
||||||
<a href="https://dockr.ly/comm-slack" target="_blank">https://dockr.ly/comm-slack</a>.
|
<a href="https://community.docker.com/registrations/groups/4316" target="_blank">https://community.docker.com/registrations/groups/4316</a>.
|
||||||
We use the #moby-project channel for general discussion, and there are separate channels for other Moby projects such as #containerd.
|
We use the #moby-project channel for general discussion, and there are separate channels for other Moby projects such as #containerd.
|
||||||
|
Archives are available at <a href="https://dockercommunity.slackarchive.io/" target="_blank">https://dockercommunity.slackarchive.io/</a>.
|
||||||
</p>
|
</p>
|
||||||
</td>
|
</td>
|
||||||
</tr>
|
</tr>
|
||||||
|
@ -309,6 +310,36 @@ Don't forget: being a maintainer is a time investment. Make sure you
|
||||||
will have time to make yourself available. You don't have to be a
|
will have time to make yourself available. You don't have to be a
|
||||||
maintainer to make a difference on the project!
|
maintainer to make a difference on the project!
|
||||||
|
|
||||||
|
### Manage issues and pull requests using the Derek bot
|
||||||
|
|
||||||
|
If you want to help label, assign, close or reopen issues or pull requests
|
||||||
|
without commit rights, ask a maintainer to add your Github handle to the
|
||||||
|
`.DEREK.yml` file. [Derek](https://github.com/alexellis/derek) is a bot that extends
|
||||||
|
Github's user permissions to help non-committers to manage issues and pull requests simply by commenting.
|
||||||
|
|
||||||
|
For example:
|
||||||
|
|
||||||
|
* Labels
|
||||||
|
|
||||||
|
```
|
||||||
|
Derek add label: kind/question
|
||||||
|
Derek remove label: status/claimed
|
||||||
|
```
|
||||||
|
|
||||||
|
* Assign work
|
||||||
|
|
||||||
|
```
|
||||||
|
Derek assign: username
|
||||||
|
Derek unassign: me
|
||||||
|
```
|
||||||
|
|
||||||
|
* Manage issues and PRs
|
||||||
|
|
||||||
|
```
|
||||||
|
Derek close
|
||||||
|
Derek reopen
|
||||||
|
```
|
||||||
|
|
||||||
## Moby community guidelines
|
## Moby community guidelines
|
||||||
|
|
||||||
We want to keep the Moby community awesome, growing and collaborative. We need
|
We want to keep the Moby community awesome, growing and collaborative. We need
|
||||||
|
@ -422,6 +453,6 @@ The rules:
|
||||||
guidelines. Since you've read all the rules, you now know that.
|
guidelines. Since you've read all the rules, you now know that.
|
||||||
|
|
||||||
If you are having trouble getting into the mood of idiomatic Go, we recommend
|
If you are having trouble getting into the mood of idiomatic Go, we recommend
|
||||||
reading through [Effective Go](https://go.dev/doc/effective_go). The
|
reading through [Effective Go](https://golang.org/doc/effective_go.html). The
|
||||||
[Go Blog](https://go.dev/blog/) is also a great resource. Drinking the
|
[Go Blog](https://blog.golang.org) is also a great resource. Drinking the
|
||||||
kool-aid is a lot easier than going thirsty.
|
kool-aid is a lot easier than going thirsty.
|
||||||
|
|
853
Dockerfile
853
Dockerfile
|
@ -1,671 +1,316 @@
|
||||||
# syntax=docker/dockerfile:1.7
|
# This file describes the standard way to build Docker, using docker
|
||||||
|
#
|
||||||
|
# Usage:
|
||||||
|
#
|
||||||
|
# # Use make to build a development environment image and run it in a container.
|
||||||
|
# # This is slow the first time.
|
||||||
|
# make BIND_DIR=. shell
|
||||||
|
#
|
||||||
|
# The following commands are executed inside the running container.
|
||||||
|
|
||||||
ARG GO_VERSION=1.21.9
|
# # Make a dockerd binary.
|
||||||
ARG BASE_DEBIAN_DISTRO="bookworm"
|
# # hack/make.sh binary
|
||||||
ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}"
|
#
|
||||||
ARG XX_VERSION=1.4.0
|
# # Install dockerd to /usr/local/bin
|
||||||
|
# # make install
|
||||||
|
#
|
||||||
|
# # Run unit tests
|
||||||
|
# # hack/test/unit
|
||||||
|
#
|
||||||
|
# # Run tests e.g. integration, py
|
||||||
|
# # hack/make.sh binary test-integration test-docker-py
|
||||||
|
#
|
||||||
|
# Note: AppArmor used to mess with privileged mode, but this is no longer
|
||||||
|
# the case. Therefore, you don't have to disable it anymore.
|
||||||
|
#
|
||||||
|
|
||||||
ARG VPNKIT_VERSION=0.5.0
|
ARG CROSS="false"
|
||||||
|
# IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored
|
||||||
|
ARG GO_VERSION=1.13.15
|
||||||
|
ARG DEBIAN_FRONTEND=noninteractive
|
||||||
|
ARG VPNKIT_DIGEST=e508a17cfacc8fd39261d5b4e397df2b953690da577e2c987a47630cd0c42f8e
|
||||||
|
|
||||||
ARG DOCKERCLI_REPOSITORY="https://github.com/docker/cli.git"
|
FROM golang:${GO_VERSION}-buster AS base
|
||||||
ARG DOCKERCLI_VERSION=v26.0.0
|
ARG APT_MIRROR
|
||||||
# cli version used for integration-cli tests
|
RUN sed -ri "s/(httpredir|deb).debian.org/${APT_MIRROR:-deb.debian.org}/g" /etc/apt/sources.list \
|
||||||
ARG DOCKERCLI_INTEGRATION_REPOSITORY="https://github.com/docker/cli.git"
|
&& sed -ri "s/(security).debian.org/${APT_MIRROR:-security.debian.org}/g" /etc/apt/sources.list
|
||||||
ARG DOCKERCLI_INTEGRATION_VERSION=v17.06.2-ce
|
|
||||||
ARG BUILDX_VERSION=0.13.1
|
|
||||||
ARG COMPOSE_VERSION=v2.25.0
|
|
||||||
|
|
||||||
ARG SYSTEMD="false"
|
|
||||||
ARG DOCKER_STATIC=1
|
|
||||||
|
|
||||||
# REGISTRY_VERSION specifies the version of the registry to download from
|
|
||||||
# https://hub.docker.com/r/distribution/distribution. This version of
|
|
||||||
# the registry is used to test schema 2 manifests. Generally, the version
|
|
||||||
# specified here should match a current release.
|
|
||||||
ARG REGISTRY_VERSION=2.8.3
|
|
||||||
|
|
||||||
# delve is currently only supported on linux/amd64 and linux/arm64;
|
|
||||||
# https://github.com/go-delve/delve/blob/v1.8.1/pkg/proc/native/support_sentinel.go#L1-L6
|
|
||||||
ARG DELVE_SUPPORTED=${TARGETPLATFORM#linux/amd64} DELVE_SUPPORTED=${DELVE_SUPPORTED#linux/arm64}
|
|
||||||
ARG DELVE_SUPPORTED=${DELVE_SUPPORTED:+"unsupported"}
|
|
||||||
ARG DELVE_SUPPORTED=${DELVE_SUPPORTED:-"supported"}
|
|
||||||
|
|
||||||
# cross compilation helper
|
|
||||||
FROM --platform=$BUILDPLATFORM tonistiigi/xx:${XX_VERSION} AS xx
|
|
||||||
|
|
||||||
# dummy stage to make sure the image is built for deps that don't support some
|
|
||||||
# architectures
|
|
||||||
FROM --platform=$BUILDPLATFORM busybox AS build-dummy
|
|
||||||
RUN mkdir -p /build
|
|
||||||
FROM scratch AS binary-dummy
|
|
||||||
COPY --from=build-dummy /build /build
|
|
||||||
|
|
||||||
# base
|
|
||||||
FROM --platform=$BUILDPLATFORM ${GOLANG_IMAGE} AS base
|
|
||||||
COPY --from=xx / /
|
|
||||||
RUN echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
|
|
||||||
RUN apt-get update && apt-get install --no-install-recommends -y file
|
|
||||||
ENV GO111MODULE=off
|
ENV GO111MODULE=off
|
||||||
ENV GOTOOLCHAIN=local
|
|
||||||
|
|
||||||
FROM base AS criu
|
FROM base AS criu
|
||||||
ADD --chmod=0644 https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_11/Release.key /etc/apt/trusted.gpg.d/criu.gpg.asc
|
ARG DEBIAN_FRONTEND
|
||||||
RUN --mount=type=cache,sharing=locked,id=moby-criu-aptlib,target=/var/lib/apt \
|
# Install dependency packages specific to criu
|
||||||
--mount=type=cache,sharing=locked,id=moby-criu-aptcache,target=/var/cache/apt \
|
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||||
echo 'deb https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_12/ /' > /etc/apt/sources.list.d/criu.list \
|
libcap-dev \
|
||||||
&& apt-get update \
|
libnet-dev \
|
||||||
&& apt-get install -y --no-install-recommends criu \
|
libnl-3-dev \
|
||||||
&& install -D /usr/sbin/criu /build/criu \
|
libprotobuf-c-dev \
|
||||||
&& /build/criu --version
|
libprotobuf-dev \
|
||||||
|
protobuf-c-compiler \
|
||||||
|
protobuf-compiler \
|
||||||
|
python-protobuf \
|
||||||
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
# registry
|
# Install CRIU for checkpoint/restore support
|
||||||
FROM base AS registry-src
|
ARG CRIU_VERSION=3.14
|
||||||
WORKDIR /usr/src/registry
|
RUN mkdir -p /usr/src/criu \
|
||||||
RUN git init . && git remote add origin "https://github.com/distribution/distribution.git"
|
&& curl -sSL https://github.com/checkpoint-restore/criu/archive/v${CRIU_VERSION}.tar.gz | tar -C /usr/src/criu/ -xz --strip-components=1 \
|
||||||
|
&& cd /usr/src/criu \
|
||||||
|
&& make \
|
||||||
|
&& make PREFIX=/build/ install-criu
|
||||||
|
|
||||||
FROM base AS registry
|
FROM base AS registry
|
||||||
WORKDIR /go/src/github.com/docker/distribution
|
# Install two versions of the registry. The first is an older version that
|
||||||
|
# only supports schema1 manifests. The second is a newer version that supports
|
||||||
# REGISTRY_VERSION_SCHEMA1 specifies the version of the registry to build and
|
# both. This allows integration-cli tests to cover push/pull with both schema1
|
||||||
# install from the https://github.com/docker/distribution repository. This is
|
# and schema2 manifests.
|
||||||
# an older (pre v2.3.0) version of the registry that only supports schema1
|
ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd
|
||||||
# manifests. This version of the registry is not working on arm64, so installation
|
ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827
|
||||||
# is skipped on that architecture.
|
RUN set -x \
|
||||||
ARG REGISTRY_VERSION_SCHEMA1=v2.1.0
|
&& export GOPATH="$(mktemp -d)" \
|
||||||
ARG TARGETPLATFORM
|
&& git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \
|
||||||
RUN --mount=from=registry-src,src=/usr/src/registry,rw \
|
&& (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \
|
||||||
--mount=type=cache,target=/root/.cache/go-build,id=registry-build-$TARGETPLATFORM \
|
&& GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \
|
||||||
--mount=type=cache,target=/go/pkg/mod \
|
go build -buildmode=pie -o /build/registry-v2 github.com/docker/distribution/cmd/registry \
|
||||||
--mount=type=tmpfs,target=/go/src <<EOT
|
&& case $(dpkg --print-architecture) in \
|
||||||
set -ex
|
amd64|ppc64*|s390x) \
|
||||||
export GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"
|
(cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1"); \
|
||||||
# Make the /build directory no matter what so that it doesn't fail on arm64 or
|
GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \
|
||||||
# any other platform where we don't build this registry
|
go build -buildmode=pie -o /build/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \
|
||||||
mkdir /build
|
;; \
|
||||||
case $TARGETPLATFORM in
|
esac \
|
||||||
linux/amd64|linux/arm/v7|linux/ppc64le|linux/s390x)
|
&& rm -rf "$GOPATH"
|
||||||
git fetch -q --depth 1 origin "${REGISTRY_VERSION_SCHEMA1}" +refs/tags/*:refs/tags/*
|
|
||||||
git checkout -q FETCH_HEAD
|
|
||||||
CGO_ENABLED=0 xx-go build -o /build/registry-v2-schema1 -v ./cmd/registry
|
|
||||||
xx-verify /build/registry-v2-schema1
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
EOT
|
|
||||||
|
|
||||||
FROM distribution/distribution:$REGISTRY_VERSION AS registry-v2
|
|
||||||
RUN mkdir /build && mv /bin/registry /build/registry-v2
|
|
||||||
|
|
||||||
# go-swagger
|
|
||||||
FROM base AS swagger-src
|
|
||||||
WORKDIR /usr/src/swagger
|
|
||||||
# Currently uses a fork from https://github.com/kolyshkin/go-swagger/tree/golang-1.13-fix
|
|
||||||
# TODO: move to under moby/ or fix upstream go-swagger to work for us.
|
|
||||||
RUN git init . && git remote add origin "https://github.com/kolyshkin/go-swagger.git"
|
|
||||||
# GO_SWAGGER_COMMIT specifies the version of the go-swagger binary to build and
|
|
||||||
# install. Go-swagger is used in CI for validating swagger.yaml in hack/validate/swagger-gen
|
|
||||||
ARG GO_SWAGGER_COMMIT=c56166c036004ba7a3a321e5951ba472b9ae298c
|
|
||||||
RUN git fetch -q --depth 1 origin "${GO_SWAGGER_COMMIT}" && git checkout -q FETCH_HEAD
|
|
||||||
|
|
||||||
FROM base AS swagger
|
FROM base AS swagger
|
||||||
WORKDIR /go/src/github.com/go-swagger/go-swagger
|
# Install go-swagger for validating swagger.yaml
|
||||||
ARG TARGETPLATFORM
|
# This is https://github.com/kolyshkin/go-swagger/tree/golang-1.13-fix
|
||||||
RUN --mount=from=swagger-src,src=/usr/src/swagger,rw \
|
# TODO: move to under moby/ or fix upstream go-swagger to work for us.
|
||||||
--mount=type=cache,target=/root/.cache/go-build,id=swagger-build-$TARGETPLATFORM \
|
ENV GO_SWAGGER_COMMIT 5793aa66d4b4112c2602c716516e24710e4adbb5
|
||||||
--mount=type=cache,target=/go/pkg/mod \
|
RUN set -x \
|
||||||
--mount=type=tmpfs,target=/go/src/ <<EOT
|
&& export GOPATH="$(mktemp -d)" \
|
||||||
set -e
|
&& git clone https://github.com/kolyshkin/go-swagger.git "$GOPATH/src/github.com/go-swagger/go-swagger" \
|
||||||
xx-go build -o /build/swagger ./cmd/swagger
|
&& (cd "$GOPATH/src/github.com/go-swagger/go-swagger" && git checkout -q "$GO_SWAGGER_COMMIT") \
|
||||||
xx-verify /build/swagger
|
&& go build -o /build/swagger github.com/go-swagger/go-swagger/cmd/swagger \
|
||||||
EOT
|
&& rm -rf "$GOPATH"
|
||||||
|
|
||||||
# frozen-images
|
FROM base AS frozen-images
|
||||||
# See also frozenImages in "testutil/environment/protect.go" (which needs to
|
ARG DEBIAN_FRONTEND
|
||||||
# be updated when adding images to this list)
|
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||||
FROM debian:${BASE_DEBIAN_DISTRO} AS frozen-images
|
|
||||||
RUN --mount=type=cache,sharing=locked,id=moby-frozen-images-aptlib,target=/var/lib/apt \
|
|
||||||
--mount=type=cache,sharing=locked,id=moby-frozen-images-aptcache,target=/var/cache/apt \
|
|
||||||
apt-get update && apt-get install -y --no-install-recommends \
|
|
||||||
ca-certificates \
|
ca-certificates \
|
||||||
curl \
|
jq \
|
||||||
jq
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
# Get useful and necessary Hub images so we can "docker load" locally instead of pulling
|
# Get useful and necessary Hub images so we can "docker load" locally instead of pulling
|
||||||
COPY contrib/download-frozen-image-v2.sh /
|
COPY contrib/download-frozen-image-v2.sh /
|
||||||
ARG TARGETARCH
|
|
||||||
ARG TARGETVARIANT
|
|
||||||
RUN /download-frozen-image-v2.sh /build \
|
RUN /download-frozen-image-v2.sh /build \
|
||||||
busybox:latest@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209 \
|
buildpack-deps:jessie@sha256:dd86dced7c9cd2a724e779730f0a53f93b7ef42228d4344b25ce9a42a1486251 \
|
||||||
busybox:glibc@sha256:1f81263701cddf6402afe9f33fca0266d9fff379e59b1748f33d3072da71ee85 \
|
busybox:latest@sha256:bbc3a03235220b170ba48a157dd097dd1379299370e1ed99ce976df0355d24f0 \
|
||||||
debian:bookworm-slim@sha256:2bc5c236e9b262645a323e9088dfa3bb1ecb16cc75811daf40a23a824d665be9 \
|
busybox:glibc@sha256:0b55a30394294ab23b9afd58fab94e61a923f5834fba7ddbae7f8e0c11ba85e6 \
|
||||||
hello-world:latest@sha256:d58e752213a51785838f9eed2b7a498ffa1cb3aa7f946dda11af39286c3db9a9 \
|
debian:jessie@sha256:287a20c5f73087ab406e6b364833e3fb7b3ae63ca0eb3486555dc27ed32c6e60 \
|
||||||
arm32v7/hello-world:latest@sha256:50b8560ad574c779908da71f7ce370c0a2471c098d44d1c8f6b513c5a55eeeb1
|
hello-world:latest@sha256:be0cd392e45be79ffeffa6b05338b98ebb16c87b255f48e297ec7f98e123905c
|
||||||
|
# See also ensureFrozenImagesLinux() in "integration-cli/fixtures_linux_daemon_test.go" (which needs to be updated when adding images to this list)
|
||||||
|
|
||||||
# delve
|
FROM base AS cross-false
|
||||||
FROM base AS delve-src
|
|
||||||
WORKDIR /usr/src/delve
|
|
||||||
RUN git init . && git remote add origin "https://github.com/go-delve/delve.git"
|
|
||||||
# DELVE_VERSION specifies the version of the Delve debugger binary
|
|
||||||
# from the https://github.com/go-delve/delve repository.
|
|
||||||
# It can be used to run Docker with a possibility of
|
|
||||||
# attaching debugger to it.
|
|
||||||
ARG DELVE_VERSION=v1.21.1
|
|
||||||
RUN git fetch -q --depth 1 origin "${DELVE_VERSION}" +refs/tags/*:refs/tags/* && git checkout -q FETCH_HEAD
|
|
||||||
|
|
||||||
FROM base AS delve-supported
|
FROM base AS cross-true
|
||||||
WORKDIR /usr/src/delve
|
ARG DEBIAN_FRONTEND
|
||||||
ARG TARGETPLATFORM
|
RUN dpkg --add-architecture arm64
|
||||||
RUN --mount=from=delve-src,src=/usr/src/delve,rw \
|
RUN dpkg --add-architecture armel
|
||||||
--mount=type=cache,target=/root/.cache/go-build,id=delve-build-$TARGETPLATFORM \
|
RUN dpkg --add-architecture armhf
|
||||||
--mount=type=cache,target=/go/pkg/mod <<EOT
|
RUN if [ "$(go env GOHOSTARCH)" = "amd64" ]; then \
|
||||||
set -e
|
apt-get update && apt-get install -y --no-install-recommends \
|
||||||
GO111MODULE=on xx-go build -o /build/dlv ./cmd/dlv
|
crossbuild-essential-arm64 \
|
||||||
xx-verify /build/dlv
|
crossbuild-essential-armel \
|
||||||
EOT
|
crossbuild-essential-armhf \
|
||||||
|
&& rm -rf /var/lib/apt/lists/*; \
|
||||||
|
fi
|
||||||
|
|
||||||
FROM binary-dummy AS delve-unsupported
|
FROM cross-${CROSS} as dev-base
|
||||||
FROM delve-${DELVE_SUPPORTED} AS delve
|
|
||||||
|
|
||||||
FROM base AS tomll
|
FROM dev-base AS runtime-dev-cross-false
|
||||||
# GOTOML_VERSION specifies the version of the tomll binary to build and install
|
ARG DEBIAN_FRONTEND
|
||||||
# from the https://github.com/pelletier/go-toml repository. This binary is used
|
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||||
# in CI in the hack/validate/toml script.
|
libapparmor-dev \
|
||||||
#
|
libseccomp-dev \
|
||||||
# When updating this version, consider updating the github.com/pelletier/go-toml
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
# dependency in vendor.mod accordingly.
|
|
||||||
ARG GOTOML_VERSION=v1.8.1
|
|
||||||
RUN --mount=type=cache,target=/root/.cache/go-build \
|
|
||||||
--mount=type=cache,target=/go/pkg/mod \
|
|
||||||
GOBIN=/build/ GO111MODULE=on go install "github.com/pelletier/go-toml/cmd/tomll@${GOTOML_VERSION}" \
|
|
||||||
&& /build/tomll --help
|
|
||||||
|
|
||||||
FROM base AS gowinres
|
FROM cross-true AS runtime-dev-cross-true
|
||||||
# GOWINRES_VERSION defines go-winres tool version
|
ARG DEBIAN_FRONTEND
|
||||||
ARG GOWINRES_VERSION=v0.3.1
|
# These crossbuild packages rely on gcc-<arch>, but this doesn't want to install
|
||||||
RUN --mount=type=cache,target=/root/.cache/go-build \
|
# on non-amd64 systems.
|
||||||
--mount=type=cache,target=/go/pkg/mod \
|
# Additionally, the crossbuild-amd64 is currently only on debian:buster, so
|
||||||
GOBIN=/build/ GO111MODULE=on go install "github.com/tc-hib/go-winres@${GOWINRES_VERSION}" \
|
# other architectures cannnot crossbuild amd64.
|
||||||
&& /build/go-winres --help
|
RUN if [ "$(go env GOHOSTARCH)" = "amd64" ]; then \
|
||||||
|
apt-get update && apt-get install -y --no-install-recommends \
|
||||||
|
libapparmor-dev:arm64 \
|
||||||
|
libapparmor-dev:armel \
|
||||||
|
libapparmor-dev:armhf \
|
||||||
|
libseccomp-dev:arm64 \
|
||||||
|
libseccomp-dev:armel \
|
||||||
|
libseccomp-dev:armhf \
|
||||||
|
# install this arches seccomp here due to compat issues with the v0 builder
|
||||||
|
# This is as opposed to inheriting from runtime-dev-cross-false
|
||||||
|
libapparmor-dev \
|
||||||
|
libseccomp-dev \
|
||||||
|
&& rm -rf /var/lib/apt/lists/*; \
|
||||||
|
fi
|
||||||
|
|
||||||
# containerd
|
FROM runtime-dev-cross-${CROSS} AS runtime-dev
|
||||||
FROM base AS containerd-src
|
|
||||||
WORKDIR /usr/src/containerd
|
|
||||||
RUN git init . && git remote add origin "https://github.com/containerd/containerd.git"
|
|
||||||
# CONTAINERD_VERSION is used to build containerd binaries, and used for the
|
|
||||||
# integration tests. The distributed docker .deb and .rpm packages depend on a
|
|
||||||
# separate (containerd.io) package, which may be a different version as is
|
|
||||||
# specified here. The containerd golang package is also pinned in vendor.mod.
|
|
||||||
# When updating the binary version you may also need to update the vendor
|
|
||||||
# version to pick up bug fixes or new APIs, however, usually the Go packages
|
|
||||||
# are built from a commit from the master branch.
|
|
||||||
ARG CONTAINERD_VERSION=v1.7.15
|
|
||||||
RUN git fetch -q --depth 1 origin "${CONTAINERD_VERSION}" +refs/tags/*:refs/tags/* && git checkout -q FETCH_HEAD
|
|
||||||
|
|
||||||
FROM base AS containerd-build
|
FROM base AS tomlv
|
||||||
WORKDIR /go/src/github.com/containerd/containerd
|
ENV INSTALL_BINARY_NAME=tomlv
|
||||||
ARG TARGETPLATFORM
|
ARG TOMLV_COMMIT
|
||||||
RUN --mount=type=cache,sharing=locked,id=moby-containerd-aptlib,target=/var/lib/apt \
|
COPY hack/dockerfile/install/install.sh ./install.sh
|
||||||
--mount=type=cache,sharing=locked,id=moby-containerd-aptcache,target=/var/cache/apt \
|
COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
|
||||||
apt-get update && xx-apt-get install -y --no-install-recommends \
|
RUN PREFIX=/build ./install.sh $INSTALL_BINARY_NAME
|
||||||
gcc \
|
|
||||||
|
FROM base AS vndr
|
||||||
|
ENV INSTALL_BINARY_NAME=vndr
|
||||||
|
ARG VNDR_COMMIT
|
||||||
|
COPY hack/dockerfile/install/install.sh ./install.sh
|
||||||
|
COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
|
||||||
|
RUN PREFIX=/build ./install.sh $INSTALL_BINARY_NAME
|
||||||
|
|
||||||
|
FROM dev-base AS containerd
|
||||||
|
ARG DEBIAN_FRONTEND
|
||||||
|
ARG CONTAINERD_COMMIT
|
||||||
|
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||||
libbtrfs-dev \
|
libbtrfs-dev \
|
||||||
libsecret-1-dev \
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
pkg-config
|
ENV INSTALL_BINARY_NAME=containerd
|
||||||
ARG DOCKER_STATIC
|
COPY hack/dockerfile/install/install.sh ./install.sh
|
||||||
RUN --mount=from=containerd-src,src=/usr/src/containerd,rw \
|
COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
|
||||||
--mount=type=cache,target=/root/.cache/go-build,id=containerd-build-$TARGETPLATFORM <<EOT
|
RUN PREFIX=/build ./install.sh $INSTALL_BINARY_NAME
|
||||||
set -e
|
|
||||||
export CC=$(xx-info)-gcc
|
|
||||||
export CGO_ENABLED=$([ "$DOCKER_STATIC" = "1" ] && echo "0" || echo "1")
|
|
||||||
xx-go --wrap
|
|
||||||
make $([ "$DOCKER_STATIC" = "1" ] && echo "STATIC=1") binaries
|
|
||||||
xx-verify $([ "$DOCKER_STATIC" = "1" ] && echo "--static") bin/containerd
|
|
||||||
xx-verify $([ "$DOCKER_STATIC" = "1" ] && echo "--static") bin/containerd-shim-runc-v2
|
|
||||||
xx-verify $([ "$DOCKER_STATIC" = "1" ] && echo "--static") bin/ctr
|
|
||||||
mkdir /build
|
|
||||||
mv bin/containerd bin/containerd-shim-runc-v2 bin/ctr /build
|
|
||||||
EOT
|
|
||||||
|
|
||||||
FROM containerd-build AS containerd-linux
|
FROM dev-base AS proxy
|
||||||
FROM binary-dummy AS containerd-windows
|
ENV INSTALL_BINARY_NAME=proxy
|
||||||
FROM containerd-${TARGETOS} AS containerd
|
ARG LIBNETWORK_COMMIT
|
||||||
|
COPY hack/dockerfile/install/install.sh ./install.sh
|
||||||
|
COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
|
||||||
|
RUN PREFIX=/build ./install.sh $INSTALL_BINARY_NAME
|
||||||
|
|
||||||
FROM base AS golangci_lint
|
FROM base AS gometalinter
|
||||||
ARG GOLANGCI_LINT_VERSION=v1.55.2
|
ENV INSTALL_BINARY_NAME=gometalinter
|
||||||
RUN --mount=type=cache,target=/root/.cache/go-build \
|
COPY hack/dockerfile/install/install.sh ./install.sh
|
||||||
--mount=type=cache,target=/go/pkg/mod \
|
COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
|
||||||
GOBIN=/build/ GO111MODULE=on go install "github.com/golangci/golangci-lint/cmd/golangci-lint@${GOLANGCI_LINT_VERSION}" \
|
RUN PREFIX=/build ./install.sh $INSTALL_BINARY_NAME
|
||||||
&& /build/golangci-lint --version
|
|
||||||
|
|
||||||
FROM base AS gotestsum
|
FROM base AS gotestsum
|
||||||
ARG GOTESTSUM_VERSION=v1.8.2
|
ENV INSTALL_BINARY_NAME=gotestsum
|
||||||
RUN --mount=type=cache,target=/root/.cache/go-build \
|
ARG GOTESTSUM_COMMIT
|
||||||
--mount=type=cache,target=/go/pkg/mod \
|
COPY hack/dockerfile/install/install.sh ./install.sh
|
||||||
GOBIN=/build/ GO111MODULE=on go install "gotest.tools/gotestsum@${GOTESTSUM_VERSION}" \
|
COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
|
||||||
&& /build/gotestsum --version
|
RUN PREFIX=/build ./install.sh $INSTALL_BINARY_NAME
|
||||||
|
|
||||||
FROM base AS shfmt
|
FROM dev-base AS dockercli
|
||||||
ARG SHFMT_VERSION=v3.8.0
|
ENV INSTALL_BINARY_NAME=dockercli
|
||||||
RUN --mount=type=cache,target=/root/.cache/go-build \
|
ARG DOCKERCLI_CHANNEL
|
||||||
--mount=type=cache,target=/go/pkg/mod \
|
|
||||||
GOBIN=/build/ GO111MODULE=on go install "mvdan.cc/sh/v3/cmd/shfmt@${SHFMT_VERSION}" \
|
|
||||||
&& /build/shfmt --version
|
|
||||||
|
|
||||||
FROM base AS gopls
|
|
||||||
RUN --mount=type=cache,target=/root/.cache/go-build \
|
|
||||||
--mount=type=cache,target=/go/pkg/mod \
|
|
||||||
GOBIN=/build/ GO111MODULE=on go install "golang.org/x/tools/gopls@latest" \
|
|
||||||
&& /build/gopls version
|
|
||||||
|
|
||||||
FROM base AS dockercli
|
|
||||||
WORKDIR /go/src/github.com/docker/cli
|
|
||||||
ARG DOCKERCLI_REPOSITORY
|
|
||||||
ARG DOCKERCLI_VERSION
|
ARG DOCKERCLI_VERSION
|
||||||
ARG TARGETPLATFORM
|
COPY hack/dockerfile/install/install.sh ./install.sh
|
||||||
RUN --mount=source=hack/dockerfile/cli.sh,target=/download-or-build-cli.sh \
|
COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
|
||||||
--mount=type=cache,id=dockercli-git-$TARGETPLATFORM,sharing=locked,target=./.git \
|
RUN PREFIX=/build ./install.sh $INSTALL_BINARY_NAME
|
||||||
--mount=type=cache,target=/root/.cache/go-build,id=dockercli-build-$TARGETPLATFORM \
|
|
||||||
rm -f ./.git/*.lock \
|
|
||||||
&& /download-or-build-cli.sh ${DOCKERCLI_VERSION} ${DOCKERCLI_REPOSITORY} /build \
|
|
||||||
&& /build/docker --version
|
|
||||||
|
|
||||||
FROM base AS dockercli-integration
|
FROM runtime-dev AS runc
|
||||||
WORKDIR /go/src/github.com/docker/cli
|
ENV INSTALL_BINARY_NAME=runc
|
||||||
ARG DOCKERCLI_INTEGRATION_REPOSITORY
|
ARG RUNC_COMMIT
|
||||||
ARG DOCKERCLI_INTEGRATION_VERSION
|
ARG RUNC_BUILDTAGS
|
||||||
ARG TARGETPLATFORM
|
COPY hack/dockerfile/install/install.sh ./install.sh
|
||||||
RUN --mount=source=hack/dockerfile/cli.sh,target=/download-or-build-cli.sh \
|
COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
|
||||||
--mount=type=cache,id=dockercli-git-$TARGETPLATFORM,sharing=locked,target=./.git \
|
RUN PREFIX=/build ./install.sh $INSTALL_BINARY_NAME
|
||||||
--mount=type=cache,target=/root/.cache/go-build,id=dockercli-build-$TARGETPLATFORM \
|
|
||||||
rm -f ./.git/*.lock \
|
|
||||||
&& /download-or-build-cli.sh ${DOCKERCLI_INTEGRATION_VERSION} ${DOCKERCLI_INTEGRATION_REPOSITORY} /build \
|
|
||||||
&& /build/docker --version
|
|
||||||
|
|
||||||
# runc
|
FROM dev-base AS tini
|
||||||
FROM base AS runc-src
|
ARG DEBIAN_FRONTEND
|
||||||
WORKDIR /usr/src/runc
|
ARG TINI_COMMIT
|
||||||
RUN git init . && git remote add origin "https://github.com/opencontainers/runc.git"
|
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||||
# RUNC_VERSION should match the version that is used by the containerd version
|
cmake \
|
||||||
# that is used. If you need to update runc, open a pull request in the containerd
|
vim-common \
|
||||||
# project first, and update both after that is merged. When updating RUNC_VERSION,
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
# consider updating runc in vendor.mod accordingly.
|
COPY hack/dockerfile/install/install.sh ./install.sh
|
||||||
ARG RUNC_VERSION=v1.1.12
|
ENV INSTALL_BINARY_NAME=tini
|
||||||
RUN git fetch -q --depth 1 origin "${RUNC_VERSION}" +refs/tags/*:refs/tags/* && git checkout -q FETCH_HEAD
|
COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
|
||||||
|
RUN PREFIX=/build ./install.sh $INSTALL_BINARY_NAME
|
||||||
|
|
||||||
FROM base AS runc-build
|
FROM dev-base AS rootlesskit
|
||||||
WORKDIR /go/src/github.com/opencontainers/runc
|
ENV INSTALL_BINARY_NAME=rootlesskit
|
||||||
ARG TARGETPLATFORM
|
ARG ROOTLESSKIT_COMMIT
|
||||||
RUN --mount=type=cache,sharing=locked,id=moby-runc-aptlib,target=/var/lib/apt \
|
COPY hack/dockerfile/install/install.sh ./install.sh
|
||||||
--mount=type=cache,sharing=locked,id=moby-runc-aptcache,target=/var/cache/apt \
|
COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
|
||||||
apt-get update && xx-apt-get install -y --no-install-recommends \
|
RUN PREFIX=/build/ ./install.sh $INSTALL_BINARY_NAME
|
||||||
dpkg-dev \
|
COPY ./contrib/dockerd-rootless.sh /build
|
||||||
gcc \
|
|
||||||
libc6-dev \
|
|
||||||
libseccomp-dev \
|
|
||||||
pkg-config
|
|
||||||
ARG DOCKER_STATIC
|
|
||||||
RUN --mount=from=runc-src,src=/usr/src/runc,rw \
|
|
||||||
--mount=type=cache,target=/root/.cache/go-build,id=runc-build-$TARGETPLATFORM <<EOT
|
|
||||||
set -e
|
|
||||||
xx-go --wrap
|
|
||||||
CGO_ENABLED=1 make "$([ "$DOCKER_STATIC" = "1" ] && echo "static" || echo "runc")"
|
|
||||||
xx-verify $([ "$DOCKER_STATIC" = "1" ] && echo "--static") runc
|
|
||||||
mkdir /build
|
|
||||||
mv runc /build/
|
|
||||||
EOT
|
|
||||||
|
|
||||||
FROM runc-build AS runc-linux
|
FROM djs55/vpnkit@sha256:${VPNKIT_DIGEST} AS vpnkit
|
||||||
FROM binary-dummy AS runc-windows
|
|
||||||
FROM runc-${TARGETOS} AS runc
|
|
||||||
|
|
||||||
# tini
|
# TODO: Some of this is only really needed for testing, it would be nice to split this up
|
||||||
FROM base AS tini-src
|
FROM runtime-dev AS dev
|
||||||
WORKDIR /usr/src/tini
|
ARG DEBIAN_FRONTEND
|
||||||
RUN git init . && git remote add origin "https://github.com/krallin/tini.git"
|
|
||||||
# TINI_VERSION specifies the version of tini (docker-init) to build. This
|
|
||||||
# binary is used when starting containers with the `--init` option.
|
|
||||||
ARG TINI_VERSION=v0.19.0
|
|
||||||
RUN git fetch -q --depth 1 origin "${TINI_VERSION}" +refs/tags/*:refs/tags/* && git checkout -q FETCH_HEAD
|
|
||||||
|
|
||||||
FROM base AS tini-build
|
|
||||||
WORKDIR /go/src/github.com/krallin/tini
|
|
||||||
RUN --mount=type=cache,sharing=locked,id=moby-tini-aptlib,target=/var/lib/apt \
|
|
||||||
--mount=type=cache,sharing=locked,id=moby-tini-aptcache,target=/var/cache/apt \
|
|
||||||
apt-get update && apt-get install -y --no-install-recommends cmake
|
|
||||||
ARG TARGETPLATFORM
|
|
||||||
RUN --mount=type=cache,sharing=locked,id=moby-tini-aptlib,target=/var/lib/apt \
|
|
||||||
--mount=type=cache,sharing=locked,id=moby-tini-aptcache,target=/var/cache/apt \
|
|
||||||
xx-apt-get install -y --no-install-recommends \
|
|
||||||
gcc \
|
|
||||||
libc6-dev \
|
|
||||||
pkg-config
|
|
||||||
RUN --mount=from=tini-src,src=/usr/src/tini,rw \
|
|
||||||
--mount=type=cache,target=/root/.cache/go-build,id=tini-build-$TARGETPLATFORM <<EOT
|
|
||||||
set -e
|
|
||||||
CC=$(xx-info)-gcc cmake .
|
|
||||||
make tini-static
|
|
||||||
xx-verify --static tini-static
|
|
||||||
mkdir /build
|
|
||||||
mv tini-static /build/docker-init
|
|
||||||
EOT
|
|
||||||
|
|
||||||
FROM tini-build AS tini-linux
|
|
||||||
FROM binary-dummy AS tini-windows
|
|
||||||
FROM tini-${TARGETOS} AS tini
|
|
||||||
|
|
||||||
# rootlesskit
|
|
||||||
FROM base AS rootlesskit-src
|
|
||||||
WORKDIR /usr/src/rootlesskit
|
|
||||||
RUN git init . && git remote add origin "https://github.com/rootless-containers/rootlesskit.git"
|
|
||||||
# When updating, also update vendor.mod and hack/dockerfile/install/rootlesskit.installer accordingly.
|
|
||||||
ARG ROOTLESSKIT_VERSION=v2.0.2
|
|
||||||
RUN git fetch -q --depth 1 origin "${ROOTLESSKIT_VERSION}" +refs/tags/*:refs/tags/* && git checkout -q FETCH_HEAD
|
|
||||||
|
|
||||||
FROM base AS rootlesskit-build
|
|
||||||
WORKDIR /go/src/github.com/rootless-containers/rootlesskit
|
|
||||||
ARG TARGETPLATFORM
|
|
||||||
RUN --mount=type=cache,sharing=locked,id=moby-rootlesskit-aptlib,target=/var/lib/apt \
|
|
||||||
--mount=type=cache,sharing=locked,id=moby-rootlesskit-aptcache,target=/var/cache/apt \
|
|
||||||
apt-get update && xx-apt-get install -y --no-install-recommends \
|
|
||||||
gcc \
|
|
||||||
libc6-dev \
|
|
||||||
pkg-config
|
|
||||||
ENV GO111MODULE=on
|
|
||||||
ARG DOCKER_STATIC
|
|
||||||
RUN --mount=from=rootlesskit-src,src=/usr/src/rootlesskit,rw \
|
|
||||||
--mount=type=cache,target=/go/pkg/mod \
|
|
||||||
--mount=type=cache,target=/root/.cache/go-build,id=rootlesskit-build-$TARGETPLATFORM <<EOT
|
|
||||||
set -e
|
|
||||||
export CGO_ENABLED=$([ "$DOCKER_STATIC" = "1" ] && echo "0" || echo "1")
|
|
||||||
xx-go build -o /build/rootlesskit -ldflags="$([ "$DOCKER_STATIC" != "1" ] && echo "-linkmode=external")" ./cmd/rootlesskit
|
|
||||||
xx-verify $([ "$DOCKER_STATIC" = "1" ] && echo "--static") /build/rootlesskit
|
|
||||||
xx-go build -o /build/rootlesskit-docker-proxy -ldflags="$([ "$DOCKER_STATIC" != "1" ] && echo "-linkmode=external")" ./cmd/rootlesskit-docker-proxy
|
|
||||||
xx-verify $([ "$DOCKER_STATIC" = "1" ] && echo "--static") /build/rootlesskit-docker-proxy
|
|
||||||
EOT
|
|
||||||
COPY --link ./contrib/dockerd-rootless.sh /build/
|
|
||||||
COPY --link ./contrib/dockerd-rootless-setuptool.sh /build/
|
|
||||||
|
|
||||||
FROM rootlesskit-build AS rootlesskit-linux
|
|
||||||
FROM binary-dummy AS rootlesskit-windows
|
|
||||||
FROM rootlesskit-${TARGETOS} AS rootlesskit
|
|
||||||
|
|
||||||
FROM base AS crun
|
|
||||||
ARG CRUN_VERSION=1.12
|
|
||||||
RUN --mount=type=cache,sharing=locked,id=moby-crun-aptlib,target=/var/lib/apt \
|
|
||||||
--mount=type=cache,sharing=locked,id=moby-crun-aptcache,target=/var/cache/apt \
|
|
||||||
apt-get update && apt-get install -y --no-install-recommends \
|
|
||||||
autoconf \
|
|
||||||
automake \
|
|
||||||
build-essential \
|
|
||||||
libcap-dev \
|
|
||||||
libprotobuf-c-dev \
|
|
||||||
libseccomp-dev \
|
|
||||||
libsystemd-dev \
|
|
||||||
libtool \
|
|
||||||
libudev-dev \
|
|
||||||
libyajl-dev \
|
|
||||||
python3 \
|
|
||||||
;
|
|
||||||
RUN --mount=type=tmpfs,target=/tmp/crun-build \
|
|
||||||
git clone https://github.com/containers/crun.git /tmp/crun-build && \
|
|
||||||
cd /tmp/crun-build && \
|
|
||||||
git checkout -q "${CRUN_VERSION}" && \
|
|
||||||
./autogen.sh && \
|
|
||||||
./configure --bindir=/build && \
|
|
||||||
make -j install
|
|
||||||
|
|
||||||
# vpnkit
|
|
||||||
# use dummy scratch stage to avoid build to fail for unsupported platforms
|
|
||||||
FROM scratch AS vpnkit-windows
|
|
||||||
FROM scratch AS vpnkit-linux-386
|
|
||||||
FROM scratch AS vpnkit-linux-arm
|
|
||||||
FROM scratch AS vpnkit-linux-ppc64le
|
|
||||||
FROM scratch AS vpnkit-linux-riscv64
|
|
||||||
FROM scratch AS vpnkit-linux-s390x
|
|
||||||
FROM djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-linux-amd64
|
|
||||||
FROM djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-linux-arm64
|
|
||||||
FROM vpnkit-linux-${TARGETARCH} AS vpnkit-linux
|
|
||||||
FROM vpnkit-${TARGETOS} AS vpnkit
|
|
||||||
|
|
||||||
# containerutility
|
|
||||||
FROM base AS containerutil-src
|
|
||||||
WORKDIR /usr/src/containerutil
|
|
||||||
RUN git init . && git remote add origin "https://github.com/docker-archive/windows-container-utility.git"
|
|
||||||
ARG CONTAINERUTILITY_VERSION=aa1ba87e99b68e0113bd27ec26c60b88f9d4ccd9
|
|
||||||
RUN git fetch -q --depth 1 origin "${CONTAINERUTILITY_VERSION}" +refs/tags/*:refs/tags/* && git checkout -q FETCH_HEAD
|
|
||||||
|
|
||||||
FROM base AS containerutil-build
|
|
||||||
WORKDIR /usr/src/containerutil
|
|
||||||
ARG TARGETPLATFORM
|
|
||||||
RUN xx-apt-get install -y --no-install-recommends \
|
|
||||||
gcc \
|
|
||||||
g++ \
|
|
||||||
libc6-dev \
|
|
||||||
pkg-config
|
|
||||||
RUN --mount=from=containerutil-src,src=/usr/src/containerutil,rw \
|
|
||||||
--mount=type=cache,target=/root/.cache/go-build,id=containerutil-build-$TARGETPLATFORM <<EOT
|
|
||||||
set -e
|
|
||||||
CC="$(xx-info)-gcc" CXX="$(xx-info)-g++" make
|
|
||||||
xx-verify --static containerutility.exe
|
|
||||||
mkdir /build
|
|
||||||
mv containerutility.exe /build/
|
|
||||||
EOT
|
|
||||||
|
|
||||||
FROM binary-dummy AS containerutil-linux
|
|
||||||
FROM containerutil-build AS containerutil-windows-amd64
|
|
||||||
FROM containerutil-windows-${TARGETARCH} AS containerutil-windows
|
|
||||||
FROM containerutil-${TARGETOS} AS containerutil
|
|
||||||
FROM docker/buildx-bin:${BUILDX_VERSION} as buildx
|
|
||||||
FROM docker/compose-bin:${COMPOSE_VERSION} as compose
|
|
||||||
|
|
||||||
FROM base AS dev-systemd-false
|
|
||||||
COPY --link --from=frozen-images /build/ /docker-frozen-images
|
|
||||||
COPY --link --from=swagger /build/ /usr/local/bin/
|
|
||||||
COPY --link --from=delve /build/ /usr/local/bin/
|
|
||||||
COPY --link --from=tomll /build/ /usr/local/bin/
|
|
||||||
COPY --link --from=gowinres /build/ /usr/local/bin/
|
|
||||||
COPY --link --from=tini /build/ /usr/local/bin/
|
|
||||||
COPY --link --from=registry /build/ /usr/local/bin/
|
|
||||||
COPY --link --from=registry-v2 /build/ /usr/local/bin/
|
|
||||||
|
|
||||||
# Skip the CRIU stage for now, as the opensuse package repository is sometimes
|
|
||||||
# unstable, and we're currently not using it in CI.
|
|
||||||
#
|
|
||||||
# FIXME(thaJeztah): re-enable this stage when https://github.com/moby/moby/issues/38963 is resolved (see https://github.com/moby/moby/pull/38984)
|
|
||||||
# COPY --link --from=criu /build/ /usr/local/bin/
|
|
||||||
COPY --link --from=gotestsum /build/ /usr/local/bin/
|
|
||||||
COPY --link --from=golangci_lint /build/ /usr/local/bin/
|
|
||||||
COPY --link --from=shfmt /build/ /usr/local/bin/
|
|
||||||
COPY --link --from=runc /build/ /usr/local/bin/
|
|
||||||
COPY --link --from=containerd /build/ /usr/local/bin/
|
|
||||||
COPY --link --from=rootlesskit /build/ /usr/local/bin/
|
|
||||||
COPY --link --from=vpnkit / /usr/local/bin/
|
|
||||||
COPY --link --from=containerutil /build/ /usr/local/bin/
|
|
||||||
COPY --link --from=crun /build/ /usr/local/bin/
|
|
||||||
COPY --link hack/dockerfile/etc/docker/ /etc/docker/
|
|
||||||
COPY --link --from=buildx /buildx /usr/local/libexec/docker/cli-plugins/docker-buildx
|
|
||||||
COPY --link --from=compose /docker-compose /usr/libexec/docker/cli-plugins/docker-compose
|
|
||||||
|
|
||||||
ENV PATH=/usr/local/cli:$PATH
|
|
||||||
ENV TEST_CLIENT_BINARY=/usr/local/cli-integration/docker
|
|
||||||
ENV CONTAINERD_ADDRESS=/run/docker/containerd/containerd.sock
|
|
||||||
ENV CONTAINERD_NAMESPACE=moby
|
|
||||||
WORKDIR /go/src/github.com/docker/docker
|
|
||||||
VOLUME /var/lib/docker
|
|
||||||
VOLUME /home/unprivilegeduser/.local/share/docker
|
|
||||||
# Wrap all commands in the "docker-in-docker" script to allow nested containers
|
|
||||||
ENTRYPOINT ["hack/dind"]
|
|
||||||
|
|
||||||
FROM dev-systemd-false AS dev-systemd-true
|
|
||||||
RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \
|
|
||||||
--mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \
|
|
||||||
apt-get update && apt-get install -y --no-install-recommends \
|
|
||||||
dbus \
|
|
||||||
dbus-user-session \
|
|
||||||
systemd \
|
|
||||||
systemd-sysv
|
|
||||||
ENTRYPOINT ["hack/dind-systemd"]
|
|
||||||
|
|
||||||
FROM dev-systemd-${SYSTEMD} AS dev-base
|
|
||||||
RUN groupadd -r docker
|
RUN groupadd -r docker
|
||||||
RUN useradd --create-home --gid docker unprivilegeduser \
|
RUN useradd --create-home --gid docker unprivilegeduser
|
||||||
&& mkdir -p /home/unprivilegeduser/.local/share/docker \
|
|
||||||
&& chown -R unprivilegeduser /home/unprivilegeduser
|
|
||||||
# Let us use a .bashrc file
|
# Let us use a .bashrc file
|
||||||
RUN ln -sfv /go/src/github.com/docker/docker/.bashrc ~/.bashrc
|
RUN ln -sfv /go/src/github.com/docker/docker/.bashrc ~/.bashrc
|
||||||
# Activate bash completion and include Docker's completion if mounted with DOCKER_BASH_COMPLETION_PATH
|
# Activate bash completion and include Docker's completion if mounted with DOCKER_BASH_COMPLETION_PATH
|
||||||
RUN echo "source /usr/share/bash-completion/bash_completion" >> /etc/bash.bashrc
|
RUN echo "source /usr/share/bash-completion/bash_completion" >> /etc/bash.bashrc
|
||||||
RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker
|
RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker
|
||||||
RUN ldconfig
|
RUN ldconfig
|
||||||
# Set dev environment as safe git directory to prevent "dubious ownership" errors
|
|
||||||
# when bind-mounting the source into the dev-container. See https://github.com/moby/moby/pull/44930
|
|
||||||
RUN git config --global --add safe.directory $GOPATH/src/github.com/docker/docker
|
|
||||||
# This should only install packages that are specifically needed for the dev environment and nothing else
|
# This should only install packages that are specifically needed for the dev environment and nothing else
|
||||||
# Do you really need to add another package here? Can it be done in a different build stage?
|
# Do you really need to add another package here? Can it be done in a different build stage?
|
||||||
RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \
|
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||||
--mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \
|
|
||||||
apt-get update && apt-get install -y --no-install-recommends \
|
|
||||||
apparmor \
|
apparmor \
|
||||||
|
aufs-tools \
|
||||||
bash-completion \
|
bash-completion \
|
||||||
|
binutils-mingw-w64 \
|
||||||
|
libbtrfs-dev \
|
||||||
bzip2 \
|
bzip2 \
|
||||||
inetutils-ping \
|
g++-mingw-w64-x86-64 \
|
||||||
iproute2 \
|
|
||||||
iptables \
|
iptables \
|
||||||
jq \
|
jq \
|
||||||
libcap2-bin \
|
libcap2-bin \
|
||||||
|
libdevmapper-dev \
|
||||||
libnet1 \
|
libnet1 \
|
||||||
libnl-3-200 \
|
libnl-3-200 \
|
||||||
libprotobuf-c1 \
|
libprotobuf-c1 \
|
||||||
libyajl2 \
|
libsystemd-dev \
|
||||||
|
libudev-dev \
|
||||||
net-tools \
|
net-tools \
|
||||||
patch \
|
|
||||||
pigz \
|
pigz \
|
||||||
sudo \
|
python3-pip \
|
||||||
systemd-journal-remote \
|
python3-setuptools \
|
||||||
|
python3-wheel \
|
||||||
thin-provisioning-tools \
|
thin-provisioning-tools \
|
||||||
uidmap \
|
|
||||||
vim \
|
vim \
|
||||||
vim-common \
|
vim-common \
|
||||||
xfsprogs \
|
xfsprogs \
|
||||||
xz-utils \
|
xz-utils \
|
||||||
zip \
|
zip \
|
||||||
zstd
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
# Switch to use iptables instead of nftables (to match the CI hosts)
|
|
||||||
# TODO use some kind of runtime auto-detection instead if/when nftables is supported (https://github.com/moby/moby/issues/26824)
|
# Switch to use iptables instead of nftables (to match the host machine)
|
||||||
RUN update-alternatives --set iptables /usr/sbin/iptables-legacy || true \
|
RUN update-alternatives --set iptables /usr/sbin/iptables-legacy || true \
|
||||||
&& update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy || true \
|
&& update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy || true \
|
||||||
&& update-alternatives --set arptables /usr/sbin/arptables-legacy || true
|
&& update-alternatives --set arptables /usr/sbin/arptables-legacy || true
|
||||||
RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \
|
|
||||||
--mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \
|
|
||||||
apt-get update && apt-get install --no-install-recommends -y \
|
|
||||||
gcc \
|
|
||||||
pkg-config \
|
|
||||||
dpkg-dev \
|
|
||||||
libapparmor-dev \
|
|
||||||
libseccomp-dev \
|
|
||||||
libsecret-1-dev \
|
|
||||||
libsystemd-dev \
|
|
||||||
libudev-dev \
|
|
||||||
yamllint
|
|
||||||
COPY --link --from=dockercli /build/ /usr/local/cli
|
|
||||||
COPY --link --from=dockercli-integration /build/ /usr/local/cli-integration
|
|
||||||
|
|
||||||
FROM base AS build
|
RUN pip3 install yamllint==1.16.0
|
||||||
COPY --from=gowinres /build/ /usr/local/bin/
|
|
||||||
|
COPY --from=dockercli /build/ /usr/local/cli
|
||||||
|
COPY --from=frozen-images /build/ /docker-frozen-images
|
||||||
|
COPY --from=swagger /build/ /usr/local/bin/
|
||||||
|
COPY --from=tomlv /build/ /usr/local/bin/
|
||||||
|
COPY --from=tini /build/ /usr/local/bin/
|
||||||
|
COPY --from=registry /build/ /usr/local/bin/
|
||||||
|
COPY --from=criu /build/ /usr/local/
|
||||||
|
COPY --from=vndr /build/ /usr/local/bin/
|
||||||
|
COPY --from=gotestsum /build/ /usr/local/bin/
|
||||||
|
COPY --from=gometalinter /build/ /usr/local/bin/
|
||||||
|
COPY --from=runc /build/ /usr/local/bin/
|
||||||
|
COPY --from=containerd /build/ /usr/local/bin/
|
||||||
|
COPY --from=rootlesskit /build/ /usr/local/bin/
|
||||||
|
COPY --from=vpnkit /vpnkit /usr/local/bin/vpnkit.x86_64
|
||||||
|
COPY --from=proxy /build/ /usr/local/bin/
|
||||||
|
|
||||||
|
ENV PATH=/usr/local/cli:$PATH
|
||||||
|
ENV DOCKER_BUILDTAGS apparmor seccomp selinux
|
||||||
WORKDIR /go/src/github.com/docker/docker
|
WORKDIR /go/src/github.com/docker/docker
|
||||||
ENV GO111MODULE=off
|
VOLUME /var/lib/docker
|
||||||
ENV CGO_ENABLED=1
|
# Wrap all commands in the "docker-in-docker" script to allow nested containers
|
||||||
RUN --mount=type=cache,sharing=locked,id=moby-build-aptlib,target=/var/lib/apt \
|
ENTRYPOINT ["hack/dind"]
|
||||||
--mount=type=cache,sharing=locked,id=moby-build-aptcache,target=/var/cache/apt \
|
|
||||||
apt-get update && apt-get install --no-install-recommends -y \
|
|
||||||
clang \
|
|
||||||
lld \
|
|
||||||
llvm
|
|
||||||
ARG TARGETPLATFORM
|
|
||||||
RUN --mount=type=cache,sharing=locked,id=moby-build-aptlib,target=/var/lib/apt \
|
|
||||||
--mount=type=cache,sharing=locked,id=moby-build-aptcache,target=/var/cache/apt \
|
|
||||||
xx-apt-get install --no-install-recommends -y \
|
|
||||||
dpkg-dev \
|
|
||||||
gcc \
|
|
||||||
libapparmor-dev \
|
|
||||||
libc6-dev \
|
|
||||||
libseccomp-dev \
|
|
||||||
libsecret-1-dev \
|
|
||||||
libsystemd-dev \
|
|
||||||
libudev-dev \
|
|
||||||
pkg-config
|
|
||||||
ARG DOCKER_BUILDTAGS
|
|
||||||
ARG DOCKER_DEBUG
|
|
||||||
ARG DOCKER_GITCOMMIT=HEAD
|
|
||||||
ARG DOCKER_LDFLAGS
|
|
||||||
ARG DOCKER_STATIC
|
|
||||||
ARG VERSION
|
|
||||||
ARG PLATFORM
|
|
||||||
ARG PRODUCT
|
|
||||||
ARG DEFAULT_PRODUCT_LICENSE
|
|
||||||
ARG PACKAGER_NAME
|
|
||||||
# PREFIX overrides DEST dir in make.sh script otherwise it fails because of
|
|
||||||
# read only mount in current work dir
|
|
||||||
ENV PREFIX=/tmp
|
|
||||||
RUN <<EOT
|
|
||||||
# in bullseye arm64 target does not link with lld so configure it to use ld instead
|
|
||||||
if [ "$(xx-info arch)" = "arm64" ]; then
|
|
||||||
XX_CC_PREFER_LINKER=ld xx-clang --setup-target-triple
|
|
||||||
fi
|
|
||||||
EOT
|
|
||||||
RUN --mount=type=bind,target=.,rw \
|
|
||||||
--mount=type=tmpfs,target=cli/winresources/dockerd \
|
|
||||||
--mount=type=tmpfs,target=cli/winresources/docker-proxy \
|
|
||||||
--mount=type=cache,target=/root/.cache/go-build,id=moby-build-$TARGETPLATFORM <<EOT
|
|
||||||
set -e
|
|
||||||
target=$([ "$DOCKER_STATIC" = "1" ] && echo "binary" || echo "dynbinary")
|
|
||||||
xx-go --wrap
|
|
||||||
PKG_CONFIG=$(xx-go env PKG_CONFIG) ./hack/make.sh $target
|
|
||||||
xx-verify $([ "$DOCKER_STATIC" = "1" ] && echo "--static") /tmp/bundles/${target}-daemon/dockerd$([ "$(xx-info os)" = "windows" ] && echo ".exe")
|
|
||||||
xx-verify $([ "$DOCKER_STATIC" = "1" ] && echo "--static") /tmp/bundles/${target}-daemon/docker-proxy$([ "$(xx-info os)" = "windows" ] && echo ".exe")
|
|
||||||
mkdir /build
|
|
||||||
mv /tmp/bundles/${target}-daemon/* /build/
|
|
||||||
EOT
|
|
||||||
|
|
||||||
# usage:
|
FROM dev AS final
|
||||||
# > docker buildx bake binary
|
# Upload docker source
|
||||||
# > DOCKER_STATIC=0 docker buildx bake binary
|
COPY . /go/src/github.com/docker/docker
|
||||||
# or
|
|
||||||
# > make binary
|
|
||||||
# > make dynbinary
|
|
||||||
FROM scratch AS binary
|
|
||||||
COPY --from=build /build/ /
|
|
||||||
|
|
||||||
# usage:
|
|
||||||
# > docker buildx bake all
|
|
||||||
FROM scratch AS all
|
|
||||||
COPY --link --from=tini /build/ /
|
|
||||||
COPY --link --from=runc /build/ /
|
|
||||||
COPY --link --from=containerd /build/ /
|
|
||||||
COPY --link --from=rootlesskit /build/ /
|
|
||||||
COPY --link --from=containerutil /build/ /
|
|
||||||
COPY --link --from=vpnkit / /
|
|
||||||
COPY --link --from=build /build /
|
|
||||||
|
|
||||||
# smoke tests
|
|
||||||
# usage:
|
|
||||||
# > docker buildx bake binary-smoketest
|
|
||||||
FROM --platform=$TARGETPLATFORM base AS smoketest
|
|
||||||
WORKDIR /usr/local/bin
|
|
||||||
COPY --from=build /build .
|
|
||||||
RUN <<EOT
|
|
||||||
set -ex
|
|
||||||
file dockerd
|
|
||||||
dockerd --version
|
|
||||||
file docker-proxy
|
|
||||||
docker-proxy --version
|
|
||||||
EOT
|
|
||||||
|
|
||||||
# devcontainer is a stage used by .devcontainer/devcontainer.json
|
|
||||||
FROM dev-base AS devcontainer
|
|
||||||
COPY --link . .
|
|
||||||
COPY --link --from=gopls /build/ /usr/local/bin/
|
|
||||||
|
|
||||||
# usage:
|
|
||||||
# > make shell
|
|
||||||
# > SYSTEMD=true make shell
|
|
||||||
FROM dev-base AS dev
|
|
||||||
COPY --link . .
|
|
||||||
|
|
84
Dockerfile.e2e
Normal file
84
Dockerfile.e2e
Normal file
|
@ -0,0 +1,84 @@
|
||||||
|
ARG GO_VERSION=1.13.15
|
||||||
|
|
||||||
|
FROM golang:${GO_VERSION}-alpine AS base
|
||||||
|
ENV GO111MODULE=off
|
||||||
|
RUN apk --no-cache add \
|
||||||
|
bash \
|
||||||
|
btrfs-progs-dev \
|
||||||
|
build-base \
|
||||||
|
curl \
|
||||||
|
lvm2-dev \
|
||||||
|
jq
|
||||||
|
|
||||||
|
RUN mkdir -p /build/
|
||||||
|
RUN mkdir -p /go/src/github.com/docker/docker/
|
||||||
|
WORKDIR /go/src/github.com/docker/docker/
|
||||||
|
|
||||||
|
FROM base AS frozen-images
|
||||||
|
# Get useful and necessary Hub images so we can "docker load" locally instead of pulling
|
||||||
|
COPY contrib/download-frozen-image-v2.sh /
|
||||||
|
RUN /download-frozen-image-v2.sh /build \
|
||||||
|
buildpack-deps:jessie@sha256:dd86dced7c9cd2a724e779730f0a53f93b7ef42228d4344b25ce9a42a1486251 \
|
||||||
|
busybox:latest@sha256:bbc3a03235220b170ba48a157dd097dd1379299370e1ed99ce976df0355d24f0 \
|
||||||
|
busybox:glibc@sha256:0b55a30394294ab23b9afd58fab94e61a923f5834fba7ddbae7f8e0c11ba85e6 \
|
||||||
|
debian:jessie@sha256:287a20c5f73087ab406e6b364833e3fb7b3ae63ca0eb3486555dc27ed32c6e60 \
|
||||||
|
hello-world:latest@sha256:be0cd392e45be79ffeffa6b05338b98ebb16c87b255f48e297ec7f98e123905c
|
||||||
|
# See also ensureFrozenImagesLinux() in "integration-cli/fixtures_linux_daemon_test.go" (which needs to be updated when adding images to this list)
|
||||||
|
|
||||||
|
FROM base AS dockercli
|
||||||
|
ENV INSTALL_BINARY_NAME=dockercli
|
||||||
|
COPY hack/dockerfile/install/install.sh ./install.sh
|
||||||
|
COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
|
||||||
|
RUN PREFIX=/build ./install.sh $INSTALL_BINARY_NAME
|
||||||
|
|
||||||
|
# Build DockerSuite.TestBuild* dependency
|
||||||
|
FROM base AS contrib
|
||||||
|
COPY contrib/syscall-test /build/syscall-test
|
||||||
|
COPY contrib/httpserver/Dockerfile /build/httpserver/Dockerfile
|
||||||
|
COPY contrib/httpserver contrib/httpserver
|
||||||
|
RUN CGO_ENABLED=0 go build -buildmode=pie -o /build/httpserver/httpserver github.com/docker/docker/contrib/httpserver
|
||||||
|
|
||||||
|
# Build the integration tests and copy the resulting binaries to /build/tests
|
||||||
|
FROM base AS builder
|
||||||
|
|
||||||
|
# Set tag and add sources
|
||||||
|
COPY . .
|
||||||
|
# Copy test sources tests that use assert can print errors
|
||||||
|
RUN mkdir -p /build${PWD} && find integration integration-cli -name \*_test.go -exec cp --parents '{}' /build${PWD} \;
|
||||||
|
# Build and install test binaries
|
||||||
|
ARG DOCKER_GITCOMMIT=undefined
|
||||||
|
RUN hack/make.sh build-integration-test-binary
|
||||||
|
RUN mkdir -p /build/tests && find . -name test.main -exec cp --parents '{}' /build/tests \;
|
||||||
|
|
||||||
|
## Generate testing image
|
||||||
|
FROM alpine:3.10 as runner
|
||||||
|
|
||||||
|
ENV DOCKER_REMOTE_DAEMON=1
|
||||||
|
ENV DOCKER_INTEGRATION_DAEMON_DEST=/
|
||||||
|
ENTRYPOINT ["/scripts/run.sh"]
|
||||||
|
|
||||||
|
# Add an unprivileged user to be used for tests which need it
|
||||||
|
RUN addgroup docker && adduser -D -G docker unprivilegeduser -s /bin/ash
|
||||||
|
|
||||||
|
# GNU tar is used for generating the emptyfs image
|
||||||
|
RUN apk --no-cache add \
|
||||||
|
bash \
|
||||||
|
ca-certificates \
|
||||||
|
g++ \
|
||||||
|
git \
|
||||||
|
iptables \
|
||||||
|
pigz \
|
||||||
|
tar \
|
||||||
|
xz
|
||||||
|
|
||||||
|
COPY hack/test/e2e-run.sh /scripts/run.sh
|
||||||
|
COPY hack/make/.ensure-emptyfs /scripts/ensure-emptyfs.sh
|
||||||
|
|
||||||
|
COPY integration/testdata /tests/integration/testdata
|
||||||
|
COPY integration/build/testdata /tests/integration/build/testdata
|
||||||
|
COPY integration-cli/fixtures /tests/integration-cli/fixtures
|
||||||
|
|
||||||
|
COPY --from=frozen-images /build/ /docker-frozen-images
|
||||||
|
COPY --from=dockercli /build/ /usr/bin/
|
||||||
|
COPY --from=contrib /build/ /tests/contrib/
|
||||||
|
COPY --from=builder /build/ /
|
|
@ -5,24 +5,27 @@
|
||||||
|
|
||||||
# This represents the bare minimum required to build and test Docker.
|
# This represents the bare minimum required to build and test Docker.
|
||||||
|
|
||||||
ARG GO_VERSION=1.21.9
|
ARG GO_VERSION=1.13.15
|
||||||
|
|
||||||
ARG BASE_DEBIAN_DISTRO="bookworm"
|
FROM golang:${GO_VERSION}-stretch
|
||||||
ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}"
|
|
||||||
|
|
||||||
FROM ${GOLANG_IMAGE}
|
|
||||||
ENV GO111MODULE=off
|
ENV GO111MODULE=off
|
||||||
ENV GOTOOLCHAIN=local
|
|
||||||
|
# allow replacing httpredir or deb mirror
|
||||||
|
ARG APT_MIRROR=deb.debian.org
|
||||||
|
RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list
|
||||||
|
|
||||||
# Compile and runtime deps
|
# Compile and runtime deps
|
||||||
# https://github.com/docker/docker/blob/master/project/PACKAGERS.md#build-dependencies
|
# https://github.com/docker/docker/blob/master/project/PACKAGERS.md#build-dependencies
|
||||||
# https://github.com/docker/docker/blob/master/project/PACKAGERS.md#runtime-dependencies
|
# https://github.com/docker/docker/blob/master/project/PACKAGERS.md#runtime-dependencies
|
||||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||||
|
btrfs-tools \
|
||||||
build-essential \
|
build-essential \
|
||||||
curl \
|
curl \
|
||||||
cmake \
|
cmake \
|
||||||
|
gcc \
|
||||||
git \
|
git \
|
||||||
libapparmor-dev \
|
libapparmor-dev \
|
||||||
|
libdevmapper-dev \
|
||||||
libseccomp-dev \
|
libseccomp-dev \
|
||||||
ca-certificates \
|
ca-certificates \
|
||||||
e2fsprogs \
|
e2fsprogs \
|
||||||
|
@ -33,6 +36,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||||
xfsprogs \
|
xfsprogs \
|
||||||
xz-utils \
|
xz-utils \
|
||||||
\
|
\
|
||||||
|
aufs-tools \
|
||||||
vim-common \
|
vim-common \
|
||||||
&& rm -rf /var/lib/apt/lists/*
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
|
|
|
@ -154,31 +154,27 @@
|
||||||
|
|
||||||
# The number of build steps below are explicitly minimised to improve performance.
|
# The number of build steps below are explicitly minimised to improve performance.
|
||||||
|
|
||||||
ARG WINDOWS_BASE_IMAGE=mcr.microsoft.com/windows/servercore
|
# Extremely important - do not change the following line to reference a "specific" image,
|
||||||
ARG WINDOWS_BASE_IMAGE_TAG=ltsc2022
|
# such as `mcr.microsoft.com/windows/servercore:ltsc2019`. If using this Dockerfile in process
|
||||||
FROM ${WINDOWS_BASE_IMAGE}:${WINDOWS_BASE_IMAGE_TAG}
|
# isolated containers, the kernel of the host must match the container image, and hence
|
||||||
|
# would fail between Windows Server 2016 (aka RS1) and Windows Server 2019 (aka RS5).
|
||||||
|
# It is expected that the image `microsoft/windowsservercore:latest` is present, and matches
|
||||||
|
# the hosts kernel version before doing a build.
|
||||||
|
FROM microsoft/windowsservercore
|
||||||
|
|
||||||
# Use PowerShell as the default shell
|
# Use PowerShell as the default shell
|
||||||
SHELL ["powershell", "-Command", "$ErrorActionPreference = 'Stop'; $ProgressPreference = 'SilentlyContinue';"]
|
SHELL ["powershell", "-Command", "$ErrorActionPreference = 'Stop'; $ProgressPreference = 'SilentlyContinue';"]
|
||||||
|
|
||||||
ARG GO_VERSION=1.21.9
|
ARG GO_VERSION=1.13.15
|
||||||
ARG GOTESTSUM_VERSION=v1.8.2
|
|
||||||
ARG GOWINRES_VERSION=v0.3.1
|
|
||||||
ARG CONTAINERD_VERSION=v1.7.15
|
|
||||||
|
|
||||||
# Environment variable notes:
|
# Environment variable notes:
|
||||||
# - GO_VERSION must be consistent with 'Dockerfile' used by Linux.
|
# - GO_VERSION must be consistent with 'Dockerfile' used by Linux.
|
||||||
# - CONTAINERD_VERSION must be consistent with 'hack/dockerfile/install/containerd.installer' used by Linux.
|
|
||||||
# - FROM_DOCKERFILE is used for detection of building within a container.
|
# - FROM_DOCKERFILE is used for detection of building within a container.
|
||||||
ENV GO_VERSION=${GO_VERSION} `
|
ENV GO_VERSION=${GO_VERSION} `
|
||||||
CONTAINERD_VERSION=${CONTAINERD_VERSION} `
|
|
||||||
GIT_VERSION=2.11.1 `
|
GIT_VERSION=2.11.1 `
|
||||||
GOPATH=C:\gopath `
|
GOPATH=C:\gopath `
|
||||||
GO111MODULE=off `
|
GO111MODULE=off `
|
||||||
GOTOOLCHAIN=local `
|
FROM_DOCKERFILE=1
|
||||||
FROM_DOCKERFILE=1 `
|
|
||||||
GOTESTSUM_VERSION=${GOTESTSUM_VERSION} `
|
|
||||||
GOWINRES_VERSION=${GOWINRES_VERSION}
|
|
||||||
|
|
||||||
RUN `
|
RUN `
|
||||||
Function Test-Nano() { `
|
Function Test-Nano() { `
|
||||||
|
@ -207,21 +203,20 @@ RUN `
|
||||||
Throw ("Failed to download " + $source) `
|
Throw ("Failed to download " + $source) `
|
||||||
}`
|
}`
|
||||||
} else { `
|
} else { `
|
||||||
[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; `
|
|
||||||
$webClient = New-Object System.Net.WebClient; `
|
$webClient = New-Object System.Net.WebClient; `
|
||||||
$webClient.DownloadFile($source, $target); `
|
$webClient.DownloadFile($source, $target); `
|
||||||
} `
|
} `
|
||||||
} `
|
} `
|
||||||
`
|
`
|
||||||
setx /M PATH $('C:\git\cmd;C:\git\usr\bin;'+$Env:PATH+';C:\gcc\bin;C:\go\bin;C:\containerd\bin'); `
|
setx /M PATH $('C:\git\cmd;C:\git\usr\bin;'+$Env:PATH+';C:\gcc\bin;C:\go\bin'); `
|
||||||
`
|
`
|
||||||
Write-Host INFO: Downloading git...; `
|
Write-Host INFO: Downloading git...; `
|
||||||
$location='https://www.nuget.org/api/v2/package/GitForWindows/'+$Env:GIT_VERSION; `
|
$location='https://www.nuget.org/api/v2/package/GitForWindows/'+$Env:GIT_VERSION; `
|
||||||
Download-File $location C:\gitsetup.zip; `
|
Download-File $location C:\gitsetup.zip; `
|
||||||
`
|
`
|
||||||
Write-Host INFO: Downloading go...; `
|
Write-Host INFO: Downloading go...; `
|
||||||
$dlGoVersion=$Env:GO_VERSION; `
|
$dlGoVersion=$Env:GO_VERSION -replace '\.0$',''; `
|
||||||
Download-File "https://go.dev/dl/go${dlGoVersion}.windows-amd64.zip" C:\go.zip; `
|
Download-File "https://golang.org/dl/go${dlGoVersion}.windows-amd64.zip" C:\go.zip; `
|
||||||
`
|
`
|
||||||
Write-Host INFO: Downloading compiler 1 of 3...; `
|
Write-Host INFO: Downloading compiler 1 of 3...; `
|
||||||
Download-File https://raw.githubusercontent.com/moby/docker-tdmgcc/master/gcc.zip C:\gcc.zip; `
|
Download-File https://raw.githubusercontent.com/moby/docker-tdmgcc/master/gcc.zip C:\gcc.zip; `
|
||||||
|
@ -254,55 +249,13 @@ RUN `
|
||||||
Remove-Item C:\binutils.zip; `
|
Remove-Item C:\binutils.zip; `
|
||||||
Remove-Item C:\gitsetup.zip; `
|
Remove-Item C:\gitsetup.zip; `
|
||||||
`
|
`
|
||||||
Write-Host INFO: Downloading containerd; `
|
Write-Host INFO: Creating source directory...; `
|
||||||
Install-Package -Force 7Zip4PowerShell; `
|
New-Item -ItemType Directory -Path ${GOPATH}\src\github.com\docker\docker | Out-Null; `
|
||||||
$location='https://github.com/containerd/containerd/releases/download/'+$Env:CONTAINERD_VERSION+'/containerd-'+$Env:CONTAINERD_VERSION.TrimStart('v')+'-windows-amd64.tar.gz'; `
|
|
||||||
Download-File $location C:\containerd.tar.gz; `
|
|
||||||
New-Item -Path C:\containerd -ItemType Directory; `
|
|
||||||
Expand-7Zip C:\containerd.tar.gz C:\; `
|
|
||||||
Expand-7Zip C:\containerd.tar C:\containerd; `
|
|
||||||
Remove-Item C:\containerd.tar.gz; `
|
|
||||||
Remove-Item C:\containerd.tar; `
|
|
||||||
`
|
|
||||||
# Ensure all directories exist that we will require below....
|
|
||||||
$srcDir = """$Env:GOPATH`\src\github.com\docker\docker\bundles"""; `
|
|
||||||
Write-Host INFO: Ensuring existence of directory $srcDir...; `
|
|
||||||
New-Item -Force -ItemType Directory -Path $srcDir | Out-Null; `
|
|
||||||
`
|
`
|
||||||
Write-Host INFO: Configuring git core.autocrlf...; `
|
Write-Host INFO: Configuring git core.autocrlf...; `
|
||||||
C:\git\cmd\git config --global core.autocrlf true;
|
C:\git\cmd\git config --global core.autocrlf true; `
|
||||||
|
|
||||||
RUN `
|
|
||||||
Function Install-GoTestSum() { `
|
|
||||||
$Env:GO111MODULE = 'on'; `
|
|
||||||
$tmpGobin = "${Env:GOBIN_TMP}"; `
|
|
||||||
$Env:GOBIN = """${Env:GOPATH}`\bin"""; `
|
|
||||||
Write-Host "INFO: Installing gotestsum version $Env:GOTESTSUM_VERSION in $Env:GOBIN"; `
|
|
||||||
&go install "gotest.tools/gotestsum@${Env:GOTESTSUM_VERSION}"; `
|
|
||||||
$Env:GOBIN = "${tmpGobin}"; `
|
|
||||||
$Env:GO111MODULE = 'off'; `
|
|
||||||
if ($LASTEXITCODE -ne 0) { `
|
|
||||||
Throw '"gotestsum install failed..."'; `
|
|
||||||
} `
|
|
||||||
} `
|
|
||||||
`
|
`
|
||||||
Install-GoTestSum
|
Write-Host INFO: Completed
|
||||||
|
|
||||||
RUN `
|
|
||||||
Function Install-GoWinres() { `
|
|
||||||
$Env:GO111MODULE = 'on'; `
|
|
||||||
$tmpGobin = "${Env:GOBIN_TMP}"; `
|
|
||||||
$Env:GOBIN = """${Env:GOPATH}`\bin"""; `
|
|
||||||
Write-Host "INFO: Installing go-winres version $Env:GOWINRES_VERSION in $Env:GOBIN"; `
|
|
||||||
&go install "github.com/tc-hib/go-winres@${Env:GOWINRES_VERSION}"; `
|
|
||||||
$Env:GOBIN = "${tmpGobin}"; `
|
|
||||||
$Env:GO111MODULE = 'off'; `
|
|
||||||
if ($LASTEXITCODE -ne 0) { `
|
|
||||||
Throw '"go-winres install failed..."'; `
|
|
||||||
} `
|
|
||||||
} `
|
|
||||||
`
|
|
||||||
Install-GoWinres
|
|
||||||
|
|
||||||
# Make PowerShell the default entrypoint
|
# Make PowerShell the default entrypoint
|
||||||
ENTRYPOINT ["powershell.exe"]
|
ENTRYPOINT ["powershell.exe"]
|
||||||
|
|
748
Jenkinsfile
vendored
748
Jenkinsfile
vendored
|
@ -8,14 +8,20 @@ pipeline {
|
||||||
timestamps()
|
timestamps()
|
||||||
}
|
}
|
||||||
parameters {
|
parameters {
|
||||||
booleanParam(name: 'arm64', defaultValue: true, description: 'ARM (arm64) Build/Test')
|
booleanParam(name: 'unit_validate', defaultValue: true, description: 'amd64 (x86_64) unit tests and vendor check')
|
||||||
booleanParam(name: 'dco', defaultValue: true, description: 'Run the DCO check')
|
booleanParam(name: 'amd64', defaultValue: true, description: 'amd64 (x86_64) Build/Test')
|
||||||
|
booleanParam(name: 's390x', defaultValue: true, description: 'IBM Z (s390x) Build/Test')
|
||||||
|
booleanParam(name: 'ppc64le', defaultValue: true, description: 'PowerPC (ppc64le) Build/Test')
|
||||||
|
booleanParam(name: 'windowsRS1', defaultValue: false, description: 'Windows 2016 (RS1) Build/Test')
|
||||||
|
booleanParam(name: 'windowsRS5', defaultValue: true, description: 'Windows 2019 (RS5) Build/Test')
|
||||||
|
booleanParam(name: 'skip_dco', defaultValue: false, description: 'Skip the DCO check')
|
||||||
}
|
}
|
||||||
environment {
|
environment {
|
||||||
DOCKER_BUILDKIT = '1'
|
DOCKER_BUILDKIT = '1'
|
||||||
DOCKER_EXPERIMENTAL = '1'
|
DOCKER_EXPERIMENTAL = '1'
|
||||||
DOCKER_GRAPHDRIVER = 'overlay2'
|
DOCKER_GRAPHDRIVER = 'overlay2'
|
||||||
CHECK_CONFIG_COMMIT = '33a3680e08d1007e72c3b3f1454f823d8e9948ee'
|
APT_MIRROR = 'cdn-fastly.deb.debian.org'
|
||||||
|
CHECK_CONFIG_COMMIT = '78405559cfe5987174aa2cb6463b9b2c1b917255'
|
||||||
TESTDEBUG = '0'
|
TESTDEBUG = '0'
|
||||||
TIMEOUT = '120m'
|
TIMEOUT = '120m'
|
||||||
}
|
}
|
||||||
|
@ -34,30 +40,27 @@ pipeline {
|
||||||
stage('DCO-check') {
|
stage('DCO-check') {
|
||||||
when {
|
when {
|
||||||
beforeAgent true
|
beforeAgent true
|
||||||
expression { params.dco }
|
expression { !params.skip_dco }
|
||||||
}
|
}
|
||||||
agent { label 'arm64 && ubuntu-2004' }
|
agent { label 'amd64 && ubuntu-1804 && overlay2' }
|
||||||
steps {
|
steps {
|
||||||
sh '''
|
sh '''
|
||||||
docker run --rm \
|
docker run --rm \
|
||||||
-v "$WORKSPACE:/workspace" \
|
-v "$WORKSPACE:/workspace" \
|
||||||
-e VALIDATE_REPO=${GIT_URL} \
|
-e VALIDATE_REPO=${GIT_URL} \
|
||||||
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
|
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
|
||||||
alpine sh -c 'apk add --no-cache -q bash git openssh-client && git config --system --add safe.directory /workspace && cd /workspace && hack/validate/dco'
|
alpine sh -c 'apk add --no-cache -q bash git openssh-client && cd /workspace && hack/validate/dco'
|
||||||
'''
|
'''
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
stage('Build') {
|
stage('Build') {
|
||||||
parallel {
|
parallel {
|
||||||
stage('arm64') {
|
stage('unit-validate') {
|
||||||
when {
|
when {
|
||||||
beforeAgent true
|
beforeAgent true
|
||||||
expression { params.arm64 }
|
expression { params.unit_validate }
|
||||||
}
|
|
||||||
agent { label 'arm64 && ubuntu-2004' }
|
|
||||||
environment {
|
|
||||||
TEST_SKIP_INTEGRATION_CLI = '1'
|
|
||||||
}
|
}
|
||||||
|
agent { label 'amd64 && ubuntu-1804 && overlay2' }
|
||||||
|
|
||||||
stages {
|
stages {
|
||||||
stage("Print info") {
|
stage("Print info") {
|
||||||
|
@ -73,14 +76,98 @@ pipeline {
|
||||||
}
|
}
|
||||||
stage("Build dev image") {
|
stage("Build dev image") {
|
||||||
steps {
|
steps {
|
||||||
sh 'docker build --force-rm -t docker:${GIT_COMMIT} .'
|
sh 'docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .'
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
stage("Unit tests") {
|
stage("Validate") {
|
||||||
steps {
|
steps {
|
||||||
sh '''
|
sh '''
|
||||||
sudo modprobe ip6table_filter
|
docker run --rm -t --privileged \
|
||||||
|
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
|
||||||
|
-v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
|
||||||
|
--name docker-pr$BUILD_NUMBER \
|
||||||
|
-e DOCKER_EXPERIMENTAL \
|
||||||
|
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
|
||||||
|
-e DOCKER_GRAPHDRIVER \
|
||||||
|
-e VALIDATE_REPO=${GIT_URL} \
|
||||||
|
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
|
||||||
|
docker:${GIT_COMMIT} \
|
||||||
|
hack/validate/default
|
||||||
'''
|
'''
|
||||||
|
}
|
||||||
|
}
|
||||||
|
stage("Docker-py") {
|
||||||
|
steps {
|
||||||
|
sh '''
|
||||||
|
docker run --rm -t --privileged \
|
||||||
|
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
|
||||||
|
--name docker-pr$BUILD_NUMBER \
|
||||||
|
-e DOCKER_EXPERIMENTAL \
|
||||||
|
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
|
||||||
|
-e DOCKER_GRAPHDRIVER \
|
||||||
|
-e VALIDATE_REPO=${GIT_URL} \
|
||||||
|
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
|
||||||
|
docker:${GIT_COMMIT} \
|
||||||
|
hack/make.sh \
|
||||||
|
dynbinary-daemon \
|
||||||
|
test-docker-py
|
||||||
|
'''
|
||||||
|
}
|
||||||
|
post {
|
||||||
|
always {
|
||||||
|
junit testResults: 'bundles/test-docker-py/junit-report.xml', allowEmptyResults: true
|
||||||
|
|
||||||
|
sh '''
|
||||||
|
echo "Ensuring container killed."
|
||||||
|
docker rm -vf docker-pr$BUILD_NUMBER || true
|
||||||
|
'''
|
||||||
|
|
||||||
|
sh '''
|
||||||
|
echo 'Chowning /workspace to jenkins user'
|
||||||
|
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
|
||||||
|
'''
|
||||||
|
|
||||||
|
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
|
||||||
|
sh '''
|
||||||
|
bundleName=docker-py
|
||||||
|
echo "Creating ${bundleName}-bundles.tar.gz"
|
||||||
|
tar -czf ${bundleName}-bundles.tar.gz bundles/test-docker-py/*.xml bundles/test-docker-py/*.log
|
||||||
|
'''
|
||||||
|
|
||||||
|
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
stage("Static") {
|
||||||
|
steps {
|
||||||
|
sh '''
|
||||||
|
docker run --rm -t --privileged \
|
||||||
|
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
|
||||||
|
--name docker-pr$BUILD_NUMBER \
|
||||||
|
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
|
||||||
|
-e DOCKER_GRAPHDRIVER \
|
||||||
|
docker:${GIT_COMMIT} \
|
||||||
|
hack/make.sh binary-daemon
|
||||||
|
'''
|
||||||
|
}
|
||||||
|
}
|
||||||
|
stage("Cross") {
|
||||||
|
steps {
|
||||||
|
sh '''
|
||||||
|
docker run --rm -t --privileged \
|
||||||
|
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
|
||||||
|
--name docker-pr$BUILD_NUMBER \
|
||||||
|
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
|
||||||
|
-e DOCKER_GRAPHDRIVER \
|
||||||
|
docker:${GIT_COMMIT} \
|
||||||
|
hack/make.sh cross
|
||||||
|
'''
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// needs to be last stage that calls make.sh for the junit report to work
|
||||||
|
stage("Unit tests") {
|
||||||
|
steps {
|
||||||
sh '''
|
sh '''
|
||||||
docker run --rm -t --privileged \
|
docker run --rm -t --privileged \
|
||||||
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
|
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
|
||||||
|
@ -96,7 +183,237 @@ pipeline {
|
||||||
}
|
}
|
||||||
post {
|
post {
|
||||||
always {
|
always {
|
||||||
junit testResults: 'bundles/junit-report*.xml', allowEmptyResults: true
|
junit testResults: 'bundles/junit-report.xml', allowEmptyResults: true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
stage("Validate vendor") {
|
||||||
|
steps {
|
||||||
|
sh '''
|
||||||
|
docker run --rm -t --privileged \
|
||||||
|
-v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
|
||||||
|
--name docker-pr$BUILD_NUMBER \
|
||||||
|
-e DOCKER_EXPERIMENTAL \
|
||||||
|
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
|
||||||
|
-e DOCKER_GRAPHDRIVER \
|
||||||
|
-e VALIDATE_REPO=${GIT_URL} \
|
||||||
|
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
|
||||||
|
docker:${GIT_COMMIT} \
|
||||||
|
hack/validate/vendor
|
||||||
|
'''
|
||||||
|
}
|
||||||
|
}
|
||||||
|
stage("Build e2e image") {
|
||||||
|
steps {
|
||||||
|
sh '''
|
||||||
|
echo "Building e2e image"
|
||||||
|
docker build --build-arg DOCKER_GITCOMMIT=${GIT_COMMIT} -t moby-e2e-test -f Dockerfile.e2e .
|
||||||
|
'''
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
post {
|
||||||
|
always {
|
||||||
|
sh '''
|
||||||
|
echo 'Ensuring container killed.'
|
||||||
|
docker rm -vf docker-pr$BUILD_NUMBER || true
|
||||||
|
'''
|
||||||
|
|
||||||
|
sh '''
|
||||||
|
echo 'Chowning /workspace to jenkins user'
|
||||||
|
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
|
||||||
|
'''
|
||||||
|
|
||||||
|
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
|
||||||
|
sh '''
|
||||||
|
bundleName=unit
|
||||||
|
echo "Creating ${bundleName}-bundles.tar.gz"
|
||||||
|
tar -czvf ${bundleName}-bundles.tar.gz bundles/junit-report.xml bundles/go-test-report.json bundles/profile.out
|
||||||
|
'''
|
||||||
|
|
||||||
|
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
cleanup {
|
||||||
|
sh 'make clean'
|
||||||
|
deleteDir()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
stage('amd64') {
|
||||||
|
when {
|
||||||
|
beforeAgent true
|
||||||
|
expression { params.amd64 }
|
||||||
|
}
|
||||||
|
agent { label 'amd64 && ubuntu-1804 && overlay2' }
|
||||||
|
|
||||||
|
stages {
|
||||||
|
stage("Print info") {
|
||||||
|
steps {
|
||||||
|
sh 'docker version'
|
||||||
|
sh 'docker info'
|
||||||
|
sh '''
|
||||||
|
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
|
||||||
|
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
|
||||||
|
&& bash ${WORKSPACE}/check-config.sh || true
|
||||||
|
'''
|
||||||
|
}
|
||||||
|
}
|
||||||
|
stage("Build dev image") {
|
||||||
|
steps {
|
||||||
|
sh '''
|
||||||
|
# todo: include ip_vs in base image
|
||||||
|
sudo modprobe ip_vs
|
||||||
|
|
||||||
|
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
|
||||||
|
'''
|
||||||
|
}
|
||||||
|
}
|
||||||
|
stage("Run tests") {
|
||||||
|
steps {
|
||||||
|
sh '''#!/bin/bash
|
||||||
|
# bash is needed so 'jobs -p' works properly
|
||||||
|
# it also accepts setting inline envvars for functions without explicitly exporting
|
||||||
|
set -x
|
||||||
|
|
||||||
|
run_tests() {
|
||||||
|
[ -n "$TESTDEBUG" ] && rm= || rm=--rm;
|
||||||
|
docker run $rm -t --privileged \
|
||||||
|
-v "$WORKSPACE/bundles/${TEST_INTEGRATION_DEST}:/go/src/github.com/docker/docker/bundles" \
|
||||||
|
-v "$WORKSPACE/bundles/dynbinary-daemon:/go/src/github.com/docker/docker/bundles/dynbinary-daemon" \
|
||||||
|
-v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
|
||||||
|
--name "$CONTAINER_NAME" \
|
||||||
|
-e KEEPBUNDLE=1 \
|
||||||
|
-e TESTDEBUG \
|
||||||
|
-e TESTFLAGS \
|
||||||
|
-e TEST_SKIP_INTEGRATION \
|
||||||
|
-e TEST_SKIP_INTEGRATION_CLI \
|
||||||
|
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
|
||||||
|
-e DOCKER_GRAPHDRIVER \
|
||||||
|
-e TIMEOUT \
|
||||||
|
-e VALIDATE_REPO=${GIT_URL} \
|
||||||
|
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
|
||||||
|
docker:${GIT_COMMIT} \
|
||||||
|
hack/make.sh \
|
||||||
|
"$1" \
|
||||||
|
test-integration
|
||||||
|
}
|
||||||
|
|
||||||
|
trap "exit" INT TERM
|
||||||
|
trap 'pids=$(jobs -p); echo "Remaining pids to kill: [$pids]"; [ -z "$pids" ] || kill $pids' EXIT
|
||||||
|
|
||||||
|
CONTAINER_NAME=docker-pr$BUILD_NUMBER
|
||||||
|
|
||||||
|
docker run --rm -t --privileged \
|
||||||
|
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
|
||||||
|
-v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
|
||||||
|
--name ${CONTAINER_NAME}-build \
|
||||||
|
-e DOCKER_EXPERIMENTAL \
|
||||||
|
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
|
||||||
|
-e DOCKER_GRAPHDRIVER \
|
||||||
|
docker:${GIT_COMMIT} \
|
||||||
|
hack/make.sh \
|
||||||
|
dynbinary-daemon
|
||||||
|
|
||||||
|
# flaky + integration
|
||||||
|
TEST_INTEGRATION_DEST=1 CONTAINER_NAME=${CONTAINER_NAME}-1 TEST_SKIP_INTEGRATION_CLI=1 run_tests test-integration-flaky &
|
||||||
|
|
||||||
|
# integration-cli first set
|
||||||
|
TEST_INTEGRATION_DEST=2 CONTAINER_NAME=${CONTAINER_NAME}-2 TEST_SKIP_INTEGRATION=1 TESTFLAGS="-test.run Test(DockerSuite|DockerNetworkSuite|DockerHubPullSuite|DockerRegistrySuite|DockerSchema1RegistrySuite|DockerRegistryAuthTokenSuite|DockerRegistryAuthHtpasswdSuite)/" run_tests &
|
||||||
|
|
||||||
|
# integration-cli second set
|
||||||
|
TEST_INTEGRATION_DEST=3 CONTAINER_NAME=${CONTAINER_NAME}-3 TEST_SKIP_INTEGRATION=1 TESTFLAGS="-test.run Test(DockerSwarmSuite|DockerDaemonSuite|DockerExternalVolumeSuite)/" run_tests &
|
||||||
|
|
||||||
|
c=0
|
||||||
|
for job in $(jobs -p); do
|
||||||
|
wait ${job} || c=$?
|
||||||
|
done
|
||||||
|
exit $c
|
||||||
|
'''
|
||||||
|
}
|
||||||
|
post {
|
||||||
|
always {
|
||||||
|
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
post {
|
||||||
|
always {
|
||||||
|
sh '''
|
||||||
|
echo "Ensuring container killed."
|
||||||
|
cids=$(docker ps -aq -f name=docker-pr${BUILD_NUMBER}-*)
|
||||||
|
[ -n "$cids" ] && docker rm -vf $cids || true
|
||||||
|
'''
|
||||||
|
|
||||||
|
sh '''
|
||||||
|
echo "Chowning /workspace to jenkins user"
|
||||||
|
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
|
||||||
|
'''
|
||||||
|
|
||||||
|
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
|
||||||
|
sh '''
|
||||||
|
bundleName=amd64
|
||||||
|
echo "Creating ${bundleName}-bundles.tar.gz"
|
||||||
|
# exclude overlay2 directories
|
||||||
|
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
|
||||||
|
'''
|
||||||
|
|
||||||
|
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
cleanup {
|
||||||
|
sh 'make clean'
|
||||||
|
deleteDir()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
stage('s390x') {
|
||||||
|
when {
|
||||||
|
beforeAgent true
|
||||||
|
expression { params.s390x }
|
||||||
|
}
|
||||||
|
agent { label 's390x-ubuntu-1804' }
|
||||||
|
|
||||||
|
stages {
|
||||||
|
stage("Print info") {
|
||||||
|
steps {
|
||||||
|
sh 'docker version'
|
||||||
|
sh 'docker info'
|
||||||
|
sh '''
|
||||||
|
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
|
||||||
|
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
|
||||||
|
&& bash ${WORKSPACE}/check-config.sh || true
|
||||||
|
'''
|
||||||
|
}
|
||||||
|
}
|
||||||
|
stage("Build dev image") {
|
||||||
|
steps {
|
||||||
|
sh '''
|
||||||
|
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
|
||||||
|
'''
|
||||||
|
}
|
||||||
|
}
|
||||||
|
stage("Unit tests") {
|
||||||
|
steps {
|
||||||
|
sh '''
|
||||||
|
docker run --rm -t --privileged \
|
||||||
|
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
|
||||||
|
--name docker-pr$BUILD_NUMBER \
|
||||||
|
-e DOCKER_EXPERIMENTAL \
|
||||||
|
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
|
||||||
|
-e DOCKER_GRAPHDRIVER \
|
||||||
|
-e VALIDATE_REPO=${GIT_URL} \
|
||||||
|
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
|
||||||
|
docker:${GIT_COMMIT} \
|
||||||
|
hack/test/unit
|
||||||
|
'''
|
||||||
|
}
|
||||||
|
post {
|
||||||
|
always {
|
||||||
|
junit testResults: 'bundles/junit-report.xml', allowEmptyResults: true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -111,7 +428,6 @@ pipeline {
|
||||||
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
|
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
|
||||||
-e DOCKER_GRAPHDRIVER \
|
-e DOCKER_GRAPHDRIVER \
|
||||||
-e TESTDEBUG \
|
-e TESTDEBUG \
|
||||||
-e TEST_INTEGRATION_USE_SNAPSHOTTER \
|
|
||||||
-e TEST_SKIP_INTEGRATION_CLI \
|
-e TEST_SKIP_INTEGRATION_CLI \
|
||||||
-e TIMEOUT \
|
-e TIMEOUT \
|
||||||
-e VALIDATE_REPO=${GIT_URL} \
|
-e VALIDATE_REPO=${GIT_URL} \
|
||||||
|
@ -144,7 +460,7 @@ pipeline {
|
||||||
|
|
||||||
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
|
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
|
||||||
sh '''
|
sh '''
|
||||||
bundleName=arm64-integration
|
bundleName=s390x-integration
|
||||||
echo "Creating ${bundleName}-bundles.tar.gz"
|
echo "Creating ${bundleName}-bundles.tar.gz"
|
||||||
# exclude overlay2 directories
|
# exclude overlay2 directories
|
||||||
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
|
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
|
||||||
|
@ -159,6 +475,402 @@ pipeline {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
stage('s390x integration-cli') {
|
||||||
|
when {
|
||||||
|
beforeAgent true
|
||||||
|
not { changeRequest() }
|
||||||
|
expression { params.s390x }
|
||||||
|
}
|
||||||
|
agent { label 's390x-ubuntu-1804' }
|
||||||
|
|
||||||
|
stages {
|
||||||
|
stage("Print info") {
|
||||||
|
steps {
|
||||||
|
sh 'docker version'
|
||||||
|
sh 'docker info'
|
||||||
|
sh '''
|
||||||
|
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
|
||||||
|
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
|
||||||
|
&& bash ${WORKSPACE}/check-config.sh || true
|
||||||
|
'''
|
||||||
|
}
|
||||||
|
}
|
||||||
|
stage("Build dev image") {
|
||||||
|
steps {
|
||||||
|
sh '''
|
||||||
|
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
|
||||||
|
'''
|
||||||
|
}
|
||||||
|
}
|
||||||
|
stage("Integration-cli tests") {
|
||||||
|
environment { TEST_SKIP_INTEGRATION = '1' }
|
||||||
|
steps {
|
||||||
|
sh '''
|
||||||
|
docker run --rm -t --privileged \
|
||||||
|
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
|
||||||
|
--name docker-pr$BUILD_NUMBER \
|
||||||
|
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
|
||||||
|
-e DOCKER_GRAPHDRIVER \
|
||||||
|
-e TEST_SKIP_INTEGRATION \
|
||||||
|
-e TIMEOUT \
|
||||||
|
-e VALIDATE_REPO=${GIT_URL} \
|
||||||
|
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
|
||||||
|
docker:${GIT_COMMIT} \
|
||||||
|
hack/make.sh \
|
||||||
|
dynbinary \
|
||||||
|
test-integration
|
||||||
|
'''
|
||||||
|
}
|
||||||
|
post {
|
||||||
|
always {
|
||||||
|
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
post {
|
||||||
|
always {
|
||||||
|
sh '''
|
||||||
|
echo "Ensuring container killed."
|
||||||
|
docker rm -vf docker-pr$BUILD_NUMBER || true
|
||||||
|
'''
|
||||||
|
|
||||||
|
sh '''
|
||||||
|
echo "Chowning /workspace to jenkins user"
|
||||||
|
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
|
||||||
|
'''
|
||||||
|
|
||||||
|
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
|
||||||
|
sh '''
|
||||||
|
bundleName=s390x-integration-cli
|
||||||
|
echo "Creating ${bundleName}-bundles.tar.gz"
|
||||||
|
# exclude overlay2 directories
|
||||||
|
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
|
||||||
|
'''
|
||||||
|
|
||||||
|
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
cleanup {
|
||||||
|
sh 'make clean'
|
||||||
|
deleteDir()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
stage('ppc64le') {
|
||||||
|
when {
|
||||||
|
beforeAgent true
|
||||||
|
expression { params.ppc64le }
|
||||||
|
}
|
||||||
|
agent { label 'ppc64le-ubuntu-1604' }
|
||||||
|
// ppc64le machines run on Docker 18.06, and buildkit has some bugs on that version
|
||||||
|
environment { DOCKER_BUILDKIT = '0' }
|
||||||
|
|
||||||
|
stages {
|
||||||
|
stage("Print info") {
|
||||||
|
steps {
|
||||||
|
sh 'docker version'
|
||||||
|
sh 'docker info'
|
||||||
|
sh '''
|
||||||
|
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
|
||||||
|
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
|
||||||
|
&& bash ${WORKSPACE}/check-config.sh || true
|
||||||
|
'''
|
||||||
|
}
|
||||||
|
}
|
||||||
|
stage("Build dev image") {
|
||||||
|
steps {
|
||||||
|
sh 'docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .'
|
||||||
|
}
|
||||||
|
}
|
||||||
|
stage("Unit tests") {
|
||||||
|
steps {
|
||||||
|
sh '''
|
||||||
|
docker run --rm -t --privileged \
|
||||||
|
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
|
||||||
|
--name docker-pr$BUILD_NUMBER \
|
||||||
|
-e DOCKER_EXPERIMENTAL \
|
||||||
|
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
|
||||||
|
-e DOCKER_GRAPHDRIVER \
|
||||||
|
-e VALIDATE_REPO=${GIT_URL} \
|
||||||
|
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
|
||||||
|
docker:${GIT_COMMIT} \
|
||||||
|
hack/test/unit
|
||||||
|
'''
|
||||||
|
}
|
||||||
|
post {
|
||||||
|
always {
|
||||||
|
junit testResults: 'bundles/junit-report.xml', allowEmptyResults: true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
stage("Integration tests") {
|
||||||
|
environment { TEST_SKIP_INTEGRATION_CLI = '1' }
|
||||||
|
steps {
|
||||||
|
sh '''
|
||||||
|
docker run --rm -t --privileged \
|
||||||
|
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
|
||||||
|
--name docker-pr$BUILD_NUMBER \
|
||||||
|
-e DOCKER_EXPERIMENTAL \
|
||||||
|
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
|
||||||
|
-e DOCKER_GRAPHDRIVER \
|
||||||
|
-e TESTDEBUG \
|
||||||
|
-e TEST_SKIP_INTEGRATION_CLI \
|
||||||
|
-e TIMEOUT \
|
||||||
|
-e VALIDATE_REPO=${GIT_URL} \
|
||||||
|
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
|
||||||
|
docker:${GIT_COMMIT} \
|
||||||
|
hack/make.sh \
|
||||||
|
dynbinary \
|
||||||
|
test-integration
|
||||||
|
'''
|
||||||
|
}
|
||||||
|
post {
|
||||||
|
always {
|
||||||
|
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
post {
|
||||||
|
always {
|
||||||
|
sh '''
|
||||||
|
echo "Ensuring container killed."
|
||||||
|
docker rm -vf docker-pr$BUILD_NUMBER || true
|
||||||
|
'''
|
||||||
|
|
||||||
|
sh '''
|
||||||
|
echo "Chowning /workspace to jenkins user"
|
||||||
|
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
|
||||||
|
'''
|
||||||
|
|
||||||
|
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
|
||||||
|
sh '''
|
||||||
|
bundleName=ppc64le-integration
|
||||||
|
echo "Creating ${bundleName}-bundles.tar.gz"
|
||||||
|
# exclude overlay2 directories
|
||||||
|
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
|
||||||
|
'''
|
||||||
|
|
||||||
|
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
cleanup {
|
||||||
|
sh 'make clean'
|
||||||
|
deleteDir()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
stage('ppc64le integration-cli') {
|
||||||
|
when {
|
||||||
|
beforeAgent true
|
||||||
|
not { changeRequest() }
|
||||||
|
expression { params.ppc64le }
|
||||||
|
}
|
||||||
|
agent { label 'ppc64le-ubuntu-1604' }
|
||||||
|
// ppc64le machines run on Docker 18.06, and buildkit has some bugs on that version
|
||||||
|
environment { DOCKER_BUILDKIT = '0' }
|
||||||
|
|
||||||
|
stages {
|
||||||
|
stage("Print info") {
|
||||||
|
steps {
|
||||||
|
sh 'docker version'
|
||||||
|
sh 'docker info'
|
||||||
|
sh '''
|
||||||
|
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
|
||||||
|
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
|
||||||
|
&& bash ${WORKSPACE}/check-config.sh || true
|
||||||
|
'''
|
||||||
|
}
|
||||||
|
}
|
||||||
|
stage("Build dev image") {
|
||||||
|
steps {
|
||||||
|
sh 'docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .'
|
||||||
|
}
|
||||||
|
}
|
||||||
|
stage("Integration-cli tests") {
|
||||||
|
environment { TEST_SKIP_INTEGRATION = '1' }
|
||||||
|
steps {
|
||||||
|
sh '''
|
||||||
|
docker run --rm -t --privileged \
|
||||||
|
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
|
||||||
|
--name docker-pr$BUILD_NUMBER \
|
||||||
|
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
|
||||||
|
-e DOCKER_GRAPHDRIVER \
|
||||||
|
-e TEST_SKIP_INTEGRATION \
|
||||||
|
-e TIMEOUT \
|
||||||
|
-e VALIDATE_REPO=${GIT_URL} \
|
||||||
|
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
|
||||||
|
docker:${GIT_COMMIT} \
|
||||||
|
hack/make.sh \
|
||||||
|
dynbinary \
|
||||||
|
test-integration
|
||||||
|
'''
|
||||||
|
}
|
||||||
|
post {
|
||||||
|
always {
|
||||||
|
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
post {
|
||||||
|
always {
|
||||||
|
sh '''
|
||||||
|
echo "Ensuring container killed."
|
||||||
|
docker rm -vf docker-pr$BUILD_NUMBER || true
|
||||||
|
'''
|
||||||
|
|
||||||
|
sh '''
|
||||||
|
echo "Chowning /workspace to jenkins user"
|
||||||
|
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
|
||||||
|
'''
|
||||||
|
|
||||||
|
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
|
||||||
|
sh '''
|
||||||
|
bundleName=ppc64le-integration-cli
|
||||||
|
echo "Creating ${bundleName}-bundles.tar.gz"
|
||||||
|
# exclude overlay2 directories
|
||||||
|
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
|
||||||
|
'''
|
||||||
|
|
||||||
|
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
cleanup {
|
||||||
|
sh 'make clean'
|
||||||
|
deleteDir()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
stage('win-RS1') {
|
||||||
|
when {
|
||||||
|
beforeAgent true
|
||||||
|
// Skip this stage on PRs unless the windowsRS1 checkbox is selected
|
||||||
|
anyOf {
|
||||||
|
not { changeRequest() }
|
||||||
|
expression { params.windowsRS1 }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
environment {
|
||||||
|
DOCKER_BUILDKIT = '0'
|
||||||
|
DOCKER_DUT_DEBUG = '1'
|
||||||
|
SKIP_VALIDATION_TESTS = '1'
|
||||||
|
SOURCES_DRIVE = 'd'
|
||||||
|
SOURCES_SUBDIR = 'gopath'
|
||||||
|
TESTRUN_DRIVE = 'd'
|
||||||
|
TESTRUN_SUBDIR = "CI"
|
||||||
|
WINDOWS_BASE_IMAGE = 'mcr.microsoft.com/windows/servercore'
|
||||||
|
WINDOWS_BASE_IMAGE_TAG = 'ltsc2016'
|
||||||
|
}
|
||||||
|
agent {
|
||||||
|
node {
|
||||||
|
customWorkspace 'd:\\gopath\\src\\github.com\\docker\\docker'
|
||||||
|
label 'windows-2016'
|
||||||
|
}
|
||||||
|
}
|
||||||
|
stages {
|
||||||
|
stage("Print info") {
|
||||||
|
steps {
|
||||||
|
sh 'docker version'
|
||||||
|
sh 'docker info'
|
||||||
|
}
|
||||||
|
}
|
||||||
|
stage("Run tests") {
|
||||||
|
steps {
|
||||||
|
powershell '''
|
||||||
|
$ErrorActionPreference = 'Stop'
|
||||||
|
[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12
|
||||||
|
Invoke-WebRequest https://github.com/moby/docker-ci-zap/blob/master/docker-ci-zap.exe?raw=true -OutFile C:/Windows/System32/docker-ci-zap.exe
|
||||||
|
./hack/ci/windows.ps1
|
||||||
|
exit $LastExitCode
|
||||||
|
'''
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
post {
|
||||||
|
always {
|
||||||
|
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
|
||||||
|
powershell '''
|
||||||
|
$bundleName="windowsRS1-integration"
|
||||||
|
Write-Host -ForegroundColor Green "Creating ${bundleName}-bundles.zip"
|
||||||
|
|
||||||
|
# archiveArtifacts does not support env-vars to , so save the artifacts in a fixed location
|
||||||
|
Compress-Archive -Path "${env:TEMP}/CIDUT.out", "${env:TEMP}/CIDUT.err" -CompressionLevel Optimal -DestinationPath "${bundleName}-bundles.zip"
|
||||||
|
'''
|
||||||
|
|
||||||
|
archiveArtifacts artifacts: '*-bundles.zip', allowEmptyArchive: true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
cleanup {
|
||||||
|
sh 'make clean'
|
||||||
|
deleteDir()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
stage('win-RS5') {
|
||||||
|
when {
|
||||||
|
beforeAgent true
|
||||||
|
expression { params.windowsRS5 }
|
||||||
|
}
|
||||||
|
environment {
|
||||||
|
DOCKER_BUILDKIT = '0'
|
||||||
|
DOCKER_DUT_DEBUG = '1'
|
||||||
|
SKIP_VALIDATION_TESTS = '1'
|
||||||
|
SOURCES_DRIVE = 'd'
|
||||||
|
SOURCES_SUBDIR = 'gopath'
|
||||||
|
TESTRUN_DRIVE = 'd'
|
||||||
|
TESTRUN_SUBDIR = "CI"
|
||||||
|
WINDOWS_BASE_IMAGE = 'mcr.microsoft.com/windows/servercore'
|
||||||
|
WINDOWS_BASE_IMAGE_TAG = 'ltsc2019'
|
||||||
|
}
|
||||||
|
agent {
|
||||||
|
node {
|
||||||
|
customWorkspace 'd:\\gopath\\src\\github.com\\docker\\docker'
|
||||||
|
label 'windows-2019'
|
||||||
|
}
|
||||||
|
}
|
||||||
|
stages {
|
||||||
|
stage("Print info") {
|
||||||
|
steps {
|
||||||
|
sh 'docker version'
|
||||||
|
sh 'docker info'
|
||||||
|
}
|
||||||
|
}
|
||||||
|
stage("Run tests") {
|
||||||
|
steps {
|
||||||
|
powershell '''
|
||||||
|
$ErrorActionPreference = 'Stop'
|
||||||
|
Invoke-WebRequest https://github.com/moby/docker-ci-zap/blob/master/docker-ci-zap.exe?raw=true -OutFile C:/Windows/System32/docker-ci-zap.exe
|
||||||
|
./hack/ci/windows.ps1
|
||||||
|
exit $LastExitCode
|
||||||
|
'''
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
post {
|
||||||
|
always {
|
||||||
|
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
|
||||||
|
powershell '''
|
||||||
|
$bundleName="windowsRS5-integration"
|
||||||
|
Write-Host -ForegroundColor Green "Creating ${bundleName}-bundles.zip"
|
||||||
|
|
||||||
|
# archiveArtifacts does not support env-vars to , so save the artifacts in a fixed location
|
||||||
|
Compress-Archive -Path "${env:TEMP}/CIDUT.out", "${env:TEMP}/CIDUT.err" -CompressionLevel Optimal -DestinationPath "${bundleName}-bundles.zip"
|
||||||
|
'''
|
||||||
|
|
||||||
|
archiveArtifacts artifacts: '*-bundles.zip', allowEmptyArchive: true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
cleanup {
|
||||||
|
sh 'make clean'
|
||||||
|
deleteDir()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
155
MAINTAINERS
155
MAINTAINERS
|
@ -24,23 +24,22 @@
|
||||||
# subsystem maintainers accountable. If ownership is unclear, they are the de facto owners.
|
# subsystem maintainers accountable. If ownership is unclear, they are the de facto owners.
|
||||||
|
|
||||||
people = [
|
people = [
|
||||||
"akerouanton",
|
"aaronlehmann",
|
||||||
"akihirosuda",
|
"akihirosuda",
|
||||||
"anusha",
|
"anusha",
|
||||||
"coolljt0725",
|
"coolljt0725",
|
||||||
"corhere",
|
|
||||||
"cpuguy83",
|
"cpuguy83",
|
||||||
"crazy-max",
|
"crosbymichael",
|
||||||
|
"dnephin",
|
||||||
|
"duglin",
|
||||||
"estesp",
|
"estesp",
|
||||||
|
"jhowardmsft",
|
||||||
"johnstep",
|
"johnstep",
|
||||||
"justincormack",
|
"justincormack",
|
||||||
"kolyshkin",
|
"kolyshkin",
|
||||||
"laurazard",
|
|
||||||
"mhbauer",
|
"mhbauer",
|
||||||
"neersighted",
|
"mlaventure",
|
||||||
"rumpl",
|
|
||||||
"runcom",
|
"runcom",
|
||||||
"samuelkarp",
|
|
||||||
"stevvooe",
|
"stevvooe",
|
||||||
"thajeztah",
|
"thajeztah",
|
||||||
"tianon",
|
"tianon",
|
||||||
|
@ -49,7 +48,6 @@
|
||||||
"unclejack",
|
"unclejack",
|
||||||
"vdemeester",
|
"vdemeester",
|
||||||
"vieux",
|
"vieux",
|
||||||
"vvoland",
|
|
||||||
"yongtang"
|
"yongtang"
|
||||||
]
|
]
|
||||||
|
|
||||||
|
@ -68,16 +66,14 @@
|
||||||
people = [
|
people = [
|
||||||
"alexellis",
|
"alexellis",
|
||||||
"andrewhsu",
|
"andrewhsu",
|
||||||
"bsousaa",
|
"anonymuse",
|
||||||
"dmcgowan",
|
"chanwit",
|
||||||
"fntlnz",
|
"fntlnz",
|
||||||
"gianarb",
|
"gianarb",
|
||||||
"olljanat",
|
"olljanat",
|
||||||
"programmerq",
|
"programmerq",
|
||||||
|
"rheinwein",
|
||||||
"ripcurld",
|
"ripcurld",
|
||||||
"robmry",
|
|
||||||
"sam-thibault",
|
|
||||||
"samwhited",
|
|
||||||
"thajeztah"
|
"thajeztah"
|
||||||
]
|
]
|
||||||
|
|
||||||
|
@ -88,12 +84,6 @@
|
||||||
# Thank you!
|
# Thank you!
|
||||||
|
|
||||||
people = [
|
people = [
|
||||||
# Aaron Lehmann was a maintainer for swarmkit, the registry, and the engine,
|
|
||||||
# and contributed many improvements, features, and bugfixes in those areas,
|
|
||||||
# among which "automated service rollbacks", templated secrets and configs,
|
|
||||||
# and resumable image layer downloads.
|
|
||||||
"aaronlehmann",
|
|
||||||
|
|
||||||
# Harald Albers is the mastermind behind the bash completion scripts for the
|
# Harald Albers is the mastermind behind the bash completion scripts for the
|
||||||
# Docker CLI. The completion scripts moved to the Docker CLI repository, so
|
# Docker CLI. The completion scripts moved to the Docker CLI repository, so
|
||||||
# you can now find him perform his magic in the https://github.com/docker/cli repository.
|
# you can now find him perform his magic in the https://github.com/docker/cli repository.
|
||||||
|
@ -113,30 +103,6 @@
|
||||||
# and tweets as @calavera.
|
# and tweets as @calavera.
|
||||||
"calavera",
|
"calavera",
|
||||||
|
|
||||||
# Michael Crosby was "chief maintainer" of the Docker project.
|
|
||||||
# During his time as a maintainer, Michael contributed to many
|
|
||||||
# milestones of the project; he was release captain of Docker v1.0.0,
|
|
||||||
# started the development of "libcontainer" (what later became runc)
|
|
||||||
# and containerd, as well as demoing cool hacks such as live migrating
|
|
||||||
# a game server container with checkpoint/restore.
|
|
||||||
#
|
|
||||||
# Michael is currently a maintainer of containerd, but you may see
|
|
||||||
# him around in other projects on GitHub.
|
|
||||||
"crosbymichael",
|
|
||||||
|
|
||||||
# Before becoming a maintainer, Daniel Nephin was a core contributor
|
|
||||||
# to "Fig" (now known as Docker Compose). As a maintainer for both the
|
|
||||||
# Engine and Docker CLI, Daniel contributed many features, among which
|
|
||||||
# the `docker stack` commands, allowing users to deploy their Docker
|
|
||||||
# Compose projects as a Swarm service.
|
|
||||||
"dnephin",
|
|
||||||
|
|
||||||
# Doug Davis contributed many features and fixes for the classic builder,
|
|
||||||
# such as "wildcard" copy, the dockerignore file, custom paths/names
|
|
||||||
# for the Dockerfile, as well as enhancements to the API and documentation.
|
|
||||||
# Follow Doug on Twitter, where he tweets as @duginabox.
|
|
||||||
"duglin",
|
|
||||||
|
|
||||||
# As a maintainer, Erik was responsible for the "builder", and
|
# As a maintainer, Erik was responsible for the "builder", and
|
||||||
# started the first designs for the new networking model in
|
# started the first designs for the new networking model in
|
||||||
# Docker. Erik is now working on all kinds of plugins for Docker
|
# Docker. Erik is now working on all kinds of plugins for Docker
|
||||||
|
@ -187,16 +153,6 @@
|
||||||
# check out her open source projects on GitHub https://github.com/jessfraz (a must-try).
|
# check out her open source projects on GitHub https://github.com/jessfraz (a must-try).
|
||||||
"jessfraz",
|
"jessfraz",
|
||||||
|
|
||||||
# As a maintainer, John Howard managed to make the impossible possible;
|
|
||||||
# to run Docker on Windows. After facing many challenges, teaching
|
|
||||||
# fellow-maintainers that 'Windows is not Linux', and many changes in
|
|
||||||
# Windows Server to facilitate containers, native Windows containers
|
|
||||||
# saw the light of day in 2015.
|
|
||||||
#
|
|
||||||
# John is now enjoying life without containers: playing piano, painting,
|
|
||||||
# and walking his dogs, but you may occasionally see him drop by on GitHub.
|
|
||||||
"lowenna",
|
|
||||||
|
|
||||||
# Alexander Morozov contributed many features to Docker, worked on the premise of
|
# Alexander Morozov contributed many features to Docker, worked on the premise of
|
||||||
# what later became containerd (and worked on that too), and made a "stupid" Go
|
# what later became containerd (and worked on that too), and made a "stupid" Go
|
||||||
# vendor tool specifically for docker/docker needs: vndr (https://github.com/LK4D4/vndr).
|
# vendor tool specifically for docker/docker needs: vndr (https://github.com/LK4D4/vndr).
|
||||||
|
@ -210,13 +166,6 @@
|
||||||
# Swarm mode networking.
|
# Swarm mode networking.
|
||||||
"mavenugo",
|
"mavenugo",
|
||||||
|
|
||||||
# As a maintainer, Kenfe-Mickaël Laventure worked on the container runtime,
|
|
||||||
# integrating containerd 1.0 with the daemon, and adding support for custom
|
|
||||||
# OCI runtimes, as well as implementing the `docker prune` subcommands,
|
|
||||||
# which was a welcome feature to be added. You can keep up with Mickaél on
|
|
||||||
# Twitter (@kmlaventure).
|
|
||||||
"mlaventure",
|
|
||||||
|
|
||||||
# As a docs maintainer, Mary Anthony contributed greatly to the Docker
|
# As a docs maintainer, Mary Anthony contributed greatly to the Docker
|
||||||
# docs. She wrote the Docker Contributor Guide and Getting Started
|
# docs. She wrote the Docker Contributor Guide and Getting Started
|
||||||
# Guides. She helped create a doc build system independent of
|
# Guides. She helped create a doc build system independent of
|
||||||
|
@ -284,11 +233,6 @@
|
||||||
Email = "aaron.lehmann@docker.com"
|
Email = "aaron.lehmann@docker.com"
|
||||||
GitHub = "aaronlehmann"
|
GitHub = "aaronlehmann"
|
||||||
|
|
||||||
[people.akerouanton]
|
|
||||||
Name = "Albin Kerouanton"
|
|
||||||
Email = "albinker@gmail.com"
|
|
||||||
GitHub = "akerouanton"
|
|
||||||
|
|
||||||
[people.alexellis]
|
[people.alexellis]
|
||||||
Name = "Alex Ellis"
|
Name = "Alex Ellis"
|
||||||
Email = "alexellis2@gmail.com"
|
Email = "alexellis2@gmail.com"
|
||||||
|
@ -314,16 +258,16 @@
|
||||||
Email = "andrewhsu@docker.com"
|
Email = "andrewhsu@docker.com"
|
||||||
GitHub = "andrewhsu"
|
GitHub = "andrewhsu"
|
||||||
|
|
||||||
|
[people.anonymuse]
|
||||||
|
Name = "Jesse White"
|
||||||
|
Email = "anonymuse@gmail.com"
|
||||||
|
GitHub = "anonymuse"
|
||||||
|
|
||||||
[people.anusha]
|
[people.anusha]
|
||||||
Name = "Anusha Ragunathan"
|
Name = "Anusha Ragunathan"
|
||||||
Email = "anusha@docker.com"
|
Email = "anusha@docker.com"
|
||||||
GitHub = "anusha-ragunathan"
|
GitHub = "anusha-ragunathan"
|
||||||
|
|
||||||
[people.bsousaa]
|
|
||||||
Name = "Bruno de Sousa"
|
|
||||||
Email = "bruno.sousa@docker.com"
|
|
||||||
GitHub = "bsousaa"
|
|
||||||
|
|
||||||
[people.calavera]
|
[people.calavera]
|
||||||
Name = "David Calavera"
|
Name = "David Calavera"
|
||||||
Email = "david.calavera@gmail.com"
|
Email = "david.calavera@gmail.com"
|
||||||
|
@ -334,20 +278,15 @@
|
||||||
Email = "leijitang@huawei.com"
|
Email = "leijitang@huawei.com"
|
||||||
GitHub = "coolljt0725"
|
GitHub = "coolljt0725"
|
||||||
|
|
||||||
[people.corhere]
|
|
||||||
Name = "Cory Snider"
|
|
||||||
Email = "csnider@mirantis.com"
|
|
||||||
GitHub = "corhere"
|
|
||||||
|
|
||||||
[people.cpuguy83]
|
[people.cpuguy83]
|
||||||
Name = "Brian Goff"
|
Name = "Brian Goff"
|
||||||
Email = "cpuguy83@gmail.com"
|
Email = "cpuguy83@gmail.com"
|
||||||
GitHub = "cpuguy83"
|
GitHub = "cpuguy83"
|
||||||
|
|
||||||
[people.crazy-max]
|
[people.chanwit]
|
||||||
Name = "Kevin Alvarez"
|
Name = "Chanwit Kaewkasi"
|
||||||
Email = "contact@crazymax.dev"
|
Email = "chanwit@gmail.com"
|
||||||
GitHub = "crazy-max"
|
GitHub = "chanwit"
|
||||||
|
|
||||||
[people.crosbymichael]
|
[people.crosbymichael]
|
||||||
Name = "Michael Crosby"
|
Name = "Michael Crosby"
|
||||||
|
@ -359,11 +298,6 @@
|
||||||
Email = "dnephin@gmail.com"
|
Email = "dnephin@gmail.com"
|
||||||
GitHub = "dnephin"
|
GitHub = "dnephin"
|
||||||
|
|
||||||
[people.dmcgowan]
|
|
||||||
Name = "Derek McGowan"
|
|
||||||
Email = "derek@mcgstyle.net"
|
|
||||||
GitHub = "dmcgowan"
|
|
||||||
|
|
||||||
[people.duglin]
|
[people.duglin]
|
||||||
Name = "Doug Davis"
|
Name = "Doug Davis"
|
||||||
Email = "dug@us.ibm.com"
|
Email = "dug@us.ibm.com"
|
||||||
|
@ -404,6 +338,11 @@
|
||||||
Email = "james@lovedthanlost.net"
|
Email = "james@lovedthanlost.net"
|
||||||
GitHub = "jamtur01"
|
GitHub = "jamtur01"
|
||||||
|
|
||||||
|
[people.jhowardmsft]
|
||||||
|
Name = "John Howard"
|
||||||
|
Email = "jhoward@microsoft.com"
|
||||||
|
GitHub = "jhowardmsft"
|
||||||
|
|
||||||
[people.jessfraz]
|
[people.jessfraz]
|
||||||
Name = "Jessie Frazelle"
|
Name = "Jessie Frazelle"
|
||||||
Email = "jess@linux.com"
|
Email = "jess@linux.com"
|
||||||
|
@ -424,21 +363,11 @@
|
||||||
Email = "kolyshkin@gmail.com"
|
Email = "kolyshkin@gmail.com"
|
||||||
GitHub = "kolyshkin"
|
GitHub = "kolyshkin"
|
||||||
|
|
||||||
[people.laurazard]
|
|
||||||
Name = "Laura Brehm"
|
|
||||||
Email = "laura.brehm@docker.com"
|
|
||||||
GitHub = "laurazard"
|
|
||||||
|
|
||||||
[people.lk4d4]
|
[people.lk4d4]
|
||||||
Name = "Alexander Morozov"
|
Name = "Alexander Morozov"
|
||||||
Email = "lk4d4@docker.com"
|
Email = "lk4d4@docker.com"
|
||||||
GitHub = "lk4d4"
|
GitHub = "lk4d4"
|
||||||
|
|
||||||
[people.lowenna]
|
|
||||||
Name = "John Howard"
|
|
||||||
Email = "github@lowenna.com"
|
|
||||||
GitHub = "lowenna"
|
|
||||||
|
|
||||||
[people.mavenugo]
|
[people.mavenugo]
|
||||||
Name = "Madhu Venugopal"
|
Name = "Madhu Venugopal"
|
||||||
Email = "madhu@docker.com"
|
Email = "madhu@docker.com"
|
||||||
|
@ -464,11 +393,6 @@
|
||||||
Email = "mrjana@docker.com"
|
Email = "mrjana@docker.com"
|
||||||
GitHub = "mrjana"
|
GitHub = "mrjana"
|
||||||
|
|
||||||
[people.neersighted]
|
|
||||||
Name = "Bjorn Neergaard"
|
|
||||||
Email = "bjorn@neersighted.com"
|
|
||||||
GitHub = "neersighted"
|
|
||||||
|
|
||||||
[people.olljanat]
|
[people.olljanat]
|
||||||
Name = "Olli Janatuinen"
|
Name = "Olli Janatuinen"
|
||||||
Email = "olli.janatuinen@gmail.com"
|
Email = "olli.janatuinen@gmail.com"
|
||||||
|
@ -479,41 +403,21 @@
|
||||||
Email = "jeff@docker.com"
|
Email = "jeff@docker.com"
|
||||||
GitHub = "programmerq"
|
GitHub = "programmerq"
|
||||||
|
|
||||||
[people.robmry]
|
[people.rheinwein]
|
||||||
Name = "Rob Murray"
|
Name = "Laura Frank"
|
||||||
Email = "rob.murray@docker.com"
|
Email = "laura@codeship.com"
|
||||||
GitHub = "robmry"
|
GitHub = "rheinwein"
|
||||||
|
|
||||||
[people.ripcurld]
|
[people.ripcurld]
|
||||||
Name = "Boaz Shuster"
|
Name = "Boaz Shuster"
|
||||||
Email = "ripcurld.github@gmail.com"
|
Email = "ripcurld.github@gmail.com"
|
||||||
GitHub = "ripcurld"
|
GitHub = "ripcurld"
|
||||||
|
|
||||||
[people.rumpl]
|
|
||||||
Name = "Djordje Lukic"
|
|
||||||
Email = "djordje.lukic@docker.com"
|
|
||||||
GitHub = "rumpl"
|
|
||||||
|
|
||||||
[people.runcom]
|
[people.runcom]
|
||||||
Name = "Antonio Murdaca"
|
Name = "Antonio Murdaca"
|
||||||
Email = "runcom@redhat.com"
|
Email = "runcom@redhat.com"
|
||||||
GitHub = "runcom"
|
GitHub = "runcom"
|
||||||
|
|
||||||
[people.sam-thibault]
|
|
||||||
Name = "Sam Thibault"
|
|
||||||
Email = "sam.thibault@docker.com"
|
|
||||||
GitHub = "sam-thibault"
|
|
||||||
|
|
||||||
[people.samuelkarp]
|
|
||||||
Name = "Samuel Karp"
|
|
||||||
Email = "me@samuelkarp.com"
|
|
||||||
GitHub = "samuelkarp"
|
|
||||||
|
|
||||||
[people.samwhited]
|
|
||||||
Name = "Sam Whited"
|
|
||||||
Email = "sam@samwhited.com"
|
|
||||||
GitHub = "samwhited"
|
|
||||||
|
|
||||||
[people.shykes]
|
[people.shykes]
|
||||||
Name = "Solomon Hykes"
|
Name = "Solomon Hykes"
|
||||||
Email = "solomon@docker.com"
|
Email = "solomon@docker.com"
|
||||||
|
@ -574,11 +478,6 @@
|
||||||
Email = "vishnuk@google.com"
|
Email = "vishnuk@google.com"
|
||||||
GitHub = "vishh"
|
GitHub = "vishh"
|
||||||
|
|
||||||
[people.vvoland]
|
|
||||||
Name = "Paweł Gronowski"
|
|
||||||
Email = "pawel.gronowski@docker.com"
|
|
||||||
GitHub = "vvoland"
|
|
||||||
|
|
||||||
[people.yongtang]
|
[people.yongtang]
|
||||||
Name = "Yong Tang"
|
Name = "Yong Tang"
|
||||||
Email = "yong.tang.github@outlook.com"
|
Email = "yong.tang.github@outlook.com"
|
||||||
|
|
152
Makefile
152
Makefile
|
@ -1,13 +1,17 @@
|
||||||
.PHONY: all binary dynbinary build cross help install manpages run shell test test-docker-py test-integration test-unit validate validate-% win
|
.PHONY: all binary dynbinary build cross help install manpages run shell test test-docker-py test-integration test-unit validate win
|
||||||
|
|
||||||
DOCKER ?= docker
|
|
||||||
BUILDX ?= $(DOCKER) buildx
|
|
||||||
|
|
||||||
# set the graph driver as the current graphdriver if not set
|
# set the graph driver as the current graphdriver if not set
|
||||||
DOCKER_GRAPHDRIVER := $(if $(DOCKER_GRAPHDRIVER),$(DOCKER_GRAPHDRIVER),$(shell docker info -f '{{ .Driver }}' 2>&1))
|
DOCKER_GRAPHDRIVER := $(if $(DOCKER_GRAPHDRIVER),$(DOCKER_GRAPHDRIVER),$(shell docker info 2>&1 | grep "Storage Driver" | sed 's/.*: //'))
|
||||||
export DOCKER_GRAPHDRIVER
|
export DOCKER_GRAPHDRIVER
|
||||||
|
|
||||||
DOCKER_GITCOMMIT := $(shell git rev-parse HEAD)
|
# enable/disable cross-compile
|
||||||
|
DOCKER_CROSS ?= false
|
||||||
|
|
||||||
|
# get OS/Arch of docker engine
|
||||||
|
DOCKER_OSARCH := $(shell bash -c 'source hack/make/.detect-daemon-osarch && echo $${DOCKER_ENGINE_OSARCH}')
|
||||||
|
DOCKERFILE := $(shell bash -c 'source hack/make/.detect-daemon-osarch && echo $${DOCKERFILE}')
|
||||||
|
|
||||||
|
DOCKER_GITCOMMIT := $(shell git rev-parse --short HEAD || echo unsupported)
|
||||||
export DOCKER_GITCOMMIT
|
export DOCKER_GITCOMMIT
|
||||||
|
|
||||||
# allow overriding the repository and branch that validation scripts are running
|
# allow overriding the repository and branch that validation scripts are running
|
||||||
|
@ -16,9 +20,6 @@ export VALIDATE_REPO
|
||||||
export VALIDATE_BRANCH
|
export VALIDATE_BRANCH
|
||||||
export VALIDATE_ORIGIN_BRANCH
|
export VALIDATE_ORIGIN_BRANCH
|
||||||
|
|
||||||
export PAGER
|
|
||||||
export GIT_PAGER
|
|
||||||
|
|
||||||
# env vars passed through directly to Docker's build scripts
|
# env vars passed through directly to Docker's build scripts
|
||||||
# to allow things like `make KEEPBUNDLE=1 binary` easily
|
# to allow things like `make KEEPBUNDLE=1 binary` easily
|
||||||
# `project/PACKAGERS.md` have some limited documentation of some of these
|
# `project/PACKAGERS.md` have some limited documentation of some of these
|
||||||
|
@ -27,9 +28,11 @@ export GIT_PAGER
|
||||||
# option of "go build". For example, a built-in graphdriver priority list
|
# option of "go build". For example, a built-in graphdriver priority list
|
||||||
# can be changed during build time like this:
|
# can be changed during build time like this:
|
||||||
#
|
#
|
||||||
# make DOCKER_LDFLAGS="-X github.com/docker/docker/daemon/graphdriver.priority=overlay2,zfs" dynbinary
|
# make DOCKER_LDFLAGS="-X github.com/docker/docker/daemon/graphdriver.priority=overlay2,devicemapper" dynbinary
|
||||||
#
|
#
|
||||||
DOCKER_ENVS := \
|
DOCKER_ENVS := \
|
||||||
|
-e DOCKER_CROSSPLATFORMS \
|
||||||
|
-e BUILD_APT_MIRROR \
|
||||||
-e BUILDFLAGS \
|
-e BUILDFLAGS \
|
||||||
-e KEEPBUNDLE \
|
-e KEEPBUNDLE \
|
||||||
-e DOCKER_BUILD_ARGS \
|
-e DOCKER_BUILD_ARGS \
|
||||||
|
@ -39,10 +42,6 @@ DOCKER_ENVS := \
|
||||||
-e DOCKER_BUILDKIT \
|
-e DOCKER_BUILDKIT \
|
||||||
-e DOCKER_BASH_COMPLETION_PATH \
|
-e DOCKER_BASH_COMPLETION_PATH \
|
||||||
-e DOCKER_CLI_PATH \
|
-e DOCKER_CLI_PATH \
|
||||||
-e DOCKERCLI_VERSION \
|
|
||||||
-e DOCKERCLI_REPOSITORY \
|
|
||||||
-e DOCKERCLI_INTEGRATION_VERSION \
|
|
||||||
-e DOCKERCLI_INTEGRATION_REPOSITORY \
|
|
||||||
-e DOCKER_DEBUG \
|
-e DOCKER_DEBUG \
|
||||||
-e DOCKER_EXPERIMENTAL \
|
-e DOCKER_EXPERIMENTAL \
|
||||||
-e DOCKER_GITCOMMIT \
|
-e DOCKER_GITCOMMIT \
|
||||||
|
@ -50,21 +49,13 @@ DOCKER_ENVS := \
|
||||||
-e DOCKER_LDFLAGS \
|
-e DOCKER_LDFLAGS \
|
||||||
-e DOCKER_PORT \
|
-e DOCKER_PORT \
|
||||||
-e DOCKER_REMAP_ROOT \
|
-e DOCKER_REMAP_ROOT \
|
||||||
-e DOCKER_ROOTLESS \
|
|
||||||
-e DOCKER_STORAGE_OPTS \
|
-e DOCKER_STORAGE_OPTS \
|
||||||
-e DOCKER_TEST_HOST \
|
-e DOCKER_TEST_HOST \
|
||||||
-e DOCKER_USERLANDPROXY \
|
-e DOCKER_USERLANDPROXY \
|
||||||
-e DOCKERD_ARGS \
|
-e DOCKERD_ARGS \
|
||||||
-e DELVE_PORT \
|
|
||||||
-e GITHUB_ACTIONS \
|
|
||||||
-e TEST_FORCE_VALIDATE \
|
|
||||||
-e TEST_INTEGRATION_DIR \
|
-e TEST_INTEGRATION_DIR \
|
||||||
-e TEST_INTEGRATION_USE_SNAPSHOTTER \
|
|
||||||
-e TEST_INTEGRATION_FAIL_FAST \
|
|
||||||
-e TEST_SKIP_INTEGRATION \
|
-e TEST_SKIP_INTEGRATION \
|
||||||
-e TEST_SKIP_INTEGRATION_CLI \
|
-e TEST_SKIP_INTEGRATION_CLI \
|
||||||
-e TEST_IGNORE_CGROUP_CHECK \
|
|
||||||
-e TESTCOVERAGE \
|
|
||||||
-e TESTDEBUG \
|
-e TESTDEBUG \
|
||||||
-e TESTDIRS \
|
-e TESTDIRS \
|
||||||
-e TESTFLAGS \
|
-e TESTFLAGS \
|
||||||
|
@ -75,16 +66,16 @@ DOCKER_ENVS := \
|
||||||
-e VALIDATE_REPO \
|
-e VALIDATE_REPO \
|
||||||
-e VALIDATE_BRANCH \
|
-e VALIDATE_BRANCH \
|
||||||
-e VALIDATE_ORIGIN_BRANCH \
|
-e VALIDATE_ORIGIN_BRANCH \
|
||||||
|
-e HTTP_PROXY \
|
||||||
|
-e HTTPS_PROXY \
|
||||||
|
-e NO_PROXY \
|
||||||
|
-e http_proxy \
|
||||||
|
-e https_proxy \
|
||||||
|
-e no_proxy \
|
||||||
-e VERSION \
|
-e VERSION \
|
||||||
-e PLATFORM \
|
-e PLATFORM \
|
||||||
-e DEFAULT_PRODUCT_LICENSE \
|
-e DEFAULT_PRODUCT_LICENSE \
|
||||||
-e PRODUCT \
|
-e PRODUCT
|
||||||
-e PACKAGER_NAME \
|
|
||||||
-e PAGER \
|
|
||||||
-e GIT_PAGER \
|
|
||||||
-e OTEL_EXPORTER_OTLP_ENDPOINT \
|
|
||||||
-e OTEL_EXPORTER_OTLP_PROTOCOL \
|
|
||||||
-e OTEL_SERVICE_NAME
|
|
||||||
# note: we _cannot_ add "-e DOCKER_BUILDTAGS" here because even if it's unset in the shell, that would shadow the "ENV DOCKER_BUILDTAGS" set in our Dockerfile, which is very important for our official builds
|
# note: we _cannot_ add "-e DOCKER_BUILDTAGS" here because even if it's unset in the shell, that would shadow the "ENV DOCKER_BUILDTAGS" set in our Dockerfile, which is very important for our official builds
|
||||||
|
|
||||||
# to allow `make BIND_DIR=. shell` or `make BIND_DIR= test`
|
# to allow `make BIND_DIR=. shell` or `make BIND_DIR= test`
|
||||||
|
@ -102,7 +93,7 @@ DOCKER_MOUNT := $(if $(DOCKER_BINDDIR_MOUNT_OPTS),$(DOCKER_MOUNT):$(DOCKER_BINDD
|
||||||
# Note that `BIND_DIR` will already be set to `bundles` if `DOCKER_HOST` is not set (see above BIND_DIR line), in such case this will do nothing since `DOCKER_MOUNT` will already be set.
|
# Note that `BIND_DIR` will already be set to `bundles` if `DOCKER_HOST` is not set (see above BIND_DIR line), in such case this will do nothing since `DOCKER_MOUNT` will already be set.
|
||||||
DOCKER_MOUNT := $(if $(DOCKER_MOUNT),$(DOCKER_MOUNT),-v /go/src/github.com/docker/docker/bundles) -v "$(CURDIR)/.git:/go/src/github.com/docker/docker/.git"
|
DOCKER_MOUNT := $(if $(DOCKER_MOUNT),$(DOCKER_MOUNT),-v /go/src/github.com/docker/docker/bundles) -v "$(CURDIR)/.git:/go/src/github.com/docker/docker/.git"
|
||||||
|
|
||||||
DOCKER_MOUNT_CACHE := -v docker-dev-cache:/root/.cache -v docker-mod-cache:/go/pkg/mod/
|
DOCKER_MOUNT_CACHE := -v docker-dev-cache:/root/.cache
|
||||||
DOCKER_MOUNT_CLI := $(if $(DOCKER_CLI_PATH),-v $(shell dirname $(DOCKER_CLI_PATH)):/usr/local/cli,)
|
DOCKER_MOUNT_CLI := $(if $(DOCKER_CLI_PATH),-v $(shell dirname $(DOCKER_CLI_PATH)):/usr/local/cli,)
|
||||||
DOCKER_MOUNT_BASH_COMPLETION := $(if $(DOCKER_BASH_COMPLETION_PATH),-v $(shell dirname $(DOCKER_BASH_COMPLETION_PATH)):/usr/local/completion/bash,)
|
DOCKER_MOUNT_BASH_COMPLETION := $(if $(DOCKER_BASH_COMPLETION_PATH),-v $(shell dirname $(DOCKER_BASH_COMPLETION_PATH)):/usr/local/completion/bash,)
|
||||||
DOCKER_MOUNT := $(DOCKER_MOUNT) $(DOCKER_MOUNT_CACHE) $(DOCKER_MOUNT_CLI) $(DOCKER_MOUNT_BASH_COMPLETION)
|
DOCKER_MOUNT := $(DOCKER_MOUNT) $(DOCKER_MOUNT_CACHE) $(DOCKER_MOUNT_CLI) $(DOCKER_MOUNT_BASH_COMPLETION)
|
||||||
|
@ -111,11 +102,14 @@ endif # ifndef DOCKER_MOUNT
|
||||||
# This allows to set the docker-dev container name
|
# This allows to set the docker-dev container name
|
||||||
DOCKER_CONTAINER_NAME := $(if $(CONTAINER_NAME),--name $(CONTAINER_NAME),)
|
DOCKER_CONTAINER_NAME := $(if $(CONTAINER_NAME),--name $(CONTAINER_NAME),)
|
||||||
|
|
||||||
DOCKER_IMAGE := docker-dev
|
GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
|
||||||
|
GIT_BRANCH_CLEAN := $(shell echo $(GIT_BRANCH) | sed -e "s/[^[:alnum:]]/-/g")
|
||||||
|
DOCKER_IMAGE := docker-dev$(if $(GIT_BRANCH_CLEAN),:$(GIT_BRANCH_CLEAN))
|
||||||
DOCKER_PORT_FORWARD := $(if $(DOCKER_PORT),-p "$(DOCKER_PORT)",)
|
DOCKER_PORT_FORWARD := $(if $(DOCKER_PORT),-p "$(DOCKER_PORT)",)
|
||||||
DELVE_PORT_FORWARD := $(if $(DELVE_PORT),-p "$(DELVE_PORT)",)
|
|
||||||
|
|
||||||
DOCKER_FLAGS := $(DOCKER) run --rm --privileged $(DOCKER_CONTAINER_NAME) $(DOCKER_ENVS) $(DOCKER_MOUNT) $(DOCKER_PORT_FORWARD) $(DELVE_PORT_FORWARD)
|
DOCKER_FLAGS := docker run --rm -i --privileged $(DOCKER_CONTAINER_NAME) $(DOCKER_ENVS) $(DOCKER_MOUNT) $(DOCKER_PORT_FORWARD)
|
||||||
|
BUILD_APT_MIRROR := $(if $(DOCKER_BUILD_APT_MIRROR),--build-arg APT_MIRROR=$(DOCKER_BUILD_APT_MIRROR))
|
||||||
|
export BUILD_APT_MIRROR
|
||||||
|
|
||||||
SWAGGER_DOCS_PORT ?= 9000
|
SWAGGER_DOCS_PORT ?= 9000
|
||||||
|
|
||||||
|
@ -132,42 +126,36 @@ ifeq ($(INTERACTIVE), 1)
|
||||||
DOCKER_FLAGS += -t
|
DOCKER_FLAGS += -t
|
||||||
endif
|
endif
|
||||||
|
|
||||||
# on GitHub Runners input device is not a TTY but we allocate a pseudo-one,
|
|
||||||
# otherwise keep STDIN open even if not attached if not a GitHub Runner.
|
|
||||||
ifeq ($(GITHUB_ACTIONS),true)
|
|
||||||
DOCKER_FLAGS += -t
|
|
||||||
else
|
|
||||||
DOCKER_FLAGS += -i
|
|
||||||
endif
|
|
||||||
|
|
||||||
DOCKER_RUN_DOCKER := $(DOCKER_FLAGS) "$(DOCKER_IMAGE)"
|
DOCKER_RUN_DOCKER := $(DOCKER_FLAGS) "$(DOCKER_IMAGE)"
|
||||||
|
|
||||||
DOCKER_BUILD_ARGS += --build-arg=GO_VERSION
|
|
||||||
DOCKER_BUILD_ARGS += --build-arg=DOCKERCLI_VERSION
|
|
||||||
DOCKER_BUILD_ARGS += --build-arg=DOCKERCLI_REPOSITORY
|
|
||||||
DOCKER_BUILD_ARGS += --build-arg=DOCKERCLI_INTEGRATION_VERSION
|
|
||||||
DOCKER_BUILD_ARGS += --build-arg=DOCKERCLI_INTEGRATION_REPOSITORY
|
|
||||||
ifdef DOCKER_SYSTEMD
|
|
||||||
DOCKER_BUILD_ARGS += --build-arg=SYSTEMD=true
|
|
||||||
endif
|
|
||||||
|
|
||||||
BUILD_OPTS := ${DOCKER_BUILD_ARGS} ${DOCKER_BUILD_OPTS}
|
|
||||||
BUILD_CMD := $(BUILDX) build
|
|
||||||
BAKE_CMD := $(BUILDX) bake
|
|
||||||
|
|
||||||
default: binary
|
default: binary
|
||||||
|
|
||||||
all: build ## validate all checks, build linux binaries, run all tests,\ncross build non-linux binaries, and generate archives
|
all: build ## validate all checks, build linux binaries, run all tests\ncross build non-linux binaries and generate archives
|
||||||
$(DOCKER_RUN_DOCKER) bash -c 'hack/validate/default && hack/make.sh'
|
$(DOCKER_RUN_DOCKER) bash -c 'hack/validate/default && hack/make.sh'
|
||||||
|
|
||||||
binary: bundles ## build statically linked linux binaries
|
binary: build ## build the linux binaries
|
||||||
$(BAKE_CMD) binary
|
$(DOCKER_RUN_DOCKER) hack/make.sh binary
|
||||||
|
|
||||||
dynbinary: bundles ## build dynamically linked linux binaries
|
dynbinary: build ## build the linux dynbinaries
|
||||||
$(BAKE_CMD) dynbinary
|
$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary
|
||||||
|
|
||||||
cross: bundles ## cross build the binaries
|
|
||||||
$(BAKE_CMD) binary-cross
|
|
||||||
|
cross: DOCKER_CROSS := true
|
||||||
|
cross: build ## cross build the binaries for darwin, freebsd and\nwindows
|
||||||
|
$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary binary cross
|
||||||
|
|
||||||
|
ifdef DOCKER_CROSSPLATFORMS
|
||||||
|
build: DOCKER_CROSS := true
|
||||||
|
endif
|
||||||
|
ifeq ($(BIND_DIR), .)
|
||||||
|
build: DOCKER_BUILD_OPTS += --target=dev
|
||||||
|
endif
|
||||||
|
build: DOCKER_BUILD_ARGS += --build-arg=CROSS=$(DOCKER_CROSS)
|
||||||
|
build: DOCKER_BUILDKIT ?= 1
|
||||||
|
build: bundles
|
||||||
|
$(warning The docker client CLI has moved to github.com/docker/cli. For a dev-test cycle involving the CLI, run:${\n} DOCKER_CLI_PATH=/host/path/to/cli/binary make shell ${\n} then change the cli and compile into a binary at the same location.${\n})
|
||||||
|
DOCKER_BUILDKIT="${DOCKER_BUILDKIT}" docker build --build-arg=GO_VERSION ${BUILD_APT_MIRROR} ${DOCKER_BUILD_ARGS} ${DOCKER_BUILD_OPTS} -t "$(DOCKER_IMAGE)" -f "$(DOCKERFILE)" .
|
||||||
|
|
||||||
bundles:
|
bundles:
|
||||||
mkdir bundles
|
mkdir bundles
|
||||||
|
@ -176,8 +164,8 @@ bundles:
|
||||||
clean: clean-cache
|
clean: clean-cache
|
||||||
|
|
||||||
.PHONY: clean-cache
|
.PHONY: clean-cache
|
||||||
clean-cache: ## remove the docker volumes that are used for caching in the dev-container
|
clean-cache:
|
||||||
docker volume rm -f docker-dev-cache docker-mod-cache
|
docker volume rm -f docker-dev-cache
|
||||||
|
|
||||||
help: ## this help
|
help: ## this help
|
||||||
@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z0-9_-]+:.*?## / {gsub("\\\\n",sprintf("\n%22c",""), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)
|
@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z0-9_-]+:.*?## / {gsub("\\\\n",sprintf("\n%22c",""), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)
|
||||||
|
@ -188,20 +176,11 @@ install: ## install the linux binaries
|
||||||
run: build ## run the docker daemon in a container
|
run: build ## run the docker daemon in a container
|
||||||
$(DOCKER_RUN_DOCKER) sh -c "KEEPBUNDLE=1 hack/make.sh install-binary run"
|
$(DOCKER_RUN_DOCKER) sh -c "KEEPBUNDLE=1 hack/make.sh install-binary run"
|
||||||
|
|
||||||
.PHONY: build
|
|
||||||
ifeq ($(BIND_DIR), .)
|
|
||||||
build: shell_target := --target=dev-base
|
|
||||||
else
|
|
||||||
build: shell_target := --target=dev
|
|
||||||
endif
|
|
||||||
build: bundles
|
|
||||||
$(BUILD_CMD) $(BUILD_OPTS) $(shell_target) --load -t "$(DOCKER_IMAGE)" .
|
|
||||||
|
|
||||||
shell: build ## start a shell inside the build env
|
shell: build ## start a shell inside the build env
|
||||||
$(DOCKER_RUN_DOCKER) bash
|
$(DOCKER_RUN_DOCKER) bash
|
||||||
|
|
||||||
test: build test-unit ## run the unit, integration and docker-py tests
|
test: build test-unit ## run the unit, integration and docker-py tests
|
||||||
$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary test-integration test-docker-py
|
$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary cross test-integration test-docker-py
|
||||||
|
|
||||||
test-docker-py: build ## run the docker-py tests
|
test-docker-py: build ## run the docker-py tests
|
||||||
$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary test-docker-py
|
$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary test-docker-py
|
||||||
|
@ -225,16 +204,8 @@ test-unit: build ## run the unit tests
|
||||||
validate: build ## validate DCO, Seccomp profile generation, gofmt,\n./pkg/ isolation, golint, tests, tomls, go vet and vendor
|
validate: build ## validate DCO, Seccomp profile generation, gofmt,\n./pkg/ isolation, golint, tests, tomls, go vet and vendor
|
||||||
$(DOCKER_RUN_DOCKER) hack/validate/all
|
$(DOCKER_RUN_DOCKER) hack/validate/all
|
||||||
|
|
||||||
validate-generate-files:
|
win: build ## cross build the binary for windows
|
||||||
$(BUILD_CMD) --target "validate" \
|
$(DOCKER_RUN_DOCKER) DOCKER_CROSSPLATFORMS=windows/amd64 hack/make.sh cross
|
||||||
--output "type=cacheonly" \
|
|
||||||
--file "./hack/dockerfiles/generate-files.Dockerfile" .
|
|
||||||
|
|
||||||
validate-%: build ## validate specific check
|
|
||||||
$(DOCKER_RUN_DOCKER) hack/validate/$*
|
|
||||||
|
|
||||||
win: bundles ## cross build the binary for windows
|
|
||||||
$(BAKE_CMD) --set *.platform=windows/amd64 binary
|
|
||||||
|
|
||||||
.PHONY: swagger-gen
|
.PHONY: swagger-gen
|
||||||
swagger-gen:
|
swagger-gen:
|
||||||
|
@ -250,17 +221,4 @@ swagger-docs: ## preview the API documentation
|
||||||
@docker run --rm -v $(PWD)/api/swagger.yaml:/usr/share/nginx/html/swagger.yaml \
|
@docker run --rm -v $(PWD)/api/swagger.yaml:/usr/share/nginx/html/swagger.yaml \
|
||||||
-e 'REDOC_OPTIONS=hide-hostname="true" lazy-rendering' \
|
-e 'REDOC_OPTIONS=hide-hostname="true" lazy-rendering' \
|
||||||
-p $(SWAGGER_DOCS_PORT):80 \
|
-p $(SWAGGER_DOCS_PORT):80 \
|
||||||
bfirsh/redoc:1.14.0
|
bfirsh/redoc:1.6.2
|
||||||
|
|
||||||
.PHONY: generate-files
|
|
||||||
generate-files:
|
|
||||||
$(eval $@_TMP_OUT := $(shell mktemp -d -t moby-output.XXXXXXXXXX))
|
|
||||||
@if [ -z "$($@_TMP_OUT)" ]; then \
|
|
||||||
echo "Temp dir is not set"; \
|
|
||||||
exit 1; \
|
|
||||||
fi
|
|
||||||
$(BUILD_CMD) --target "update" \
|
|
||||||
--output "type=local,dest=$($@_TMP_OUT)" \
|
|
||||||
--file "./hack/dockerfiles/generate-files.Dockerfile" .
|
|
||||||
cp -R "$($@_TMP_OUT)"/. .
|
|
||||||
rm -rf "$($@_TMP_OUT)"/*
|
|
||||||
|
|
|
@ -14,7 +14,7 @@ Moby is an open project guided by strong principles, aiming to be modular, flexi
|
||||||
It is open to the community to help set its direction.
|
It is open to the community to help set its direction.
|
||||||
|
|
||||||
- Modular: the project includes lots of components that have well-defined functions and APIs that work together.
|
- Modular: the project includes lots of components that have well-defined functions and APIs that work together.
|
||||||
- Batteries included but swappable: Moby includes enough components to build fully featured container systems, but its modular architecture ensures that most of the components can be swapped by different implementations.
|
- Batteries included but swappable: Moby includes enough components to build fully featured container system, but its modular architecture ensures that most of the components can be swapped by different implementations.
|
||||||
- Usable security: Moby provides secure defaults without compromising usability.
|
- Usable security: Moby provides secure defaults without compromising usability.
|
||||||
- Developer focused: The APIs are intended to be functional and useful to build powerful tools.
|
- Developer focused: The APIs are intended to be functional and useful to build powerful tools.
|
||||||
They are not necessarily intended as end user tools but as components aimed at developers.
|
They are not necessarily intended as end user tools but as components aimed at developers.
|
||||||
|
|
|
@ -1,9 +0,0 @@
|
||||||
# Reporting security issues
|
|
||||||
|
|
||||||
The Moby maintainers take security seriously. If you discover a security issue, please bring it to their attention right away!
|
|
||||||
|
|
||||||
### Reporting a Vulnerability
|
|
||||||
|
|
||||||
Please **DO NOT** file a public issue, instead send your report privately to security@docker.com.
|
|
||||||
|
|
||||||
Security reports are greatly appreciated and we will publicly thank you for it, although we keep your name confidential if you request it. We also like to send gifts—if you're into schwag, make sure to let us know. We currently do not offer a paid security bounty program, but are not ruling it out in the future.
|
|
19
TESTING.md
19
TESTING.md
|
@ -28,7 +28,7 @@ Most code changes will fall into one of the following categories.
|
||||||
### Writing tests for new features
|
### Writing tests for new features
|
||||||
|
|
||||||
New code should be covered by unit tests. If the code is difficult to test with
|
New code should be covered by unit tests. If the code is difficult to test with
|
||||||
unit tests, then that is a good sign that it should be refactored to make it
|
a unit tests then that is a good sign that it should be refactored to make it
|
||||||
easier to reuse and maintain. Consider accepting unexported interfaces instead
|
easier to reuse and maintain. Consider accepting unexported interfaces instead
|
||||||
of structs so that fakes can be provided for dependencies.
|
of structs so that fakes can be provided for dependencies.
|
||||||
|
|
||||||
|
@ -44,23 +44,16 @@ case. Error cases should be handled by unit tests.
|
||||||
|
|
||||||
Bugs fixes should include a unit test case which exercises the bug.
|
Bugs fixes should include a unit test case which exercises the bug.
|
||||||
|
|
||||||
A bug fix may also include new assertions in existing integration tests for the
|
A bug fix may also include new assertions in an existing integration tests for the
|
||||||
API endpoint.
|
API endpoint.
|
||||||
|
|
||||||
### Writing new integration tests
|
|
||||||
|
|
||||||
Note the `integration-cli` tests are deprecated; new tests will be rejected by
|
|
||||||
the CI.
|
|
||||||
|
|
||||||
Instead, implement new tests under `integration/`.
|
|
||||||
|
|
||||||
### Integration tests environment considerations
|
### Integration tests environment considerations
|
||||||
|
|
||||||
When adding new tests or modifying existing tests under `integration/`, testing
|
When adding new tests or modifying existing test under `integration/`, testing
|
||||||
environment should be properly considered. `skip.If` from
|
environment should be properly considered. `skip.If` from
|
||||||
[gotest.tools/skip](https://godoc.org/gotest.tools/skip) can be used to make the
|
[gotest.tools/skip](https://godoc.org/gotest.tools/skip) can be used to make the
|
||||||
test run conditionally. Full testing environment conditions can be found at
|
test run conditionally. Full testing environment conditions can be found at
|
||||||
[environment.go](https://github.com/moby/moby/blob/6b6eeed03b963a27085ea670f40cd5ff8a61f32e/testutil/environment/environment.go)
|
[environment.go](https://github.com/moby/moby/blob/cb37987ee11655ed6bbef663d245e55922354c68/internal/test/environment/environment.go)
|
||||||
|
|
||||||
Here is a quick example. If the test needs to interact with a docker daemon on
|
Here is a quick example. If the test needs to interact with a docker daemon on
|
||||||
the same host, the following condition should be checked within the test code
|
the same host, the following condition should be checked within the test code
|
||||||
|
@ -111,9 +104,9 @@ TEST_SKIP_INTEGRATION and/or TEST_SKIP_INTEGRATION_CLI environment variables.
|
||||||
Flags specific to each suite can be set in the TESTFLAGS_INTEGRATION and
|
Flags specific to each suite can be set in the TESTFLAGS_INTEGRATION and
|
||||||
TESTFLAGS_INTEGRATION_CLI environment variables.
|
TESTFLAGS_INTEGRATION_CLI environment variables.
|
||||||
|
|
||||||
If all you want is to specify a test filter to run, you can set the
|
If all you want is to specity a test filter to run, you can set the
|
||||||
`TEST_FILTER` environment variable. This ends up getting passed directly to `go
|
`TEST_FILTER` environment variable. This ends up getting passed directly to `go
|
||||||
test -run` (or `go test -check-f`, depending on the test suite). It will also
|
test -run` (or `go test -check-f`, dpenending on the test suite). It will also
|
||||||
automatically set the other above mentioned environment variables accordingly.
|
automatically set the other above mentioned environment variables accordingly.
|
||||||
|
|
||||||
### Go Version
|
### Go Version
|
||||||
|
|
|
@ -37,6 +37,6 @@ There is hopefully enough example material in the file for you to copy a similar
|
||||||
|
|
||||||
When you make edits to `swagger.yaml`, you may want to check the generated API documentation to ensure it renders correctly.
|
When you make edits to `swagger.yaml`, you may want to check the generated API documentation to ensure it renders correctly.
|
||||||
|
|
||||||
Run `make swagger-docs` and a preview will be running at `http://localhost:9000`. Some of the styling may be incorrect, but you'll be able to ensure that it is generating the correct documentation.
|
Run `make swagger-docs` and a preview will be running at `http://localhost`. Some of the styling may be incorrect, but you'll be able to ensure that it is generating the correct documentation.
|
||||||
|
|
||||||
The production documentation is generated by vendoring `swagger.yaml` into [docker/docker.github.io](https://github.com/docker/docker.github.io).
|
The production documentation is generated by vendoring `swagger.yaml` into [docker/docker.github.io](https://github.com/docker/docker.github.io).
|
||||||
|
|
|
@ -2,17 +2,8 @@ package api // import "github.com/docker/docker/api"
|
||||||
|
|
||||||
// Common constants for daemon and client.
|
// Common constants for daemon and client.
|
||||||
const (
|
const (
|
||||||
// DefaultVersion of the current REST API.
|
// DefaultVersion of Current REST API
|
||||||
DefaultVersion = "1.45"
|
DefaultVersion = "1.40"
|
||||||
|
|
||||||
// MinSupportedAPIVersion is the minimum API version that can be supported
|
|
||||||
// by the API server, specified as "major.minor". Note that the daemon
|
|
||||||
// may be configured with a different minimum API version, as returned
|
|
||||||
// in [github.com/docker/docker/api/types.Version.MinAPIVersion].
|
|
||||||
//
|
|
||||||
// API requests for API versions lower than the configured version produce
|
|
||||||
// an error.
|
|
||||||
MinSupportedAPIVersion = "1.24"
|
|
||||||
|
|
||||||
// NoBaseImageSpecifier is the symbol used by the FROM
|
// NoBaseImageSpecifier is the symbol used by the FROM
|
||||||
// command to specify that no base image is to be used.
|
// command to specify that no base image is to be used.
|
||||||
|
|
6
api/common_unix.go
Normal file
6
api/common_unix.go
Normal file
|
@ -0,0 +1,6 @@
|
||||||
|
// +build !windows
|
||||||
|
|
||||||
|
package api // import "github.com/docker/docker/api"
|
||||||
|
|
||||||
|
// MinVersion represents Minimum REST API version supported
|
||||||
|
const MinVersion = "1.12"
|
8
api/common_windows.go
Normal file
8
api/common_windows.go
Normal file
|
@ -0,0 +1,8 @@
|
||||||
|
package api // import "github.com/docker/docker/api"
|
||||||
|
|
||||||
|
// MinVersion represents Minimum REST API version supported
|
||||||
|
// Technically the first daemon API version released on Windows is v1.25 in
|
||||||
|
// engine version 1.13. However, some clients are explicitly using downlevel
|
||||||
|
// APIs (e.g. docker-compose v2.1 file format) and that is just too restrictive.
|
||||||
|
// Hence also allowing 1.24 on Windows.
|
||||||
|
const MinVersion string = "1.24"
|
|
@ -3,25 +3,24 @@ package build // import "github.com/docker/docker/api/server/backend/build"
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"strconv"
|
|
||||||
|
|
||||||
"github.com/distribution/reference"
|
"github.com/docker/distribution/reference"
|
||||||
"github.com/docker/docker/api/types"
|
"github.com/docker/docker/api/types"
|
||||||
"github.com/docker/docker/api/types/backend"
|
"github.com/docker/docker/api/types/backend"
|
||||||
"github.com/docker/docker/api/types/events"
|
|
||||||
"github.com/docker/docker/builder"
|
"github.com/docker/docker/builder"
|
||||||
buildkit "github.com/docker/docker/builder/builder-next"
|
buildkit "github.com/docker/docker/builder/builder-next"
|
||||||
daemonevents "github.com/docker/docker/daemon/events"
|
"github.com/docker/docker/builder/fscache"
|
||||||
"github.com/docker/docker/image"
|
"github.com/docker/docker/image"
|
||||||
"github.com/docker/docker/pkg/stringid"
|
"github.com/docker/docker/pkg/stringid"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
"golang.org/x/sync/errgroup"
|
||||||
"google.golang.org/grpc"
|
"google.golang.org/grpc"
|
||||||
)
|
)
|
||||||
|
|
||||||
// ImageComponent provides an interface for working with images
|
// ImageComponent provides an interface for working with images
|
||||||
type ImageComponent interface {
|
type ImageComponent interface {
|
||||||
SquashImage(from string, to string) (string, error)
|
SquashImage(from string, to string) (string, error)
|
||||||
TagImage(context.Context, image.ID, reference.Named) error
|
TagImageWithReference(image.ID, reference.Named) error
|
||||||
}
|
}
|
||||||
|
|
||||||
// Builder defines interface for running a build
|
// Builder defines interface for running a build
|
||||||
|
@ -32,14 +31,14 @@ type Builder interface {
|
||||||
// Backend provides build functionality to the API router
|
// Backend provides build functionality to the API router
|
||||||
type Backend struct {
|
type Backend struct {
|
||||||
builder Builder
|
builder Builder
|
||||||
|
fsCache *fscache.FSCache
|
||||||
imageComponent ImageComponent
|
imageComponent ImageComponent
|
||||||
buildkit *buildkit.Builder
|
buildkit *buildkit.Builder
|
||||||
eventsService *daemonevents.Events
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewBackend creates a new build backend from components
|
// NewBackend creates a new build backend from components
|
||||||
func NewBackend(components ImageComponent, builder Builder, buildkit *buildkit.Builder, es *daemonevents.Events) (*Backend, error) {
|
func NewBackend(components ImageComponent, builder Builder, fsCache *fscache.FSCache, buildkit *buildkit.Builder) (*Backend, error) {
|
||||||
return &Backend{imageComponent: components, builder: builder, buildkit: buildkit, eventsService: es}, nil
|
return &Backend{imageComponent: components, builder: builder, fsCache: fsCache, buildkit: buildkit}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// RegisterGRPC registers buildkit controller to the grpc server.
|
// RegisterGRPC registers buildkit controller to the grpc server.
|
||||||
|
@ -54,7 +53,7 @@ func (b *Backend) Build(ctx context.Context, config backend.BuildConfig) (string
|
||||||
options := config.Options
|
options := config.Options
|
||||||
useBuildKit := options.Version == types.BuilderBuildKit
|
useBuildKit := options.Version == types.BuilderBuildKit
|
||||||
|
|
||||||
tags, err := sanitizeRepoAndTags(options.Tags)
|
tagger, err := NewTagger(b.imageComponent, config.ProgressWriter.StdoutFormatter, options.Tags)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
|
@ -76,7 +75,7 @@ func (b *Backend) Build(ctx context.Context, config backend.BuildConfig) (string
|
||||||
return "", nil
|
return "", nil
|
||||||
}
|
}
|
||||||
|
|
||||||
imageID := build.ImageID
|
var imageID = build.ImageID
|
||||||
if options.Squash {
|
if options.Squash {
|
||||||
if imageID, err = squashBuild(build, b.imageComponent); err != nil {
|
if imageID, err = squashBuild(build, b.imageComponent); err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
|
@ -92,24 +91,42 @@ func (b *Backend) Build(ctx context.Context, config backend.BuildConfig) (string
|
||||||
stdout := config.ProgressWriter.StdoutFormatter
|
stdout := config.ProgressWriter.StdoutFormatter
|
||||||
fmt.Fprintf(stdout, "Successfully built %s\n", stringid.TruncateID(imageID))
|
fmt.Fprintf(stdout, "Successfully built %s\n", stringid.TruncateID(imageID))
|
||||||
}
|
}
|
||||||
if imageID != "" && !useBuildKit {
|
if imageID != "" {
|
||||||
err = tagImages(ctx, b.imageComponent, config.ProgressWriter.StdoutFormatter, image.ID(imageID), tags)
|
err = tagger.TagImages(image.ID(imageID))
|
||||||
}
|
}
|
||||||
return imageID, err
|
return imageID, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// PruneCache removes all cached build sources
|
// PruneCache removes all cached build sources
|
||||||
func (b *Backend) PruneCache(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error) {
|
func (b *Backend) PruneCache(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error) {
|
||||||
buildCacheSize, cacheIDs, err := b.buildkit.Prune(ctx, opts)
|
eg, ctx := errgroup.WithContext(ctx)
|
||||||
|
|
||||||
|
var fsCacheSize uint64
|
||||||
|
eg.Go(func() error {
|
||||||
|
var err error
|
||||||
|
fsCacheSize, err = b.fsCache.Prune(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "failed to prune build cache")
|
return errors.Wrap(err, "failed to prune fscache")
|
||||||
}
|
}
|
||||||
b.eventsService.Log(events.ActionPrune, events.BuilderEventType, events.Actor{
|
return nil
|
||||||
Attributes: map[string]string{
|
|
||||||
"reclaimed": strconv.FormatInt(buildCacheSize, 10),
|
|
||||||
},
|
|
||||||
})
|
})
|
||||||
return &types.BuildCachePruneReport{SpaceReclaimed: uint64(buildCacheSize), CachesDeleted: cacheIDs}, nil
|
|
||||||
|
var buildCacheSize int64
|
||||||
|
var cacheIDs []string
|
||||||
|
eg.Go(func() error {
|
||||||
|
var err error
|
||||||
|
buildCacheSize, cacheIDs, err = b.buildkit.Prune(ctx, opts)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrap(err, "failed to prune build cache")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
|
||||||
|
if err := eg.Wait(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &types.BuildCachePruneReport{SpaceReclaimed: fsCacheSize + uint64(buildCacheSize), CachesDeleted: cacheIDs}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Cancel cancels the build by ID
|
// Cancel cancels the build by ID
|
||||||
|
|
|
@ -1,31 +1,55 @@
|
||||||
package build // import "github.com/docker/docker/api/server/backend/build"
|
package build // import "github.com/docker/docker/api/server/backend/build"
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
|
|
||||||
"github.com/distribution/reference"
|
"github.com/docker/distribution/reference"
|
||||||
"github.com/docker/docker/image"
|
"github.com/docker/docker/image"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
)
|
)
|
||||||
|
|
||||||
// tagImages creates image tags for the imageID.
|
// Tagger is responsible for tagging an image created by a builder
|
||||||
func tagImages(ctx context.Context, ic ImageComponent, stdout io.Writer, imageID image.ID, repoAndTags []reference.Named) error {
|
type Tagger struct {
|
||||||
for _, rt := range repoAndTags {
|
imageComponent ImageComponent
|
||||||
if err := ic.TagImage(ctx, imageID, rt); err != nil {
|
stdout io.Writer
|
||||||
|
repoAndTags []reference.Named
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewTagger returns a new Tagger for tagging the images of a build.
|
||||||
|
// If any of the names are invalid tags an error is returned.
|
||||||
|
func NewTagger(backend ImageComponent, stdout io.Writer, names []string) (*Tagger, error) {
|
||||||
|
reposAndTags, err := sanitizeRepoAndTags(names)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &Tagger{
|
||||||
|
imageComponent: backend,
|
||||||
|
stdout: stdout,
|
||||||
|
repoAndTags: reposAndTags,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// TagImages creates image tags for the imageID
|
||||||
|
func (bt *Tagger) TagImages(imageID image.ID) error {
|
||||||
|
for _, rt := range bt.repoAndTags {
|
||||||
|
if err := bt.imageComponent.TagImageWithReference(imageID, rt); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
_, _ = fmt.Fprintln(stdout, "Successfully tagged", reference.FamiliarString(rt))
|
fmt.Fprintf(bt.stdout, "Successfully tagged %s\n", reference.FamiliarString(rt))
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// sanitizeRepoAndTags parses the raw "t" parameter received from the client
|
// sanitizeRepoAndTags parses the raw "t" parameter received from the client
|
||||||
// to a slice of repoAndTag. It removes duplicates, and validates each name
|
// to a slice of repoAndTag.
|
||||||
// to not contain a digest.
|
// It also validates each repoName and tag.
|
||||||
func sanitizeRepoAndTags(names []string) (repoAndTags []reference.Named, err error) {
|
func sanitizeRepoAndTags(names []string) ([]reference.Named, error) {
|
||||||
uniqNames := map[string]struct{}{}
|
var (
|
||||||
|
repoAndTags []reference.Named
|
||||||
|
// This map is used for deduplicating the "-t" parameter.
|
||||||
|
uniqNames = make(map[string]struct{})
|
||||||
|
)
|
||||||
for _, repo := range names {
|
for _, repo := range names {
|
||||||
if repo == "" {
|
if repo == "" {
|
||||||
continue
|
continue
|
||||||
|
@ -36,12 +60,14 @@ func sanitizeRepoAndTags(names []string) (repoAndTags []reference.Named, err err
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
if _, ok := ref.(reference.Digested); ok {
|
if _, isCanonical := ref.(reference.Canonical); isCanonical {
|
||||||
return nil, errors.New("build tag cannot contain a digest")
|
return nil, errors.New("build tag cannot contain a digest")
|
||||||
}
|
}
|
||||||
|
|
||||||
ref = reference.TagNameOnly(ref)
|
ref = reference.TagNameOnly(ref)
|
||||||
|
|
||||||
nameWithTag := ref.String()
|
nameWithTag := ref.String()
|
||||||
|
|
||||||
if _, exists := uniqNames[nameWithTag]; !exists {
|
if _, exists := uniqNames[nameWithTag]; !exists {
|
||||||
uniqNames[nameWithTag] = struct{}{}
|
uniqNames[nameWithTag] = struct{}{}
|
||||||
repoAndTags = append(repoAndTags, ref)
|
repoAndTags = append(repoAndTags, ref)
|
||||||
|
|
|
@ -1,152 +0,0 @@
|
||||||
package httpstatus // import "github.com/docker/docker/api/server/httpstatus"
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
|
|
||||||
cerrdefs "github.com/containerd/containerd/errdefs"
|
|
||||||
"github.com/containerd/log"
|
|
||||||
"github.com/docker/distribution/registry/api/errcode"
|
|
||||||
"github.com/docker/docker/errdefs"
|
|
||||||
"google.golang.org/grpc/codes"
|
|
||||||
"google.golang.org/grpc/status"
|
|
||||||
)
|
|
||||||
|
|
||||||
type causer interface {
|
|
||||||
Cause() error
|
|
||||||
}
|
|
||||||
|
|
||||||
// FromError retrieves status code from error message.
|
|
||||||
func FromError(err error) int {
|
|
||||||
if err == nil {
|
|
||||||
log.G(context.TODO()).WithError(err).Error("unexpected HTTP error handling")
|
|
||||||
return http.StatusInternalServerError
|
|
||||||
}
|
|
||||||
|
|
||||||
var statusCode int
|
|
||||||
|
|
||||||
// Stop right there
|
|
||||||
// Are you sure you should be adding a new error class here? Do one of the existing ones work?
|
|
||||||
|
|
||||||
// Note that the below functions are already checking the error causal chain for matches.
|
|
||||||
switch {
|
|
||||||
case errdefs.IsNotFound(err):
|
|
||||||
statusCode = http.StatusNotFound
|
|
||||||
case errdefs.IsInvalidParameter(err):
|
|
||||||
statusCode = http.StatusBadRequest
|
|
||||||
case errdefs.IsConflict(err):
|
|
||||||
statusCode = http.StatusConflict
|
|
||||||
case errdefs.IsUnauthorized(err):
|
|
||||||
statusCode = http.StatusUnauthorized
|
|
||||||
case errdefs.IsUnavailable(err):
|
|
||||||
statusCode = http.StatusServiceUnavailable
|
|
||||||
case errdefs.IsForbidden(err):
|
|
||||||
statusCode = http.StatusForbidden
|
|
||||||
case errdefs.IsNotModified(err):
|
|
||||||
statusCode = http.StatusNotModified
|
|
||||||
case errdefs.IsNotImplemented(err):
|
|
||||||
statusCode = http.StatusNotImplemented
|
|
||||||
case errdefs.IsSystem(err) || errdefs.IsUnknown(err) || errdefs.IsDataLoss(err) || errdefs.IsDeadline(err) || errdefs.IsCancelled(err):
|
|
||||||
statusCode = http.StatusInternalServerError
|
|
||||||
default:
|
|
||||||
statusCode = statusCodeFromGRPCError(err)
|
|
||||||
if statusCode != http.StatusInternalServerError {
|
|
||||||
return statusCode
|
|
||||||
}
|
|
||||||
statusCode = statusCodeFromContainerdError(err)
|
|
||||||
if statusCode != http.StatusInternalServerError {
|
|
||||||
return statusCode
|
|
||||||
}
|
|
||||||
statusCode = statusCodeFromDistributionError(err)
|
|
||||||
if statusCode != http.StatusInternalServerError {
|
|
||||||
return statusCode
|
|
||||||
}
|
|
||||||
if e, ok := err.(causer); ok {
|
|
||||||
return FromError(e.Cause())
|
|
||||||
}
|
|
||||||
|
|
||||||
log.G(context.TODO()).WithFields(log.Fields{
|
|
||||||
"module": "api",
|
|
||||||
"error": err,
|
|
||||||
"error_type": fmt.Sprintf("%T", err),
|
|
||||||
}).Debug("FIXME: Got an API for which error does not match any expected type!!!")
|
|
||||||
}
|
|
||||||
|
|
||||||
if statusCode == 0 {
|
|
||||||
statusCode = http.StatusInternalServerError
|
|
||||||
}
|
|
||||||
|
|
||||||
return statusCode
|
|
||||||
}
|
|
||||||
|
|
||||||
// statusCodeFromGRPCError returns status code according to gRPC error
|
|
||||||
func statusCodeFromGRPCError(err error) int {
|
|
||||||
switch status.Code(err) {
|
|
||||||
case codes.InvalidArgument: // code 3
|
|
||||||
return http.StatusBadRequest
|
|
||||||
case codes.NotFound: // code 5
|
|
||||||
return http.StatusNotFound
|
|
||||||
case codes.AlreadyExists: // code 6
|
|
||||||
return http.StatusConflict
|
|
||||||
case codes.PermissionDenied: // code 7
|
|
||||||
return http.StatusForbidden
|
|
||||||
case codes.FailedPrecondition: // code 9
|
|
||||||
return http.StatusBadRequest
|
|
||||||
case codes.Unauthenticated: // code 16
|
|
||||||
return http.StatusUnauthorized
|
|
||||||
case codes.OutOfRange: // code 11
|
|
||||||
return http.StatusBadRequest
|
|
||||||
case codes.Unimplemented: // code 12
|
|
||||||
return http.StatusNotImplemented
|
|
||||||
case codes.Unavailable: // code 14
|
|
||||||
return http.StatusServiceUnavailable
|
|
||||||
default:
|
|
||||||
// codes.Canceled(1)
|
|
||||||
// codes.Unknown(2)
|
|
||||||
// codes.DeadlineExceeded(4)
|
|
||||||
// codes.ResourceExhausted(8)
|
|
||||||
// codes.Aborted(10)
|
|
||||||
// codes.Internal(13)
|
|
||||||
// codes.DataLoss(15)
|
|
||||||
return http.StatusInternalServerError
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// statusCodeFromDistributionError returns status code according to registry errcode
|
|
||||||
// code is loosely based on errcode.ServeJSON() in docker/distribution
|
|
||||||
func statusCodeFromDistributionError(err error) int {
|
|
||||||
switch errs := err.(type) {
|
|
||||||
case errcode.Errors:
|
|
||||||
if len(errs) < 1 {
|
|
||||||
return http.StatusInternalServerError
|
|
||||||
}
|
|
||||||
if _, ok := errs[0].(errcode.ErrorCoder); ok {
|
|
||||||
return statusCodeFromDistributionError(errs[0])
|
|
||||||
}
|
|
||||||
case errcode.ErrorCoder:
|
|
||||||
return errs.ErrorCode().Descriptor().HTTPStatusCode
|
|
||||||
}
|
|
||||||
return http.StatusInternalServerError
|
|
||||||
}
|
|
||||||
|
|
||||||
// statusCodeFromContainerdError returns status code for containerd errors when
|
|
||||||
// consumed directly (not through gRPC)
|
|
||||||
func statusCodeFromContainerdError(err error) int {
|
|
||||||
switch {
|
|
||||||
case cerrdefs.IsInvalidArgument(err):
|
|
||||||
return http.StatusBadRequest
|
|
||||||
case cerrdefs.IsNotFound(err):
|
|
||||||
return http.StatusNotFound
|
|
||||||
case cerrdefs.IsAlreadyExists(err):
|
|
||||||
return http.StatusConflict
|
|
||||||
case cerrdefs.IsFailedPrecondition(err):
|
|
||||||
return http.StatusPreconditionFailed
|
|
||||||
case cerrdefs.IsUnavailable(err):
|
|
||||||
return http.StatusServiceUnavailable
|
|
||||||
case cerrdefs.IsNotImplemented(err):
|
|
||||||
return http.StatusNotImplemented
|
|
||||||
default:
|
|
||||||
return http.StatusInternalServerError
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -12,4 +12,5 @@ import (
|
||||||
// container configuration.
|
// container configuration.
|
||||||
type ContainerDecoder interface {
|
type ContainerDecoder interface {
|
||||||
DecodeConfig(src io.Reader) (*container.Config, *container.HostConfig, *network.NetworkingConfig, error)
|
DecodeConfig(src io.Reader) (*container.Config, *container.HostConfig, *network.NetworkingConfig, error)
|
||||||
|
DecodeHostConfig(src io.Reader) (*container.HostConfig, error)
|
||||||
}
|
}
|
||||||
|
|
9
api/server/httputils/errors_deprecated.go
Normal file
9
api/server/httputils/errors_deprecated.go
Normal file
|
@ -0,0 +1,9 @@
|
||||||
|
package httputils // import "github.com/docker/docker/api/server/httputils"
|
||||||
|
import "github.com/docker/docker/errdefs"
|
||||||
|
|
||||||
|
// GetHTTPErrorStatusCode retrieves status code from error message.
|
||||||
|
//
|
||||||
|
// Deprecated: use errdefs.GetHTTPErrorStatusCode
|
||||||
|
func GetHTTPErrorStatusCode(err error) int {
|
||||||
|
return errdefs.GetHTTPErrorStatusCode(err)
|
||||||
|
}
|
|
@ -1,12 +1,9 @@
|
||||||
package httputils // import "github.com/docker/docker/api/server/httputils"
|
package httputils // import "github.com/docker/docker/api/server/httputils"
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
|
||||||
"net/http"
|
"net/http"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/distribution/reference"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// BoolValue transforms a form value in different formats into a boolean type.
|
// BoolValue transforms a form value in different formats into a boolean type.
|
||||||
|
@ -44,38 +41,6 @@ func Int64ValueOrDefault(r *http.Request, field string, def int64) (int64, error
|
||||||
return def, nil
|
return def, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// RepoTagReference parses form values "repo" and "tag" and returns a valid
|
|
||||||
// reference with repository and tag.
|
|
||||||
// If repo is empty, then a nil reference is returned.
|
|
||||||
// If no tag is given, then the default "latest" tag is set.
|
|
||||||
func RepoTagReference(repo, tag string) (reference.NamedTagged, error) {
|
|
||||||
if repo == "" {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
ref, err := reference.ParseNormalizedNamed(repo)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, isDigested := ref.(reference.Digested); isDigested {
|
|
||||||
return nil, fmt.Errorf("cannot import digest reference")
|
|
||||||
}
|
|
||||||
|
|
||||||
if tag != "" {
|
|
||||||
return reference.WithTag(ref, tag)
|
|
||||||
}
|
|
||||||
|
|
||||||
withDefaultTag := reference.TagNameOnly(ref)
|
|
||||||
|
|
||||||
namedTagged, ok := withDefaultTag.(reference.NamedTagged)
|
|
||||||
if !ok {
|
|
||||||
return nil, fmt.Errorf("unexpected reference: %q", ref.String())
|
|
||||||
}
|
|
||||||
|
|
||||||
return namedTagged, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ArchiveOptions stores archive information for different operations.
|
// ArchiveOptions stores archive information for different operations.
|
||||||
type ArchiveOptions struct {
|
type ArchiveOptions struct {
|
||||||
Name string
|
Name string
|
||||||
|
|
|
@ -23,7 +23,7 @@ func TestBoolValue(t *testing.T) {
|
||||||
for c, e := range cases {
|
for c, e := range cases {
|
||||||
v := url.Values{}
|
v := url.Values{}
|
||||||
v.Set("test", c)
|
v.Set("test", c)
|
||||||
r, _ := http.NewRequest(http.MethodPost, "", nil)
|
r, _ := http.NewRequest("POST", "", nil)
|
||||||
r.Form = v
|
r.Form = v
|
||||||
|
|
||||||
a := BoolValue(r, "test")
|
a := BoolValue(r, "test")
|
||||||
|
@ -34,14 +34,14 @@ func TestBoolValue(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBoolValueOrDefault(t *testing.T) {
|
func TestBoolValueOrDefault(t *testing.T) {
|
||||||
r, _ := http.NewRequest(http.MethodGet, "", nil)
|
r, _ := http.NewRequest("GET", "", nil)
|
||||||
if !BoolValueOrDefault(r, "queryparam", true) {
|
if !BoolValueOrDefault(r, "queryparam", true) {
|
||||||
t.Fatal("Expected to get true default value, got false")
|
t.Fatal("Expected to get true default value, got false")
|
||||||
}
|
}
|
||||||
|
|
||||||
v := url.Values{}
|
v := url.Values{}
|
||||||
v.Set("param", "")
|
v.Set("param", "")
|
||||||
r, _ = http.NewRequest(http.MethodGet, "", nil)
|
r, _ = http.NewRequest("GET", "", nil)
|
||||||
r.Form = v
|
r.Form = v
|
||||||
if BoolValueOrDefault(r, "param", true) {
|
if BoolValueOrDefault(r, "param", true) {
|
||||||
t.Fatal("Expected not to get true")
|
t.Fatal("Expected not to get true")
|
||||||
|
@ -59,7 +59,7 @@ func TestInt64ValueOrZero(t *testing.T) {
|
||||||
for c, e := range cases {
|
for c, e := range cases {
|
||||||
v := url.Values{}
|
v := url.Values{}
|
||||||
v.Set("test", c)
|
v.Set("test", c)
|
||||||
r, _ := http.NewRequest(http.MethodPost, "", nil)
|
r, _ := http.NewRequest("POST", "", nil)
|
||||||
r.Form = v
|
r.Form = v
|
||||||
|
|
||||||
a := Int64ValueOrZero(r, "test")
|
a := Int64ValueOrZero(r, "test")
|
||||||
|
@ -79,7 +79,7 @@ func TestInt64ValueOrDefault(t *testing.T) {
|
||||||
for c, e := range cases {
|
for c, e := range cases {
|
||||||
v := url.Values{}
|
v := url.Values{}
|
||||||
v.Set("test", c)
|
v.Set("test", c)
|
||||||
r, _ := http.NewRequest(http.MethodPost, "", nil)
|
r, _ := http.NewRequest("POST", "", nil)
|
||||||
r.Form = v
|
r.Form = v
|
||||||
|
|
||||||
a, err := Int64ValueOrDefault(r, "test", -1)
|
a, err := Int64ValueOrDefault(r, "test", -1)
|
||||||
|
@ -95,7 +95,7 @@ func TestInt64ValueOrDefault(t *testing.T) {
|
||||||
func TestInt64ValueOrDefaultWithError(t *testing.T) {
|
func TestInt64ValueOrDefaultWithError(t *testing.T) {
|
||||||
v := url.Values{}
|
v := url.Values{}
|
||||||
v.Set("test", "invalid")
|
v.Set("test", "invalid")
|
||||||
r, _ := http.NewRequest(http.MethodPost, "", nil)
|
r, _ := http.NewRequest("POST", "", nil)
|
||||||
r.Form = v
|
r.Form = v
|
||||||
|
|
||||||
_, err := Int64ValueOrDefault(r, "test", -1)
|
_, err := Int64ValueOrDefault(r, "test", -1)
|
||||||
|
|
|
@ -2,14 +2,18 @@ package httputils // import "github.com/docker/docker/api/server/httputils"
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"encoding/json"
|
|
||||||
"io"
|
"io"
|
||||||
"mime"
|
"mime"
|
||||||
"net/http"
|
"net/http"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
"github.com/docker/docker/api/types"
|
||||||
|
"github.com/docker/docker/api/types/versions"
|
||||||
"github.com/docker/docker/errdefs"
|
"github.com/docker/docker/errdefs"
|
||||||
|
"github.com/gorilla/mux"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
"google.golang.org/grpc/status"
|
||||||
)
|
)
|
||||||
|
|
||||||
// APIVersionKey is the client's requested API version.
|
// APIVersionKey is the client's requested API version.
|
||||||
|
@ -27,7 +31,7 @@ func HijackConnection(w http.ResponseWriter) (io.ReadCloser, io.Writer, error) {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
// Flush the options to make sure the client sets the raw mode
|
// Flush the options to make sure the client sets the raw mode
|
||||||
_, _ = conn.Write([]byte{})
|
conn.Write([]byte{})
|
||||||
return conn, conn, nil
|
return conn, conn, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -37,9 +41,9 @@ func CloseStreams(streams ...interface{}) {
|
||||||
if tcpc, ok := stream.(interface {
|
if tcpc, ok := stream.(interface {
|
||||||
CloseWrite() error
|
CloseWrite() error
|
||||||
}); ok {
|
}); ok {
|
||||||
_ = tcpc.CloseWrite()
|
tcpc.CloseWrite()
|
||||||
} else if closer, ok := stream.(io.Closer); ok {
|
} else if closer, ok := stream.(io.Closer); ok {
|
||||||
_ = closer.Close()
|
closer.Close()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -49,50 +53,17 @@ func CheckForJSON(r *http.Request) error {
|
||||||
ct := r.Header.Get("Content-Type")
|
ct := r.Header.Get("Content-Type")
|
||||||
|
|
||||||
// No Content-Type header is ok as long as there's no Body
|
// No Content-Type header is ok as long as there's no Body
|
||||||
if ct == "" && (r.Body == nil || r.ContentLength == 0) {
|
if ct == "" {
|
||||||
|
if r.Body == nil || r.ContentLength == 0 {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Otherwise it better be json
|
// Otherwise it better be json
|
||||||
return matchesContentType(ct, "application/json")
|
if matchesContentType(ct, "application/json") {
|
||||||
}
|
|
||||||
|
|
||||||
// ReadJSON validates the request to have the correct content-type, and decodes
|
|
||||||
// the request's Body into out.
|
|
||||||
func ReadJSON(r *http.Request, out interface{}) error {
|
|
||||||
err := CheckForJSON(r)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if r.Body == nil || r.ContentLength == 0 {
|
|
||||||
// an empty body is not invalid, so don't return an error; see
|
|
||||||
// https://lists.w3.org/Archives/Public/ietf-http-wg/2010JulSep/0272.html
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
return errdefs.InvalidParameter(errors.Errorf("Content-Type specified (%s) must be 'application/json'", ct))
|
||||||
dec := json.NewDecoder(r.Body)
|
|
||||||
err = dec.Decode(out)
|
|
||||||
defer r.Body.Close()
|
|
||||||
if err != nil {
|
|
||||||
if err == io.EOF {
|
|
||||||
return errdefs.InvalidParameter(errors.New("invalid JSON: got EOF while reading request body"))
|
|
||||||
}
|
|
||||||
return errdefs.InvalidParameter(errors.Wrap(err, "invalid JSON"))
|
|
||||||
}
|
|
||||||
|
|
||||||
if dec.More() {
|
|
||||||
return errdefs.InvalidParameter(errors.New("unexpected content after JSON"))
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// WriteJSON writes the value v to the http response stream as json with standard json encoding.
|
|
||||||
func WriteJSON(w http.ResponseWriter, code int, v interface{}) error {
|
|
||||||
w.Header().Set("Content-Type", "application/json")
|
|
||||||
w.WriteHeader(code)
|
|
||||||
enc := json.NewEncoder(w)
|
|
||||||
enc.SetEscapeHTML(false)
|
|
||||||
return enc.Encode(v)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// ParseForm ensures the request form is parsed even with invalid content types.
|
// ParseForm ensures the request form is parsed even with invalid content types.
|
||||||
|
@ -121,14 +92,33 @@ func VersionFromContext(ctx context.Context) string {
|
||||||
return ""
|
return ""
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// MakeErrorHandler makes an HTTP handler that decodes a Docker error and
|
||||||
|
// returns it in the response.
|
||||||
|
func MakeErrorHandler(err error) http.HandlerFunc {
|
||||||
|
return func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
statusCode := errdefs.GetHTTPErrorStatusCode(err)
|
||||||
|
vars := mux.Vars(r)
|
||||||
|
if apiVersionSupportsJSONErrors(vars["version"]) {
|
||||||
|
response := &types.ErrorResponse{
|
||||||
|
Message: err.Error(),
|
||||||
|
}
|
||||||
|
WriteJSON(w, statusCode, response)
|
||||||
|
} else {
|
||||||
|
http.Error(w, status.Convert(err).Message(), statusCode)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func apiVersionSupportsJSONErrors(version string) bool {
|
||||||
|
const firstAPIVersionWithJSONErrors = "1.23"
|
||||||
|
return version == "" || versions.GreaterThan(version, firstAPIVersionWithJSONErrors)
|
||||||
|
}
|
||||||
|
|
||||||
// matchesContentType validates the content type against the expected one
|
// matchesContentType validates the content type against the expected one
|
||||||
func matchesContentType(contentType, expectedType string) error {
|
func matchesContentType(contentType, expectedType string) bool {
|
||||||
mimetype, _, err := mime.ParseMediaType(contentType)
|
mimetype, _, err := mime.ParseMediaType(contentType)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errdefs.InvalidParameter(errors.Wrapf(err, "malformed Content-Type header (%s)", contentType))
|
logrus.Errorf("Error parsing media type: %s error: %v", contentType, err)
|
||||||
}
|
}
|
||||||
if mimetype != expectedType {
|
return err == nil && mimetype == expectedType
|
||||||
return errdefs.InvalidParameter(errors.Errorf("unsupported Content-Type header (%s): must be '%s'", contentType, expectedType))
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,130 +1,18 @@
|
||||||
package httputils // import "github.com/docker/docker/api/server/httputils"
|
package httputils // import "github.com/docker/docker/api/server/httputils"
|
||||||
|
|
||||||
import (
|
import "testing"
|
||||||
"net/http"
|
|
||||||
"strings"
|
|
||||||
"testing"
|
|
||||||
)
|
|
||||||
|
|
||||||
// matchesContentType
|
// matchesContentType
|
||||||
func TestJsonContentType(t *testing.T) {
|
func TestJsonContentType(t *testing.T) {
|
||||||
err := matchesContentType("application/json", "application/json")
|
if !matchesContentType("application/json", "application/json") {
|
||||||
if err != nil {
|
t.Fail()
|
||||||
t.Error(err)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
err = matchesContentType("application/json; charset=utf-8", "application/json")
|
if !matchesContentType("application/json; charset=utf-8", "application/json") {
|
||||||
if err != nil {
|
t.Fail()
|
||||||
t.Error(err)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
expected := "unsupported Content-Type header (dockerapplication/json): must be 'application/json'"
|
if matchesContentType("dockerapplication/json", "application/json") {
|
||||||
err = matchesContentType("dockerapplication/json", "application/json")
|
t.Fail()
|
||||||
if err == nil || err.Error() != expected {
|
|
||||||
t.Errorf(`expected "%s", got "%v"`, expected, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
expected = "malformed Content-Type header (foo;;;bar): mime: invalid media parameter"
|
|
||||||
err = matchesContentType("foo;;;bar", "application/json")
|
|
||||||
if err == nil || err.Error() != expected {
|
|
||||||
t.Errorf(`expected "%s", got "%v"`, expected, err)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestReadJSON(t *testing.T) {
|
|
||||||
t.Run("nil body", func(t *testing.T) {
|
|
||||||
req, err := http.NewRequest(http.MethodPost, "https://example.com/some/path", nil)
|
|
||||||
if err != nil {
|
|
||||||
t.Error(err)
|
|
||||||
}
|
|
||||||
foo := struct{}{}
|
|
||||||
err = ReadJSON(req, &foo)
|
|
||||||
if err != nil {
|
|
||||||
t.Error(err)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("empty body", func(t *testing.T) {
|
|
||||||
req, err := http.NewRequest(http.MethodPost, "https://example.com/some/path", strings.NewReader(""))
|
|
||||||
if err != nil {
|
|
||||||
t.Error(err)
|
|
||||||
}
|
|
||||||
foo := struct{ SomeField string }{}
|
|
||||||
err = ReadJSON(req, &foo)
|
|
||||||
if err != nil {
|
|
||||||
t.Error(err)
|
|
||||||
}
|
|
||||||
if foo.SomeField != "" {
|
|
||||||
t.Errorf("expected: '', got: %s", foo.SomeField)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("with valid request", func(t *testing.T) {
|
|
||||||
req, err := http.NewRequest(http.MethodPost, "https://example.com/some/path", strings.NewReader(`{"SomeField":"some value"}`))
|
|
||||||
if err != nil {
|
|
||||||
t.Error(err)
|
|
||||||
}
|
|
||||||
req.Header.Set("Content-Type", "application/json")
|
|
||||||
foo := struct{ SomeField string }{}
|
|
||||||
err = ReadJSON(req, &foo)
|
|
||||||
if err != nil {
|
|
||||||
t.Error(err)
|
|
||||||
}
|
|
||||||
if foo.SomeField != "some value" {
|
|
||||||
t.Errorf("expected: 'some value', got: %s", foo.SomeField)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
t.Run("with whitespace", func(t *testing.T) {
|
|
||||||
req, err := http.NewRequest(http.MethodPost, "https://example.com/some/path", strings.NewReader(`
|
|
||||||
|
|
||||||
{"SomeField":"some value"}
|
|
||||||
|
|
||||||
`))
|
|
||||||
if err != nil {
|
|
||||||
t.Error(err)
|
|
||||||
}
|
|
||||||
req.Header.Set("Content-Type", "application/json")
|
|
||||||
foo := struct{ SomeField string }{}
|
|
||||||
err = ReadJSON(req, &foo)
|
|
||||||
if err != nil {
|
|
||||||
t.Error(err)
|
|
||||||
}
|
|
||||||
if foo.SomeField != "some value" {
|
|
||||||
t.Errorf("expected: 'some value', got: %s", foo.SomeField)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("with extra content", func(t *testing.T) {
|
|
||||||
req, err := http.NewRequest(http.MethodPost, "https://example.com/some/path", strings.NewReader(`{"SomeField":"some value"} and more content`))
|
|
||||||
if err != nil {
|
|
||||||
t.Error(err)
|
|
||||||
}
|
|
||||||
req.Header.Set("Content-Type", "application/json")
|
|
||||||
foo := struct{ SomeField string }{}
|
|
||||||
err = ReadJSON(req, &foo)
|
|
||||||
if err == nil {
|
|
||||||
t.Error("expected an error, got none")
|
|
||||||
}
|
|
||||||
expected := "unexpected content after JSON"
|
|
||||||
if err.Error() != expected {
|
|
||||||
t.Errorf("expected: '%s', got: %s", expected, err.Error())
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("invalid JSON", func(t *testing.T) {
|
|
||||||
req, err := http.NewRequest(http.MethodPost, "https://example.com/some/path", strings.NewReader(`{invalid json`))
|
|
||||||
if err != nil {
|
|
||||||
t.Error(err)
|
|
||||||
}
|
|
||||||
req.Header.Set("Content-Type", "application/json")
|
|
||||||
foo := struct{ SomeField string }{}
|
|
||||||
err = ReadJSON(req, &foo)
|
|
||||||
if err == nil {
|
|
||||||
t.Error("expected an error, got none")
|
|
||||||
}
|
|
||||||
expected := "invalid JSON: invalid character 'i' looking for beginning of object key string"
|
|
||||||
if err.Error() != expected {
|
|
||||||
t.Errorf("expected: '%s', got: %s", expected, err.Error())
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
15
api/server/httputils/httputils_write_json.go
Normal file
15
api/server/httputils/httputils_write_json.go
Normal file
|
@ -0,0 +1,15 @@
|
||||||
|
package httputils // import "github.com/docker/docker/api/server/httputils"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"net/http"
|
||||||
|
)
|
||||||
|
|
||||||
|
// WriteJSON writes the value v to the http response stream as json with standard json encoding.
|
||||||
|
func WriteJSON(w http.ResponseWriter, code int, v interface{}) error {
|
||||||
|
w.Header().Set("Content-Type", "application/json")
|
||||||
|
w.WriteHeader(code)
|
||||||
|
enc := json.NewEncoder(w)
|
||||||
|
enc.SetEscapeHTML(false)
|
||||||
|
return enc.Encode(v)
|
||||||
|
}
|
|
@ -7,8 +7,8 @@ import (
|
||||||
"net/url"
|
"net/url"
|
||||||
"sort"
|
"sort"
|
||||||
|
|
||||||
|
"github.com/docker/docker/api/types"
|
||||||
"github.com/docker/docker/api/types/backend"
|
"github.com/docker/docker/api/types/backend"
|
||||||
"github.com/docker/docker/api/types/container"
|
|
||||||
"github.com/docker/docker/pkg/ioutils"
|
"github.com/docker/docker/pkg/ioutils"
|
||||||
"github.com/docker/docker/pkg/jsonmessage"
|
"github.com/docker/docker/pkg/jsonmessage"
|
||||||
"github.com/docker/docker/pkg/stdcopy"
|
"github.com/docker/docker/pkg/stdcopy"
|
||||||
|
@ -16,7 +16,7 @@ import (
|
||||||
|
|
||||||
// WriteLogStream writes an encoded byte stream of log messages from the
|
// WriteLogStream writes an encoded byte stream of log messages from the
|
||||||
// messages channel, multiplexing them with a stdcopy.Writer if mux is true
|
// messages channel, multiplexing them with a stdcopy.Writer if mux is true
|
||||||
func WriteLogStream(_ context.Context, w io.Writer, msgs <-chan *backend.LogMessage, config *container.LogsOptions, mux bool) {
|
func WriteLogStream(_ context.Context, w io.Writer, msgs <-chan *backend.LogMessage, config *types.ContainerLogsOptions, mux bool) {
|
||||||
wf := ioutils.NewWriteFlusher(w)
|
wf := ioutils.NewWriteFlusher(w)
|
||||||
defer wf.Close()
|
defer wf.Close()
|
||||||
|
|
||||||
|
@ -51,10 +51,10 @@ func WriteLogStream(_ context.Context, w io.Writer, msgs <-chan *backend.LogMess
|
||||||
logLine = append([]byte(msg.Timestamp.Format(jsonmessage.RFC3339NanoFixed)+" "), logLine...)
|
logLine = append([]byte(msg.Timestamp.Format(jsonmessage.RFC3339NanoFixed)+" "), logLine...)
|
||||||
}
|
}
|
||||||
if msg.Source == "stdout" && config.ShowStdout {
|
if msg.Source == "stdout" && config.ShowStdout {
|
||||||
_, _ = outStream.Write(logLine)
|
outStream.Write(logLine)
|
||||||
}
|
}
|
||||||
if msg.Source == "stderr" && config.ShowStderr {
|
if msg.Source == "stderr" && config.ShowStderr {
|
||||||
_, _ = errStream.Write(logLine)
|
errStream.Write(logLine)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,9 +1,9 @@
|
||||||
package server // import "github.com/docker/docker/api/server"
|
package server // import "github.com/docker/docker/api/server"
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/containerd/log"
|
|
||||||
"github.com/docker/docker/api/server/httputils"
|
"github.com/docker/docker/api/server/httputils"
|
||||||
"github.com/docker/docker/api/server/middleware"
|
"github.com/docker/docker/api/server/middleware"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
)
|
)
|
||||||
|
|
||||||
// handlerWithGlobalMiddlewares wraps the handler function for a request with
|
// handlerWithGlobalMiddlewares wraps the handler function for a request with
|
||||||
|
@ -16,7 +16,7 @@ func (s *Server) handlerWithGlobalMiddlewares(handler httputils.APIFunc) httputi
|
||||||
next = m.WrapHandler(next)
|
next = m.WrapHandler(next)
|
||||||
}
|
}
|
||||||
|
|
||||||
if log.GetLevel() == log.DebugLevel {
|
if s.cfg.Logging && logrus.GetLevel() == logrus.DebugLevel {
|
||||||
next = middleware.DebugRequestMiddleware(next)
|
next = middleware.DebugRequestMiddleware(next)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -4,8 +4,7 @@ import (
|
||||||
"context"
|
"context"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
|
||||||
"github.com/containerd/log"
|
"github.com/sirupsen/logrus"
|
||||||
"github.com/docker/docker/api/types/registry"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// CORSMiddleware injects CORS headers to each request
|
// CORSMiddleware injects CORS headers to each request
|
||||||
|
@ -29,9 +28,9 @@ func (c CORSMiddleware) WrapHandler(handler func(ctx context.Context, w http.Res
|
||||||
corsHeaders = "*"
|
corsHeaders = "*"
|
||||||
}
|
}
|
||||||
|
|
||||||
log.G(ctx).Debugf("CORS header is enabled and set to: %s", corsHeaders)
|
logrus.Debugf("CORS header is enabled and set to: %s", corsHeaders)
|
||||||
w.Header().Add("Access-Control-Allow-Origin", corsHeaders)
|
w.Header().Add("Access-Control-Allow-Origin", corsHeaders)
|
||||||
w.Header().Add("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept, "+registry.AuthHeader)
|
w.Header().Add("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept, X-Registry-Auth")
|
||||||
w.Header().Add("Access-Control-Allow-Methods", "HEAD, GET, POST, DELETE, PUT, OPTIONS")
|
w.Header().Add("Access-Control-Allow-Methods", "HEAD, GET, POST, DELETE, PUT, OPTIONS")
|
||||||
return handler(ctx, w, r, vars)
|
return handler(ctx, w, r, vars)
|
||||||
}
|
}
|
||||||
|
|
|
@ -8,17 +8,17 @@ import (
|
||||||
"net/http"
|
"net/http"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/containerd/log"
|
|
||||||
"github.com/docker/docker/api/server/httputils"
|
"github.com/docker/docker/api/server/httputils"
|
||||||
"github.com/docker/docker/pkg/ioutils"
|
"github.com/docker/docker/pkg/ioutils"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
)
|
)
|
||||||
|
|
||||||
// DebugRequestMiddleware dumps the request to logger
|
// DebugRequestMiddleware dumps the request to logger
|
||||||
func DebugRequestMiddleware(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
func DebugRequestMiddleware(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||||
return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||||
log.G(ctx).Debugf("Calling %s %s", r.Method, r.RequestURI)
|
logrus.Debugf("Calling %s %s", r.Method, r.RequestURI)
|
||||||
|
|
||||||
if r.Method != http.MethodPost {
|
if r.Method != "POST" {
|
||||||
return handler(ctx, w, r, vars)
|
return handler(ctx, w, r, vars)
|
||||||
}
|
}
|
||||||
if err := httputils.CheckForJSON(r); err != nil {
|
if err := httputils.CheckForJSON(r); err != nil {
|
||||||
|
@ -44,9 +44,9 @@ func DebugRequestMiddleware(handler func(ctx context.Context, w http.ResponseWri
|
||||||
maskSecretKeys(postForm)
|
maskSecretKeys(postForm)
|
||||||
formStr, errMarshal := json.Marshal(postForm)
|
formStr, errMarshal := json.Marshal(postForm)
|
||||||
if errMarshal == nil {
|
if errMarshal == nil {
|
||||||
log.G(ctx).Debugf("form data: %s", string(formStr))
|
logrus.Debugf("form data: %s", string(formStr))
|
||||||
} else {
|
} else {
|
||||||
log.G(ctx).Debugf("form data: %q", postForm)
|
logrus.Debugf("form data: %q", postForm)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -3,8 +3,8 @@ package middleware // import "github.com/docker/docker/api/server/middleware"
|
||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"gotest.tools/v3/assert"
|
"gotest.tools/assert"
|
||||||
is "gotest.tools/v3/assert/cmp"
|
is "gotest.tools/assert/cmp"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestMaskSecretKeys(t *testing.T) {
|
func TestMaskSecretKeys(t *testing.T) {
|
||||||
|
|
|
@ -6,7 +6,6 @@ import (
|
||||||
"net/http"
|
"net/http"
|
||||||
"runtime"
|
"runtime"
|
||||||
|
|
||||||
"github.com/docker/docker/api"
|
|
||||||
"github.com/docker/docker/api/server/httputils"
|
"github.com/docker/docker/api/server/httputils"
|
||||||
"github.com/docker/docker/api/types/versions"
|
"github.com/docker/docker/api/types/versions"
|
||||||
)
|
)
|
||||||
|
@ -15,39 +14,18 @@ import (
|
||||||
// validates the client and server versions.
|
// validates the client and server versions.
|
||||||
type VersionMiddleware struct {
|
type VersionMiddleware struct {
|
||||||
serverVersion string
|
serverVersion string
|
||||||
|
defaultVersion string
|
||||||
// defaultAPIVersion is the default API version provided by the API server,
|
minVersion string
|
||||||
// specified as "major.minor". It is usually configured to the latest API
|
|
||||||
// version [github.com/docker/docker/api.DefaultVersion].
|
|
||||||
//
|
|
||||||
// API requests for API versions greater than this version are rejected by
|
|
||||||
// the server and produce a [versionUnsupportedError].
|
|
||||||
defaultAPIVersion string
|
|
||||||
|
|
||||||
// minAPIVersion is the minimum API version provided by the API server,
|
|
||||||
// specified as "major.minor".
|
|
||||||
//
|
|
||||||
// API requests for API versions lower than this version are rejected by
|
|
||||||
// the server and produce a [versionUnsupportedError].
|
|
||||||
minAPIVersion string
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewVersionMiddleware creates a VersionMiddleware with the given versions.
|
// NewVersionMiddleware creates a new VersionMiddleware
|
||||||
func NewVersionMiddleware(serverVersion, defaultAPIVersion, minAPIVersion string) (*VersionMiddleware, error) {
|
// with the default versions.
|
||||||
if versions.LessThan(defaultAPIVersion, api.MinSupportedAPIVersion) || versions.GreaterThan(defaultAPIVersion, api.DefaultVersion) {
|
func NewVersionMiddleware(s, d, m string) VersionMiddleware {
|
||||||
return nil, fmt.Errorf("invalid default API version (%s): must be between %s and %s", defaultAPIVersion, api.MinSupportedAPIVersion, api.DefaultVersion)
|
return VersionMiddleware{
|
||||||
|
serverVersion: s,
|
||||||
|
defaultVersion: d,
|
||||||
|
minVersion: m,
|
||||||
}
|
}
|
||||||
if versions.LessThan(minAPIVersion, api.MinSupportedAPIVersion) || versions.GreaterThan(minAPIVersion, api.DefaultVersion) {
|
|
||||||
return nil, fmt.Errorf("invalid minimum API version (%s): must be between %s and %s", minAPIVersion, api.MinSupportedAPIVersion, api.DefaultVersion)
|
|
||||||
}
|
|
||||||
if versions.GreaterThan(minAPIVersion, defaultAPIVersion) {
|
|
||||||
return nil, fmt.Errorf("invalid API version: the minimum API version (%s) is higher than the default version (%s)", minAPIVersion, defaultAPIVersion)
|
|
||||||
}
|
|
||||||
return &VersionMiddleware{
|
|
||||||
serverVersion: serverVersion,
|
|
||||||
defaultAPIVersion: defaultAPIVersion,
|
|
||||||
minAPIVersion: minAPIVersion,
|
|
||||||
}, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type versionUnsupportedError struct {
|
type versionUnsupportedError struct {
|
||||||
|
@ -67,20 +45,21 @@ func (e versionUnsupportedError) InvalidParameter() {}
|
||||||
func (v VersionMiddleware) WrapHandler(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
func (v VersionMiddleware) WrapHandler(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||||
return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||||
w.Header().Set("Server", fmt.Sprintf("Docker/%s (%s)", v.serverVersion, runtime.GOOS))
|
w.Header().Set("Server", fmt.Sprintf("Docker/%s (%s)", v.serverVersion, runtime.GOOS))
|
||||||
w.Header().Set("API-Version", v.defaultAPIVersion)
|
w.Header().Set("API-Version", v.defaultVersion)
|
||||||
w.Header().Set("OSType", runtime.GOOS)
|
w.Header().Set("OSType", runtime.GOOS)
|
||||||
|
|
||||||
apiVersion := vars["version"]
|
apiVersion := vars["version"]
|
||||||
if apiVersion == "" {
|
if apiVersion == "" {
|
||||||
apiVersion = v.defaultAPIVersion
|
apiVersion = v.defaultVersion
|
||||||
}
|
}
|
||||||
if versions.LessThan(apiVersion, v.minAPIVersion) {
|
if versions.LessThan(apiVersion, v.minVersion) {
|
||||||
return versionUnsupportedError{version: apiVersion, minVersion: v.minAPIVersion}
|
return versionUnsupportedError{version: apiVersion, minVersion: v.minVersion}
|
||||||
}
|
}
|
||||||
if versions.GreaterThan(apiVersion, v.defaultAPIVersion) {
|
if versions.GreaterThan(apiVersion, v.defaultVersion) {
|
||||||
return versionUnsupportedError{version: apiVersion, maxVersion: v.defaultAPIVersion}
|
return versionUnsupportedError{version: apiVersion, maxVersion: v.defaultVersion}
|
||||||
}
|
}
|
||||||
ctx = context.WithValue(ctx, httputils.APIVersionKey{}, apiVersion)
|
ctx = context.WithValue(ctx, httputils.APIVersionKey{}, apiVersion)
|
||||||
return handler(ctx, w, r, vars)
|
return handler(ctx, w, r, vars)
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -2,85 +2,30 @@ package middleware // import "github.com/docker/docker/api/server/middleware"
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/http/httptest"
|
"net/http/httptest"
|
||||||
"runtime"
|
"runtime"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/docker/docker/api"
|
|
||||||
"github.com/docker/docker/api/server/httputils"
|
"github.com/docker/docker/api/server/httputils"
|
||||||
"gotest.tools/v3/assert"
|
"gotest.tools/assert"
|
||||||
is "gotest.tools/v3/assert/cmp"
|
is "gotest.tools/assert/cmp"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestNewVersionMiddlewareValidation(t *testing.T) {
|
|
||||||
tests := []struct {
|
|
||||||
doc, defaultVersion, minVersion, expectedErr string
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
doc: "defaults",
|
|
||||||
defaultVersion: api.DefaultVersion,
|
|
||||||
minVersion: api.MinSupportedAPIVersion,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
doc: "invalid default lower than min",
|
|
||||||
defaultVersion: api.MinSupportedAPIVersion,
|
|
||||||
minVersion: api.DefaultVersion,
|
|
||||||
expectedErr: fmt.Sprintf("invalid API version: the minimum API version (%s) is higher than the default version (%s)", api.DefaultVersion, api.MinSupportedAPIVersion),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
doc: "invalid default too low",
|
|
||||||
defaultVersion: "0.1",
|
|
||||||
minVersion: api.MinSupportedAPIVersion,
|
|
||||||
expectedErr: fmt.Sprintf("invalid default API version (0.1): must be between %s and %s", api.MinSupportedAPIVersion, api.DefaultVersion),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
doc: "invalid default too high",
|
|
||||||
defaultVersion: "9999.9999",
|
|
||||||
minVersion: api.DefaultVersion,
|
|
||||||
expectedErr: fmt.Sprintf("invalid default API version (9999.9999): must be between %s and %s", api.MinSupportedAPIVersion, api.DefaultVersion),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
doc: "invalid minimum too low",
|
|
||||||
defaultVersion: api.MinSupportedAPIVersion,
|
|
||||||
minVersion: "0.1",
|
|
||||||
expectedErr: fmt.Sprintf("invalid minimum API version (0.1): must be between %s and %s", api.MinSupportedAPIVersion, api.DefaultVersion),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
doc: "invalid minimum too high",
|
|
||||||
defaultVersion: api.DefaultVersion,
|
|
||||||
minVersion: "9999.9999",
|
|
||||||
expectedErr: fmt.Sprintf("invalid minimum API version (9999.9999): must be between %s and %s", api.MinSupportedAPIVersion, api.DefaultVersion),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tc := range tests {
|
|
||||||
tc := tc
|
|
||||||
t.Run(tc.doc, func(t *testing.T) {
|
|
||||||
_, err := NewVersionMiddleware("1.2.3", tc.defaultVersion, tc.minVersion)
|
|
||||||
if tc.expectedErr == "" {
|
|
||||||
assert.Check(t, err)
|
|
||||||
} else {
|
|
||||||
assert.Check(t, is.Error(err, tc.expectedErr))
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestVersionMiddlewareVersion(t *testing.T) {
|
func TestVersionMiddlewareVersion(t *testing.T) {
|
||||||
expectedVersion := "<not set>"
|
defaultVersion := "1.10.0"
|
||||||
|
minVersion := "1.2.0"
|
||||||
|
expectedVersion := defaultVersion
|
||||||
handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||||
v := httputils.VersionFromContext(ctx)
|
v := httputils.VersionFromContext(ctx)
|
||||||
assert.Check(t, is.Equal(expectedVersion, v))
|
assert.Check(t, is.Equal(expectedVersion, v))
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
m, err := NewVersionMiddleware("1.2.3", api.DefaultVersion, api.MinSupportedAPIVersion)
|
m := NewVersionMiddleware(defaultVersion, defaultVersion, minVersion)
|
||||||
assert.NilError(t, err)
|
|
||||||
h := m.WrapHandler(handler)
|
h := m.WrapHandler(handler)
|
||||||
|
|
||||||
req, _ := http.NewRequest(http.MethodGet, "/containers/json", nil)
|
req, _ := http.NewRequest("GET", "/containers/json", nil)
|
||||||
resp := httptest.NewRecorder()
|
resp := httptest.NewRecorder()
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
|
|
||||||
|
@ -90,19 +35,19 @@ func TestVersionMiddlewareVersion(t *testing.T) {
|
||||||
errString string
|
errString string
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
expectedVersion: api.DefaultVersion,
|
expectedVersion: "1.10.0",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
reqVersion: api.MinSupportedAPIVersion,
|
reqVersion: "1.9.0",
|
||||||
expectedVersion: api.MinSupportedAPIVersion,
|
expectedVersion: "1.9.0",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
reqVersion: "0.1",
|
reqVersion: "0.1",
|
||||||
errString: fmt.Sprintf("client version 0.1 is too old. Minimum supported API version is %s, please upgrade your client to a newer version", api.MinSupportedAPIVersion),
|
errString: "client version 0.1 is too old. Minimum supported API version is 1.2.0, please upgrade your client to a newer version",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
reqVersion: "9999.9999",
|
reqVersion: "9999.9999",
|
||||||
errString: fmt.Sprintf("client version 9999.9999 is too new. Maximum supported API version is %s", api.DefaultVersion),
|
errString: "client version 9999.9999 is too new. Maximum supported API version is 1.10.0",
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -126,21 +71,22 @@ func TestVersionMiddlewareWithErrorsReturnsHeaders(t *testing.T) {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
m, err := NewVersionMiddleware("1.2.3", api.DefaultVersion, api.MinSupportedAPIVersion)
|
defaultVersion := "1.10.0"
|
||||||
assert.NilError(t, err)
|
minVersion := "1.2.0"
|
||||||
|
m := NewVersionMiddleware(defaultVersion, defaultVersion, minVersion)
|
||||||
h := m.WrapHandler(handler)
|
h := m.WrapHandler(handler)
|
||||||
|
|
||||||
req, _ := http.NewRequest(http.MethodGet, "/containers/json", nil)
|
req, _ := http.NewRequest("GET", "/containers/json", nil)
|
||||||
resp := httptest.NewRecorder()
|
resp := httptest.NewRecorder()
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
|
|
||||||
vars := map[string]string{"version": "0.1"}
|
vars := map[string]string{"version": "0.1"}
|
||||||
err = h(ctx, resp, req, vars)
|
err := h(ctx, resp, req, vars)
|
||||||
assert.Check(t, is.ErrorContains(err, ""))
|
assert.Check(t, is.ErrorContains(err, ""))
|
||||||
|
|
||||||
hdr := resp.Result().Header
|
hdr := resp.Result().Header
|
||||||
assert.Check(t, is.Contains(hdr.Get("Server"), "Docker/1.2.3"))
|
assert.Check(t, is.Contains(hdr.Get("Server"), "Docker/"+defaultVersion))
|
||||||
assert.Check(t, is.Contains(hdr.Get("Server"), runtime.GOOS))
|
assert.Check(t, is.Contains(hdr.Get("Server"), runtime.GOOS))
|
||||||
assert.Check(t, is.Equal(hdr.Get("API-Version"), api.DefaultVersion))
|
assert.Check(t, is.Equal(hdr.Get("API-Version"), defaultVersion))
|
||||||
assert.Check(t, is.Equal(hdr.Get("OSType"), runtime.GOOS))
|
assert.Check(t, is.Equal(hdr.Get("OSType"), runtime.GOOS))
|
||||||
}
|
}
|
||||||
|
|
|
@ -15,6 +15,7 @@ type Backend interface {
|
||||||
|
|
||||||
// Prune build cache
|
// Prune build cache
|
||||||
PruneCache(context.Context, types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error)
|
PruneCache(context.Context, types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error)
|
||||||
|
|
||||||
Cancel(context.Context, string) error
|
Cancel(context.Context, string) error
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -1,8 +1,6 @@
|
||||||
package build // import "github.com/docker/docker/api/server/router/build"
|
package build // import "github.com/docker/docker/api/server/router/build"
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"runtime"
|
|
||||||
|
|
||||||
"github.com/docker/docker/api/server/router"
|
"github.com/docker/docker/api/server/router"
|
||||||
"github.com/docker/docker/api/types"
|
"github.com/docker/docker/api/types"
|
||||||
)
|
)
|
||||||
|
@ -12,13 +10,15 @@ type buildRouter struct {
|
||||||
backend Backend
|
backend Backend
|
||||||
daemon experimentalProvider
|
daemon experimentalProvider
|
||||||
routes []router.Route
|
routes []router.Route
|
||||||
|
features *map[string]bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewRouter initializes a new build router
|
// NewRouter initializes a new build router
|
||||||
func NewRouter(b Backend, d experimentalProvider) router.Router {
|
func NewRouter(b Backend, d experimentalProvider, features *map[string]bool) router.Router {
|
||||||
r := &buildRouter{
|
r := &buildRouter{
|
||||||
backend: b,
|
backend: b,
|
||||||
daemon: d,
|
daemon: d,
|
||||||
|
features: features,
|
||||||
}
|
}
|
||||||
r.initRoutes()
|
r.initRoutes()
|
||||||
return r
|
return r
|
||||||
|
@ -37,24 +37,17 @@ func (r *buildRouter) initRoutes() {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// BuilderVersion derives the default docker builder version from the config.
|
// BuilderVersion derives the default docker builder version from the config
|
||||||
//
|
// Note: it is valid to have BuilderVersion unset which means it is up to the
|
||||||
// The default on Linux is version "2" (BuildKit), but the daemon can be
|
// client to choose which builder to use.
|
||||||
// configured to recommend version "1" (classic Builder). Windows does not
|
|
||||||
// yet support BuildKit for native Windows images, and uses "1" (classic builder)
|
|
||||||
// as a default.
|
|
||||||
//
|
|
||||||
// This value is only a recommendation as advertised by the daemon, and it is
|
|
||||||
// up to the client to choose which builder to use.
|
|
||||||
func BuilderVersion(features map[string]bool) types.BuilderVersion {
|
func BuilderVersion(features map[string]bool) types.BuilderVersion {
|
||||||
// TODO(thaJeztah) move the default to daemon/config
|
var bv types.BuilderVersion
|
||||||
if runtime.GOOS == "windows" {
|
if v, ok := features["buildkit"]; ok {
|
||||||
return types.BuilderV1
|
if v {
|
||||||
}
|
bv = types.BuilderBuildKit
|
||||||
|
} else {
|
||||||
bv := types.BuilderBuildKit
|
|
||||||
if v, ok := features["buildkit"]; ok && !v {
|
|
||||||
bv = types.BuilderV1
|
bv = types.BuilderV1
|
||||||
}
|
}
|
||||||
|
}
|
||||||
return bv
|
return bv
|
||||||
}
|
}
|
||||||
|
|
|
@ -14,100 +14,90 @@ import (
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
"github.com/containerd/log"
|
|
||||||
"github.com/docker/docker/api/server/httputils"
|
"github.com/docker/docker/api/server/httputils"
|
||||||
"github.com/docker/docker/api/types"
|
"github.com/docker/docker/api/types"
|
||||||
"github.com/docker/docker/api/types/backend"
|
"github.com/docker/docker/api/types/backend"
|
||||||
"github.com/docker/docker/api/types/container"
|
"github.com/docker/docker/api/types/container"
|
||||||
"github.com/docker/docker/api/types/filters"
|
"github.com/docker/docker/api/types/filters"
|
||||||
"github.com/docker/docker/api/types/registry"
|
|
||||||
"github.com/docker/docker/api/types/versions"
|
"github.com/docker/docker/api/types/versions"
|
||||||
|
"github.com/docker/docker/errdefs"
|
||||||
"github.com/docker/docker/pkg/ioutils"
|
"github.com/docker/docker/pkg/ioutils"
|
||||||
"github.com/docker/docker/pkg/progress"
|
"github.com/docker/docker/pkg/progress"
|
||||||
"github.com/docker/docker/pkg/streamformatter"
|
"github.com/docker/docker/pkg/streamformatter"
|
||||||
units "github.com/docker/go-units"
|
units "github.com/docker/go-units"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
)
|
)
|
||||||
|
|
||||||
type invalidParam struct {
|
type invalidIsolationError string
|
||||||
error
|
|
||||||
|
func (e invalidIsolationError) Error() string {
|
||||||
|
return fmt.Sprintf("Unsupported isolation: %q", string(e))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e invalidParam) InvalidParameter() {}
|
func (e invalidIsolationError) InvalidParameter() {}
|
||||||
|
|
||||||
func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBuildOptions, error) {
|
func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBuildOptions, error) {
|
||||||
options := &types.ImageBuildOptions{
|
version := httputils.VersionFromContext(ctx)
|
||||||
Version: types.BuilderV1, // Builder V1 is the default, but can be overridden
|
options := &types.ImageBuildOptions{}
|
||||||
Dockerfile: r.FormValue("dockerfile"),
|
if httputils.BoolValue(r, "forcerm") && versions.GreaterThanOrEqualTo(version, "1.12") {
|
||||||
SuppressOutput: httputils.BoolValue(r, "q"),
|
|
||||||
NoCache: httputils.BoolValue(r, "nocache"),
|
|
||||||
ForceRemove: httputils.BoolValue(r, "forcerm"),
|
|
||||||
PullParent: httputils.BoolValue(r, "pull"),
|
|
||||||
MemorySwap: httputils.Int64ValueOrZero(r, "memswap"),
|
|
||||||
Memory: httputils.Int64ValueOrZero(r, "memory"),
|
|
||||||
CPUShares: httputils.Int64ValueOrZero(r, "cpushares"),
|
|
||||||
CPUPeriod: httputils.Int64ValueOrZero(r, "cpuperiod"),
|
|
||||||
CPUQuota: httputils.Int64ValueOrZero(r, "cpuquota"),
|
|
||||||
CPUSetCPUs: r.FormValue("cpusetcpus"),
|
|
||||||
CPUSetMems: r.FormValue("cpusetmems"),
|
|
||||||
CgroupParent: r.FormValue("cgroupparent"),
|
|
||||||
NetworkMode: r.FormValue("networkmode"),
|
|
||||||
Tags: r.Form["t"],
|
|
||||||
ExtraHosts: r.Form["extrahosts"],
|
|
||||||
SecurityOpt: r.Form["securityopt"],
|
|
||||||
Squash: httputils.BoolValue(r, "squash"),
|
|
||||||
Target: r.FormValue("target"),
|
|
||||||
RemoteContext: r.FormValue("remote"),
|
|
||||||
SessionID: r.FormValue("session"),
|
|
||||||
BuildID: r.FormValue("buildid"),
|
|
||||||
}
|
|
||||||
|
|
||||||
if runtime.GOOS != "windows" && options.SecurityOpt != nil {
|
|
||||||
// SecurityOpt only supports "credentials-spec" on Windows, and not used on other platforms.
|
|
||||||
return nil, invalidParam{errors.New("security options are not supported on " + runtime.GOOS)}
|
|
||||||
}
|
|
||||||
|
|
||||||
if httputils.BoolValue(r, "forcerm") {
|
|
||||||
options.Remove = true
|
options.Remove = true
|
||||||
} else if r.FormValue("rm") == "" {
|
} else if r.FormValue("rm") == "" && versions.GreaterThanOrEqualTo(version, "1.12") {
|
||||||
options.Remove = true
|
options.Remove = true
|
||||||
} else {
|
} else {
|
||||||
options.Remove = httputils.BoolValue(r, "rm")
|
options.Remove = httputils.BoolValue(r, "rm")
|
||||||
}
|
}
|
||||||
version := httputils.VersionFromContext(ctx)
|
if httputils.BoolValue(r, "pull") && versions.GreaterThanOrEqualTo(version, "1.16") {
|
||||||
|
options.PullParent = true
|
||||||
|
}
|
||||||
|
|
||||||
|
options.Dockerfile = r.FormValue("dockerfile")
|
||||||
|
options.SuppressOutput = httputils.BoolValue(r, "q")
|
||||||
|
options.NoCache = httputils.BoolValue(r, "nocache")
|
||||||
|
options.ForceRemove = httputils.BoolValue(r, "forcerm")
|
||||||
|
options.MemorySwap = httputils.Int64ValueOrZero(r, "memswap")
|
||||||
|
options.Memory = httputils.Int64ValueOrZero(r, "memory")
|
||||||
|
options.CPUShares = httputils.Int64ValueOrZero(r, "cpushares")
|
||||||
|
options.CPUPeriod = httputils.Int64ValueOrZero(r, "cpuperiod")
|
||||||
|
options.CPUQuota = httputils.Int64ValueOrZero(r, "cpuquota")
|
||||||
|
options.CPUSetCPUs = r.FormValue("cpusetcpus")
|
||||||
|
options.CPUSetMems = r.FormValue("cpusetmems")
|
||||||
|
options.CgroupParent = r.FormValue("cgroupparent")
|
||||||
|
options.NetworkMode = r.FormValue("networkmode")
|
||||||
|
options.Tags = r.Form["t"]
|
||||||
|
options.ExtraHosts = r.Form["extrahosts"]
|
||||||
|
options.SecurityOpt = r.Form["securityopt"]
|
||||||
|
options.Squash = httputils.BoolValue(r, "squash")
|
||||||
|
options.Target = r.FormValue("target")
|
||||||
|
options.RemoteContext = r.FormValue("remote")
|
||||||
if versions.GreaterThanOrEqualTo(version, "1.32") {
|
if versions.GreaterThanOrEqualTo(version, "1.32") {
|
||||||
options.Platform = r.FormValue("platform")
|
options.Platform = r.FormValue("platform")
|
||||||
}
|
}
|
||||||
if versions.GreaterThanOrEqualTo(version, "1.40") {
|
|
||||||
outputsJSON := r.FormValue("outputs")
|
|
||||||
if outputsJSON != "" {
|
|
||||||
var outputs []types.ImageBuildOutput
|
|
||||||
if err := json.Unmarshal([]byte(outputsJSON), &outputs); err != nil {
|
|
||||||
return nil, invalidParam{errors.Wrap(err, "invalid outputs specified")}
|
|
||||||
}
|
|
||||||
options.Outputs = outputs
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if s := r.Form.Get("shmsize"); s != "" {
|
if r.Form.Get("shmsize") != "" {
|
||||||
shmSize, err := strconv.ParseInt(s, 10, 64)
|
shmSize, err := strconv.ParseInt(r.Form.Get("shmsize"), 10, 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
options.ShmSize = shmSize
|
options.ShmSize = shmSize
|
||||||
}
|
}
|
||||||
|
|
||||||
if i := r.FormValue("isolation"); i != "" {
|
if i := container.Isolation(r.FormValue("isolation")); i != "" {
|
||||||
options.Isolation = container.Isolation(i)
|
if !container.Isolation.IsValid(i) {
|
||||||
if !options.Isolation.IsValid() {
|
return nil, invalidIsolationError(i)
|
||||||
return nil, invalidParam{errors.Errorf("unsupported isolation: %q", i)}
|
|
||||||
}
|
}
|
||||||
|
options.Isolation = i
|
||||||
}
|
}
|
||||||
|
|
||||||
if ulimitsJSON := r.FormValue("ulimits"); ulimitsJSON != "" {
|
if runtime.GOOS != "windows" && options.SecurityOpt != nil {
|
||||||
buildUlimits := []*units.Ulimit{}
|
return nil, errdefs.InvalidParameter(errors.New("The daemon on this platform does not support setting security options on build"))
|
||||||
|
}
|
||||||
|
|
||||||
|
var buildUlimits = []*units.Ulimit{}
|
||||||
|
ulimitsJSON := r.FormValue("ulimits")
|
||||||
|
if ulimitsJSON != "" {
|
||||||
if err := json.Unmarshal([]byte(ulimitsJSON), &buildUlimits); err != nil {
|
if err := json.Unmarshal([]byte(ulimitsJSON), &buildUlimits); err != nil {
|
||||||
return nil, invalidParam{errors.Wrap(err, "error reading ulimit settings")}
|
return nil, errors.Wrap(errdefs.InvalidParameter(err), "error reading ulimit settings")
|
||||||
}
|
}
|
||||||
options.Ulimits = buildUlimits
|
options.Ulimits = buildUlimits
|
||||||
}
|
}
|
||||||
|
@ -124,59 +114,71 @@ func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBui
|
||||||
// the fact they mentioned it, we need to pass that along to the builder
|
// the fact they mentioned it, we need to pass that along to the builder
|
||||||
// so that it can print a warning about "foo" being unused if there is
|
// so that it can print a warning about "foo" being unused if there is
|
||||||
// no "ARG foo" in the Dockerfile.
|
// no "ARG foo" in the Dockerfile.
|
||||||
if buildArgsJSON := r.FormValue("buildargs"); buildArgsJSON != "" {
|
buildArgsJSON := r.FormValue("buildargs")
|
||||||
buildArgs := map[string]*string{}
|
if buildArgsJSON != "" {
|
||||||
|
var buildArgs = map[string]*string{}
|
||||||
if err := json.Unmarshal([]byte(buildArgsJSON), &buildArgs); err != nil {
|
if err := json.Unmarshal([]byte(buildArgsJSON), &buildArgs); err != nil {
|
||||||
return nil, invalidParam{errors.Wrap(err, "error reading build args")}
|
return nil, errors.Wrap(errdefs.InvalidParameter(err), "error reading build args")
|
||||||
}
|
}
|
||||||
options.BuildArgs = buildArgs
|
options.BuildArgs = buildArgs
|
||||||
}
|
}
|
||||||
|
|
||||||
if labelsJSON := r.FormValue("labels"); labelsJSON != "" {
|
labelsJSON := r.FormValue("labels")
|
||||||
labels := map[string]string{}
|
if labelsJSON != "" {
|
||||||
|
var labels = map[string]string{}
|
||||||
if err := json.Unmarshal([]byte(labelsJSON), &labels); err != nil {
|
if err := json.Unmarshal([]byte(labelsJSON), &labels); err != nil {
|
||||||
return nil, invalidParam{errors.Wrap(err, "error reading labels")}
|
return nil, errors.Wrap(errdefs.InvalidParameter(err), "error reading labels")
|
||||||
}
|
}
|
||||||
options.Labels = labels
|
options.Labels = labels
|
||||||
}
|
}
|
||||||
|
|
||||||
if cacheFromJSON := r.FormValue("cachefrom"); cacheFromJSON != "" {
|
cacheFromJSON := r.FormValue("cachefrom")
|
||||||
cacheFrom := []string{}
|
if cacheFromJSON != "" {
|
||||||
|
var cacheFrom = []string{}
|
||||||
if err := json.Unmarshal([]byte(cacheFromJSON), &cacheFrom); err != nil {
|
if err := json.Unmarshal([]byte(cacheFromJSON), &cacheFrom); err != nil {
|
||||||
return nil, invalidParam{errors.Wrap(err, "error reading cache-from")}
|
return nil, err
|
||||||
}
|
}
|
||||||
options.CacheFrom = cacheFrom
|
options.CacheFrom = cacheFrom
|
||||||
}
|
}
|
||||||
|
options.SessionID = r.FormValue("session")
|
||||||
if bv := r.FormValue("version"); bv != "" {
|
options.BuildID = r.FormValue("buildid")
|
||||||
v, err := parseVersion(bv)
|
builderVersion, err := parseVersion(r.FormValue("version"))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
options.Version = v
|
options.Version = builderVersion
|
||||||
|
|
||||||
|
if versions.GreaterThanOrEqualTo(version, "1.40") {
|
||||||
|
outputsJSON := r.FormValue("outputs")
|
||||||
|
if outputsJSON != "" {
|
||||||
|
var outputs []types.ImageBuildOutput
|
||||||
|
if err := json.Unmarshal([]byte(outputsJSON), &outputs); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
options.Outputs = outputs
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return options, nil
|
return options, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseVersion(s string) (types.BuilderVersion, error) {
|
func parseVersion(s string) (types.BuilderVersion, error) {
|
||||||
switch types.BuilderVersion(s) {
|
if s == "" || s == string(types.BuilderV1) {
|
||||||
case types.BuilderV1:
|
|
||||||
return types.BuilderV1, nil
|
return types.BuilderV1, nil
|
||||||
case types.BuilderBuildKit:
|
|
||||||
return types.BuilderBuildKit, nil
|
|
||||||
default:
|
|
||||||
return "", invalidParam{errors.Errorf("invalid version %q", s)}
|
|
||||||
}
|
}
|
||||||
|
if s == string(types.BuilderBuildKit) {
|
||||||
|
return types.BuilderBuildKit, nil
|
||||||
|
}
|
||||||
|
return "", errors.Errorf("invalid version %s", s)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (br *buildRouter) postPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
func (br *buildRouter) postPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||||
if err := httputils.ParseForm(r); err != nil {
|
if err := httputils.ParseForm(r); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
fltrs, err := filters.FromJSON(r.Form.Get("filters"))
|
filters, err := filters.FromJSON(r.Form.Get("filters"))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return errors.Wrap(err, "could not parse filters")
|
||||||
}
|
}
|
||||||
ksfv := r.FormValue("keep-storage")
|
ksfv := r.FormValue("keep-storage")
|
||||||
if ksfv == "" {
|
if ksfv == "" {
|
||||||
|
@ -184,12 +186,12 @@ func (br *buildRouter) postPrune(ctx context.Context, w http.ResponseWriter, r *
|
||||||
}
|
}
|
||||||
ks, err := strconv.Atoi(ksfv)
|
ks, err := strconv.Atoi(ksfv)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return invalidParam{errors.Wrapf(err, "keep-storage is in bytes and expects an integer, got %v", ksfv)}
|
return errors.Wrapf(err, "keep-storage is in bytes and expects an integer, got %v", ksfv)
|
||||||
}
|
}
|
||||||
|
|
||||||
opts := types.BuildCachePruneOptions{
|
opts := types.BuildCachePruneOptions{
|
||||||
All: httputils.BoolValue(r, "all"),
|
All: httputils.BoolValue(r, "all"),
|
||||||
Filters: fltrs,
|
Filters: filters,
|
||||||
KeepStorage: int64(ks),
|
KeepStorage: int64(ks),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -205,7 +207,7 @@ func (br *buildRouter) postCancel(ctx context.Context, w http.ResponseWriter, r
|
||||||
|
|
||||||
id := r.FormValue("id")
|
id := r.FormValue("id")
|
||||||
if id == "" {
|
if id == "" {
|
||||||
return invalidParam{errors.New("build ID not provided")}
|
return errors.Errorf("build ID not provided")
|
||||||
}
|
}
|
||||||
|
|
||||||
return br.backend.Cancel(ctx, id)
|
return br.backend.Cancel(ctx, id)
|
||||||
|
@ -232,11 +234,12 @@ func (br *buildRouter) postBuild(ctx context.Context, w http.ResponseWriter, r *
|
||||||
}
|
}
|
||||||
|
|
||||||
output := ioutils.NewWriteFlusher(ww)
|
output := ioutils.NewWriteFlusher(ww)
|
||||||
defer func() { _ = output.Close() }()
|
defer output.Close()
|
||||||
|
|
||||||
errf := func(err error) error {
|
errf := func(err error) error {
|
||||||
|
|
||||||
if httputils.BoolValue(r, "q") && notVerboseBuffer.Len() > 0 {
|
if httputils.BoolValue(r, "q") && notVerboseBuffer.Len() > 0 {
|
||||||
_, _ = output.Write(notVerboseBuffer.Bytes())
|
output.Write(notVerboseBuffer.Bytes())
|
||||||
}
|
}
|
||||||
|
|
||||||
// Do not write the error in the http output if it's still empty.
|
// Do not write the error in the http output if it's still empty.
|
||||||
|
@ -246,7 +249,7 @@ func (br *buildRouter) postBuild(ctx context.Context, w http.ResponseWriter, r *
|
||||||
}
|
}
|
||||||
_, err = output.Write(streamformatter.FormatError(err))
|
_, err = output.Write(streamformatter.FormatError(err))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.G(ctx).Warnf("could not write error response: %v", err)
|
logrus.Warnf("could not write error response: %v", err)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -258,7 +261,7 @@ func (br *buildRouter) postBuild(ctx context.Context, w http.ResponseWriter, r *
|
||||||
buildOptions.AuthConfigs = getAuthConfigs(r.Header)
|
buildOptions.AuthConfigs = getAuthConfigs(r.Header)
|
||||||
|
|
||||||
if buildOptions.Squash && !br.daemon.HasExperimental() {
|
if buildOptions.Squash && !br.daemon.HasExperimental() {
|
||||||
return invalidParam{errors.New("squash is only supported with experimental mode")}
|
return errdefs.InvalidParameter(errors.New("squash is only supported with experimental mode"))
|
||||||
}
|
}
|
||||||
|
|
||||||
out := io.Writer(output)
|
out := io.Writer(output)
|
||||||
|
@ -287,13 +290,13 @@ func (br *buildRouter) postBuild(ctx context.Context, w http.ResponseWriter, r *
|
||||||
// Everything worked so if -q was provided the output from the daemon
|
// Everything worked so if -q was provided the output from the daemon
|
||||||
// should be just the image ID and we'll print that to stdout.
|
// should be just the image ID and we'll print that to stdout.
|
||||||
if buildOptions.SuppressOutput {
|
if buildOptions.SuppressOutput {
|
||||||
_, _ = fmt.Fprintln(streamformatter.NewStdoutWriter(output), imgID)
|
fmt.Fprintln(streamformatter.NewStdoutWriter(output), imgID)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func getAuthConfigs(header http.Header) map[string]registry.AuthConfig {
|
func getAuthConfigs(header http.Header) map[string]types.AuthConfig {
|
||||||
authConfigs := map[string]registry.AuthConfig{}
|
authConfigs := map[string]types.AuthConfig{}
|
||||||
authConfigsEncoded := header.Get("X-Registry-Config")
|
authConfigsEncoded := header.Get("X-Registry-Config")
|
||||||
|
|
||||||
if authConfigsEncoded == "" {
|
if authConfigsEncoded == "" {
|
||||||
|
@ -303,7 +306,7 @@ func getAuthConfigs(header http.Header) map[string]registry.AuthConfig {
|
||||||
authConfigsJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authConfigsEncoded))
|
authConfigsJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authConfigsEncoded))
|
||||||
// Pulling an image does not error when no auth is provided so to remain
|
// Pulling an image does not error when no auth is provided so to remain
|
||||||
// consistent with the existing api decode errors are ignored
|
// consistent with the existing api decode errors are ignored
|
||||||
_ = json.NewDecoder(authConfigsJSON).Decode(&authConfigs)
|
json.NewDecoder(authConfigsJSON).Decode(&authConfigs)
|
||||||
return authConfigs
|
return authConfigs
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -425,7 +428,7 @@ func (w *wcf) notify() {
|
||||||
w.mu.Lock()
|
w.mu.Lock()
|
||||||
if !w.ready {
|
if !w.ready {
|
||||||
if w.buf.Len() > 0 {
|
if w.buf.Len() > 0 {
|
||||||
_, _ = io.Copy(w.Writer, w.buf)
|
io.Copy(w.Writer, w.buf)
|
||||||
}
|
}
|
||||||
if w.flushed {
|
if w.flushed {
|
||||||
w.flusher.Flush()
|
w.flusher.Flush()
|
||||||
|
|
|
@ -1,10 +1,10 @@
|
||||||
package checkpoint // import "github.com/docker/docker/api/server/router/checkpoint"
|
package checkpoint // import "github.com/docker/docker/api/server/router/checkpoint"
|
||||||
|
|
||||||
import "github.com/docker/docker/api/types/checkpoint"
|
import "github.com/docker/docker/api/types"
|
||||||
|
|
||||||
// Backend for Checkpoint
|
// Backend for Checkpoint
|
||||||
type Backend interface {
|
type Backend interface {
|
||||||
CheckpointCreate(container string, config checkpoint.CreateOptions) error
|
CheckpointCreate(container string, config types.CheckpointCreateOptions) error
|
||||||
CheckpointDelete(container string, config checkpoint.DeleteOptions) error
|
CheckpointDelete(container string, config types.CheckpointDeleteOptions) error
|
||||||
CheckpointList(container string, config checkpoint.ListOptions) ([]checkpoint.Summary, error)
|
CheckpointList(container string, config types.CheckpointListOptions) ([]types.Checkpoint, error)
|
||||||
}
|
}
|
||||||
|
|
|
@ -2,10 +2,11 @@ package checkpoint // import "github.com/docker/docker/api/server/router/checkpo
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"encoding/json"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
|
||||||
"github.com/docker/docker/api/server/httputils"
|
"github.com/docker/docker/api/server/httputils"
|
||||||
"github.com/docker/docker/api/types/checkpoint"
|
"github.com/docker/docker/api/types"
|
||||||
)
|
)
|
||||||
|
|
||||||
func (s *checkpointRouter) postContainerCheckpoint(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
func (s *checkpointRouter) postContainerCheckpoint(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||||
|
@ -13,8 +14,10 @@ func (s *checkpointRouter) postContainerCheckpoint(ctx context.Context, w http.R
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
var options checkpoint.CreateOptions
|
var options types.CheckpointCreateOptions
|
||||||
if err := httputils.ReadJSON(r, &options); err != nil {
|
|
||||||
|
decoder := json.NewDecoder(r.Body)
|
||||||
|
if err := decoder.Decode(&options); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -32,9 +35,10 @@ func (s *checkpointRouter) getContainerCheckpoints(ctx context.Context, w http.R
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
checkpoints, err := s.backend.CheckpointList(vars["name"], checkpoint.ListOptions{
|
checkpoints, err := s.backend.CheckpointList(vars["name"], types.CheckpointListOptions{
|
||||||
CheckpointDir: r.Form.Get("dir"),
|
CheckpointDir: r.Form.Get("dir"),
|
||||||
})
|
})
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -47,10 +51,11 @@ func (s *checkpointRouter) deleteContainerCheckpoint(ctx context.Context, w http
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
err := s.backend.CheckpointDelete(vars["name"], checkpoint.DeleteOptions{
|
err := s.backend.CheckpointDelete(vars["name"], types.CheckpointDeleteOptions{
|
||||||
CheckpointDir: r.Form.Get("dir"),
|
CheckpointDir: r.Form.Get("dir"),
|
||||||
CheckpointID: vars["checkpoint"],
|
CheckpointID: vars["checkpoint"],
|
||||||
})
|
})
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
|
@ -17,29 +17,30 @@ type execBackend interface {
|
||||||
ContainerExecCreate(name string, config *types.ExecConfig) (string, error)
|
ContainerExecCreate(name string, config *types.ExecConfig) (string, error)
|
||||||
ContainerExecInspect(id string) (*backend.ExecInspect, error)
|
ContainerExecInspect(id string) (*backend.ExecInspect, error)
|
||||||
ContainerExecResize(name string, height, width int) error
|
ContainerExecResize(name string, height, width int) error
|
||||||
ContainerExecStart(ctx context.Context, name string, options container.ExecStartOptions) error
|
ContainerExecStart(ctx context.Context, name string, stdin io.Reader, stdout io.Writer, stderr io.Writer) error
|
||||||
ExecExists(name string) (bool, error)
|
ExecExists(name string) (bool, error)
|
||||||
}
|
}
|
||||||
|
|
||||||
// copyBackend includes functions to implement to provide container copy functionality.
|
// copyBackend includes functions to implement to provide container copy functionality.
|
||||||
type copyBackend interface {
|
type copyBackend interface {
|
||||||
ContainerArchivePath(name string, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error)
|
ContainerArchivePath(name string, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error)
|
||||||
ContainerExport(ctx context.Context, name string, out io.Writer) error
|
ContainerCopy(name string, res string) (io.ReadCloser, error)
|
||||||
|
ContainerExport(name string, out io.Writer) error
|
||||||
ContainerExtractToDir(name, path string, copyUIDGID, noOverwriteDirNonDir bool, content io.Reader) error
|
ContainerExtractToDir(name, path string, copyUIDGID, noOverwriteDirNonDir bool, content io.Reader) error
|
||||||
ContainerStatPath(name string, path string) (stat *types.ContainerPathStat, err error)
|
ContainerStatPath(name string, path string) (stat *types.ContainerPathStat, err error)
|
||||||
}
|
}
|
||||||
|
|
||||||
// stateBackend includes functions to implement to provide container state lifecycle functionality.
|
// stateBackend includes functions to implement to provide container state lifecycle functionality.
|
||||||
type stateBackend interface {
|
type stateBackend interface {
|
||||||
ContainerCreate(ctx context.Context, config backend.ContainerCreateConfig) (container.CreateResponse, error)
|
ContainerCreate(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error)
|
||||||
ContainerKill(name string, signal string) error
|
ContainerKill(name string, sig uint64) error
|
||||||
ContainerPause(name string) error
|
ContainerPause(name string) error
|
||||||
ContainerRename(oldName, newName string) error
|
ContainerRename(oldName, newName string) error
|
||||||
ContainerResize(name string, height, width int) error
|
ContainerResize(name string, height, width int) error
|
||||||
ContainerRestart(ctx context.Context, name string, options container.StopOptions) error
|
ContainerRestart(name string, seconds *int) error
|
||||||
ContainerRm(name string, config *backend.ContainerRmConfig) error
|
ContainerRm(name string, config *types.ContainerRmConfig) error
|
||||||
ContainerStart(ctx context.Context, name string, checkpoint string, checkpointDir string) error
|
ContainerStart(name string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error
|
||||||
ContainerStop(ctx context.Context, name string, options container.StopOptions) error
|
ContainerStop(name string, seconds *int) error
|
||||||
ContainerUnpause(name string) error
|
ContainerUnpause(name string) error
|
||||||
ContainerUpdate(name string, hostConfig *container.HostConfig) (container.ContainerUpdateOKBody, error)
|
ContainerUpdate(name string, hostConfig *container.HostConfig) (container.ContainerUpdateOKBody, error)
|
||||||
ContainerWait(ctx context.Context, name string, condition containerpkg.WaitCondition) (<-chan containerpkg.StateStatus, error)
|
ContainerWait(ctx context.Context, name string, condition containerpkg.WaitCondition) (<-chan containerpkg.StateStatus, error)
|
||||||
|
@ -47,12 +48,13 @@ type stateBackend interface {
|
||||||
|
|
||||||
// monitorBackend includes functions to implement to provide containers monitoring functionality.
|
// monitorBackend includes functions to implement to provide containers monitoring functionality.
|
||||||
type monitorBackend interface {
|
type monitorBackend interface {
|
||||||
ContainerChanges(ctx context.Context, name string) ([]archive.Change, error)
|
ContainerChanges(name string) ([]archive.Change, error)
|
||||||
ContainerInspect(ctx context.Context, name string, size bool, version string) (interface{}, error)
|
ContainerInspect(name string, size bool, version string) (interface{}, error)
|
||||||
ContainerLogs(ctx context.Context, name string, config *container.LogsOptions) (msgs <-chan *backend.LogMessage, tty bool, err error)
|
ContainerLogs(ctx context.Context, name string, config *types.ContainerLogsOptions) (msgs <-chan *backend.LogMessage, tty bool, err error)
|
||||||
ContainerStats(ctx context.Context, name string, config *backend.ContainerStatsConfig) error
|
ContainerStats(ctx context.Context, name string, config *backend.ContainerStatsConfig) error
|
||||||
ContainerTop(name string, psArgs string) (*container.ContainerTopOKBody, error)
|
ContainerTop(name string, psArgs string) (*container.ContainerTopOKBody, error)
|
||||||
Containers(ctx context.Context, config *container.ListOptions) ([]*types.Container, error)
|
|
||||||
|
Containers(config *types.ContainerListOptions) ([]*types.Container, error)
|
||||||
}
|
}
|
||||||
|
|
||||||
// attachBackend includes function to implement to provide container attaching functionality.
|
// attachBackend includes function to implement to provide container attaching functionality.
|
||||||
|
@ -66,7 +68,7 @@ type systemBackend interface {
|
||||||
}
|
}
|
||||||
|
|
||||||
type commitBackend interface {
|
type commitBackend interface {
|
||||||
CreateImageFromContainer(ctx context.Context, name string, config *backend.CreateImageConfig) (imageID string, err error)
|
CreateImageFromContainer(name string, config *backend.CreateImageConfig) (imageID string, err error)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Backend is all the methods that need to be implemented to provide container specific functionality.
|
// Backend is all the methods that need to be implemented to provide container specific functionality.
|
||||||
|
|
|
@ -10,15 +10,13 @@ type containerRouter struct {
|
||||||
backend Backend
|
backend Backend
|
||||||
decoder httputils.ContainerDecoder
|
decoder httputils.ContainerDecoder
|
||||||
routes []router.Route
|
routes []router.Route
|
||||||
cgroup2 bool
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewRouter initializes a new container router
|
// NewRouter initializes a new container router
|
||||||
func NewRouter(b Backend, decoder httputils.ContainerDecoder, cgroup2 bool) router.Router {
|
func NewRouter(b Backend, decoder httputils.ContainerDecoder) router.Router {
|
||||||
r := &containerRouter{
|
r := &containerRouter{
|
||||||
backend: b,
|
backend: b,
|
||||||
decoder: decoder,
|
decoder: decoder,
|
||||||
cgroup2: cgroup2,
|
|
||||||
}
|
}
|
||||||
r.initRoutes()
|
r.initRoutes()
|
||||||
return r
|
return r
|
||||||
|
@ -56,6 +54,7 @@ func (r *containerRouter) initRoutes() {
|
||||||
router.NewPostRoute("/containers/{name:.*}/wait", r.postContainersWait),
|
router.NewPostRoute("/containers/{name:.*}/wait", r.postContainersWait),
|
||||||
router.NewPostRoute("/containers/{name:.*}/resize", r.postContainersResize),
|
router.NewPostRoute("/containers/{name:.*}/resize", r.postContainersResize),
|
||||||
router.NewPostRoute("/containers/{name:.*}/attach", r.postContainersAttach),
|
router.NewPostRoute("/containers/{name:.*}/attach", r.postContainersAttach),
|
||||||
|
router.NewPostRoute("/containers/{name:.*}/copy", r.postContainersCopy), // Deprecated since 1.8, Errors out since 1.12
|
||||||
router.NewPostRoute("/containers/{name:.*}/exec", r.postContainerExecCreate),
|
router.NewPostRoute("/containers/{name:.*}/exec", r.postContainerExecCreate),
|
||||||
router.NewPostRoute("/exec/{name:.*}/start", r.postContainerExecStart),
|
router.NewPostRoute("/exec/{name:.*}/start", r.postContainerExecStart),
|
||||||
router.NewPostRoute("/exec/{name:.*}/resize", r.postContainerExecResize),
|
router.NewPostRoute("/exec/{name:.*}/resize", r.postContainerExecResize),
|
||||||
|
|
|
@ -6,27 +6,21 @@ import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
"runtime"
|
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"syscall"
|
||||||
|
|
||||||
"github.com/containerd/containerd/platforms"
|
|
||||||
"github.com/containerd/log"
|
|
||||||
"github.com/docker/docker/api/server/httpstatus"
|
|
||||||
"github.com/docker/docker/api/server/httputils"
|
"github.com/docker/docker/api/server/httputils"
|
||||||
"github.com/docker/docker/api/types"
|
"github.com/docker/docker/api/types"
|
||||||
"github.com/docker/docker/api/types/backend"
|
"github.com/docker/docker/api/types/backend"
|
||||||
"github.com/docker/docker/api/types/container"
|
"github.com/docker/docker/api/types/container"
|
||||||
"github.com/docker/docker/api/types/filters"
|
"github.com/docker/docker/api/types/filters"
|
||||||
"github.com/docker/docker/api/types/mount"
|
|
||||||
"github.com/docker/docker/api/types/network"
|
|
||||||
"github.com/docker/docker/api/types/versions"
|
"github.com/docker/docker/api/types/versions"
|
||||||
containerpkg "github.com/docker/docker/container"
|
containerpkg "github.com/docker/docker/container"
|
||||||
"github.com/docker/docker/errdefs"
|
"github.com/docker/docker/errdefs"
|
||||||
"github.com/docker/docker/pkg/ioutils"
|
"github.com/docker/docker/pkg/ioutils"
|
||||||
"github.com/docker/docker/runconfig"
|
"github.com/docker/docker/pkg/signal"
|
||||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
"golang.org/x/net/websocket"
|
"golang.org/x/net/websocket"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -39,24 +33,29 @@ func (s *containerRouter) postCommit(ctx context.Context, w http.ResponseWriter,
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TODO: remove pause arg, and always pause in backend
|
||||||
|
pause := httputils.BoolValue(r, "pause")
|
||||||
|
version := httputils.VersionFromContext(ctx)
|
||||||
|
if r.FormValue("pause") == "" && versions.GreaterThanOrEqualTo(version, "1.13") {
|
||||||
|
pause = true
|
||||||
|
}
|
||||||
|
|
||||||
config, _, _, err := s.decoder.DecodeConfig(r.Body)
|
config, _, _, err := s.decoder.DecodeConfig(r.Body)
|
||||||
if err != nil && !errors.Is(err, io.EOF) { // Do not fail if body is empty.
|
if err != nil && err != io.EOF { //Do not fail if body is empty.
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
ref, err := httputils.RepoTagReference(r.Form.Get("repo"), r.Form.Get("tag"))
|
commitCfg := &backend.CreateImageConfig{
|
||||||
if err != nil {
|
Pause: pause,
|
||||||
return errdefs.InvalidParameter(err)
|
Repo: r.Form.Get("repo"),
|
||||||
}
|
Tag: r.Form.Get("tag"),
|
||||||
|
|
||||||
imgID, err := s.backend.CreateImageFromContainer(ctx, r.Form.Get("container"), &backend.CreateImageConfig{
|
|
||||||
Pause: httputils.BoolValueOrDefault(r, "pause", true), // TODO(dnephin): remove pause arg, and always pause in backend
|
|
||||||
Tag: ref,
|
|
||||||
Author: r.Form.Get("author"),
|
Author: r.Form.Get("author"),
|
||||||
Comment: r.Form.Get("comment"),
|
Comment: r.Form.Get("comment"),
|
||||||
Config: config,
|
Config: config,
|
||||||
Changes: r.Form["changes"],
|
Changes: r.Form["changes"],
|
||||||
})
|
}
|
||||||
|
|
||||||
|
imgID, err := s.backend.CreateImageFromContainer(r.Form.Get("container"), commitCfg)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -73,7 +72,7 @@ func (s *containerRouter) getContainersJSON(ctx context.Context, w http.Response
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
config := &container.ListOptions{
|
config := &types.ContainerListOptions{
|
||||||
All: httputils.BoolValue(r, "all"),
|
All: httputils.BoolValue(r, "all"),
|
||||||
Size: httputils.BoolValue(r, "size"),
|
Size: httputils.BoolValue(r, "size"),
|
||||||
Since: r.Form.Get("since"),
|
Since: r.Form.Get("since"),
|
||||||
|
@ -89,7 +88,7 @@ func (s *containerRouter) getContainersJSON(ctx context.Context, w http.Response
|
||||||
config.Limit = limit
|
config.Limit = limit
|
||||||
}
|
}
|
||||||
|
|
||||||
containers, err := s.backend.Containers(ctx, config)
|
containers, err := s.backend.Containers(config)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -106,16 +105,14 @@ func (s *containerRouter) getContainersStats(ctx context.Context, w http.Respons
|
||||||
if !stream {
|
if !stream {
|
||||||
w.Header().Set("Content-Type", "application/json")
|
w.Header().Set("Content-Type", "application/json")
|
||||||
}
|
}
|
||||||
var oneShot bool
|
|
||||||
if versions.GreaterThanOrEqualTo(httputils.VersionFromContext(ctx), "1.41") {
|
config := &backend.ContainerStatsConfig{
|
||||||
oneShot = httputils.BoolValueOrDefault(r, "one-shot", false)
|
Stream: stream,
|
||||||
|
OutStream: w,
|
||||||
|
Version: httputils.VersionFromContext(ctx),
|
||||||
}
|
}
|
||||||
|
|
||||||
return s.backend.ContainerStats(ctx, vars["name"], &backend.ContainerStatsConfig{
|
return s.backend.ContainerStats(ctx, vars["name"], config)
|
||||||
Stream: stream,
|
|
||||||
OneShot: oneShot,
|
|
||||||
OutStream: w,
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *containerRouter) getContainersLogs(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
func (s *containerRouter) getContainersLogs(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||||
|
@ -134,7 +131,7 @@ func (s *containerRouter) getContainersLogs(ctx context.Context, w http.Response
|
||||||
}
|
}
|
||||||
|
|
||||||
containerName := vars["name"]
|
containerName := vars["name"]
|
||||||
logsConfig := &container.LogsOptions{
|
logsConfig := &types.ContainerLogsOptions{
|
||||||
Follow: httputils.BoolValue(r, "follow"),
|
Follow: httputils.BoolValue(r, "follow"),
|
||||||
Timestamps: httputils.BoolValue(r, "timestamps"),
|
Timestamps: httputils.BoolValue(r, "timestamps"),
|
||||||
Since: r.Form.Get("since"),
|
Since: r.Form.Get("since"),
|
||||||
|
@ -150,12 +147,6 @@ func (s *containerRouter) getContainersLogs(ctx context.Context, w http.Response
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
contentType := types.MediaTypeRawStream
|
|
||||||
if !tty && versions.GreaterThanOrEqualTo(httputils.VersionFromContext(ctx), "1.42") {
|
|
||||||
contentType = types.MediaTypeMultiplexedStream
|
|
||||||
}
|
|
||||||
w.Header().Set("Content-Type", contentType)
|
|
||||||
|
|
||||||
// if has a tty, we're not muxing streams. if it doesn't, we are. simple.
|
// if has a tty, we're not muxing streams. if it doesn't, we are. simple.
|
||||||
// this is the point of no return for writing a response. once we call
|
// this is the point of no return for writing a response. once we call
|
||||||
// WriteLogStream, the response has been started and errors will be
|
// WriteLogStream, the response has been started and errors will be
|
||||||
|
@ -165,9 +156,17 @@ func (s *containerRouter) getContainersLogs(ctx context.Context, w http.Response
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *containerRouter) getContainersExport(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
func (s *containerRouter) getContainersExport(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||||
return s.backend.ContainerExport(ctx, vars["name"], w)
|
return s.backend.ContainerExport(vars["name"], w)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type bodyOnStartError struct{}
|
||||||
|
|
||||||
|
func (bodyOnStartError) Error() string {
|
||||||
|
return "starting container with non-empty request body was deprecated since API v1.22 and removed in v1.24"
|
||||||
|
}
|
||||||
|
|
||||||
|
func (bodyOnStartError) InvalidParameter() {}
|
||||||
|
|
||||||
func (s *containerRouter) postContainersStart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
func (s *containerRouter) postContainersStart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||||
// If contentLength is -1, we can assumed chunked encoding
|
// If contentLength is -1, we can assumed chunked encoding
|
||||||
// or more technically that the length is unknown
|
// or more technically that the length is unknown
|
||||||
|
@ -175,17 +174,33 @@ func (s *containerRouter) postContainersStart(ctx context.Context, w http.Respon
|
||||||
// net/http otherwise seems to swallow any headers related to chunked encoding
|
// net/http otherwise seems to swallow any headers related to chunked encoding
|
||||||
// including r.TransferEncoding
|
// including r.TransferEncoding
|
||||||
// allow a nil body for backwards compatibility
|
// allow a nil body for backwards compatibility
|
||||||
//
|
|
||||||
|
version := httputils.VersionFromContext(ctx)
|
||||||
|
var hostConfig *container.HostConfig
|
||||||
// A non-nil json object is at least 7 characters.
|
// A non-nil json object is at least 7 characters.
|
||||||
if r.ContentLength > 7 || r.ContentLength == -1 {
|
if r.ContentLength > 7 || r.ContentLength == -1 {
|
||||||
return errdefs.InvalidParameter(errors.New("starting container with non-empty request body was deprecated since API v1.22 and removed in v1.24"))
|
if versions.GreaterThanOrEqualTo(version, "1.24") {
|
||||||
|
return bodyOnStartError{}
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := httputils.CheckForJSON(r); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
c, err := s.decoder.DecodeHostConfig(r.Body)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
hostConfig = c
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := httputils.ParseForm(r); err != nil {
|
if err := httputils.ParseForm(r); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := s.backend.ContainerStart(ctx, vars["name"], r.Form.Get("checkpoint"), r.Form.Get("checkpoint-dir")); err != nil {
|
checkpoint := r.Form.Get("checkpoint")
|
||||||
|
checkpointDir := r.Form.Get("checkpoint-dir")
|
||||||
|
if err := s.backend.ContainerStart(vars["name"], hostConfig, checkpoint, checkpointDir); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -198,37 +213,52 @@ func (s *containerRouter) postContainersStop(ctx context.Context, w http.Respons
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var seconds *int
|
||||||
options container.StopOptions
|
|
||||||
version = httputils.VersionFromContext(ctx)
|
|
||||||
)
|
|
||||||
if versions.GreaterThanOrEqualTo(version, "1.42") {
|
|
||||||
options.Signal = r.Form.Get("signal")
|
|
||||||
}
|
|
||||||
if tmpSeconds := r.Form.Get("t"); tmpSeconds != "" {
|
if tmpSeconds := r.Form.Get("t"); tmpSeconds != "" {
|
||||||
valSeconds, err := strconv.Atoi(tmpSeconds)
|
valSeconds, err := strconv.Atoi(tmpSeconds)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
options.Timeout = &valSeconds
|
seconds = &valSeconds
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := s.backend.ContainerStop(ctx, vars["name"], options); err != nil {
|
if err := s.backend.ContainerStop(vars["name"], seconds); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
w.WriteHeader(http.StatusNoContent)
|
w.WriteHeader(http.StatusNoContent)
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *containerRouter) postContainersKill(_ context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
func (s *containerRouter) postContainersKill(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||||
if err := httputils.ParseForm(r); err != nil {
|
if err := httputils.ParseForm(r); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var sig syscall.Signal
|
||||||
name := vars["name"]
|
name := vars["name"]
|
||||||
if err := s.backend.ContainerKill(name, r.Form.Get("signal")); err != nil {
|
|
||||||
return errors.Wrapf(err, "cannot kill container: %s", name)
|
// If we have a signal, look at it. Otherwise, do nothing
|
||||||
|
if sigStr := r.Form.Get("signal"); sigStr != "" {
|
||||||
|
var err error
|
||||||
|
if sig, err = signal.ParseSignal(sigStr); err != nil {
|
||||||
|
return errdefs.InvalidParameter(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := s.backend.ContainerKill(name, uint64(sig)); err != nil {
|
||||||
|
var isStopped bool
|
||||||
|
if errdefs.IsConflict(err) {
|
||||||
|
isStopped = true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return error that's not caused because the container is stopped.
|
||||||
|
// Return error if the container is not running and the api is >= 1.20
|
||||||
|
// to keep backwards compatibility.
|
||||||
|
version := httputils.VersionFromContext(ctx)
|
||||||
|
if versions.GreaterThanOrEqualTo(version, "1.20") || !isStopped {
|
||||||
|
return errors.Wrapf(err, "Cannot kill container: %s", name)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
w.WriteHeader(http.StatusNoContent)
|
w.WriteHeader(http.StatusNoContent)
|
||||||
|
@ -240,26 +270,21 @@ func (s *containerRouter) postContainersRestart(ctx context.Context, w http.Resp
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var seconds *int
|
||||||
options container.StopOptions
|
|
||||||
version = httputils.VersionFromContext(ctx)
|
|
||||||
)
|
|
||||||
if versions.GreaterThanOrEqualTo(version, "1.42") {
|
|
||||||
options.Signal = r.Form.Get("signal")
|
|
||||||
}
|
|
||||||
if tmpSeconds := r.Form.Get("t"); tmpSeconds != "" {
|
if tmpSeconds := r.Form.Get("t"); tmpSeconds != "" {
|
||||||
valSeconds, err := strconv.Atoi(tmpSeconds)
|
valSeconds, err := strconv.Atoi(tmpSeconds)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
options.Timeout = &valSeconds
|
seconds = &valSeconds
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := s.backend.ContainerRestart(ctx, vars["name"], options); err != nil {
|
if err := s.backend.ContainerRestart(vars["name"], seconds); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
w.WriteHeader(http.StatusNoContent)
|
w.WriteHeader(http.StatusNoContent)
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -304,18 +329,12 @@ func (s *containerRouter) postContainersWait(ctx context.Context, w http.Respons
|
||||||
if err := httputils.ParseForm(r); err != nil {
|
if err := httputils.ParseForm(r); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if v := r.Form.Get("condition"); v != "" {
|
switch container.WaitCondition(r.Form.Get("condition")) {
|
||||||
switch container.WaitCondition(v) {
|
|
||||||
case container.WaitConditionNotRunning:
|
|
||||||
waitCondition = containerpkg.WaitConditionNotRunning
|
|
||||||
case container.WaitConditionNextExit:
|
case container.WaitConditionNextExit:
|
||||||
waitCondition = containerpkg.WaitConditionNextExit
|
waitCondition = containerpkg.WaitConditionNextExit
|
||||||
case container.WaitConditionRemoved:
|
case container.WaitConditionRemoved:
|
||||||
waitCondition = containerpkg.WaitConditionRemoved
|
waitCondition = containerpkg.WaitConditionRemoved
|
||||||
legacyRemovalWaitPre134 = versions.LessThan(version, "1.34")
|
legacyRemovalWaitPre134 = versions.LessThan(version, "1.34")
|
||||||
default:
|
|
||||||
return errdefs.InvalidParameter(errors.Errorf("invalid condition: %q", v))
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -345,19 +364,19 @@ func (s *containerRouter) postContainersWait(ctx context.Context, w http.Respons
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
var waitError *container.WaitExitError
|
var waitError *container.ContainerWaitOKBodyError
|
||||||
if status.Err() != nil {
|
if status.Err() != nil {
|
||||||
waitError = &container.WaitExitError{Message: status.Err().Error()}
|
waitError = &container.ContainerWaitOKBodyError{Message: status.Err().Error()}
|
||||||
}
|
}
|
||||||
|
|
||||||
return json.NewEncoder(w).Encode(&container.WaitResponse{
|
return json.NewEncoder(w).Encode(&container.ContainerWaitOKBody{
|
||||||
StatusCode: int64(status.ExitCode()),
|
StatusCode: int64(status.ExitCode()),
|
||||||
Error: waitError,
|
Error: waitError,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *containerRouter) getContainersChanges(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
func (s *containerRouter) getContainersChanges(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||||
changes, err := s.backend.ContainerChanges(ctx, vars["name"])
|
changes, err := s.backend.ContainerChanges(vars["name"])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -396,20 +415,19 @@ func (s *containerRouter) postContainerUpdate(ctx context.Context, w http.Respon
|
||||||
if err := httputils.ParseForm(r); err != nil {
|
if err := httputils.ParseForm(r); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
if err := httputils.CheckForJSON(r); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
var updateConfig container.UpdateConfig
|
var updateConfig container.UpdateConfig
|
||||||
if err := httputils.ReadJSON(r, &updateConfig); err != nil {
|
|
||||||
|
decoder := json.NewDecoder(r.Body)
|
||||||
|
if err := decoder.Decode(&updateConfig); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if versions.LessThan(httputils.VersionFromContext(ctx), "1.40") {
|
if versions.LessThan(httputils.VersionFromContext(ctx), "1.40") {
|
||||||
updateConfig.PidsLimit = nil
|
updateConfig.PidsLimit = nil
|
||||||
}
|
}
|
||||||
|
|
||||||
if versions.GreaterThanOrEqualTo(httputils.VersionFromContext(ctx), "1.42") {
|
|
||||||
// Ignore KernelMemory removed in API 1.42.
|
|
||||||
updateConfig.KernelMemory = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
if updateConfig.PidsLimit != nil && *updateConfig.PidsLimit <= 0 {
|
if updateConfig.PidsLimit != nil && *updateConfig.PidsLimit <= 0 {
|
||||||
// Both `0` and `-1` are accepted to set "unlimited" when updating.
|
// Both `0` and `-1` are accepted to set "unlimited" when updating.
|
||||||
// Historically, any negative value was accepted, so treat them as
|
// Historically, any negative value was accepted, so treat them as
|
||||||
|
@ -444,182 +462,36 @@ func (s *containerRouter) postContainersCreate(ctx context.Context, w http.Respo
|
||||||
|
|
||||||
config, hostConfig, networkingConfig, err := s.decoder.DecodeConfig(r.Body)
|
config, hostConfig, networkingConfig, err := s.decoder.DecodeConfig(r.Body)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if errors.Is(err, io.EOF) {
|
|
||||||
return errdefs.InvalidParameter(errors.New("invalid JSON: got EOF while reading request body"))
|
|
||||||
}
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if config == nil {
|
|
||||||
return errdefs.InvalidParameter(runconfig.ErrEmptyConfig)
|
|
||||||
}
|
|
||||||
if hostConfig == nil {
|
|
||||||
hostConfig = &container.HostConfig{}
|
|
||||||
}
|
|
||||||
if networkingConfig == nil {
|
|
||||||
networkingConfig = &network.NetworkingConfig{}
|
|
||||||
}
|
|
||||||
if networkingConfig.EndpointsConfig == nil {
|
|
||||||
networkingConfig.EndpointsConfig = make(map[string]*network.EndpointSettings)
|
|
||||||
}
|
|
||||||
// The NetworkMode "default" is used as a way to express a container should
|
|
||||||
// be attached to the OS-dependant default network, in an OS-independent
|
|
||||||
// way. Doing this conversion as soon as possible ensures we have less
|
|
||||||
// NetworkMode to handle down the path (including in the
|
|
||||||
// backward-compatibility layer we have just below).
|
|
||||||
//
|
|
||||||
// Note that this is not the only place where this conversion has to be
|
|
||||||
// done (as there are various other places where containers get created).
|
|
||||||
if hostConfig.NetworkMode == "" || hostConfig.NetworkMode.IsDefault() {
|
|
||||||
hostConfig.NetworkMode = runconfig.DefaultDaemonNetworkMode()
|
|
||||||
if nw, ok := networkingConfig.EndpointsConfig[network.NetworkDefault]; ok {
|
|
||||||
networkingConfig.EndpointsConfig[hostConfig.NetworkMode.NetworkName()] = nw
|
|
||||||
delete(networkingConfig.EndpointsConfig, network.NetworkDefault)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
version := httputils.VersionFromContext(ctx)
|
version := httputils.VersionFromContext(ctx)
|
||||||
|
adjustCPUShares := versions.LessThan(version, "1.19")
|
||||||
|
|
||||||
// When using API 1.24 and under, the client is responsible for removing the container
|
// When using API 1.24 and under, the client is responsible for removing the container
|
||||||
if versions.LessThan(version, "1.25") {
|
if hostConfig != nil && versions.LessThan(version, "1.25") {
|
||||||
hostConfig.AutoRemove = false
|
hostConfig.AutoRemove = false
|
||||||
}
|
}
|
||||||
|
|
||||||
if versions.LessThan(version, "1.40") {
|
if hostConfig != nil && versions.LessThan(version, "1.40") {
|
||||||
// Ignore BindOptions.NonRecursive because it was added in API 1.40.
|
// Ignore BindOptions.NonRecursive because it was added in API 1.40.
|
||||||
for _, m := range hostConfig.Mounts {
|
for _, m := range hostConfig.Mounts {
|
||||||
if bo := m.BindOptions; bo != nil {
|
if bo := m.BindOptions; bo != nil {
|
||||||
bo.NonRecursive = false
|
bo.NonRecursive = false
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Ignore KernelMemoryTCP because it was added in API 1.40.
|
// Ignore KernelMemoryTCP because it was added in API 1.40.
|
||||||
hostConfig.KernelMemoryTCP = 0
|
hostConfig.KernelMemoryTCP = 0
|
||||||
|
|
||||||
|
// Ignore Capabilities because it was added in API 1.40.
|
||||||
|
hostConfig.Capabilities = nil
|
||||||
|
|
||||||
// Older clients (API < 1.40) expects the default to be shareable, make them happy
|
// Older clients (API < 1.40) expects the default to be shareable, make them happy
|
||||||
if hostConfig.IpcMode.IsEmpty() {
|
if hostConfig.IpcMode.IsEmpty() {
|
||||||
hostConfig.IpcMode = container.IPCModeShareable
|
hostConfig.IpcMode = container.IpcMode("shareable")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if versions.LessThan(version, "1.41") {
|
if hostConfig != nil && hostConfig.PidsLimit != nil && *hostConfig.PidsLimit <= 0 {
|
||||||
// Older clients expect the default to be "host" on cgroup v1 hosts
|
|
||||||
if !s.cgroup2 && hostConfig.CgroupnsMode.IsEmpty() {
|
|
||||||
hostConfig.CgroupnsMode = container.CgroupnsModeHost
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var platform *ocispec.Platform
|
|
||||||
if versions.GreaterThanOrEqualTo(version, "1.41") {
|
|
||||||
if v := r.Form.Get("platform"); v != "" {
|
|
||||||
p, err := platforms.Parse(v)
|
|
||||||
if err != nil {
|
|
||||||
return errdefs.InvalidParameter(err)
|
|
||||||
}
|
|
||||||
platform = &p
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if versions.LessThan(version, "1.42") {
|
|
||||||
for _, m := range hostConfig.Mounts {
|
|
||||||
// Ignore BindOptions.CreateMountpoint because it was added in API 1.42.
|
|
||||||
if bo := m.BindOptions; bo != nil {
|
|
||||||
bo.CreateMountpoint = false
|
|
||||||
}
|
|
||||||
|
|
||||||
// These combinations are invalid, but weren't validated in API < 1.42.
|
|
||||||
// We reset them here, so that validation doesn't produce an error.
|
|
||||||
if o := m.VolumeOptions; o != nil && m.Type != mount.TypeVolume {
|
|
||||||
m.VolumeOptions = nil
|
|
||||||
}
|
|
||||||
if o := m.TmpfsOptions; o != nil && m.Type != mount.TypeTmpfs {
|
|
||||||
m.TmpfsOptions = nil
|
|
||||||
}
|
|
||||||
if bo := m.BindOptions; bo != nil {
|
|
||||||
// Ignore BindOptions.CreateMountpoint because it was added in API 1.42.
|
|
||||||
bo.CreateMountpoint = false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if runtime.GOOS == "linux" {
|
|
||||||
// ConsoleSize is not respected by Linux daemon before API 1.42
|
|
||||||
hostConfig.ConsoleSize = [2]uint{0, 0}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if versions.GreaterThanOrEqualTo(version, "1.42") {
|
|
||||||
// Ignore KernelMemory removed in API 1.42.
|
|
||||||
hostConfig.KernelMemory = 0
|
|
||||||
for _, m := range hostConfig.Mounts {
|
|
||||||
if o := m.VolumeOptions; o != nil && m.Type != mount.TypeVolume {
|
|
||||||
return errdefs.InvalidParameter(fmt.Errorf("VolumeOptions must not be specified on mount type %q", m.Type))
|
|
||||||
}
|
|
||||||
if o := m.BindOptions; o != nil && m.Type != mount.TypeBind {
|
|
||||||
return errdefs.InvalidParameter(fmt.Errorf("BindOptions must not be specified on mount type %q", m.Type))
|
|
||||||
}
|
|
||||||
if o := m.TmpfsOptions; o != nil && m.Type != mount.TypeTmpfs {
|
|
||||||
return errdefs.InvalidParameter(fmt.Errorf("TmpfsOptions must not be specified on mount type %q", m.Type))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if versions.LessThan(version, "1.43") {
|
|
||||||
// Ignore Annotations because it was added in API v1.43.
|
|
||||||
hostConfig.Annotations = nil
|
|
||||||
}
|
|
||||||
|
|
||||||
defaultReadOnlyNonRecursive := false
|
|
||||||
if versions.LessThan(version, "1.44") {
|
|
||||||
if config.Healthcheck != nil {
|
|
||||||
// StartInterval was added in API 1.44
|
|
||||||
config.Healthcheck.StartInterval = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set ReadOnlyNonRecursive to true because it was added in API 1.44
|
|
||||||
// Before that all read-only mounts were non-recursive.
|
|
||||||
// Keep that behavior for clients on older APIs.
|
|
||||||
defaultReadOnlyNonRecursive = true
|
|
||||||
|
|
||||||
for _, m := range hostConfig.Mounts {
|
|
||||||
if m.Type == mount.TypeBind {
|
|
||||||
if m.BindOptions != nil && m.BindOptions.ReadOnlyForceRecursive {
|
|
||||||
// NOTE: that technically this is a breaking change for older
|
|
||||||
// API versions, and we should ignore the new field.
|
|
||||||
// However, this option may be incorrectly set by a client with
|
|
||||||
// the expectation that the failing to apply recursive read-only
|
|
||||||
// is enforced, so we decided to produce an error instead,
|
|
||||||
// instead of silently ignoring.
|
|
||||||
return errdefs.InvalidParameter(errors.New("BindOptions.ReadOnlyForceRecursive needs API v1.44 or newer"))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Creating a container connected to several networks is not supported until v1.44.
|
|
||||||
if len(networkingConfig.EndpointsConfig) > 1 {
|
|
||||||
l := make([]string, 0, len(networkingConfig.EndpointsConfig))
|
|
||||||
for k := range networkingConfig.EndpointsConfig {
|
|
||||||
l = append(l, k)
|
|
||||||
}
|
|
||||||
return errdefs.InvalidParameter(errors.Errorf("Container cannot be created with multiple network endpoints: %s", strings.Join(l, ", ")))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if versions.LessThan(version, "1.45") {
|
|
||||||
for _, m := range hostConfig.Mounts {
|
|
||||||
if m.VolumeOptions != nil && m.VolumeOptions.Subpath != "" {
|
|
||||||
return errdefs.InvalidParameter(errors.New("VolumeOptions.Subpath needs API v1.45 or newer"))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var warnings []string
|
|
||||||
if warn, err := handleMACAddressBC(config, hostConfig, networkingConfig, version); err != nil {
|
|
||||||
return err
|
|
||||||
} else if warn != "" {
|
|
||||||
warnings = append(warnings, warn)
|
|
||||||
}
|
|
||||||
|
|
||||||
if hostConfig.PidsLimit != nil && *hostConfig.PidsLimit <= 0 {
|
|
||||||
// Don't set a limit if either no limit was specified, or "unlimited" was
|
// Don't set a limit if either no limit was specified, or "unlimited" was
|
||||||
// explicitly set.
|
// explicitly set.
|
||||||
// Both `0` and `-1` are accepted as "unlimited", and historically any
|
// Both `0` and `-1` are accepted as "unlimited", and historically any
|
||||||
|
@ -627,107 +499,27 @@ func (s *containerRouter) postContainersCreate(ctx context.Context, w http.Respo
|
||||||
hostConfig.PidsLimit = nil
|
hostConfig.PidsLimit = nil
|
||||||
}
|
}
|
||||||
|
|
||||||
ccr, err := s.backend.ContainerCreate(ctx, backend.ContainerCreateConfig{
|
ccr, err := s.backend.ContainerCreate(types.ContainerCreateConfig{
|
||||||
Name: name,
|
Name: name,
|
||||||
Config: config,
|
Config: config,
|
||||||
HostConfig: hostConfig,
|
HostConfig: hostConfig,
|
||||||
NetworkingConfig: networkingConfig,
|
NetworkingConfig: networkingConfig,
|
||||||
Platform: platform,
|
AdjustCPUShares: adjustCPUShares,
|
||||||
DefaultReadOnlyNonRecursive: defaultReadOnlyNonRecursive,
|
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
ccr.Warnings = append(ccr.Warnings, warnings...)
|
|
||||||
return httputils.WriteJSON(w, http.StatusCreated, ccr)
|
return httputils.WriteJSON(w, http.StatusCreated, ccr)
|
||||||
}
|
}
|
||||||
|
|
||||||
// handleMACAddressBC takes care of backward-compatibility for the container-wide MAC address by mutating the
|
|
||||||
// networkingConfig to set the endpoint-specific MACAddress field introduced in API v1.44. It returns a warning message
|
|
||||||
// or an error if the container-wide field was specified for API >= v1.44.
|
|
||||||
func handleMACAddressBC(config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, version string) (string, error) {
|
|
||||||
deprecatedMacAddress := config.MacAddress //nolint:staticcheck // ignore SA1019: field is deprecated, but still used on API < v1.44.
|
|
||||||
|
|
||||||
// For older versions of the API, migrate the container-wide MAC address to EndpointsConfig.
|
|
||||||
if versions.LessThan(version, "1.44") {
|
|
||||||
if deprecatedMacAddress == "" {
|
|
||||||
// If a MAC address is supplied in EndpointsConfig, discard it because the old API
|
|
||||||
// would have ignored it.
|
|
||||||
for _, ep := range networkingConfig.EndpointsConfig {
|
|
||||||
ep.MacAddress = ""
|
|
||||||
}
|
|
||||||
return "", nil
|
|
||||||
}
|
|
||||||
if !hostConfig.NetworkMode.IsBridge() && !hostConfig.NetworkMode.IsUserDefined() {
|
|
||||||
return "", runconfig.ErrConflictContainerNetworkAndMac
|
|
||||||
}
|
|
||||||
|
|
||||||
// There cannot be more than one entry in EndpointsConfig with API < 1.44.
|
|
||||||
|
|
||||||
// If there's no EndpointsConfig, create a place to store the configured address. It is
|
|
||||||
// safe to use NetworkMode as the network name, whether it's a name or id/short-id, as
|
|
||||||
// it will be normalised later and there is no other EndpointSettings object that might
|
|
||||||
// refer to this network/endpoint.
|
|
||||||
if len(networkingConfig.EndpointsConfig) == 0 {
|
|
||||||
nwName := hostConfig.NetworkMode.NetworkName()
|
|
||||||
networkingConfig.EndpointsConfig[nwName] = &network.EndpointSettings{}
|
|
||||||
}
|
|
||||||
// There's exactly one network in EndpointsConfig, either from the API or just-created.
|
|
||||||
// Migrate the container-wide setting to it.
|
|
||||||
// No need to check for a match between NetworkMode and the names/ids in EndpointsConfig,
|
|
||||||
// the old version of the API would have applied the address to this network anyway.
|
|
||||||
for _, ep := range networkingConfig.EndpointsConfig {
|
|
||||||
ep.MacAddress = deprecatedMacAddress
|
|
||||||
}
|
|
||||||
return "", nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// The container-wide MacAddress parameter is deprecated and should now be specified in EndpointsConfig.
|
|
||||||
if deprecatedMacAddress == "" {
|
|
||||||
return "", nil
|
|
||||||
}
|
|
||||||
var warning string
|
|
||||||
if hostConfig.NetworkMode.IsBridge() || hostConfig.NetworkMode.IsUserDefined() {
|
|
||||||
nwName := hostConfig.NetworkMode.NetworkName()
|
|
||||||
// If there's no endpoint config, create a place to store the configured address.
|
|
||||||
if len(networkingConfig.EndpointsConfig) == 0 {
|
|
||||||
networkingConfig.EndpointsConfig[nwName] = &network.EndpointSettings{
|
|
||||||
MacAddress: deprecatedMacAddress,
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// There is existing endpoint config - if it's not indexed by NetworkMode.Name(), we
|
|
||||||
// can't tell which network the container-wide settings was intended for. NetworkMode,
|
|
||||||
// the keys in EndpointsConfig and the NetworkID in EndpointsConfig may mix network
|
|
||||||
// name/id/short-id. It's not safe to create EndpointsConfig under the NetworkMode
|
|
||||||
// name to store the container-wide MAC address, because that may result in two sets
|
|
||||||
// of EndpointsConfig for the same network and one set will be discarded later. So,
|
|
||||||
// reject the request ...
|
|
||||||
ep, ok := networkingConfig.EndpointsConfig[nwName]
|
|
||||||
if !ok {
|
|
||||||
return "", errdefs.InvalidParameter(errors.New("if a container-wide MAC address is supplied, HostConfig.NetworkMode must match the identity of a network in NetworkSettings.Networks"))
|
|
||||||
}
|
|
||||||
// ep is the endpoint that needs the container-wide MAC address; migrate the address
|
|
||||||
// to it, or bail out if there's a mismatch.
|
|
||||||
if ep.MacAddress == "" {
|
|
||||||
ep.MacAddress = deprecatedMacAddress
|
|
||||||
} else if ep.MacAddress != deprecatedMacAddress {
|
|
||||||
return "", errdefs.InvalidParameter(errors.New("the container-wide MAC address must match the endpoint-specific MAC address for the main network, or be left empty"))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
warning = "The container-wide MacAddress field is now deprecated. It should be specified in EndpointsConfig instead."
|
|
||||||
config.MacAddress = "" //nolint:staticcheck // ignore SA1019: field is deprecated, but still used on API < v1.44.
|
|
||||||
|
|
||||||
return warning, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *containerRouter) deleteContainers(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
func (s *containerRouter) deleteContainers(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||||
if err := httputils.ParseForm(r); err != nil {
|
if err := httputils.ParseForm(r); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
name := vars["name"]
|
name := vars["name"]
|
||||||
config := &backend.ContainerRmConfig{
|
config := &types.ContainerRmConfig{
|
||||||
ForceRemove: httputils.BoolValue(r, "force"),
|
ForceRemove: httputils.BoolValue(r, "force"),
|
||||||
RemoveVolume: httputils.BoolValue(r, "v"),
|
RemoveVolume: httputils.BoolValue(r, "v"),
|
||||||
RemoveLink: httputils.BoolValue(r, "link"),
|
RemoveLink: httputils.BoolValue(r, "link"),
|
||||||
|
@ -774,8 +566,7 @@ func (s *containerRouter) postContainersAttach(ctx context.Context, w http.Respo
|
||||||
return errdefs.InvalidParameter(errors.Errorf("error attaching to container %s, hijack connection missing", containerName))
|
return errdefs.InvalidParameter(errors.Errorf("error attaching to container %s, hijack connection missing", containerName))
|
||||||
}
|
}
|
||||||
|
|
||||||
contentType := types.MediaTypeRawStream
|
setupStreams := func() (io.ReadCloser, io.Writer, io.Writer, error) {
|
||||||
setupStreams := func(multiplexed bool) (io.ReadCloser, io.Writer, io.Writer, error) {
|
|
||||||
conn, _, err := hijacker.Hijack()
|
conn, _, err := hijacker.Hijack()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, nil, err
|
return nil, nil, nil, err
|
||||||
|
@ -785,10 +576,7 @@ func (s *containerRouter) postContainersAttach(ctx context.Context, w http.Respo
|
||||||
conn.Write([]byte{})
|
conn.Write([]byte{})
|
||||||
|
|
||||||
if upgrade {
|
if upgrade {
|
||||||
if multiplexed && versions.GreaterThanOrEqualTo(httputils.VersionFromContext(ctx), "1.42") {
|
fmt.Fprintf(conn, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n")
|
||||||
contentType = types.MediaTypeMultiplexedStream
|
|
||||||
}
|
|
||||||
fmt.Fprintf(conn, "HTTP/1.1 101 UPGRADED\r\nContent-Type: "+contentType+"\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n")
|
|
||||||
} else {
|
} else {
|
||||||
fmt.Fprintf(conn, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n")
|
fmt.Fprintf(conn, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n")
|
||||||
}
|
}
|
||||||
|
@ -812,16 +600,16 @@ func (s *containerRouter) postContainersAttach(ctx context.Context, w http.Respo
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = s.backend.ContainerAttach(containerName, attachConfig); err != nil {
|
if err = s.backend.ContainerAttach(containerName, attachConfig); err != nil {
|
||||||
log.G(ctx).WithError(err).Errorf("Handler for %s %s returned error", r.Method, r.URL.Path)
|
logrus.Errorf("Handler for %s %s returned error: %v", r.Method, r.URL.Path, err)
|
||||||
// Remember to close stream if error happens
|
// Remember to close stream if error happens
|
||||||
conn, _, errHijack := hijacker.Hijack()
|
conn, _, errHijack := hijacker.Hijack()
|
||||||
if errHijack != nil {
|
if errHijack == nil {
|
||||||
log.G(ctx).WithError(err).Errorf("Handler for %s %s: unable to close stream; error when hijacking connection", r.Method, r.URL.Path)
|
statusCode := errdefs.GetHTTPErrorStatusCode(err)
|
||||||
} else {
|
|
||||||
statusCode := httpstatus.FromError(err)
|
|
||||||
statusText := http.StatusText(statusCode)
|
statusText := http.StatusText(statusCode)
|
||||||
fmt.Fprintf(conn, "HTTP/1.1 %d %s\r\nContent-Type: %s\r\n\r\n%s\r\n", statusCode, statusText, contentType, err.Error())
|
fmt.Fprintf(conn, "HTTP/1.1 %d %s\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n%s\r\n", statusCode, statusText, err.Error())
|
||||||
httputils.CloseStreams(conn)
|
httputils.CloseStreams(conn)
|
||||||
|
} else {
|
||||||
|
logrus.Errorf("Error Hijacking: %v", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
|
@ -841,7 +629,7 @@ func (s *containerRouter) wsContainersAttach(ctx context.Context, w http.Respons
|
||||||
|
|
||||||
version := httputils.VersionFromContext(ctx)
|
version := httputils.VersionFromContext(ctx)
|
||||||
|
|
||||||
setupStreams := func(multiplexed bool) (io.ReadCloser, io.Writer, io.Writer, error) {
|
setupStreams := func() (io.ReadCloser, io.Writer, io.Writer, error) {
|
||||||
wsChan := make(chan *websocket.Conn)
|
wsChan := make(chan *websocket.Conn)
|
||||||
h := func(conn *websocket.Conn) {
|
h := func(conn *websocket.Conn) {
|
||||||
wsChan <- conn
|
wsChan <- conn
|
||||||
|
@ -863,22 +651,15 @@ func (s *containerRouter) wsContainersAttach(ctx context.Context, w http.Respons
|
||||||
return conn, conn, conn, nil
|
return conn, conn, conn, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
useStdin, useStdout, useStderr := true, true, true
|
|
||||||
if versions.GreaterThanOrEqualTo(version, "1.42") {
|
|
||||||
useStdin = httputils.BoolValue(r, "stdin")
|
|
||||||
useStdout = httputils.BoolValue(r, "stdout")
|
|
||||||
useStderr = httputils.BoolValue(r, "stderr")
|
|
||||||
}
|
|
||||||
|
|
||||||
attachConfig := &backend.ContainerAttachConfig{
|
attachConfig := &backend.ContainerAttachConfig{
|
||||||
GetStreams: setupStreams,
|
GetStreams: setupStreams,
|
||||||
UseStdin: useStdin,
|
|
||||||
UseStdout: useStdout,
|
|
||||||
UseStderr: useStderr,
|
|
||||||
Logs: httputils.BoolValue(r, "logs"),
|
Logs: httputils.BoolValue(r, "logs"),
|
||||||
Stream: httputils.BoolValue(r, "stream"),
|
Stream: httputils.BoolValue(r, "stream"),
|
||||||
DetachKeys: detachKeys,
|
DetachKeys: detachKeys,
|
||||||
MuxStreams: false, // never multiplex, as we rely on websocket to manage distinct streams
|
UseStdin: true,
|
||||||
|
UseStdout: true,
|
||||||
|
UseStderr: true,
|
||||||
|
MuxStreams: false, // TODO: this should be true since it's a single stream for both stdout and stderr
|
||||||
}
|
}
|
||||||
|
|
||||||
err = s.backend.ContainerAttach(containerName, attachConfig)
|
err = s.backend.ContainerAttach(containerName, attachConfig)
|
||||||
|
@ -886,9 +667,9 @@ func (s *containerRouter) wsContainersAttach(ctx context.Context, w http.Respons
|
||||||
select {
|
select {
|
||||||
case <-started:
|
case <-started:
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.G(ctx).Errorf("Error attaching websocket: %s", err)
|
logrus.Errorf("Error attaching websocket: %s", err)
|
||||||
} else {
|
} else {
|
||||||
log.G(ctx).Debug("websocket connection was closed by client")
|
logrus.Debug("websocket connection was closed by client")
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
default:
|
default:
|
||||||
|
@ -903,7 +684,7 @@ func (s *containerRouter) postContainersPrune(ctx context.Context, w http.Respon
|
||||||
|
|
||||||
pruneFilters, err := filters.FromJSON(r.Form.Get("filters"))
|
pruneFilters, err := filters.FromJSON(r.Form.Get("filters"))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return errdefs.InvalidParameter(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
pruneReport, err := s.backend.ContainersPrune(ctx, pruneFilters)
|
pruneReport, err := s.backend.ContainersPrune(ctx, pruneFilters)
|
||||||
|
|
|
@ -1,160 +0,0 @@
|
||||||
package container
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/docker/docker/api/types/container"
|
|
||||||
"github.com/docker/docker/api/types/network"
|
|
||||||
"gotest.tools/v3/assert"
|
|
||||||
is "gotest.tools/v3/assert/cmp"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestHandleMACAddressBC(t *testing.T) {
|
|
||||||
testcases := []struct {
|
|
||||||
name string
|
|
||||||
apiVersion string
|
|
||||||
ctrWideMAC string
|
|
||||||
networkMode container.NetworkMode
|
|
||||||
epConfig map[string]*network.EndpointSettings
|
|
||||||
expEpWithCtrWideMAC string
|
|
||||||
expEpWithNoMAC string
|
|
||||||
expCtrWideMAC string
|
|
||||||
expWarning string
|
|
||||||
expError string
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
name: "old api ctr-wide mac mix id and name",
|
|
||||||
apiVersion: "1.43",
|
|
||||||
ctrWideMAC: "11:22:33:44:55:66",
|
|
||||||
networkMode: "aNetId",
|
|
||||||
epConfig: map[string]*network.EndpointSettings{"aNetName": {}},
|
|
||||||
expEpWithCtrWideMAC: "aNetName",
|
|
||||||
expCtrWideMAC: "11:22:33:44:55:66",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "old api clear ep mac",
|
|
||||||
apiVersion: "1.43",
|
|
||||||
networkMode: "aNetId",
|
|
||||||
epConfig: map[string]*network.EndpointSettings{"aNetName": {MacAddress: "11:22:33:44:55:66"}},
|
|
||||||
expEpWithNoMAC: "aNetName",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "old api no-network ctr-wide mac",
|
|
||||||
apiVersion: "1.43",
|
|
||||||
networkMode: "none",
|
|
||||||
ctrWideMAC: "11:22:33:44:55:66",
|
|
||||||
expError: "conflicting options: mac-address and the network mode",
|
|
||||||
expCtrWideMAC: "11:22:33:44:55:66",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "old api create ep",
|
|
||||||
apiVersion: "1.43",
|
|
||||||
networkMode: "aNetId",
|
|
||||||
ctrWideMAC: "11:22:33:44:55:66",
|
|
||||||
epConfig: map[string]*network.EndpointSettings{},
|
|
||||||
expEpWithCtrWideMAC: "aNetId",
|
|
||||||
expCtrWideMAC: "11:22:33:44:55:66",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "old api migrate ctr-wide mac",
|
|
||||||
apiVersion: "1.43",
|
|
||||||
ctrWideMAC: "11:22:33:44:55:66",
|
|
||||||
networkMode: "aNetName",
|
|
||||||
epConfig: map[string]*network.EndpointSettings{"aNetName": {}},
|
|
||||||
expEpWithCtrWideMAC: "aNetName",
|
|
||||||
expCtrWideMAC: "11:22:33:44:55:66",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "new api no macs",
|
|
||||||
apiVersion: "1.44",
|
|
||||||
networkMode: "aNetId",
|
|
||||||
epConfig: map[string]*network.EndpointSettings{"aNetName": {}},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "new api ep specific mac",
|
|
||||||
apiVersion: "1.44",
|
|
||||||
networkMode: "aNetName",
|
|
||||||
epConfig: map[string]*network.EndpointSettings{"aNetName": {MacAddress: "11:22:33:44:55:66"}},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "new api migrate ctr-wide mac to new ep",
|
|
||||||
apiVersion: "1.44",
|
|
||||||
ctrWideMAC: "11:22:33:44:55:66",
|
|
||||||
networkMode: "aNetName",
|
|
||||||
epConfig: map[string]*network.EndpointSettings{},
|
|
||||||
expEpWithCtrWideMAC: "aNetName",
|
|
||||||
expWarning: "The container-wide MacAddress field is now deprecated",
|
|
||||||
expCtrWideMAC: "",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "new api migrate ctr-wide mac to existing ep",
|
|
||||||
apiVersion: "1.44",
|
|
||||||
ctrWideMAC: "11:22:33:44:55:66",
|
|
||||||
networkMode: "aNetName",
|
|
||||||
epConfig: map[string]*network.EndpointSettings{"aNetName": {}},
|
|
||||||
expEpWithCtrWideMAC: "aNetName",
|
|
||||||
expWarning: "The container-wide MacAddress field is now deprecated",
|
|
||||||
expCtrWideMAC: "",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "new api mode vs name mismatch",
|
|
||||||
apiVersion: "1.44",
|
|
||||||
ctrWideMAC: "11:22:33:44:55:66",
|
|
||||||
networkMode: "aNetId",
|
|
||||||
epConfig: map[string]*network.EndpointSettings{"aNetName": {}},
|
|
||||||
expError: "if a container-wide MAC address is supplied, HostConfig.NetworkMode must match the identity of a network in NetworkSettings.Networks",
|
|
||||||
expCtrWideMAC: "11:22:33:44:55:66",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "new api mac mismatch",
|
|
||||||
apiVersion: "1.44",
|
|
||||||
ctrWideMAC: "11:22:33:44:55:66",
|
|
||||||
networkMode: "aNetName",
|
|
||||||
epConfig: map[string]*network.EndpointSettings{"aNetName": {MacAddress: "00:11:22:33:44:55"}},
|
|
||||||
expError: "the container-wide MAC address must match the endpoint-specific MAC address",
|
|
||||||
expCtrWideMAC: "11:22:33:44:55:66",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tc := range testcases {
|
|
||||||
t.Run(tc.name, func(t *testing.T) {
|
|
||||||
cfg := &container.Config{
|
|
||||||
MacAddress: tc.ctrWideMAC, //nolint:staticcheck // ignore SA1019: field is deprecated, but still used on API < v1.44.
|
|
||||||
}
|
|
||||||
hostCfg := &container.HostConfig{
|
|
||||||
NetworkMode: tc.networkMode,
|
|
||||||
}
|
|
||||||
epConfig := make(map[string]*network.EndpointSettings, len(tc.epConfig))
|
|
||||||
for k, v := range tc.epConfig {
|
|
||||||
v := v
|
|
||||||
epConfig[k] = v
|
|
||||||
}
|
|
||||||
netCfg := &network.NetworkingConfig{
|
|
||||||
EndpointsConfig: epConfig,
|
|
||||||
}
|
|
||||||
|
|
||||||
warning, err := handleMACAddressBC(cfg, hostCfg, netCfg, tc.apiVersion)
|
|
||||||
|
|
||||||
if tc.expError == "" {
|
|
||||||
assert.Check(t, err)
|
|
||||||
} else {
|
|
||||||
assert.Check(t, is.ErrorContains(err, tc.expError))
|
|
||||||
}
|
|
||||||
if tc.expWarning == "" {
|
|
||||||
assert.Check(t, is.Equal(warning, ""))
|
|
||||||
} else {
|
|
||||||
assert.Check(t, is.Contains(warning, tc.expWarning))
|
|
||||||
}
|
|
||||||
if tc.expEpWithCtrWideMAC != "" {
|
|
||||||
got := netCfg.EndpointsConfig[tc.expEpWithCtrWideMAC].MacAddress
|
|
||||||
assert.Check(t, is.Equal(got, tc.ctrWideMAC))
|
|
||||||
}
|
|
||||||
if tc.expEpWithNoMAC != "" {
|
|
||||||
got := netCfg.EndpointsConfig[tc.expEpWithNoMAC].MacAddress
|
|
||||||
assert.Check(t, is.Equal(got, ""))
|
|
||||||
}
|
|
||||||
gotCtrWideMAC := cfg.MacAddress //nolint:staticcheck // ignore SA1019: field is deprecated, but still used on API < v1.44.
|
|
||||||
assert.Check(t, is.Equal(gotCtrWideMAC, tc.expCtrWideMAC))
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -6,15 +6,61 @@ import (
|
||||||
"context"
|
"context"
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
"io"
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
|
||||||
"github.com/docker/docker/api/server/httputils"
|
"github.com/docker/docker/api/server/httputils"
|
||||||
"github.com/docker/docker/api/types"
|
"github.com/docker/docker/api/types"
|
||||||
|
"github.com/docker/docker/api/types/versions"
|
||||||
|
"github.com/docker/docker/errdefs"
|
||||||
gddohttputil "github.com/golang/gddo/httputil"
|
gddohttputil "github.com/golang/gddo/httputil"
|
||||||
)
|
)
|
||||||
|
|
||||||
// setContainerPathStatHeader encodes the stat to JSON, base64 encode, and place in a header.
|
type pathError struct{}
|
||||||
|
|
||||||
|
func (pathError) Error() string {
|
||||||
|
return "Path cannot be empty"
|
||||||
|
}
|
||||||
|
|
||||||
|
func (pathError) InvalidParameter() {}
|
||||||
|
|
||||||
|
// postContainersCopy is deprecated in favor of getContainersArchive.
|
||||||
|
func (s *containerRouter) postContainersCopy(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||||
|
// Deprecated since 1.8, Errors out since 1.12
|
||||||
|
version := httputils.VersionFromContext(ctx)
|
||||||
|
if versions.GreaterThanOrEqualTo(version, "1.24") {
|
||||||
|
w.WriteHeader(http.StatusNotFound)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if err := httputils.CheckForJSON(r); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
cfg := types.CopyConfig{}
|
||||||
|
if err := json.NewDecoder(r.Body).Decode(&cfg); err != nil {
|
||||||
|
if err == io.EOF {
|
||||||
|
return errdefs.InvalidParameter(errors.New("got EOF while reading request body"))
|
||||||
|
}
|
||||||
|
return errdefs.InvalidParameter(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if cfg.Resource == "" {
|
||||||
|
return pathError{}
|
||||||
|
}
|
||||||
|
|
||||||
|
data, err := s.backend.ContainerCopy(vars["name"], cfg.Resource)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer data.Close()
|
||||||
|
|
||||||
|
w.Header().Set("Content-Type", "application/x-tar")
|
||||||
|
_, err = io.Copy(w, data)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// // Encode the stat to JSON, base64 encode, and place in a header.
|
||||||
func setContainerPathStatHeader(stat *types.ContainerPathStat, header http.Header) error {
|
func setContainerPathStatHeader(stat *types.ContainerPathStat, header http.Header) error {
|
||||||
statJSON, err := json.Marshal(stat)
|
statJSON, err := json.Marshal(stat)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
|
@ -2,18 +2,19 @@ package container // import "github.com/docker/docker/api/server/router/containe
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
|
||||||
"github.com/containerd/log"
|
|
||||||
"github.com/docker/docker/api/server/httputils"
|
"github.com/docker/docker/api/server/httputils"
|
||||||
"github.com/docker/docker/api/types"
|
"github.com/docker/docker/api/types"
|
||||||
"github.com/docker/docker/api/types/container"
|
|
||||||
"github.com/docker/docker/api/types/versions"
|
"github.com/docker/docker/api/types/versions"
|
||||||
"github.com/docker/docker/errdefs"
|
"github.com/docker/docker/errdefs"
|
||||||
"github.com/docker/docker/pkg/stdcopy"
|
"github.com/docker/docker/pkg/stdcopy"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
)
|
)
|
||||||
|
|
||||||
func (s *containerRouter) getExecByID(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
func (s *containerRouter) getExecByID(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||||
|
@ -37,26 +38,27 @@ func (s *containerRouter) postContainerExecCreate(ctx context.Context, w http.Re
|
||||||
if err := httputils.ParseForm(r); err != nil {
|
if err := httputils.ParseForm(r); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
if err := httputils.CheckForJSON(r); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
name := vars["name"]
|
||||||
|
|
||||||
execConfig := &types.ExecConfig{}
|
execConfig := &types.ExecConfig{}
|
||||||
if err := httputils.ReadJSON(r, execConfig); err != nil {
|
if err := json.NewDecoder(r.Body).Decode(execConfig); err != nil {
|
||||||
return err
|
if err == io.EOF {
|
||||||
|
return errdefs.InvalidParameter(errors.New("got EOF while reading request body"))
|
||||||
|
}
|
||||||
|
return errdefs.InvalidParameter(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(execConfig.Cmd) == 0 {
|
if len(execConfig.Cmd) == 0 {
|
||||||
return execCommandError{}
|
return execCommandError{}
|
||||||
}
|
}
|
||||||
|
|
||||||
version := httputils.VersionFromContext(ctx)
|
|
||||||
if versions.LessThan(version, "1.42") {
|
|
||||||
// Not supported by API versions before 1.42
|
|
||||||
execConfig.ConsoleSize = nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Register an instance of Exec in container.
|
// Register an instance of Exec in container.
|
||||||
id, err := s.backend.ContainerExecCreate(vars["name"], execConfig)
|
id, err := s.backend.ContainerExecCreate(name, execConfig)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.G(ctx).Errorf("Error setting up exec command in container %s: %v", vars["name"], err)
|
logrus.Errorf("Error setting up exec command in container %s: %v", name, err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -71,6 +73,13 @@ func (s *containerRouter) postContainerExecStart(ctx context.Context, w http.Res
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
version := httputils.VersionFromContext(ctx)
|
||||||
|
if versions.GreaterThan(version, "1.21") {
|
||||||
|
if err := httputils.CheckForJSON(r); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
execName = vars["name"]
|
execName = vars["name"]
|
||||||
stdin, inStream io.ReadCloser
|
stdin, inStream io.ReadCloser
|
||||||
|
@ -78,28 +87,17 @@ func (s *containerRouter) postContainerExecStart(ctx context.Context, w http.Res
|
||||||
)
|
)
|
||||||
|
|
||||||
execStartCheck := &types.ExecStartCheck{}
|
execStartCheck := &types.ExecStartCheck{}
|
||||||
if err := httputils.ReadJSON(r, execStartCheck); err != nil {
|
if err := json.NewDecoder(r.Body).Decode(execStartCheck); err != nil {
|
||||||
return err
|
if err == io.EOF {
|
||||||
|
return errdefs.InvalidParameter(errors.New("got EOF while reading request body"))
|
||||||
|
}
|
||||||
|
return errdefs.InvalidParameter(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if exists, err := s.backend.ExecExists(execName); !exists {
|
if exists, err := s.backend.ExecExists(execName); !exists {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if execStartCheck.ConsoleSize != nil {
|
|
||||||
version := httputils.VersionFromContext(ctx)
|
|
||||||
|
|
||||||
// Not supported before 1.42
|
|
||||||
if versions.LessThan(version, "1.42") {
|
|
||||||
execStartCheck.ConsoleSize = nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// No console without tty
|
|
||||||
if !execStartCheck.Tty {
|
|
||||||
execStartCheck.ConsoleSize = nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if !execStartCheck.Detach {
|
if !execStartCheck.Detach {
|
||||||
var err error
|
var err error
|
||||||
// Setting up the streaming http interface.
|
// Setting up the streaming http interface.
|
||||||
|
@ -110,11 +108,7 @@ func (s *containerRouter) postContainerExecStart(ctx context.Context, w http.Res
|
||||||
defer httputils.CloseStreams(inStream, outStream)
|
defer httputils.CloseStreams(inStream, outStream)
|
||||||
|
|
||||||
if _, ok := r.Header["Upgrade"]; ok {
|
if _, ok := r.Header["Upgrade"]; ok {
|
||||||
contentType := types.MediaTypeRawStream
|
fmt.Fprint(outStream, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n")
|
||||||
if !execStartCheck.Tty && versions.GreaterThanOrEqualTo(httputils.VersionFromContext(ctx), "1.42") {
|
|
||||||
contentType = types.MediaTypeMultiplexedStream
|
|
||||||
}
|
|
||||||
fmt.Fprint(outStream, "HTTP/1.1 101 UPGRADED\r\nContent-Type: "+contentType+"\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n")
|
|
||||||
} else {
|
} else {
|
||||||
fmt.Fprint(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n")
|
fmt.Fprint(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n")
|
||||||
}
|
}
|
||||||
|
@ -133,21 +127,14 @@ func (s *containerRouter) postContainerExecStart(ctx context.Context, w http.Res
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
options := container.ExecStartOptions{
|
|
||||||
Stdin: stdin,
|
|
||||||
Stdout: stdout,
|
|
||||||
Stderr: stderr,
|
|
||||||
ConsoleSize: execStartCheck.ConsoleSize,
|
|
||||||
}
|
|
||||||
|
|
||||||
// Now run the user process in container.
|
// Now run the user process in container.
|
||||||
// Maybe we should we pass ctx here if we're not detaching?
|
// Maybe we should we pass ctx here if we're not detaching?
|
||||||
if err := s.backend.ContainerExecStart(context.Background(), execName, options); err != nil {
|
if err := s.backend.ContainerExecStart(context.Background(), execName, stdin, stdout, stderr); err != nil {
|
||||||
if execStartCheck.Detach {
|
if execStartCheck.Detach {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
stdout.Write([]byte(err.Error() + "\r\n"))
|
stdout.Write([]byte(err.Error() + "\r\n"))
|
||||||
log.G(ctx).Errorf("Error running exec %s in container: %v", execName, err)
|
logrus.Errorf("Error running exec %s in container: %v", execName, err)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -12,7 +12,7 @@ func (s *containerRouter) getContainersByName(ctx context.Context, w http.Respon
|
||||||
displaySize := httputils.BoolValue(r, "size")
|
displaySize := httputils.BoolValue(r, "size")
|
||||||
|
|
||||||
version := httputils.VersionFromContext(ctx)
|
version := httputils.VersionFromContext(ctx)
|
||||||
json, err := s.backend.ContainerInspect(ctx, vars["name"], displaySize, version)
|
json, err := s.backend.ContainerInspect(vars["name"], displaySize, version)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
|
@ -3,13 +3,13 @@ package distribution // import "github.com/docker/docker/api/server/router/distr
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
|
||||||
"github.com/distribution/reference"
|
|
||||||
"github.com/docker/distribution"
|
"github.com/docker/distribution"
|
||||||
"github.com/docker/docker/api/types/registry"
|
"github.com/docker/distribution/reference"
|
||||||
|
"github.com/docker/docker/api/types"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Backend is all the methods that need to be implemented
|
// Backend is all the methods that need to be implemented
|
||||||
// to provide image specific functionality.
|
// to provide image specific functionality.
|
||||||
type Backend interface {
|
type Backend interface {
|
||||||
GetRepositories(context.Context, reference.Named, *registry.AuthConfig) ([]distribution.Repository, error)
|
GetRepository(context.Context, reference.Named, *types.AuthConfig) (distribution.Repository, bool, error)
|
||||||
}
|
}
|
||||||
|
|
|
@ -2,20 +2,20 @@ package distribution // import "github.com/docker/docker/api/server/router/distr
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"encoding/base64"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"net/http"
|
"net/http"
|
||||||
"os"
|
"strings"
|
||||||
|
|
||||||
"github.com/distribution/reference"
|
|
||||||
"github.com/docker/distribution"
|
|
||||||
"github.com/docker/distribution/manifest/manifestlist"
|
"github.com/docker/distribution/manifest/manifestlist"
|
||||||
"github.com/docker/distribution/manifest/schema1"
|
"github.com/docker/distribution/manifest/schema1"
|
||||||
"github.com/docker/distribution/manifest/schema2"
|
"github.com/docker/distribution/manifest/schema2"
|
||||||
|
"github.com/docker/distribution/reference"
|
||||||
"github.com/docker/docker/api/server/httputils"
|
"github.com/docker/docker/api/server/httputils"
|
||||||
"github.com/docker/docker/api/types/registry"
|
"github.com/docker/docker/api/types"
|
||||||
distributionpkg "github.com/docker/docker/distribution"
|
registrytypes "github.com/docker/docker/api/types/registry"
|
||||||
"github.com/docker/docker/errdefs"
|
"github.com/docker/docker/errdefs"
|
||||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
"github.com/opencontainers/image-spec/specs-go/v1"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -26,10 +26,25 @@ func (s *distributionRouter) getDistributionInfo(ctx context.Context, w http.Res
|
||||||
|
|
||||||
w.Header().Set("Content-Type", "application/json")
|
w.Header().Set("Content-Type", "application/json")
|
||||||
|
|
||||||
imgName := vars["name"]
|
var (
|
||||||
|
config = &types.AuthConfig{}
|
||||||
|
authEncoded = r.Header.Get("X-Registry-Auth")
|
||||||
|
distributionInspect registrytypes.DistributionInspect
|
||||||
|
)
|
||||||
|
|
||||||
|
if authEncoded != "" {
|
||||||
|
authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
|
||||||
|
if err := json.NewDecoder(authJSON).Decode(&config); err != nil {
|
||||||
|
// for a search it is not an error if no auth was given
|
||||||
|
// to increase compatibility with the existing api it is defaulting to be empty
|
||||||
|
config = &types.AuthConfig{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
image := vars["name"]
|
||||||
|
|
||||||
// TODO why is reference.ParseAnyReference() / reference.ParseNormalizedNamed() not using the reference.ErrTagInvalidFormat (and so on) errors?
|
// TODO why is reference.ParseAnyReference() / reference.ParseNormalizedNamed() not using the reference.ErrTagInvalidFormat (and so on) errors?
|
||||||
ref, err := reference.ParseAnyReference(imgName)
|
ref, err := reference.ParseAnyReference(image)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errdefs.InvalidParameter(err)
|
return errdefs.InvalidParameter(err)
|
||||||
}
|
}
|
||||||
|
@ -39,58 +54,28 @@ func (s *distributionRouter) getDistributionInfo(ctx context.Context, w http.Res
|
||||||
// full image ID
|
// full image ID
|
||||||
return errors.Errorf("no manifest found for full image ID")
|
return errors.Errorf("no manifest found for full image ID")
|
||||||
}
|
}
|
||||||
return errdefs.InvalidParameter(errors.Errorf("unknown image reference format: %s", imgName))
|
return errdefs.InvalidParameter(errors.Errorf("unknown image reference format: %s", image))
|
||||||
}
|
}
|
||||||
|
|
||||||
// For a search it is not an error if no auth was given. Ignore invalid
|
distrepo, _, err := s.backend.GetRepository(ctx, namedRef, config)
|
||||||
// AuthConfig to increase compatibility with the existing API.
|
|
||||||
authConfig, _ := registry.DecodeAuthConfig(r.Header.Get(registry.AuthHeader))
|
|
||||||
repos, err := s.backend.GetRepositories(ctx, namedRef, authConfig)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
blobsrvc := distrepo.Blobs(ctx)
|
||||||
|
|
||||||
// Fetch the manifest; if a mirror is configured, try the mirror first,
|
|
||||||
// but continue with upstream on failure.
|
|
||||||
//
|
|
||||||
// FIXME(thaJeztah): construct "repositories" on-demand;
|
|
||||||
// GetRepositories() will attempt to connect to all endpoints (registries),
|
|
||||||
// but we may only need the first one if it contains the manifest we're
|
|
||||||
// looking for, or if the configured mirror is a pull-through mirror.
|
|
||||||
//
|
|
||||||
// Logic for this could be implemented similar to "distribution.Pull()",
|
|
||||||
// which uses the "pullEndpoints" utility to iterate over the list
|
|
||||||
// of endpoints;
|
|
||||||
//
|
|
||||||
// - https://github.com/moby/moby/blob/12c7411b6b7314bef130cd59f1c7384a7db06d0b/distribution/pull.go#L17-L31
|
|
||||||
// - https://github.com/moby/moby/blob/12c7411b6b7314bef130cd59f1c7384a7db06d0b/distribution/pull.go#L76-L152
|
|
||||||
var lastErr error
|
|
||||||
for _, repo := range repos {
|
|
||||||
distributionInspect, err := s.fetchManifest(ctx, repo, namedRef)
|
|
||||||
if err != nil {
|
|
||||||
lastErr = err
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
return httputils.WriteJSON(w, http.StatusOK, distributionInspect)
|
|
||||||
}
|
|
||||||
return lastErr
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *distributionRouter) fetchManifest(ctx context.Context, distrepo distribution.Repository, namedRef reference.Named) (registry.DistributionInspect, error) {
|
|
||||||
var distributionInspect registry.DistributionInspect
|
|
||||||
if canonicalRef, ok := namedRef.(reference.Canonical); !ok {
|
if canonicalRef, ok := namedRef.(reference.Canonical); !ok {
|
||||||
namedRef = reference.TagNameOnly(namedRef)
|
namedRef = reference.TagNameOnly(namedRef)
|
||||||
|
|
||||||
taggedRef, ok := namedRef.(reference.NamedTagged)
|
taggedRef, ok := namedRef.(reference.NamedTagged)
|
||||||
if !ok {
|
if !ok {
|
||||||
return registry.DistributionInspect{}, errdefs.InvalidParameter(errors.Errorf("image reference not tagged: %s", namedRef))
|
return errdefs.InvalidParameter(errors.Errorf("image reference not tagged: %s", image))
|
||||||
}
|
}
|
||||||
|
|
||||||
descriptor, err := distrepo.Tags(ctx).Get(ctx, taggedRef.Tag())
|
descriptor, err := distrepo.Tags(ctx).Get(ctx, taggedRef.Tag())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return registry.DistributionInspect{}, err
|
return err
|
||||||
}
|
}
|
||||||
distributionInspect.Descriptor = ocispec.Descriptor{
|
distributionInspect.Descriptor = v1.Descriptor{
|
||||||
MediaType: descriptor.MediaType,
|
MediaType: descriptor.MediaType,
|
||||||
Digest: descriptor.Digest,
|
Digest: descriptor.Digest,
|
||||||
Size: descriptor.Size,
|
Size: descriptor.Size,
|
||||||
|
@ -105,7 +90,7 @@ func (s *distributionRouter) fetchManifest(ctx context.Context, distrepo distrib
|
||||||
// we have a digest, so we can retrieve the manifest
|
// we have a digest, so we can retrieve the manifest
|
||||||
mnfstsrvc, err := distrepo.Manifests(ctx)
|
mnfstsrvc, err := distrepo.Manifests(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return registry.DistributionInspect{}, err
|
return err
|
||||||
}
|
}
|
||||||
mnfst, err := mnfstsrvc.Get(ctx, distributionInspect.Descriptor.Digest)
|
mnfst, err := mnfstsrvc.Get(ctx, distributionInspect.Descriptor.Digest)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -117,14 +102,14 @@ func (s *distributionRouter) fetchManifest(ctx context.Context, distrepo distrib
|
||||||
reference.ErrNameEmpty,
|
reference.ErrNameEmpty,
|
||||||
reference.ErrNameTooLong,
|
reference.ErrNameTooLong,
|
||||||
reference.ErrNameNotCanonical:
|
reference.ErrNameNotCanonical:
|
||||||
return registry.DistributionInspect{}, errdefs.InvalidParameter(err)
|
return errdefs.InvalidParameter(err)
|
||||||
}
|
}
|
||||||
return registry.DistributionInspect{}, err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
mediaType, payload, err := mnfst.Payload()
|
mediaType, payload, err := mnfst.Payload()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return registry.DistributionInspect{}, err
|
return err
|
||||||
}
|
}
|
||||||
// update MediaType because registry might return something incorrect
|
// update MediaType because registry might return something incorrect
|
||||||
distributionInspect.Descriptor.MediaType = mediaType
|
distributionInspect.Descriptor.MediaType = mediaType
|
||||||
|
@ -136,7 +121,7 @@ func (s *distributionRouter) fetchManifest(ctx context.Context, distrepo distrib
|
||||||
switch mnfstObj := mnfst.(type) {
|
switch mnfstObj := mnfst.(type) {
|
||||||
case *manifestlist.DeserializedManifestList:
|
case *manifestlist.DeserializedManifestList:
|
||||||
for _, m := range mnfstObj.Manifests {
|
for _, m := range mnfstObj.Manifests {
|
||||||
distributionInspect.Platforms = append(distributionInspect.Platforms, ocispec.Platform{
|
distributionInspect.Platforms = append(distributionInspect.Platforms, v1.Platform{
|
||||||
Architecture: m.Platform.Architecture,
|
Architecture: m.Platform.Architecture,
|
||||||
OS: m.Platform.OS,
|
OS: m.Platform.OS,
|
||||||
OSVersion: m.Platform.OSVersion,
|
OSVersion: m.Platform.OSVersion,
|
||||||
|
@ -145,9 +130,8 @@ func (s *distributionRouter) fetchManifest(ctx context.Context, distrepo distrib
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
case *schema2.DeserializedManifest:
|
case *schema2.DeserializedManifest:
|
||||||
blobStore := distrepo.Blobs(ctx)
|
configJSON, err := blobsrvc.Get(ctx, mnfstObj.Config.Digest)
|
||||||
configJSON, err := blobStore.Get(ctx, mnfstObj.Config.Digest)
|
var platform v1.Platform
|
||||||
var platform ocispec.Platform
|
|
||||||
if err == nil {
|
if err == nil {
|
||||||
err := json.Unmarshal(configJSON, &platform)
|
err := json.Unmarshal(configJSON, &platform)
|
||||||
if err == nil && (platform.OS != "" || platform.Architecture != "") {
|
if err == nil && (platform.OS != "" || platform.Architecture != "") {
|
||||||
|
@ -155,14 +139,12 @@ func (s *distributionRouter) fetchManifest(ctx context.Context, distrepo distrib
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
case *schema1.SignedManifest:
|
case *schema1.SignedManifest:
|
||||||
if os.Getenv("DOCKER_ENABLE_DEPRECATED_PULL_SCHEMA_1_IMAGE") == "" {
|
platform := v1.Platform{
|
||||||
return registry.DistributionInspect{}, distributionpkg.DeprecatedSchema1ImageError(namedRef)
|
|
||||||
}
|
|
||||||
platform := ocispec.Platform{
|
|
||||||
Architecture: mnfstObj.Architecture,
|
Architecture: mnfstObj.Architecture,
|
||||||
OS: "linux",
|
OS: "linux",
|
||||||
}
|
}
|
||||||
distributionInspect.Platforms = append(distributionInspect.Platforms, platform)
|
distributionInspect.Platforms = append(distributionInspect.Platforms, platform)
|
||||||
}
|
}
|
||||||
return distributionInspect, nil
|
|
||||||
|
return httputils.WriteJSON(w, http.StatusOK, distributionInspect)
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,13 +1,7 @@
|
||||||
package grpc // import "github.com/docker/docker/api/server/router/grpc"
|
package grpc // import "github.com/docker/docker/api/server/router/grpc"
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/docker/docker/api/server/router"
|
"github.com/docker/docker/api/server/router"
|
||||||
grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
|
|
||||||
"github.com/moby/buildkit/util/grpcerrors"
|
|
||||||
"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
|
|
||||||
"golang.org/x/net/http2"
|
"golang.org/x/net/http2"
|
||||||
"google.golang.org/grpc"
|
"google.golang.org/grpc"
|
||||||
)
|
)
|
||||||
|
@ -20,12 +14,9 @@ type grpcRouter struct {
|
||||||
|
|
||||||
// NewRouter initializes a new grpc http router
|
// NewRouter initializes a new grpc http router
|
||||||
func NewRouter(backends ...Backend) router.Router {
|
func NewRouter(backends ...Backend) router.Router {
|
||||||
unary := grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(unaryInterceptor(), grpcerrors.UnaryServerInterceptor))
|
|
||||||
stream := grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(otelgrpc.StreamServerInterceptor(), grpcerrors.StreamServerInterceptor)) //nolint:staticcheck // TODO(thaJeztah): ignore SA1019 for deprecated options: see https://github.com/moby/moby/issues/47437
|
|
||||||
|
|
||||||
r := &grpcRouter{
|
r := &grpcRouter{
|
||||||
h2Server: &http2.Server{},
|
h2Server: &http2.Server{},
|
||||||
grpcServer: grpc.NewServer(unary, stream),
|
grpcServer: grpc.NewServer(),
|
||||||
}
|
}
|
||||||
for _, b := range backends {
|
for _, b := range backends {
|
||||||
b.RegisterGRPC(r.grpcServer)
|
b.RegisterGRPC(r.grpcServer)
|
||||||
|
@ -35,26 +26,12 @@ func NewRouter(backends ...Backend) router.Router {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Routes returns the available routers to the session controller
|
// Routes returns the available routers to the session controller
|
||||||
func (gr *grpcRouter) Routes() []router.Route {
|
func (r *grpcRouter) Routes() []router.Route {
|
||||||
return gr.routes
|
return r.routes
|
||||||
}
|
}
|
||||||
|
|
||||||
func (gr *grpcRouter) initRoutes() {
|
func (r *grpcRouter) initRoutes() {
|
||||||
gr.routes = []router.Route{
|
r.routes = []router.Route{
|
||||||
router.NewPostRoute("/grpc", gr.serveGRPC),
|
router.NewPostRoute("/grpc", r.serveGRPC),
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func unaryInterceptor() grpc.UnaryServerInterceptor {
|
|
||||||
withTrace := otelgrpc.UnaryServerInterceptor() //nolint:staticcheck // TODO(thaJeztah): ignore SA1019 for deprecated options: see https://github.com/moby/moby/issues/47437
|
|
||||||
|
|
||||||
return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) {
|
|
||||||
// This method is used by the clients to send their traces to buildkit so they can be included
|
|
||||||
// in the daemon trace and stored in the build history record. This method can not be traced because
|
|
||||||
// it would cause an infinite loop.
|
|
||||||
if strings.HasSuffix(info.FullMethod, "opentelemetry.proto.collector.trace.v1.TraceService/Export") {
|
|
||||||
return handler(ctx, req)
|
|
||||||
}
|
|
||||||
return withTrace(ctx, req, info, handler)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -4,14 +4,11 @@ import (
|
||||||
"context"
|
"context"
|
||||||
"io"
|
"io"
|
||||||
|
|
||||||
"github.com/distribution/reference"
|
|
||||||
"github.com/docker/docker/api/types"
|
"github.com/docker/docker/api/types"
|
||||||
"github.com/docker/docker/api/types/backend"
|
|
||||||
"github.com/docker/docker/api/types/filters"
|
"github.com/docker/docker/api/types/filters"
|
||||||
"github.com/docker/docker/api/types/image"
|
"github.com/docker/docker/api/types/image"
|
||||||
"github.com/docker/docker/api/types/registry"
|
"github.com/docker/docker/api/types/registry"
|
||||||
dockerimage "github.com/docker/docker/image"
|
specs "github.com/opencontainers/image-spec/specs-go/v1"
|
||||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Backend is all the methods that need to be implemented
|
// Backend is all the methods that need to be implemented
|
||||||
|
@ -23,25 +20,22 @@ type Backend interface {
|
||||||
}
|
}
|
||||||
|
|
||||||
type imageBackend interface {
|
type imageBackend interface {
|
||||||
ImageDelete(ctx context.Context, imageRef string, force, prune bool) ([]image.DeleteResponse, error)
|
ImageDelete(imageRef string, force, prune bool) ([]types.ImageDeleteResponseItem, error)
|
||||||
ImageHistory(ctx context.Context, imageName string) ([]*image.HistoryResponseItem, error)
|
ImageHistory(imageName string) ([]*image.HistoryResponseItem, error)
|
||||||
Images(ctx context.Context, opts image.ListOptions) ([]*image.Summary, error)
|
Images(imageFilters filters.Args, all bool, withExtraAttrs bool) ([]*types.ImageSummary, error)
|
||||||
GetImage(ctx context.Context, refOrID string, options backend.GetImageOpts) (*dockerimage.Image, error)
|
LookupImage(name string) (*types.ImageInspect, error)
|
||||||
TagImage(ctx context.Context, id dockerimage.ID, newRef reference.Named) error
|
TagImage(imageName, repository, tag string) (string, error)
|
||||||
ImagesPrune(ctx context.Context, pruneFilters filters.Args) (*types.ImagesPruneReport, error)
|
ImagesPrune(ctx context.Context, pruneFilters filters.Args) (*types.ImagesPruneReport, error)
|
||||||
}
|
}
|
||||||
|
|
||||||
type importExportBackend interface {
|
type importExportBackend interface {
|
||||||
LoadImage(ctx context.Context, inTar io.ReadCloser, outStream io.Writer, quiet bool) error
|
LoadImage(inTar io.ReadCloser, outStream io.Writer, quiet bool) error
|
||||||
ImportImage(ctx context.Context, ref reference.Named, platform *ocispec.Platform, msg string, layerReader io.Reader, changes []string) (dockerimage.ID, error)
|
ImportImage(src string, repository, platform string, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error
|
||||||
ExportImage(ctx context.Context, names []string, outStream io.Writer) error
|
ExportImage(names []string, outStream io.Writer) error
|
||||||
}
|
}
|
||||||
|
|
||||||
type registryBackend interface {
|
type registryBackend interface {
|
||||||
PullImage(ctx context.Context, ref reference.Named, platform *ocispec.Platform, metaHeaders map[string][]string, authConfig *registry.AuthConfig, outStream io.Writer) error
|
PullImage(ctx context.Context, image, tag string, platform *specs.Platform, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error
|
||||||
PushImage(ctx context.Context, ref reference.Named, metaHeaders map[string][]string, authConfig *registry.AuthConfig, outStream io.Writer) error
|
PushImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error
|
||||||
}
|
SearchRegistryForImages(ctx context.Context, filtersArgs string, term string, limit int, authConfig *types.AuthConfig, metaHeaders map[string][]string) (*registry.SearchResults, error)
|
||||||
|
|
||||||
type Searcher interface {
|
|
||||||
Search(ctx context.Context, searchFilters filters.Args, term string, limit int, authConfig *registry.AuthConfig, headers map[string][]string) ([]registry.SearchResult, error)
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -2,56 +2,43 @@ package image // import "github.com/docker/docker/api/server/router/image"
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/docker/docker/api/server/router"
|
"github.com/docker/docker/api/server/router"
|
||||||
"github.com/docker/docker/image"
|
|
||||||
"github.com/docker/docker/layer"
|
|
||||||
"github.com/docker/docker/reference"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// imageRouter is a router to talk with the image controller
|
// imageRouter is a router to talk with the image controller
|
||||||
type imageRouter struct {
|
type imageRouter struct {
|
||||||
backend Backend
|
backend Backend
|
||||||
searcher Searcher
|
|
||||||
referenceBackend reference.Store
|
|
||||||
imageStore image.Store
|
|
||||||
layerStore layer.Store
|
|
||||||
routes []router.Route
|
routes []router.Route
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewRouter initializes a new image router
|
// NewRouter initializes a new image router
|
||||||
func NewRouter(backend Backend, searcher Searcher, referenceBackend reference.Store, imageStore image.Store, layerStore layer.Store) router.Router {
|
func NewRouter(backend Backend) router.Router {
|
||||||
ir := &imageRouter{
|
r := &imageRouter{backend: backend}
|
||||||
backend: backend,
|
r.initRoutes()
|
||||||
searcher: searcher,
|
return r
|
||||||
referenceBackend: referenceBackend,
|
|
||||||
imageStore: imageStore,
|
|
||||||
layerStore: layerStore,
|
|
||||||
}
|
|
||||||
ir.initRoutes()
|
|
||||||
return ir
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Routes returns the available routes to the image controller
|
// Routes returns the available routes to the image controller
|
||||||
func (ir *imageRouter) Routes() []router.Route {
|
func (r *imageRouter) Routes() []router.Route {
|
||||||
return ir.routes
|
return r.routes
|
||||||
}
|
}
|
||||||
|
|
||||||
// initRoutes initializes the routes in the image router
|
// initRoutes initializes the routes in the image router
|
||||||
func (ir *imageRouter) initRoutes() {
|
func (r *imageRouter) initRoutes() {
|
||||||
ir.routes = []router.Route{
|
r.routes = []router.Route{
|
||||||
// GET
|
// GET
|
||||||
router.NewGetRoute("/images/json", ir.getImagesJSON),
|
router.NewGetRoute("/images/json", r.getImagesJSON),
|
||||||
router.NewGetRoute("/images/search", ir.getImagesSearch),
|
router.NewGetRoute("/images/search", r.getImagesSearch),
|
||||||
router.NewGetRoute("/images/get", ir.getImagesGet),
|
router.NewGetRoute("/images/get", r.getImagesGet),
|
||||||
router.NewGetRoute("/images/{name:.*}/get", ir.getImagesGet),
|
router.NewGetRoute("/images/{name:.*}/get", r.getImagesGet),
|
||||||
router.NewGetRoute("/images/{name:.*}/history", ir.getImagesHistory),
|
router.NewGetRoute("/images/{name:.*}/history", r.getImagesHistory),
|
||||||
router.NewGetRoute("/images/{name:.*}/json", ir.getImagesByName),
|
router.NewGetRoute("/images/{name:.*}/json", r.getImagesByName),
|
||||||
// POST
|
// POST
|
||||||
router.NewPostRoute("/images/load", ir.postImagesLoad),
|
router.NewPostRoute("/images/load", r.postImagesLoad),
|
||||||
router.NewPostRoute("/images/create", ir.postImagesCreate),
|
router.NewPostRoute("/images/create", r.postImagesCreate),
|
||||||
router.NewPostRoute("/images/{name:.*}/push", ir.postImagesPush),
|
router.NewPostRoute("/images/{name:.*}/push", r.postImagesPush),
|
||||||
router.NewPostRoute("/images/{name:.*}/tag", ir.postImagesTag),
|
router.NewPostRoute("/images/{name:.*}/tag", r.postImagesTag),
|
||||||
router.NewPostRoute("/images/prune", ir.postImagesPrune),
|
router.NewPostRoute("/images/prune", r.postImagesPrune),
|
||||||
// DELETE
|
// DELETE
|
||||||
router.NewDeleteRoute("/images/{name:.*}", ir.deleteImages),
|
router.NewDeleteRoute("/images/{name:.*}", r.deleteImages),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -2,50 +2,41 @@ package image // import "github.com/docker/docker/api/server/router/image"
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"encoding/base64"
|
||||||
"io"
|
"encoding/json"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/containerd/containerd/platforms"
|
"github.com/containerd/containerd/platforms"
|
||||||
"github.com/distribution/reference"
|
|
||||||
"github.com/docker/docker/api"
|
|
||||||
"github.com/docker/docker/api/server/httputils"
|
"github.com/docker/docker/api/server/httputils"
|
||||||
"github.com/docker/docker/api/types"
|
"github.com/docker/docker/api/types"
|
||||||
"github.com/docker/docker/api/types/backend"
|
|
||||||
"github.com/docker/docker/api/types/filters"
|
"github.com/docker/docker/api/types/filters"
|
||||||
imagetypes "github.com/docker/docker/api/types/image"
|
|
||||||
"github.com/docker/docker/api/types/registry"
|
|
||||||
"github.com/docker/docker/api/types/versions"
|
"github.com/docker/docker/api/types/versions"
|
||||||
"github.com/docker/docker/builder/remotecontext"
|
|
||||||
"github.com/docker/docker/dockerversion"
|
|
||||||
"github.com/docker/docker/errdefs"
|
"github.com/docker/docker/errdefs"
|
||||||
"github.com/docker/docker/image"
|
|
||||||
"github.com/docker/docker/pkg/ioutils"
|
"github.com/docker/docker/pkg/ioutils"
|
||||||
"github.com/docker/docker/pkg/progress"
|
|
||||||
"github.com/docker/docker/pkg/streamformatter"
|
"github.com/docker/docker/pkg/streamformatter"
|
||||||
"github.com/opencontainers/go-digest"
|
"github.com/docker/docker/pkg/system"
|
||||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
"github.com/docker/docker/registry"
|
||||||
|
specs "github.com/opencontainers/image-spec/specs-go/v1"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Creates an image from Pull or from Import
|
// Creates an image from Pull or from Import
|
||||||
func (ir *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
func (s *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||||
|
|
||||||
if err := httputils.ParseForm(r); err != nil {
|
if err := httputils.ParseForm(r); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
img = r.Form.Get("fromImage")
|
image = r.Form.Get("fromImage")
|
||||||
repo = r.Form.Get("repo")
|
repo = r.Form.Get("repo")
|
||||||
tag = r.Form.Get("tag")
|
tag = r.Form.Get("tag")
|
||||||
comment = r.Form.Get("message")
|
message = r.Form.Get("message")
|
||||||
progressErr error
|
err error
|
||||||
output = ioutils.NewWriteFlusher(w)
|
output = ioutils.NewWriteFlusher(w)
|
||||||
platform *ocispec.Platform
|
platform *specs.Platform
|
||||||
)
|
)
|
||||||
defer output.Close()
|
defer output.Close()
|
||||||
|
|
||||||
|
@ -53,16 +44,20 @@ func (ir *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWrit
|
||||||
|
|
||||||
version := httputils.VersionFromContext(ctx)
|
version := httputils.VersionFromContext(ctx)
|
||||||
if versions.GreaterThanOrEqualTo(version, "1.32") {
|
if versions.GreaterThanOrEqualTo(version, "1.32") {
|
||||||
if p := r.FormValue("platform"); p != "" {
|
apiPlatform := r.FormValue("platform")
|
||||||
sp, err := platforms.Parse(p)
|
if apiPlatform != "" {
|
||||||
|
sp, err := platforms.Parse(apiPlatform)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
if err := system.ValidatePlatform(sp); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
platform = &sp
|
platform = &sp
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if img != "" { // pull
|
if image != "" { //pull
|
||||||
metaHeaders := map[string][]string{}
|
metaHeaders := map[string][]string{}
|
||||||
for k, v := range r.Header {
|
for k, v := range r.Header {
|
||||||
if strings.HasPrefix(k, "X-Meta-") {
|
if strings.HasPrefix(k, "X-Meta-") {
|
||||||
|
@ -70,92 +65,39 @@ func (ir *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWrit
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Special case: "pull -a" may send an image name with a
|
authEncoded := r.Header.Get("X-Registry-Auth")
|
||||||
// trailing :. This is ugly, but let's not break API
|
authConfig := &types.AuthConfig{}
|
||||||
// compatibility.
|
if authEncoded != "" {
|
||||||
imgName := strings.TrimSuffix(img, ":")
|
authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
|
||||||
|
if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil {
|
||||||
ref, err := reference.ParseNormalizedNamed(imgName)
|
// for a pull it is not an error if no auth was given
|
||||||
if err != nil {
|
// to increase compatibility with the existing api it is defaulting to be empty
|
||||||
return errdefs.InvalidParameter(err)
|
authConfig = &types.AuthConfig{}
|
||||||
}
|
|
||||||
|
|
||||||
// TODO(thaJeztah) this could use a WithTagOrDigest() utility
|
|
||||||
if tag != "" {
|
|
||||||
// The "tag" could actually be a digest.
|
|
||||||
var dgst digest.Digest
|
|
||||||
dgst, err = digest.Parse(tag)
|
|
||||||
if err == nil {
|
|
||||||
ref, err = reference.WithDigest(reference.TrimNamed(ref), dgst)
|
|
||||||
} else {
|
|
||||||
ref, err = reference.WithTag(ref, tag)
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return errdefs.InvalidParameter(err)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
err = s.backend.PullImage(ctx, image, tag, platform, metaHeaders, authConfig, output)
|
||||||
if err := validateRepoName(ref); err != nil {
|
|
||||||
return errdefs.Forbidden(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// For a pull it is not an error if no auth was given. Ignore invalid
|
|
||||||
// AuthConfig to increase compatibility with the existing API.
|
|
||||||
authConfig, _ := registry.DecodeAuthConfig(r.Header.Get(registry.AuthHeader))
|
|
||||||
progressErr = ir.backend.PullImage(ctx, ref, platform, metaHeaders, authConfig, output)
|
|
||||||
} else { //import
|
} else { //import
|
||||||
src := r.Form.Get("fromSrc")
|
src := r.Form.Get("fromSrc")
|
||||||
|
// 'err' MUST NOT be defined within this block, we need any error
|
||||||
tagRef, err := httputils.RepoTagReference(repo, tag)
|
// generated from the download to be available to the output
|
||||||
if err != nil {
|
// stream processing below
|
||||||
return errdefs.InvalidParameter(err)
|
os := ""
|
||||||
}
|
if platform != nil {
|
||||||
|
os = platform.OS
|
||||||
if len(comment) == 0 {
|
}
|
||||||
comment = "Imported from " + src
|
err = s.backend.ImportImage(src, repo, os, tag, message, r.Body, output, r.Form["changes"])
|
||||||
}
|
}
|
||||||
|
|
||||||
var layerReader io.ReadCloser
|
|
||||||
defer r.Body.Close()
|
|
||||||
if src == "-" {
|
|
||||||
layerReader = r.Body
|
|
||||||
} else {
|
|
||||||
if len(strings.Split(src, "://")) == 1 {
|
|
||||||
src = "http://" + src
|
|
||||||
}
|
|
||||||
u, err := url.Parse(src)
|
|
||||||
if err != nil {
|
|
||||||
return errdefs.InvalidParameter(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
resp, err := remotecontext.GetWithStatusError(u.String())
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
if !output.Flushed() {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
output.Write(streamformatter.FormatStatus("", "Downloading from %s", u))
|
output.Write(streamformatter.FormatError(err))
|
||||||
progressOutput := streamformatter.NewJSONProgressOutput(output, true)
|
|
||||||
layerReader = progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Importing")
|
|
||||||
defer layerReader.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
var id image.ID
|
|
||||||
id, progressErr = ir.backend.ImportImage(ctx, tagRef, platform, comment, layerReader, r.Form["changes"])
|
|
||||||
|
|
||||||
if progressErr == nil {
|
|
||||||
output.Write(streamformatter.FormatStatus("", id.String()))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if progressErr != nil {
|
|
||||||
if !output.Flushed() {
|
|
||||||
return progressErr
|
|
||||||
}
|
|
||||||
_, _ = output.Write(streamformatter.FormatError(progressErr))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ir *imageRouter) postImagesPush(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
func (s *imageRouter) postImagesPush(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||||
metaHeaders := map[string][]string{}
|
metaHeaders := map[string][]string{}
|
||||||
for k, v := range r.Header {
|
for k, v := range r.Header {
|
||||||
if strings.HasPrefix(k, "X-Meta-") {
|
if strings.HasPrefix(k, "X-Meta-") {
|
||||||
|
@ -165,56 +107,41 @@ func (ir *imageRouter) postImagesPush(ctx context.Context, w http.ResponseWriter
|
||||||
if err := httputils.ParseForm(r); err != nil {
|
if err := httputils.ParseForm(r); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
authConfig := &types.AuthConfig{}
|
||||||
|
|
||||||
var authConfig *registry.AuthConfig
|
authEncoded := r.Header.Get("X-Registry-Auth")
|
||||||
if authEncoded := r.Header.Get(registry.AuthHeader); authEncoded != "" {
|
if authEncoded != "" {
|
||||||
// the new format is to handle the authConfig as a header. Ignore invalid
|
// the new format is to handle the authConfig as a header
|
||||||
// AuthConfig to increase compatibility with the existing API.
|
authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
|
||||||
authConfig, _ = registry.DecodeAuthConfig(authEncoded)
|
if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil {
|
||||||
|
// to increase compatibility to existing api it is defaulting to be empty
|
||||||
|
authConfig = &types.AuthConfig{}
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
// the old format is supported for compatibility if there was no authConfig header
|
// the old format is supported for compatibility if there was no authConfig header
|
||||||
var err error
|
if err := json.NewDecoder(r.Body).Decode(authConfig); err != nil {
|
||||||
authConfig, err = registry.DecodeAuthConfigBody(r.Body)
|
return errors.Wrap(errdefs.InvalidParameter(err), "Bad parameters and missing X-Registry-Auth")
|
||||||
if err != nil {
|
|
||||||
return errors.Wrap(err, "bad parameters and missing X-Registry-Auth")
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
image := vars["name"]
|
||||||
|
tag := r.Form.Get("tag")
|
||||||
|
|
||||||
output := ioutils.NewWriteFlusher(w)
|
output := ioutils.NewWriteFlusher(w)
|
||||||
defer output.Close()
|
defer output.Close()
|
||||||
|
|
||||||
w.Header().Set("Content-Type", "application/json")
|
w.Header().Set("Content-Type", "application/json")
|
||||||
|
|
||||||
img := vars["name"]
|
if err := s.backend.PushImage(ctx, image, tag, metaHeaders, authConfig, output); err != nil {
|
||||||
tag := r.Form.Get("tag")
|
|
||||||
|
|
||||||
var ref reference.Named
|
|
||||||
|
|
||||||
// Tag is empty only in case PushOptions.All is true.
|
|
||||||
if tag != "" {
|
|
||||||
r, err := httputils.RepoTagReference(img, tag)
|
|
||||||
if err != nil {
|
|
||||||
return errdefs.InvalidParameter(err)
|
|
||||||
}
|
|
||||||
ref = r
|
|
||||||
} else {
|
|
||||||
r, err := reference.ParseNormalizedNamed(img)
|
|
||||||
if err != nil {
|
|
||||||
return errdefs.InvalidParameter(err)
|
|
||||||
}
|
|
||||||
ref = r
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := ir.backend.PushImage(ctx, ref, metaHeaders, authConfig, output); err != nil {
|
|
||||||
if !output.Flushed() {
|
if !output.Flushed() {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
_, _ = output.Write(streamformatter.FormatError(err))
|
output.Write(streamformatter.FormatError(err))
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ir *imageRouter) getImagesGet(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
func (s *imageRouter) getImagesGet(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||||
if err := httputils.ParseForm(r); err != nil {
|
if err := httputils.ParseForm(r); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -230,16 +157,16 @@ func (ir *imageRouter) getImagesGet(ctx context.Context, w http.ResponseWriter,
|
||||||
names = r.Form["names"]
|
names = r.Form["names"]
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := ir.backend.ExportImage(ctx, names, output); err != nil {
|
if err := s.backend.ExportImage(names, output); err != nil {
|
||||||
if !output.Flushed() {
|
if !output.Flushed() {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
_, _ = output.Write(streamformatter.FormatError(err))
|
output.Write(streamformatter.FormatError(err))
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ir *imageRouter) postImagesLoad(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
func (s *imageRouter) postImagesLoad(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||||
if err := httputils.ParseForm(r); err != nil {
|
if err := httputils.ParseForm(r); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -249,8 +176,8 @@ func (ir *imageRouter) postImagesLoad(ctx context.Context, w http.ResponseWriter
|
||||||
|
|
||||||
output := ioutils.NewWriteFlusher(w)
|
output := ioutils.NewWriteFlusher(w)
|
||||||
defer output.Close()
|
defer output.Close()
|
||||||
if err := ir.backend.LoadImage(ctx, r.Body, output, quiet); err != nil {
|
if err := s.backend.LoadImage(r.Body, output, quiet); err != nil {
|
||||||
_, _ = output.Write(streamformatter.FormatError(err))
|
output.Write(streamformatter.FormatError(err))
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -263,7 +190,7 @@ func (missingImageError) Error() string {
|
||||||
|
|
||||||
func (missingImageError) InvalidParameter() {}
|
func (missingImageError) InvalidParameter() {}
|
||||||
|
|
||||||
func (ir *imageRouter) deleteImages(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
func (s *imageRouter) deleteImages(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||||
if err := httputils.ParseForm(r); err != nil {
|
if err := httputils.ParseForm(r); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -277,7 +204,7 @@ func (ir *imageRouter) deleteImages(ctx context.Context, w http.ResponseWriter,
|
||||||
force := httputils.BoolValue(r, "force")
|
force := httputils.BoolValue(r, "force")
|
||||||
prune := !httputils.BoolValue(r, "noprune")
|
prune := !httputils.BoolValue(r, "noprune")
|
||||||
|
|
||||||
list, err := ir.backend.ImageDelete(ctx, name, force, prune)
|
list, err := s.backend.ImageDelete(name, force, prune)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -285,103 +212,16 @@ func (ir *imageRouter) deleteImages(ctx context.Context, w http.ResponseWriter,
|
||||||
return httputils.WriteJSON(w, http.StatusOK, list)
|
return httputils.WriteJSON(w, http.StatusOK, list)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ir *imageRouter) getImagesByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
func (s *imageRouter) getImagesByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||||
img, err := ir.backend.GetImage(ctx, vars["name"], backend.GetImageOpts{Details: true})
|
imageInspect, err := s.backend.LookupImage(vars["name"])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
imageInspect, err := ir.toImageInspect(img)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
version := httputils.VersionFromContext(ctx)
|
|
||||||
if versions.LessThan(version, "1.44") {
|
|
||||||
imageInspect.VirtualSize = imageInspect.Size //nolint:staticcheck // ignore SA1019: field is deprecated, but still set on API < v1.44.
|
|
||||||
|
|
||||||
if imageInspect.Created == "" {
|
|
||||||
// backwards compatibility for Created not existing returning "0001-01-01T00:00:00Z"
|
|
||||||
// https://github.com/moby/moby/issues/47368
|
|
||||||
imageInspect.Created = time.Time{}.Format(time.RFC3339Nano)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if versions.GreaterThanOrEqualTo(version, "1.45") {
|
|
||||||
imageInspect.Container = "" //nolint:staticcheck // ignore SA1019: field is deprecated, but still set on API < v1.45.
|
|
||||||
imageInspect.ContainerConfig = nil //nolint:staticcheck // ignore SA1019: field is deprecated, but still set on API < v1.45.
|
|
||||||
}
|
|
||||||
return httputils.WriteJSON(w, http.StatusOK, imageInspect)
|
return httputils.WriteJSON(w, http.StatusOK, imageInspect)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ir *imageRouter) toImageInspect(img *image.Image) (*types.ImageInspect, error) {
|
func (s *imageRouter) getImagesJSON(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||||
var repoTags, repoDigests []string
|
|
||||||
for _, ref := range img.Details.References {
|
|
||||||
switch ref.(type) {
|
|
||||||
case reference.NamedTagged:
|
|
||||||
repoTags = append(repoTags, reference.FamiliarString(ref))
|
|
||||||
case reference.Canonical:
|
|
||||||
repoDigests = append(repoDigests, reference.FamiliarString(ref))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
comment := img.Comment
|
|
||||||
if len(comment) == 0 && len(img.History) > 0 {
|
|
||||||
comment = img.History[len(img.History)-1].Comment
|
|
||||||
}
|
|
||||||
|
|
||||||
// Make sure we output empty arrays instead of nil.
|
|
||||||
if repoTags == nil {
|
|
||||||
repoTags = []string{}
|
|
||||||
}
|
|
||||||
if repoDigests == nil {
|
|
||||||
repoDigests = []string{}
|
|
||||||
}
|
|
||||||
|
|
||||||
var created string
|
|
||||||
if img.Created != nil {
|
|
||||||
created = img.Created.Format(time.RFC3339Nano)
|
|
||||||
}
|
|
||||||
|
|
||||||
return &types.ImageInspect{
|
|
||||||
ID: img.ID().String(),
|
|
||||||
RepoTags: repoTags,
|
|
||||||
RepoDigests: repoDigests,
|
|
||||||
Parent: img.Parent.String(),
|
|
||||||
Comment: comment,
|
|
||||||
Created: created,
|
|
||||||
Container: img.Container, //nolint:staticcheck // ignore SA1019: field is deprecated, but still set on API < v1.45.
|
|
||||||
ContainerConfig: &img.ContainerConfig, //nolint:staticcheck // ignore SA1019: field is deprecated, but still set on API < v1.45.
|
|
||||||
DockerVersion: img.DockerVersion,
|
|
||||||
Author: img.Author,
|
|
||||||
Config: img.Config,
|
|
||||||
Architecture: img.Architecture,
|
|
||||||
Variant: img.Variant,
|
|
||||||
Os: img.OperatingSystem(),
|
|
||||||
OsVersion: img.OSVersion,
|
|
||||||
Size: img.Details.Size,
|
|
||||||
GraphDriver: types.GraphDriverData{
|
|
||||||
Name: img.Details.Driver,
|
|
||||||
Data: img.Details.Metadata,
|
|
||||||
},
|
|
||||||
RootFS: rootFSToAPIType(img.RootFS),
|
|
||||||
Metadata: imagetypes.Metadata{
|
|
||||||
LastTagTime: img.Details.LastUpdated,
|
|
||||||
},
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func rootFSToAPIType(rootfs *image.RootFS) types.RootFS {
|
|
||||||
var layers []string
|
|
||||||
for _, l := range rootfs.DiffIDs {
|
|
||||||
layers = append(layers, l.String())
|
|
||||||
}
|
|
||||||
return types.RootFS{
|
|
||||||
Type: rootfs.Type,
|
|
||||||
Layers: layers,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ir *imageRouter) getImagesJSON(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
|
||||||
if err := httputils.ParseForm(r); err != nil {
|
if err := httputils.ParseForm(r); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -391,56 +231,23 @@ func (ir *imageRouter) getImagesJSON(ctx context.Context, w http.ResponseWriter,
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
version := httputils.VersionFromContext(ctx)
|
|
||||||
if versions.LessThan(version, "1.41") {
|
|
||||||
// NOTE: filter is a shell glob string applied to repository names.
|
|
||||||
filterParam := r.Form.Get("filter")
|
filterParam := r.Form.Get("filter")
|
||||||
|
// FIXME(vdemeester) This has been deprecated in 1.13, and is target for removal for v17.12
|
||||||
if filterParam != "" {
|
if filterParam != "" {
|
||||||
imageFilters.Add("reference", filterParam)
|
imageFilters.Add("reference", filterParam)
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
var sharedSize bool
|
images, err := s.backend.Images(imageFilters, httputils.BoolValue(r, "all"), false)
|
||||||
if versions.GreaterThanOrEqualTo(version, "1.42") {
|
|
||||||
// NOTE: Support for the "shared-size" parameter was added in API 1.42.
|
|
||||||
sharedSize = httputils.BoolValue(r, "shared-size")
|
|
||||||
}
|
|
||||||
|
|
||||||
images, err := ir.backend.Images(ctx, imagetypes.ListOptions{
|
|
||||||
All: httputils.BoolValue(r, "all"),
|
|
||||||
Filters: imageFilters,
|
|
||||||
SharedSize: sharedSize,
|
|
||||||
})
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
useNone := versions.LessThan(version, "1.43")
|
|
||||||
withVirtualSize := versions.LessThan(version, "1.44")
|
|
||||||
for _, img := range images {
|
|
||||||
if useNone {
|
|
||||||
if len(img.RepoTags) == 0 && len(img.RepoDigests) == 0 {
|
|
||||||
img.RepoTags = append(img.RepoTags, "<none>:<none>")
|
|
||||||
img.RepoDigests = append(img.RepoDigests, "<none>@<none>")
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if img.RepoTags == nil {
|
|
||||||
img.RepoTags = []string{}
|
|
||||||
}
|
|
||||||
if img.RepoDigests == nil {
|
|
||||||
img.RepoDigests = []string{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if withVirtualSize {
|
|
||||||
img.VirtualSize = img.Size //nolint:staticcheck // ignore SA1019: field is deprecated, but still set on API < v1.44.
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return httputils.WriteJSON(w, http.StatusOK, images)
|
return httputils.WriteJSON(w, http.StatusOK, images)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ir *imageRouter) getImagesHistory(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
func (s *imageRouter) getImagesHistory(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||||
history, err := ir.backend.ImageHistory(ctx, vars["name"])
|
name := vars["name"]
|
||||||
|
history, err := s.backend.ImageHistory(name)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -448,71 +255,56 @@ func (ir *imageRouter) getImagesHistory(ctx context.Context, w http.ResponseWrit
|
||||||
return httputils.WriteJSON(w, http.StatusOK, history)
|
return httputils.WriteJSON(w, http.StatusOK, history)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ir *imageRouter) postImagesTag(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
func (s *imageRouter) postImagesTag(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||||
if err := httputils.ParseForm(r); err != nil {
|
if err := httputils.ParseForm(r); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
if _, err := s.backend.TagImage(vars["name"], r.Form.Get("repo"), r.Form.Get("tag")); err != nil {
|
||||||
ref, err := httputils.RepoTagReference(r.Form.Get("repo"), r.Form.Get("tag"))
|
|
||||||
if ref == nil || err != nil {
|
|
||||||
return errdefs.InvalidParameter(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
refName := reference.FamiliarName(ref)
|
|
||||||
if refName == string(digest.Canonical) {
|
|
||||||
return errdefs.InvalidParameter(errors.New("refusing to create an ambiguous tag using digest algorithm as name"))
|
|
||||||
}
|
|
||||||
|
|
||||||
img, err := ir.backend.GetImage(ctx, vars["name"], backend.GetImageOpts{})
|
|
||||||
if err != nil {
|
|
||||||
return errdefs.NotFound(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := ir.backend.TagImage(ctx, img.ID(), ref); err != nil {
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
w.WriteHeader(http.StatusCreated)
|
w.WriteHeader(http.StatusCreated)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ir *imageRouter) getImagesSearch(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
func (s *imageRouter) getImagesSearch(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||||
if err := httputils.ParseForm(r); err != nil {
|
if err := httputils.ParseForm(r); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
var (
|
||||||
|
config *types.AuthConfig
|
||||||
|
authEncoded = r.Header.Get("X-Registry-Auth")
|
||||||
|
headers = map[string][]string{}
|
||||||
|
)
|
||||||
|
|
||||||
var limit int
|
if authEncoded != "" {
|
||||||
if r.Form.Get("limit") != "" {
|
authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
|
||||||
var err error
|
if err := json.NewDecoder(authJSON).Decode(&config); err != nil {
|
||||||
limit, err = strconv.Atoi(r.Form.Get("limit"))
|
// for a search it is not an error if no auth was given
|
||||||
if err != nil || limit < 0 {
|
// to increase compatibility with the existing api it is defaulting to be empty
|
||||||
return errdefs.InvalidParameter(errors.Wrap(err, "invalid limit specified"))
|
config = &types.AuthConfig{}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
searchFilters, err := filters.FromJSON(r.Form.Get("filters"))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// For a search it is not an error if no auth was given. Ignore invalid
|
|
||||||
// AuthConfig to increase compatibility with the existing API.
|
|
||||||
authConfig, _ := registry.DecodeAuthConfig(r.Header.Get(registry.AuthHeader))
|
|
||||||
|
|
||||||
headers := http.Header{}
|
|
||||||
for k, v := range r.Header {
|
for k, v := range r.Header {
|
||||||
k = http.CanonicalHeaderKey(k)
|
|
||||||
if strings.HasPrefix(k, "X-Meta-") {
|
if strings.HasPrefix(k, "X-Meta-") {
|
||||||
headers[k] = v
|
headers[k] = v
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
headers.Set("User-Agent", dockerversion.DockerUserAgent(ctx))
|
limit := registry.DefaultSearchLimit
|
||||||
res, err := ir.searcher.Search(ctx, searchFilters, r.Form.Get("term"), limit, authConfig, headers)
|
if r.Form.Get("limit") != "" {
|
||||||
|
limitValue, err := strconv.Atoi(r.Form.Get("limit"))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return httputils.WriteJSON(w, http.StatusOK, res)
|
limit = limitValue
|
||||||
|
}
|
||||||
|
query, err := s.backend.SearchRegistryForImages(ctx, r.Form.Get("filters"), r.Form.Get("term"), limit, config, headers)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return httputils.WriteJSON(w, http.StatusOK, query.Results)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ir *imageRouter) postImagesPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
func (s *imageRouter) postImagesPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||||
if err := httputils.ParseForm(r); err != nil {
|
if err := httputils.ParseForm(r); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -522,18 +314,9 @@ func (ir *imageRouter) postImagesPrune(ctx context.Context, w http.ResponseWrite
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
pruneReport, err := ir.backend.ImagesPrune(ctx, pruneFilters)
|
pruneReport, err := s.backend.ImagesPrune(ctx, pruneFilters)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return httputils.WriteJSON(w, http.StatusOK, pruneReport)
|
return httputils.WriteJSON(w, http.StatusOK, pruneReport)
|
||||||
}
|
}
|
||||||
|
|
||||||
// validateRepoName validates the name of a repository.
|
|
||||||
func validateRepoName(name reference.Named) error {
|
|
||||||
familiarName := reference.FamiliarName(name)
|
|
||||||
if familiarName == api.NoBaseImageSpecifier {
|
|
||||||
return fmt.Errorf("'%s' is a reserved name", familiarName)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
|
@ -1,8 +1,6 @@
|
||||||
package router // import "github.com/docker/docker/api/server/router"
|
package router // import "github.com/docker/docker/api/server/router"
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"net/http"
|
|
||||||
|
|
||||||
"github.com/docker/docker/api/server/httputils"
|
"github.com/docker/docker/api/server/httputils"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -44,30 +42,30 @@ func NewRoute(method, path string, handler httputils.APIFunc, opts ...RouteWrapp
|
||||||
|
|
||||||
// NewGetRoute initializes a new route with the http method GET.
|
// NewGetRoute initializes a new route with the http method GET.
|
||||||
func NewGetRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route {
|
func NewGetRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route {
|
||||||
return NewRoute(http.MethodGet, path, handler, opts...)
|
return NewRoute("GET", path, handler, opts...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewPostRoute initializes a new route with the http method POST.
|
// NewPostRoute initializes a new route with the http method POST.
|
||||||
func NewPostRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route {
|
func NewPostRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route {
|
||||||
return NewRoute(http.MethodPost, path, handler, opts...)
|
return NewRoute("POST", path, handler, opts...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewPutRoute initializes a new route with the http method PUT.
|
// NewPutRoute initializes a new route with the http method PUT.
|
||||||
func NewPutRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route {
|
func NewPutRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route {
|
||||||
return NewRoute(http.MethodPut, path, handler, opts...)
|
return NewRoute("PUT", path, handler, opts...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewDeleteRoute initializes a new route with the http method DELETE.
|
// NewDeleteRoute initializes a new route with the http method DELETE.
|
||||||
func NewDeleteRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route {
|
func NewDeleteRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route {
|
||||||
return NewRoute(http.MethodDelete, path, handler, opts...)
|
return NewRoute("DELETE", path, handler, opts...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewOptionsRoute initializes a new route with the http method OPTIONS.
|
// NewOptionsRoute initializes a new route with the http method OPTIONS.
|
||||||
func NewOptionsRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route {
|
func NewOptionsRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route {
|
||||||
return NewRoute(http.MethodOptions, path, handler, opts...)
|
return NewRoute("OPTIONS", path, handler, opts...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewHeadRoute initializes a new route with the http method HEAD.
|
// NewHeadRoute initializes a new route with the http method HEAD.
|
||||||
func NewHeadRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route {
|
func NewHeadRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route {
|
||||||
return NewRoute(http.MethodHead, path, handler, opts...)
|
return NewRoute("HEAD", path, handler, opts...)
|
||||||
}
|
}
|
||||||
|
|
|
@ -4,15 +4,16 @@ import (
|
||||||
"context"
|
"context"
|
||||||
|
|
||||||
"github.com/docker/docker/api/types"
|
"github.com/docker/docker/api/types"
|
||||||
"github.com/docker/docker/api/types/backend"
|
|
||||||
"github.com/docker/docker/api/types/filters"
|
"github.com/docker/docker/api/types/filters"
|
||||||
"github.com/docker/docker/api/types/network"
|
"github.com/docker/docker/api/types/network"
|
||||||
|
"github.com/docker/libnetwork"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Backend is all the methods that need to be implemented
|
// Backend is all the methods that need to be implemented
|
||||||
// to provide network specific functionality.
|
// to provide network specific functionality.
|
||||||
type Backend interface {
|
type Backend interface {
|
||||||
GetNetworks(filters.Args, backend.NetworkListConfig) ([]types.NetworkResource, error)
|
FindNetwork(idName string) (libnetwork.Network, error)
|
||||||
|
GetNetworks(filters.Args, types.NetworkListConfig) ([]types.NetworkResource, error)
|
||||||
CreateNetwork(nc types.NetworkCreateRequest) (*types.NetworkCreateResponse, error)
|
CreateNetwork(nc types.NetworkCreateRequest) (*types.NetworkCreateResponse, error)
|
||||||
ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error
|
ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error
|
||||||
DisconnectContainerFromNetwork(containerName string, networkName string, force bool) error
|
DisconnectContainerFromNetwork(containerName string, networkName string, force bool) error
|
||||||
|
|
1
api/server/router/network/filter.go
Normal file
1
api/server/router/network/filter.go
Normal file
|
@ -0,0 +1 @@
|
||||||
|
package network // import "github.com/docker/docker/api/server/router/network"
|
|
@ -2,19 +2,20 @@ package network // import "github.com/docker/docker/api/server/router/network"
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/docker/docker/api/server/httputils"
|
"github.com/docker/docker/api/server/httputils"
|
||||||
"github.com/docker/docker/api/types"
|
"github.com/docker/docker/api/types"
|
||||||
"github.com/docker/docker/api/types/backend"
|
|
||||||
"github.com/docker/docker/api/types/filters"
|
"github.com/docker/docker/api/types/filters"
|
||||||
"github.com/docker/docker/api/types/network"
|
"github.com/docker/docker/api/types/network"
|
||||||
"github.com/docker/docker/api/types/versions"
|
"github.com/docker/docker/api/types/versions"
|
||||||
"github.com/docker/docker/errdefs"
|
"github.com/docker/docker/errdefs"
|
||||||
"github.com/docker/docker/libnetwork"
|
"github.com/docker/libnetwork"
|
||||||
"github.com/docker/docker/libnetwork/scope"
|
netconst "github.com/docker/libnetwork/datastore"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -40,7 +41,7 @@ func (n *networkRouter) getNetworksList(ctx context.Context, w http.ResponseWrit
|
||||||
|
|
||||||
// Combine the network list returned by Docker daemon if it is not already
|
// Combine the network list returned by Docker daemon if it is not already
|
||||||
// returned by the cluster manager
|
// returned by the cluster manager
|
||||||
localNetworks, err := n.backend.GetNetworks(filter, backend.NetworkListConfig{Detailed: versions.LessThan(httputils.VersionFromContext(ctx), "1.28")})
|
localNetworks, err := n.backend.GetNetworks(filter, types.NetworkListConfig{Detailed: versions.LessThan(httputils.VersionFromContext(ctx), "1.28")})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -84,6 +85,10 @@ func (e ambigousResultsError) Error() string {
|
||||||
|
|
||||||
func (ambigousResultsError) InvalidParameter() {}
|
func (ambigousResultsError) InvalidParameter() {}
|
||||||
|
|
||||||
|
func nameConflict(name string) error {
|
||||||
|
return errdefs.Conflict(libnetwork.NetworkNameError(name))
|
||||||
|
}
|
||||||
|
|
||||||
func (n *networkRouter) getNetwork(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
func (n *networkRouter) getNetwork(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||||
if err := httputils.ParseForm(r); err != nil {
|
if err := httputils.ParseForm(r); err != nil {
|
||||||
return err
|
return err
|
||||||
|
@ -99,7 +104,7 @@ func (n *networkRouter) getNetwork(ctx context.Context, w http.ResponseWriter, r
|
||||||
return errors.Wrapf(invalidRequestError{err}, "invalid value for verbose: %s", v)
|
return errors.Wrapf(invalidRequestError{err}, "invalid value for verbose: %s", v)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
networkScope := r.URL.Query().Get("scope")
|
scope := r.URL.Query().Get("scope")
|
||||||
|
|
||||||
// In case multiple networks have duplicate names, return error.
|
// In case multiple networks have duplicate names, return error.
|
||||||
// TODO (yongtang): should we wrap with version here for backward compatibility?
|
// TODO (yongtang): should we wrap with version here for backward compatibility?
|
||||||
|
@ -115,23 +120,23 @@ func (n *networkRouter) getNetwork(ctx context.Context, w http.ResponseWriter, r
|
||||||
// TODO(@cpuguy83): All this logic for figuring out which network to return does not belong here
|
// TODO(@cpuguy83): All this logic for figuring out which network to return does not belong here
|
||||||
// Instead there should be a backend function to just get one network.
|
// Instead there should be a backend function to just get one network.
|
||||||
filter := filters.NewArgs(filters.Arg("idOrName", term))
|
filter := filters.NewArgs(filters.Arg("idOrName", term))
|
||||||
if networkScope != "" {
|
if scope != "" {
|
||||||
filter.Add("scope", networkScope)
|
filter.Add("scope", scope)
|
||||||
}
|
}
|
||||||
networks, _ := n.backend.GetNetworks(filter, backend.NetworkListConfig{Detailed: true, Verbose: verbose})
|
nw, _ := n.backend.GetNetworks(filter, types.NetworkListConfig{Detailed: true, Verbose: verbose})
|
||||||
for _, nw := range networks {
|
for _, network := range nw {
|
||||||
if nw.ID == term {
|
if network.ID == term {
|
||||||
return httputils.WriteJSON(w, http.StatusOK, nw)
|
return httputils.WriteJSON(w, http.StatusOK, network)
|
||||||
}
|
}
|
||||||
if nw.Name == term {
|
if network.Name == term {
|
||||||
// No need to check the ID collision here as we are still in
|
// No need to check the ID collision here as we are still in
|
||||||
// local scope and the network ID is unique in this scope.
|
// local scope and the network ID is unique in this scope.
|
||||||
listByFullName[nw.ID] = nw
|
listByFullName[network.ID] = network
|
||||||
}
|
}
|
||||||
if strings.HasPrefix(nw.ID, term) {
|
if strings.HasPrefix(network.ID, term) {
|
||||||
// No need to check the ID collision here as we are still in
|
// No need to check the ID collision here as we are still in
|
||||||
// local scope and the network ID is unique in this scope.
|
// local scope and the network ID is unique in this scope.
|
||||||
listByPartialID[nw.ID] = nw
|
listByPartialID[network.ID] = network
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -141,7 +146,7 @@ func (n *networkRouter) getNetwork(ctx context.Context, w http.ResponseWriter, r
|
||||||
// or if the get network was passed with a network name and scope as swarm
|
// or if the get network was passed with a network name and scope as swarm
|
||||||
// return the network. Skipped using isMatchingScope because it is true if the scope
|
// return the network. Skipped using isMatchingScope because it is true if the scope
|
||||||
// is not set which would be case if the client API v1.30
|
// is not set which would be case if the client API v1.30
|
||||||
if strings.HasPrefix(nwk.ID, term) || networkScope == scope.Swarm {
|
if strings.HasPrefix(nwk.ID, term) || (netconst.SwarmScope == scope) {
|
||||||
// If we have a previous match "backend", return it, we need verbose when enabled
|
// If we have a previous match "backend", return it, we need verbose when enabled
|
||||||
// ex: overlay/partial_ID or name/swarm_scope
|
// ex: overlay/partial_ID or name/swarm_scope
|
||||||
if nwv, ok := listByPartialID[nwk.ID]; ok {
|
if nwv, ok := listByPartialID[nwk.ID]; ok {
|
||||||
|
@ -153,25 +158,25 @@ func (n *networkRouter) getNetwork(ctx context.Context, w http.ResponseWriter, r
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
networks, _ = n.cluster.GetNetworks(filter)
|
nr, _ := n.cluster.GetNetworks(filter)
|
||||||
for _, nw := range networks {
|
for _, network := range nr {
|
||||||
if nw.ID == term {
|
if network.ID == term {
|
||||||
return httputils.WriteJSON(w, http.StatusOK, nw)
|
return httputils.WriteJSON(w, http.StatusOK, network)
|
||||||
}
|
}
|
||||||
if nw.Name == term {
|
if network.Name == term {
|
||||||
// Check the ID collision as we are in swarm scope here, and
|
// Check the ID collision as we are in swarm scope here, and
|
||||||
// the map (of the listByFullName) may have already had a
|
// the map (of the listByFullName) may have already had a
|
||||||
// network with the same ID (from local scope previously)
|
// network with the same ID (from local scope previously)
|
||||||
if _, ok := listByFullName[nw.ID]; !ok {
|
if _, ok := listByFullName[network.ID]; !ok {
|
||||||
listByFullName[nw.ID] = nw
|
listByFullName[network.ID] = network
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if strings.HasPrefix(nw.ID, term) {
|
if strings.HasPrefix(network.ID, term) {
|
||||||
// Check the ID collision as we are in swarm scope here, and
|
// Check the ID collision as we are in swarm scope here, and
|
||||||
// the map (of the listByPartialID) may have already had a
|
// the map (of the listByPartialID) may have already had a
|
||||||
// network with the same ID (from local scope previously)
|
// network with the same ID (from local scope previously)
|
||||||
if _, ok := listByPartialID[nw.ID]; !ok {
|
if _, ok := listByPartialID[network.ID]; !ok {
|
||||||
listByPartialID[nw.ID] = nw
|
listByPartialID[network.ID] = network
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -200,25 +205,39 @@ func (n *networkRouter) getNetwork(ctx context.Context, w http.ResponseWriter, r
|
||||||
}
|
}
|
||||||
|
|
||||||
func (n *networkRouter) postNetworkCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
func (n *networkRouter) postNetworkCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||||
|
var create types.NetworkCreateRequest
|
||||||
|
|
||||||
if err := httputils.ParseForm(r); err != nil {
|
if err := httputils.ParseForm(r); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
var create types.NetworkCreateRequest
|
if err := httputils.CheckForJSON(r); err != nil {
|
||||||
if err := httputils.ReadJSON(r, &create); err != nil {
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if nws, err := n.cluster.GetNetworksByName(create.Name); err == nil && len(nws) > 0 {
|
if err := json.NewDecoder(r.Body).Decode(&create); err != nil {
|
||||||
return libnetwork.NetworkNameError(create.Name)
|
if err == io.EOF {
|
||||||
|
return errdefs.InvalidParameter(errors.New("got EOF while reading request body"))
|
||||||
|
}
|
||||||
|
return errdefs.InvalidParameter(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if nws, err := n.cluster.GetNetworksByName(create.Name); err == nil && len(nws) > 0 {
|
||||||
|
return nameConflict(create.Name)
|
||||||
}
|
}
|
||||||
|
|
||||||
// For a Swarm-scoped network, this call to backend.CreateNetwork is used to
|
|
||||||
// validate the configuration. The network will not be created but, if the
|
|
||||||
// configuration is valid, ManagerRedirectError will be returned and handled
|
|
||||||
// below.
|
|
||||||
nw, err := n.backend.CreateNetwork(create)
|
nw, err := n.backend.CreateNetwork(create)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
var warning string
|
||||||
|
if _, ok := err.(libnetwork.NetworkNameError); ok {
|
||||||
|
// check if user defined CheckDuplicate, if set true, return err
|
||||||
|
// otherwise prepare a warning message
|
||||||
|
if create.CheckDuplicate {
|
||||||
|
return nameConflict(create.Name)
|
||||||
|
}
|
||||||
|
warning = libnetwork.NetworkNameError(create.Name).Error()
|
||||||
|
}
|
||||||
|
|
||||||
if _, ok := err.(libnetwork.ManagerRedirectError); !ok {
|
if _, ok := err.(libnetwork.ManagerRedirectError); !ok {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -228,6 +247,7 @@ func (n *networkRouter) postNetworkCreate(ctx context.Context, w http.ResponseWr
|
||||||
}
|
}
|
||||||
nw = &types.NetworkCreateResponse{
|
nw = &types.NetworkCreateResponse{
|
||||||
ID: id,
|
ID: id,
|
||||||
|
Warning: warning,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -235,15 +255,22 @@ func (n *networkRouter) postNetworkCreate(ctx context.Context, w http.ResponseWr
|
||||||
}
|
}
|
||||||
|
|
||||||
func (n *networkRouter) postNetworkConnect(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
func (n *networkRouter) postNetworkConnect(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||||
|
var connect types.NetworkConnect
|
||||||
if err := httputils.ParseForm(r); err != nil {
|
if err := httputils.ParseForm(r); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
var connect types.NetworkConnect
|
if err := httputils.CheckForJSON(r); err != nil {
|
||||||
if err := httputils.ReadJSON(r, &connect); err != nil {
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if err := json.NewDecoder(r.Body).Decode(&connect); err != nil {
|
||||||
|
if err == io.EOF {
|
||||||
|
return errdefs.InvalidParameter(errors.New("got EOF while reading request body"))
|
||||||
|
}
|
||||||
|
return errdefs.InvalidParameter(err)
|
||||||
|
}
|
||||||
|
|
||||||
// Unlike other operations, we does not check ambiguity of the name/ID here.
|
// Unlike other operations, we does not check ambiguity of the name/ID here.
|
||||||
// The reason is that, In case of attachable network in swarm scope, the actual local network
|
// The reason is that, In case of attachable network in swarm scope, the actual local network
|
||||||
// may not be available at the time. At the same time, inside daemon `ConnectContainerToNetwork`
|
// may not be available at the time. At the same time, inside daemon `ConnectContainerToNetwork`
|
||||||
|
@ -252,15 +279,22 @@ func (n *networkRouter) postNetworkConnect(ctx context.Context, w http.ResponseW
|
||||||
}
|
}
|
||||||
|
|
||||||
func (n *networkRouter) postNetworkDisconnect(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
func (n *networkRouter) postNetworkDisconnect(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||||
|
var disconnect types.NetworkDisconnect
|
||||||
if err := httputils.ParseForm(r); err != nil {
|
if err := httputils.ParseForm(r); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
var disconnect types.NetworkDisconnect
|
if err := httputils.CheckForJSON(r); err != nil {
|
||||||
if err := httputils.ReadJSON(r, &disconnect); err != nil {
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if err := json.NewDecoder(r.Body).Decode(&disconnect); err != nil {
|
||||||
|
if err == io.EOF {
|
||||||
|
return errdefs.InvalidParameter(errors.New("got EOF while reading request body"))
|
||||||
|
}
|
||||||
|
return errdefs.InvalidParameter(err)
|
||||||
|
}
|
||||||
|
|
||||||
return n.backend.DisconnectContainerFromNetwork(disconnect.Container, vars["id"], disconnect.Force)
|
return n.backend.DisconnectContainerFromNetwork(disconnect.Container, vars["id"], disconnect.Force)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -316,42 +350,42 @@ func (n *networkRouter) findUniqueNetwork(term string) (types.NetworkResource, e
|
||||||
listByPartialID := map[string]types.NetworkResource{}
|
listByPartialID := map[string]types.NetworkResource{}
|
||||||
|
|
||||||
filter := filters.NewArgs(filters.Arg("idOrName", term))
|
filter := filters.NewArgs(filters.Arg("idOrName", term))
|
||||||
networks, _ := n.backend.GetNetworks(filter, backend.NetworkListConfig{Detailed: true})
|
nw, _ := n.backend.GetNetworks(filter, types.NetworkListConfig{Detailed: true})
|
||||||
for _, nw := range networks {
|
for _, network := range nw {
|
||||||
if nw.ID == term {
|
if network.ID == term {
|
||||||
return nw, nil
|
return network, nil
|
||||||
}
|
}
|
||||||
if nw.Name == term && !nw.Ingress {
|
if network.Name == term && !network.Ingress {
|
||||||
// No need to check the ID collision here as we are still in
|
// No need to check the ID collision here as we are still in
|
||||||
// local scope and the network ID is unique in this scope.
|
// local scope and the network ID is unique in this scope.
|
||||||
listByFullName[nw.ID] = nw
|
listByFullName[network.ID] = network
|
||||||
}
|
}
|
||||||
if strings.HasPrefix(nw.ID, term) {
|
if strings.HasPrefix(network.ID, term) {
|
||||||
// No need to check the ID collision here as we are still in
|
// No need to check the ID collision here as we are still in
|
||||||
// local scope and the network ID is unique in this scope.
|
// local scope and the network ID is unique in this scope.
|
||||||
listByPartialID[nw.ID] = nw
|
listByPartialID[network.ID] = network
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
networks, _ = n.cluster.GetNetworks(filter)
|
nr, _ := n.cluster.GetNetworks(filter)
|
||||||
for _, nw := range networks {
|
for _, network := range nr {
|
||||||
if nw.ID == term {
|
if network.ID == term {
|
||||||
return nw, nil
|
return network, nil
|
||||||
}
|
}
|
||||||
if nw.Name == term {
|
if network.Name == term {
|
||||||
// Check the ID collision as we are in swarm scope here, and
|
// Check the ID collision as we are in swarm scope here, and
|
||||||
// the map (of the listByFullName) may have already had a
|
// the map (of the listByFullName) may have already had a
|
||||||
// network with the same ID (from local scope previously)
|
// network with the same ID (from local scope previously)
|
||||||
if _, ok := listByFullName[nw.ID]; !ok {
|
if _, ok := listByFullName[network.ID]; !ok {
|
||||||
listByFullName[nw.ID] = nw
|
listByFullName[network.ID] = network
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if strings.HasPrefix(nw.ID, term) {
|
if strings.HasPrefix(network.ID, term) {
|
||||||
// Check the ID collision as we are in swarm scope here, and
|
// Check the ID collision as we are in swarm scope here, and
|
||||||
// the map (of the listByPartialID) may have already had a
|
// the map (of the listByPartialID) may have already had a
|
||||||
// network with the same ID (from local scope previously)
|
// network with the same ID (from local scope previously)
|
||||||
if _, ok := listByPartialID[nw.ID]; !ok {
|
if _, ok := listByPartialID[network.ID]; !ok {
|
||||||
listByPartialID[nw.ID] = nw
|
listByPartialID[network.ID] = network
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -5,25 +5,23 @@ import (
|
||||||
"io"
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
|
||||||
"github.com/distribution/reference"
|
"github.com/docker/distribution/reference"
|
||||||
"github.com/docker/docker/api/types"
|
enginetypes "github.com/docker/docker/api/types"
|
||||||
"github.com/docker/docker/api/types/backend"
|
|
||||||
"github.com/docker/docker/api/types/filters"
|
"github.com/docker/docker/api/types/filters"
|
||||||
"github.com/docker/docker/api/types/registry"
|
|
||||||
"github.com/docker/docker/plugin"
|
"github.com/docker/docker/plugin"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Backend for Plugin
|
// Backend for Plugin
|
||||||
type Backend interface {
|
type Backend interface {
|
||||||
Disable(name string, config *backend.PluginDisableConfig) error
|
Disable(name string, config *enginetypes.PluginDisableConfig) error
|
||||||
Enable(name string, config *backend.PluginEnableConfig) error
|
Enable(name string, config *enginetypes.PluginEnableConfig) error
|
||||||
List(filters.Args) ([]types.Plugin, error)
|
List(filters.Args) ([]enginetypes.Plugin, error)
|
||||||
Inspect(name string) (*types.Plugin, error)
|
Inspect(name string) (*enginetypes.Plugin, error)
|
||||||
Remove(name string, config *backend.PluginRmConfig) error
|
Remove(name string, config *enginetypes.PluginRmConfig) error
|
||||||
Set(name string, args []string) error
|
Set(name string, args []string) error
|
||||||
Privileges(ctx context.Context, ref reference.Named, metaHeaders http.Header, authConfig *registry.AuthConfig) (types.PluginPrivileges, error)
|
Privileges(ctx context.Context, ref reference.Named, metaHeaders http.Header, authConfig *enginetypes.AuthConfig) (enginetypes.PluginPrivileges, error)
|
||||||
Pull(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *registry.AuthConfig, privileges types.PluginPrivileges, outStream io.Writer, opts ...plugin.CreateOpt) error
|
Pull(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer, opts ...plugin.CreateOpt) error
|
||||||
Push(ctx context.Context, name string, metaHeaders http.Header, authConfig *registry.AuthConfig, outStream io.Writer) error
|
Push(ctx context.Context, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, outStream io.Writer) error
|
||||||
Upgrade(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *registry.AuthConfig, privileges types.PluginPrivileges, outStream io.Writer) error
|
Upgrade(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer) error
|
||||||
CreateFromContext(ctx context.Context, tarCtx io.ReadCloser, options *types.PluginCreateOptions) error
|
CreateFromContext(ctx context.Context, tarCtx io.ReadCloser, options *enginetypes.PluginCreateOptions) error
|
||||||
}
|
}
|
||||||
|
|
|
@ -2,22 +2,25 @@ package plugin // import "github.com/docker/docker/api/server/router/plugin"
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"encoding/base64"
|
||||||
|
"encoding/json"
|
||||||
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/distribution/reference"
|
"github.com/docker/distribution/reference"
|
||||||
"github.com/docker/docker/api/server/httputils"
|
"github.com/docker/docker/api/server/httputils"
|
||||||
"github.com/docker/docker/api/types"
|
"github.com/docker/docker/api/types"
|
||||||
"github.com/docker/docker/api/types/backend"
|
|
||||||
"github.com/docker/docker/api/types/filters"
|
"github.com/docker/docker/api/types/filters"
|
||||||
"github.com/docker/docker/api/types/registry"
|
"github.com/docker/docker/errdefs"
|
||||||
"github.com/docker/docker/pkg/ioutils"
|
"github.com/docker/docker/pkg/ioutils"
|
||||||
"github.com/docker/docker/pkg/streamformatter"
|
"github.com/docker/docker/pkg/streamformatter"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
)
|
)
|
||||||
|
|
||||||
func parseHeaders(headers http.Header) (map[string][]string, *registry.AuthConfig) {
|
func parseHeaders(headers http.Header) (map[string][]string, *types.AuthConfig) {
|
||||||
|
|
||||||
metaHeaders := map[string][]string{}
|
metaHeaders := map[string][]string{}
|
||||||
for k, v := range headers {
|
for k, v := range headers {
|
||||||
if strings.HasPrefix(k, "X-Meta-") {
|
if strings.HasPrefix(k, "X-Meta-") {
|
||||||
|
@ -25,8 +28,16 @@ func parseHeaders(headers http.Header) (map[string][]string, *registry.AuthConfi
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Ignore invalid AuthConfig to increase compatibility with the existing API.
|
// Get X-Registry-Auth
|
||||||
authConfig, _ := registry.DecodeAuthConfig(headers.Get(registry.AuthHeader))
|
authEncoded := headers.Get("X-Registry-Auth")
|
||||||
|
authConfig := &types.AuthConfig{}
|
||||||
|
if authEncoded != "" {
|
||||||
|
authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
|
||||||
|
if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil {
|
||||||
|
authConfig = &types.AuthConfig{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return metaHeaders, authConfig
|
return metaHeaders, authConfig
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -85,8 +96,12 @@ func (pr *pluginRouter) upgradePlugin(ctx context.Context, w http.ResponseWriter
|
||||||
}
|
}
|
||||||
|
|
||||||
var privileges types.PluginPrivileges
|
var privileges types.PluginPrivileges
|
||||||
if err := httputils.ReadJSON(r, &privileges); err != nil {
|
dec := json.NewDecoder(r.Body)
|
||||||
return err
|
if err := dec.Decode(&privileges); err != nil {
|
||||||
|
return errors.Wrap(err, "failed to parse privileges")
|
||||||
|
}
|
||||||
|
if dec.More() {
|
||||||
|
return errors.New("invalid privileges")
|
||||||
}
|
}
|
||||||
|
|
||||||
metaHeaders, authConfig := parseHeaders(r.Header)
|
metaHeaders, authConfig := parseHeaders(r.Header)
|
||||||
|
@ -108,7 +123,7 @@ func (pr *pluginRouter) upgradePlugin(ctx context.Context, w http.ResponseWriter
|
||||||
if !output.Flushed() {
|
if !output.Flushed() {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
_, _ = output.Write(streamformatter.FormatError(err))
|
output.Write(streamformatter.FormatError(err))
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
|
@ -120,8 +135,12 @@ func (pr *pluginRouter) pullPlugin(ctx context.Context, w http.ResponseWriter, r
|
||||||
}
|
}
|
||||||
|
|
||||||
var privileges types.PluginPrivileges
|
var privileges types.PluginPrivileges
|
||||||
if err := httputils.ReadJSON(r, &privileges); err != nil {
|
dec := json.NewDecoder(r.Body)
|
||||||
return err
|
if err := dec.Decode(&privileges); err != nil {
|
||||||
|
return errors.Wrap(err, "failed to parse privileges")
|
||||||
|
}
|
||||||
|
if dec.More() {
|
||||||
|
return errors.New("invalid privileges")
|
||||||
}
|
}
|
||||||
|
|
||||||
metaHeaders, authConfig := parseHeaders(r.Header)
|
metaHeaders, authConfig := parseHeaders(r.Header)
|
||||||
|
@ -143,7 +162,7 @@ func (pr *pluginRouter) pullPlugin(ctx context.Context, w http.ResponseWriter, r
|
||||||
if !output.Flushed() {
|
if !output.Flushed() {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
_, _ = output.Write(streamformatter.FormatError(err))
|
output.Write(streamformatter.FormatError(err))
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
|
@ -187,8 +206,7 @@ func (pr *pluginRouter) createPlugin(ctx context.Context, w http.ResponseWriter,
|
||||||
}
|
}
|
||||||
|
|
||||||
options := &types.PluginCreateOptions{
|
options := &types.PluginCreateOptions{
|
||||||
RepoName: r.FormValue("name"),
|
RepoName: r.FormValue("name")}
|
||||||
}
|
|
||||||
|
|
||||||
if err := pr.backend.CreateFromContext(ctx, r.Body, options); err != nil {
|
if err := pr.backend.CreateFromContext(ctx, r.Body, options); err != nil {
|
||||||
return err
|
return err
|
||||||
|
@ -208,7 +226,7 @@ func (pr *pluginRouter) enablePlugin(ctx context.Context, w http.ResponseWriter,
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
config := &backend.PluginEnableConfig{Timeout: timeout}
|
config := &types.PluginEnableConfig{Timeout: timeout}
|
||||||
|
|
||||||
return pr.backend.Enable(name, config)
|
return pr.backend.Enable(name, config)
|
||||||
}
|
}
|
||||||
|
@ -219,7 +237,7 @@ func (pr *pluginRouter) disablePlugin(ctx context.Context, w http.ResponseWriter
|
||||||
}
|
}
|
||||||
|
|
||||||
name := vars["name"]
|
name := vars["name"]
|
||||||
config := &backend.PluginDisableConfig{
|
config := &types.PluginDisableConfig{
|
||||||
ForceDisable: httputils.BoolValue(r, "force"),
|
ForceDisable: httputils.BoolValue(r, "force"),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -232,7 +250,7 @@ func (pr *pluginRouter) removePlugin(ctx context.Context, w http.ResponseWriter,
|
||||||
}
|
}
|
||||||
|
|
||||||
name := vars["name"]
|
name := vars["name"]
|
||||||
config := &backend.PluginRmConfig{
|
config := &types.PluginRmConfig{
|
||||||
ForceRemove: httputils.BoolValue(r, "force"),
|
ForceRemove: httputils.BoolValue(r, "force"),
|
||||||
}
|
}
|
||||||
return pr.backend.Remove(name, config)
|
return pr.backend.Remove(name, config)
|
||||||
|
@ -252,15 +270,18 @@ func (pr *pluginRouter) pushPlugin(ctx context.Context, w http.ResponseWriter, r
|
||||||
if !output.Flushed() {
|
if !output.Flushed() {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
_, _ = output.Write(streamformatter.FormatError(err))
|
output.Write(streamformatter.FormatError(err))
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (pr *pluginRouter) setPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
func (pr *pluginRouter) setPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||||
var args []string
|
var args []string
|
||||||
if err := httputils.ReadJSON(r, &args); err != nil {
|
if err := json.NewDecoder(r.Body).Decode(&args); err != nil {
|
||||||
return err
|
if err == io.EOF {
|
||||||
|
return errdefs.InvalidParameter(errors.New("got EOF while reading request body"))
|
||||||
|
}
|
||||||
|
return errdefs.InvalidParameter(err)
|
||||||
}
|
}
|
||||||
if err := pr.backend.Set(vars["name"], args); err != nil {
|
if err := pr.backend.Set(vars["name"], args); err != nil {
|
||||||
return err
|
return err
|
||||||
|
|
|
@ -3,41 +3,46 @@ package swarm // import "github.com/docker/docker/api/server/router/swarm"
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
|
||||||
"github.com/docker/docker/api/types"
|
basictypes "github.com/docker/docker/api/types"
|
||||||
"github.com/docker/docker/api/types/backend"
|
"github.com/docker/docker/api/types/backend"
|
||||||
"github.com/docker/docker/api/types/container"
|
types "github.com/docker/docker/api/types/swarm"
|
||||||
"github.com/docker/docker/api/types/swarm"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Backend abstracts a swarm manager.
|
// Backend abstracts a swarm manager.
|
||||||
type Backend interface {
|
type Backend interface {
|
||||||
Init(req swarm.InitRequest) (string, error)
|
Init(req types.InitRequest) (string, error)
|
||||||
Join(req swarm.JoinRequest) error
|
Join(req types.JoinRequest) error
|
||||||
Leave(ctx context.Context, force bool) error
|
Leave(force bool) error
|
||||||
Inspect() (swarm.Swarm, error)
|
Inspect() (types.Swarm, error)
|
||||||
Update(uint64, swarm.Spec, swarm.UpdateFlags) error
|
Update(uint64, types.Spec, types.UpdateFlags) error
|
||||||
GetUnlockKey() (string, error)
|
GetUnlockKey() (string, error)
|
||||||
UnlockSwarm(req swarm.UnlockRequest) error
|
UnlockSwarm(req types.UnlockRequest) error
|
||||||
GetServices(types.ServiceListOptions) ([]swarm.Service, error)
|
|
||||||
GetService(idOrName string, insertDefaults bool) (swarm.Service, error)
|
GetServices(basictypes.ServiceListOptions) ([]types.Service, error)
|
||||||
CreateService(swarm.ServiceSpec, string, bool) (*swarm.ServiceCreateResponse, error)
|
GetService(idOrName string, insertDefaults bool) (types.Service, error)
|
||||||
UpdateService(string, uint64, swarm.ServiceSpec, types.ServiceUpdateOptions, bool) (*swarm.ServiceUpdateResponse, error)
|
CreateService(types.ServiceSpec, string, bool) (*basictypes.ServiceCreateResponse, error)
|
||||||
|
UpdateService(string, uint64, types.ServiceSpec, basictypes.ServiceUpdateOptions, bool) (*basictypes.ServiceUpdateResponse, error)
|
||||||
RemoveService(string) error
|
RemoveService(string) error
|
||||||
ServiceLogs(context.Context, *backend.LogSelector, *container.LogsOptions) (<-chan *backend.LogMessage, error)
|
|
||||||
GetNodes(types.NodeListOptions) ([]swarm.Node, error)
|
ServiceLogs(context.Context, *backend.LogSelector, *basictypes.ContainerLogsOptions) (<-chan *backend.LogMessage, error)
|
||||||
GetNode(string) (swarm.Node, error)
|
|
||||||
UpdateNode(string, uint64, swarm.NodeSpec) error
|
GetNodes(basictypes.NodeListOptions) ([]types.Node, error)
|
||||||
|
GetNode(string) (types.Node, error)
|
||||||
|
UpdateNode(string, uint64, types.NodeSpec) error
|
||||||
RemoveNode(string, bool) error
|
RemoveNode(string, bool) error
|
||||||
GetTasks(types.TaskListOptions) ([]swarm.Task, error)
|
|
||||||
GetTask(string) (swarm.Task, error)
|
GetTasks(basictypes.TaskListOptions) ([]types.Task, error)
|
||||||
GetSecrets(opts types.SecretListOptions) ([]swarm.Secret, error)
|
GetTask(string) (types.Task, error)
|
||||||
CreateSecret(s swarm.SecretSpec) (string, error)
|
|
||||||
|
GetSecrets(opts basictypes.SecretListOptions) ([]types.Secret, error)
|
||||||
|
CreateSecret(s types.SecretSpec) (string, error)
|
||||||
RemoveSecret(idOrName string) error
|
RemoveSecret(idOrName string) error
|
||||||
GetSecret(id string) (swarm.Secret, error)
|
GetSecret(id string) (types.Secret, error)
|
||||||
UpdateSecret(idOrName string, version uint64, spec swarm.SecretSpec) error
|
UpdateSecret(idOrName string, version uint64, spec types.SecretSpec) error
|
||||||
GetConfigs(opts types.ConfigListOptions) ([]swarm.Config, error)
|
|
||||||
CreateConfig(s swarm.ConfigSpec) (string, error)
|
GetConfigs(opts basictypes.ConfigListOptions) ([]types.Config, error)
|
||||||
|
CreateConfig(s types.ConfigSpec) (string, error)
|
||||||
RemoveConfig(id string) error
|
RemoveConfig(id string) error
|
||||||
GetConfig(id string) (swarm.Config, error)
|
GetConfig(id string) (types.Config, error)
|
||||||
UpdateConfig(idOrName string, version uint64, spec swarm.ConfigSpec) error
|
UpdateConfig(idOrName string, version uint64, spec types.ConfigSpec) error
|
||||||
}
|
}
|
||||||
|
|
|
@ -2,26 +2,30 @@ package swarm // import "github.com/docker/docker/api/server/router/swarm"
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
|
||||||
"github.com/containerd/log"
|
|
||||||
"github.com/docker/docker/api/server/httputils"
|
"github.com/docker/docker/api/server/httputils"
|
||||||
basictypes "github.com/docker/docker/api/types"
|
basictypes "github.com/docker/docker/api/types"
|
||||||
"github.com/docker/docker/api/types/backend"
|
"github.com/docker/docker/api/types/backend"
|
||||||
"github.com/docker/docker/api/types/filters"
|
"github.com/docker/docker/api/types/filters"
|
||||||
"github.com/docker/docker/api/types/registry"
|
|
||||||
types "github.com/docker/docker/api/types/swarm"
|
types "github.com/docker/docker/api/types/swarm"
|
||||||
"github.com/docker/docker/api/types/versions"
|
"github.com/docker/docker/api/types/versions"
|
||||||
"github.com/docker/docker/errdefs"
|
"github.com/docker/docker/errdefs"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
)
|
)
|
||||||
|
|
||||||
func (sr *swarmRouter) initCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
func (sr *swarmRouter) initCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||||
var req types.InitRequest
|
var req types.InitRequest
|
||||||
if err := httputils.ReadJSON(r, &req); err != nil {
|
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||||
return err
|
if err == io.EOF {
|
||||||
|
return errdefs.InvalidParameter(errors.New("got EOF while reading request body"))
|
||||||
|
}
|
||||||
|
return errdefs.InvalidParameter(err)
|
||||||
}
|
}
|
||||||
version := httputils.VersionFromContext(ctx)
|
version := httputils.VersionFromContext(ctx)
|
||||||
|
|
||||||
|
@ -36,7 +40,7 @@ func (sr *swarmRouter) initCluster(ctx context.Context, w http.ResponseWriter, r
|
||||||
}
|
}
|
||||||
nodeID, err := sr.backend.Init(req)
|
nodeID, err := sr.backend.Init(req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.G(ctx).WithContext(ctx).WithError(err).Debug("Error initializing swarm")
|
logrus.Errorf("Error initializing swarm: %v", err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return httputils.WriteJSON(w, http.StatusOK, nodeID)
|
return httputils.WriteJSON(w, http.StatusOK, nodeID)
|
||||||
|
@ -44,8 +48,11 @@ func (sr *swarmRouter) initCluster(ctx context.Context, w http.ResponseWriter, r
|
||||||
|
|
||||||
func (sr *swarmRouter) joinCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
func (sr *swarmRouter) joinCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||||
var req types.JoinRequest
|
var req types.JoinRequest
|
||||||
if err := httputils.ReadJSON(r, &req); err != nil {
|
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||||
return err
|
if err == io.EOF {
|
||||||
|
return errdefs.InvalidParameter(errors.New("got EOF while reading request body"))
|
||||||
|
}
|
||||||
|
return errdefs.InvalidParameter(err)
|
||||||
}
|
}
|
||||||
return sr.backend.Join(req)
|
return sr.backend.Join(req)
|
||||||
}
|
}
|
||||||
|
@ -56,13 +63,13 @@ func (sr *swarmRouter) leaveCluster(ctx context.Context, w http.ResponseWriter,
|
||||||
}
|
}
|
||||||
|
|
||||||
force := httputils.BoolValue(r, "force")
|
force := httputils.BoolValue(r, "force")
|
||||||
return sr.backend.Leave(ctx, force)
|
return sr.backend.Leave(force)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (sr *swarmRouter) inspectCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
func (sr *swarmRouter) inspectCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||||
swarm, err := sr.backend.Inspect()
|
swarm, err := sr.backend.Inspect()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.G(ctx).WithContext(ctx).WithError(err).Debug("Error getting swarm")
|
logrus.Errorf("Error getting swarm: %v", err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -71,8 +78,11 @@ func (sr *swarmRouter) inspectCluster(ctx context.Context, w http.ResponseWriter
|
||||||
|
|
||||||
func (sr *swarmRouter) updateCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
func (sr *swarmRouter) updateCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||||
var swarm types.Spec
|
var swarm types.Spec
|
||||||
if err := httputils.ReadJSON(r, &swarm); err != nil {
|
if err := json.NewDecoder(r.Body).Decode(&swarm); err != nil {
|
||||||
return err
|
if err == io.EOF {
|
||||||
|
return errdefs.InvalidParameter(errors.New("got EOF while reading request body"))
|
||||||
|
}
|
||||||
|
return errdefs.InvalidParameter(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
rawVersion := r.URL.Query().Get("version")
|
rawVersion := r.URL.Query().Get("version")
|
||||||
|
@ -114,7 +124,7 @@ func (sr *swarmRouter) updateCluster(ctx context.Context, w http.ResponseWriter,
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := sr.backend.Update(version, swarm, flags); err != nil {
|
if err := sr.backend.Update(version, swarm, flags); err != nil {
|
||||||
log.G(ctx).WithContext(ctx).WithError(err).Debug("Error configuring swarm")
|
logrus.Errorf("Error configuring swarm: %v", err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
|
@ -122,12 +132,15 @@ func (sr *swarmRouter) updateCluster(ctx context.Context, w http.ResponseWriter,
|
||||||
|
|
||||||
func (sr *swarmRouter) unlockCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
func (sr *swarmRouter) unlockCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||||
var req types.UnlockRequest
|
var req types.UnlockRequest
|
||||||
if err := httputils.ReadJSON(r, &req); err != nil {
|
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||||
return err
|
if err == io.EOF {
|
||||||
|
return errdefs.InvalidParameter(errors.New("got EOF while reading request body"))
|
||||||
|
}
|
||||||
|
return errdefs.InvalidParameter(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := sr.backend.UnlockSwarm(req); err != nil {
|
if err := sr.backend.UnlockSwarm(req); err != nil {
|
||||||
log.G(ctx).WithContext(ctx).WithError(err).Debug("Error unlocking swarm")
|
logrus.Errorf("Error unlocking swarm: %v", err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
|
@ -136,7 +149,7 @@ func (sr *swarmRouter) unlockCluster(ctx context.Context, w http.ResponseWriter,
|
||||||
func (sr *swarmRouter) getUnlockKey(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
func (sr *swarmRouter) getUnlockKey(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||||
unlockKey, err := sr.backend.GetUnlockKey()
|
unlockKey, err := sr.backend.GetUnlockKey()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.G(ctx).WithContext(ctx).WithError(err).Debug("Error retrieving swarm unlock key")
|
logrus.WithError(err).Errorf("Error retrieving swarm unlock key")
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -151,24 +164,12 @@ func (sr *swarmRouter) getServices(ctx context.Context, w http.ResponseWriter, r
|
||||||
}
|
}
|
||||||
filter, err := filters.FromJSON(r.Form.Get("filters"))
|
filter, err := filters.FromJSON(r.Form.Get("filters"))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return errdefs.InvalidParameter(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// the status query parameter is only support in API versions >= 1.41. If
|
services, err := sr.backend.GetServices(basictypes.ServiceListOptions{Filters: filter})
|
||||||
// the client is using a lesser version, ignore the parameter.
|
|
||||||
cliVersion := httputils.VersionFromContext(ctx)
|
|
||||||
var status bool
|
|
||||||
if value := r.URL.Query().Get("status"); value != "" && !versions.LessThan(cliVersion, "1.41") {
|
|
||||||
var err error
|
|
||||||
status, err = strconv.ParseBool(value)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrapf(errdefs.InvalidParameter(err), "invalid value for status: %s", value)
|
logrus.Errorf("Error getting services: %v", err)
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
services, err := sr.backend.GetServices(basictypes.ServiceListOptions{Filters: filter, Status: status})
|
|
||||||
if err != nil {
|
|
||||||
log.G(ctx).WithContext(ctx).WithError(err).Debug("Error getting services")
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -177,27 +178,18 @@ func (sr *swarmRouter) getServices(ctx context.Context, w http.ResponseWriter, r
|
||||||
|
|
||||||
func (sr *swarmRouter) getService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
func (sr *swarmRouter) getService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||||
var insertDefaults bool
|
var insertDefaults bool
|
||||||
|
|
||||||
if value := r.URL.Query().Get("insertDefaults"); value != "" {
|
if value := r.URL.Query().Get("insertDefaults"); value != "" {
|
||||||
var err error
|
var err error
|
||||||
insertDefaults, err = strconv.ParseBool(value)
|
insertDefaults, err = strconv.ParseBool(value)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
err := fmt.Errorf("invalid value for insertDefaults: %s", value)
|
||||||
return errors.Wrapf(errdefs.InvalidParameter(err), "invalid value for insertDefaults: %s", value)
|
return errors.Wrapf(errdefs.InvalidParameter(err), "invalid value for insertDefaults: %s", value)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// you may note that there is no code here to handle the "status" query
|
|
||||||
// parameter, as in getServices. the Status field is not supported when
|
|
||||||
// retrieving an individual service because the Backend API changes
|
|
||||||
// required to accommodate it would be too disruptive, and because that
|
|
||||||
// field is so rarely needed as part of an individual service inspection.
|
|
||||||
|
|
||||||
service, err := sr.backend.GetService(vars["id"], insertDefaults)
|
service, err := sr.backend.GetService(vars["id"], insertDefaults)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.G(ctx).WithContext(ctx).WithFields(log.Fields{
|
logrus.Errorf("Error getting service %s: %v", vars["id"], err)
|
||||||
"error": err,
|
|
||||||
"service-id": vars["id"],
|
|
||||||
}).Debug("Error getting service")
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -206,38 +198,27 @@ func (sr *swarmRouter) getService(ctx context.Context, w http.ResponseWriter, r
|
||||||
|
|
||||||
func (sr *swarmRouter) createService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
func (sr *swarmRouter) createService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||||
var service types.ServiceSpec
|
var service types.ServiceSpec
|
||||||
if err := httputils.ReadJSON(r, &service); err != nil {
|
if err := json.NewDecoder(r.Body).Decode(&service); err != nil {
|
||||||
return err
|
if err == io.EOF {
|
||||||
|
return errdefs.InvalidParameter(errors.New("got EOF while reading request body"))
|
||||||
}
|
}
|
||||||
// TODO(thaJeztah): remove logentries check and migration code in release v26.0.0.
|
return errdefs.InvalidParameter(err)
|
||||||
if service.TaskTemplate.LogDriver != nil && service.TaskTemplate.LogDriver.Name == "logentries" {
|
|
||||||
return errdefs.InvalidParameter(errors.New("the logentries logging driver has been deprecated and removed"))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get returns "" if the header does not exist
|
// Get returns "" if the header does not exist
|
||||||
encodedAuth := r.Header.Get(registry.AuthHeader)
|
encodedAuth := r.Header.Get("X-Registry-Auth")
|
||||||
|
cliVersion := r.Header.Get("version")
|
||||||
queryRegistry := false
|
queryRegistry := false
|
||||||
if v := httputils.VersionFromContext(ctx); v != "" {
|
if cliVersion != "" {
|
||||||
if versions.LessThan(v, "1.30") {
|
if versions.LessThan(cliVersion, "1.30") {
|
||||||
queryRegistry = true
|
queryRegistry = true
|
||||||
}
|
}
|
||||||
adjustForAPIVersion(v, &service)
|
adjustForAPIVersion(cliVersion, &service)
|
||||||
}
|
|
||||||
|
|
||||||
version := httputils.VersionFromContext(ctx)
|
|
||||||
if versions.LessThan(version, "1.44") {
|
|
||||||
if service.TaskTemplate.ContainerSpec != nil && service.TaskTemplate.ContainerSpec.Healthcheck != nil {
|
|
||||||
// StartInterval was added in API 1.44
|
|
||||||
service.TaskTemplate.ContainerSpec.Healthcheck.StartInterval = 0
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
resp, err := sr.backend.CreateService(service, encodedAuth, queryRegistry)
|
resp, err := sr.backend.CreateService(service, encodedAuth, queryRegistry)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.G(ctx).WithFields(log.Fields{
|
logrus.Errorf("Error creating service %s: %v", service.Name, err)
|
||||||
"error": err,
|
|
||||||
"service-name": service.Name,
|
|
||||||
}).Debug("Error creating service")
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -246,12 +227,11 @@ func (sr *swarmRouter) createService(ctx context.Context, w http.ResponseWriter,
|
||||||
|
|
||||||
func (sr *swarmRouter) updateService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
func (sr *swarmRouter) updateService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||||
var service types.ServiceSpec
|
var service types.ServiceSpec
|
||||||
if err := httputils.ReadJSON(r, &service); err != nil {
|
if err := json.NewDecoder(r.Body).Decode(&service); err != nil {
|
||||||
return err
|
if err == io.EOF {
|
||||||
|
return errdefs.InvalidParameter(errors.New("got EOF while reading request body"))
|
||||||
}
|
}
|
||||||
// TODO(thaJeztah): remove logentries check and migration code in release v26.0.0.
|
return errdefs.InvalidParameter(err)
|
||||||
if service.TaskTemplate.LogDriver != nil && service.TaskTemplate.LogDriver.Name == "logentries" {
|
|
||||||
return errdefs.InvalidParameter(errors.New("the logentries logging driver has been deprecated and removed"))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
rawVersion := r.URL.Query().Get("version")
|
rawVersion := r.URL.Query().Get("version")
|
||||||
|
@ -264,23 +244,21 @@ func (sr *swarmRouter) updateService(ctx context.Context, w http.ResponseWriter,
|
||||||
var flags basictypes.ServiceUpdateOptions
|
var flags basictypes.ServiceUpdateOptions
|
||||||
|
|
||||||
// Get returns "" if the header does not exist
|
// Get returns "" if the header does not exist
|
||||||
flags.EncodedRegistryAuth = r.Header.Get(registry.AuthHeader)
|
flags.EncodedRegistryAuth = r.Header.Get("X-Registry-Auth")
|
||||||
flags.RegistryAuthFrom = r.URL.Query().Get("registryAuthFrom")
|
flags.RegistryAuthFrom = r.URL.Query().Get("registryAuthFrom")
|
||||||
flags.Rollback = r.URL.Query().Get("rollback")
|
flags.Rollback = r.URL.Query().Get("rollback")
|
||||||
|
cliVersion := r.Header.Get("version")
|
||||||
queryRegistry := false
|
queryRegistry := false
|
||||||
if v := httputils.VersionFromContext(ctx); v != "" {
|
if cliVersion != "" {
|
||||||
if versions.LessThan(v, "1.30") {
|
if versions.LessThan(cliVersion, "1.30") {
|
||||||
queryRegistry = true
|
queryRegistry = true
|
||||||
}
|
}
|
||||||
adjustForAPIVersion(v, &service)
|
adjustForAPIVersion(cliVersion, &service)
|
||||||
}
|
}
|
||||||
|
|
||||||
resp, err := sr.backend.UpdateService(vars["id"], version, service, flags, queryRegistry)
|
resp, err := sr.backend.UpdateService(vars["id"], version, service, flags, queryRegistry)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.G(ctx).WithContext(ctx).WithFields(log.Fields{
|
logrus.Errorf("Error updating service %s: %v", vars["id"], err)
|
||||||
"error": err,
|
|
||||||
"service-id": vars["id"],
|
|
||||||
}).Debug("Error updating service")
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return httputils.WriteJSON(w, http.StatusOK, resp)
|
return httputils.WriteJSON(w, http.StatusOK, resp)
|
||||||
|
@ -288,10 +266,7 @@ func (sr *swarmRouter) updateService(ctx context.Context, w http.ResponseWriter,
|
||||||
|
|
||||||
func (sr *swarmRouter) removeService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
func (sr *swarmRouter) removeService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||||
if err := sr.backend.RemoveService(vars["id"]); err != nil {
|
if err := sr.backend.RemoveService(vars["id"]); err != nil {
|
||||||
log.G(ctx).WithContext(ctx).WithFields(log.Fields{
|
logrus.Errorf("Error removing service %s: %v", vars["id"], err)
|
||||||
"error": err,
|
|
||||||
"service-id": vars["id"],
|
|
||||||
}).Debug("Error removing service")
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
|
@ -332,7 +307,7 @@ func (sr *swarmRouter) getNodes(ctx context.Context, w http.ResponseWriter, r *h
|
||||||
|
|
||||||
nodes, err := sr.backend.GetNodes(basictypes.NodeListOptions{Filters: filter})
|
nodes, err := sr.backend.GetNodes(basictypes.NodeListOptions{Filters: filter})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.G(ctx).WithContext(ctx).WithError(err).Debug("Error getting nodes")
|
logrus.Errorf("Error getting nodes: %v", err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -342,10 +317,7 @@ func (sr *swarmRouter) getNodes(ctx context.Context, w http.ResponseWriter, r *h
|
||||||
func (sr *swarmRouter) getNode(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
func (sr *swarmRouter) getNode(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||||
node, err := sr.backend.GetNode(vars["id"])
|
node, err := sr.backend.GetNode(vars["id"])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.G(ctx).WithContext(ctx).WithFields(log.Fields{
|
logrus.Errorf("Error getting node %s: %v", vars["id"], err)
|
||||||
"error": err,
|
|
||||||
"node-id": vars["id"],
|
|
||||||
}).Debug("Error getting node")
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -354,8 +326,11 @@ func (sr *swarmRouter) getNode(ctx context.Context, w http.ResponseWriter, r *ht
|
||||||
|
|
||||||
func (sr *swarmRouter) updateNode(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
func (sr *swarmRouter) updateNode(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||||
var node types.NodeSpec
|
var node types.NodeSpec
|
||||||
if err := httputils.ReadJSON(r, &node); err != nil {
|
if err := json.NewDecoder(r.Body).Decode(&node); err != nil {
|
||||||
return err
|
if err == io.EOF {
|
||||||
|
return errdefs.InvalidParameter(errors.New("got EOF while reading request body"))
|
||||||
|
}
|
||||||
|
return errdefs.InvalidParameter(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
rawVersion := r.URL.Query().Get("version")
|
rawVersion := r.URL.Query().Get("version")
|
||||||
|
@ -366,10 +341,7 @@ func (sr *swarmRouter) updateNode(ctx context.Context, w http.ResponseWriter, r
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := sr.backend.UpdateNode(vars["id"], version, node); err != nil {
|
if err := sr.backend.UpdateNode(vars["id"], version, node); err != nil {
|
||||||
log.G(ctx).WithContext(ctx).WithFields(log.Fields{
|
logrus.Errorf("Error updating node %s: %v", vars["id"], err)
|
||||||
"error": err,
|
|
||||||
"node-id": vars["id"],
|
|
||||||
}).Debug("Error updating node")
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
|
@ -383,10 +355,7 @@ func (sr *swarmRouter) removeNode(ctx context.Context, w http.ResponseWriter, r
|
||||||
force := httputils.BoolValue(r, "force")
|
force := httputils.BoolValue(r, "force")
|
||||||
|
|
||||||
if err := sr.backend.RemoveNode(vars["id"], force); err != nil {
|
if err := sr.backend.RemoveNode(vars["id"], force); err != nil {
|
||||||
log.G(ctx).WithContext(ctx).WithFields(log.Fields{
|
logrus.Errorf("Error removing node %s: %v", vars["id"], err)
|
||||||
"error": err,
|
|
||||||
"node-id": vars["id"],
|
|
||||||
}).Debug("Error removing node")
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
|
@ -403,7 +372,7 @@ func (sr *swarmRouter) getTasks(ctx context.Context, w http.ResponseWriter, r *h
|
||||||
|
|
||||||
tasks, err := sr.backend.GetTasks(basictypes.TaskListOptions{Filters: filter})
|
tasks, err := sr.backend.GetTasks(basictypes.TaskListOptions{Filters: filter})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.G(ctx).WithContext(ctx).WithError(err).Debug("Error getting tasks")
|
logrus.Errorf("Error getting tasks: %v", err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -413,10 +382,7 @@ func (sr *swarmRouter) getTasks(ctx context.Context, w http.ResponseWriter, r *h
|
||||||
func (sr *swarmRouter) getTask(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
func (sr *swarmRouter) getTask(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||||
task, err := sr.backend.GetTask(vars["id"])
|
task, err := sr.backend.GetTask(vars["id"])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.G(ctx).WithContext(ctx).WithFields(log.Fields{
|
logrus.Errorf("Error getting task %s: %v", vars["id"], err)
|
||||||
"error": err,
|
|
||||||
"task-id": vars["id"],
|
|
||||||
}).Debug("Error getting task")
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -442,8 +408,11 @@ func (sr *swarmRouter) getSecrets(ctx context.Context, w http.ResponseWriter, r
|
||||||
|
|
||||||
func (sr *swarmRouter) createSecret(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
func (sr *swarmRouter) createSecret(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||||
var secret types.SecretSpec
|
var secret types.SecretSpec
|
||||||
if err := httputils.ReadJSON(r, &secret); err != nil {
|
if err := json.NewDecoder(r.Body).Decode(&secret); err != nil {
|
||||||
return err
|
if err == io.EOF {
|
||||||
|
return errdefs.InvalidParameter(errors.New("got EOF while reading request body"))
|
||||||
|
}
|
||||||
|
return errdefs.InvalidParameter(err)
|
||||||
}
|
}
|
||||||
version := httputils.VersionFromContext(ctx)
|
version := httputils.VersionFromContext(ctx)
|
||||||
if secret.Templating != nil && versions.LessThan(version, "1.37") {
|
if secret.Templating != nil && versions.LessThan(version, "1.37") {
|
||||||
|
@ -480,8 +449,11 @@ func (sr *swarmRouter) getSecret(ctx context.Context, w http.ResponseWriter, r *
|
||||||
|
|
||||||
func (sr *swarmRouter) updateSecret(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
func (sr *swarmRouter) updateSecret(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||||
var secret types.SecretSpec
|
var secret types.SecretSpec
|
||||||
if err := httputils.ReadJSON(r, &secret); err != nil {
|
if err := json.NewDecoder(r.Body).Decode(&secret); err != nil {
|
||||||
return err
|
if err == io.EOF {
|
||||||
|
return errdefs.InvalidParameter(errors.New("got EOF while reading request body"))
|
||||||
|
}
|
||||||
|
return errdefs.InvalidParameter(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
rawVersion := r.URL.Query().Get("version")
|
rawVersion := r.URL.Query().Get("version")
|
||||||
|
@ -513,8 +485,11 @@ func (sr *swarmRouter) getConfigs(ctx context.Context, w http.ResponseWriter, r
|
||||||
|
|
||||||
func (sr *swarmRouter) createConfig(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
func (sr *swarmRouter) createConfig(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||||
var config types.ConfigSpec
|
var config types.ConfigSpec
|
||||||
if err := httputils.ReadJSON(r, &config); err != nil {
|
if err := json.NewDecoder(r.Body).Decode(&config); err != nil {
|
||||||
return err
|
if err == io.EOF {
|
||||||
|
return errdefs.InvalidParameter(errors.New("got EOF while reading request body"))
|
||||||
|
}
|
||||||
|
return errdefs.InvalidParameter(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
version := httputils.VersionFromContext(ctx)
|
version := httputils.VersionFromContext(ctx)
|
||||||
|
@ -552,8 +527,11 @@ func (sr *swarmRouter) getConfig(ctx context.Context, w http.ResponseWriter, r *
|
||||||
|
|
||||||
func (sr *swarmRouter) updateConfig(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
func (sr *swarmRouter) updateConfig(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||||
var config types.ConfigSpec
|
var config types.ConfigSpec
|
||||||
if err := httputils.ReadJSON(r, &config); err != nil {
|
if err := json.NewDecoder(r.Body).Decode(&config); err != nil {
|
||||||
return err
|
if err == io.EOF {
|
||||||
|
return errdefs.InvalidParameter(errors.New("got EOF while reading request body"))
|
||||||
|
}
|
||||||
|
return errdefs.InvalidParameter(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
rawVersion := r.URL.Query().Get("version")
|
rawVersion := r.URL.Query().Get("version")
|
||||||
|
|
|
@ -3,19 +3,19 @@ package swarm // import "github.com/docker/docker/api/server/router/swarm"
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
|
||||||
"github.com/docker/docker/api/server/httputils"
|
"github.com/docker/docker/api/server/httputils"
|
||||||
basictypes "github.com/docker/docker/api/types"
|
basictypes "github.com/docker/docker/api/types"
|
||||||
"github.com/docker/docker/api/types/backend"
|
"github.com/docker/docker/api/types/backend"
|
||||||
"github.com/docker/docker/api/types/container"
|
|
||||||
"github.com/docker/docker/api/types/swarm"
|
"github.com/docker/docker/api/types/swarm"
|
||||||
"github.com/docker/docker/api/types/versions"
|
"github.com/docker/docker/api/types/versions"
|
||||||
)
|
)
|
||||||
|
|
||||||
// swarmLogs takes an http response, request, and selector, and writes the logs
|
// swarmLogs takes an http response, request, and selector, and writes the logs
|
||||||
// specified by the selector to the response
|
// specified by the selector to the response
|
||||||
func (sr *swarmRouter) swarmLogs(ctx context.Context, w http.ResponseWriter, r *http.Request, selector *backend.LogSelector) error {
|
func (sr *swarmRouter) swarmLogs(ctx context.Context, w io.Writer, r *http.Request, selector *backend.LogSelector) error {
|
||||||
// Args are validated before the stream starts because when it starts we're
|
// Args are validated before the stream starts because when it starts we're
|
||||||
// sending HTTP 200 by writing an empty chunk of data to tell the client that
|
// sending HTTP 200 by writing an empty chunk of data to tell the client that
|
||||||
// daemon is going to stream. By sending this initial HTTP 200 we can't report
|
// daemon is going to stream. By sending this initial HTTP 200 we can't report
|
||||||
|
@ -26,9 +26,9 @@ func (sr *swarmRouter) swarmLogs(ctx context.Context, w http.ResponseWriter, r *
|
||||||
return fmt.Errorf("Bad parameters: you must choose at least one stream")
|
return fmt.Errorf("Bad parameters: you must choose at least one stream")
|
||||||
}
|
}
|
||||||
|
|
||||||
// there is probably a neater way to manufacture the LogsOptions
|
// there is probably a neater way to manufacture the ContainerLogsOptions
|
||||||
// struct, probably in the caller, to eliminate the dependency on net/http
|
// struct, probably in the caller, to eliminate the dependency on net/http
|
||||||
logsConfig := &container.LogsOptions{
|
logsConfig := &basictypes.ContainerLogsOptions{
|
||||||
Follow: httputils.BoolValue(r, "follow"),
|
Follow: httputils.BoolValue(r, "follow"),
|
||||||
Timestamps: httputils.BoolValue(r, "timestamps"),
|
Timestamps: httputils.BoolValue(r, "timestamps"),
|
||||||
Since: r.Form.Get("since"),
|
Since: r.Form.Get("since"),
|
||||||
|
@ -63,11 +63,6 @@ func (sr *swarmRouter) swarmLogs(ctx context.Context, w http.ResponseWriter, r *
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
contentType := basictypes.MediaTypeRawStream
|
|
||||||
if !tty && versions.GreaterThanOrEqualTo(httputils.VersionFromContext(ctx), "1.42") {
|
|
||||||
contentType = basictypes.MediaTypeMultiplexedStream
|
|
||||||
}
|
|
||||||
w.Header().Set("Content-Type", contentType)
|
|
||||||
httputils.WriteLogStream(ctx, w, msgs, logsConfig, !tty)
|
httputils.WriteLogStream(ctx, w, msgs, logsConfig, !tty)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -100,32 +95,4 @@ func adjustForAPIVersion(cliVersion string, service *swarm.ServiceSpec) {
|
||||||
service.TaskTemplate.Placement.MaxReplicas = 0
|
service.TaskTemplate.Placement.MaxReplicas = 0
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if versions.LessThan(cliVersion, "1.41") {
|
|
||||||
if service.TaskTemplate.ContainerSpec != nil {
|
|
||||||
// Capabilities and Ulimits for docker swarm services weren't
|
|
||||||
// supported before API version 1.41
|
|
||||||
service.TaskTemplate.ContainerSpec.CapabilityAdd = nil
|
|
||||||
service.TaskTemplate.ContainerSpec.CapabilityDrop = nil
|
|
||||||
service.TaskTemplate.ContainerSpec.Ulimits = nil
|
|
||||||
}
|
|
||||||
if service.TaskTemplate.Resources != nil && service.TaskTemplate.Resources.Limits != nil {
|
|
||||||
// Limits.Pids not supported before API version 1.41
|
|
||||||
service.TaskTemplate.Resources.Limits.Pids = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// jobs were only introduced in API version 1.41. Nil out both Job
|
|
||||||
// modes; if the service is one of these modes and subsequently has no
|
|
||||||
// mode, then something down the pipe will thrown an error.
|
|
||||||
service.Mode.ReplicatedJob = nil
|
|
||||||
service.Mode.GlobalJob = nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if versions.LessThan(cliVersion, "1.44") {
|
|
||||||
// seccomp, apparmor, and no_new_privs were added in 1.44.
|
|
||||||
if service.TaskTemplate.ContainerSpec != nil && service.TaskTemplate.ContainerSpec.Privileges != nil {
|
|
||||||
service.TaskTemplate.ContainerSpec.Privileges.Seccomp = nil
|
|
||||||
service.TaskTemplate.ContainerSpec.Privileges.AppArmor = nil
|
|
||||||
service.TaskTemplate.ContainerSpec.Privileges.NoNewPrivileges = false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -5,11 +5,12 @@ import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/docker/docker/api/types/swarm"
|
"github.com/docker/docker/api/types/swarm"
|
||||||
"github.com/docker/go-units"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestAdjustForAPIVersion(t *testing.T) {
|
func TestAdjustForAPIVersion(t *testing.T) {
|
||||||
expectedSysctls := map[string]string{"foo": "bar"}
|
var (
|
||||||
|
expectedSysctls = map[string]string{"foo": "bar"}
|
||||||
|
)
|
||||||
// testing the negative -- does this leave everything else alone? -- is
|
// testing the negative -- does this leave everything else alone? -- is
|
||||||
// prohibitively time-consuming to write, because it would need an object
|
// prohibitively time-consuming to write, because it would need an object
|
||||||
// with literally every field filled in.
|
// with literally every field filled in.
|
||||||
|
@ -38,40 +39,21 @@ func TestAdjustForAPIVersion(t *testing.T) {
|
||||||
ConfigName: "configRuntime",
|
ConfigName: "configRuntime",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
Ulimits: []*units.Ulimit{
|
|
||||||
{
|
|
||||||
Name: "nofile",
|
|
||||||
Soft: 100,
|
|
||||||
Hard: 200,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
Placement: &swarm.Placement{
|
Placement: &swarm.Placement{
|
||||||
MaxReplicas: 222,
|
MaxReplicas: 222,
|
||||||
},
|
},
|
||||||
Resources: &swarm.ResourceRequirements{
|
|
||||||
Limits: &swarm.Limit{
|
|
||||||
Pids: 300,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
// first, does calling this with a later version correctly NOT strip
|
// first, does calling this with a later version correctly NOT strip
|
||||||
// fields? do the later version first, so we can reuse this spec in the
|
// fields? do the later version first, so we can reuse this spec in the
|
||||||
// next test.
|
// next test.
|
||||||
adjustForAPIVersion("1.41", spec)
|
adjustForAPIVersion("1.40", spec)
|
||||||
if !reflect.DeepEqual(spec.TaskTemplate.ContainerSpec.Sysctls, expectedSysctls) {
|
if !reflect.DeepEqual(spec.TaskTemplate.ContainerSpec.Sysctls, expectedSysctls) {
|
||||||
t.Error("Sysctls was stripped from spec")
|
t.Error("Sysctls was stripped from spec")
|
||||||
}
|
}
|
||||||
|
|
||||||
if spec.TaskTemplate.Resources.Limits.Pids == 0 {
|
|
||||||
t.Error("PidsLimit was stripped from spec")
|
|
||||||
}
|
|
||||||
if spec.TaskTemplate.Resources.Limits.Pids != 300 {
|
|
||||||
t.Error("PidsLimit did not preserve the value from spec")
|
|
||||||
}
|
|
||||||
|
|
||||||
if spec.TaskTemplate.ContainerSpec.Privileges.CredentialSpec.Config != "someconfig" {
|
if spec.TaskTemplate.ContainerSpec.Privileges.CredentialSpec.Config != "someconfig" {
|
||||||
t.Error("CredentialSpec.Config field was stripped from spec")
|
t.Error("CredentialSpec.Config field was stripped from spec")
|
||||||
}
|
}
|
||||||
|
@ -84,20 +66,12 @@ func TestAdjustForAPIVersion(t *testing.T) {
|
||||||
t.Error("MaxReplicas was stripped from spec")
|
t.Error("MaxReplicas was stripped from spec")
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(spec.TaskTemplate.ContainerSpec.Ulimits) == 0 {
|
|
||||||
t.Error("Ulimits were stripped from spec")
|
|
||||||
}
|
|
||||||
|
|
||||||
// next, does calling this with an earlier version correctly strip fields?
|
// next, does calling this with an earlier version correctly strip fields?
|
||||||
adjustForAPIVersion("1.29", spec)
|
adjustForAPIVersion("1.29", spec)
|
||||||
if spec.TaskTemplate.ContainerSpec.Sysctls != nil {
|
if spec.TaskTemplate.ContainerSpec.Sysctls != nil {
|
||||||
t.Error("Sysctls was not stripped from spec")
|
t.Error("Sysctls was not stripped from spec")
|
||||||
}
|
}
|
||||||
|
|
||||||
if spec.TaskTemplate.Resources.Limits.Pids != 0 {
|
|
||||||
t.Error("PidsLimit was not stripped from spec")
|
|
||||||
}
|
|
||||||
|
|
||||||
if spec.TaskTemplate.ContainerSpec.Privileges.CredentialSpec.Config != "" {
|
if spec.TaskTemplate.ContainerSpec.Privileges.CredentialSpec.Config != "" {
|
||||||
t.Error("CredentialSpec.Config field was not stripped from spec")
|
t.Error("CredentialSpec.Config field was not stripped from spec")
|
||||||
}
|
}
|
||||||
|
@ -110,7 +84,4 @@ func TestAdjustForAPIVersion(t *testing.T) {
|
||||||
t.Error("MaxReplicas was not stripped from spec")
|
t.Error("MaxReplicas was not stripped from spec")
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(spec.TaskTemplate.ContainerSpec.Ulimits) != 0 {
|
|
||||||
t.Error("Ulimits were not stripped from spec")
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -7,41 +7,22 @@ import (
|
||||||
"github.com/docker/docker/api/types"
|
"github.com/docker/docker/api/types"
|
||||||
"github.com/docker/docker/api/types/events"
|
"github.com/docker/docker/api/types/events"
|
||||||
"github.com/docker/docker/api/types/filters"
|
"github.com/docker/docker/api/types/filters"
|
||||||
"github.com/docker/docker/api/types/registry"
|
|
||||||
"github.com/docker/docker/api/types/swarm"
|
"github.com/docker/docker/api/types/swarm"
|
||||||
"github.com/docker/docker/api/types/system"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// DiskUsageOptions holds parameters for system disk usage query.
|
|
||||||
type DiskUsageOptions struct {
|
|
||||||
// Containers controls whether container disk usage should be computed.
|
|
||||||
Containers bool
|
|
||||||
|
|
||||||
// Images controls whether image disk usage should be computed.
|
|
||||||
Images bool
|
|
||||||
|
|
||||||
// Volumes controls whether volume disk usage should be computed.
|
|
||||||
Volumes bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// Backend is the methods that need to be implemented to provide
|
// Backend is the methods that need to be implemented to provide
|
||||||
// system specific functionality.
|
// system specific functionality.
|
||||||
type Backend interface {
|
type Backend interface {
|
||||||
SystemInfo(context.Context) (*system.Info, error)
|
SystemInfo() (*types.Info, error)
|
||||||
SystemVersion(context.Context) (types.Version, error)
|
SystemVersion() types.Version
|
||||||
SystemDiskUsage(ctx context.Context, opts DiskUsageOptions) (*types.DiskUsage, error)
|
SystemDiskUsage(ctx context.Context) (*types.DiskUsage, error)
|
||||||
SubscribeToEvents(since, until time.Time, ef filters.Args) ([]events.Message, chan interface{})
|
SubscribeToEvents(since, until time.Time, ef filters.Args) ([]events.Message, chan interface{})
|
||||||
UnsubscribeFromEvents(chan interface{})
|
UnsubscribeFromEvents(chan interface{})
|
||||||
AuthenticateToRegistry(ctx context.Context, authConfig *registry.AuthConfig) (string, string, error)
|
AuthenticateToRegistry(ctx context.Context, authConfig *types.AuthConfig) (string, string, error)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ClusterBackend is all the methods that need to be implemented
|
// ClusterBackend is all the methods that need to be implemented
|
||||||
// to provide cluster system specific functionality.
|
// to provide cluster system specific functionality.
|
||||||
type ClusterBackend interface {
|
type ClusterBackend interface {
|
||||||
Info(context.Context) swarm.Info
|
Info() swarm.Info
|
||||||
}
|
|
||||||
|
|
||||||
// StatusProvider provides methods to get the swarm status of the current node.
|
|
||||||
type StatusProvider interface {
|
|
||||||
Status() string
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,13 +1,9 @@
|
||||||
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
|
|
||||||
//go:build go1.19
|
|
||||||
|
|
||||||
package system // import "github.com/docker/docker/api/server/router/system"
|
package system // import "github.com/docker/docker/api/server/router/system"
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/docker/docker/api/server/router"
|
"github.com/docker/docker/api/server/router"
|
||||||
"github.com/docker/docker/api/types/system"
|
"github.com/docker/docker/builder/builder-next"
|
||||||
buildkit "github.com/docker/docker/builder/builder-next"
|
"github.com/docker/docker/builder/fscache"
|
||||||
"resenje.org/singleflight"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// systemRouter provides information about the Docker system overall.
|
// systemRouter provides information about the Docker system overall.
|
||||||
|
@ -16,20 +12,17 @@ type systemRouter struct {
|
||||||
backend Backend
|
backend Backend
|
||||||
cluster ClusterBackend
|
cluster ClusterBackend
|
||||||
routes []router.Route
|
routes []router.Route
|
||||||
|
fscache *fscache.FSCache // legacy
|
||||||
builder *buildkit.Builder
|
builder *buildkit.Builder
|
||||||
features func() map[string]bool
|
features *map[string]bool
|
||||||
|
|
||||||
// collectSystemInfo is a single-flight for the /info endpoint,
|
|
||||||
// unique per API version (as different API versions may return
|
|
||||||
// a different API response).
|
|
||||||
collectSystemInfo singleflight.Group[string, *system.Info]
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewRouter initializes a new system router
|
// NewRouter initializes a new system router
|
||||||
func NewRouter(b Backend, c ClusterBackend, builder *buildkit.Builder, features func() map[string]bool) router.Router {
|
func NewRouter(b Backend, c ClusterBackend, fscache *fscache.FSCache, builder *buildkit.Builder, features *map[string]bool) router.Router {
|
||||||
r := &systemRouter{
|
r := &systemRouter{
|
||||||
backend: b,
|
backend: b,
|
||||||
cluster: c,
|
cluster: c,
|
||||||
|
fscache: fscache,
|
||||||
builder: builder,
|
builder: builder,
|
||||||
features: features,
|
features: features,
|
||||||
}
|
}
|
||||||
|
|
|
@ -7,19 +7,17 @@ import (
|
||||||
"net/http"
|
"net/http"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/containerd/log"
|
|
||||||
"github.com/docker/docker/api/server/httputils"
|
"github.com/docker/docker/api/server/httputils"
|
||||||
"github.com/docker/docker/api/server/router/build"
|
"github.com/docker/docker/api/server/router/build"
|
||||||
"github.com/docker/docker/api/types"
|
"github.com/docker/docker/api/types"
|
||||||
"github.com/docker/docker/api/types/events"
|
"github.com/docker/docker/api/types/events"
|
||||||
"github.com/docker/docker/api/types/filters"
|
"github.com/docker/docker/api/types/filters"
|
||||||
"github.com/docker/docker/api/types/registry"
|
"github.com/docker/docker/api/types/registry"
|
||||||
"github.com/docker/docker/api/types/swarm"
|
|
||||||
"github.com/docker/docker/api/types/system"
|
|
||||||
timetypes "github.com/docker/docker/api/types/time"
|
timetypes "github.com/docker/docker/api/types/time"
|
||||||
"github.com/docker/docker/api/types/versions"
|
"github.com/docker/docker/api/types/versions"
|
||||||
"github.com/docker/docker/pkg/ioutils"
|
"github.com/docker/docker/pkg/ioutils"
|
||||||
"github.com/pkg/errors"
|
pkgerrors "github.com/pkg/errors"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
"golang.org/x/sync/errgroup"
|
"golang.org/x/sync/errgroup"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -32,13 +30,10 @@ func (s *systemRouter) pingHandler(ctx context.Context, w http.ResponseWriter, r
|
||||||
w.Header().Add("Cache-Control", "no-cache, no-store, must-revalidate")
|
w.Header().Add("Cache-Control", "no-cache, no-store, must-revalidate")
|
||||||
w.Header().Add("Pragma", "no-cache")
|
w.Header().Add("Pragma", "no-cache")
|
||||||
|
|
||||||
builderVersion := build.BuilderVersion(s.features())
|
builderVersion := build.BuilderVersion(*s.features)
|
||||||
if bv := builderVersion; bv != "" {
|
if bv := builderVersion; bv != "" {
|
||||||
w.Header().Set("Builder-Version", string(bv))
|
w.Header().Set("Builder-Version", string(bv))
|
||||||
}
|
}
|
||||||
|
|
||||||
w.Header().Set("Swarm", s.swarmStatus())
|
|
||||||
|
|
||||||
if r.Method == http.MethodHead {
|
if r.Method == http.MethodHead {
|
||||||
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
|
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
|
||||||
w.Header().Set("Content-Length", "0")
|
w.Header().Set("Content-Length", "0")
|
||||||
|
@ -48,42 +43,38 @@ func (s *systemRouter) pingHandler(ctx context.Context, w http.ResponseWriter, r
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *systemRouter) swarmStatus() string {
|
|
||||||
if s.cluster != nil {
|
|
||||||
if p, ok := s.cluster.(StatusProvider); ok {
|
|
||||||
return p.Status()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return string(swarm.LocalNodeStateInactive)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *systemRouter) getInfo(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
func (s *systemRouter) getInfo(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||||
version := httputils.VersionFromContext(ctx)
|
info, err := s.backend.SystemInfo()
|
||||||
info, _, _ := s.collectSystemInfo.Do(ctx, version, func(ctx context.Context) (*system.Info, error) {
|
|
||||||
info, err := s.backend.SystemInfo(ctx)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if s.cluster != nil {
|
if s.cluster != nil {
|
||||||
info.Swarm = s.cluster.Info(ctx)
|
info.Swarm = s.cluster.Info()
|
||||||
info.Warnings = append(info.Warnings, info.Swarm.Warnings...)
|
info.Warnings = append(info.Warnings, info.Swarm.Warnings...)
|
||||||
}
|
}
|
||||||
|
|
||||||
if versions.LessThan(version, "1.25") {
|
if versions.LessThan(httputils.VersionFromContext(ctx), "1.25") {
|
||||||
// TODO: handle this conversion in engine-api
|
// TODO: handle this conversion in engine-api
|
||||||
kvSecOpts, err := system.DecodeSecurityOptions(info.SecurityOptions)
|
type oldInfo struct {
|
||||||
|
*types.Info
|
||||||
|
ExecutionDriver string
|
||||||
|
}
|
||||||
|
old := &oldInfo{
|
||||||
|
Info: info,
|
||||||
|
ExecutionDriver: "<not supported>",
|
||||||
|
}
|
||||||
|
nameOnlySecurityOptions := []string{}
|
||||||
|
kvSecOpts, err := types.DecodeSecurityOptions(old.SecurityOptions)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
info.Warnings = append(info.Warnings, err.Error())
|
return err
|
||||||
}
|
}
|
||||||
var nameOnly []string
|
for _, s := range kvSecOpts {
|
||||||
for _, so := range kvSecOpts {
|
nameOnlySecurityOptions = append(nameOnlySecurityOptions, s.Name)
|
||||||
nameOnly = append(nameOnly, so.Name)
|
|
||||||
}
|
}
|
||||||
info.SecurityOptions = nameOnly
|
old.SecurityOptions = nameOnlySecurityOptions
|
||||||
info.ExecutionDriver = "<not supported>" //nolint:staticcheck // ignore SA1019 (ExecutionDriver is deprecated)
|
return httputils.WriteJSON(w, http.StatusOK, old)
|
||||||
}
|
}
|
||||||
if versions.LessThan(version, "1.39") {
|
if versions.LessThan(httputils.VersionFromContext(ctx), "1.39") {
|
||||||
if info.KernelVersion == "" {
|
if info.KernelVersion == "" {
|
||||||
info.KernelVersion = "<unknown>"
|
info.KernelVersion = "<unknown>"
|
||||||
}
|
}
|
||||||
|
@ -91,124 +82,56 @@ func (s *systemRouter) getInfo(ctx context.Context, w http.ResponseWriter, r *ht
|
||||||
info.OperatingSystem = "<unknown>"
|
info.OperatingSystem = "<unknown>"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if versions.LessThan(version, "1.44") {
|
|
||||||
for k, rt := range info.Runtimes {
|
|
||||||
// Status field introduced in API v1.44.
|
|
||||||
info.Runtimes[k] = system.RuntimeWithStatus{Runtime: rt.Runtime}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if versions.GreaterThanOrEqualTo(version, "1.42") {
|
|
||||||
info.KernelMemory = false
|
|
||||||
}
|
|
||||||
return info, nil
|
|
||||||
})
|
|
||||||
return httputils.WriteJSON(w, http.StatusOK, info)
|
return httputils.WriteJSON(w, http.StatusOK, info)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *systemRouter) getVersion(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
func (s *systemRouter) getVersion(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||||
info, err := s.backend.SystemVersion(ctx)
|
info := s.backend.SystemVersion()
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return httputils.WriteJSON(w, http.StatusOK, info)
|
return httputils.WriteJSON(w, http.StatusOK, info)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *systemRouter) getDiskUsage(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
func (s *systemRouter) getDiskUsage(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||||
if err := httputils.ParseForm(r); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
version := httputils.VersionFromContext(ctx)
|
|
||||||
|
|
||||||
var getContainers, getImages, getVolumes, getBuildCache bool
|
|
||||||
typeStrs, ok := r.Form["type"]
|
|
||||||
if versions.LessThan(version, "1.42") || !ok {
|
|
||||||
getContainers, getImages, getVolumes, getBuildCache = true, true, true, s.builder != nil
|
|
||||||
} else {
|
|
||||||
for _, typ := range typeStrs {
|
|
||||||
switch types.DiskUsageObject(typ) {
|
|
||||||
case types.ContainerObject:
|
|
||||||
getContainers = true
|
|
||||||
case types.ImageObject:
|
|
||||||
getImages = true
|
|
||||||
case types.VolumeObject:
|
|
||||||
getVolumes = true
|
|
||||||
case types.BuildCacheObject:
|
|
||||||
getBuildCache = true
|
|
||||||
default:
|
|
||||||
return invalidRequestError{Err: fmt.Errorf("unknown object type: %s", typ)}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
eg, ctx := errgroup.WithContext(ctx)
|
eg, ctx := errgroup.WithContext(ctx)
|
||||||
|
|
||||||
var systemDiskUsage *types.DiskUsage
|
var du *types.DiskUsage
|
||||||
if getContainers || getImages || getVolumes {
|
|
||||||
eg.Go(func() error {
|
eg.Go(func() error {
|
||||||
var err error
|
var err error
|
||||||
systemDiskUsage, err = s.backend.SystemDiskUsage(ctx, DiskUsageOptions{
|
du, err = s.backend.SystemDiskUsage(ctx)
|
||||||
Containers: getContainers,
|
|
||||||
Images: getImages,
|
|
||||||
Volumes: getVolumes,
|
|
||||||
})
|
|
||||||
return err
|
return err
|
||||||
})
|
})
|
||||||
|
|
||||||
|
var builderSize int64 // legacy
|
||||||
|
eg.Go(func() error {
|
||||||
|
var err error
|
||||||
|
builderSize, err = s.fscache.DiskUsage(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return pkgerrors.Wrap(err, "error getting fscache build cache usage")
|
||||||
}
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
|
||||||
var buildCache []*types.BuildCache
|
var buildCache []*types.BuildCache
|
||||||
if getBuildCache {
|
|
||||||
eg.Go(func() error {
|
eg.Go(func() error {
|
||||||
var err error
|
var err error
|
||||||
buildCache, err = s.builder.DiskUsage(ctx)
|
buildCache, err = s.builder.DiskUsage(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "error getting build cache usage")
|
return pkgerrors.Wrap(err, "error getting build cache usage")
|
||||||
}
|
|
||||||
if buildCache == nil {
|
|
||||||
// Ensure empty `BuildCache` field is represented as empty JSON array(`[]`)
|
|
||||||
// instead of `null` to be consistent with `Images`, `Containers` etc.
|
|
||||||
buildCache = []*types.BuildCache{}
|
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
}
|
|
||||||
|
|
||||||
if err := eg.Wait(); err != nil {
|
if err := eg.Wait(); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
var builderSize int64
|
|
||||||
if versions.LessThan(version, "1.42") {
|
|
||||||
for _, b := range buildCache {
|
for _, b := range buildCache {
|
||||||
builderSize += b.Size
|
builderSize += b.Size
|
||||||
// Parents field was added in API 1.42 to replace the Parent field.
|
|
||||||
b.Parents = nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if versions.GreaterThanOrEqualTo(version, "1.42") {
|
|
||||||
for _, b := range buildCache {
|
|
||||||
// Parent field is deprecated in API v1.42 and up, as it is deprecated
|
|
||||||
// in BuildKit. Empty the field to omit it in the API response.
|
|
||||||
b.Parent = "" //nolint:staticcheck // ignore SA1019 (Parent field is deprecated)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if versions.LessThan(version, "1.44") {
|
|
||||||
for _, b := range systemDiskUsage.Images {
|
|
||||||
b.VirtualSize = b.Size //nolint:staticcheck // ignore SA1019: field is deprecated, but still set on API < v1.44.
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
du := types.DiskUsage{
|
du.BuilderSize = builderSize
|
||||||
BuildCache: buildCache,
|
du.BuildCache = buildCache
|
||||||
BuilderSize: builderSize,
|
|
||||||
}
|
|
||||||
if systemDiskUsage != nil {
|
|
||||||
du.LayersSize = systemDiskUsage.LayersSize
|
|
||||||
du.Images = systemDiskUsage.Images
|
|
||||||
du.Containers = systemDiskUsage.Containers
|
|
||||||
du.Volumes = systemDiskUsage.Volumes
|
|
||||||
}
|
|
||||||
return httputils.WriteJSON(w, http.StatusOK, du)
|
return httputils.WriteJSON(w, http.StatusOK, du)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -251,9 +174,7 @@ func (s *systemRouter) getEvents(ctx context.Context, w http.ResponseWriter, r *
|
||||||
|
|
||||||
if !onlyPastEvents {
|
if !onlyPastEvents {
|
||||||
dur := until.Sub(now)
|
dur := until.Sub(now)
|
||||||
timer := time.NewTimer(dur)
|
timeout = time.After(dur)
|
||||||
defer timer.Stop()
|
|
||||||
timeout = timer.C
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -287,7 +208,7 @@ func (s *systemRouter) getEvents(ctx context.Context, w http.ResponseWriter, r *
|
||||||
case ev := <-l:
|
case ev := <-l:
|
||||||
jev, ok := ev.(events.Message)
|
jev, ok := ev.(events.Message)
|
||||||
if !ok {
|
if !ok {
|
||||||
log.G(ctx).Warnf("unexpected event message: %q", ev)
|
logrus.Warnf("unexpected event message: %q", ev)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if err := enc.Encode(jev); err != nil {
|
if err := enc.Encode(jev); err != nil {
|
||||||
|
@ -296,14 +217,14 @@ func (s *systemRouter) getEvents(ctx context.Context, w http.ResponseWriter, r *
|
||||||
case <-timeout:
|
case <-timeout:
|
||||||
return nil
|
return nil
|
||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
log.G(ctx).Debug("Client context cancelled, stop sending events")
|
logrus.Debug("Client context cancelled, stop sending events")
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *systemRouter) postAuth(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
func (s *systemRouter) postAuth(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||||
var config *registry.AuthConfig
|
var config *types.AuthConfig
|
||||||
err := json.NewDecoder(r.Body).Decode(&config)
|
err := json.NewDecoder(r.Body).Decode(&config)
|
||||||
r.Body.Close()
|
r.Body.Close()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
|
@ -7,28 +7,14 @@ import (
|
||||||
// TODO return types need to be refactored into pkg
|
// TODO return types need to be refactored into pkg
|
||||||
"github.com/docker/docker/api/types"
|
"github.com/docker/docker/api/types"
|
||||||
"github.com/docker/docker/api/types/filters"
|
"github.com/docker/docker/api/types/filters"
|
||||||
"github.com/docker/docker/api/types/volume"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Backend is the methods that need to be implemented to provide
|
// Backend is the methods that need to be implemented to provide
|
||||||
// volume specific functionality
|
// volume specific functionality
|
||||||
type Backend interface {
|
type Backend interface {
|
||||||
List(ctx context.Context, filter filters.Args) ([]*volume.Volume, []string, error)
|
List(ctx context.Context, filter filters.Args) ([]*types.Volume, []string, error)
|
||||||
Get(ctx context.Context, name string, opts ...opts.GetOption) (*volume.Volume, error)
|
Get(ctx context.Context, name string, opts ...opts.GetOption) (*types.Volume, error)
|
||||||
Create(ctx context.Context, name, driverName string, opts ...opts.CreateOption) (*volume.Volume, error)
|
Create(ctx context.Context, name, driverName string, opts ...opts.CreateOption) (*types.Volume, error)
|
||||||
Remove(ctx context.Context, name string, opts ...opts.RemoveOption) error
|
Remove(ctx context.Context, name string, opts ...opts.RemoveOption) error
|
||||||
Prune(ctx context.Context, pruneFilters filters.Args) (*types.VolumesPruneReport, error)
|
Prune(ctx context.Context, pruneFilters filters.Args) (*types.VolumesPruneReport, error)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ClusterBackend is the backend used for Swarm Cluster Volumes. Regular
|
|
||||||
// volumes go through the volume service, but to avoid across-dependency
|
|
||||||
// between the cluster package and the volume package, we simply provide two
|
|
||||||
// backends here.
|
|
||||||
type ClusterBackend interface {
|
|
||||||
GetVolume(nameOrID string) (volume.Volume, error)
|
|
||||||
GetVolumes(options volume.ListOptions) ([]*volume.Volume, error)
|
|
||||||
CreateVolume(volume volume.CreateOptions) (*volume.Volume, error)
|
|
||||||
RemoveVolume(nameOrID string, force bool) error
|
|
||||||
UpdateVolume(nameOrID string, version uint64, volume volume.UpdateOptions) error
|
|
||||||
IsManager() bool
|
|
||||||
}
|
|
||||||
|
|
|
@ -5,15 +5,13 @@ import "github.com/docker/docker/api/server/router"
|
||||||
// volumeRouter is a router to talk with the volumes controller
|
// volumeRouter is a router to talk with the volumes controller
|
||||||
type volumeRouter struct {
|
type volumeRouter struct {
|
||||||
backend Backend
|
backend Backend
|
||||||
cluster ClusterBackend
|
|
||||||
routes []router.Route
|
routes []router.Route
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewRouter initializes a new volume router
|
// NewRouter initializes a new volume router
|
||||||
func NewRouter(b Backend, cb ClusterBackend) router.Router {
|
func NewRouter(b Backend) router.Router {
|
||||||
r := &volumeRouter{
|
r := &volumeRouter{
|
||||||
backend: b,
|
backend: b,
|
||||||
cluster: cb,
|
|
||||||
}
|
}
|
||||||
r.initRoutes()
|
r.initRoutes()
|
||||||
return r
|
return r
|
||||||
|
@ -32,8 +30,6 @@ func (r *volumeRouter) initRoutes() {
|
||||||
// POST
|
// POST
|
||||||
router.NewPostRoute("/volumes/create", r.postVolumesCreate),
|
router.NewPostRoute("/volumes/create", r.postVolumesCreate),
|
||||||
router.NewPostRoute("/volumes/prune", r.postVolumesPrune),
|
router.NewPostRoute("/volumes/prune", r.postVolumesPrune),
|
||||||
// PUT
|
|
||||||
router.NewPutRoute("/volumes/{name:.*}", r.putVolumesUpdate),
|
|
||||||
// DELETE
|
// DELETE
|
||||||
router.NewDeleteRoute("/volumes/{name:.*}", r.deleteVolumes),
|
router.NewDeleteRoute("/volumes/{name:.*}", r.deleteVolumes),
|
||||||
}
|
}
|
||||||
|
|
|
@ -2,26 +2,18 @@ package volume // import "github.com/docker/docker/api/server/router/volume"
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"encoding/json"
|
||||||
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
"strconv"
|
|
||||||
|
|
||||||
"github.com/containerd/log"
|
|
||||||
"github.com/docker/docker/api/server/httputils"
|
"github.com/docker/docker/api/server/httputils"
|
||||||
"github.com/docker/docker/api/types/filters"
|
"github.com/docker/docker/api/types/filters"
|
||||||
"github.com/docker/docker/api/types/versions"
|
volumetypes "github.com/docker/docker/api/types/volume"
|
||||||
"github.com/docker/docker/api/types/volume"
|
|
||||||
"github.com/docker/docker/errdefs"
|
"github.com/docker/docker/errdefs"
|
||||||
"github.com/docker/docker/volume/service/opts"
|
"github.com/docker/docker/volume/service/opts"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
|
||||||
// clusterVolumesVersion defines the API version that swarm cluster volume
|
|
||||||
// functionality was introduced. avoids the use of magic numbers.
|
|
||||||
clusterVolumesVersion = "1.42"
|
|
||||||
)
|
|
||||||
|
|
||||||
func (v *volumeRouter) getVolumesList(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
func (v *volumeRouter) getVolumesList(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||||
if err := httputils.ParseForm(r); err != nil {
|
if err := httputils.ParseForm(r); err != nil {
|
||||||
return err
|
return err
|
||||||
|
@ -29,62 +21,25 @@ func (v *volumeRouter) getVolumesList(ctx context.Context, w http.ResponseWriter
|
||||||
|
|
||||||
filters, err := filters.FromJSON(r.Form.Get("filters"))
|
filters, err := filters.FromJSON(r.Form.Get("filters"))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "error reading volume filters")
|
return errdefs.InvalidParameter(errors.Wrap(err, "error reading volume filters"))
|
||||||
}
|
}
|
||||||
volumes, warnings, err := v.backend.List(ctx, filters)
|
volumes, warnings, err := v.backend.List(ctx, filters)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
return httputils.WriteJSON(w, http.StatusOK, &volumetypes.VolumeListOKBody{Volumes: volumes, Warnings: warnings})
|
||||||
version := httputils.VersionFromContext(ctx)
|
|
||||||
if versions.GreaterThanOrEqualTo(version, clusterVolumesVersion) && v.cluster.IsManager() {
|
|
||||||
clusterVolumes, swarmErr := v.cluster.GetVolumes(volume.ListOptions{Filters: filters})
|
|
||||||
if swarmErr != nil {
|
|
||||||
// if there is a swarm error, we may not want to error out right
|
|
||||||
// away. the local list probably worked. instead, let's do what we
|
|
||||||
// do if there's a bad driver while trying to list: add the error
|
|
||||||
// to the warnings. don't do this if swarm is not initialized.
|
|
||||||
warnings = append(warnings, swarmErr.Error())
|
|
||||||
}
|
|
||||||
// add the cluster volumes to the return
|
|
||||||
volumes = append(volumes, clusterVolumes...)
|
|
||||||
}
|
|
||||||
|
|
||||||
return httputils.WriteJSON(w, http.StatusOK, &volume.ListResponse{Volumes: volumes, Warnings: warnings})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (v *volumeRouter) getVolumeByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
func (v *volumeRouter) getVolumeByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||||
if err := httputils.ParseForm(r); err != nil {
|
if err := httputils.ParseForm(r); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
version := httputils.VersionFromContext(ctx)
|
|
||||||
|
|
||||||
// re: volume name duplication
|
volume, err := v.backend.Get(ctx, vars["name"], opts.WithGetResolveStatus)
|
||||||
//
|
|
||||||
// we prefer to get volumes locally before attempting to get them from the
|
|
||||||
// cluster. Local volumes can only be looked up by name, but cluster
|
|
||||||
// volumes can also be looked up by ID.
|
|
||||||
vol, err := v.backend.Get(ctx, vars["name"], opts.WithGetResolveStatus)
|
|
||||||
|
|
||||||
// if the volume is not found in the regular volume backend, and the client
|
|
||||||
// is using an API version greater than 1.42 (when cluster volumes were
|
|
||||||
// introduced), then check if Swarm has the volume.
|
|
||||||
if errdefs.IsNotFound(err) && versions.GreaterThanOrEqualTo(version, clusterVolumesVersion) && v.cluster.IsManager() {
|
|
||||||
swarmVol, err := v.cluster.GetVolume(vars["name"])
|
|
||||||
// if swarm returns an error and that error indicates that swarm is not
|
|
||||||
// initialized, return original NotFound error. Otherwise, we'd return
|
|
||||||
// a weird swarm unavailable error on non-swarm engines.
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
vol = &swarmVol
|
return httputils.WriteJSON(w, http.StatusOK, volume)
|
||||||
} else if err != nil {
|
|
||||||
// otherwise, if this isn't NotFound, or this isn't a high enough version,
|
|
||||||
// just return the error by itself.
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return httputils.WriteJSON(w, http.StatusOK, vol)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (v *volumeRouter) postVolumesCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
func (v *volumeRouter) postVolumesCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||||
|
@ -92,65 +47,23 @@ func (v *volumeRouter) postVolumesCreate(ctx context.Context, w http.ResponseWri
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
var req volume.CreateOptions
|
if err := httputils.CheckForJSON(r); err != nil {
|
||||||
if err := httputils.ReadJSON(r, &req); err != nil {
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var req volumetypes.VolumeCreateBody
|
||||||
vol *volume.Volume
|
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||||
err error
|
if err == io.EOF {
|
||||||
version = httputils.VersionFromContext(ctx)
|
return errdefs.InvalidParameter(errors.New("got EOF while reading request body"))
|
||||||
)
|
|
||||||
|
|
||||||
// if the ClusterVolumeSpec is filled in, then this is a cluster volume
|
|
||||||
// and is created through the swarm cluster volume backend.
|
|
||||||
//
|
|
||||||
// re: volume name duplication
|
|
||||||
//
|
|
||||||
// As it happens, there is no good way to prevent duplication of a volume
|
|
||||||
// name between local and cluster volumes. This is because Swarm volumes
|
|
||||||
// can be created from any manager node, bypassing most of the protections
|
|
||||||
// we could put into the engine side.
|
|
||||||
//
|
|
||||||
// Instead, we will allow creating a volume with a duplicate name, which
|
|
||||||
// should not break anything.
|
|
||||||
if req.ClusterVolumeSpec != nil && versions.GreaterThanOrEqualTo(version, clusterVolumesVersion) {
|
|
||||||
log.G(ctx).Debug("using cluster volume")
|
|
||||||
vol, err = v.cluster.CreateVolume(req)
|
|
||||||
} else {
|
|
||||||
log.G(ctx).Debug("using regular volume")
|
|
||||||
vol, err = v.backend.Create(ctx, req.Name, req.Driver, opts.WithCreateOptions(req.DriverOpts), opts.WithCreateLabels(req.Labels))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return httputils.WriteJSON(w, http.StatusCreated, vol)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (v *volumeRouter) putVolumesUpdate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
|
||||||
if !v.cluster.IsManager() {
|
|
||||||
return errdefs.Unavailable(errors.New("volume update only valid for cluster volumes, but swarm is unavailable"))
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := httputils.ParseForm(r); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
rawVersion := r.URL.Query().Get("version")
|
|
||||||
version, err := strconv.ParseUint(rawVersion, 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
err = fmt.Errorf("invalid swarm object version '%s': %v", rawVersion, err)
|
|
||||||
return errdefs.InvalidParameter(err)
|
return errdefs.InvalidParameter(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
var req volume.UpdateOptions
|
volume, err := v.backend.Create(ctx, req.Name, req.Driver, opts.WithCreateOptions(req.DriverOpts), opts.WithCreateLabels(req.Labels))
|
||||||
if err := httputils.ReadJSON(r, &req); err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
return httputils.WriteJSON(w, http.StatusCreated, volume)
|
||||||
return v.cluster.UpdateVolume(vars["name"], version, req)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (v *volumeRouter) deleteVolumes(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
func (v *volumeRouter) deleteVolumes(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||||
|
@ -158,28 +71,7 @@ func (v *volumeRouter) deleteVolumes(ctx context.Context, w http.ResponseWriter,
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
force := httputils.BoolValue(r, "force")
|
force := httputils.BoolValue(r, "force")
|
||||||
|
if err := v.backend.Remove(ctx, vars["name"], opts.WithPurgeOnError(force)); err != nil {
|
||||||
// First we try deleting local volume. The volume may not be found as a
|
|
||||||
// local volume, but could be a cluster volume, so we ignore "not found"
|
|
||||||
// errors at this stage. Note that no "not found" error is produced if
|
|
||||||
// "force" is enabled.
|
|
||||||
err := v.backend.Remove(ctx, vars["name"], opts.WithPurgeOnError(force))
|
|
||||||
if err != nil && !errdefs.IsNotFound(err) {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// If no volume was found, the volume may be a cluster volume. If force
|
|
||||||
// is enabled, the volume backend won't return an error for non-existing
|
|
||||||
// volumes, so we don't know if removal succeeded (or not volume existed).
|
|
||||||
// In that case we always try to delete cluster volumes as well.
|
|
||||||
if errdefs.IsNotFound(err) || force {
|
|
||||||
version := httputils.VersionFromContext(ctx)
|
|
||||||
if versions.GreaterThanOrEqualTo(version, clusterVolumesVersion) && v.cluster.IsManager() {
|
|
||||||
err = v.cluster.RemoveVolume(vars["name"], force)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
w.WriteHeader(http.StatusNoContent)
|
w.WriteHeader(http.StatusNoContent)
|
||||||
|
@ -196,12 +88,6 @@ func (v *volumeRouter) postVolumesPrune(ctx context.Context, w http.ResponseWrit
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// API version 1.42 changes behavior where prune should only prune anonymous volumes.
|
|
||||||
// To keep older API behavior working, we need to add this filter option to consider all (local) volumes for pruning, not just anonymous ones.
|
|
||||||
if versions.LessThan(httputils.VersionFromContext(ctx), "1.42") {
|
|
||||||
pruneFilters.Add("all", "true")
|
|
||||||
}
|
|
||||||
|
|
||||||
pruneReport, err := v.backend.Prune(ctx, pruneFilters)
|
pruneReport, err := v.backend.Prune(ctx, pruneFilters)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
|
|
@ -1,760 +0,0 @@
|
||||||
package volume
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"net/http/httptest"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"gotest.tools/v3/assert"
|
|
||||||
|
|
||||||
"github.com/docker/docker/api/server/httputils"
|
|
||||||
"github.com/docker/docker/api/types"
|
|
||||||
"github.com/docker/docker/api/types/filters"
|
|
||||||
"github.com/docker/docker/api/types/volume"
|
|
||||||
"github.com/docker/docker/errdefs"
|
|
||||||
"github.com/docker/docker/volume/service/opts"
|
|
||||||
)
|
|
||||||
|
|
||||||
func callGetVolume(v *volumeRouter, name string) (*httptest.ResponseRecorder, error) {
|
|
||||||
ctx := context.WithValue(context.Background(), httputils.APIVersionKey{}, clusterVolumesVersion)
|
|
||||||
vars := map[string]string{"name": name}
|
|
||||||
req := httptest.NewRequest("GET", fmt.Sprintf("/volumes/%s", name), nil)
|
|
||||||
resp := httptest.NewRecorder()
|
|
||||||
|
|
||||||
err := v.getVolumeByName(ctx, resp, req, vars)
|
|
||||||
return resp, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func callListVolumes(v *volumeRouter) (*httptest.ResponseRecorder, error) {
|
|
||||||
ctx := context.WithValue(context.Background(), httputils.APIVersionKey{}, clusterVolumesVersion)
|
|
||||||
vars := map[string]string{}
|
|
||||||
req := httptest.NewRequest("GET", "/volumes", nil)
|
|
||||||
resp := httptest.NewRecorder()
|
|
||||||
|
|
||||||
err := v.getVolumesList(ctx, resp, req, vars)
|
|
||||||
return resp, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGetVolumeByNameNotFoundNoSwarm(t *testing.T) {
|
|
||||||
v := &volumeRouter{
|
|
||||||
backend: &fakeVolumeBackend{},
|
|
||||||
cluster: &fakeClusterBackend{},
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err := callGetVolume(v, "notReal")
|
|
||||||
|
|
||||||
assert.Assert(t, err != nil)
|
|
||||||
assert.Assert(t, errdefs.IsNotFound(err))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGetVolumeByNameNotFoundNotManager(t *testing.T) {
|
|
||||||
v := &volumeRouter{
|
|
||||||
backend: &fakeVolumeBackend{},
|
|
||||||
cluster: &fakeClusterBackend{swarm: true},
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err := callGetVolume(v, "notReal")
|
|
||||||
|
|
||||||
assert.Assert(t, err != nil)
|
|
||||||
assert.Assert(t, errdefs.IsNotFound(err))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGetVolumeByNameNotFound(t *testing.T) {
|
|
||||||
v := &volumeRouter{
|
|
||||||
backend: &fakeVolumeBackend{},
|
|
||||||
cluster: &fakeClusterBackend{swarm: true, manager: true},
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err := callGetVolume(v, "notReal")
|
|
||||||
|
|
||||||
assert.Assert(t, err != nil)
|
|
||||||
assert.Assert(t, errdefs.IsNotFound(err))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGetVolumeByNameFoundRegular(t *testing.T) {
|
|
||||||
v := &volumeRouter{
|
|
||||||
backend: &fakeVolumeBackend{
|
|
||||||
volumes: map[string]*volume.Volume{
|
|
||||||
"volume1": {
|
|
||||||
Name: "volume1",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
cluster: &fakeClusterBackend{swarm: true, manager: true},
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err := callGetVolume(v, "volume1")
|
|
||||||
assert.NilError(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGetVolumeByNameFoundSwarm(t *testing.T) {
|
|
||||||
v := &volumeRouter{
|
|
||||||
backend: &fakeVolumeBackend{},
|
|
||||||
cluster: &fakeClusterBackend{
|
|
||||||
swarm: true,
|
|
||||||
manager: true,
|
|
||||||
volumes: map[string]*volume.Volume{
|
|
||||||
"volume1": {
|
|
||||||
Name: "volume1",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err := callGetVolume(v, "volume1")
|
|
||||||
assert.NilError(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestListVolumes(t *testing.T) {
|
|
||||||
v := &volumeRouter{
|
|
||||||
backend: &fakeVolumeBackend{
|
|
||||||
volumes: map[string]*volume.Volume{
|
|
||||||
"v1": {Name: "v1"},
|
|
||||||
"v2": {Name: "v2"},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
cluster: &fakeClusterBackend{
|
|
||||||
swarm: true,
|
|
||||||
manager: true,
|
|
||||||
volumes: map[string]*volume.Volume{
|
|
||||||
"v3": {Name: "v3"},
|
|
||||||
"v4": {Name: "v4"},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
resp, err := callListVolumes(v)
|
|
||||||
assert.NilError(t, err)
|
|
||||||
d := json.NewDecoder(resp.Result().Body)
|
|
||||||
respVols := volume.ListResponse{}
|
|
||||||
assert.NilError(t, d.Decode(&respVols))
|
|
||||||
|
|
||||||
assert.Assert(t, respVols.Volumes != nil)
|
|
||||||
assert.Equal(t, len(respVols.Volumes), 4, "volumes %v", respVols.Volumes)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestListVolumesNoSwarm(t *testing.T) {
|
|
||||||
v := &volumeRouter{
|
|
||||||
backend: &fakeVolumeBackend{
|
|
||||||
volumes: map[string]*volume.Volume{
|
|
||||||
"v1": {Name: "v1"},
|
|
||||||
"v2": {Name: "v2"},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
cluster: &fakeClusterBackend{},
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err := callListVolumes(v)
|
|
||||||
assert.NilError(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestListVolumesNoManager(t *testing.T) {
|
|
||||||
v := &volumeRouter{
|
|
||||||
backend: &fakeVolumeBackend{
|
|
||||||
volumes: map[string]*volume.Volume{
|
|
||||||
"v1": {Name: "v1"},
|
|
||||||
"v2": {Name: "v2"},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
cluster: &fakeClusterBackend{swarm: true},
|
|
||||||
}
|
|
||||||
|
|
||||||
resp, err := callListVolumes(v)
|
|
||||||
assert.NilError(t, err)
|
|
||||||
|
|
||||||
d := json.NewDecoder(resp.Result().Body)
|
|
||||||
respVols := volume.ListResponse{}
|
|
||||||
assert.NilError(t, d.Decode(&respVols))
|
|
||||||
|
|
||||||
assert.Equal(t, len(respVols.Volumes), 2)
|
|
||||||
assert.Equal(t, len(respVols.Warnings), 0)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCreateRegularVolume(t *testing.T) {
|
|
||||||
b := &fakeVolumeBackend{}
|
|
||||||
c := &fakeClusterBackend{
|
|
||||||
swarm: true,
|
|
||||||
manager: true,
|
|
||||||
}
|
|
||||||
v := &volumeRouter{
|
|
||||||
backend: b,
|
|
||||||
cluster: c,
|
|
||||||
}
|
|
||||||
|
|
||||||
volumeCreate := volume.CreateOptions{
|
|
||||||
Name: "vol1",
|
|
||||||
Driver: "foodriver",
|
|
||||||
}
|
|
||||||
|
|
||||||
buf := bytes.Buffer{}
|
|
||||||
e := json.NewEncoder(&buf)
|
|
||||||
e.Encode(volumeCreate)
|
|
||||||
|
|
||||||
ctx := context.WithValue(context.Background(), httputils.APIVersionKey{}, clusterVolumesVersion)
|
|
||||||
req := httptest.NewRequest("POST", "/volumes/create", &buf)
|
|
||||||
req.Header.Add("Content-Type", "application/json")
|
|
||||||
|
|
||||||
resp := httptest.NewRecorder()
|
|
||||||
err := v.postVolumesCreate(ctx, resp, req, nil)
|
|
||||||
|
|
||||||
assert.NilError(t, err)
|
|
||||||
|
|
||||||
respVolume := volume.Volume{}
|
|
||||||
|
|
||||||
assert.NilError(t, json.NewDecoder(resp.Result().Body).Decode(&respVolume))
|
|
||||||
|
|
||||||
assert.Equal(t, respVolume.Name, "vol1")
|
|
||||||
assert.Equal(t, respVolume.Driver, "foodriver")
|
|
||||||
|
|
||||||
assert.Equal(t, 1, len(b.volumes))
|
|
||||||
assert.Equal(t, 0, len(c.volumes))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCreateSwarmVolumeNoSwarm(t *testing.T) {
|
|
||||||
b := &fakeVolumeBackend{}
|
|
||||||
c := &fakeClusterBackend{}
|
|
||||||
|
|
||||||
v := &volumeRouter{
|
|
||||||
backend: b,
|
|
||||||
cluster: c,
|
|
||||||
}
|
|
||||||
|
|
||||||
volumeCreate := volume.CreateOptions{
|
|
||||||
ClusterVolumeSpec: &volume.ClusterVolumeSpec{},
|
|
||||||
Name: "volCluster",
|
|
||||||
Driver: "someCSI",
|
|
||||||
}
|
|
||||||
|
|
||||||
buf := bytes.Buffer{}
|
|
||||||
json.NewEncoder(&buf).Encode(volumeCreate)
|
|
||||||
|
|
||||||
ctx := context.WithValue(context.Background(), httputils.APIVersionKey{}, clusterVolumesVersion)
|
|
||||||
req := httptest.NewRequest("POST", "/volumes/create", &buf)
|
|
||||||
req.Header.Add("Content-Type", "application/json")
|
|
||||||
|
|
||||||
resp := httptest.NewRecorder()
|
|
||||||
err := v.postVolumesCreate(ctx, resp, req, nil)
|
|
||||||
|
|
||||||
assert.Assert(t, err != nil)
|
|
||||||
assert.Assert(t, errdefs.IsUnavailable(err))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCreateSwarmVolumeNotManager(t *testing.T) {
|
|
||||||
b := &fakeVolumeBackend{}
|
|
||||||
c := &fakeClusterBackend{swarm: true}
|
|
||||||
|
|
||||||
v := &volumeRouter{
|
|
||||||
backend: b,
|
|
||||||
cluster: c,
|
|
||||||
}
|
|
||||||
|
|
||||||
volumeCreate := volume.CreateOptions{
|
|
||||||
ClusterVolumeSpec: &volume.ClusterVolumeSpec{},
|
|
||||||
Name: "volCluster",
|
|
||||||
Driver: "someCSI",
|
|
||||||
}
|
|
||||||
|
|
||||||
buf := bytes.Buffer{}
|
|
||||||
json.NewEncoder(&buf).Encode(volumeCreate)
|
|
||||||
|
|
||||||
ctx := context.WithValue(context.Background(), httputils.APIVersionKey{}, clusterVolumesVersion)
|
|
||||||
req := httptest.NewRequest("POST", "/volumes/create", &buf)
|
|
||||||
req.Header.Add("Content-Type", "application/json")
|
|
||||||
|
|
||||||
resp := httptest.NewRecorder()
|
|
||||||
err := v.postVolumesCreate(ctx, resp, req, nil)
|
|
||||||
|
|
||||||
assert.Assert(t, err != nil)
|
|
||||||
assert.Assert(t, errdefs.IsUnavailable(err))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCreateVolumeCluster(t *testing.T) {
|
|
||||||
b := &fakeVolumeBackend{}
|
|
||||||
c := &fakeClusterBackend{
|
|
||||||
swarm: true,
|
|
||||||
manager: true,
|
|
||||||
}
|
|
||||||
|
|
||||||
v := &volumeRouter{
|
|
||||||
backend: b,
|
|
||||||
cluster: c,
|
|
||||||
}
|
|
||||||
|
|
||||||
volumeCreate := volume.CreateOptions{
|
|
||||||
ClusterVolumeSpec: &volume.ClusterVolumeSpec{},
|
|
||||||
Name: "volCluster",
|
|
||||||
Driver: "someCSI",
|
|
||||||
}
|
|
||||||
|
|
||||||
buf := bytes.Buffer{}
|
|
||||||
json.NewEncoder(&buf).Encode(volumeCreate)
|
|
||||||
|
|
||||||
ctx := context.WithValue(context.Background(), httputils.APIVersionKey{}, clusterVolumesVersion)
|
|
||||||
req := httptest.NewRequest("POST", "/volumes/create", &buf)
|
|
||||||
req.Header.Add("Content-Type", "application/json")
|
|
||||||
|
|
||||||
resp := httptest.NewRecorder()
|
|
||||||
err := v.postVolumesCreate(ctx, resp, req, nil)
|
|
||||||
|
|
||||||
assert.NilError(t, err)
|
|
||||||
|
|
||||||
respVolume := volume.Volume{}
|
|
||||||
|
|
||||||
assert.NilError(t, json.NewDecoder(resp.Result().Body).Decode(&respVolume))
|
|
||||||
|
|
||||||
assert.Equal(t, respVolume.Name, "volCluster")
|
|
||||||
assert.Equal(t, respVolume.Driver, "someCSI")
|
|
||||||
|
|
||||||
assert.Equal(t, 0, len(b.volumes))
|
|
||||||
assert.Equal(t, 1, len(c.volumes))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestUpdateVolume(t *testing.T) {
|
|
||||||
b := &fakeVolumeBackend{}
|
|
||||||
c := &fakeClusterBackend{
|
|
||||||
swarm: true,
|
|
||||||
manager: true,
|
|
||||||
volumes: map[string]*volume.Volume{
|
|
||||||
"vol1": {
|
|
||||||
Name: "vo1",
|
|
||||||
ClusterVolume: &volume.ClusterVolume{
|
|
||||||
ID: "vol1",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
v := &volumeRouter{
|
|
||||||
backend: b,
|
|
||||||
cluster: c,
|
|
||||||
}
|
|
||||||
|
|
||||||
volumeUpdate := volume.UpdateOptions{
|
|
||||||
Spec: &volume.ClusterVolumeSpec{},
|
|
||||||
}
|
|
||||||
|
|
||||||
buf := bytes.Buffer{}
|
|
||||||
json.NewEncoder(&buf).Encode(volumeUpdate)
|
|
||||||
ctx := context.WithValue(context.Background(), httputils.APIVersionKey{}, clusterVolumesVersion)
|
|
||||||
req := httptest.NewRequest("POST", "/volumes/vol1/update?version=0", &buf)
|
|
||||||
req.Header.Add("Content-Type", "application/json")
|
|
||||||
|
|
||||||
resp := httptest.NewRecorder()
|
|
||||||
|
|
||||||
err := v.putVolumesUpdate(ctx, resp, req, map[string]string{"name": "vol1"})
|
|
||||||
assert.NilError(t, err)
|
|
||||||
|
|
||||||
assert.Equal(t, c.volumes["vol1"].ClusterVolume.Meta.Version.Index, uint64(1))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestUpdateVolumeNoSwarm(t *testing.T) {
|
|
||||||
b := &fakeVolumeBackend{}
|
|
||||||
c := &fakeClusterBackend{}
|
|
||||||
|
|
||||||
v := &volumeRouter{
|
|
||||||
backend: b,
|
|
||||||
cluster: c,
|
|
||||||
}
|
|
||||||
|
|
||||||
volumeUpdate := volume.UpdateOptions{
|
|
||||||
Spec: &volume.ClusterVolumeSpec{},
|
|
||||||
}
|
|
||||||
|
|
||||||
buf := bytes.Buffer{}
|
|
||||||
json.NewEncoder(&buf).Encode(volumeUpdate)
|
|
||||||
ctx := context.WithValue(context.Background(), httputils.APIVersionKey{}, clusterVolumesVersion)
|
|
||||||
req := httptest.NewRequest("POST", "/volumes/vol1/update?version=0", &buf)
|
|
||||||
req.Header.Add("Content-Type", "application/json")
|
|
||||||
|
|
||||||
resp := httptest.NewRecorder()
|
|
||||||
|
|
||||||
err := v.putVolumesUpdate(ctx, resp, req, map[string]string{"name": "vol1"})
|
|
||||||
assert.Assert(t, err != nil)
|
|
||||||
assert.Assert(t, errdefs.IsUnavailable(err))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestUpdateVolumeNotFound(t *testing.T) {
|
|
||||||
b := &fakeVolumeBackend{}
|
|
||||||
c := &fakeClusterBackend{
|
|
||||||
swarm: true,
|
|
||||||
manager: true,
|
|
||||||
volumes: map[string]*volume.Volume{},
|
|
||||||
}
|
|
||||||
|
|
||||||
v := &volumeRouter{
|
|
||||||
backend: b,
|
|
||||||
cluster: c,
|
|
||||||
}
|
|
||||||
|
|
||||||
volumeUpdate := volume.UpdateOptions{
|
|
||||||
Spec: &volume.ClusterVolumeSpec{},
|
|
||||||
}
|
|
||||||
|
|
||||||
buf := bytes.Buffer{}
|
|
||||||
json.NewEncoder(&buf).Encode(volumeUpdate)
|
|
||||||
ctx := context.WithValue(context.Background(), httputils.APIVersionKey{}, clusterVolumesVersion)
|
|
||||||
req := httptest.NewRequest("POST", "/volumes/vol1/update?version=0", &buf)
|
|
||||||
req.Header.Add("Content-Type", "application/json")
|
|
||||||
|
|
||||||
resp := httptest.NewRecorder()
|
|
||||||
|
|
||||||
err := v.putVolumesUpdate(ctx, resp, req, map[string]string{"name": "vol1"})
|
|
||||||
assert.Assert(t, err != nil)
|
|
||||||
assert.Assert(t, errdefs.IsNotFound(err))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestVolumeRemove(t *testing.T) {
|
|
||||||
b := &fakeVolumeBackend{
|
|
||||||
volumes: map[string]*volume.Volume{
|
|
||||||
"vol1": {
|
|
||||||
Name: "vol1",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
c := &fakeClusterBackend{swarm: true, manager: true}
|
|
||||||
|
|
||||||
v := &volumeRouter{
|
|
||||||
backend: b,
|
|
||||||
cluster: c,
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx := context.WithValue(context.Background(), httputils.APIVersionKey{}, clusterVolumesVersion)
|
|
||||||
req := httptest.NewRequest("DELETE", "/volumes/vol1", nil)
|
|
||||||
resp := httptest.NewRecorder()
|
|
||||||
|
|
||||||
err := v.deleteVolumes(ctx, resp, req, map[string]string{"name": "vol1"})
|
|
||||||
assert.NilError(t, err)
|
|
||||||
assert.Equal(t, len(b.volumes), 0)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestVolumeRemoveSwarm(t *testing.T) {
|
|
||||||
b := &fakeVolumeBackend{}
|
|
||||||
c := &fakeClusterBackend{
|
|
||||||
swarm: true,
|
|
||||||
manager: true,
|
|
||||||
volumes: map[string]*volume.Volume{
|
|
||||||
"vol1": {
|
|
||||||
Name: "vol1",
|
|
||||||
ClusterVolume: &volume.ClusterVolume{},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
v := &volumeRouter{
|
|
||||||
backend: b,
|
|
||||||
cluster: c,
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx := context.WithValue(context.Background(), httputils.APIVersionKey{}, clusterVolumesVersion)
|
|
||||||
req := httptest.NewRequest("DELETE", "/volumes/vol1", nil)
|
|
||||||
resp := httptest.NewRecorder()
|
|
||||||
|
|
||||||
err := v.deleteVolumes(ctx, resp, req, map[string]string{"name": "vol1"})
|
|
||||||
assert.NilError(t, err)
|
|
||||||
assert.Equal(t, len(c.volumes), 0)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestVolumeRemoveNotFoundNoSwarm(t *testing.T) {
|
|
||||||
b := &fakeVolumeBackend{}
|
|
||||||
c := &fakeClusterBackend{}
|
|
||||||
v := &volumeRouter{
|
|
||||||
backend: b,
|
|
||||||
cluster: c,
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx := context.WithValue(context.Background(), httputils.APIVersionKey{}, clusterVolumesVersion)
|
|
||||||
req := httptest.NewRequest("DELETE", "/volumes/vol1", nil)
|
|
||||||
resp := httptest.NewRecorder()
|
|
||||||
|
|
||||||
err := v.deleteVolumes(ctx, resp, req, map[string]string{"name": "vol1"})
|
|
||||||
assert.Assert(t, err != nil)
|
|
||||||
assert.Assert(t, errdefs.IsNotFound(err), err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestVolumeRemoveNotFoundNoManager(t *testing.T) {
|
|
||||||
b := &fakeVolumeBackend{}
|
|
||||||
c := &fakeClusterBackend{swarm: true}
|
|
||||||
v := &volumeRouter{
|
|
||||||
backend: b,
|
|
||||||
cluster: c,
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx := context.WithValue(context.Background(), httputils.APIVersionKey{}, clusterVolumesVersion)
|
|
||||||
req := httptest.NewRequest("DELETE", "/volumes/vol1", nil)
|
|
||||||
resp := httptest.NewRecorder()
|
|
||||||
|
|
||||||
err := v.deleteVolumes(ctx, resp, req, map[string]string{"name": "vol1"})
|
|
||||||
assert.Assert(t, err != nil)
|
|
||||||
assert.Assert(t, errdefs.IsNotFound(err))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestVolumeRemoveFoundNoSwarm(t *testing.T) {
|
|
||||||
b := &fakeVolumeBackend{
|
|
||||||
volumes: map[string]*volume.Volume{
|
|
||||||
"vol1": {
|
|
||||||
Name: "vol1",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
c := &fakeClusterBackend{}
|
|
||||||
|
|
||||||
v := &volumeRouter{
|
|
||||||
backend: b,
|
|
||||||
cluster: c,
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx := context.WithValue(context.Background(), httputils.APIVersionKey{}, clusterVolumesVersion)
|
|
||||||
req := httptest.NewRequest("DELETE", "/volumes/vol1", nil)
|
|
||||||
resp := httptest.NewRecorder()
|
|
||||||
|
|
||||||
err := v.deleteVolumes(ctx, resp, req, map[string]string{"name": "vol1"})
|
|
||||||
assert.NilError(t, err)
|
|
||||||
assert.Equal(t, len(b.volumes), 0)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestVolumeRemoveNoSwarmInUse(t *testing.T) {
|
|
||||||
b := &fakeVolumeBackend{
|
|
||||||
volumes: map[string]*volume.Volume{
|
|
||||||
"inuse": {
|
|
||||||
Name: "inuse",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
c := &fakeClusterBackend{}
|
|
||||||
v := &volumeRouter{
|
|
||||||
backend: b,
|
|
||||||
cluster: c,
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx := context.WithValue(context.Background(), httputils.APIVersionKey{}, clusterVolumesVersion)
|
|
||||||
req := httptest.NewRequest("DELETE", "/volumes/inuse", nil)
|
|
||||||
resp := httptest.NewRecorder()
|
|
||||||
|
|
||||||
err := v.deleteVolumes(ctx, resp, req, map[string]string{"name": "inuse"})
|
|
||||||
assert.Assert(t, err != nil)
|
|
||||||
assert.Assert(t, errdefs.IsConflict(err))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestVolumeRemoveSwarmForce(t *testing.T) {
|
|
||||||
b := &fakeVolumeBackend{}
|
|
||||||
c := &fakeClusterBackend{
|
|
||||||
swarm: true,
|
|
||||||
manager: true,
|
|
||||||
volumes: map[string]*volume.Volume{
|
|
||||||
"vol1": {
|
|
||||||
Name: "vol1",
|
|
||||||
ClusterVolume: &volume.ClusterVolume{},
|
|
||||||
Options: map[string]string{"mustforce": "yes"},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
v := &volumeRouter{
|
|
||||||
backend: b,
|
|
||||||
cluster: c,
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx := context.WithValue(context.Background(), httputils.APIVersionKey{}, clusterVolumesVersion)
|
|
||||||
req := httptest.NewRequest("DELETE", "/volumes/vol1", nil)
|
|
||||||
resp := httptest.NewRecorder()
|
|
||||||
|
|
||||||
err := v.deleteVolumes(ctx, resp, req, map[string]string{"name": "vol1"})
|
|
||||||
|
|
||||||
assert.Assert(t, err != nil)
|
|
||||||
assert.Assert(t, errdefs.IsConflict(err))
|
|
||||||
|
|
||||||
ctx = context.WithValue(context.Background(), httputils.APIVersionKey{}, clusterVolumesVersion)
|
|
||||||
req = httptest.NewRequest("DELETE", "/volumes/vol1?force=1", nil)
|
|
||||||
resp = httptest.NewRecorder()
|
|
||||||
|
|
||||||
err = v.deleteVolumes(ctx, resp, req, map[string]string{"name": "vol1"})
|
|
||||||
|
|
||||||
assert.NilError(t, err)
|
|
||||||
assert.Equal(t, len(b.volumes), 0)
|
|
||||||
assert.Equal(t, len(c.volumes), 0)
|
|
||||||
}
|
|
||||||
|
|
||||||
type fakeVolumeBackend struct {
|
|
||||||
volumes map[string]*volume.Volume
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *fakeVolumeBackend) List(_ context.Context, _ filters.Args) ([]*volume.Volume, []string, error) {
|
|
||||||
volumes := []*volume.Volume{}
|
|
||||||
for _, v := range b.volumes {
|
|
||||||
volumes = append(volumes, v)
|
|
||||||
}
|
|
||||||
return volumes, nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *fakeVolumeBackend) Get(_ context.Context, name string, _ ...opts.GetOption) (*volume.Volume, error) {
|
|
||||||
if v, ok := b.volumes[name]; ok {
|
|
||||||
return v, nil
|
|
||||||
}
|
|
||||||
return nil, errdefs.NotFound(fmt.Errorf("volume %s not found", name))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *fakeVolumeBackend) Create(_ context.Context, name, driverName string, _ ...opts.CreateOption) (*volume.Volume, error) {
|
|
||||||
if _, ok := b.volumes[name]; ok {
|
|
||||||
// TODO(dperny): return appropriate error type
|
|
||||||
return nil, fmt.Errorf("already exists")
|
|
||||||
}
|
|
||||||
|
|
||||||
v := &volume.Volume{
|
|
||||||
Name: name,
|
|
||||||
Driver: driverName,
|
|
||||||
}
|
|
||||||
if b.volumes == nil {
|
|
||||||
b.volumes = map[string]*volume.Volume{
|
|
||||||
name: v,
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
b.volumes[name] = v
|
|
||||||
}
|
|
||||||
|
|
||||||
return v, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *fakeVolumeBackend) Remove(_ context.Context, name string, o ...opts.RemoveOption) error {
|
|
||||||
removeOpts := &opts.RemoveConfig{}
|
|
||||||
for _, opt := range o {
|
|
||||||
opt(removeOpts)
|
|
||||||
}
|
|
||||||
|
|
||||||
if v, ok := b.volumes[name]; !ok {
|
|
||||||
if !removeOpts.PurgeOnError {
|
|
||||||
return errdefs.NotFound(fmt.Errorf("volume %s not found", name))
|
|
||||||
}
|
|
||||||
} else if v.Name == "inuse" {
|
|
||||||
return errdefs.Conflict(fmt.Errorf("volume in use"))
|
|
||||||
}
|
|
||||||
|
|
||||||
delete(b.volumes, name)
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *fakeVolumeBackend) Prune(_ context.Context, _ filters.Args) (*types.VolumesPruneReport, error) {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type fakeClusterBackend struct {
|
|
||||||
swarm bool
|
|
||||||
manager bool
|
|
||||||
idCount int
|
|
||||||
volumes map[string]*volume.Volume
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *fakeClusterBackend) checkSwarm() error {
|
|
||||||
if !c.swarm {
|
|
||||||
return errdefs.Unavailable(fmt.Errorf("this node is not a swarm manager. Use \"docker swarm init\" or \"docker swarm join\" to connect this node to swarm and try again"))
|
|
||||||
} else if !c.manager {
|
|
||||||
return errdefs.Unavailable(fmt.Errorf("this node is not a swarm manager. Worker nodes can't be used to view or modify cluster state. Please run this command on a manager node or promote the current node to a manager"))
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *fakeClusterBackend) IsManager() bool {
|
|
||||||
return c.swarm && c.manager
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *fakeClusterBackend) GetVolume(nameOrID string) (volume.Volume, error) {
|
|
||||||
if err := c.checkSwarm(); err != nil {
|
|
||||||
return volume.Volume{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if v, ok := c.volumes[nameOrID]; ok {
|
|
||||||
return *v, nil
|
|
||||||
}
|
|
||||||
return volume.Volume{}, errdefs.NotFound(fmt.Errorf("volume %s not found", nameOrID))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *fakeClusterBackend) GetVolumes(options volume.ListOptions) ([]*volume.Volume, error) {
|
|
||||||
if err := c.checkSwarm(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
volumes := []*volume.Volume{}
|
|
||||||
|
|
||||||
for _, v := range c.volumes {
|
|
||||||
volumes = append(volumes, v)
|
|
||||||
}
|
|
||||||
return volumes, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *fakeClusterBackend) CreateVolume(volumeCreate volume.CreateOptions) (*volume.Volume, error) {
|
|
||||||
if err := c.checkSwarm(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, ok := c.volumes[volumeCreate.Name]; ok {
|
|
||||||
// TODO(dperny): return appropriate already exists error
|
|
||||||
return nil, fmt.Errorf("already exists")
|
|
||||||
}
|
|
||||||
|
|
||||||
v := &volume.Volume{
|
|
||||||
Name: volumeCreate.Name,
|
|
||||||
Driver: volumeCreate.Driver,
|
|
||||||
Labels: volumeCreate.Labels,
|
|
||||||
Options: volumeCreate.DriverOpts,
|
|
||||||
Scope: "global",
|
|
||||||
}
|
|
||||||
|
|
||||||
v.ClusterVolume = &volume.ClusterVolume{
|
|
||||||
ID: fmt.Sprintf("cluster_%d", c.idCount),
|
|
||||||
Spec: *volumeCreate.ClusterVolumeSpec,
|
|
||||||
}
|
|
||||||
|
|
||||||
c.idCount = c.idCount + 1
|
|
||||||
if c.volumes == nil {
|
|
||||||
c.volumes = map[string]*volume.Volume{
|
|
||||||
v.Name: v,
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
c.volumes[v.Name] = v
|
|
||||||
}
|
|
||||||
|
|
||||||
return v, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *fakeClusterBackend) RemoveVolume(nameOrID string, force bool) error {
|
|
||||||
if err := c.checkSwarm(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
v, ok := c.volumes[nameOrID]
|
|
||||||
if !ok {
|
|
||||||
return errdefs.NotFound(fmt.Errorf("volume %s not found", nameOrID))
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, mustforce := v.Options["mustforce"]; mustforce && !force {
|
|
||||||
return errdefs.Conflict(fmt.Errorf("volume %s must be force removed", nameOrID))
|
|
||||||
}
|
|
||||||
|
|
||||||
delete(c.volumes, nameOrID)
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *fakeClusterBackend) UpdateVolume(nameOrID string, version uint64, _ volume.UpdateOptions) error {
|
|
||||||
if err := c.checkSwarm(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if v, ok := c.volumes[nameOrID]; ok {
|
|
||||||
if v.ClusterVolume.Meta.Version.Index != version {
|
|
||||||
return fmt.Errorf("wrong version")
|
|
||||||
}
|
|
||||||
v.ClusterVolume.Meta.Version.Index = v.ClusterVolume.Meta.Version.Index + 1
|
|
||||||
// for testing, we don't actually need to change anything about the
|
|
||||||
// volume object. let's just increment the version so we can see the
|
|
||||||
// call happened.
|
|
||||||
} else {
|
|
||||||
return errdefs.NotFound(fmt.Errorf("volume %q not found", nameOrID))
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
30
api/server/router_swapper.go
Normal file
30
api/server/router_swapper.go
Normal file
|
@ -0,0 +1,30 @@
|
||||||
|
package server // import "github.com/docker/docker/api/server"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/http"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/gorilla/mux"
|
||||||
|
)
|
||||||
|
|
||||||
|
// routerSwapper is an http.Handler that allows you to swap
|
||||||
|
// mux routers.
|
||||||
|
type routerSwapper struct {
|
||||||
|
mu sync.Mutex
|
||||||
|
router *mux.Router
|
||||||
|
}
|
||||||
|
|
||||||
|
// Swap changes the old router with the new one.
|
||||||
|
func (rs *routerSwapper) Swap(newRouter *mux.Router) {
|
||||||
|
rs.mu.Lock()
|
||||||
|
rs.router = newRouter
|
||||||
|
rs.mu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ServeHTTP makes the routerSwapper to implement the http.Handler interface.
|
||||||
|
func (rs *routerSwapper) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||||
|
rs.mu.Lock()
|
||||||
|
router := rs.router
|
||||||
|
rs.mu.Unlock()
|
||||||
|
router.ServeHTTP(w, r)
|
||||||
|
}
|
|
@ -2,37 +2,124 @@ package server // import "github.com/docker/docker/api/server"
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"crypto/tls"
|
||||||
|
"net"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
|
||||||
"github.com/containerd/log"
|
|
||||||
"github.com/docker/docker/api/server/httpstatus"
|
|
||||||
"github.com/docker/docker/api/server/httputils"
|
"github.com/docker/docker/api/server/httputils"
|
||||||
"github.com/docker/docker/api/server/middleware"
|
"github.com/docker/docker/api/server/middleware"
|
||||||
"github.com/docker/docker/api/server/router"
|
"github.com/docker/docker/api/server/router"
|
||||||
"github.com/docker/docker/api/server/router/debug"
|
"github.com/docker/docker/api/server/router/debug"
|
||||||
"github.com/docker/docker/api/types"
|
|
||||||
"github.com/docker/docker/dockerversion"
|
"github.com/docker/docker/dockerversion"
|
||||||
|
"github.com/docker/docker/errdefs"
|
||||||
"github.com/gorilla/mux"
|
"github.com/gorilla/mux"
|
||||||
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
|
"github.com/sirupsen/logrus"
|
||||||
)
|
)
|
||||||
|
|
||||||
// versionMatcher defines a variable matcher to be parsed by the router
|
// versionMatcher defines a variable matcher to be parsed by the router
|
||||||
// when a request is about to be served.
|
// when a request is about to be served.
|
||||||
const versionMatcher = "/v{version:[0-9.]+}"
|
const versionMatcher = "/v{version:[0-9.]+}"
|
||||||
|
|
||||||
|
// Config provides the configuration for the API server
|
||||||
|
type Config struct {
|
||||||
|
Logging bool
|
||||||
|
CorsHeaders string
|
||||||
|
Version string
|
||||||
|
SocketGroup string
|
||||||
|
TLSConfig *tls.Config
|
||||||
|
}
|
||||||
|
|
||||||
// Server contains instance details for the server
|
// Server contains instance details for the server
|
||||||
type Server struct {
|
type Server struct {
|
||||||
|
cfg *Config
|
||||||
|
servers []*HTTPServer
|
||||||
|
routers []router.Router
|
||||||
|
routerSwapper *routerSwapper
|
||||||
middlewares []middleware.Middleware
|
middlewares []middleware.Middleware
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// New returns a new instance of the server based on the specified configuration.
|
||||||
|
// It allocates resources which will be needed for ServeAPI(ports, unix-sockets).
|
||||||
|
func New(cfg *Config) *Server {
|
||||||
|
return &Server{
|
||||||
|
cfg: cfg,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// UseMiddleware appends a new middleware to the request chain.
|
// UseMiddleware appends a new middleware to the request chain.
|
||||||
// This needs to be called before the API routes are configured.
|
// This needs to be called before the API routes are configured.
|
||||||
func (s *Server) UseMiddleware(m middleware.Middleware) {
|
func (s *Server) UseMiddleware(m middleware.Middleware) {
|
||||||
s.middlewares = append(s.middlewares, m)
|
s.middlewares = append(s.middlewares, m)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Server) makeHTTPHandler(handler httputils.APIFunc, operation string) http.HandlerFunc {
|
// Accept sets a listener the server accepts connections into.
|
||||||
return otelhttp.NewHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
func (s *Server) Accept(addr string, listeners ...net.Listener) {
|
||||||
|
for _, listener := range listeners {
|
||||||
|
httpServer := &HTTPServer{
|
||||||
|
srv: &http.Server{
|
||||||
|
Addr: addr,
|
||||||
|
},
|
||||||
|
l: listener,
|
||||||
|
}
|
||||||
|
s.servers = append(s.servers, httpServer)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close closes servers and thus stop receiving requests
|
||||||
|
func (s *Server) Close() {
|
||||||
|
for _, srv := range s.servers {
|
||||||
|
if err := srv.Close(); err != nil {
|
||||||
|
logrus.Error(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// serveAPI loops through all initialized servers and spawns goroutine
|
||||||
|
// with Serve method for each. It sets createMux() as Handler also.
|
||||||
|
func (s *Server) serveAPI() error {
|
||||||
|
var chErrors = make(chan error, len(s.servers))
|
||||||
|
for _, srv := range s.servers {
|
||||||
|
srv.srv.Handler = s.routerSwapper
|
||||||
|
go func(srv *HTTPServer) {
|
||||||
|
var err error
|
||||||
|
logrus.Infof("API listen on %s", srv.l.Addr())
|
||||||
|
if err = srv.Serve(); err != nil && strings.Contains(err.Error(), "use of closed network connection") {
|
||||||
|
err = nil
|
||||||
|
}
|
||||||
|
chErrors <- err
|
||||||
|
}(srv)
|
||||||
|
}
|
||||||
|
|
||||||
|
for range s.servers {
|
||||||
|
err := <-chErrors
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// HTTPServer contains an instance of http server and the listener.
|
||||||
|
// srv *http.Server, contains configuration to create an http server and a mux router with all api end points.
|
||||||
|
// l net.Listener, is a TCP or Socket listener that dispatches incoming request to the router.
|
||||||
|
type HTTPServer struct {
|
||||||
|
srv *http.Server
|
||||||
|
l net.Listener
|
||||||
|
}
|
||||||
|
|
||||||
|
// Serve starts listening for inbound requests.
|
||||||
|
func (s *HTTPServer) Serve() error {
|
||||||
|
return s.srv.Serve(s.l)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close stops the HTTPServer from accepting inbound requests by
// closing its listener. Note it closes only the listener, not the
// http.Server itself, so Serve unblocks with a "closed" error.
func (s *HTTPServer) Close() error {
	return s.l.Close()
}
|
||||||
|
|
||||||
|
func (s *Server) makeHTTPHandler(handler httputils.APIFunc) http.HandlerFunc {
|
||||||
|
return func(w http.ResponseWriter, r *http.Request) {
|
||||||
// Define the context that we'll pass around to share info
|
// Define the context that we'll pass around to share info
|
||||||
// like the docker-request-id.
|
// like the docker-request-id.
|
||||||
//
|
//
|
||||||
|
@ -44,7 +131,6 @@ func (s *Server) makeHTTPHandler(handler httputils.APIFunc, operation string) ht
|
||||||
// use intermediate variable to prevent "should not use basic type
|
// use intermediate variable to prevent "should not use basic type
|
||||||
// string as key in context.WithValue" golint errors
|
// string as key in context.WithValue" golint errors
|
||||||
ctx := context.WithValue(r.Context(), dockerversion.UAStringKey{}, r.Header.Get("User-Agent"))
|
ctx := context.WithValue(r.Context(), dockerversion.UAStringKey{}, r.Header.Get("User-Agent"))
|
||||||
|
|
||||||
r = r.WithContext(ctx)
|
r = r.WithContext(ctx)
|
||||||
handlerFunc := s.handlerWithGlobalMiddlewares(handler)
|
handlerFunc := s.handlerWithGlobalMiddlewares(handler)
|
||||||
|
|
||||||
|
@ -54,47 +140,72 @@ func (s *Server) makeHTTPHandler(handler httputils.APIFunc, operation string) ht
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := handlerFunc(ctx, w, r, vars); err != nil {
|
if err := handlerFunc(ctx, w, r, vars); err != nil {
|
||||||
statusCode := httpstatus.FromError(err)
|
statusCode := errdefs.GetHTTPErrorStatusCode(err)
|
||||||
if statusCode >= 500 {
|
if statusCode >= 500 {
|
||||||
log.G(ctx).Errorf("Handler for %s %s returned error: %v", r.Method, r.URL.Path, err)
|
logrus.Errorf("Handler for %s %s returned error: %v", r.Method, r.URL.Path, err)
|
||||||
|
}
|
||||||
|
httputils.MakeErrorHandler(err)(w, r)
|
||||||
}
|
}
|
||||||
_ = httputils.WriteJSON(w, statusCode, &types.ErrorResponse{
|
|
||||||
Message: err.Error(),
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
}), operation).ServeHTTP
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// CreateMux returns a new mux with all the routers registered.
|
// InitRouter initializes the list of routers for the server.
|
||||||
func (s *Server) CreateMux(routers ...router.Router) *mux.Router {
|
// This method also enables the Go profiler.
|
||||||
|
func (s *Server) InitRouter(routers ...router.Router) {
|
||||||
|
s.routers = append(s.routers, routers...)
|
||||||
|
|
||||||
|
m := s.createMux()
|
||||||
|
s.routerSwapper = &routerSwapper{
|
||||||
|
router: m,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// pageNotFoundError is returned for requests that match no registered
// route. The NotFound marker method lets error-classification code
// treat it as a not-found (presumably mapped to HTTP 404 by the
// error-handling layer).
type pageNotFoundError struct{}

// Error implements the error interface with a fixed message.
func (e pageNotFoundError) Error() string {
	return "page not found"
}

// NotFound marks the error as a not-found condition.
func (e pageNotFoundError) NotFound() {}
|
||||||
|
|
||||||
|
// createMux initializes the main router the server uses.
|
||||||
|
func (s *Server) createMux() *mux.Router {
|
||||||
m := mux.NewRouter()
|
m := mux.NewRouter()
|
||||||
|
|
||||||
log.G(context.TODO()).Debug("Registering routers")
|
logrus.Debug("Registering routers")
|
||||||
for _, apiRouter := range routers {
|
for _, apiRouter := range s.routers {
|
||||||
for _, r := range apiRouter.Routes() {
|
for _, r := range apiRouter.Routes() {
|
||||||
f := s.makeHTTPHandler(r.Handler(), r.Method()+" "+r.Path())
|
f := s.makeHTTPHandler(r.Handler())
|
||||||
|
|
||||||
log.G(context.TODO()).Debugf("Registering %s, %s", r.Method(), r.Path())
|
logrus.Debugf("Registering %s, %s", r.Method(), r.Path())
|
||||||
m.Path(versionMatcher + r.Path()).Methods(r.Method()).Handler(f)
|
m.Path(versionMatcher + r.Path()).Methods(r.Method()).Handler(f)
|
||||||
m.Path(r.Path()).Methods(r.Method()).Handler(f)
|
m.Path(r.Path()).Methods(r.Method()).Handler(f)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
debugRouter := debug.NewRouter()
|
debugRouter := debug.NewRouter()
|
||||||
|
s.routers = append(s.routers, debugRouter)
|
||||||
for _, r := range debugRouter.Routes() {
|
for _, r := range debugRouter.Routes() {
|
||||||
f := s.makeHTTPHandler(r.Handler(), r.Method()+" "+r.Path())
|
f := s.makeHTTPHandler(r.Handler())
|
||||||
m.Path("/debug" + r.Path()).Handler(f)
|
m.Path("/debug" + r.Path()).Handler(f)
|
||||||
}
|
}
|
||||||
|
|
||||||
notFoundHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
notFoundHandler := httputils.MakeErrorHandler(pageNotFoundError{})
|
||||||
_ = httputils.WriteJSON(w, http.StatusNotFound, &types.ErrorResponse{
|
|
||||||
Message: "page not found",
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
m.HandleFunc(versionMatcher+"/{path:.*}", notFoundHandler)
|
m.HandleFunc(versionMatcher+"/{path:.*}", notFoundHandler)
|
||||||
m.NotFoundHandler = notFoundHandler
|
m.NotFoundHandler = notFoundHandler
|
||||||
m.MethodNotAllowedHandler = notFoundHandler
|
m.MethodNotAllowedHandler = notFoundHandler
|
||||||
|
|
||||||
return m
|
return m
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Wait blocks the server goroutine until it exits.
|
||||||
|
// It sends an error message if there is any error during
|
||||||
|
// the API execution.
|
||||||
|
func (s *Server) Wait(waitChan chan error) {
|
||||||
|
if err := s.serveAPI(); err != nil {
|
||||||
|
logrus.Errorf("ServeAPI error: %v", err)
|
||||||
|
waitChan <- err
|
||||||
|
return
|
||||||
|
}
|
||||||
|
waitChan <- nil
|
||||||
|
}
|
||||||
|
|
|
@ -13,15 +13,16 @@ import (
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestMiddlewares(t *testing.T) {
|
func TestMiddlewares(t *testing.T) {
|
||||||
srv := &Server{}
|
cfg := &Config{
|
||||||
|
Version: "0.1omega2",
|
||||||
m, err := middleware.NewVersionMiddleware("0.1omega2", api.DefaultVersion, api.MinSupportedAPIVersion)
|
}
|
||||||
if err != nil {
|
srv := &Server{
|
||||||
t.Fatal(err)
|
cfg: cfg,
|
||||||
}
|
}
|
||||||
srv.UseMiddleware(*m)
|
|
||||||
|
|
||||||
req, _ := http.NewRequest(http.MethodGet, "/containers/json", nil)
|
srv.UseMiddleware(middleware.NewVersionMiddleware("0.1omega2", api.DefaultVersion, api.MinVersion))
|
||||||
|
|
||||||
|
req, _ := http.NewRequest("GET", "/containers/json", nil)
|
||||||
resp := httptest.NewRecorder()
|
resp := httptest.NewRecorder()
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
|
|
||||||
|
|
2408
api/swagger.yaml
2408
api/swagger.yaml
File diff suppressed because it is too large
Load diff
|
@ -1,7 +1,8 @@
|
||||||
package {{ .Package }} // import "github.com/docker/docker/api/types/{{ .Package }}"
|
package {{ .Package }} // import "github.com/docker/docker/api/types/{{ .Package }}"
|
||||||
|
|
||||||
// ----------------------------------------------------------------------------
|
// ----------------------------------------------------------------------------
|
||||||
// Code generated by `swagger generate operation`. DO NOT EDIT.
|
// DO NOT EDIT THIS FILE
|
||||||
|
// This file was generated by `swagger generate operation`
|
||||||
//
|
//
|
||||||
// See hack/generate-swagger-api.sh
|
// See hack/generate-swagger-api.sh
|
||||||
// ----------------------------------------------------------------------------
|
// ----------------------------------------------------------------------------
|
||||||
|
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Reference in a new issue