Compare commits
1065 commits

.cirrus.yml (new file, 31 lines)
@@ -0,0 +1,31 @@
freebsd_task:
  name: FreeBSD

  matrix:
    - name: FreeBSD 14.0
      freebsd_instance:
        image_family: freebsd-14-0

  pkginstall_script:
    - pkg update -f
    - pkg install -y go122
    - pkg install -y git

  setup_script:
    - ln -s /usr/local/bin/go122 /usr/local/bin/go
    - pw groupadd sftpgo
    - pw useradd sftpgo -g sftpgo -w none -m
    - mkdir /home/sftpgo/sftpgo
    - cp -R . /home/sftpgo/sftpgo
    - chown -R sftpgo:sftpgo /home/sftpgo/sftpgo

  compile_script:
    - su sftpgo -c 'cd ~/sftpgo && go build -trimpath -tags nopgxregisterdefaulttypes -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/internal/version.commit=`git describe --always --abbrev=8 --dirty` -X github.com/drakkan/sftpgo/v2/internal/version.date=`date -u +%FT%TZ`" -o sftpgo'
    - su sftpgo -c 'cd ~/sftpgo/tests/eventsearcher && go build -trimpath -ldflags "-s -w" -o eventsearcher'
    - su sftpgo -c 'cd ~/sftpgo/tests/ipfilter && go build -trimpath -ldflags "-s -w" -o ipfilter'

  check_script:
    - su sftpgo -c 'cd ~/sftpgo && ./sftpgo initprovider && ./sftpgo resetprovider --force'

  test_script:
    - su sftpgo -c 'cd ~/sftpgo && go test -v -tags nopgxregisterdefaulttypes -p 1 -timeout 20m ./... -coverprofile=coverage.txt -covermode=atomic'
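
The build commands above (and throughout this compare) stamp the commit and build date into the binary with `-ldflags "-X ..."`, which overwrites package-level string variables at link time. A minimal sketch of the receiving side, assuming a hypothetical `version` package: only the variable names (`commit`, `date`) and the import path are taken from the `-X` flags shown here; the function below is illustrative and not the actual sftpgo API.

```go
// Package version sketches the target of the -X flags used in the
// build commands above. BuildInfo is a hypothetical helper, not the
// real sftpgo internal/version API (which this diff does not show).
package version

import "fmt"

var (
	commit = "" // overwritten by -X github.com/drakkan/sftpgo/v2/internal/version.commit=<sha>
	date   = "" // overwritten by -X github.com/drakkan/sftpgo/v2/internal/version.date=<timestamp>
)

// BuildInfo reports the injected metadata, falling back to placeholders
// for binaries built without the -X flags (e.g. a plain `go build`).
func BuildInfo() string {
	c, d := commit, date
	if c == "" {
		c = "unknown"
	}
	if d == "" {
		d = "unknown"
	}
	return fmt.Sprintf("commit: %s, build date: %s", c, d)
}
```

The link-time `-X` injection only works on package-level string variables that are not initialized from a function call, which is why CI builds always pass both values explicitly.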

.github/FUNDING.yml (2 changes)
@@ -9,4 +9,4 @@ community_bridge: # Replace with a single Community Bridge project-name e.g., cl
 liberapay: # Replace with a single Liberapay username
 issuehunt: # Replace with a single IssueHunt username
 otechie: # Replace with a single Otechie username
-custom: ['https://www.paypal.com/donate?hosted_button_id=JQL6GBT8GXRKC'] # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
+custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']

.github/ISSUE_TEMPLATE/bug_report.yml (new file, 108 lines)
@@ -0,0 +1,108 @@
name: Open Source Bug Report
description: "Submit a report and help us improve SFTPGo"
title: "[Bug]: "
labels: ["bug"]
body:
  - type: markdown
    attributes:
      value: |
        ### 👍 Thank you for contributing to our project!
        Before asking for help please check the [support policy](https://github.com/drakkan/sftpgo#support-policy).
        If you are a commercial user or a project sponsor please contact us using the dedicated [email address](mailto:support@sftpgo.com).
  - type: checkboxes
    id: before-posting
    attributes:
      label: "⚠️ This issue respects the following points: ⚠️"
      description: All conditions are **required**.
      options:
        - label: This is a **bug**, not a question or a configuration issue.
          required: true
        - label: This issue is **not** already reported on Github _(I've searched it)_.
          required: true
  - type: textarea
    id: bug-description
    attributes:
      label: Bug description
      description: |
        Provide a description of the bug you're experiencing.
        Don't just expect someone will guess what your specific problem is and provide full details.
    validations:
      required: true
  - type: textarea
    id: reproduce
    attributes:
      label: Steps to reproduce
      description: |
        Describe the steps to reproduce the bug.
        The better your description is the fastest you'll get an _(accurate)_ answer.
      value: |
        1.
        2.
        3.
    validations:
      required: true
  - type: textarea
    id: expected-behavior
    attributes:
      label: Expected behavior
      description: Describe what you expected to happen instead.
    validations:
      required: true
  - type: input
    id: version
    attributes:
      label: SFTPGo version
    validations:
      required: true
  - type: input
    id: data-provider
    attributes:
      label: Data provider
    validations:
      required: true
  - type: dropdown
    id: install-method
    attributes:
      label: Installation method
      description: |
        Select installation method you've used.
        _Describe the method in the "Additional info" section if you chose "Other"._
      options:
        - "Community Docker image"
        - "Community Deb package"
        - "Community RPM package"
        - "Other"
    validations:
      required: true
  - type: textarea
    attributes:
      label: Configuration
      description: "Describe your customizations to the configuration: both config file changes and overrides via environment variables"
      value: config
    validations:
      required: true
  - type: textarea
    id: logs
    attributes:
      label: Relevant log output
      description: Please copy and paste any relevant log output. This will be automatically formatted into code, so no need for backticks.
      render: shell
  - type: dropdown
    id: usecase
    attributes:
      label: What are you using SFTPGo for?
      description: We'd like to understand your SFTPGo usecase more
      multiple: true
      options:
        - "Private user, home usecase (home backup/VPS)"
        - "Professional user, 1 person business"
        - "Small business (3-person firm with file exchange?)"
        - "Medium business"
        - "Enterprise"
    validations:
      required: true
  - type: textarea
    id: additional-info
    attributes:
      label: Additional info
      description: Any additional information related to the issue.

.github/ISSUE_TEMPLATE/config.yml (new file, 9 lines)
@@ -0,0 +1,9 @@
blank_issues_enabled: false
contact_links:
  - name: Commercial Support
    url: https://sftpgo.com/
    about: >
      If you need Professional support, so your reports are prioritized and resolved more quickly.
  - name: GitHub Community Discussions
    url: https://github.com/drakkan/sftpgo/discussions
    about: Please ask and answer questions here.

.github/ISSUE_TEMPLATE/feature_request.yml (new file, 42 lines)
@@ -0,0 +1,42 @@
name: 🚀 Feature request
description: Suggest an idea for SFTPGo
labels: ["suggestion"]
body:
  - type: textarea
    attributes:
      label: Is your feature request related to a problem? Please describe.
      description: A clear and concise description of what the problem is.
    validations:
      required: false
  - type: textarea
    attributes:
      label: Describe the solution you'd like
      description: A clear and concise description of what you want to happen.
    validations:
      required: true
  - type: textarea
    attributes:
      label: Describe alternatives you've considered
      description: A clear and concise description of any alternative solutions or features you've considered.
    validations:
      required: false
  - type: dropdown
    id: usecase
    attributes:
      label: What are you using SFTPGo for?
      description: We'd like to understand your SFTPGo usecase more
      multiple: true
      options:
        - "Private user, home usecase (home backup/VPS)"
        - "Professional user, 1 person business"
        - "Small business (3-person firm with file exchange?)"
        - "Medium business"
        - "Enterprise"
    validations:
      required: true
  - type: textarea
    attributes:
      label: Additional context
      description: Add any other context or screenshots about the feature request here.
    validations:
      required: false

.github/PULL_REQUEST_TEMPLATE.md (new file, 5 lines)
@@ -0,0 +1,5 @@
# Checklist for Pull Requests

- [ ] Have you signed the [Contributor License Agreement](https://sftpgo.com/cla.html)?

---

.github/dependabot.yml (10 changes)
@@ -1,11 +1,11 @@
 version: 2
 
 updates:
-  - package-ecosystem: "gomod"
-    directory: "/"
-    schedule:
-      interval: "weekly"
-    open-pull-requests-limit: 2
+  #- package-ecosystem: "gomod"
+  #  directory: "/"
+  #  schedule:
+  #    interval: "weekly"
+  #  open-pull-requests-limit: 2
 
   - package-ecosystem: "docker"
     directory: "/"

.github/workflows/codeql.yml (new file, 36 lines)
@@ -0,0 +1,36 @@
name: "Code scanning - action"

on:
  push:
  pull_request:
  schedule:
    - cron: '30 1 * * 6'

jobs:
  CodeQL-Build:
    runs-on: ubuntu-latest

    permissions:
      security-events: write

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version: '1.22'

      - name: Initialize CodeQL
        uses: github/codeql-action/init@v3
        with:
          languages: go

      - name: Autobuild
        uses: github/codeql-action/autobuild@v3

      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v3

.github/workflows/development.yml (326 changes)
@@ -11,64 +11,101 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go: [1.16]
+        go: ['1.22']
         os: [ubuntu-latest, macos-latest]
         upload-coverage: [true]
         include:
-          - go: 1.16
+          - go: '1.22'
             os: windows-latest
             upload-coverage: false
 
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v4
         with:
          fetch-depth: 0
 
       - name: Set up Go
-        uses: actions/setup-go@v2
+        uses: actions/setup-go@v5
         with:
           go-version: ${{ matrix.go }}
 
       - name: Build for Linux/macOS x86_64
         if: startsWith(matrix.os, 'windows-') != true
-        run: go build -trimpath -ldflags "-s -w -X github.com/drakkan/sftpgo/version.commit=`git describe --always --dirty` -X github.com/drakkan/sftpgo/version.date=`date -u +%FT%TZ`" -o sftpgo
+        run: |
+          go build -trimpath -tags nopgxregisterdefaulttypes -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/internal/version.commit=`git describe --always --abbrev=8 --dirty` -X github.com/drakkan/sftpgo/v2/internal/version.date=`date -u +%FT%TZ`" -o sftpgo
+          cd tests/eventsearcher
+          go build -trimpath -ldflags "-s -w" -o eventsearcher
+          cd -
+          cd tests/ipfilter
+          go build -trimpath -ldflags "-s -w" -o ipfilter
+          cd -
+          ./sftpgo initprovider
+          ./sftpgo resetprovider --force
 
       - name: Build for macOS arm64
         if: startsWith(matrix.os, 'macos-') == true
-        run: CGO_ENABLED=1 GOOS=darwin GOARCH=arm64 SDKROOT=$(xcrun --sdk macosx --show-sdk-path) go build -trimpath -ldflags "-s -w -X github.com/drakkan/sftpgo/version.commit=`git describe --always --dirty` -X github.com/drakkan/sftpgo/version.date=`date -u +%FT%TZ`" -o sftpgo_arm64
+        run: CGO_ENABLED=1 GOOS=darwin GOARCH=arm64 SDKROOT=$(xcrun --sdk macosx --show-sdk-path) go build -trimpath -tags nopgxregisterdefaulttypes -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/internal/version.commit=`git describe --always --abbrev=8 --dirty` -X github.com/drakkan/sftpgo/v2/internal/version.date=`date -u +%FT%TZ`" -o sftpgo_arm64
 
       - name: Build for Windows
         if: startsWith(matrix.os, 'windows-')
         run: |
-          $GIT_COMMIT = (git describe --always --dirty) | Out-String
+          $GIT_COMMIT = (git describe --always --abbrev=8 --dirty) | Out-String
           $DATE_TIME = ([datetime]::Now.ToUniversalTime().toString("yyyy-MM-ddTHH:mm:ssZ")) | Out-String
-          go build -trimpath -ldflags "-s -w -X github.com/drakkan/sftpgo/version.commit=$GIT_COMMIT -X github.com/drakkan/sftpgo/version.date=$DATE_TIME" -o sftpgo.exe
+          $LATEST_TAG = ((git describe --tags $(git rev-list --tags --max-count=1)) | Out-String).Trim()
+          $REV_LIST=$LATEST_TAG+"..HEAD"
+          $COMMITS_FROM_TAG= ((git rev-list $REV_LIST --count) | Out-String).Trim()
+          $FILE_VERSION = $LATEST_TAG.substring(1) + "." + $COMMITS_FROM_TAG
+          go install github.com/tc-hib/go-winres@latest
+          go-winres simply --arch amd64 --product-version $LATEST_TAG-dev-$GIT_COMMIT --file-version $FILE_VERSION --file-description "SFTPGo server" --product-name SFTPGo --copyright "AGPL-3.0" --original-filename sftpgo.exe --icon .\windows-installer\icon.ico
+          go build -trimpath -tags nopgxregisterdefaulttypes -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/internal/version.commit=$GIT_COMMIT -X github.com/drakkan/sftpgo/v2/internal/version.date=$DATE_TIME" -o sftpgo.exe
+          cd tests/eventsearcher
+          go build -trimpath -ldflags "-s -w" -o eventsearcher.exe
+          cd ../..
+          cd tests/ipfilter
+          go build -trimpath -ldflags "-s -w" -o ipfilter.exe
+          cd ../..
+          mkdir arm64
+          $Env:CGO_ENABLED='0'
+          $Env:GOOS='windows'
+          $Env:GOARCH='arm64'
+          go-winres simply --arch arm64 --product-version $LATEST_TAG-dev-$GIT_COMMIT --file-version $FILE_VERSION --file-description "SFTPGo server" --product-name SFTPGo --copyright "AGPL-3.0" --original-filename sftpgo.exe --icon .\windows-installer\icon.ico
+          go build -trimpath -tags nopgxregisterdefaulttypes,nosqlite -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/internal/version.commit=$GIT_COMMIT -X github.com/drakkan/sftpgo/v2/internal/version.date=$DATE_TIME" -o .\arm64\sftpgo.exe
+          mkdir x86
+          $Env:GOARCH='386'
+          go-winres simply --arch 386 --product-version $LATEST_TAG-dev-$GIT_COMMIT --file-version $FILE_VERSION --file-description "SFTPGo server" --product-name SFTPGo --copyright "AGPL-3.0" --original-filename sftpgo.exe --icon .\windows-installer\icon.ico
+          go build -trimpath -tags nopgxregisterdefaulttypes,nosqlite -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/internal/version.commit=$GIT_COMMIT -X github.com/drakkan/sftpgo/v2/internal/version.date=$DATE_TIME" -o .\x86\sftpgo.exe
+          Remove-Item Env:\CGO_ENABLED
+          Remove-Item Env:\GOOS
+          Remove-Item Env:\GOARCH
 
       - name: Run test cases using SQLite provider
-        run: go test -v -p 1 -timeout 10m ./... -coverprofile=coverage.txt -covermode=atomic
+        run: go test -v -tags nopgxregisterdefaulttypes -p 1 -timeout 15m ./... -coverprofile=coverage.txt -covermode=atomic
 
       - name: Upload coverage to Codecov
         if: ${{ matrix.upload-coverage }}
-        uses: codecov/codecov-action@v1
+        uses: codecov/codecov-action@v4
         with:
           file: ./coverage.txt
           fail_ci_if_error: false
+          token: ${{ secrets.CODECOV_TOKEN }}
 
       - name: Run test cases using bolt provider
         run: |
-          go test -v -p 1 -timeout 2m ./config -covermode=atomic
-          go test -v -p 1 -timeout 5m ./common -covermode=atomic
-          go test -v -p 1 -timeout 5m ./httpd -covermode=atomic
-          go test -v -p 1 -timeout 8m ./sftpd -covermode=atomic
-          go test -v -p 1 -timeout 5m ./ftpd -covermode=atomic
-          go test -v -p 1 -timeout 5m ./webdavd -covermode=atomic
-          go test -v -p 1 -timeout 2m ./telemetry -covermode=atomic
+          go test -v -tags nopgxregisterdefaulttypes -p 1 -timeout 2m ./internal/config -covermode=atomic
+          go test -v -tags nopgxregisterdefaulttypes -p 1 -timeout 5m ./internal/common -covermode=atomic
+          go test -v -tags nopgxregisterdefaulttypes -p 1 -timeout 5m ./internal/httpd -covermode=atomic
+          go test -v -tags nopgxregisterdefaulttypes -p 1 -timeout 8m ./internal/sftpd -covermode=atomic
+          go test -v -tags nopgxregisterdefaulttypes -p 1 -timeout 5m ./internal/ftpd -covermode=atomic
+          go test -v -tags nopgxregisterdefaulttypes -p 1 -timeout 5m ./internal/webdavd -covermode=atomic
+          go test -v -tags nopgxregisterdefaulttypes -p 1 -timeout 2m ./internal/telemetry -covermode=atomic
+          go test -v -tags nopgxregisterdefaulttypes -p 1 -timeout 2m ./internal/mfa -covermode=atomic
+          go test -v -tags nopgxregisterdefaulttypes -p 1 -timeout 2m ./internal/command -covermode=atomic
         env:
           SFTPGO_DATA_PROVIDER__DRIVER: bolt
           SFTPGO_DATA_PROVIDER__NAME: 'sftpgo_bolt.db'
 
       - name: Run test cases using memory provider
-        run: go test -v -p 1 -timeout 10m ./... -covermode=atomic
+        run: go test -v -tags nopgxregisterdefaulttypes -p 1 -timeout 15m ./... -covermode=atomic
         env:
           SFTPGO_DATA_PROVIDER__DRIVER: memory
           SFTPGO_DATA_PROVIDER__NAME: ''
@@ -82,30 +119,130 @@ jobs:
           cp sftpgo.json output/
           cp -r templates output/
           cp -r static output/
+          cp -r openapi output/
           cp init/com.github.drakkan.sftpgo.plist output/init/
           ./sftpgo gen completion bash > output/bash_completion/sftpgo
           ./sftpgo gen completion zsh > output/zsh_completion/_sftpgo
           ./sftpgo gen man -d output/man/man1
           gzip output/man/man1/*
 
-      - name: Prepare build artifact for Windows
-        if: startsWith(matrix.os, 'windows-')
+      - name: Prepare Windows installer
+        if: ${{ startsWith(matrix.os, 'windows-') && github.event_name != 'pull_request' }}
         run: |
+          Remove-Item -LiteralPath "output" -Force -Recurse -ErrorAction Ignore
           mkdir output
           copy .\sftpgo.exe .\output
           copy .\sftpgo.json .\output
+          copy .\sftpgo.db .\output
+          copy .\LICENSE .\output\LICENSE.txt
           mkdir output\templates
           xcopy .\templates .\output\templates\ /E
           mkdir output\static
           xcopy .\static .\output\static\ /E
+          mkdir output\openapi
+          xcopy .\openapi .\output\openapi\ /E
+          $LATEST_TAG = ((git describe --tags $(git rev-list --tags --max-count=1)) | Out-String).Trim()
+          $REV_LIST=$LATEST_TAG+"..HEAD"
+          $COMMITS_FROM_TAG= ((git rev-list $REV_LIST --count) | Out-String).Trim()
+          $Env:SFTPGO_ISS_DEV_VERSION = $LATEST_TAG + "." + $COMMITS_FROM_TAG
+          $CERT_PATH=(Get-Location -PSProvider FileSystem).ProviderPath + "\cert.pfx"
+          [IO.File]::WriteAllBytes($CERT_PATH,[System.Convert]::FromBase64String($Env:CERT_DATA))
+          certutil -f -p "$Env:CERT_PASS" -importpfx MY "$CERT_PATH"
+          rm "$CERT_PATH"
+          & 'C:/Program Files (x86)/Windows Kits/10/bin/10.0.20348.0/x86/signtool.exe' sign /sm /tr http://timestamp.sectigo.com /td sha256 /fd sha256 /n "Nicola Murino" /d "SFTPGo" .\sftpgo.exe
+          & 'C:/Program Files (x86)/Windows Kits/10/bin/10.0.20348.0/x86/signtool.exe' sign /sm /tr http://timestamp.sectigo.com /td sha256 /fd sha256 /n "Nicola Murino" /d "SFTPGo" .\arm64\sftpgo.exe
+          & 'C:/Program Files (x86)/Windows Kits/10/bin/10.0.20348.0/x86/signtool.exe' sign /sm /tr http://timestamp.sectigo.com /td sha256 /fd sha256 /n "Nicola Murino" /d "SFTPGo" .\x86\sftpgo.exe
+          $INNO_S='/Ssigntool=$qC:/Program Files (x86)/Windows Kits/10/bin/10.0.20348.0/x86/signtool.exe$q sign /sm /tr http://timestamp.sectigo.com /td sha256 /fd sha256 /n $qNicola Murino$q /d $qSFTPGo$q $f'
+          iscc "$INNO_S" .\windows-installer\sftpgo.iss
+
+          rm .\output\sftpgo.exe
+          rm .\output\sftpgo.db
+          copy .\arm64\sftpgo.exe .\output
+          (Get-Content .\output\sftpgo.json).replace('"sqlite"', '"bolt"') | Set-Content .\output\sftpgo.json
+          $Env:SFTPGO_DATA_PROVIDER__DRIVER='bolt'
+          $Env:SFTPGO_DATA_PROVIDER__NAME='.\output\sftpgo.db'
+          .\sftpgo.exe initprovider
+          Remove-Item Env:\SFTPGO_DATA_PROVIDER__DRIVER
+          Remove-Item Env:\SFTPGO_DATA_PROVIDER__NAME
+          $Env:SFTPGO_ISS_ARCH='arm64'
+          iscc "$INNO_S" .\windows-installer\sftpgo.iss
+
+          rm .\output\sftpgo.exe
+          copy .\x86\sftpgo.exe .\output
+          $Env:SFTPGO_ISS_ARCH='x86'
+          iscc "$INNO_S" .\windows-installer\sftpgo.iss
+          certutil -delstore MY "Nicola Murino"
+        env:
+          CERT_DATA: ${{ secrets.CERT_DATA }}
+          CERT_PASS: ${{ secrets.CERT_PASS }}
+
+      - name: Upload Windows installer x86_64 artifact
+        if: ${{ startsWith(matrix.os, 'windows-') && github.event_name != 'pull_request' }}
+        uses: actions/upload-artifact@v4
+        with:
+          name: sftpgo_windows_installer_x86_64
+          path: ./sftpgo_windows_x86_64.exe
+
+      - name: Upload Windows installer arm64 artifact
+        if: ${{ startsWith(matrix.os, 'windows-') && github.event_name != 'pull_request' }}
+        uses: actions/upload-artifact@v4
+        with:
+          name: sftpgo_windows_installer_arm64
+          path: ./sftpgo_windows_arm64.exe
+
+      - name: Upload Windows installer x86 artifact
+        if: ${{ startsWith(matrix.os, 'windows-') && github.event_name != 'pull_request' }}
+        uses: actions/upload-artifact@v4
+        with:
+          name: sftpgo_windows_installer_x86
+          path: ./sftpgo_windows_x86.exe
+
+      - name: Prepare build artifact for Windows
+        if: startsWith(matrix.os, 'windows-')
+        run: |
+          Remove-Item -LiteralPath "output" -Force -Recurse -ErrorAction Ignore
+          mkdir output
+          copy .\sftpgo.exe .\output
+          mkdir output\arm64
+          copy .\arm64\sftpgo.exe .\output\arm64
+          mkdir output\x86
+          copy .\x86\sftpgo.exe .\output\x86
+          copy .\sftpgo.json .\output
+          (Get-Content .\output\sftpgo.json).replace('"sqlite"', '"bolt"') | Set-Content .\output\sftpgo.json
+          mkdir output\templates
+          xcopy .\templates .\output\templates\ /E
+          mkdir output\static
+          xcopy .\static .\output\static\ /E
+          mkdir output\openapi
+          xcopy .\openapi .\output\openapi\ /E
 
       - name: Upload build artifact
         if: startsWith(matrix.os, 'ubuntu-') != true
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v4
         with:
           name: sftpgo-${{ matrix.os }}-go-${{ matrix.go }}
           path: output
 
+  test-build-flags:
+    name: Test build flags
+    runs-on: ubuntu-latest
+
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Set up Go
+        uses: actions/setup-go@v5
+        with:
+          go-version: '1.22'
+
+      - name: Build
+        run: |
+          go build -trimpath -tags nopgxregisterdefaulttypes,nogcs,nos3,noportable,nobolt,nomysql,nopgsql,nosqlite,nometrics,noazblob,unixcrypt -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/internal/internal/version.commit=`git describe --always --abbrev=8 --dirty` -X github.com/drakkan/sftpgo/v2/internal/version.date=`date -u +%FT%TZ`" -o sftpgo
+          ./sftpgo -v
+          cp -r openapi static templates internal/bundle/
+          go build -trimpath -tags nopgxregisterdefaulttypes,bundle -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/internal/version.commit=`git describe --always --abbrev=8 --dirty` -X github.com/drakkan/sftpgo/v2/internal/version.date=`date -u +%FT%TZ`" -o sftpgo
+          ./sftpgo -v
+
   test-postgresql-mysql-crdb:
     name: Test with PgSQL/MySQL/Cockroach
     runs-on: ubuntu-latest
@@ -132,27 +269,64 @@ jobs:
           MYSQL_USER: sftpgo
           MYSQL_PASSWORD: sftpgo
         options: >-
-          --health-cmd "mysqladmin status -h 127.0.0.1 -P 3306 -u root -p$MYSQL_ROOT_PASSWORD"
+          --health-cmd "mariadb-admin status -h 127.0.0.1 -P 3306 -u root -p$MYSQL_ROOT_PASSWORD"
           --health-interval 10s
           --health-timeout 5s
           --health-retries 6
         ports:
           - 3307:3306
 
+      mysql:
+        image: mysql:latest
+        env:
+          MYSQL_ROOT_PASSWORD: mysql
+          MYSQL_DATABASE: sftpgo
+          MYSQL_USER: sftpgo
+          MYSQL_PASSWORD: sftpgo
+        options: >-
+          --health-cmd "mysqladmin status -h 127.0.0.1 -P 3306 -u root -p$MYSQL_ROOT_PASSWORD"
+          --health-interval 10s
+          --health-timeout 5s
+          --health-retries 6
+        ports:
+          - 3308:3306
+
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v4
 
       - name: Set up Go
-        uses: actions/setup-go@v2
+        uses: actions/setup-go@v5
         with:
-          go-version: 1.16
+          go-version: '1.22'
 
       - name: Build
-        run: go build -trimpath -ldflags "-s -w -X github.com/drakkan/sftpgo/version.commit=`git describe --always --dirty` -X github.com/drakkan/sftpgo/version.date=`date -u +%FT%TZ`" -o sftpgo
+        run: |
+          go build -trimpath -tags nopgxregisterdefaulttypes -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/internal/version.commit=`git describe --always --abbrev=8 --dirty` -X github.com/drakkan/sftpgo/v2/internal/version.date=`date -u +%FT%TZ`" -o sftpgo
+          cd tests/eventsearcher
+          go build -trimpath -ldflags "-s -w" -o eventsearcher
+          cd -
+          cd tests/ipfilter
+          go build -trimpath -ldflags "-s -w" -o ipfilter
+          cd -
+
+      - name: Run tests using MySQL provider
+        run: |
+          ./sftpgo initprovider
+          ./sftpgo resetprovider --force
+          go test -v -tags nopgxregisterdefaulttypes -p 1 -timeout 15m ./... -covermode=atomic
+        env:
+          SFTPGO_DATA_PROVIDER__DRIVER: mysql
+          SFTPGO_DATA_PROVIDER__NAME: sftpgo
+          SFTPGO_DATA_PROVIDER__HOST: localhost
+          SFTPGO_DATA_PROVIDER__PORT: 3308
+          SFTPGO_DATA_PROVIDER__USERNAME: sftpgo
+          SFTPGO_DATA_PROVIDER__PASSWORD: sftpgo
 
       - name: Run tests using PostgreSQL provider
         run: |
-          go test -v -p 1 -timeout 10m ./... -covermode=atomic
+          ./sftpgo initprovider
+          ./sftpgo resetprovider --force
+          go test -v -tags nopgxregisterdefaulttypes -p 1 -timeout 15m ./... -covermode=atomic
         env:
           SFTPGO_DATA_PROVIDER__DRIVER: postgresql
           SFTPGO_DATA_PROVIDER__NAME: sftpgo
@@ -161,9 +335,11 @@ jobs:
           SFTPGO_DATA_PROVIDER__USERNAME: postgres
           SFTPGO_DATA_PROVIDER__PASSWORD: postgres
 
-      - name: Run tests using MySQL provider
+      - name: Run tests using MariaDB provider
         run: |
-          go test -v -p 1 -timeout 10m ./... -covermode=atomic
+          ./sftpgo initprovider
+          ./sftpgo resetprovider --force
+          go test -v -tags nopgxregisterdefaulttypes -p 1 -timeout 15m ./... -covermode=atomic
         env:
           SFTPGO_DATA_PROVIDER__DRIVER: mysql
           SFTPGO_DATA_PROVIDER__NAME: sftpgo
@@ -171,12 +347,16 @@ jobs:
           SFTPGO_DATA_PROVIDER__PORT: 3307
           SFTPGO_DATA_PROVIDER__USERNAME: sftpgo
           SFTPGO_DATA_PROVIDER__PASSWORD: sftpgo
+          SFTPGO_DATA_PROVIDER__SQL_TABLES_PREFIX: prefix_
 
       - name: Run tests using CockroachDB provider
         run: |
-          docker run --rm --name crdb --health-cmd "curl -I http://127.0.0.1:8080" --health-interval 10s --health-timeout 5s --health-retries 6 -p 26257:26257 -d cockroachdb/cockroach:latest start-single-node --insecure --listen-addr 0.0.0.0:26257
+          docker run --rm --name crdb --health-cmd "curl -I http://127.0.0.1:8080" --health-interval 10s --health-timeout 5s --health-retries 6 -p 26257:26257 -d cockroachdb/cockroach:latest start-single-node --insecure --listen-addr :26257
+          sleep 10
           docker exec crdb cockroach sql --insecure -e 'create database "sftpgo"'
-          go test -v -p 1 -timeout 10m ./... -covermode=atomic
+          ./sftpgo initprovider
+          ./sftpgo resetprovider --force
+          go test -v -tags nopgxregisterdefaulttypes -p 1 -timeout 15m ./... -covermode=atomic
           docker stop crdb
         env:
           SFTPGO_DATA_PROVIDER__DRIVER: cockroachdb
@@ -185,15 +365,18 @@ jobs:
           SFTPGO_DATA_PROVIDER__PORT: 26257
           SFTPGO_DATA_PROVIDER__USERNAME: root
           SFTPGO_DATA_PROVIDER__PASSWORD:
+          SFTPGO_DATA_PROVIDER__TARGET_SESSION_ATTRS: any
+          SFTPGO_DATA_PROVIDER__SQL_TABLES_PREFIX: prefix_
 
   build-linux-packages:
     name: Build Linux packages
-    runs-on: ubuntu-18.04
+    runs-on: ubuntu-latest
     strategy:
       matrix:
         include:
           - arch: amd64
-            go: 1.16
+            distro: ubuntu:18.04
+            go: latest
             go-arch: amd64
           - arch: aarch64
             distro: ubuntu18.04
@@ -203,24 +386,49 @@ jobs:
             distro: ubuntu18.04
             go: latest
             go-arch: ppc64le
+          - arch: armv7
+            distro: ubuntu18.04
+            go: latest
+            go-arch: arm7
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v4
         with:
           fetch-depth: 0
-      - name: Set up Go
-        if: ${{ matrix.arch == 'amd64' }}
-        uses: actions/setup-go@v2
-        with:
-          go-version: ${{ matrix.go }}
+
+      - name: Get commit SHA
+        id: get_commit
+        run: echo "COMMIT=${GITHUB_SHA::8}" >> $GITHUB_OUTPUT
+        shell: bash
 
       - name: Build on amd64
         if: ${{ matrix.arch == 'amd64' }}
         run: |
-          go build -trimpath -ldflags "-s -w -X github.com/drakkan/sftpgo/version.commit=`git describe --always --dirty` -X github.com/drakkan/sftpgo/version.date=`date -u +%FT%TZ`" -o sftpgo
+          echo '#!/bin/bash' > build.sh
+          echo '' >> build.sh
+          echo 'set -e' >> build.sh
+          echo 'apt-get update -q -y' >> build.sh
+          echo 'apt-get install -q -y curl gcc' >> build.sh
+          if [ ${{ matrix.go }} == 'latest' ]
+          then
+            echo 'GO_VERSION=$(curl -L https://go.dev/VERSION?m=text | head -n 1)' >> build.sh
+          else
+            echo 'GO_VERSION=${{ matrix.go }}' >> build.sh
+          fi
+          echo 'GO_DOWNLOAD_ARCH=${{ matrix.go-arch }}' >> build.sh
+          echo 'curl --retry 5 --retry-delay 2 --connect-timeout 10 -o go.tar.gz -L https://go.dev/dl/${GO_VERSION}.linux-${GO_DOWNLOAD_ARCH}.tar.gz' >> build.sh
+          echo 'tar -C /usr/local -xzf go.tar.gz' >> build.sh
+          echo 'export PATH=$PATH:/usr/local/go/bin' >> build.sh
+          echo 'go version' >> build.sh
+          echo 'cd /usr/local/src' >> build.sh
+          echo 'go build -buildvcs=false -trimpath -tags nopgxregisterdefaulttypes -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/internal/version.commit=${{ steps.get_commit.outputs.COMMIT }} -X github.com/drakkan/sftpgo/v2/internal/version.date=`date -u +%FT%TZ`" -o sftpgo' >> build.sh
+
+          chmod 755 build.sh
+          docker run --rm --name ubuntu-build --mount type=bind,source=`pwd`,target=/usr/local/src ${{ matrix.distro }} /usr/local/src/build.sh
           mkdir -p output/{init,bash_completion,zsh_completion}
           cp sftpgo.json output/
           cp -r templates output/
           cp -r static output/
+          cp -r openapi output/
           cp init/sftpgo.service output/init/
           ./sftpgo gen completion bash > output/bash_completion/sftpgo
           ./sftpgo gen completion zsh > output/zsh_completion/_sftpgo
@@ -228,7 +436,7 @@ jobs:
           gzip output/man/man1/*
           cp sftpgo output/
 
-      - uses: uraimo/run-on-arch-action@v2.0.10
+      - uses: uraimo/run-on-arch-action@v2
         if: ${{ matrix.arch != 'amd64' }}
         name: Build for ${{ matrix.arch }}
         id: build
@@ -242,22 +450,33 @@ jobs:
           shell: /bin/bash
           install: |
            apt-get update -q -y
-           apt-get install -q -y curl gcc git
+           apt-get install -q -y curl gcc
            if [ ${{ matrix.go }} == 'latest' ]
            then
-             GO_VERSION=$(curl https://golang.org/VERSION?m=text)
+             GO_VERSION=$(curl -L https://go.dev/VERSION?m=text | head -n 1)
            else
              GO_VERSION=${{ matrix.go }}
            fi
-           curl --retry 5 --retry-delay 2 --connect-timeout 10 -o go.tar.gz -L https://golang.org/dl/${GO_VERSION}.linux-${{ matrix.go-arch }}.tar.gz
+           GO_DOWNLOAD_ARCH=${{ matrix.go-arch }}
+           if [ ${{ matrix.arch}} == 'armv7' ]
+           then
+             GO_DOWNLOAD_ARCH=armv6l
+           fi
+           curl --retry 5 --retry-delay 2 --connect-timeout 10 -o go.tar.gz -L https://go.dev/dl/${GO_VERSION}.linux-${GO_DOWNLOAD_ARCH}.tar.gz
            tar -C /usr/local -xzf go.tar.gz
          run: |
            export PATH=$PATH:/usr/local/go/bin
-           go build -trimpath -ldflags "-s -w -X github.com/drakkan/sftpgo/version.commit=`git describe --always --dirty` -X github.com/drakkan/sftpgo/version.date=`date -u +%FT%TZ`" -o sftpgo
+           go version
+           if [ ${{ matrix.arch}} == 'armv7' ]
+           then
+             export GOARM=7
+           fi
+           go build -buildvcs=false -trimpath -tags nopgxregisterdefaulttypes -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/internal/version.commit=${{ steps.get_commit.outputs.COMMIT }} -X github.com/drakkan/sftpgo/v2/internal/version.date=`date -u +%FT%TZ`" -o sftpgo
            mkdir -p output/{init,bash_completion,zsh_completion}
            cp sftpgo.json output/
            cp -r templates output/
            cp -r static output/
+           cp -r openapi output/
            cp init/sftpgo.service output/init/
            ./sftpgo gen completion bash > output/bash_completion/sftpgo
            ./sftpgo gen completion zsh > output/zsh_completion/_sftpgo
@ -266,7 +485,7 @@ jobs:
|
||||||
cp sftpgo output/
|
cp sftpgo output/
|
||||||
|
|
||||||
- name: Upload build artifact
|
- name: Upload build artifact
|
||||||
uses: actions/upload-artifact@v2
|
uses: actions/upload-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: sftpgo-linux-${{ matrix.arch }}-go-${{ matrix.go }}
|
name: sftpgo-linux-${{ matrix.arch }}-go-${{ matrix.go }}
|
||||||
path: output
|
path: output
|
||||||
|
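The armv7 special-casing above reflects how upstream Go distributes 32-bit ARM: there is a single armv6l toolchain tarball, and the ARM revision the compiled binary targets is chosen at build time via GOARM. The same logic in isolation (a minimal sketch):

    # Go ships one 32-bit ARM toolchain archive, named "armv6l"; it also runs on ARMv7 hosts.
    GO_DOWNLOAD_ARCH=armv6l
    curl -o go.tar.gz -L "https://go.dev/dl/${GO_VERSION}.linux-${GO_DOWNLOAD_ARCH}.tar.gz"
    tar -C /usr/local -xzf go.tar.gz
    export PATH=$PATH:/usr/local/go/bin
    # GOARM selects the ARM revision the produced binary requires (7 = ARMv7)
    GOARM=7 go build -o sftpgo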
@@ -278,16 +497,16 @@ jobs:
  cd pkgs
  ./build.sh
  PKG_VERSION=$(cat dist/version)
- echo "::set-output name=pkg-version::${PKG_VERSION}"
+ echo "pkg-version=${PKG_VERSION}" >> $GITHUB_OUTPUT
  - name: Upload Debian Package
- uses: actions/upload-artifact@v2
+ uses: actions/upload-artifact@v4
  with:
  name: sftpgo-${{ steps.build_linux_pkgs.outputs.pkg-version }}-${{ matrix.go-arch }}-deb
  path: pkgs/dist/deb/*
  - name: Upload RPM Package
- uses: actions/upload-artifact@v2
+ uses: actions/upload-artifact@v4
  with:
  name: sftpgo-${{ steps.build_linux_pkgs.outputs.pkg-version }}-${{ matrix.go-arch }}-rpm
  path: pkgs/dist/rpm/*
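The `::set-output` rewrites in this and the following hunks are the standard migration to `$GITHUB_OUTPUT`: both forms set the same step output, but the workflow-command form is deprecated and no longer works on current GitHub-hosted runners. Side by side:

    # old, deprecated workflow command:
    echo "::set-output name=pkg-version::${PKG_VERSION}"
    # replacement: append key=value to the file whose path is in $GITHUB_OUTPUT
    echo "pkg-version=${PKG_VERSION}" >> "$GITHUB_OUTPUT"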
@@ -296,13 +515,12 @@ jobs:
  name: golangci-lint
  runs-on: ubuntu-latest
  steps:
- - uses: actions/checkout@v2
  - name: Set up Go
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v5
  with:
- go-version: 1.16
+ go-version: '1.22'
+ - uses: actions/checkout@v4
  - name: Run golangci-lint
- uses: golangci/golangci-lint-action@v2
+ uses: golangci/golangci-lint-action@v6
  with:
  version: latest
- skip-go-installation: true

.github/workflows/docker.yml (vendored, 95 lines changed)
@@ -24,17 +24,16 @@ jobs:
  optional_deps:
  - true
  - false
+ include:
+ - os: ubuntu-latest
+ docker_pkg: distroless
+ optional_deps: false
+ - os: ubuntu-latest
+ docker_pkg: debian-plugins
+ optional_deps: true
  steps:
  - name: Checkout
- uses: actions/checkout@v2
+ uses: actions/checkout@v4
- - name: Repo metadata
- id: repo
- uses: actions/github-script@v4
- with:
- script: |
- const repo = await github.repos.get(context.repo)
- return repo.data
  - name: Gather image information
  id: info
@@ -43,6 +42,7 @@ jobs:
  DOCKERFILE=Dockerfile
  MINOR=""
  MAJOR=""
+ FEATURES="nopgxregisterdefaulttypes"
  if [ "${{ github.event_name }}" = "schedule" ]; then
  VERSION=nightly
  elif [[ $GITHUB_REF == refs/tags/* ]]; then
@@ -64,8 +64,18 @@ jobs:
  VERSION="${VERSION}-alpine"
  VERSION_SLIM="${VERSION}-slim"
  DOCKERFILE=Dockerfile.alpine
+ elif [[ $DOCKER_PKG == distroless ]]; then
+ VERSION="${VERSION}-distroless"
+ VERSION_SLIM="${VERSION}-slim"
+ DOCKERFILE=Dockerfile.distroless
+ FEATURES="${FEATURES},nosqlite"
+ elif [[ $DOCKER_PKG == debian-plugins ]]; then
+ VERSION="${VERSION}-plugins"
+ VERSION_SLIM="${VERSION}-slim"
+ FEATURES="${FEATURES},unixcrypt"
+ elif [[ $DOCKER_PKG == debian ]]; then
+ FEATURES="${FEATURES},unixcrypt"
  fi
  DOCKER_IMAGES=("drakkan/sftpgo" "ghcr.io/drakkan/sftpgo")
  TAGS="${DOCKER_IMAGES[0]}:${VERSION}"
  TAGS_SLIM="${DOCKER_IMAGES[0]}:${VERSION_SLIM}"
@@ -83,6 +93,20 @@ jobs:
  fi
  TAGS="${TAGS},${DOCKER_IMAGE}:latest"
  TAGS_SLIM="${TAGS_SLIM},${DOCKER_IMAGE}:slim"
+ elif [[ $DOCKER_PKG == distroless ]]; then
+ if [[ -n $MAJOR && -n $MINOR ]]; then
+ TAGS="${TAGS},${DOCKER_IMAGE}:${MINOR}-distroless,${DOCKER_IMAGE}:${MAJOR}-distroless"
+ TAGS_SLIM="${TAGS_SLIM},${DOCKER_IMAGE}:${MINOR}-distroless-slim,${DOCKER_IMAGE}:${MAJOR}-distroless-slim"
+ fi
+ TAGS="${TAGS},${DOCKER_IMAGE}:distroless"
+ TAGS_SLIM="${TAGS_SLIM},${DOCKER_IMAGE}:distroless-slim"
+ elif [[ $DOCKER_PKG == debian-plugins ]]; then
+ if [[ -n $MAJOR && -n $MINOR ]]; then
+ TAGS="${TAGS},${DOCKER_IMAGE}:${MINOR}-plugins,${DOCKER_IMAGE}:${MAJOR}-plugins"
+ TAGS_SLIM="${TAGS_SLIM},${DOCKER_IMAGE}:${MINOR}-plugins-slim,${DOCKER_IMAGE}:${MAJOR}-plugins-slim"
+ fi
+ TAGS="${TAGS},${DOCKER_IMAGE}:plugins"
+ TAGS_SLIM="${TAGS_SLIM},${DOCKER_IMAGE}:plugins-slim"
  else
  if [[ -n $MAJOR && -n $MINOR ]]; then
  TAGS="${TAGS},${DOCKER_IMAGE}:${MINOR}-alpine,${DOCKER_IMAGE}:${MAJOR}-alpine"
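To make the tag construction concrete: for a hypothetical release where VERSION=2.6.0, MINOR=2.6 and MAJOR=2 (values invented for illustration), the debian-plugins variant would accumulate roughly this list per image name:

    # TAGS after the loop, for the first image (the same four repeat for ghcr.io/drakkan/sftpgo):
    #   drakkan/sftpgo:2.6.0-plugins,drakkan/sftpgo:2.6-plugins,drakkan/sftpgo:2-plugins,drakkan/sftpgo:plugins
    echo "tags=${TAGS}" >> "$GITHUB_OUTPUT"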
@@ -95,61 +119,70 @@ jobs:
  done
  if [[ $OPTIONAL_DEPS == true ]]; then
- echo ::set-output name=version::${VERSION}
+ echo "version=${VERSION}" >> $GITHUB_OUTPUT
- echo ::set-output name=tags::${TAGS}
+ echo "tags=${TAGS}" >> $GITHUB_OUTPUT
- echo ::set-output name=full::true
+ echo "full=true" >> $GITHUB_OUTPUT
  else
- echo ::set-output name=version::${VERSION_SLIM}
+ echo "version=${VERSION_SLIM}" >> $GITHUB_OUTPUT
- echo ::set-output name=tags::${TAGS_SLIM}
+ echo "tags=${TAGS_SLIM}" >> $GITHUB_OUTPUT
- echo ::set-output name=full::false
+ echo "full=false" >> $GITHUB_OUTPUT
  fi
- echo ::set-output name=dockerfile::${DOCKERFILE}
- echo ::set-output name=created::$(date -u +'%Y-%m-%dT%H:%M:%SZ')
- echo ::set-output name=sha::${GITHUB_SHA::8}
+ if [[ $DOCKER_PKG == debian-plugins ]]; then
+ echo "plugins=true" >> $GITHUB_OUTPUT
+ else
+ echo "plugins=false" >> $GITHUB_OUTPUT
+ fi
+ echo "dockerfile=${DOCKERFILE}" >> $GITHUB_OUTPUT
+ echo "features=${FEATURES}" >> $GITHUB_OUTPUT
+ echo "created=$(date -u +'%Y-%m-%dT%H:%M:%SZ')" >> $GITHUB_OUTPUT
+ echo "sha=${GITHUB_SHA::8}" >> $GITHUB_OUTPUT
  env:
  DOCKER_PKG: ${{ matrix.docker_pkg }}
  OPTIONAL_DEPS: ${{ matrix.optional_deps }}
  - name: Set up QEMU
- uses: docker/setup-qemu-action@v1
+ uses: docker/setup-qemu-action@v3
  - name: Set up builder
- uses: docker/setup-buildx-action@v1
+ uses: docker/setup-buildx-action@v3
  id: builder
  - name: Login to Docker Hub
- uses: docker/login-action@v1
+ uses: docker/login-action@v3
  with:
  username: ${{ secrets.DOCKERHUB_USERNAME }}
  password: ${{ secrets.DOCKERHUB_TOKEN }}
  if: ${{ github.event_name != 'pull_request' }}
  - name: Login to GitHub Container Registry
- uses: docker/login-action@v1
+ uses: docker/login-action@v3
  with:
  registry: ghcr.io
  username: ${{ github.repository_owner }}
- password: ${{ secrets.CR_PAT }}
+ password: ${{ secrets.GITHUB_TOKEN }}
  if: ${{ github.event_name != 'pull_request' }}
  - name: Build and push
- uses: docker/build-push-action@v2
+ uses: docker/build-push-action@v6
  with:
+ context: .
  builder: ${{ steps.builder.outputs.name }}
  file: ./${{ steps.info.outputs.dockerfile }}
- platforms: linux/amd64,linux/arm64,linux/ppc64le
+ platforms: linux/amd64,linux/arm64,linux/ppc64le,linux/arm/v7
  push: ${{ github.event_name != 'pull_request' }}
  tags: ${{ steps.info.outputs.tags }}
  build-args: |
  COMMIT_SHA=${{ steps.info.outputs.sha }}
  INSTALL_OPTIONAL_PACKAGES=${{ steps.info.outputs.full }}
+ DOWNLOAD_PLUGINS=${{ steps.info.outputs.plugins }}
+ FEATURES=${{ steps.info.outputs.features }}
  labels: |
  org.opencontainers.image.title=SFTPGo
- org.opencontainers.image.description=Fully featured and highly configurable SFTP server with optional FTP/S and WebDAV support
+ org.opencontainers.image.description=Full-featured and highly configurable file transfer server: SFTP, HTTP/S,FTP/S, WebDAV
- org.opencontainers.image.url=${{ fromJson(steps.repo.outputs.result).html_url }}
+ org.opencontainers.image.url=https://github.com/drakkan/sftpgo
- org.opencontainers.image.documentation=${{ fromJson(steps.repo.outputs.result).html_url }}/blob/${{ github.sha }}/docker/README.md
+ org.opencontainers.image.documentation=https://github.com/drakkan/sftpgo/blob/${{ github.sha }}/docker/README.md
- org.opencontainers.image.source=${{ fromJson(steps.repo.outputs.result).html_url }}
+ org.opencontainers.image.source=https://github.com/drakkan/sftpgo
  org.opencontainers.image.version=${{ steps.info.outputs.version }}
  org.opencontainers.image.created=${{ steps.info.outputs.created }}
  org.opencontainers.image.revision=${{ github.sha }}
- org.opencontainers.image.licenses=${{ fromJson(steps.repo.outputs.result).license.spdx_id }}
+ org.opencontainers.image.licenses=AGPL-3.0-only
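Outside of Actions, the same multi-arch build can be approximated locally with buildx; a rough equivalent of the step above (image name, tag and commit hash are placeholders, not the workflow's real values):

    docker buildx build \
      --platform linux/amd64,linux/arm64,linux/ppc64le,linux/arm/v7 \
      --file Dockerfile \
      --build-arg COMMIT_SHA=00000000 \
      --build-arg INSTALL_OPTIONAL_PACKAGES=false \
      --build-arg DOWNLOAD_PLUGINS=false \
      --build-arg FEATURES=nopgxregisterdefaulttypes \
      --tag drakkan/sftpgo:dev \
      --push .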

.github/workflows/release.yml (vendored, 304 lines changed)
@@ -5,33 +5,34 @@ on:
  tags: 'v*'
  env:
- GO_VERSION: 1.16.5
+ GO_VERSION: 1.22.4
  jobs:
  prepare-sources-with-deps:
  name: Prepare sources with deps
  runs-on: ubuntu-latest
  steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v4
  - name: Set up Go
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v5
  with:
  go-version: ${{ env.GO_VERSION }}
  - name: Get SFTPGo version
  id: get_version
- run: echo ::set-output name=VERSION::${GITHUB_REF/refs\/tags\//}
+ run: echo "VERSION=${GITHUB_REF/refs\/tags\//}" >> $GITHUB_OUTPUT
  - name: Prepare release
  run: |
  go mod vendor
  echo "${SFTPGO_VERSION}" > VERSION.txt
+ echo "${GITHUB_SHA::8}" >> VERSION.txt
  tar cJvf sftpgo_${SFTPGO_VERSION}_src_with_deps.tar.xz *
  env:
  SFTPGO_VERSION: ${{ steps.get_version.outputs.VERSION }}
  - name: Upload build artifact
- uses: actions/upload-artifact@v2
+ uses: actions/upload-artifact@v4
  with:
  name: sftpgo_${{ steps.get_version.outputs.VERSION }}_src_with_deps.tar.xz
  path: ./sftpgo_${{ steps.get_version.outputs.VERSION }}_src_with_deps.tar.xz
@@ -42,37 +43,18 @@ jobs:
  runs-on: ${{ matrix.os }}
  strategy:
  matrix:
- os: [macos-10.15, windows-2019]
+ os: [macos-12, windows-2022]
  steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v4
  - name: Set up Go
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v5
  with:
  go-version: ${{ env.GO_VERSION }}
- - name: Build for macOS x86_64
- if: startsWith(matrix.os, 'windows-') != true
- run: go build -trimpath -ldflags "-s -w -X github.com/drakkan/sftpgo/version.commit=`git describe --always --dirty` -X github.com/drakkan/sftpgo/version.date=`date -u +%FT%TZ`" -o sftpgo
- - name: Build for macOS arm64
- if: startsWith(matrix.os, 'macos-') == true
- run: CGO_ENABLED=1 GOOS=darwin GOARCH=arm64 SDKROOT=$(xcrun --sdk macosx --show-sdk-path) go build -trimpath -ldflags "-s -w -X github.com/drakkan/sftpgo/version.commit=`git describe --always --dirty` -X github.com/drakkan/sftpgo/version.date=`date -u +%FT%TZ`" -o sftpgo_arm64
- - name: Build for Windows
- if: startsWith(matrix.os, 'windows-')
- run: |
- $GIT_COMMIT = (git describe --always --dirty) | Out-String
- $DATE_TIME = ([datetime]::Now.ToUniversalTime().toString("yyyy-MM-ddTHH:mm:ssZ")) | Out-String
- go build -trimpath -ldflags "-s -w -X github.com/drakkan/sftpgo/version.commit=$GIT_COMMIT -X github.com/drakkan/sftpgo/version.date=$DATE_TIME" -o sftpgo.exe
- - name: Initialize data provider
- run: ./sftpgo initprovider
- shell: bash
  - name: Get SFTPGo version
  id: get_version
- run: echo ::set-output name=VERSION::${GITHUB_REF/refs\/tags\//}
+ run: echo "VERSION=${GITHUB_REF/refs\/tags\//}" >> $GITHUB_OUTPUT
  shell: bash
  - name: Get OS name
@@ -80,14 +62,51 @@ jobs:
  run: |
  if [[ $MATRIX_OS =~ ^macos.* ]]
  then
- echo ::set-output name=OS::macOS
+ echo "OS=macOS" >> $GITHUB_OUTPUT
  else
- echo ::set-output name=OS::windows
+ echo "OS=windows" >> $GITHUB_OUTPUT
  fi
  shell: bash
  env:
  MATRIX_OS: ${{ matrix.os }}
+ - name: Build for macOS x86_64
+ if: startsWith(matrix.os, 'windows-') != true
+ run: go build -trimpath -tags nopgxregisterdefaulttypes -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/internal/version.commit=`git describe --always --abbrev=8 --dirty` -X github.com/drakkan/sftpgo/v2/internal/version.date=`date -u +%FT%TZ`" -o sftpgo
+ - name: Build for macOS arm64
+ if: startsWith(matrix.os, 'macos-') == true
+ run: CGO_ENABLED=1 GOOS=darwin GOARCH=arm64 SDKROOT=$(xcrun --sdk macosx --show-sdk-path) go build -trimpath -tags nopgxregisterdefaulttypes -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/internal/version.commit=`git describe --always --abbrev=8 --dirty` -X github.com/drakkan/sftpgo/v2/internal/version.date=`date -u +%FT%TZ`" -o sftpgo_arm64
+ - name: Build for Windows
+ if: startsWith(matrix.os, 'windows-')
+ run: |
+ $GIT_COMMIT = (git describe --always --abbrev=8 --dirty) | Out-String
+ $DATE_TIME = ([datetime]::Now.ToUniversalTime().toString("yyyy-MM-ddTHH:mm:ssZ")) | Out-String
+ $FILE_VERSION = $Env:SFTPGO_VERSION.substring(1) + ".0"
+ go install github.com/tc-hib/go-winres@latest
+ go-winres simply --arch amd64 --product-version $Env:SFTPGO_VERSION-$GIT_COMMIT --file-version $FILE_VERSION --file-description "SFTPGo server" --product-name SFTPGo --copyright "AGPL-3.0" --original-filename sftpgo.exe --icon .\windows-installer\icon.ico
+ go build -trimpath -tags nopgxregisterdefaulttypes -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/internal/version.commit=$GIT_COMMIT -X github.com/drakkan/sftpgo/v2/internal/version.date=$DATE_TIME" -o sftpgo.exe
+ mkdir arm64
+ $Env:CGO_ENABLED='0'
+ $Env:GOOS='windows'
+ $Env:GOARCH='arm64'
+ go-winres simply --arch arm64 --product-version $Env:SFTPGO_VERSION-$GIT_COMMIT --file-version $FILE_VERSION --file-description "SFTPGo server" --product-name SFTPGo --copyright "AGPL-3.0" --original-filename sftpgo.exe --icon .\windows-installer\icon.ico
+ go build -trimpath -tags nopgxregisterdefaulttypes,nosqlite -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/internal/version.commit=$GIT_COMMIT -X github.com/drakkan/sftpgo/v2/internal/version.date=$DATE_TIME" -o .\arm64\sftpgo.exe
+ mkdir x86
+ $Env:GOARCH='386'
+ go-winres simply --arch 386 --product-version $Env:SFTPGO_VERSION-$GIT_COMMIT --file-version $FILE_VERSION --file-description "SFTPGo server" --product-name SFTPGo --copyright "AGPL-3.0" --original-filename sftpgo.exe --icon .\windows-installer\icon.ico
+ go build -trimpath -tags nopgxregisterdefaulttypes,nosqlite -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/internal/version.commit=$GIT_COMMIT -X github.com/drakkan/sftpgo/v2/internal/version.date=$DATE_TIME" -o .\x86\sftpgo.exe
+ Remove-Item Env:\CGO_ENABLED
+ Remove-Item Env:\GOOS
+ Remove-Item Env:\GOARCH
+ env:
+ SFTPGO_VERSION: ${{ steps.get_version.outputs.VERSION }}
+ - name: Initialize data provider
+ run: ./sftpgo initprovider
+ shell: bash
  - name: Prepare Release for macOS
  if: startsWith(matrix.os, 'macos-')
  run: |
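The new Windows build step leans on go-winres to embed version metadata and the icon into the executables before compiling. The shape of a single arch's build, reduced to its essentials in bash form (version strings and hash are hypothetical):

    go install github.com/tc-hib/go-winres@latest
    # writes .syso resource files into the package directory; `go build` links them in automatically
    go-winres simply --arch arm64 --product-version v2.6.0-00000000 --file-version 2.6.0.0 \
      --file-description "SFTPGo server" --product-name SFTPGo --original-filename sftpgo.exe
    # cross-compile; CGO is disabled, so the nosqlite tag swaps SQLite out of the build
    CGO_ENABLED=0 GOOS=windows GOARCH=arm64 go build -trimpath \
      -tags nopgxregisterdefaulttypes,nosqlite -o sftpgo.exe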
@@ -100,6 +119,7 @@ jobs:
  cp sftpgo.json output/
  cp sftpgo.db output/sqlite/
  cp -r static output/
+ cp -r openapi output/
  cp -r templates output/
  cp init/com.github.drakkan.sftpgo.plist output/init/
  ./sftpgo gen completion bash > output/bash_completion/sftpgo
@@ -129,31 +149,65 @@ jobs:
  xcopy .\templates .\output\templates\ /E
  mkdir output\static
  xcopy .\static .\output\static\ /E
- iscc windows-installer\sftpgo.iss
+ mkdir output\openapi
+ xcopy .\openapi .\output\openapi\ /E
+ $CERT_PATH=(Get-Location -PSProvider FileSystem).ProviderPath + "\cert.pfx"
+ [IO.File]::WriteAllBytes($CERT_PATH,[System.Convert]::FromBase64String($Env:CERT_DATA))
+ certutil -f -p "$Env:CERT_PASS" -importpfx MY "$CERT_PATH"
+ rm "$CERT_PATH"
+ & 'C:/Program Files (x86)/Windows Kits/10/bin/10.0.20348.0/x86/signtool.exe' sign /sm /tr http://timestamp.sectigo.com /td sha256 /fd sha256 /n "Nicola Murino" /d "SFTPGo" .\sftpgo.exe
+ & 'C:/Program Files (x86)/Windows Kits/10/bin/10.0.20348.0/x86/signtool.exe' sign /sm /tr http://timestamp.sectigo.com /td sha256 /fd sha256 /n "Nicola Murino" /d "SFTPGo" .\arm64\sftpgo.exe
+ & 'C:/Program Files (x86)/Windows Kits/10/bin/10.0.20348.0/x86/signtool.exe' sign /sm /tr http://timestamp.sectigo.com /td sha256 /fd sha256 /n "Nicola Murino" /d "SFTPGo" .\x86\sftpgo.exe
+ $INNO_S='/Ssigntool=$qC:/Program Files (x86)/Windows Kits/10/bin/10.0.20348.0/x86/signtool.exe$q sign /sm /tr http://timestamp.sectigo.com /td sha256 /fd sha256 /n $qNicola Murino$q /d $qSFTPGo$q $f'
+ iscc "$INNO_S" .\windows-installer\sftpgo.iss
+ rm .\output\sftpgo.exe
+ rm .\output\sftpgo.db
+ copy .\arm64\sftpgo.exe .\output
+ (Get-Content .\output\sftpgo.json).replace('"sqlite"', '"bolt"') | Set-Content .\output\sftpgo.json
+ $Env:SFTPGO_DATA_PROVIDER__DRIVER='bolt'
+ $Env:SFTPGO_DATA_PROVIDER__NAME='.\output\sftpgo.db'
+ .\sftpgo.exe initprovider
+ Remove-Item Env:\SFTPGO_DATA_PROVIDER__DRIVER
+ Remove-Item Env:\SFTPGO_DATA_PROVIDER__NAME
+ $Env:SFTPGO_ISS_ARCH='arm64'
+ iscc "$INNO_S" .\windows-installer\sftpgo.iss
+ rm .\output\sftpgo.exe
+ copy .\x86\sftpgo.exe .\output
+ $Env:SFTPGO_ISS_ARCH='x86'
+ iscc "$INNO_S" .\windows-installer\sftpgo.iss
+ certutil -delstore MY "Nicola Murino"
  env:
  SFTPGO_ISS_VERSION: ${{ steps.get_version.outputs.VERSION }}
  SFTPGO_ISS_DOC_URL: https://github.com/drakkan/sftpgo/blob/${{ steps.get_version.outputs.VERSION }}/README.md
+ CERT_DATA: ${{ secrets.CERT_DATA }}
+ CERT_PASS: ${{ secrets.CERT_PASS }}
  - name: Prepare Portable Release for Windows
  if: startsWith(matrix.os, 'windows-')
  run: |
  mkdir win-portable
  copy .\sftpgo.exe .\win-portable
+ mkdir win-portable\arm64
+ copy .\arm64\sftpgo.exe .\win-portable\arm64
+ mkdir win-portable\x86
+ copy .\x86\sftpgo.exe .\win-portable\x86
  copy .\sftpgo.json .\win-portable
- copy .\sftpgo.db .\win-portable
+ (Get-Content .\win-portable\sftpgo.json).replace('"sqlite"', '"bolt"') | Set-Content .\win-portable\sftpgo.json
+ copy .\output\sftpgo.db .\win-portable
  copy .\LICENSE .\win-portable\LICENSE.txt
  mkdir win-portable\templates
  xcopy .\templates .\win-portable\templates\ /E
  mkdir win-portable\static
  xcopy .\static .\win-portable\static\ /E
- Compress-Archive .\win-portable\* sftpgo_portable_x86_64.zip
+ mkdir win-portable\openapi
- env:
+ xcopy .\openapi .\win-portable\openapi\ /E
- SFTPGO_VERSION: ${{ steps.get_version.outputs.VERSION }}
+ Compress-Archive .\win-portable\* sftpgo_portable.zip
- OS: ${{ steps.get_os_name.outputs.OS }}
  - name: Upload macOS x86_64 artifact
  if: startsWith(matrix.os, 'macos-')
- uses: actions/upload-artifact@v2
+ uses: actions/upload-artifact@v4
  with:
  name: sftpgo_${{ steps.get_version.outputs.VERSION }}_${{ steps.get_os_name.outputs.OS }}_x86_64.tar.xz
  path: ./sftpgo_${{ steps.get_version.outputs.VERSION }}_${{ steps.get_os_name.outputs.OS }}_x86_64.tar.xz
|
@ -161,35 +215,52 @@ jobs:
|
||||||
|
|
||||||
- name: Upload macOS arm64 artifact
|
- name: Upload macOS arm64 artifact
|
||||||
if: startsWith(matrix.os, 'macos-')
|
if: startsWith(matrix.os, 'macos-')
|
||||||
uses: actions/upload-artifact@v2
|
uses: actions/upload-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: sftpgo_${{ steps.get_version.outputs.VERSION }}_${{ steps.get_os_name.outputs.OS }}_arm64.tar.xz
|
name: sftpgo_${{ steps.get_version.outputs.VERSION }}_${{ steps.get_os_name.outputs.OS }}_arm64.tar.xz
|
||||||
path: ./sftpgo_${{ steps.get_version.outputs.VERSION }}_${{ steps.get_os_name.outputs.OS }}_arm64.tar.xz
|
path: ./sftpgo_${{ steps.get_version.outputs.VERSION }}_${{ steps.get_os_name.outputs.OS }}_arm64.tar.xz
|
||||||
retention-days: 1
|
retention-days: 1
|
||||||
|
|
||||||
- name: Upload Windows installer artifact
|
- name: Upload Windows installer x86_64 artifact
|
||||||
if: startsWith(matrix.os, 'windows-')
|
if: startsWith(matrix.os, 'windows-')
|
||||||
uses: actions/upload-artifact@v2
|
uses: actions/upload-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: sftpgo_${{ steps.get_version.outputs.VERSION }}_${{ steps.get_os_name.outputs.OS }}_x86_64.exe
|
name: sftpgo_${{ steps.get_version.outputs.VERSION }}_${{ steps.get_os_name.outputs.OS }}_x86_64.exe
|
||||||
path: ./sftpgo_windows_x86_64.exe
|
path: ./sftpgo_windows_x86_64.exe
|
||||||
retention-days: 1
|
retention-days: 1
|
||||||
|
|
||||||
|
- name: Upload Windows installer arm64 artifact
|
||||||
|
if: startsWith(matrix.os, 'windows-')
|
||||||
|
uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: sftpgo_${{ steps.get_version.outputs.VERSION }}_${{ steps.get_os_name.outputs.OS }}_arm64.exe
|
||||||
|
path: ./sftpgo_windows_arm64.exe
|
||||||
|
retention-days: 1
|
||||||
|
|
||||||
|
- name: Upload Windows installer x86 artifact
|
||||||
|
if: startsWith(matrix.os, 'windows-')
|
||||||
|
uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: sftpgo_${{ steps.get_version.outputs.VERSION }}_${{ steps.get_os_name.outputs.OS }}_x86.exe
|
||||||
|
path: ./sftpgo_windows_x86.exe
|
||||||
|
retention-days: 1
|
||||||
|
|
||||||
- name: Upload Windows portable artifact
|
- name: Upload Windows portable artifact
|
||||||
if: startsWith(matrix.os, 'windows-')
|
if: startsWith(matrix.os, 'windows-')
|
||||||
uses: actions/upload-artifact@v2
|
uses: actions/upload-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: sftpgo_${{ steps.get_version.outputs.VERSION }}_${{ steps.get_os_name.outputs.OS }}_portable_x86_64.zip
|
name: sftpgo_${{ steps.get_version.outputs.VERSION }}_${{ steps.get_os_name.outputs.OS }}_portable.zip
|
||||||
path: ./sftpgo_portable_x86_64.zip
|
path: ./sftpgo_portable.zip
|
||||||
retention-days: 1
|
retention-days: 1
|
||||||
|
|
||||||
  prepare-linux:
  name: Prepare Linux binaries
- runs-on: ubuntu-18.04
+ runs-on: ubuntu-latest
  strategy:
  matrix:
  include:
  - arch: amd64
+ distro: ubuntu:18.04
  go-arch: amd64
  deb-arch: amd64
  rpm-arch: x86_64
@@ -206,20 +277,22 @@ jobs:
  deb-arch: ppc64el
  rpm-arch: ppc64le
  tar-arch: ppc64le
+ - arch: armv7
+ distro: ubuntu18.04
+ go-arch: arm7
+ deb-arch: armhf
+ rpm-arch: armv7hl
+ tar-arch: armv7
  steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v4
- - name: Set up Go
- if: ${{ matrix.arch == 'amd64' }}
- uses: actions/setup-go@v2
- with:
- go-version: ${{ env.GO_VERSION }}
  - name: Get versions
  id: get_version
  run: |
- echo ::set-output name=SFTPGO_VERSION::${GITHUB_REF/refs\/tags\//}
+ echo "SFTPGO_VERSION=${GITHUB_REF/refs\/tags\//}" >> $GITHUB_OUTPUT
- echo ::set-output name=GO_VERSION::${GO_VERSION}
+ echo "GO_VERSION=${GO_VERSION}" >> $GITHUB_OUTPUT
+ echo "COMMIT=${GITHUB_SHA::8}" >> $GITHUB_OUTPUT
  shell: bash
  env:
  GO_VERSION: ${{ env.GO_VERSION }}
@@ -227,7 +300,20 @@ jobs:
  - name: Build on amd64
  if: ${{ matrix.arch == 'amd64' }}
  run: |
- go build -trimpath -ldflags "-s -w -X github.com/drakkan/sftpgo/version.commit=`git describe --always --dirty` -X github.com/drakkan/sftpgo/version.date=`date -u +%FT%TZ`" -o sftpgo
+ echo '#!/bin/bash' > build.sh
+ echo '' >> build.sh
+ echo 'set -e' >> build.sh
+ echo 'apt-get update -q -y' >> build.sh
+ echo 'apt-get install -q -y curl gcc' >> build.sh
+ echo 'curl --retry 5 --retry-delay 2 --connect-timeout 10 -o go.tar.gz -L https://go.dev/dl/go${{ steps.get_version.outputs.GO_VERSION }}.linux-${{ matrix.go-arch }}.tar.gz' >> build.sh
+ echo 'tar -C /usr/local -xzf go.tar.gz' >> build.sh
+ echo 'export PATH=$PATH:/usr/local/go/bin' >> build.sh
+ echo 'go version' >> build.sh
+ echo 'cd /usr/local/src' >> build.sh
+ echo 'go build -buildvcs=false -trimpath -tags nopgxregisterdefaulttypes -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/internal/version.commit=${{ steps.get_version.outputs.COMMIT }} -X github.com/drakkan/sftpgo/v2/internal/version.date=`date -u +%FT%TZ`" -o sftpgo' >> build.sh
+ chmod 755 build.sh
+ docker run --rm --name ubuntu-build --mount type=bind,source=`pwd`,target=/usr/local/src ${{ matrix.distro }} /usr/local/src/build.sh
  mkdir -p output/{init,sqlite,bash_completion,zsh_completion}
  echo "For documentation please take a look here:" > output/README.txt
  echo "" >> output/README.txt
@@ -236,6 +322,7 @@ jobs:
  cp sftpgo.json output/
  cp -r templates output/
  cp -r static output/
+ cp -r openapi output/
  cp init/sftpgo.service output/init/
  ./sftpgo initprovider
  ./sftpgo gen completion bash > output/bash_completion/sftpgo
@@ -250,7 +337,7 @@ jobs:
  env:
  SFTPGO_VERSION: ${{ steps.get_version.outputs.SFTPGO_VERSION }}
- - uses: uraimo/run-on-arch-action@v2.0.10
+ - uses: uraimo/run-on-arch-action@v2
  if: ${{ matrix.arch != 'amd64' }}
  name: Build for ${{ matrix.arch }}
  id: build
@@ -264,12 +351,18 @@ jobs:
  shell: /bin/bash
  install: |
  apt-get update -q -y
- apt-get install -q -y curl gcc git xz-utils
+ apt-get install -q -y curl gcc xz-utils
- curl --retry 5 --retry-delay 2 --connect-timeout 10 -o go.tar.gz -L https://golang.org/dl/go${{ steps.get_version.outputs.GO_VERSION }}.linux-${{ matrix.go-arch }}.tar.gz
+ GO_DOWNLOAD_ARCH=${{ matrix.go-arch }}
+ if [ ${{ matrix.arch}} == 'armv7' ]
+ then
+ GO_DOWNLOAD_ARCH=armv6l
+ fi
+ curl --retry 5 --retry-delay 2 --connect-timeout 10 -o go.tar.gz -L https://go.dev/dl/go${{ steps.get_version.outputs.GO_VERSION }}.linux-${GO_DOWNLOAD_ARCH}.tar.gz
  tar -C /usr/local -xzf go.tar.gz
  run: |
  export PATH=$PATH:/usr/local/go/bin
- go build -trimpath -ldflags "-s -w -X github.com/drakkan/sftpgo/version.commit=`git describe --always --dirty` -X github.com/drakkan/sftpgo/version.date=`date -u +%FT%TZ`" -o sftpgo
+ go version
+ go build -buildvcs=false -trimpath -tags nopgxregisterdefaulttypes -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/internal/version.commit=${{ steps.get_version.outputs.COMMIT }} -X github.com/drakkan/sftpgo/v2/internal/version.date=`date -u +%FT%TZ`" -o sftpgo
  mkdir -p output/{init,sqlite,bash_completion,zsh_completion}
  echo "For documentation please take a look here:" > output/README.txt
  echo "" >> output/README.txt
@@ -278,6 +371,7 @@ jobs:
  cp sftpgo.json output/
  cp -r templates output/
  cp -r static output/
+ cp -r openapi output/
  cp init/sftpgo.service output/init/
  ./sftpgo initprovider
  ./sftpgo gen completion bash > output/bash_completion/sftpgo
@@ -291,7 +385,7 @@ jobs:
  cd ..
  - name: Upload build artifact for ${{ matrix.arch }}
- uses: actions/upload-artifact@v2
+ uses: actions/upload-artifact@v4
  with:
  name: sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_linux_${{ matrix.tar-arch }}.tar.xz
  path: ./output/sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_linux_${{ matrix.tar-arch }}.tar.xz
@@ -304,19 +398,19 @@ jobs:
  cd pkgs
  ./build.sh
  PKG_VERSION=${SFTPGO_VERSION:1}
- echo "::set-output name=pkg-version::${PKG_VERSION}"
+ echo "pkg-version=${PKG_VERSION}" >> $GITHUB_OUTPUT
  env:
  SFTPGO_VERSION: ${{ steps.get_version.outputs.SFTPGO_VERSION }}
  - name: Upload Deb Package
- uses: actions/upload-artifact@v2
+ uses: actions/upload-artifact@v4
  with:
  name: sftpgo_${{ steps.build_linux_pkgs.outputs.pkg-version }}-1_${{ matrix.deb-arch}}.deb
  path: ./pkgs/dist/deb/sftpgo_${{ steps.build_linux_pkgs.outputs.pkg-version }}-1_${{ matrix.deb-arch}}.deb
  retention-days: 1
  - name: Upload RPM Package
- uses: actions/upload-artifact@v2
+ uses: actions/upload-artifact@v4
  with:
  name: sftpgo-${{ steps.build_linux_pkgs.outputs.pkg-version }}-1.${{ matrix.rpm-arch}}.rpm
  path: ./pkgs/dist/rpm/sftpgo-${{ steps.build_linux_pkgs.outputs.pkg-version }}-1.${{ matrix.rpm-arch}}.rpm
@@ -331,34 +425,41 @@ jobs:
  - name: Get versions
  id: get_version
  run: |
- echo ::set-output name=SFTPGO_VERSION::${GITHUB_REF/refs\/tags\//}
+ echo "SFTPGO_VERSION=${GITHUB_REF/refs\/tags\//}" >> $GITHUB_OUTPUT
  shell: bash
  - name: Download amd64 artifact
- uses: actions/download-artifact@v2
+ uses: actions/download-artifact@v4
  with:
  name: sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_linux_x86_64.tar.xz
  - name: Download arm64 artifact
- uses: actions/download-artifact@v2
+ uses: actions/download-artifact@v4
  with:
  name: sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_linux_arm64.tar.xz
  - name: Download ppc64le artifact
- uses: actions/download-artifact@v2
+ uses: actions/download-artifact@v4
  with:
  name: sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_linux_ppc64le.tar.xz
+ - name: Download armv7 artifact
+ uses: actions/download-artifact@v4
+ with:
+ name: sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_linux_armv7.tar.xz
  - name: Build bundle
  shell: bash
  run: |
- mkdir -p bundle/{arm64,ppc64le}
+ mkdir -p bundle/{arm64,ppc64le,armv7}
  cd bundle
  tar xvf ../sftpgo_${SFTPGO_VERSION}_linux_x86_64.tar.xz
  cd arm64
  tar xvf ../../sftpgo_${SFTPGO_VERSION}_linux_arm64.tar.xz sftpgo
  cd ../ppc64le
  tar xvf ../../sftpgo_${SFTPGO_VERSION}_linux_ppc64le.tar.xz sftpgo
+ cd ../armv7
+ tar xvf ../../sftpgo_${SFTPGO_VERSION}_linux_armv7.tar.xz sftpgo
  cd ..
  tar cJvf sftpgo_${SFTPGO_VERSION}_linux_bundle.tar.xz *
  cd ..
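The resulting bundle archive carries the full x86_64 release tree at the top level and one bare binary per extra architecture; roughly (illustrative listing, not captured from a real run):

    tar tJf "sftpgo_${SFTPGO_VERSION}_linux_bundle.tar.xz"
    # sftpgo, sftpgo.json, templates/, static/, ...   <- complete x86_64 output tree
    # arm64/sftpgo
    # ppc64le/sftpgo
    # armv7/sftpgo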
@@ -366,7 +467,7 @@ jobs:
  SFTPGO_VERSION: ${{ steps.get_version.outputs.SFTPGO_VERSION }}
  - name: Upload Linux bundle
- uses: actions/upload-artifact@v2
+ uses: actions/upload-artifact@v4
  with:
  name: sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_linux_bundle.tar.xz
  path: ./bundle/sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_linux_bundle.tar.xz
@@ -378,95 +479,122 @@ jobs:
  runs-on: ubuntu-latest
  steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v4
  - name: Get versions
  id: get_version
  run: |
  SFTPGO_VERSION=${GITHUB_REF/refs\/tags\//}
  PKG_VERSION=${SFTPGO_VERSION:1}
- echo ::set-output name=SFTPGO_VERSION::${SFTPGO_VERSION}
+ echo "SFTPGO_VERSION=${SFTPGO_VERSION}" >> $GITHUB_OUTPUT
- echo "::set-output name=PKG_VERSION::${PKG_VERSION}"
+ echo "PKG_VERSION=${PKG_VERSION}" >> $GITHUB_OUTPUT
  shell: bash
  - name: Download amd64 artifact
- uses: actions/download-artifact@v2
+ uses: actions/download-artifact@v4
  with:
  name: sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_linux_x86_64.tar.xz
  - name: Download arm64 artifact
- uses: actions/download-artifact@v2
+ uses: actions/download-artifact@v4
  with:
  name: sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_linux_arm64.tar.xz
  - name: Download ppc64le artifact
- uses: actions/download-artifact@v2
+ uses: actions/download-artifact@v4
  with:
  name: sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_linux_ppc64le.tar.xz
+ - name: Download armv7 artifact
+ uses: actions/download-artifact@v4
+ with:
+ name: sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_linux_armv7.tar.xz
  - name: Download Linux bundle artifact
- uses: actions/download-artifact@v2
+ uses: actions/download-artifact@v4
  with:
  name: sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_linux_bundle.tar.xz
  - name: Download Deb amd64 artifact
- uses: actions/download-artifact@v2
+ uses: actions/download-artifact@v4
  with:
  name: sftpgo_${{ steps.get_version.outputs.PKG_VERSION }}-1_amd64.deb
  - name: Download Deb arm64 artifact
- uses: actions/download-artifact@v2
+ uses: actions/download-artifact@v4
  with:
  name: sftpgo_${{ steps.get_version.outputs.PKG_VERSION }}-1_arm64.deb
  - name: Download Deb ppc64le artifact
- uses: actions/download-artifact@v2
+ uses: actions/download-artifact@v4
  with:
  name: sftpgo_${{ steps.get_version.outputs.PKG_VERSION }}-1_ppc64el.deb
+ - name: Download Deb armv7 artifact
+ uses: actions/download-artifact@v4
+ with:
+ name: sftpgo_${{ steps.get_version.outputs.PKG_VERSION }}-1_armhf.deb
  - name: Download RPM x86_64 artifact
- uses: actions/download-artifact@v2
+ uses: actions/download-artifact@v4
  with:
  name: sftpgo-${{ steps.get_version.outputs.PKG_VERSION }}-1.x86_64.rpm
  - name: Download RPM aarch64 artifact
- uses: actions/download-artifact@v2
+ uses: actions/download-artifact@v4
  with:
  name: sftpgo-${{ steps.get_version.outputs.PKG_VERSION }}-1.aarch64.rpm
  - name: Download RPM ppc64le artifact
- uses: actions/download-artifact@v2
+ uses: actions/download-artifact@v4
  with:
  name: sftpgo-${{ steps.get_version.outputs.PKG_VERSION }}-1.ppc64le.rpm
+ - name: Download RPM armv7 artifact
+ uses: actions/download-artifact@v4
+ with:
+ name: sftpgo-${{ steps.get_version.outputs.PKG_VERSION }}-1.armv7hl.rpm
  - name: Download macOS x86_64 artifact
- uses: actions/download-artifact@v2
+ uses: actions/download-artifact@v4
  with:
  name: sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_macOS_x86_64.tar.xz
  - name: Download macOS arm64 artifact
- uses: actions/download-artifact@v2
+ uses: actions/download-artifact@v4
  with:
  name: sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_macOS_arm64.tar.xz
  - name: Download Windows installer x86_64 artifact
- uses: actions/download-artifact@v2
+ uses: actions/download-artifact@v4
  with:
  name: sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_windows_x86_64.exe
- - name: Download Windows portable x86_64 artifact
+ - name: Download Windows installer arm64 artifact
- uses: actions/download-artifact@v2
+ uses: actions/download-artifact@v4
  with:
- name: sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_windows_portable_x86_64.zip
+ name: sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_windows_arm64.exe
+ - name: Download Windows installer x86 artifact
+ uses: actions/download-artifact@v4
+ with:
+ name: sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_windows_x86.exe
+ - name: Download Windows portable artifact
+ uses: actions/download-artifact@v4
+ with:
+ name: sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_windows_portable.zip
  - name: Download source with deps artifact
- uses: actions/download-artifact@v2
+ uses: actions/download-artifact@v4
  with:
  name: sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_src_with_deps.tar.xz
  - name: Create release
  run: |
  mv sftpgo_windows_x86_64.exe sftpgo_${SFTPGO_VERSION}_windows_x86_64.exe
- mv sftpgo_portable_x86_64.zip sftpgo_${SFTPGO_VERSION}_windows_portable_x86_64.zip
+ mv sftpgo_windows_arm64.exe sftpgo_${SFTPGO_VERSION}_windows_arm64.exe
+ mv sftpgo_windows_x86.exe sftpgo_${SFTPGO_VERSION}_windows_x86.exe
+ mv sftpgo_portable.zip sftpgo_${SFTPGO_VERSION}_windows_portable.zip
  gh release create "${SFTPGO_VERSION}" -t "${SFTPGO_VERSION}"
  gh release upload "${SFTPGO_VERSION}" sftpgo_*.xz --clobber
  gh release upload "${SFTPGO_VERSION}" sftpgo-*.rpm --clobber

.golangci.yml
@@ -1,5 +1,5 @@
  run:
- timeout: 5m
+ timeout: 10m
  issues-exit-code: 1
  tests: true
@@ -25,6 +25,14 @@ linters-settings:
  #enable:
  # - fieldalignment
+ issues:
+ include:
+ - EXC0002
+ - EXC0012
+ - EXC0013
+ - EXC0014
+ - EXC0015
  linters:
  enable:
  - goconst
|
@@ -40,4 +48,5 @@ linters:
  - whitespace
  - dupl
  - rowserrcheck
  - dogsled
+ - govet

CODEOWNERS (new file, 1 line)
@@ -0,0 +1 @@
* @drakkan

CODE_OF_CONDUCT.md (new file, 128 lines)
@@ -0,0 +1,128 @@
# Contributor Covenant Code of Conduct

## Our Pledge

We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.

We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.

## Our Standards

Examples of behavior that contributes to a positive environment for our
community include:

* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
  and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
  overall community

Examples of unacceptable behavior include:

* The use of sexualized language or imagery, and sexual attention or
  advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
  address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
  professional setting

## Enforcement Responsibilities

Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.

Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.

## Scope

This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
support@sftpgo.com.
All complaints will be reviewed and investigated promptly and fairly.

All community leaders are obligated to respect the privacy and security of the
reporter of any incident.

## Enforcement Guidelines

Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:

### 1. Correction

**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.

**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.

### 2. Warning

**Community Impact**: A violation through a single incident or series
of actions.

**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.

### 3. Temporary Ban

**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.

**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.

### 4. Permanent Ban

**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.

**Consequence**: A permanent ban from any sort of public interaction within
the community.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.

Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).

[homepage]: https://www.contributor-covenant.org

For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.
Dockerfile (31 changes)
@@ -1,7 +1,9 @@
-FROM golang:1.16-buster as builder
+FROM golang:1.22-bookworm as builder
 
 ENV GOFLAGS="-mod=readonly"
 
+RUN apt-get update && apt-get -y upgrade && rm -rf /var/lib/apt/lists/*
+
 RUN mkdir -p /workspace
 WORKDIR /workspace
@@ -20,17 +22,22 @@ ARG FEATURES
 COPY . .
 
 RUN set -xe && \
-    export COMMIT_SHA=${COMMIT_SHA:-$(git describe --always --dirty)} && \
-    go build $(if [ -n "${FEATURES}" ]; then echo "-tags ${FEATURES}"; fi) -trimpath -ldflags "-s -w -X github.com/drakkan/sftpgo/version.commit=${COMMIT_SHA} -X github.com/drakkan/sftpgo/version.date=`date -u +%FT%TZ`" -v -o sftpgo
+    export COMMIT_SHA=${COMMIT_SHA:-$(git describe --always --abbrev=8 --dirty)} && \
+    go build $(if [ -n "${FEATURES}" ]; then echo "-tags ${FEATURES}"; fi) -trimpath -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/internal/version.commit=${COMMIT_SHA} -X github.com/drakkan/sftpgo/v2/internal/version.date=`date -u +%FT%TZ`" -v -o sftpgo
 
-FROM debian:buster-slim
+# Set to "true" to download the "official" plugins in /usr/local/bin
+ARG DOWNLOAD_PLUGINS=false
+
+RUN if [ "${DOWNLOAD_PLUGINS}" = "true" ]; then apt-get update && apt-get install --no-install-recommends -y curl && ./docker/scripts/download-plugins.sh; fi
+
+FROM debian:bookworm-slim
 
-# Set to "true" to install the optional git and rsync dependencies
+# Set to "true" to install jq and the optional git and rsync dependencies
 ARG INSTALL_OPTIONAL_PACKAGES=false
 
-RUN apt-get update && apt-get install --no-install-recommends -y ca-certificates mime-support && rm -rf /var/lib/apt/lists/*
+RUN apt-get update && apt-get -y upgrade && apt-get install --no-install-recommends -y ca-certificates media-types && rm -rf /var/lib/apt/lists/*
 
-RUN if [ "${INSTALL_OPTIONAL_PACKAGES}" = "true" ]; then apt-get update && apt-get install --no-install-recommends -y git rsync && rm -rf /var/lib/apt/lists/*; fi
+RUN if [ "${INSTALL_OPTIONAL_PACKAGES}" = "true" ]; then apt-get update && apt-get install --no-install-recommends -y jq git rsync && rm -rf /var/lib/apt/lists/*; fi
 
 RUN mkdir -p /etc/sftpgo /var/lib/sftpgo /usr/share/sftpgo /srv/sftpgo/data /srv/sftpgo/backups
@@ -42,17 +49,15 @@ RUN groupadd --system -g 1000 sftpgo && \
 COPY --from=builder /workspace/sftpgo.json /etc/sftpgo/sftpgo.json
 COPY --from=builder /workspace/templates /usr/share/sftpgo/templates
 COPY --from=builder /workspace/static /usr/share/sftpgo/static
-COPY --from=builder /workspace/sftpgo /usr/local/bin/
+COPY --from=builder /workspace/openapi /usr/share/sftpgo/openapi
+COPY --from=builder /workspace/sftpgo /usr/local/bin/sftpgo-plugin-* /usr/local/bin/
 
 # Log to the stdout so the logs will be available using docker logs
 ENV SFTPGO_LOG_FILE_PATH=""
-# templates and static paths are inside the container
-ENV SFTPGO_HTTPD__TEMPLATES_PATH=/usr/share/sftpgo/templates
-ENV SFTPGO_HTTPD__STATIC_FILES_PATH=/usr/share/sftpgo/static
 
 # Modify the default configuration file
-RUN sed -i "s|\"users_base_dir\": \"\",|\"users_base_dir\": \"/srv/sftpgo/data\",|" /etc/sftpgo/sftpgo.json && \
-    sed -i "s|\"backups\"|\"/srv/sftpgo/backups\"|" /etc/sftpgo/sftpgo.json
+RUN sed -i 's|"users_base_dir": "",|"users_base_dir": "/srv/sftpgo/data",|' /etc/sftpgo/sftpgo.json && \
+    sed -i 's|"backups"|"/srv/sftpgo/backups"|' /etc/sftpgo/sftpgo.json
 
 RUN chown -R sftpgo:sftpgo /etc/sftpgo /srv/sftpgo && chown sftpgo:sftpgo /var/lib/sftpgo && chmod 700 /srv/sftpgo/backups
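The version stamping done in the builder stage can also be exercised outside Docker; the following is the same build step as in the RUN instruction above, taken verbatim from it and only split over lines for readability:

```bash
# Same commands as the RUN step above, runnable from a checkout of this repository.
COMMIT_SHA=$(git describe --always --abbrev=8 --dirty)
go build -trimpath \
  -ldflags "-s -w \
    -X github.com/drakkan/sftpgo/v2/internal/version.commit=${COMMIT_SHA} \
    -X github.com/drakkan/sftpgo/v2/internal/version.date=$(date -u +%FT%TZ)" \
  -v -o sftpgo
```

The linker's `-X` flag sets the named package-level string variables at link time, which is how the binary learns its commit hash and build date without any generated source file.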
@@ -1,8 +1,8 @@
-FROM golang:1.16-alpine3.13 AS builder
+FROM golang:1.22-alpine3.20 AS builder
 
 ENV GOFLAGS="-mod=readonly"
 
-RUN apk add --update --no-cache bash ca-certificates curl git gcc g++
+RUN apk -U upgrade --no-cache && apk add --update --no-cache bash ca-certificates curl git gcc g++
 
 RUN mkdir -p /workspace
 WORKDIR /workspace
@@ -22,22 +22,17 @@ ARG FEATURES
 COPY . .
 
 RUN set -xe && \
-    export COMMIT_SHA=${COMMIT_SHA:-$(git describe --always --dirty)} && \
-    go build $(if [ -n "${FEATURES}" ]; then echo "-tags ${FEATURES}"; fi) -trimpath -ldflags "-s -w -X github.com/drakkan/sftpgo/version.commit=${COMMIT_SHA} -X github.com/drakkan/sftpgo/version.date=`date -u +%FT%TZ`" -v -o sftpgo
+    export COMMIT_SHA=${COMMIT_SHA:-$(git describe --always --abbrev=8 --dirty)} && \
+    go build $(if [ -n "${FEATURES}" ]; then echo "-tags ${FEATURES}"; fi) -trimpath -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/internal/version.commit=${COMMIT_SHA} -X github.com/drakkan/sftpgo/v2/internal/version.date=`date -u +%FT%TZ`" -v -o sftpgo
 
-FROM alpine:3.13
+FROM alpine:3.20
 
-# Set to "true" to install the optional git and rsync dependencies
+# Set to "true" to install jq and the optional git and rsync dependencies
 ARG INSTALL_OPTIONAL_PACKAGES=false
 
-RUN apk add --update --no-cache ca-certificates tzdata mailcap
+RUN apk -U upgrade --no-cache && apk add --update --no-cache ca-certificates tzdata mailcap
 
-RUN if [ "${INSTALL_OPTIONAL_PACKAGES}" = "true" ]; then apk add --update --no-cache rsync git; fi
+RUN if [ "${INSTALL_OPTIONAL_PACKAGES}" = "true" ]; then apk add --update --no-cache jq git rsync; fi
 
-# set up nsswitch.conf for Go's "netgo" implementation
-# https://github.com/gliderlabs/docker-alpine/issues/367#issuecomment-424546457
-RUN test ! -e /etc/nsswitch.conf && echo 'hosts: files dns' > /etc/nsswitch.conf
-
 RUN mkdir -p /etc/sftpgo /var/lib/sftpgo /usr/share/sftpgo /srv/sftpgo/data /srv/sftpgo/backups
@@ -47,17 +42,15 @@ RUN addgroup -g 1000 -S sftpgo && \
 COPY --from=builder /workspace/sftpgo.json /etc/sftpgo/sftpgo.json
 COPY --from=builder /workspace/templates /usr/share/sftpgo/templates
 COPY --from=builder /workspace/static /usr/share/sftpgo/static
+COPY --from=builder /workspace/openapi /usr/share/sftpgo/openapi
 COPY --from=builder /workspace/sftpgo /usr/local/bin/
 
 # Log to the stdout so the logs will be available using docker logs
 ENV SFTPGO_LOG_FILE_PATH=""
-# templates and static paths are inside the container
-ENV SFTPGO_HTTPD__TEMPLATES_PATH=/usr/share/sftpgo/templates
-ENV SFTPGO_HTTPD__STATIC_FILES_PATH=/usr/share/sftpgo/static
 
 # Modify the default configuration file
-RUN sed -i "s|\"users_base_dir\": \"\",|\"users_base_dir\": \"/srv/sftpgo/data\",|" /etc/sftpgo/sftpgo.json && \
-    sed -i "s|\"backups\"|\"/srv/sftpgo/backups\"|" /etc/sftpgo/sftpgo.json
+RUN sed -i 's|"users_base_dir": "",|"users_base_dir": "/srv/sftpgo/data",|' /etc/sftpgo/sftpgo.json && \
+    sed -i 's|"backups"|"/srv/sftpgo/backups"|' /etc/sftpgo/sftpgo.json
 
 RUN chown -R sftpgo:sftpgo /etc/sftpgo /srv/sftpgo && chown sftpgo:sftpgo /var/lib/sftpgo && chmod 700 /srv/sftpgo/backups
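As a quick sanity check that the sed edits above took effect, the rewritten defaults can be read back from a built image; the image tag below is an illustrative local tag, not an official one:

```bash
# Print the rewritten default paths baked into the image's configuration file.
docker run --rm --entrypoint grep sftpgo:alpine \
  -E 'users_base_dir|/srv/sftpgo/backups' /etc/sftpgo/sftpgo.json
```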
Dockerfile.distroless (57 changes, new file)
@@ -0,0 +1,57 @@
FROM golang:1.22-bookworm as builder

ENV CGO_ENABLED=0 GOFLAGS="-mod=readonly"

RUN apt-get update && apt-get -y upgrade && apt-get install --no-install-recommends -y media-types && rm -rf /var/lib/apt/lists/*

RUN mkdir -p /workspace
WORKDIR /workspace

ARG GOPROXY

COPY go.mod go.sum ./
RUN go mod download

ARG COMMIT_SHA

# This ARG allows to disable some optional features and it might be useful if you build the image yourself.
# For this variant we disable SQLite support since it requires CGO and so a C runtime which is not installed
# in distroless/static-* images
ARG FEATURES

COPY . .

RUN set -xe && \
    export COMMIT_SHA=${COMMIT_SHA:-$(git describe --always --abbrev=8 --dirty)} && \
    go build $(if [ -n "${FEATURES}" ]; then echo "-tags ${FEATURES}"; fi) -trimpath -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/internal/version.commit=${COMMIT_SHA} -X github.com/drakkan/sftpgo/v2/internal/version.date=`date -u +%FT%TZ`" -v -o sftpgo

# Modify the default configuration file
RUN sed -i 's|"users_base_dir": "",|"users_base_dir": "/srv/sftpgo/data",|' sftpgo.json && \
    sed -i 's|"backups"|"/srv/sftpgo/backups"|' sftpgo.json && \
    sed -i 's|"sqlite"|"bolt"|' sftpgo.json

RUN mkdir /etc/sftpgo /var/lib/sftpgo /srv/sftpgo

FROM gcr.io/distroless/static-debian12

COPY --from=builder --chown=1000:1000 /etc/sftpgo /etc/sftpgo
COPY --from=builder --chown=1000:1000 /srv/sftpgo /srv/sftpgo
COPY --from=builder --chown=1000:1000 /var/lib/sftpgo /var/lib/sftpgo
COPY --from=builder --chown=1000:1000 /workspace/sftpgo.json /etc/sftpgo/sftpgo.json
COPY --from=builder /workspace/templates /usr/share/sftpgo/templates
COPY --from=builder /workspace/static /usr/share/sftpgo/static
COPY --from=builder /workspace/openapi /usr/share/sftpgo/openapi
COPY --from=builder /workspace/sftpgo /usr/local/bin/
COPY --from=builder /etc/mime.types /etc/mime.types

# Log to the stdout so the logs will be available using docker logs
ENV SFTPGO_LOG_FILE_PATH=""
# These env vars are required to avoid the following error when calling user.Current():
# unable to get the current user: user: Current requires cgo or $USER set in environment
ENV USER=sftpgo
ENV HOME=/var/lib/sftpgo

WORKDIR /var/lib/sftpgo
USER 1000:1000

CMD ["sftpgo", "serve"]
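A plausible build invocation for this distroless variant is sketched below. The FEATURES value is an assumption standing in for whatever build tags the project accepts (the comment in the file only says SQLite support must be disabled), and the image tag is illustrative:

```bash
# FEATURES=nosqlite is an assumed tag name matching the "disable SQLite" note above.
docker build -f Dockerfile.distroless \
  --build-arg COMMIT_SHA="$(git rev-parse --short=8 HEAD)" \
  --build-arg FEATURES=nosqlite \
  -t sftpgo:distroless .
```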
README.md (303 changes)
@@ -1,281 +1,67 @@
 # SFTPGo
 
-![CI Status](https://github.com/drakkan/sftpgo/workflows/CI/badge.svg?branch=main&event=push)
+[![CI Status](https://github.com/drakkan/sftpgo/workflows/CI/badge.svg?branch=main&event=push)](https://github.com/drakkan/sftpgo/workflows/CI/badge.svg?branch=main&event=push)
 [![Code Coverage](https://codecov.io/gh/drakkan/sftpgo/branch/main/graph/badge.svg)](https://codecov.io/gh/drakkan/sftpgo/branch/main)
-[![Go Report Card](https://goreportcard.com/badge/github.com/drakkan/sftpgo)](https://goreportcard.com/report/github.com/drakkan/sftpgo)
-[![License: AGPL v3](https://img.shields.io/badge/License-AGPLv3-blue.svg)](https://www.gnu.org/licenses/agpl-3.0)
+[![License: AGPL-3.0-only](https://img.shields.io/badge/License-AGPLv3-blue.svg)](https://www.gnu.org/licenses/agpl-3.0)
+[![Docker Pulls](https://img.shields.io/docker/pulls/drakkan/sftpgo)](https://hub.docker.com/r/drakkan/sftpgo)
 [![Mentioned in Awesome Go](https://awesome.re/mentioned-badge.svg)](https://github.com/avelino/awesome-go)
 
-Fully featured and highly configurable SFTP server with optional FTP/S and WebDAV support, written in Go.
-Several storage backends are supported: local filesystem, encrypted local filesystem, S3 (compatible) Object Storage, Google Cloud Storage, Azure Blob Storage, SFTP.
-
-## Features
-
-- Support for serving local filesystem, encrypted local filesystem, S3 Compatible Object Storage, Google Cloud Storage, Azure Blob Storage or other SFTP accounts over SFTP/SCP/FTP/WebDAV.
-- Virtual folders are supported: a virtual folder can use any of the supported storage backends. So you can have, for example, an S3 user that exposes a GCS bucket (or part of it) on a specified path and an encrypted local filesystem on another one. Virtual folders can be private or shared among multiple users, for shared virtual folders you can define different quota limits for each user.
-- Configurable custom commands and/or HTTP hooks on file upload, pre-upload, download, pre-download, delete, pre-delete, rename, on SSH commands and on user add, update and delete.
-- Virtual accounts stored within a "data provider".
-- SQLite, MySQL, PostgreSQL, CockroachDB, Bolt (key/value store in pure Go) and in-memory data providers are supported.
-- Chroot isolation for local accounts. Cloud-based accounts can be restricted to a certain base path.
-- Per user and per directory virtual permissions, for each exposed path you can allow or deny: directory listing, upload, overwrite, download, delete, rename, create directories, create symlinks, change owner/group/file mode.
-- [REST API](./docs/rest-api.md) for users and folders management, backup, restore and real time reports of the active connections with possibility of forcibly closing a connection.
-- [Web based administration interface](./docs/web-admin.md) to easily manage users, folders and connections.
-- [Web client interface](./docs/web-client.md) so that end users can change their credentials and browse their files.
-- Public key and password authentication. Multiple public keys per user are supported.
-- SSH user [certificate authentication](https://cvsweb.openbsd.org/src/usr.bin/ssh/PROTOCOL.certkeys?rev=1.8).
-- Keyboard interactive authentication. You can easily setup a customizable multi-factor authentication.
-- Partial authentication. You can configure multi-step authentication requiring, for example, the user password after successful public key authentication.
-- Per user authentication methods.
-- Custom authentication via external programs/HTTP API.
-- [Data At Rest Encryption](./docs/dare.md).
-- Dynamic user modification before login via external programs/HTTP API.
-- Quota support: accounts can have individual quota expressed as max total size and/or max number of files.
-- Bandwidth throttling, with distinct settings for upload and download.
-- Per-protocol [rate limiting](./docs/rate-limiting.md) is supported and can be optionally connected to the built-in defender to automatically block hosts that repeatedly exceed the configured limit.
-- Per user maximum concurrent sessions.
-- Per user and global IP filters: login can be restricted to specific ranges of IP addresses or to a specific IP address.
-- Per user and per directory shell like patterns filters: files can be allowed or denied based on shell like patterns.
-- Automatically terminating idle connections.
-- Automatic blocklist management using the built-in [defender](./docs/defender.md).
-- Atomic uploads are configurable.
-- Per user files/folders ownership mapping: you can map all the users to the system account that runs SFTPGo (all platforms are supported) or you can run SFTPGo as root user and map each user or group of users to a different system account (\*NIX only).
-- Support for Git repositories over SSH.
-- SCP and rsync are supported.
-- FTP/S is supported. You can configure the FTP service to require TLS for both control and data connections.
-- [WebDAV](./docs/webdav.md) is supported.
-- Two-Way TLS authentication, aka TLS with client certificate authentication, is supported for REST API/Web Admin, FTPS and WebDAV over HTTPS.
-- Per user protocols restrictions. You can configure the allowed protocols (SSH/FTP/WebDAV) for each user.
-- [Prometheus metrics](./docs/metrics.md) are exposed.
-- Support for HAProxy PROXY protocol: you can proxy and/or load balance the SFTP/SCP/FTP/WebDAV service without losing the information about the client's address.
-- Easy [migration](./examples/convertusers) from Linux system user accounts.
-- [Portable mode](./docs/portable-mode.md): a convenient way to share a single directory on demand.
-- [SFTP subsystem mode](./docs/sftp-subsystem.md): you can use SFTPGo as OpenSSH's SFTP subsystem.
-- Performance analysis using built-in [profiler](./docs/profiling.md).
-- Configuration format is at your choice: JSON, TOML, YAML, HCL, envfile are supported.
-- Log files are accurate and they are saved in the easily parsable JSON format ([more information](./docs/logs.md)).
-
-## Platforms
-
-SFTPGo is developed and tested on Linux. After each commit, the code is automatically built and tested on Linux, macOS and Windows using a [GitHub Action](./.github/workflows/development.yml). The test cases are regularly manually executed and passed on FreeBSD. Other *BSD variants should work too.
-
-## Requirements
-
-- Go as build only dependency. We support the Go version(s) used in [continuous integration workflows](./tree/main/.github/workflows).
-- A suitable SQL server to use as data provider: PostgreSQL 9.4+ or MySQL 5.6+ or SQLite 3.x or CockroachDB stable.
-- The SQL server is optional: you can choose to use an embedded bolt database as key/value store or an in memory data provider.
-
-## Installation
-
-Binary releases for Linux, macOS, and Windows are available. Please visit the [releases](https://github.com/drakkan/sftpgo/releases "releases") page.
-
-An official Docker image is available. Documentation is [here](./docker/README.md).
-
-Some Linux distro packages are available:
-
-- For Arch Linux via AUR:
-  - [sftpgo](https://aur.archlinux.org/packages/sftpgo/). This package follows stable releases. It requires `git`, `gcc` and `go` to build.
-  - [sftpgo-bin](https://aur.archlinux.org/packages/sftpgo-bin/). This package follows stable releases downloading the prebuilt linux binary from GitHub. It does not require `git`, `gcc` and `go` to build.
-  - [sftpgo-git](https://aur.archlinux.org/packages/sftpgo-git/). This package builds and installs the latest git `main` branch. It requires `git`, `gcc` and `go` to build.
-- Deb and RPM packages are built after each commit and for each release.
-- For Ubuntu a PPA is available [here](https://launchpad.net/~sftpgo/+archive/ubuntu/sftpgo).
-
-SFTPGo is also available on [AWS Marketplace](https://aws.amazon.com/marketplace/seller-profile?id=6e849ab8-70a6-47de-9a43-13c3fa849335), purchasing from there will help keep SFTPGo a long-term sustainable project.
-
-On FreeBSD you can install from the [SFTPGo port](https://www.freshports.org/ftp/sftpgo).
-
-On Windows you can use:
-
-- The Windows installer to install and run SFTPGo as a Windows service.
-- The portable package to start SFTPGo on demand.
-
-You can easily test new features selecting a commit from the [Actions](https://github.com/drakkan/sftpgo/actions) page and downloading the matching build artifacts for Linux, macOS or Windows. GitHub stores artifacts for 90 days.
-
-Alternately, you can [build from source](./docs/build-from-source.md).
-
-[Getting Started Guide for the Impatient](./docs/howto/getting-started.md).
-
-## Configuration
-
-A full explanation of all configuration methods can be found [here](./docs/full-configuration.md).
-
-Please make sure to [initialize the data provider](#data-provider-initialization-and-management) before running the daemon.
-
-To start SFTPGo with the default settings, simply run:
-
-```bash
-sftpgo serve
-```
-
-Check out [this documentation](./docs/service.md) if you want to run SFTPGo as a service.
-
-### Data provider initialization and management
-
-Before starting the SFTPGo server please ensure that the configured data provider is properly initialized/updated.
-
-For PostgreSQL, MySQL and CockroachDB providers, you need to create the configured database. For SQLite, the configured database will be automatically created at startup. Memory and bolt data providers do not require an initialization but they could require an update to the existing data after upgrading SFTPGo.
-
-SFTPGo will attempt to automatically detect if the data provider is initialized/updated and if not, will attempt to initialize/update it on startup as needed.
-
-Alternately, you can create/update the required data provider structures yourself using the `initprovider` command.
-
-For example, you can simply execute the following command from the configuration directory:
-
-```bash
-sftpgo initprovider
-```
-
-Take a look at the CLI usage to learn how to specify a different configuration file:
-
-```bash
-sftpgo initprovider --help
-```
-
-You can disable automatic data provider checks/updates at startup by setting the `update_mode` configuration key to `1`.
-
-## Create the first admin
-
-To start using SFTPGo you need to create an admin user, you can do it in several ways:
-
-- by using the web admin interface. The default URL is [http://127.0.0.1:8080/web/admin](http://127.0.0.1:8080/web/admin)
-- by loading initial data
-- by enabling `create_default_admin` in your configuration file. In this case the credentials are `admin`/`password`
-
-## Upgrading
-
-SFTPGo supports upgrading from the previous release branch to the current one.
-Some examples for supported upgrade paths are:
-
-- from 1.2.x to 2.0.x
-- from 2.0.x to 2.1.x and so on.
-
-For supported upgrade paths, the data and schema are migrated automatically, alternately you can use the `initprovider` command.
-
-So if, for example, you want to upgrade from a version before 1.2.x to 2.0.x, you must first install version 1.2.x, update the data provider and finally install the version 2.0.x. It is recommended to always install the latest available minor version, ie do not install 1.2.0 if 1.2.2 is available.
-
-Loading data from a provider independent JSON dump is supported from the previous release branch to the current one too. After upgrading SFTPGo it is advisable to regenerate the JSON dump from the new version.
-
-## Downgrading
-
-If for some reason you want to downgrade SFTPGo, you may need to downgrade your data provider schema and data as well. You can use the `revertprovider` command for this task.
-
-As for upgrading, SFTPGo supports downgrading from the previous release branch to the current one.
-
-So, if you plan to downgrade from 2.0.x to 1.2.x, before uninstalling 2.0.x version, you can prepare your data provider executing the following command from the configuration directory:
-
-```shell
-sftpgo revertprovider --to-version 4
-```
-
-Take a look at the CLI usage to see the supported parameter for the `--to-version` argument and to learn how to specify a different configuration file:
-
-```shell
-sftpgo revertprovider --help
-```
-
-The `revertprovider` command is not supported for the memory provider.
-
-Please note that we only support the current release branch and the current main branch, if you find a bug it is better to report it rather than downgrading to an older unsupported version.
-
-## Users and folders management
-
-After starting SFTPGo you can manage users and folders using:
-
-- the [web based administration interface](./docs/web-admin.md)
-- the [REST API](./docs/rest-api.md)
-
-To support embedded data providers like `bolt` and `SQLite` we can't have a CLI that directly write users and folders to the data provider, we always have to use the REST API.
-
-Full details for users, folders, admins and other resources are documented in the [OpenAPI](/httpd/schema/openapi.yaml) schema. If you want to render the schema without importing it manually, you can explore it on [Stoplight](https://sftpgo.stoplight.io/docs/sftpgo/openapi.yaml).
-
-## Tutorials
-
-Some step-to-step tutorials can be found inside the source tree [howto](./docs/howto "How-to") directory.
-
-## Authentication options
-
-### External Authentication
-
-Custom authentication methods can easily be added. SFTPGo supports external authentication modules, and writing a new backend can be as simple as a few lines of shell script. More information can be found [here](./docs/external-auth.md).
-
-### Keyboard Interactive Authentication
-
-Keyboard interactive authentication is, in general, a series of questions asked by the server with responses provided by the client.
-This authentication method is typically used for multi-factor authentication.
-
-More information can be found [here](./docs/keyboard-interactive.md).
-
-## Dynamic user creation or modification
-
-A user can be created or modified by an external program just before the login. More information about this can be found [here](./docs/dynamic-user-mod.md).
-
-## Custom Actions
-
-SFTPGo allows to configure custom commands and/or HTTP notifications on file upload, download, delete, rename, on SSH commands and on user add, update and delete.
-
-More information about custom actions can be found [here](./docs/custom-actions.md).
-
-## Virtual folders
-
-Directories outside the user home directory or based on a different storage provider can be exposed as virtual folders, more information [here](./docs/virtual-folders.md).
-
-## Other hooks
-
-You can get notified as soon as a new connection is established using the [Post-connect hook](./docs/post-connect-hook.md) and after each login using the [Post-login hook](./docs/post-login-hook.md).
-You can use your own hook to [check passwords](./docs/check-password-hook.md).
-
-## Storage backends
-
-### S3 Compatible Object Storage backends
-
-Each user can be mapped to the whole bucket or to a bucket virtual folder. This way, the mapped bucket/virtual folder is exposed over SFTP/SCP/FTP/WebDAV. More information about S3 integration can be found [here](./docs/s3.md).
-
-### Google Cloud Storage backend
-
-Each user can be mapped with a Google Cloud Storage bucket or a bucket virtual folder. This way, the mapped bucket/virtual folder is exposed over SFTP/SCP/FTP/WebDAV. More information about Google Cloud Storage integration can be found [here](./docs/google-cloud-storage.md).
-
-### Azure Blob Storage backend
-
-Each user can be mapped with an Azure Blob Storage container or a container virtual folder. This way, the mapped container/virtual folder is exposed over SFTP/SCP/FTP/WebDAV. More information about Azure Blob Storage integration can be found [here](./docs/azure-blob-storage.md).
-
-### SFTP backend
-
-Each user can be mapped to another SFTP server account or a subfolder of it. More information can be found [here](./docs/sftpfs.md).
-
-### Encrypted backend
-
-Data at-rest encryption is supported via the [cryptfs backend](./docs/dare.md).
-
-### Other Storage backends
-
-Adding new storage backends is quite easy:
-
-- implement the [Fs interface](./vfs/vfs.go#L28 "interface for filesystem backends").
-- update the user method `GetFilesystem` to return the new backend
-- update the web interface and the REST API CLI
-- add the flags for the new storage backed to the `portable` mode
-
-Anyway, some backends require a pay per use account (or they offer free account for a limited time period only). To be able to add support for such backends or to review pull requests, please provide a test account. The test account must be available for enough time to be able to maintain the backend and do basic tests before each new release.
-
-## Brute force protection
-
-The [connection failed logs](./docs/logs.md) can be used for integration in tools such as [Fail2ban](http://www.fail2ban.org/). Example of [jails](./fail2ban/jails) and [filters](./fail2ban/filters) working with `systemd`/`journald` are available in fail2ban directory.
-
-You can also use the built-in [defender](./docs/defender.md).
-
-## Account's configuration properties
-
-Details information about account configuration properties can be found [here](./docs/account.md).
-
-## Performance
-
-SFTPGo can easily saturate a Gigabit connection on low end hardware with no special configuration, this is generally enough for most use cases.
-
-More in-depth analysis of performance can be found [here](./docs/performance.md).
+Full-featured and highly configurable event-driven file transfer solution.
+Server protocols: SFTP, HTTP/S, FTP/S, WebDAV.
+Storage backends: local filesystem, encrypted local filesystem, S3 (compatible) Object Storage, Google Cloud Storage, Azure Blob Storage, other SFTP servers.
+
+With SFTPGo you can leverage local and cloud storage backends for exchanging and storing files internally or with business partners using the same tools and processes you are already familiar with.
+
+The WebAdmin UI allows to easily create and manage your users, folders, groups and other resources.
+
+The WebClient UI allows end users to change their credentials, browse and manage their files in the browser and setup two-factor authentication which works with Microsoft Authenticator, Google Authenticator, Authy and other compatible apps.
+
+## Sponsors
+
+We strongly believe in Open Source software model, so we decided to make SFTPGo available to everyone, but maintaining and evolving SFTPGo takes a lot of time and work. To make development and maintenance sustainable you should consider to support the project with a [sponsorship](https://github.com/sponsors/drakkan).
+
+We also provide [professional services](https://sftpgo.com/#pricing) to support you in using SFTPGo to the fullest.
+
+The open source license grant you freedom but not assurance of help. So why would you rely on free software without support or any guarantee it will stay healthy and maintained for the upcoming years?
+
+Supporting the project benefit businesses and the community because if the project is financially sustainable, using this business model, we don't have to restrict features and/or switch to an [Open-core](https://en.wikipedia.org/wiki/Open-core_model) model. The technology stays truly open source. Everyone wins.
+
+It is important to understand that you should support SFTPGo and any other Open Source project you rely on for ongoing maintenance, even if you don't have any questions or need new features, to mitigate the business risk of a project you depend on going unmaintained, with its security and development velocity implications.
+
+### Thank you to our sponsors
+
+#### Platinum sponsors
+
+[<img src="./img/Aledade_logo.png" alt="Aledade logo" width="202" height="70">](https://www.aledade.com/)
+</br></br>
+[<img src="./img/jumptrading.png" alt="Jump Trading logo" width="362" height="63">](https://www.jumptrading.com/)
+</br></br>
+[<img src="./img/wpengine.png" alt="WP Engine logo" width="331" height="63">](https://wpengine.com/)
+
+#### Silver sponsors
+
+[<img src="./img/IDCS.png" alt="IDCS logo" width="212" height="51">](https://idcs.ip-paris.fr/)
+
+#### Bronze sponsors
+
+[<img src="./img/7digital.png" alt="7digital logo" width="178" height="56">](https://www.7digital.com/)
+</br></br>
+[<img src="./img/vps2day.png" alt="VPS2day logo" width="234" height="56">](https://www.vps2day.com/)
+
+## Support policy
+
+You can use SFTPGo for free, respecting the obligations of the Open Source license, but please do not ask or expect free support as well.
+
+Use [discussions](https://github.com/drakkan/sftpgo/discussions) to ask questions and get support from the community.
+
+If you report an invalid issue and/or ask for step-by-step support, your issue will be closed as invalid without further explanation and/or the "support request" label will be added. Invalid bug reports may confuse other users. Thanks for understanding.
+
+## Documentation
+
+You can read more about supported features and documentation at [sftpgo.github.io](https://sftpgo.github.io/).
 
 ## Release Cadence
 
-SFTPGo releases are feature-driven, we don't have a fixed time based schedule. As a rough estimate, you can expect 1 or 2 new releases per year.
+SFTPGo releases are feature-driven, we don't have a fixed time based schedule. As a rough estimate, you can expect 1 or 2 new major releases per year and several bug fix releases.
 
 ## Acknowledgements
@@ -283,14 +69,25 @@ SFTPGo makes use of the third party libraries listed inside [go.mod](./go.mod).
 
 We are very grateful to all the people who contributed with ideas and/or pull requests.
 
-Thank you [ysura](https://www.ysura.com/) for granting me stable access to a test AWS S3 account.
-
-## Sponsors
-
-I'd like to make SFTPGo into a sustainable long term project and your [sponsorship](https://github.com/sponsors/drakkan) will really help :heart:
-
-Thank you to our sponsors!
+Thank you to [ysura](https://www.ysura.com/) for granting us stable access to a test AWS S3 account.
+
+Thank you to [KeenThemes](https://keenthemes.com/) for granting us a custom license to use their amazing [Mega Bundle](https://keenthemes.com/products/templates-mega-bundle) for SFTPGo UI.
+
+Thank you to [Crowdin](https://crowdin.com/) for granting us an Open Source License.
+
+Thank you to [Incode](https://www.incode.it/) for helping us to improve the UI/UX.
 
 ## License
 
-GNU AGPLv3
+SFTPGo source code is licensed under the GNU AGPL-3.0-only.
+
+The [theme](https://keenthemes.com/products/templates-mega-bundle) used in WebAdmin and WebClient user interfaces is proprietary, this means:
+
+- KeenThemes HTML/CSS/JS components are allowed for use only within the SFTPGo product and restricted to be used in a resealable HTML template that can compete with KeenThemes products anyhow.
+- The SFTPGo WebAdmin and WebClient user interfaces (HTML, CSS and JS components) based on this theme are allowed for use only within the SFTPGo product and therefore cannot be used in derivative works/products without an explicit grant from the [SFTPGo Team](mailto:support@sftpgo.com).
+
+More information about [compliance](https://sftpgo.com/compliance.html).
+
+## Copyright
+
+Copyright (C) 2019 Nicola Murino
@@ -2,11 +2,9 @@
 
 ## Supported Versions
 
-Only the current release of the software is actively supported. If you need
-help backporting fixes into an older release, feel free to ask.
+Only the current release of the software is actively supported.
+[Contact us](mailto:support@sftpgo.com) if you need early security patches and enterprise-grade security.
 
 ## Reporting a Vulnerability
 
-Email your vulnerability information to SFTPGo's maintainer:
-
-Nicola Murino <nicola.murino@gmail.com>
+To report (possible) security issues in SFTPGo, please either send a mail to the [SFTPGo Team](mailto:support@sftpgo.com) or use Github's [private reporting feature](https://github.com/drakkan/sftpgo/security/advisories/new).
cmd/gen.go (12 changes)
@@ -1,12 +0,0 @@
package cmd

import "github.com/spf13/cobra"

var genCmd = &cobra.Command{
	Use:   "gen",
	Short: "A collection of useful generators",
}

func init() {
	rootCmd.AddCommand(genCmd)
}
@@ -1,86 +0,0 @@
package cmd

import (
	"os"

	"github.com/rs/zerolog"
	"github.com/spf13/cobra"

	"github.com/drakkan/sftpgo/logger"
)

var genCompletionCmd = &cobra.Command{
	Use:   "completion [bash|zsh|fish|powershell]",
	Short: "Generate shell completion script",
	Long: `To load completions:

Bash:

$ source <(sftpgo gen completion bash)

To load completions for each session, execute once:

Linux:

$ sudo sftpgo gen completion bash > /usr/share/bash-completion/completions/sftpgo

MacOS:

$ sudo sftpgo gen completion bash > /usr/local/etc/bash_completion.d/sftpgo

Zsh:

If shell completion is not already enabled in your environment you will need
to enable it. You can execute the following once:

$ echo "autoload -U compinit; compinit" >> ~/.zshrc

To load completions for each session, execute once:

$ sftpgo gen completion zsh > "${fpath[1]}/_sftpgo"

Fish:

$ sftpgo gen completion fish | source

To load completions for each session, execute once:

$ sftpgo gen completion fish > ~/.config/fish/completions/sftpgo.fish

Powershell:

PS> sftpgo gen completion powershell | Out-String | Invoke-Expression

To load completions for every new session, run:

PS> sftpgo gen completion powershell > sftpgo.ps1

and source this file from your powershell profile.
`,
	DisableFlagsInUseLine: true,
	ValidArgs:             []string{"bash", "zsh", "fish", "powershell"},
	Args:                  cobra.ExactValidArgs(1),
	Run: func(cmd *cobra.Command, args []string) {
		var err error
		logger.DisableLogger()
		logger.EnableConsoleLogger(zerolog.DebugLevel)
		switch args[0] {
		case "bash":
			err = cmd.Root().GenBashCompletion(os.Stdout)
		case "zsh":
			err = cmd.Root().GenZshCompletion(os.Stdout)
		case "fish":
			err = cmd.Root().GenFishCompletion(os.Stdout, true)
		case "powershell":
			err = cmd.Root().GenPowerShellCompletionWithDesc(os.Stdout)
		}
		if err != nil {
			logger.WarnToConsole("Unable to generate shell completion script: %v", err)
			os.Exit(1)
		}
	},
}

func init() {
	genCmd.AddCommand(genCompletionCmd)
}
@@ -1,52 +0,0 @@
package cmd

import (
	"fmt"
	"os"

	"github.com/rs/zerolog"
	"github.com/spf13/cobra"
	"github.com/spf13/cobra/doc"

	"github.com/drakkan/sftpgo/logger"
	"github.com/drakkan/sftpgo/version"
)

var (
	manDir    string
	genManCmd = &cobra.Command{
		Use:   "man",
		Short: "Generate man pages for SFTPGo CLI",
		Long: `This command automatically generates up-to-date man pages of SFTPGo's
command-line interface. By default, it creates the man page files
in the "man" directory under the current directory.
`,
		Run: func(cmd *cobra.Command, args []string) {
			logger.DisableLogger()
			logger.EnableConsoleLogger(zerolog.DebugLevel)
			if _, err := os.Stat(manDir); os.IsNotExist(err) {
				err = os.MkdirAll(manDir, os.ModePerm)
				if err != nil {
					logger.WarnToConsole("Unable to generate man page files: %v", err)
					os.Exit(1)
				}
			}
			header := &doc.GenManHeader{
				Section: "1",
				Manual:  "SFTPGo Manual",
				Source:  fmt.Sprintf("SFTPGo %v", version.Get().Version),
			}
			cmd.Root().DisableAutoGenTag = true
			err := doc.GenManTree(cmd.Root(), header, manDir)
			if err != nil {
				logger.WarnToConsole("Unable to generate man page files: %v", err)
				os.Exit(1)
			}
		},
	}
)

func init() {
	genManCmd.Flags().StringVarP(&manDir, "dir", "d", "man", "The directory to write the man pages")
	genCmd.AddCommand(genManCmd)
}
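Given the -d/--dir flag registered in init above, a typical invocation and a quick check of the result would be:

```bash
sftpgo gen man -d ./man   # write the man pages into ./man (flag defined above)
man ./man/sftpgo.1        # view the generated top-level page
```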
@@ -1,70 +0,0 @@
package cmd

import (
	"os"

	"github.com/rs/zerolog"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"

	"github.com/drakkan/sftpgo/config"
	"github.com/drakkan/sftpgo/dataprovider"
	"github.com/drakkan/sftpgo/logger"
	"github.com/drakkan/sftpgo/utils"
)

var (
	initProviderCmd = &cobra.Command{
		Use:   "initprovider",
		Short: "Initializes and/or updates the configured data provider",
		Long: `This command reads the data provider connection details from the specified
configuration file and creates the initial structure or updates the existing one,
as needed.

Some data providers such as bolt and memory do not require an initialization
but they could require an update to the existing data after upgrading SFTPGo.

For SQLite/bolt providers the database file will be auto-created if missing.

For PostgreSQL and MySQL providers you need to create the configured database,
this command will create/update the required tables as needed.

To initialize/update the data provider from the configuration directory simply use:

$ sftpgo initprovider

Please take a look at the usage below to customize the options.`,
		Run: func(cmd *cobra.Command, args []string) {
			logger.DisableLogger()
			logger.EnableConsoleLogger(zerolog.DebugLevel)
			configDir = utils.CleanDirInput(configDir)
			err := config.LoadConfig(configDir, configFile)
			if err != nil {
				logger.WarnToConsole("Unable to initialize data provider, config load error: %v", err)
				return
			}
			kmsConfig := config.GetKMSConfig()
			err = kmsConfig.Initialize()
			if err != nil {
				logger.ErrorToConsole("unable to initialize KMS: %v", err)
				os.Exit(1)
			}
			providerConf := config.GetProviderConf()
			logger.InfoToConsole("Initializing provider: %#v config file: %#v", providerConf.Driver, viper.ConfigFileUsed())
			err = dataprovider.InitializeDatabase(providerConf, configDir)
			if err == nil {
				logger.InfoToConsole("Data provider successfully initialized/updated")
			} else if err == dataprovider.ErrNoInitRequired {
				logger.InfoToConsole("%v", err.Error())
			} else {
				logger.WarnToConsole("Unable to initialize/update the data provider: %v", err)
				os.Exit(1)
			}
		},
	}
)

func init() {
	rootCmd.AddCommand(initProviderCmd)
	addConfigFlags(initProviderCmd)
}
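Since addConfigFlags wires the shared configuration flags onto the command, the provider can also be initialized against an explicit configuration location; a minimal sketch, assuming the conventional --config-dir flag name used by this CLI:

```bash
# Initialize or update the configured data provider before the first start
# (the --config-dir flag name is an assumption based on addConfigFlags above).
sftpgo initprovider --config-dir /etc/sftpgo
```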
@@ -1,9 +0,0 @@
// +build noportable

package cmd

import "github.com/drakkan/sftpgo/version"

func init() {
	version.AddFeature("-portable")
}
@@ -1,35 +0,0 @@
package cmd

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"

	"github.com/drakkan/sftpgo/service"
)

var (
	reloadCmd = &cobra.Command{
		Use:   "reload",
		Short: "Reload the SFTPGo Windows Service sending a \"paramchange\" request",
		Run: func(cmd *cobra.Command, args []string) {
			s := service.WindowsService{
				Service: service.Service{
					Shutdown: make(chan bool),
				},
			}
			err := s.Reload()
			if err != nil {
				fmt.Printf("Error sending reload signal: %v\r\n", err)
				os.Exit(1)
			} else {
				fmt.Printf("Reload signal sent!\r\n")
			}
		},
	}
)

func init() {
	serviceCmd.AddCommand(reloadCmd)
}
@@ -1,35 +0,0 @@
package cmd

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"

	"github.com/drakkan/sftpgo/service"
)

var (
	rotateLogCmd = &cobra.Command{
		Use:   "rotatelogs",
		Short: "Signal to the running service to rotate the logs",
		Run: func(cmd *cobra.Command, args []string) {
			s := service.WindowsService{
				Service: service.Service{
					Shutdown: make(chan bool),
				},
			}
			err := s.RotateLogFile()
			if err != nil {
				fmt.Printf("Error sending rotate log file signal to the service: %v\r\n", err)
				os.Exit(1)
			} else {
				fmt.Printf("Rotate log file signal sent!\r\n")
			}
		},
	}
)

func init() {
	serviceCmd.AddCommand(rotateLogCmd)
}
cmd/serve.go (52 changes)
@@ -1,52 +0,0 @@
package cmd

import (
	"os"

	"github.com/spf13/cobra"

	"github.com/drakkan/sftpgo/service"
	"github.com/drakkan/sftpgo/utils"
)

var (
	serveCmd = &cobra.Command{
		Use:   "serve",
		Short: "Start the SFTP Server",
		Long: `To start SFTPGo with the default values for the command line flags simply
use:

$ sftpgo serve

Please take a look at the usage below to customize the startup options`,
		Run: func(cmd *cobra.Command, args []string) {
			service := service.Service{
				ConfigDir:         utils.CleanDirInput(configDir),
				ConfigFile:        configFile,
				LogFilePath:       logFilePath,
				LogMaxSize:        logMaxSize,
				LogMaxBackups:     logMaxBackups,
				LogMaxAge:         logMaxAge,
				LogCompress:       logCompress,
				LogVerbose:        logVerbose,
				LoadDataFrom:      loadDataFrom,
				LoadDataMode:      loadDataMode,
				LoadDataQuotaScan: loadDataQuotaScan,
				LoadDataClean:     loadDataClean,
				Shutdown:          make(chan bool),
			}
			if err := service.Start(); err == nil {
				service.Wait()
				if service.Error == nil {
					os.Exit(0)
				}
			}
			os.Exit(1)
		},
	}
)

func init() {
	rootCmd.AddCommand(serveCmd)
	addServeFlags(serveCmd)
}
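The Service fields populated above map onto serve's logging and data-loading flags registered via addServeFlags; a hedged example, where the flag names mirror the field names and should be verified against `sftpgo serve --help`:

```bash
# Start the server in the foreground with an explicit log file and verbose logging
# (flag names are assumptions mirroring the Service fields above).
sftpgo serve --log-file-path /var/log/sftpgo/sftpgo.log --log-verbose
```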
@@ -1,16 +0,0 @@
package cmd

import (
	"github.com/spf13/cobra"
)

var (
	serviceCmd = &cobra.Command{
		Use:   "service",
		Short: "Manage SFTPGo Windows Service",
	}
)

func init() {
	rootCmd.AddCommand(serviceCmd)
}
@@ -1,51 +0,0 @@
package cmd

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/spf13/cobra"

	"github.com/drakkan/sftpgo/service"
	"github.com/drakkan/sftpgo/utils"
)

var (
	startCmd = &cobra.Command{
		Use:   "start",
		Short: "Start SFTPGo Windows Service",
		Run: func(cmd *cobra.Command, args []string) {
			configDir = utils.CleanDirInput(configDir)
			if !filepath.IsAbs(logFilePath) && utils.IsFileInputValid(logFilePath) {
				logFilePath = filepath.Join(configDir, logFilePath)
			}
			s := service.Service{
				ConfigDir:     configDir,
				ConfigFile:    configFile,
				LogFilePath:   logFilePath,
				LogMaxSize:    logMaxSize,
				LogMaxBackups: logMaxBackups,
				LogMaxAge:     logMaxAge,
				LogCompress:   logCompress,
				LogVerbose:    logVerbose,
				Shutdown:      make(chan bool),
			}
			winService := service.WindowsService{
				Service: s,
			}
			err := winService.RunService()
			if err != nil {
				fmt.Printf("Error starting service: %v\r\n", err)
				os.Exit(1)
			} else {
				fmt.Printf("Service started!\r\n")
			}
		},
	}
)

func init() {
	serviceCmd.AddCommand(startCmd)
	addServeFlags(startCmd)
}
@@ -1,35 +0,0 @@
package cmd

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"

	"github.com/drakkan/sftpgo/service"
)

var (
	statusCmd = &cobra.Command{
		Use:   "status",
		Short: "Retrieve the status for the SFTPGo Windows Service",
		Run: func(cmd *cobra.Command, args []string) {
			s := service.WindowsService{
				Service: service.Service{
					Shutdown: make(chan bool),
				},
			}
			status, err := s.Status()
			if err != nil {
				fmt.Printf("Error querying service status: %v\r\n", err)
				os.Exit(1)
			} else {
				fmt.Printf("Service status: %#v\r\n", status.String())
			}
		},
	}
)

func init() {
	serviceCmd.AddCommand(statusCmd)
}
@@ -1,35 +0,0 @@
package cmd

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"

	"github.com/drakkan/sftpgo/service"
)

var (
	stopCmd = &cobra.Command{
		Use:   "stop",
		Short: "Stop SFTPGo Windows Service",
		Run: func(cmd *cobra.Command, args []string) {
			s := service.WindowsService{
				Service: service.Service{
					Shutdown: make(chan bool),
				},
			}
			err := s.Stop()
			if err != nil {
				fmt.Printf("Error stopping service: %v\r\n", err)
				os.Exit(1)
			} else {
				fmt.Printf("Service stopped!\r\n")
			}
		},
	}
)

func init() {
	serviceCmd.AddCommand(stopCmd)
}
@ -1,35 +0,0 @@
package cmd

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"

	"github.com/drakkan/sftpgo/service"
)

var (
	uninstallCmd = &cobra.Command{
		Use:   "uninstall",
		Short: "Uninstall SFTPGo Windows Service",
		Run: func(cmd *cobra.Command, args []string) {
			s := service.WindowsService{
				Service: service.Service{
					Shutdown: make(chan bool),
				},
			}
			err := s.Uninstall()
			if err != nil {
				fmt.Printf("Error removing service: %v\r\n", err)
				os.Exit(1)
			} else {
				fmt.Printf("Service uninstalled\r\n")
			}
		},
	}
)

func init() {
	serviceCmd.AddCommand(uninstallCmd)
}
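Together with the start command earlier in this diff, the status, stop, and uninstall subcommands share one pattern: each builds a minimal service.WindowsService whose embedded Service carries only a Shutdown channel, since querying, stopping, or removing an already registered Windows service needs no further configuration. Assuming the default binary name, they would be invoked as `sftpgo service status`, `sftpgo service stop`, and `sftpgo service uninstall`.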
@ -1,236 +0,0 @@
package common

import (
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"net/http"
	"net/url"
	"os"
	"os/exec"
	"path/filepath"
	"strings"
	"time"

	"github.com/drakkan/sftpgo/dataprovider"
	"github.com/drakkan/sftpgo/httpclient"
	"github.com/drakkan/sftpgo/logger"
	"github.com/drakkan/sftpgo/utils"
	"github.com/drakkan/sftpgo/vfs"
)

var (
	errUnconfiguredAction    = errors.New("no hook is configured for this action")
	errNoHook                = errors.New("unable to execute action, no hook defined")
	errUnexpectedHTTResponse = errors.New("unexpected HTTP response code")
)

// ProtocolActions defines the actions to execute on file operations and SSH commands
type ProtocolActions struct {
	// Valid values are download, upload, pre-delete, delete, rename, ssh_cmd. Empty slice to disable
	ExecuteOn []string `json:"execute_on" mapstructure:"execute_on"`
	// Actions to be performed synchronously.
	// The pre-delete action is always executed synchronously while the other ones are asynchronous.
	// Executing an action synchronously means that SFTPGo will not return a result code to the client
	// (which is waiting for it) until your hook has completed its execution.
	ExecuteSync []string `json:"execute_sync" mapstructure:"execute_sync"`
	// Absolute path to an external program or an HTTP URL
	Hook string `json:"hook" mapstructure:"hook"`
}

var actionHandler ActionHandler = &defaultActionHandler{}

// InitializeActionHandler lets the user choose an action handler implementation.
//
// Do NOT call this function after application initialization.
func InitializeActionHandler(handler ActionHandler) {
	actionHandler = handler
}

// ExecutePreAction executes a pre-* action and returns the result
func ExecutePreAction(user *dataprovider.User, operation, filePath, virtualPath, protocol string, fileSize int64, openFlags int) error {
	if !utils.IsStringInSlice(operation, Config.Actions.ExecuteOn) {
		// for pre-delete we execute the internal handling on error, so we must return errUnconfiguredAction.
		// Other pre actions deny the operation on error, so if we have no configuration we must return
		// a nil error
		if operation == operationPreDelete {
			return errUnconfiguredAction
		}
		return nil
	}
	notification := newActionNotification(user, operation, filePath, virtualPath, "", "", protocol, fileSize, openFlags, nil)
	return actionHandler.Handle(notification)
}

// ExecuteActionNotification executes the defined hook, if any, for the specified action
func ExecuteActionNotification(user *dataprovider.User, operation, filePath, virtualPath, target, sshCmd, protocol string, fileSize int64, err error) {
	notification := newActionNotification(user, operation, filePath, virtualPath, target, sshCmd, protocol, fileSize, 0, err)

	if utils.IsStringInSlice(operation, Config.Actions.ExecuteSync) {
		actionHandler.Handle(notification) //nolint:errcheck
		return
	}

	go actionHandler.Handle(notification) //nolint:errcheck
}

// ActionHandler handles a notification for a Protocol Action.
type ActionHandler interface {
	Handle(notification *ActionNotification) error
}

// ActionNotification defines a notification for a Protocol Action.
type ActionNotification struct {
	Action     string `json:"action"`
	Username   string `json:"username"`
	Path       string `json:"path"`
	TargetPath string `json:"target_path,omitempty"`
	SSHCmd     string `json:"ssh_cmd,omitempty"`
	FileSize   int64  `json:"file_size,omitempty"`
	FsProvider int    `json:"fs_provider"`
	Bucket     string `json:"bucket,omitempty"`
	Endpoint   string `json:"endpoint,omitempty"`
	Status     int    `json:"status"`
	Protocol   string `json:"protocol"`
	OpenFlags  int    `json:"open_flags,omitempty"`
}

func newActionNotification(
	user *dataprovider.User,
	operation, filePath, virtualPath, target, sshCmd, protocol string,
	fileSize int64,
	openFlags int,
	err error,
) *ActionNotification {
	var bucket, endpoint string
	status := 1

	fsConfig := user.GetFsConfigForPath(virtualPath)

	switch fsConfig.Provider {
	case vfs.S3FilesystemProvider:
		bucket = fsConfig.S3Config.Bucket
		endpoint = fsConfig.S3Config.Endpoint
	case vfs.GCSFilesystemProvider:
		bucket = fsConfig.GCSConfig.Bucket
	case vfs.AzureBlobFilesystemProvider:
		bucket = fsConfig.AzBlobConfig.Container
		if fsConfig.AzBlobConfig.Endpoint != "" {
			endpoint = fsConfig.AzBlobConfig.Endpoint
		}
	case vfs.SFTPFilesystemProvider:
		endpoint = fsConfig.SFTPConfig.Endpoint
	}

	if err == ErrQuotaExceeded {
		status = 2
	} else if err != nil {
		status = 0
	}

	return &ActionNotification{
		Action:     operation,
		Username:   user.Username,
		Path:       filePath,
		TargetPath: target,
		SSHCmd:     sshCmd,
		FileSize:   fileSize,
		FsProvider: int(fsConfig.Provider),
		Bucket:     bucket,
		Endpoint:   endpoint,
		Status:     status,
		Protocol:   protocol,
		OpenFlags:  openFlags,
	}
}

type defaultActionHandler struct{}

func (h *defaultActionHandler) Handle(notification *ActionNotification) error {
	if !utils.IsStringInSlice(notification.Action, Config.Actions.ExecuteOn) {
		return errUnconfiguredAction
	}

	if Config.Actions.Hook == "" {
		logger.Warn(notification.Protocol, "", "Unable to send notification, no hook is defined")

		return errNoHook
	}

	if strings.HasPrefix(Config.Actions.Hook, "http") {
		return h.handleHTTP(notification)
	}

	return h.handleCommand(notification)
}

func (h *defaultActionHandler) handleHTTP(notification *ActionNotification) error {
	u, err := url.Parse(Config.Actions.Hook)
	if err != nil {
		logger.Warn(notification.Protocol, "", "Invalid hook %#v for operation %#v: %v", Config.Actions.Hook, notification.Action, err)
		return err
	}

	startTime := time.Now()
	respCode := 0

	var b bytes.Buffer
	_ = json.NewEncoder(&b).Encode(notification)

	resp, err := httpclient.RetryablePost(Config.Actions.Hook, "application/json", &b)
	if err == nil {
		respCode = resp.StatusCode
		resp.Body.Close()

		if respCode != http.StatusOK {
			err = errUnexpectedHTTResponse
		}
	}

	logger.Debug(notification.Protocol, "", "notified operation %#v to URL: %v status code: %v, elapsed: %v err: %v",
		notification.Action, u.Redacted(), respCode, time.Since(startTime), err)

	return err
}

func (h *defaultActionHandler) handleCommand(notification *ActionNotification) error {
	if !filepath.IsAbs(Config.Actions.Hook) {
		err := fmt.Errorf("invalid notification command %#v", Config.Actions.Hook)
		logger.Warn(notification.Protocol, "", "unable to execute notification command: %v", err)

		return err
	}

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	cmd := exec.CommandContext(ctx, Config.Actions.Hook, notification.Action, notification.Username, notification.Path, notification.TargetPath, notification.SSHCmd)
	cmd.Env = append(os.Environ(), notificationAsEnvVars(notification)...)

	startTime := time.Now()
	err := cmd.Run()

	logger.Debug(notification.Protocol, "", "executed command %#v with arguments: %#v, %#v, %#v, %#v, %#v, elapsed: %v, error: %v",
		Config.Actions.Hook, notification.Action, notification.Username, notification.Path, notification.TargetPath, notification.SSHCmd, time.Since(startTime), err)

	return err
}

func notificationAsEnvVars(notification *ActionNotification) []string {
	return []string{
		fmt.Sprintf("SFTPGO_ACTION=%v", notification.Action),
		fmt.Sprintf("SFTPGO_ACTION_USERNAME=%v", notification.Username),
		fmt.Sprintf("SFTPGO_ACTION_PATH=%v", notification.Path),
		fmt.Sprintf("SFTPGO_ACTION_TARGET=%v", notification.TargetPath),
		fmt.Sprintf("SFTPGO_ACTION_SSH_CMD=%v", notification.SSHCmd),
		fmt.Sprintf("SFTPGO_ACTION_FILE_SIZE=%v", notification.FileSize),
		fmt.Sprintf("SFTPGO_ACTION_FS_PROVIDER=%v", notification.FsProvider),
		fmt.Sprintf("SFTPGO_ACTION_BUCKET=%v", notification.Bucket),
		fmt.Sprintf("SFTPGO_ACTION_ENDPOINT=%v", notification.Endpoint),
		fmt.Sprintf("SFTPGO_ACTION_STATUS=%v", notification.Status),
		fmt.Sprintf("SFTPGO_ACTION_PROTOCOL=%v", notification.Protocol),
		fmt.Sprintf("SFTPGO_ACTION_OPEN_FLAGS=%v", notification.OpenFlags),
	}
}
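The ActionHandler interface above makes the whole hook mechanism pluggable. Below is a minimal sketch of a custom handler, assuming the package is importable as github.com/drakkan/sftpgo/common (inferred from the sibling import paths in this file); loggingActionHandler and its wiring are hypothetical, not part of the source.

package main

import (
	"fmt"

	"github.com/drakkan/sftpgo/common"
)

// loggingActionHandler is a hypothetical ActionHandler that prints each
// notification instead of invoking an external hook.
type loggingActionHandler struct{}

func (h *loggingActionHandler) Handle(notification *common.ActionNotification) error {
	fmt.Printf("action %q by %q on %q (status %d)\n",
		notification.Action, notification.Username, notification.Path, notification.Status)
	return nil
}

func main() {
	// must run before the application starts serving requests, per the
	// InitializeActionHandler doc comment above
	common.InitializeActionHandler(&loggingActionHandler{})
}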
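For command hooks, handleCommand passes the notification both as positional arguments and through the SFTPGO_ACTION_* environment variables built by notificationAsEnvVars. A hedged sketch of an external hook binary reading that environment (the binary itself is illustrative, not part of the source):

package main

import (
	"fmt"
	"os"
)

func main() {
	// read the variables set by notificationAsEnvVars
	action := os.Getenv("SFTPGO_ACTION")
	username := os.Getenv("SFTPGO_ACTION_USERNAME")
	path := os.Getenv("SFTPGO_ACTION_PATH")
	fmt.Fprintf(os.Stderr, "hook: %s by %s on %s\n", action, username, path)
	// a non-zero exit status is reported as an error to SFTPGo; for
	// synchronous pre-* actions that denies the operation
	os.Exit(0)
}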
@ -1,227 +0,0 @@
package common

import (
	"errors"
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"testing"

	"github.com/stretchr/testify/assert"

	"github.com/drakkan/sftpgo/dataprovider"
	"github.com/drakkan/sftpgo/vfs"
)

func TestNewActionNotification(t *testing.T) {
	user := &dataprovider.User{
		Username: "username",
	}
	user.FsConfig.Provider = vfs.LocalFilesystemProvider
	user.FsConfig.S3Config = vfs.S3FsConfig{
		Bucket:   "s3bucket",
		Endpoint: "endpoint",
	}
	user.FsConfig.GCSConfig = vfs.GCSFsConfig{
		Bucket: "gcsbucket",
	}
	user.FsConfig.AzBlobConfig = vfs.AzBlobFsConfig{
		Container: "azcontainer",
		Endpoint:  "azendpoint",
	}
	user.FsConfig.SFTPConfig = vfs.SFTPFsConfig{
		Endpoint: "sftpendpoint",
	}
	a := newActionNotification(user, operationDownload, "path", "vpath", "target", "", ProtocolSFTP, 123, 0, errors.New("fake error"))
	assert.Equal(t, user.Username, a.Username)
	assert.Equal(t, 0, len(a.Bucket))
	assert.Equal(t, 0, len(a.Endpoint))
	assert.Equal(t, 0, a.Status)

	user.FsConfig.Provider = vfs.S3FilesystemProvider
	a = newActionNotification(user, operationDownload, "path", "vpath", "target", "", ProtocolSSH, 123, 0, nil)
	assert.Equal(t, "s3bucket", a.Bucket)
	assert.Equal(t, "endpoint", a.Endpoint)
	assert.Equal(t, 1, a.Status)

	user.FsConfig.Provider = vfs.GCSFilesystemProvider
	a = newActionNotification(user, operationDownload, "path", "vpath", "target", "", ProtocolSCP, 123, 0, ErrQuotaExceeded)
	assert.Equal(t, "gcsbucket", a.Bucket)
	assert.Equal(t, 0, len(a.Endpoint))
	assert.Equal(t, 2, a.Status)

	user.FsConfig.Provider = vfs.AzureBlobFilesystemProvider
	a = newActionNotification(user, operationDownload, "path", "vpath", "target", "", ProtocolSCP, 123, 0, nil)
	assert.Equal(t, "azcontainer", a.Bucket)
	assert.Equal(t, "azendpoint", a.Endpoint)
	assert.Equal(t, 1, a.Status)

	a = newActionNotification(user, operationDownload, "path", "vpath", "target", "", ProtocolSCP, 123, os.O_APPEND, nil)
	assert.Equal(t, "azcontainer", a.Bucket)
	assert.Equal(t, "azendpoint", a.Endpoint)
	assert.Equal(t, 1, a.Status)
	assert.Equal(t, os.O_APPEND, a.OpenFlags)

	user.FsConfig.Provider = vfs.SFTPFilesystemProvider
	a = newActionNotification(user, operationDownload, "path", "vpath", "target", "", ProtocolSFTP, 123, 0, nil)
	assert.Equal(t, "sftpendpoint", a.Endpoint)
}

func TestActionHTTP(t *testing.T) {
	actionsCopy := Config.Actions

	Config.Actions = ProtocolActions{
		ExecuteOn: []string{operationDownload},
		Hook:      fmt.Sprintf("http://%v", httpAddr),
	}
	user := &dataprovider.User{
		Username: "username",
	}
	a := newActionNotification(user, operationDownload, "path", "vpath", "target", "", ProtocolSFTP, 123, 0, nil)
	err := actionHandler.Handle(a)
	assert.NoError(t, err)

	Config.Actions.Hook = "http://invalid:1234"
	err = actionHandler.Handle(a)
	assert.Error(t, err)

	Config.Actions.Hook = fmt.Sprintf("http://%v/404", httpAddr)
	err = actionHandler.Handle(a)
	if assert.Error(t, err) {
		assert.EqualError(t, err, errUnexpectedHTTResponse.Error())
	}

	Config.Actions = actionsCopy
}

func TestActionCMD(t *testing.T) {
	if runtime.GOOS == osWindows {
		t.Skip("this test is not available on Windows")
	}
	actionsCopy := Config.Actions

	hookCmd, err := exec.LookPath("true")
	assert.NoError(t, err)

	Config.Actions = ProtocolActions{
		ExecuteOn: []string{operationDownload},
		Hook:      hookCmd,
	}
	user := &dataprovider.User{
		Username: "username",
	}
	a := newActionNotification(user, operationDownload, "path", "vpath", "target", "", ProtocolSFTP, 123, 0, nil)
	err = actionHandler.Handle(a)
	assert.NoError(t, err)

	ExecuteActionNotification(user, OperationSSHCmd, "path", "vpath", "target", "sha1sum", ProtocolSSH, 0, nil)

	Config.Actions = actionsCopy
}

func TestWrongActions(t *testing.T) {
	actionsCopy := Config.Actions

	badCommand := "/bad/command"
	if runtime.GOOS == osWindows {
		badCommand = "C:\\bad\\command"
	}
	Config.Actions = ProtocolActions{
		ExecuteOn: []string{operationUpload},
		Hook:      badCommand,
	}
	user := &dataprovider.User{
		Username: "username",
	}

	a := newActionNotification(user, operationUpload, "", "", "", "", ProtocolSFTP, 123, 0, nil)
	err := actionHandler.Handle(a)
	assert.Error(t, err, "action with bad command must fail")

	a.Action = operationDelete
	err = actionHandler.Handle(a)
	assert.EqualError(t, err, errUnconfiguredAction.Error())

	Config.Actions.Hook = "http://foo\x7f.com/"
	a.Action = operationUpload
	err = actionHandler.Handle(a)
	assert.Error(t, err, "action with bad url must fail")

	Config.Actions.Hook = ""
	err = actionHandler.Handle(a)
	if assert.Error(t, err) {
		assert.EqualError(t, err, errNoHook.Error())
	}

	Config.Actions.Hook = "relative path"
	err = actionHandler.Handle(a)
	if assert.Error(t, err) {
		assert.EqualError(t, err, fmt.Sprintf("invalid notification command %#v", Config.Actions.Hook))
	}

	Config.Actions = actionsCopy
}

func TestPreDeleteAction(t *testing.T) {
	if runtime.GOOS == osWindows {
		t.Skip("this test is not available on Windows")
	}
	actionsCopy := Config.Actions

	hookCmd, err := exec.LookPath("true")
	assert.NoError(t, err)
	Config.Actions = ProtocolActions{
		ExecuteOn: []string{operationPreDelete},
		Hook:      hookCmd,
	}
	homeDir := filepath.Join(os.TempDir(), "test_user")
	err = os.MkdirAll(homeDir, os.ModePerm)
	assert.NoError(t, err)
	user := dataprovider.User{
		Username: "username",
		HomeDir:  homeDir,
	}
	user.Permissions = make(map[string][]string)
	user.Permissions["/"] = []string{dataprovider.PermAny}
	fs := vfs.NewOsFs("id", homeDir, "")
	c := NewBaseConnection("id", ProtocolSFTP, "", user)

	testfile := filepath.Join(user.HomeDir, "testfile")
	err = os.WriteFile(testfile, []byte("test"), os.ModePerm)
	assert.NoError(t, err)
	info, err := os.Stat(testfile)
	assert.NoError(t, err)
	err = c.RemoveFile(fs, testfile, "testfile", info)
	assert.NoError(t, err)
	assert.FileExists(t, testfile)

	os.RemoveAll(homeDir)

	Config.Actions = actionsCopy
}

type actionHandlerStub struct {
	called bool
}

func (h *actionHandlerStub) Handle(notification *ActionNotification) error {
	h.called = true

	return nil
}

func TestInitializeActionHandler(t *testing.T) {
	handler := &actionHandlerStub{}

	InitializeActionHandler(handler)
	t.Cleanup(func() {
		InitializeActionHandler(&defaultActionHandler{})
	})

	err := actionHandler.Handle(&ActionNotification{})

	assert.NoError(t, err)
	assert.True(t, handler.called)
}
@ -1,51 +0,0 @@
package common

import (
	"sync"
	"sync/atomic"

	"github.com/drakkan/sftpgo/logger"
)

// clientsMap is a struct containing the map of the connected clients
type clientsMap struct {
	totalConnections int32
	mu               sync.RWMutex
	clients          map[string]int
}

func (c *clientsMap) add(source string) {
	atomic.AddInt32(&c.totalConnections, 1)

	c.mu.Lock()
	defer c.mu.Unlock()

	c.clients[source]++
}

func (c *clientsMap) remove(source string) {
	c.mu.Lock()
	defer c.mu.Unlock()

	if val, ok := c.clients[source]; ok {
		atomic.AddInt32(&c.totalConnections, -1)
		c.clients[source]--
		if val > 1 {
			return
		}
		delete(c.clients, source)
	} else {
		logger.Warn(logSender, "", "cannot remove client %v, it is not mapped", source)
	}
}

func (c *clientsMap) getTotal() int32 {
	return atomic.LoadInt32(&c.totalConnections)
}

func (c *clientsMap) getTotalFrom(source string) int {
	c.mu.RLock()
	defer c.mu.RUnlock()

	return c.clients[source]
}
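Since the methods are unexported, the map is only usable from within package common. A minimal sketch of the bookkeeping it performs (the function and the addresses are illustrative):

func exampleClientsMap() {
	m := clientsMap{clients: make(map[string]int)}
	m.add("192.0.2.1")
	m.add("192.0.2.1")
	m.add("198.51.100.7")
	_ = m.getTotal()                // 3 tracked connections overall
	_ = m.getTotalFrom("192.0.2.1") // 2 from the first source
	m.remove("192.0.2.1")           // decrements; the entry survives while its count stays above zero
}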
common/common.go (985 lines)
@ -1,985 +0,0 @@
// Package common defines code shared among file transfer packages and protocols
package common

import (
	"context"
	"errors"
	"fmt"
	"net"
	"net/http"
	"net/url"
	"os"
	"os/exec"
	"path/filepath"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/pires/go-proxyproto"

	"github.com/drakkan/sftpgo/dataprovider"
	"github.com/drakkan/sftpgo/httpclient"
	"github.com/drakkan/sftpgo/logger"
	"github.com/drakkan/sftpgo/metrics"
	"github.com/drakkan/sftpgo/utils"
	"github.com/drakkan/sftpgo/vfs"
)

// constants
const (
	logSender         = "common"
	uploadLogSender   = "Upload"
	downloadLogSender = "Download"
	renameLogSender   = "Rename"
	rmdirLogSender    = "Rmdir"
	mkdirLogSender    = "Mkdir"
	symlinkLogSender  = "Symlink"
	removeLogSender   = "Remove"
	chownLogSender    = "Chown"
	chmodLogSender    = "Chmod"
	chtimesLogSender  = "Chtimes"
	truncateLogSender = "Truncate"
	operationDownload = "download"
	operationUpload   = "upload"
	operationDelete   = "delete"
	// Pre-download action name
	OperationPreDownload = "pre-download"
	// Pre-upload action name
	OperationPreUpload = "pre-upload"
	operationPreDelete = "pre-delete"
	operationRename    = "rename"
	// SSH command action name
	OperationSSHCmd          = "ssh_cmd"
	chtimesFormat            = "2006-01-02T15:04:05" // YYYY-MM-DDTHH:MM:SS
	idleTimeoutCheckInterval = 3 * time.Minute
)

// Stat flags
const (
	StatAttrUIDGID = 1
	StatAttrPerms  = 2
	StatAttrTimes  = 4
	StatAttrSize   = 8
)

// Transfer types
const (
	TransferUpload = iota
	TransferDownload
)

// Supported protocols
const (
	ProtocolSFTP   = "SFTP"
	ProtocolSCP    = "SCP"
	ProtocolSSH    = "SSH"
	ProtocolFTP    = "FTP"
	ProtocolWebDAV = "DAV"
	ProtocolHTTP   = "HTTP"
)

// Upload modes
const (
	UploadModeStandard = iota
	UploadModeAtomic
	UploadModeAtomicWithResume
)

func init() {
	Connections.clients = clientsMap{
		clients: make(map[string]int),
	}
}

// errors definitions
var (
	ErrPermissionDenied     = errors.New("permission denied")
	ErrNotExist             = errors.New("no such file or directory")
	ErrOpUnsupported        = errors.New("operation unsupported")
	ErrGenericFailure       = errors.New("failure")
	ErrQuotaExceeded        = errors.New("denying write due to space limit")
	ErrSkipPermissionsCheck = errors.New("permission check skipped")
	ErrConnectionDenied     = errors.New("you are not allowed to connect")
	ErrNoBinding            = errors.New("no binding configured")
	ErrCrtRevoked           = errors.New("your certificate has been revoked")
	ErrNoCredentials        = errors.New("no credential provided")
	ErrInternalFailure      = errors.New("internal failure")
	errNoTransfer           = errors.New("requested transfer not found")
	errTransferMismatch     = errors.New("transfer mismatch")
)

var (
	// Config is the configuration for the supported protocols
	Config Configuration
	// Connections is the list of active connections
	Connections ActiveConnections
	// QuotaScans is the list of active quota scans
	QuotaScans            ActiveScans
	idleTimeoutTicker     *time.Ticker
	idleTimeoutTickerDone chan bool
	supportedProtocols    = []string{ProtocolSFTP, ProtocolSCP, ProtocolSSH, ProtocolFTP, ProtocolWebDAV, ProtocolHTTP}
	// the map key is the protocol, for each protocol we can have multiple rate limiters
	rateLimiters map[string][]*rateLimiter
)

// Initialize sets the common configuration
func Initialize(c Configuration) error {
	Config = c
	Config.idleLoginTimeout = 2 * time.Minute
	Config.idleTimeoutAsDuration = time.Duration(Config.IdleTimeout) * time.Minute
	if Config.IdleTimeout > 0 {
		startIdleTimeoutTicker(idleTimeoutCheckInterval)
	}
	Config.defender = nil
	if c.DefenderConfig.Enabled {
		defender, err := newInMemoryDefender(&c.DefenderConfig)
		if err != nil {
			return fmt.Errorf("defender initialization error: %v", err)
		}
		logger.Info(logSender, "", "defender initialized with config %+v", c.DefenderConfig)
		Config.defender = defender
	}
	rateLimiters = make(map[string][]*rateLimiter)
	for _, rlCfg := range c.RateLimitersConfig {
		if rlCfg.isEnabled() {
			if err := rlCfg.validate(); err != nil {
				return fmt.Errorf("rate limiters initialization error: %v", err)
			}
			rateLimiter := rlCfg.getLimiter()
			for _, protocol := range rlCfg.Protocols {
				rateLimiters[protocol] = append(rateLimiters[protocol], rateLimiter)
			}
		}
	}
	vfs.SetTempPath(c.TempPath)
	dataprovider.SetTempPath(c.TempPath)
	return nil
}
// LimitRate blocks until all the configured rate limiters
// allow one event to happen.
// It returns an error if the time to wait exceeds the max
// allowed delay
func LimitRate(protocol, ip string) (time.Duration, error) {
	for _, limiter := range rateLimiters[protocol] {
		if delay, err := limiter.Wait(ip); err != nil {
			logger.Debug(logSender, "", "protocol %v ip %v: %v", protocol, ip, err)
			return delay, err
		}
	}
	return 0, nil
}
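A hedged usage sketch: a protocol front end would call LimitRate before serving a new connection and turn a returned error into a client-visible failure (acceptWithRateLimit is a hypothetical caller; LimitRate and ProtocolSFTP come from this package):

func acceptWithRateLimit(ip string) error {
	if delay, err := LimitRate(ProtocolSFTP, ip); err != nil {
		// the limiter already waited up to its configured maximum delay
		return fmt.Errorf("connection from %v throttled after %v: %w", ip, delay, err)
	}
	return nil
}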
// ReloadDefender reloads the defender's block and safe lists
func ReloadDefender() error {
	if Config.defender == nil {
		return nil
	}

	return Config.defender.Reload()
}

// IsBanned returns true if the specified IP address is banned
func IsBanned(ip string) bool {
	if Config.defender == nil {
		return false
	}

	return Config.defender.IsBanned(ip)
}

// GetDefenderBanTime returns the ban time for the given IP
// or nil if the IP is not banned or the defender is disabled
func GetDefenderBanTime(ip string) *time.Time {
	if Config.defender == nil {
		return nil
	}

	return Config.defender.GetBanTime(ip)
}

// GetDefenderHosts returns hosts that are banned or for which some violations have been detected
func GetDefenderHosts() []*DefenderEntry {
	if Config.defender == nil {
		return nil
	}

	return Config.defender.GetHosts()
}

// GetDefenderHost returns a defender host by ip, if any
func GetDefenderHost(ip string) (*DefenderEntry, error) {
	if Config.defender == nil {
		return nil, errors.New("defender is disabled")
	}

	return Config.defender.GetHost(ip)
}

// DeleteDefenderHost removes the specified IP address from the defender lists
func DeleteDefenderHost(ip string) bool {
	if Config.defender == nil {
		return false
	}

	return Config.defender.DeleteHost(ip)
}

// GetDefenderScore returns the score for the given IP
func GetDefenderScore(ip string) int {
	if Config.defender == nil {
		return 0
	}

	return Config.defender.GetScore(ip)
}

// AddDefenderEvent adds the specified defender event for the given IP
func AddDefenderEvent(ip string, event HostEvent) {
	if Config.defender == nil {
		return
	}

	Config.defender.AddEvent(ip, event)
}
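A hedged sketch of how these helpers compose on a connection path (screenClient and onFailedPreAuth are hypothetical; HostEventNoLoginTried is the event value used later in this file):

func screenClient(ipAddr string) error {
	// every defender helper is a safe no-op when the defender is disabled
	if IsBanned(ipAddr) {
		return ErrConnectionDenied
	}
	return nil
}

func onFailedPreAuth(ipAddr string) {
	// repeated events from the same host raise its score until it is banned
	AddDefenderEvent(ipAddr, HostEventNoLoginTried)
}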
// the ticker cannot be started/stopped from multiple goroutines
func startIdleTimeoutTicker(duration time.Duration) {
	stopIdleTimeoutTicker()
	idleTimeoutTicker = time.NewTicker(duration)
	idleTimeoutTickerDone = make(chan bool)
	go func() {
		for {
			select {
			case <-idleTimeoutTickerDone:
				return
			case <-idleTimeoutTicker.C:
				Connections.checkIdles()
			}
		}
	}()
}

func stopIdleTimeoutTicker() {
	if idleTimeoutTicker != nil {
		idleTimeoutTicker.Stop()
		idleTimeoutTickerDone <- true
		idleTimeoutTicker = nil
	}
}
// ActiveTransfer defines the interface for the current active transfers
type ActiveTransfer interface {
	GetID() uint64
	GetType() int
	GetSize() int64
	GetVirtualPath() string
	GetStartTime() time.Time
	SignalClose()
	Truncate(fsPath string, size int64) (int64, error)
	GetRealFsPath(fsPath string) string
}

// ActiveConnection defines the interface for the current active connections
type ActiveConnection interface {
	GetID() string
	GetUsername() string
	GetRemoteAddress() string
	GetClientVersion() string
	GetProtocol() string
	GetConnectionTime() time.Time
	GetLastActivity() time.Time
	GetCommand() string
	Disconnect() error
	AddTransfer(t ActiveTransfer)
	RemoveTransfer(t ActiveTransfer)
	GetTransfers() []ConnectionTransfer
	CloseFS() error
}

// StatAttributes defines the attributes for set stat commands
type StatAttributes struct {
	Mode  os.FileMode
	Atime time.Time
	Mtime time.Time
	UID   int
	GID   int
	Flags int
	Size  int64
}

// ConnectionTransfer defines the transfer details to expose
type ConnectionTransfer struct {
	ID            uint64 `json:"-"`
	OperationType string `json:"operation_type"`
	StartTime     int64  `json:"start_time"`
	Size          int64  `json:"size"`
	VirtualPath   string `json:"path"`
}

func (t *ConnectionTransfer) getConnectionTransferAsString() string {
	result := ""
	switch t.OperationType {
	case operationUpload:
		result += "UL "
	case operationDownload:
		result += "DL "
	}
	result += fmt.Sprintf("%#v ", t.VirtualPath)
	if t.Size > 0 {
		elapsed := time.Since(utils.GetTimeFromMsecSinceEpoch(t.StartTime))
		speed := float64(t.Size) / float64(utils.GetTimeAsMsSinceEpoch(time.Now())-t.StartTime)
		result += fmt.Sprintf("Size: %#v Elapsed: %#v Speed: \"%.1f KB/s\"", utils.ByteCountIEC(t.Size),
			utils.GetDurationAsString(elapsed), speed)
	}
	return result
}
// Configuration defines configuration parameters common to all supported protocols
type Configuration struct {
	// Maximum idle timeout as minutes. If a client is idle for a time that exceeds this setting it will be disconnected.
	// 0 means disabled
	IdleTimeout int `json:"idle_timeout" mapstructure:"idle_timeout"`
	// UploadMode 0 means standard: the files are uploaded directly to the requested path.
	// 1 means atomic: the files are uploaded to a temporary path and renamed to the requested path
	// when the client ends the upload. Atomic mode avoids problems such as a web server that
	// serves partial files while they are still being uploaded.
	// In atomic mode, if there is an upload error the temporary file is deleted, so the requested
	// upload path will not contain a partial file.
	// 2 means atomic with resume support: as atomic, but if there is an upload error the temporary
	// file is renamed to the requested path and not deleted; this way a client can reconnect and resume
	// the upload.
	UploadMode int `json:"upload_mode" mapstructure:"upload_mode"`
	// Actions to execute for SFTP file operations and SSH commands
	Actions ProtocolActions `json:"actions" mapstructure:"actions"`
	// SetstatMode 0 means "normal mode": requests for changing permissions and owner/group are executed.
	// 1 means "ignore mode": requests for changing permissions and owner/group are silently ignored.
	// 2 means "ignore mode for cloud fs": requests for changing permissions and owner/group/time are
	// silently ignored for cloud based filesystems such as S3, GCS, Azure Blob
	SetstatMode int `json:"setstat_mode" mapstructure:"setstat_mode"`
	// TempPath defines the path for temporary files such as those used for atomic uploads or file pipes.
	// If you set this option you must make sure that the defined path exists, is accessible for writing
	// by the user running SFTPGo, and is on the same filesystem as the users' home directories, otherwise
	// the renaming for atomic uploads will become a copy and therefore may take a long time.
	// The temporary files are not namespaced. The default is generally fine. Leave empty for the default.
	TempPath string `json:"temp_path" mapstructure:"temp_path"`
	// Support for HAProxy PROXY protocol.
	// If you are running SFTPGo behind a proxy server such as HAProxy, AWS ELB or NGINX, you can enable
	// the proxy protocol. It provides a convenient way to safely transport connection information
	// such as a client's address across multiple layers of NAT or TCP proxies to get the real
	// client IP address instead of the proxy IP. Both protocol versions 1 and 2 are supported.
	// - 0 means disabled
	// - 1 means proxy protocol enabled. Proxy header will be used and requests without proxy header will be accepted.
	// - 2 means proxy protocol required. Proxy header will be used and requests without proxy header will be rejected.
	// If the proxy protocol is enabled in SFTPGo then you have to enable the protocol in your proxy configuration too,
	// for example for HAProxy add "send-proxy" or "send-proxy-v2" to each server configuration line.
	ProxyProtocol int `json:"proxy_protocol" mapstructure:"proxy_protocol"`
	// List of IP addresses and IP ranges allowed to send the proxy header.
	// If proxy protocol is set to 1 and we receive a proxy header from an IP that is not in the list then the
	// connection will be accepted and the header will be ignored.
	// If proxy protocol is set to 2 and we receive a proxy header from an IP that is not in the list then the
	// connection will be rejected.
	ProxyAllowed []string `json:"proxy_allowed" mapstructure:"proxy_allowed"`
	// Absolute path to an external program or an HTTP URL to invoke as soon as SFTPGo starts.
	// If you define an HTTP URL it will be invoked using a `GET` request.
	// Please note that SFTPGo services may not yet be available when this hook is run.
	// Leave empty to disable.
	StartupHook string `json:"startup_hook" mapstructure:"startup_hook"`
	// Absolute path to an external program or an HTTP URL to invoke after a user connects
	// and before they try to log in. It allows you to reject the connection based on the source
	// IP address. Leave empty to disable.
	PostConnectHook string `json:"post_connect_hook" mapstructure:"post_connect_hook"`
	// Maximum number of concurrent client connections. 0 means unlimited
	MaxTotalConnections int `json:"max_total_connections" mapstructure:"max_total_connections"`
	// Maximum number of concurrent client connections from the same host (IP). 0 means unlimited
	MaxPerHostConnections int `json:"max_per_host_connections" mapstructure:"max_per_host_connections"`
	// Defender configuration
	DefenderConfig DefenderConfig `json:"defender" mapstructure:"defender"`
	// Rate limiter configurations
	RateLimitersConfig    []RateLimiterConfig `json:"rate_limiters" mapstructure:"rate_limiters"`
	idleTimeoutAsDuration time.Duration
	idleLoginTimeout      time.Duration
	defender              Defender
}

// IsAtomicUploadEnabled returns true if atomic upload is enabled
func (c *Configuration) IsAtomicUploadEnabled() bool {
	return c.UploadMode == UploadModeAtomic || c.UploadMode == UploadModeAtomicWithResume
}
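A hedged sketch of wiring this configuration at startup; the values are illustrative only, and Initialize is the entry point defined earlier in this file:

func setupCommon() error {
	cfg := Configuration{
		IdleTimeout:           15, // minutes; 0 disables the idle checker
		UploadMode:            UploadModeAtomic,
		ProxyProtocol:         1, // optional PROXY header
		ProxyAllowed:          []string{"10.0.0.0/8"},
		MaxTotalConnections:   100,
		MaxPerHostConnections: 10,
	}
	return Initialize(cfg)
}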
// GetProxyListener returns a wrapper for the given listener that supports the
// HAProxy Proxy Protocol, or nil if the proxy protocol is not configured
func (c *Configuration) GetProxyListener(listener net.Listener) (*proxyproto.Listener, error) {
	var proxyListener *proxyproto.Listener
	var err error
	if c.ProxyProtocol > 0 {
		var policyFunc func(upstream net.Addr) (proxyproto.Policy, error)
		if c.ProxyProtocol == 1 && len(c.ProxyAllowed) > 0 {
			policyFunc, err = proxyproto.LaxWhiteListPolicy(c.ProxyAllowed)
			if err != nil {
				return nil, err
			}
		}
		if c.ProxyProtocol == 2 {
			if len(c.ProxyAllowed) == 0 {
				policyFunc = func(upstream net.Addr) (proxyproto.Policy, error) {
					return proxyproto.REQUIRE, nil
				}
			} else {
				policyFunc, err = proxyproto.StrictWhiteListPolicy(c.ProxyAllowed)
				if err != nil {
					return nil, err
				}
			}
		}
		proxyListener = &proxyproto.Listener{
			Listener: listener,
			Policy:   policyFunc,
		}
	}
	return proxyListener, nil
}
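A hedged usage sketch: a protocol server wraps its plain TCP listener before accepting, and keeps the original listener when the method returns nil (the function name and address are illustrative):

func listenWithProxySupport() (net.Listener, error) {
	ln, err := net.Listen("tcp", ":2022")
	if err != nil {
		return nil, err
	}
	if proxyLn, err := Config.GetProxyListener(ln); err != nil {
		return nil, err
	} else if proxyLn != nil {
		// accepted connections now report the real client address
		return proxyLn, nil
	}
	return ln, nil
}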
// ExecuteStartupHook runs the startup hook if defined
func (c *Configuration) ExecuteStartupHook() error {
	if c.StartupHook == "" {
		return nil
	}
	if strings.HasPrefix(c.StartupHook, "http") {
		var url *url.URL
		url, err := url.Parse(c.StartupHook)
		if err != nil {
			logger.Warn(logSender, "", "Invalid startup hook %#v: %v", c.StartupHook, err)
			return err
		}
		startTime := time.Now()
		resp, err := httpclient.RetryableGet(url.String())
		if err != nil {
			logger.Warn(logSender, "", "Error executing startup hook: %v", err)
			return err
		}
		defer resp.Body.Close()
		logger.Debug(logSender, "", "Startup hook executed, elapsed: %v, response code: %v", time.Since(startTime), resp.StatusCode)
		return nil
	}
	if !filepath.IsAbs(c.StartupHook) {
		err := fmt.Errorf("invalid startup hook %#v", c.StartupHook)
		logger.Warn(logSender, "", "Invalid startup hook %#v", c.StartupHook)
		return err
	}
	startTime := time.Now()
	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
	defer cancel()
	cmd := exec.CommandContext(ctx, c.StartupHook)
	err := cmd.Run()
	logger.Debug(logSender, "", "Startup hook executed, elapsed: %v, error: %v", time.Since(startTime), err)
	return nil
}
// ExecutePostConnectHook executes the post connect hook if defined
func (c *Configuration) ExecutePostConnectHook(ipAddr, protocol string) error {
	if c.PostConnectHook == "" {
		return nil
	}
	if strings.HasPrefix(c.PostConnectHook, "http") {
		var url *url.URL
		url, err := url.Parse(c.PostConnectHook)
		if err != nil {
			logger.Warn(protocol, "", "Login from ip %#v denied, invalid post connect hook %#v: %v",
				ipAddr, c.PostConnectHook, err)
			return err
		}
		q := url.Query()
		q.Add("ip", ipAddr)
		q.Add("protocol", protocol)
		url.RawQuery = q.Encode()

		resp, err := httpclient.RetryableGet(url.String())
		if err != nil {
			logger.Warn(protocol, "", "Login from ip %#v denied, error executing post connect hook: %v", ipAddr, err)
			return err
		}
		defer resp.Body.Close()
		if resp.StatusCode != http.StatusOK {
			logger.Warn(protocol, "", "Login from ip %#v denied, post connect hook response code: %v", ipAddr, resp.StatusCode)
			return errUnexpectedHTTResponse
		}
		return nil
	}
	if !filepath.IsAbs(c.PostConnectHook) {
		err := fmt.Errorf("invalid post connect hook %#v", c.PostConnectHook)
		logger.Warn(protocol, "", "Login from ip %#v denied: %v", ipAddr, err)
		return err
	}
	ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
	defer cancel()
	cmd := exec.CommandContext(ctx, c.PostConnectHook)
	cmd.Env = append(os.Environ(),
		fmt.Sprintf("SFTPGO_CONNECTION_IP=%v", ipAddr),
		fmt.Sprintf("SFTPGO_CONNECTION_PROTOCOL=%v", protocol))
	err := cmd.Run()
	if err != nil {
		logger.Warn(protocol, "", "Login from ip %#v denied, connect hook error: %v", ipAddr, err)
	}
	return err
}
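For the HTTP form of the hook, SFTPGo issues a GET with ip and protocol query parameters and treats any non-200 response as a denied connection, per the status check above. A hedged sketch of a matching endpoint (the handler name and deny rule are illustrative):

func postConnectHookHandler(w http.ResponseWriter, r *http.Request) {
	ip := r.URL.Query().Get("ip")
	protocol := r.URL.Query().Get("protocol")
	if ip == "203.0.113.9" && protocol == ProtocolFTP { // example deny rule
		http.Error(w, "denied", http.StatusForbidden)
		return
	}
	w.WriteHeader(http.StatusOK)
}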
// SSHConnection defines an ssh connection.
// Each SSH connection can open several channels for SFTP or SSH commands
type SSHConnection struct {
	id           string
	conn         net.Conn
	lastActivity int64
}

// NewSSHConnection returns a new SSHConnection
func NewSSHConnection(id string, conn net.Conn) *SSHConnection {
	return &SSHConnection{
		id:           id,
		conn:         conn,
		lastActivity: time.Now().UnixNano(),
	}
}

// GetID returns the ID for this SSHConnection
func (c *SSHConnection) GetID() string {
	return c.id
}

// UpdateLastActivity updates last activity for this connection
func (c *SSHConnection) UpdateLastActivity() {
	atomic.StoreInt64(&c.lastActivity, time.Now().UnixNano())
}

// GetLastActivity returns the last connection activity
func (c *SSHConnection) GetLastActivity() time.Time {
	return time.Unix(0, atomic.LoadInt64(&c.lastActivity))
}

// Close closes the underlying network connection
func (c *SSHConnection) Close() error {
	return c.conn.Close()
}

// ActiveConnections holds the current active connections with the associated transfers
type ActiveConnections struct {
	// clients contains both authenticated and established connections and the ones waiting
	// for authentication
	clients clientsMap
	sync.RWMutex
	connections    []ActiveConnection
	sshConnections []*SSHConnection
}
// GetActiveSessions returns the number of active sessions for the given username.
// We return the open sessions for any protocol
func (conns *ActiveConnections) GetActiveSessions(username string) int {
	conns.RLock()
	defer conns.RUnlock()

	numSessions := 0
	for _, c := range conns.connections {
		if c.GetUsername() == username {
			numSessions++
		}
	}
	return numSessions
}

// Add adds a new connection to the active ones
func (conns *ActiveConnections) Add(c ActiveConnection) {
	conns.Lock()
	defer conns.Unlock()

	conns.connections = append(conns.connections, c)
	metrics.UpdateActiveConnectionsSize(len(conns.connections))
	logger.Debug(c.GetProtocol(), c.GetID(), "connection added, num open connections: %v", len(conns.connections))
}

// Swap replaces an existing connection with the given one.
// This method is useful if you have to change some connection details;
// for example for FTP it is used to update the connection once the user
// authenticates
func (conns *ActiveConnections) Swap(c ActiveConnection) error {
	conns.Lock()
	defer conns.Unlock()

	for idx, conn := range conns.connections {
		if conn.GetID() == c.GetID() {
			conn = nil
			conns.connections[idx] = c
			return nil
		}
	}
	return errors.New("connection to swap not found")
}

// Remove removes a connection from the active ones
func (conns *ActiveConnections) Remove(connectionID string) {
	conns.Lock()
	defer conns.Unlock()

	for idx, conn := range conns.connections {
		if conn.GetID() == connectionID {
			err := conn.CloseFS()
			lastIdx := len(conns.connections) - 1
			conns.connections[idx] = conns.connections[lastIdx]
			conns.connections[lastIdx] = nil
			conns.connections = conns.connections[:lastIdx]
			metrics.UpdateActiveConnectionsSize(lastIdx)
			logger.Debug(conn.GetProtocol(), conn.GetID(), "connection removed, close fs error: %v, num open connections: %v",
				err, lastIdx)
			return
		}
	}
	logger.Warn(logSender, "", "connection id %#v to remove not found!", connectionID)
}

// Close closes an active connection.
// It returns true on success
func (conns *ActiveConnections) Close(connectionID string) bool {
	conns.RLock()
	result := false

	for _, c := range conns.connections {
		if c.GetID() == connectionID {
			defer func(conn ActiveConnection) {
				err := conn.Disconnect()
				logger.Debug(conn.GetProtocol(), conn.GetID(), "close connection requested, close err: %v", err)
			}(c)
			result = true
			break
		}
	}

	conns.RUnlock()
	return result
}

// AddSSHConnection adds a new ssh connection to the active ones
func (conns *ActiveConnections) AddSSHConnection(c *SSHConnection) {
	conns.Lock()
	defer conns.Unlock()

	conns.sshConnections = append(conns.sshConnections, c)
	logger.Debug(logSender, c.GetID(), "ssh connection added, num open connections: %v", len(conns.sshConnections))
}

// RemoveSSHConnection removes a connection from the active ones
func (conns *ActiveConnections) RemoveSSHConnection(connectionID string) {
	conns.Lock()
	defer conns.Unlock()

	for idx, conn := range conns.sshConnections {
		if conn.GetID() == connectionID {
			lastIdx := len(conns.sshConnections) - 1
			conns.sshConnections[idx] = conns.sshConnections[lastIdx]
			conns.sshConnections[lastIdx] = nil
			conns.sshConnections = conns.sshConnections[:lastIdx]
			logger.Debug(logSender, conn.GetID(), "ssh connection removed, num open ssh connections: %v", lastIdx)
			return
		}
	}
	logger.Warn(logSender, "", "ssh connection to remove with id %#v not found!", connectionID)
}

func (conns *ActiveConnections) checkIdles() {
	conns.RLock()

	for _, sshConn := range conns.sshConnections {
		idleTime := time.Since(sshConn.GetLastActivity())
		if idleTime > Config.idleTimeoutAsDuration {
			// we close an ssh connection if it has no active connections associated
			idToMatch := fmt.Sprintf("_%v_", sshConn.GetID())
			toClose := true
			for _, conn := range conns.connections {
				if strings.Contains(conn.GetID(), idToMatch) {
					toClose = false
					break
				}
			}
			if toClose {
				defer func(c *SSHConnection) {
					err := c.Close()
					logger.Debug(logSender, c.GetID(), "close idle SSH connection, idle time: %v, close err: %v",
						time.Since(c.GetLastActivity()), err)
				}(sshConn)
			}
		}
	}

	for _, c := range conns.connections {
		idleTime := time.Since(c.GetLastActivity())
		isUnauthenticatedFTPUser := (c.GetProtocol() == ProtocolFTP && c.GetUsername() == "")

		if idleTime > Config.idleTimeoutAsDuration || (isUnauthenticatedFTPUser && idleTime > Config.idleLoginTimeout) {
			defer func(conn ActiveConnection, isFTPNoAuth bool) {
				err := conn.Disconnect()
				logger.Debug(conn.GetProtocol(), conn.GetID(), "close idle connection, idle time: %v, username: %#v close err: %v",
					time.Since(conn.GetLastActivity()), conn.GetUsername(), err)
				if isFTPNoAuth {
					ip := utils.GetIPFromRemoteAddress(c.GetRemoteAddress())
					logger.ConnectionFailedLog("", ip, dataprovider.LoginMethodNoAuthTryed, c.GetProtocol(), "client idle")
					metrics.AddNoAuthTryed()
					AddDefenderEvent(ip, HostEventNoLoginTried)
					dataprovider.ExecutePostLoginHook(&dataprovider.User{}, dataprovider.LoginMethodNoAuthTryed, ip, c.GetProtocol(),
						dataprovider.ErrNoAuthTryed)
				}
			}(c, isUnauthenticatedFTPUser)
		}
	}

	conns.RUnlock()
}

// AddClientConnection stores a new client connection
func (conns *ActiveConnections) AddClientConnection(ipAddr string) {
	conns.clients.add(ipAddr)
}

// RemoveClientConnection removes a disconnected client from the tracked ones
func (conns *ActiveConnections) RemoveClientConnection(ipAddr string) {
	conns.clients.remove(ipAddr)
}

// GetClientConnections returns the total number of client connections
func (conns *ActiveConnections) GetClientConnections() int32 {
	return conns.clients.getTotal()
}

// IsNewConnectionAllowed returns false if the maximum number of concurrent allowed connections is exceeded
func (conns *ActiveConnections) IsNewConnectionAllowed(ipAddr string) bool {
	if Config.MaxTotalConnections == 0 && Config.MaxPerHostConnections == 0 {
		return true
	}

	if Config.MaxPerHostConnections > 0 {
		if total := conns.clients.getTotalFrom(ipAddr); total > Config.MaxPerHostConnections {
			logger.Debug(logSender, "", "active connections from %v %v/%v", ipAddr, total, Config.MaxPerHostConnections)
			AddDefenderEvent(ipAddr, HostEventLimitExceeded)
			return false
		}
	}

	if Config.MaxTotalConnections > 0 {
		if total := conns.clients.getTotal(); total > int32(Config.MaxTotalConnections) {
			logger.Debug(logSender, "", "active client connections %v/%v", total, Config.MaxTotalConnections)
			return false
		}

		// on a single SFTP connection we could have multiple SFTP channels or commands,
		// so we check the established connections too

		conns.RLock()
		defer conns.RUnlock()

		return len(conns.connections) < Config.MaxTotalConnections
	}

	return true
}
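A hedged sketch of the accept path a protocol listener is expected to follow, combining the per-IP tracking with the limit check (gateConnection is a hypothetical caller):

func gateConnection(ipAddr string) bool {
	if IsBanned(ipAddr) {
		return false
	}
	Connections.AddClientConnection(ipAddr)
	if !Connections.IsNewConnectionAllowed(ipAddr) {
		// the caller must still remove the tracked client on rejection
		Connections.RemoveClientConnection(ipAddr)
		return false
	}
	return true
}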
// GetStats returns stats for active connections
|
|
||||||
func (conns *ActiveConnections) GetStats() []*ConnectionStatus {
|
|
||||||
conns.RLock()
|
|
||||||
defer conns.RUnlock()
|
|
||||||
|
|
||||||
stats := make([]*ConnectionStatus, 0, len(conns.connections))
|
|
||||||
for _, c := range conns.connections {
|
|
||||||
stat := &ConnectionStatus{
|
|
||||||
Username: c.GetUsername(),
|
|
||||||
ConnectionID: c.GetID(),
|
|
||||||
ClientVersion: c.GetClientVersion(),
|
|
||||||
RemoteAddress: c.GetRemoteAddress(),
|
|
||||||
ConnectionTime: utils.GetTimeAsMsSinceEpoch(c.GetConnectionTime()),
|
|
||||||
LastActivity: utils.GetTimeAsMsSinceEpoch(c.GetLastActivity()),
|
|
||||||
Protocol: c.GetProtocol(),
|
|
||||||
Command: c.GetCommand(),
|
|
||||||
Transfers: c.GetTransfers(),
|
|
||||||
}
|
|
||||||
stats = append(stats, stat)
|
|
||||||
}
|
|
||||||
return stats
|
|
||||||
}
|
|
||||||
|
|
||||||
// ConnectionStatus returns the status for an active connection
|
|
||||||
type ConnectionStatus struct {
|
|
||||||
// Logged in username
|
|
||||||
Username string `json:"username"`
|
|
||||||
// Unique identifier for the connection
|
|
||||||
ConnectionID string `json:"connection_id"`
|
|
||||||
// client's version string
|
|
||||||
ClientVersion string `json:"client_version,omitempty"`
|
|
||||||
// Remote address for this connection
|
|
||||||
RemoteAddress string `json:"remote_address"`
|
|
||||||
// Connection time as unix timestamp in milliseconds
|
|
||||||
ConnectionTime int64 `json:"connection_time"`
|
|
||||||
// Last activity as unix timestamp in milliseconds
|
|
||||||
LastActivity int64 `json:"last_activity"`
|
|
||||||
// Protocol for this connection
|
|
||||||
Protocol string `json:"protocol"`
|
|
||||||
// active uploads/downloads
|
|
||||||
Transfers []ConnectionTransfer `json:"active_transfers,omitempty"`
|
|
||||||
// SSH command or WebDAV method
|
|
||||||
Command string `json:"command,omitempty"`
|
|
||||||
}

// GetConnectionDuration returns the connection duration as string
func (c *ConnectionStatus) GetConnectionDuration() string {
	elapsed := time.Since(utils.GetTimeFromMsecSinceEpoch(c.ConnectionTime))
	return utils.GetDurationAsString(elapsed)
}

// GetConnectionInfo returns connection info.
// Protocol, ClientVersion and RemoteAddress are returned.
func (c *ConnectionStatus) GetConnectionInfo() string {
	var result strings.Builder

	result.WriteString(fmt.Sprintf("%v. Client: %#v From: %#v", c.Protocol, c.ClientVersion, c.RemoteAddress))

	if c.Command == "" {
		return result.String()
	}

	switch c.Protocol {
	case ProtocolSSH, ProtocolFTP:
		result.WriteString(fmt.Sprintf(". Command: %#v", c.Command))
	case ProtocolWebDAV:
		result.WriteString(fmt.Sprintf(". Method: %#v", c.Command))
	}

	return result.String()
}

// GetTransfersAsString returns the active transfers as string
func (c *ConnectionStatus) GetTransfersAsString() string {
	result := ""
	for _, t := range c.Transfers {
		if result != "" {
			result += ". "
		}
		result += t.getConnectionTransferAsString()
	}
	return result
}
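
// A short usage sketch: iterating the stats returned by GetStats and printing
// a human readable summary per connection, using only the fields and methods
// defined above:
//
//	for _, stat := range Connections.GetStats() {
//		fmt.Printf("%s connected for %s: %s\n",
//			stat.Username, stat.GetConnectionDuration(), stat.GetConnectionInfo())
//		if transfers := stat.GetTransfersAsString(); transfers != "" {
//			fmt.Println(transfers)
//		}
//	}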

// ActiveQuotaScan defines an active quota scan for a user home dir
type ActiveQuotaScan struct {
	// Username to which the quota scan refers
	Username string `json:"username"`
	// quota scan start time as unix timestamp in milliseconds
	StartTime int64 `json:"start_time"`
}

// ActiveVirtualFolderQuotaScan defines an active quota scan for a virtual folder
type ActiveVirtualFolderQuotaScan struct {
	// folder name to which the quota scan refers
	Name string `json:"name"`
	// quota scan start time as unix timestamp in milliseconds
	StartTime int64 `json:"start_time"`
}

// ActiveScans holds the active quota scans
type ActiveScans struct {
	sync.RWMutex
	UserHomeScans []ActiveQuotaScan
	FolderScans   []ActiveVirtualFolderQuotaScan
}

// GetUsersQuotaScans returns the active quota scans for users home directories
func (s *ActiveScans) GetUsersQuotaScans() []ActiveQuotaScan {
	s.RLock()
	defer s.RUnlock()

	scans := make([]ActiveQuotaScan, len(s.UserHomeScans))
	copy(scans, s.UserHomeScans)
	return scans
}

// AddUserQuotaScan adds a user to the ones with active quota scans.
// Returns false if the user has a quota scan already running
func (s *ActiveScans) AddUserQuotaScan(username string) bool {
	s.Lock()
	defer s.Unlock()

	for _, scan := range s.UserHomeScans {
		if scan.Username == username {
			return false
		}
	}
	s.UserHomeScans = append(s.UserHomeScans, ActiveQuotaScan{
		Username:  username,
		StartTime: utils.GetTimeAsMsSinceEpoch(time.Now()),
	})
	return true
}

// RemoveUserQuotaScan removes a user from the ones with active quota scans.
// Returns false if the user has no active quota scans
func (s *ActiveScans) RemoveUserQuotaScan(username string) bool {
	s.Lock()
	defer s.Unlock()

	indexToRemove := -1
	for i, scan := range s.UserHomeScans {
		if scan.Username == username {
			indexToRemove = i
			break
		}
	}
	if indexToRemove >= 0 {
		s.UserHomeScans[indexToRemove] = s.UserHomeScans[len(s.UserHomeScans)-1]
		s.UserHomeScans = s.UserHomeScans[:len(s.UserHomeScans)-1]
		return true
	}
	return false
}
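
// RemoveUserQuotaScan deletes from the slice with the order-destroying
// "swap with last, then truncate" idiom, which is O(1); scan order carries no
// meaning here, so that is safe. A typical caller pairs Add and Remove to
// guarantee a single scan per user (sketch; scanHomeDir is a hypothetical
// helper, not part of this package):
//
//	if !QuotaScans.AddUserQuotaScan(user.Username) {
//		return errors.New("another quota scan is already running for this user")
//	}
//	defer QuotaScans.RemoveUserQuotaScan(user.Username)
//	return scanHomeDir(user)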

// GetVFoldersQuotaScans returns the active quota scans for virtual folders
func (s *ActiveScans) GetVFoldersQuotaScans() []ActiveVirtualFolderQuotaScan {
	s.RLock()
	defer s.RUnlock()
	scans := make([]ActiveVirtualFolderQuotaScan, len(s.FolderScans))
	copy(scans, s.FolderScans)
	return scans
}

// AddVFolderQuotaScan adds a virtual folder to the ones with active quota scans.
// Returns false if the folder has a quota scan already running
func (s *ActiveScans) AddVFolderQuotaScan(folderName string) bool {
	s.Lock()
	defer s.Unlock()

	for _, scan := range s.FolderScans {
		if scan.Name == folderName {
			return false
		}
	}
	s.FolderScans = append(s.FolderScans, ActiveVirtualFolderQuotaScan{
		Name:      folderName,
		StartTime: utils.GetTimeAsMsSinceEpoch(time.Now()),
	})
	return true
}

// RemoveVFolderQuotaScan removes a folder from the ones with active quota scans.
// Returns false if the folder has no active quota scans
func (s *ActiveScans) RemoveVFolderQuotaScan(folderName string) bool {
	s.Lock()
	defer s.Unlock()

	indexToRemove := -1
	for i, scan := range s.FolderScans {
		if scan.Name == folderName {
			indexToRemove = i
			break
		}
	}
	if indexToRemove >= 0 {
		s.FolderScans[indexToRemove] = s.FolderScans[len(s.FolderScans)-1]
		s.FolderScans = s.FolderScans[:len(s.FolderScans)-1]
		return true
	}
	return false
}

@@ -1,771 +0,0 @@
package common

import (
	"encoding/json"
	"fmt"
	"net"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"strings"
	"sync/atomic"
	"testing"
	"time"

	"github.com/alexedwards/argon2id"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"golang.org/x/crypto/bcrypt"

	"github.com/drakkan/sftpgo/dataprovider"
	"github.com/drakkan/sftpgo/kms"
	"github.com/drakkan/sftpgo/utils"
	"github.com/drakkan/sftpgo/vfs"
)

const (
	logSenderTest    = "common_test"
	httpAddr         = "127.0.0.1:9999"
	configDir        = ".."
	osWindows        = "windows"
	userTestUsername = "common_test_username"
)

type fakeConnection struct {
	*BaseConnection
	command string
}

func (c *fakeConnection) AddUser(user dataprovider.User) error {
	_, err := user.GetFilesystem(c.GetID())
	if err != nil {
		return err
	}
	c.BaseConnection.User = user
	return nil
}

func (c *fakeConnection) Disconnect() error {
	Connections.Remove(c.GetID())
	return nil
}

func (c *fakeConnection) GetClientVersion() string {
	return ""
}

func (c *fakeConnection) GetCommand() string {
	return c.command
}

func (c *fakeConnection) GetRemoteAddress() string {
	return ""
}

type customNetConn struct {
	net.Conn
	id       string
	isClosed bool
}

func (c *customNetConn) Close() error {
	Connections.RemoveSSHConnection(c.id)
	c.isClosed = true
	return c.Conn.Close()
}

func TestSSHConnections(t *testing.T) {
	conn1, conn2 := net.Pipe()
	now := time.Now()
	sshConn1 := NewSSHConnection("id1", conn1)
	sshConn2 := NewSSHConnection("id2", conn2)
	sshConn3 := NewSSHConnection("id3", conn2)
	assert.Equal(t, "id1", sshConn1.GetID())
	assert.Equal(t, "id2", sshConn2.GetID())
	assert.Equal(t, "id3", sshConn3.GetID())
	sshConn1.UpdateLastActivity()
	assert.GreaterOrEqual(t, sshConn1.GetLastActivity().UnixNano(), now.UnixNano())
	Connections.AddSSHConnection(sshConn1)
	Connections.AddSSHConnection(sshConn2)
	Connections.AddSSHConnection(sshConn3)
	Connections.RLock()
	assert.Len(t, Connections.sshConnections, 3)
	Connections.RUnlock()
	Connections.RemoveSSHConnection(sshConn1.id)
	Connections.RLock()
	assert.Len(t, Connections.sshConnections, 2)
	assert.Equal(t, sshConn3.id, Connections.sshConnections[0].id)
	assert.Equal(t, sshConn2.id, Connections.sshConnections[1].id)
	Connections.RUnlock()
	Connections.RemoveSSHConnection(sshConn1.id)
	Connections.RLock()
	assert.Len(t, Connections.sshConnections, 2)
	assert.Equal(t, sshConn3.id, Connections.sshConnections[0].id)
	assert.Equal(t, sshConn2.id, Connections.sshConnections[1].id)
	Connections.RUnlock()
	Connections.RemoveSSHConnection(sshConn2.id)
	Connections.RLock()
	assert.Len(t, Connections.sshConnections, 1)
	assert.Equal(t, sshConn3.id, Connections.sshConnections[0].id)
	Connections.RUnlock()
	Connections.RemoveSSHConnection(sshConn3.id)
	Connections.RLock()
	assert.Len(t, Connections.sshConnections, 0)
	Connections.RUnlock()
	assert.NoError(t, sshConn1.Close())
	assert.NoError(t, sshConn2.Close())
	assert.NoError(t, sshConn3.Close())
}

func TestDefenderIntegration(t *testing.T) {
	// by default defender is nil
	configCopy := Config

	ip := "127.1.1.1"

	assert.Nil(t, ReloadDefender())

	AddDefenderEvent(ip, HostEventNoLoginTried)
	assert.False(t, IsBanned(ip))

	assert.Nil(t, GetDefenderBanTime(ip))
	assert.False(t, DeleteDefenderHost(ip))
	assert.Equal(t, 0, GetDefenderScore(ip))
	_, err := GetDefenderHost(ip)
	assert.Error(t, err)
	assert.Nil(t, GetDefenderHosts())

	Config.DefenderConfig = DefenderConfig{
		Enabled:          true,
		BanTime:          10,
		BanTimeIncrement: 50,
		Threshold:        0,
		ScoreInvalid:     2,
		ScoreValid:       1,
		ObservationTime:  15,
		EntriesSoftLimit: 100,
		EntriesHardLimit: 150,
	}
	err = Initialize(Config)
	assert.Error(t, err)
	Config.DefenderConfig.Threshold = 3
	err = Initialize(Config)
	assert.NoError(t, err)
	assert.Nil(t, ReloadDefender())

	AddDefenderEvent(ip, HostEventNoLoginTried)
	assert.False(t, IsBanned(ip))
	assert.Equal(t, 2, GetDefenderScore(ip))
	entry, err := GetDefenderHost(ip)
	assert.NoError(t, err)
	asJSON, err := json.Marshal(&entry)
	assert.NoError(t, err)
	assert.Equal(t, `{"id":"3132372e312e312e31","ip":"127.1.1.1","score":2}`, string(asJSON), "entry %v", entry)
	assert.True(t, DeleteDefenderHost(ip))
	assert.Nil(t, GetDefenderBanTime(ip))

	AddDefenderEvent(ip, HostEventLoginFailed)
	AddDefenderEvent(ip, HostEventNoLoginTried)
	assert.True(t, IsBanned(ip))
	assert.Equal(t, 0, GetDefenderScore(ip))
	assert.NotNil(t, GetDefenderBanTime(ip))
	assert.Len(t, GetDefenderHosts(), 1)
	entry, err = GetDefenderHost(ip)
	assert.NoError(t, err)
	assert.False(t, entry.BanTime.IsZero())
	assert.True(t, DeleteDefenderHost(ip))
	assert.Len(t, GetDefenderHosts(), 0)
	assert.Nil(t, GetDefenderBanTime(ip))
	assert.False(t, DeleteDefenderHost(ip))

	Config = configCopy
}

func TestRateLimitersIntegration(t *testing.T) {
	// by default defender is nil
	configCopy := Config

	Config.RateLimitersConfig = []RateLimiterConfig{
		{
			Average:   100,
			Period:    10,
			Burst:     5,
			Type:      int(rateLimiterTypeGlobal),
			Protocols: rateLimiterProtocolValues,
		},
		{
			Average:                1,
			Period:                 1000,
			Burst:                  1,
			Type:                   int(rateLimiterTypeSource),
			Protocols:              []string{ProtocolWebDAV, ProtocolWebDAV, ProtocolFTP},
			GenerateDefenderEvents: true,
			EntriesSoftLimit:       100,
			EntriesHardLimit:       150,
		},
	}
	err := Initialize(Config)
	assert.Error(t, err)
	Config.RateLimitersConfig[0].Period = 1000
	err = Initialize(Config)
	assert.NoError(t, err)

	assert.Len(t, rateLimiters, 4)
	assert.Len(t, rateLimiters[ProtocolSSH], 1)
	assert.Len(t, rateLimiters[ProtocolFTP], 2)
	assert.Len(t, rateLimiters[ProtocolWebDAV], 2)
	assert.Len(t, rateLimiters[ProtocolHTTP], 1)

	source1 := "127.1.1.1"
	source2 := "127.1.1.2"

	_, err = LimitRate(ProtocolSSH, source1)
	assert.NoError(t, err)
	_, err = LimitRate(ProtocolFTP, source1)
	assert.NoError(t, err)
	// sleep to allow the configured burst to be added back to the global token bucket.
	// This sleep is not enough to add the per-source burst
	time.Sleep(20 * time.Millisecond)
	_, err = LimitRate(ProtocolWebDAV, source2)
	assert.NoError(t, err)
	_, err = LimitRate(ProtocolFTP, source1)
	assert.Error(t, err)
	_, err = LimitRate(ProtocolWebDAV, source2)
	assert.Error(t, err)
	_, err = LimitRate(ProtocolSSH, source1)
	assert.NoError(t, err)
	_, err = LimitRate(ProtocolSSH, source2)
	assert.NoError(t, err)

	Config = configCopy
}

func TestMaxConnections(t *testing.T) {
	oldValue := Config.MaxTotalConnections
	perHost := Config.MaxPerHostConnections

	Config.MaxPerHostConnections = 0

	ipAddr := "192.168.7.8"
	assert.True(t, Connections.IsNewConnectionAllowed(ipAddr))

	Config.MaxTotalConnections = 1
	Config.MaxPerHostConnections = perHost

	assert.True(t, Connections.IsNewConnectionAllowed(ipAddr))
	c := NewBaseConnection("id", ProtocolSFTP, "", dataprovider.User{})
	fakeConn := &fakeConnection{
		BaseConnection: c,
	}
	Connections.Add(fakeConn)
	assert.Len(t, Connections.GetStats(), 1)
	assert.False(t, Connections.IsNewConnectionAllowed(ipAddr))

	res := Connections.Close(fakeConn.GetID())
	assert.True(t, res)
	assert.Eventually(t, func() bool { return len(Connections.GetStats()) == 0 }, 300*time.Millisecond, 50*time.Millisecond)

	assert.True(t, Connections.IsNewConnectionAllowed(ipAddr))
	Connections.AddClientConnection(ipAddr)
	Connections.AddClientConnection(ipAddr)
	assert.False(t, Connections.IsNewConnectionAllowed(ipAddr))
	Connections.RemoveClientConnection(ipAddr)
	assert.True(t, Connections.IsNewConnectionAllowed(ipAddr))
	Connections.RemoveClientConnection(ipAddr)

	Config.MaxTotalConnections = oldValue
}

func TestMaxConnectionPerHost(t *testing.T) {
	oldValue := Config.MaxPerHostConnections

	Config.MaxPerHostConnections = 2

	ipAddr := "192.168.9.9"
	Connections.AddClientConnection(ipAddr)
	assert.True(t, Connections.IsNewConnectionAllowed(ipAddr))

	Connections.AddClientConnection(ipAddr)
	assert.True(t, Connections.IsNewConnectionAllowed(ipAddr))

	Connections.AddClientConnection(ipAddr)
	assert.False(t, Connections.IsNewConnectionAllowed(ipAddr))
	assert.Equal(t, int32(3), Connections.GetClientConnections())

	Connections.RemoveClientConnection(ipAddr)
	Connections.RemoveClientConnection(ipAddr)
	Connections.RemoveClientConnection(ipAddr)

	assert.Equal(t, int32(0), Connections.GetClientConnections())

	Config.MaxPerHostConnections = oldValue
}

func TestIdleConnections(t *testing.T) {
	configCopy := Config

	Config.IdleTimeout = 1
	err := Initialize(Config)
	assert.NoError(t, err)

	conn1, conn2 := net.Pipe()
	customConn1 := &customNetConn{
		Conn: conn1,
		id:   "id1",
	}
	customConn2 := &customNetConn{
		Conn: conn2,
		id:   "id2",
	}
	sshConn1 := NewSSHConnection(customConn1.id, customConn1)
	sshConn2 := NewSSHConnection(customConn2.id, customConn2)

	username := "test_user"
	user := dataprovider.User{
		Username: username,
	}
	c := NewBaseConnection(sshConn1.id+"_1", ProtocolSFTP, "", user)
	c.lastActivity = time.Now().Add(-24 * time.Hour).UnixNano()
	fakeConn := &fakeConnection{
		BaseConnection: c,
	}
	// both ssh connections are expired but they should get removed only
	// if there is no associated connection
	sshConn1.lastActivity = c.lastActivity
	sshConn2.lastActivity = c.lastActivity
	Connections.AddSSHConnection(sshConn1)
	Connections.Add(fakeConn)
	assert.Equal(t, Connections.GetActiveSessions(username), 1)
	c = NewBaseConnection(sshConn2.id+"_1", ProtocolSSH, "", user)
	fakeConn = &fakeConnection{
		BaseConnection: c,
	}
	Connections.AddSSHConnection(sshConn2)
	Connections.Add(fakeConn)
	assert.Equal(t, Connections.GetActiveSessions(username), 2)

	cFTP := NewBaseConnection("id2", ProtocolFTP, "", dataprovider.User{})
	cFTP.lastActivity = time.Now().UnixNano()
	fakeConn = &fakeConnection{
		BaseConnection: cFTP,
	}
	Connections.Add(fakeConn)
	assert.Equal(t, Connections.GetActiveSessions(username), 2)
	assert.Len(t, Connections.GetStats(), 3)
	Connections.RLock()
	assert.Len(t, Connections.sshConnections, 2)
	Connections.RUnlock()

	startIdleTimeoutTicker(100 * time.Millisecond)
	assert.Eventually(t, func() bool { return Connections.GetActiveSessions(username) == 1 }, 1*time.Second, 200*time.Millisecond)
	assert.Eventually(t, func() bool {
		Connections.RLock()
		defer Connections.RUnlock()
		return len(Connections.sshConnections) == 1
	}, 1*time.Second, 200*time.Millisecond)
	stopIdleTimeoutTicker()
	assert.Len(t, Connections.GetStats(), 2)
	c.lastActivity = time.Now().Add(-24 * time.Hour).UnixNano()
	cFTP.lastActivity = time.Now().Add(-24 * time.Hour).UnixNano()
	sshConn2.lastActivity = c.lastActivity
	startIdleTimeoutTicker(100 * time.Millisecond)
	assert.Eventually(t, func() bool { return len(Connections.GetStats()) == 0 }, 1*time.Second, 200*time.Millisecond)
	assert.Eventually(t, func() bool {
		Connections.RLock()
		defer Connections.RUnlock()
		return len(Connections.sshConnections) == 0
	}, 1*time.Second, 200*time.Millisecond)
	assert.Equal(t, int32(0), Connections.GetClientConnections())
	stopIdleTimeoutTicker()
	assert.True(t, customConn1.isClosed)
	assert.True(t, customConn2.isClosed)

	Config = configCopy
}

func TestCloseConnection(t *testing.T) {
	c := NewBaseConnection("id", ProtocolSFTP, "", dataprovider.User{})
	fakeConn := &fakeConnection{
		BaseConnection: c,
	}
	assert.True(t, Connections.IsNewConnectionAllowed("127.0.0.1"))
	Connections.Add(fakeConn)
	assert.Len(t, Connections.GetStats(), 1)
	res := Connections.Close(fakeConn.GetID())
	assert.True(t, res)
	assert.Eventually(t, func() bool { return len(Connections.GetStats()) == 0 }, 300*time.Millisecond, 50*time.Millisecond)
	res = Connections.Close(fakeConn.GetID())
	assert.False(t, res)
	Connections.Remove(fakeConn.GetID())
}

func TestSwapConnection(t *testing.T) {
	c := NewBaseConnection("id", ProtocolFTP, "", dataprovider.User{})
	fakeConn := &fakeConnection{
		BaseConnection: c,
	}
	Connections.Add(fakeConn)
	if assert.Len(t, Connections.GetStats(), 1) {
		assert.Equal(t, "", Connections.GetStats()[0].Username)
	}
	c = NewBaseConnection("id", ProtocolFTP, "", dataprovider.User{
		Username: userTestUsername,
	})
	fakeConn = &fakeConnection{
		BaseConnection: c,
	}
	err := Connections.Swap(fakeConn)
	assert.NoError(t, err)
	if assert.Len(t, Connections.GetStats(), 1) {
		assert.Equal(t, userTestUsername, Connections.GetStats()[0].Username)
	}
	res := Connections.Close(fakeConn.GetID())
	assert.True(t, res)
	assert.Eventually(t, func() bool { return len(Connections.GetStats()) == 0 }, 300*time.Millisecond, 50*time.Millisecond)
	err = Connections.Swap(fakeConn)
	assert.Error(t, err)
}

func TestAtomicUpload(t *testing.T) {
	configCopy := Config

	Config.UploadMode = UploadModeStandard
	assert.False(t, Config.IsAtomicUploadEnabled())
	Config.UploadMode = UploadModeAtomic
	assert.True(t, Config.IsAtomicUploadEnabled())
	Config.UploadMode = UploadModeAtomicWithResume
	assert.True(t, Config.IsAtomicUploadEnabled())

	Config = configCopy
}

func TestConnectionStatus(t *testing.T) {
	username := "test_user"
	user := dataprovider.User{
		Username: username,
	}
	fs := vfs.NewOsFs("", os.TempDir(), "")
	c1 := NewBaseConnection("id1", ProtocolSFTP, "", user)
	fakeConn1 := &fakeConnection{
		BaseConnection: c1,
	}
	t1 := NewBaseTransfer(nil, c1, nil, "/p1", "/p1", "/r1", TransferUpload, 0, 0, 0, true, fs)
	t1.BytesReceived = 123
	t2 := NewBaseTransfer(nil, c1, nil, "/p2", "/p2", "/r2", TransferDownload, 0, 0, 0, true, fs)
	t2.BytesSent = 456
	c2 := NewBaseConnection("id2", ProtocolSSH, "", user)
	fakeConn2 := &fakeConnection{
		BaseConnection: c2,
		command:        "md5sum",
	}
	c3 := NewBaseConnection("id3", ProtocolWebDAV, "", user)
	fakeConn3 := &fakeConnection{
		BaseConnection: c3,
		command:        "PROPFIND",
	}
	t3 := NewBaseTransfer(nil, c3, nil, "/p2", "/p2", "/r2", TransferDownload, 0, 0, 0, true, fs)
	Connections.Add(fakeConn1)
	Connections.Add(fakeConn2)
	Connections.Add(fakeConn3)

	stats := Connections.GetStats()
	assert.Len(t, stats, 3)
	for _, stat := range stats {
		assert.Equal(t, stat.Username, username)
		assert.True(t, strings.HasPrefix(stat.GetConnectionInfo(), stat.Protocol))
		assert.True(t, strings.HasPrefix(stat.GetConnectionDuration(), "00:"))
		if stat.ConnectionID == "SFTP_id1" {
			assert.Len(t, stat.Transfers, 2)
			assert.Greater(t, len(stat.GetTransfersAsString()), 0)
			for _, tr := range stat.Transfers {
				if tr.OperationType == operationDownload {
					assert.True(t, strings.HasPrefix(tr.getConnectionTransferAsString(), "DL"))
				} else if tr.OperationType == operationUpload {
					assert.True(t, strings.HasPrefix(tr.getConnectionTransferAsString(), "UL"))
				}
			}
		} else if stat.ConnectionID == "DAV_id3" {
			assert.Len(t, stat.Transfers, 1)
			assert.Greater(t, len(stat.GetTransfersAsString()), 0)
		} else {
			assert.Equal(t, 0, len(stat.GetTransfersAsString()))
		}
	}

	err := t1.Close()
	assert.NoError(t, err)
	err = t2.Close()
	assert.NoError(t, err)

	err = fakeConn3.SignalTransfersAbort()
	assert.NoError(t, err)
	assert.Equal(t, int32(1), atomic.LoadInt32(&t3.AbortTransfer))
	err = t3.Close()
	assert.NoError(t, err)
	err = fakeConn3.SignalTransfersAbort()
	assert.Error(t, err)

	Connections.Remove(fakeConn1.GetID())
	stats = Connections.GetStats()
	assert.Len(t, stats, 2)
	assert.Equal(t, fakeConn3.GetID(), stats[0].ConnectionID)
	assert.Equal(t, fakeConn2.GetID(), stats[1].ConnectionID)
	Connections.Remove(fakeConn2.GetID())
	stats = Connections.GetStats()
	assert.Len(t, stats, 1)
	assert.Equal(t, fakeConn3.GetID(), stats[0].ConnectionID)
	Connections.Remove(fakeConn3.GetID())
	stats = Connections.GetStats()
	assert.Len(t, stats, 0)
}

func TestQuotaScans(t *testing.T) {
	username := "username"
	assert.True(t, QuotaScans.AddUserQuotaScan(username))
	assert.False(t, QuotaScans.AddUserQuotaScan(username))
	if assert.Len(t, QuotaScans.GetUsersQuotaScans(), 1) {
		assert.Equal(t, QuotaScans.GetUsersQuotaScans()[0].Username, username)
	}

	assert.True(t, QuotaScans.RemoveUserQuotaScan(username))
	assert.False(t, QuotaScans.RemoveUserQuotaScan(username))
	assert.Len(t, QuotaScans.GetUsersQuotaScans(), 0)

	folderName := "folder"
	assert.True(t, QuotaScans.AddVFolderQuotaScan(folderName))
	assert.False(t, QuotaScans.AddVFolderQuotaScan(folderName))
	if assert.Len(t, QuotaScans.GetVFoldersQuotaScans(), 1) {
		assert.Equal(t, QuotaScans.GetVFoldersQuotaScans()[0].Name, folderName)
	}

	assert.True(t, QuotaScans.RemoveVFolderQuotaScan(folderName))
	assert.False(t, QuotaScans.RemoveVFolderQuotaScan(folderName))
	assert.Len(t, QuotaScans.GetVFoldersQuotaScans(), 0)
}

func TestProxyProtocolVersion(t *testing.T) {
	c := Configuration{
		ProxyProtocol: 1,
	}
	proxyListener, err := c.GetProxyListener(nil)
	assert.NoError(t, err)
	assert.Nil(t, proxyListener.Policy)

	c.ProxyProtocol = 2
	proxyListener, err = c.GetProxyListener(nil)
	assert.NoError(t, err)
	assert.NotNil(t, proxyListener.Policy)

	c.ProxyProtocol = 1
	c.ProxyAllowed = []string{"invalid"}
	_, err = c.GetProxyListener(nil)
	assert.Error(t, err)

	c.ProxyProtocol = 2
	_, err = c.GetProxyListener(nil)
	assert.Error(t, err)
}

func TestStartupHook(t *testing.T) {
	Config.StartupHook = ""

	assert.NoError(t, Config.ExecuteStartupHook())

	Config.StartupHook = "http://foo\x7f.com/startup"
	assert.Error(t, Config.ExecuteStartupHook())

	Config.StartupHook = "http://invalid:5678/"
	assert.Error(t, Config.ExecuteStartupHook())

	Config.StartupHook = fmt.Sprintf("http://%v", httpAddr)
	assert.NoError(t, Config.ExecuteStartupHook())

	Config.StartupHook = "invalidhook"
	assert.Error(t, Config.ExecuteStartupHook())

	if runtime.GOOS != osWindows {
		hookCmd, err := exec.LookPath("true")
		assert.NoError(t, err)
		Config.StartupHook = hookCmd
		assert.NoError(t, Config.ExecuteStartupHook())
	}

	Config.StartupHook = ""
}

func TestPostConnectHook(t *testing.T) {
	Config.PostConnectHook = ""

	ipAddr := "127.0.0.1"

	assert.NoError(t, Config.ExecutePostConnectHook(ipAddr, ProtocolFTP))

	Config.PostConnectHook = "http://foo\x7f.com/"
	assert.Error(t, Config.ExecutePostConnectHook(ipAddr, ProtocolSFTP))

	Config.PostConnectHook = "http://invalid:1234/"
	assert.Error(t, Config.ExecutePostConnectHook(ipAddr, ProtocolSFTP))

	Config.PostConnectHook = fmt.Sprintf("http://%v/404", httpAddr)
	assert.Error(t, Config.ExecutePostConnectHook(ipAddr, ProtocolFTP))

	Config.PostConnectHook = fmt.Sprintf("http://%v", httpAddr)
	assert.NoError(t, Config.ExecutePostConnectHook(ipAddr, ProtocolFTP))

	Config.PostConnectHook = "invalid"
	assert.Error(t, Config.ExecutePostConnectHook(ipAddr, ProtocolFTP))

	if runtime.GOOS == osWindows {
		Config.PostConnectHook = "C:\\bad\\command"
		assert.Error(t, Config.ExecutePostConnectHook(ipAddr, ProtocolSFTP))
	} else {
		Config.PostConnectHook = "/invalid/path"
		assert.Error(t, Config.ExecutePostConnectHook(ipAddr, ProtocolSFTP))

		hookCmd, err := exec.LookPath("true")
		assert.NoError(t, err)
		Config.PostConnectHook = hookCmd
		assert.NoError(t, Config.ExecutePostConnectHook(ipAddr, ProtocolSFTP))
	}

	Config.PostConnectHook = ""
}

func TestCryptoConvertFileInfo(t *testing.T) {
	name := "name"
	fs, err := vfs.NewCryptFs("connID1", os.TempDir(), "", vfs.CryptFsConfig{Passphrase: kms.NewPlainSecret("secret")})
	require.NoError(t, err)
	cryptFs := fs.(*vfs.CryptFs)
	info := vfs.NewFileInfo(name, true, 48, time.Now(), false)
	assert.Equal(t, info, cryptFs.ConvertFileInfo(info))
	info = vfs.NewFileInfo(name, false, 48, time.Now(), false)
	assert.NotEqual(t, info.Size(), cryptFs.ConvertFileInfo(info).Size())
	info = vfs.NewFileInfo(name, false, 33, time.Now(), false)
	assert.Equal(t, int64(0), cryptFs.ConvertFileInfo(info).Size())
	info = vfs.NewFileInfo(name, false, 1, time.Now(), false)
	assert.Equal(t, int64(0), cryptFs.ConvertFileInfo(info).Size())
}

func TestFolderCopy(t *testing.T) {
	folder := vfs.BaseVirtualFolder{
		ID:              1,
		Name:            "name",
		MappedPath:      filepath.Clean(os.TempDir()),
		UsedQuotaSize:   4096,
		UsedQuotaFiles:  2,
		LastQuotaUpdate: utils.GetTimeAsMsSinceEpoch(time.Now()),
		Users:           []string{"user1", "user2"},
	}
	folderCopy := folder.GetACopy()
	folder.ID = 2
	folder.Users = []string{"user3"}
	require.Len(t, folderCopy.Users, 2)
	require.True(t, utils.IsStringInSlice("user1", folderCopy.Users))
	require.True(t, utils.IsStringInSlice("user2", folderCopy.Users))
	require.Equal(t, int64(1), folderCopy.ID)
	require.Equal(t, folder.Name, folderCopy.Name)
	require.Equal(t, folder.MappedPath, folderCopy.MappedPath)
	require.Equal(t, folder.UsedQuotaSize, folderCopy.UsedQuotaSize)
	require.Equal(t, folder.UsedQuotaFiles, folderCopy.UsedQuotaFiles)
	require.Equal(t, folder.LastQuotaUpdate, folderCopy.LastQuotaUpdate)

	folder.FsConfig = vfs.Filesystem{
		CryptConfig: vfs.CryptFsConfig{
			Passphrase: kms.NewPlainSecret("crypto secret"),
		},
	}
	folderCopy = folder.GetACopy()
	folder.FsConfig.CryptConfig.Passphrase = kms.NewEmptySecret()
	require.Len(t, folderCopy.Users, 1)
	require.True(t, utils.IsStringInSlice("user3", folderCopy.Users))
	require.Equal(t, int64(2), folderCopy.ID)
	require.Equal(t, folder.Name, folderCopy.Name)
	require.Equal(t, folder.MappedPath, folderCopy.MappedPath)
	require.Equal(t, folder.UsedQuotaSize, folderCopy.UsedQuotaSize)
	require.Equal(t, folder.UsedQuotaFiles, folderCopy.UsedQuotaFiles)
	require.Equal(t, folder.LastQuotaUpdate, folderCopy.LastQuotaUpdate)
	require.Equal(t, "crypto secret", folderCopy.FsConfig.CryptConfig.Passphrase.GetPayload())
}

func TestCachedFs(t *testing.T) {
	user := dataprovider.User{
		HomeDir: filepath.Clean(os.TempDir()),
	}
	conn := NewBaseConnection("id", ProtocolSFTP, "", user)
	// changing the user should not affect the connection
	user.HomeDir = filepath.Join(os.TempDir(), "temp")
	err := os.Mkdir(user.HomeDir, os.ModePerm)
	assert.NoError(t, err)
	fs, err := user.GetFilesystem("")
	assert.NoError(t, err)
	p, err := fs.ResolvePath("/")
	assert.NoError(t, err)
	assert.Equal(t, user.GetHomeDir(), p)

	_, p, err = conn.GetFsAndResolvedPath("/")
	assert.NoError(t, err)
	assert.Equal(t, filepath.Clean(os.TempDir()), p)
	user.FsConfig.Provider = vfs.S3FilesystemProvider
	_, err = user.GetFilesystem("")
	assert.Error(t, err)
	conn.User.FsConfig.Provider = vfs.S3FilesystemProvider
	_, p, err = conn.GetFsAndResolvedPath("/")
	assert.NoError(t, err)
	assert.Equal(t, filepath.Clean(os.TempDir()), p)
	err = os.Remove(user.HomeDir)
	assert.NoError(t, err)
}

func TestParseAllowedIPAndRanges(t *testing.T) {
	_, err := utils.ParseAllowedIPAndRanges([]string{"1.1.1.1", "not an ip"})
	assert.Error(t, err)
	_, err = utils.ParseAllowedIPAndRanges([]string{"1.1.1.5", "192.168.1.0/240"})
	assert.Error(t, err)
	allow, err := utils.ParseAllowedIPAndRanges([]string{"192.168.1.2", "172.16.0.0/24"})
	assert.NoError(t, err)
	assert.True(t, allow[0](net.ParseIP("192.168.1.2")))
	assert.False(t, allow[0](net.ParseIP("192.168.2.2")))
	assert.True(t, allow[1](net.ParseIP("172.16.0.1")))
	assert.False(t, allow[1](net.ParseIP("172.16.1.1")))
}

func BenchmarkBcryptHashing(b *testing.B) {
	bcryptPassword := "bcryptpassword"
	for i := 0; i < b.N; i++ {
		_, err := bcrypt.GenerateFromPassword([]byte(bcryptPassword), 10)
		if err != nil {
			panic(err)
		}
	}
}

func BenchmarkCompareBcryptPassword(b *testing.B) {
	bcryptPassword := "$2a$10$lPDdnDimJZ7d5/GwL6xDuOqoZVRXok6OHHhivCnanWUtcgN0Zafki"
	for i := 0; i < b.N; i++ {
		err := bcrypt.CompareHashAndPassword([]byte(bcryptPassword), []byte("password"))
		if err != nil {
			panic(err)
		}
	}
}

func BenchmarkArgon2Hashing(b *testing.B) {
	argonPassword := "argon2password"
	for i := 0; i < b.N; i++ {
		_, err := argon2id.CreateHash(argonPassword, argon2id.DefaultParams)
		if err != nil {
			panic(err)
		}
	}
}

func BenchmarkCompareArgon2Password(b *testing.B) {
	argon2Password := "$argon2id$v=19$m=65536,t=1,p=2$aOoAOdAwvzhOgi7wUFjXlw$wn/y37dBWdKHtPXHR03nNaKHWKPXyNuVXOknaU+YZ+s"
	for i := 0; i < b.N; i++ {
		_, err := argon2id.ComparePasswordAndHash("password", argon2Password)
		if err != nil {
			panic(err)
		}
	}
}

common/connection.go
@@ -1,321 +0,0 @@
package common

import (
	"os"
	"path"
	"path/filepath"
	"runtime"
	"testing"
	"time"

	"github.com/pkg/sftp"
	"github.com/stretchr/testify/assert"

	"github.com/drakkan/sftpgo/dataprovider"
	"github.com/drakkan/sftpgo/vfs"
)

// MockOsFs mockable OsFs
type MockOsFs struct {
	vfs.Fs
	hasVirtualFolders bool
}

// Name returns the name for the Fs implementation
func (fs MockOsFs) Name() string {
	return "mockOsFs"
}

// HasVirtualFolders returns true if folders are emulated
func (fs MockOsFs) HasVirtualFolders() bool {
	return fs.hasVirtualFolders
}

func (fs MockOsFs) IsUploadResumeSupported() bool {
	return !fs.hasVirtualFolders
}

func newMockOsFs(hasVirtualFolders bool, connectionID, rootDir string) vfs.Fs {
	return &MockOsFs{
		Fs:                vfs.NewOsFs(connectionID, rootDir, ""),
		hasVirtualFolders: hasVirtualFolders,
	}
}

func TestRemoveErrors(t *testing.T) {
	mappedPath := filepath.Join(os.TempDir(), "map")
	homePath := filepath.Join(os.TempDir(), "home")

	user := dataprovider.User{
		Username: "remove_errors_user",
		HomeDir:  homePath,
		VirtualFolders: []vfs.VirtualFolder{
			{
				BaseVirtualFolder: vfs.BaseVirtualFolder{
					Name:       filepath.Base(mappedPath),
					MappedPath: mappedPath,
				},
				VirtualPath: "/virtualpath",
			},
		},
	}
	user.Permissions = make(map[string][]string)
	user.Permissions["/"] = []string{dataprovider.PermAny}
	fs := vfs.NewOsFs("", os.TempDir(), "")
	conn := NewBaseConnection("", ProtocolFTP, "", user)
	err := conn.IsRemoveDirAllowed(fs, mappedPath, "/virtualpath1")
	if assert.Error(t, err) {
		assert.Contains(t, err.Error(), "permission denied")
	}
	err = conn.RemoveFile(fs, filepath.Join(homePath, "missing_file"), "/missing_file",
		vfs.NewFileInfo("info", false, 100, time.Now(), false))
	assert.Error(t, err)
}

func TestSetStatMode(t *testing.T) {
	oldSetStatMode := Config.SetstatMode
	Config.SetstatMode = 1

	fakePath := "fake path"
	user := dataprovider.User{
		HomeDir: os.TempDir(),
	}
	user.Permissions = make(map[string][]string)
	user.Permissions["/"] = []string{dataprovider.PermAny}
	fs := newMockOsFs(true, "", user.GetHomeDir())
	conn := NewBaseConnection("", ProtocolWebDAV, "", user)
	err := conn.handleChmod(fs, fakePath, fakePath, nil)
	assert.NoError(t, err)
	err = conn.handleChown(fs, fakePath, fakePath, nil)
	assert.NoError(t, err)
	err = conn.handleChtimes(fs, fakePath, fakePath, nil)
	assert.NoError(t, err)

	Config.SetstatMode = 2
	err = conn.handleChmod(fs, fakePath, fakePath, nil)
	assert.NoError(t, err)

	Config.SetstatMode = oldSetStatMode
}

func TestRecursiveRenameWalkError(t *testing.T) {
	fs := vfs.NewOsFs("", os.TempDir(), "")
	conn := NewBaseConnection("", ProtocolWebDAV, "", dataprovider.User{})
	err := conn.checkRecursiveRenameDirPermissions(fs, fs, "/source", "/target")
	assert.ErrorIs(t, err, os.ErrNotExist)
}

func TestCrossRenameFsErrors(t *testing.T) {
	fs := vfs.NewOsFs("", os.TempDir(), "")
	conn := NewBaseConnection("", ProtocolWebDAV, "", dataprovider.User{})
	res := conn.hasSpaceForCrossRename(fs, vfs.QuotaCheckResult{}, 1, "missingsource")
	assert.False(t, res)
	if runtime.GOOS != osWindows {
		dirPath := filepath.Join(os.TempDir(), "d")
		err := os.Mkdir(dirPath, os.ModePerm)
		assert.NoError(t, err)
		err = os.Chmod(dirPath, 0001)
		assert.NoError(t, err)

		res = conn.hasSpaceForCrossRename(fs, vfs.QuotaCheckResult{}, 1, dirPath)
		assert.False(t, res)

		err = os.Chmod(dirPath, os.ModePerm)
		assert.NoError(t, err)
		err = os.Remove(dirPath)
		assert.NoError(t, err)
	}
}

func TestRenameVirtualFolders(t *testing.T) {
	vdir := "/avdir"
	u := dataprovider.User{}
	u.VirtualFolders = append(u.VirtualFolders, vfs.VirtualFolder{
		BaseVirtualFolder: vfs.BaseVirtualFolder{
			Name:       "name",
			MappedPath: "mappedPath",
		},
		VirtualPath: vdir,
	})
	fs := vfs.NewOsFs("", os.TempDir(), "")
	conn := NewBaseConnection("", ProtocolFTP, "", u)
	res := conn.isRenamePermitted(fs, fs, "source", "target", vdir, "vdirtarget", nil)
	assert.False(t, res)
}

func TestUpdateQuotaAfterRename(t *testing.T) {
	user := dataprovider.User{
		Username: userTestUsername,
		HomeDir:  filepath.Join(os.TempDir(), "home"),
	}
	mappedPath := filepath.Join(os.TempDir(), "vdir")
	user.Permissions = make(map[string][]string)
	user.Permissions["/"] = []string{dataprovider.PermAny}
	user.VirtualFolders = append(user.VirtualFolders, vfs.VirtualFolder{
		BaseVirtualFolder: vfs.BaseVirtualFolder{
			MappedPath: mappedPath,
		},
		VirtualPath: "/vdir",
		QuotaFiles:  -1,
		QuotaSize:   -1,
	})
	user.VirtualFolders = append(user.VirtualFolders, vfs.VirtualFolder{
		BaseVirtualFolder: vfs.BaseVirtualFolder{
			MappedPath: mappedPath,
		},
		VirtualPath: "/vdir1",
		QuotaFiles:  -1,
		QuotaSize:   -1,
	})
	err := os.MkdirAll(user.GetHomeDir(), os.ModePerm)
	assert.NoError(t, err)
	err = os.MkdirAll(mappedPath, os.ModePerm)
	assert.NoError(t, err)
	fs, err := user.GetFilesystem("id")
	assert.NoError(t, err)
	c := NewBaseConnection("", ProtocolSFTP, "", user)
	request := sftp.NewRequest("Rename", "/testfile")
	if runtime.GOOS != osWindows {
		request.Filepath = "/dir"
		request.Target = path.Join("/vdir", "dir")
		testDirPath := filepath.Join(mappedPath, "dir")
		err := os.MkdirAll(testDirPath, os.ModePerm)
		assert.NoError(t, err)
		err = os.Chmod(testDirPath, 0001)
		assert.NoError(t, err)
		err = c.updateQuotaAfterRename(fs, request.Filepath, request.Target, testDirPath, 0)
		assert.Error(t, err)
		err = os.Chmod(testDirPath, os.ModePerm)
		assert.NoError(t, err)
	}
	testFile1 := "/testfile1"
	request.Target = testFile1
	request.Filepath = path.Join("/vdir", "file")
	err = c.updateQuotaAfterRename(fs, request.Filepath, request.Target, filepath.Join(mappedPath, "file"), 0)
	assert.Error(t, err)
	err = os.WriteFile(filepath.Join(mappedPath, "file"), []byte("test content"), os.ModePerm)
	assert.NoError(t, err)
	request.Filepath = testFile1
	request.Target = path.Join("/vdir", "file")
	err = c.updateQuotaAfterRename(fs, request.Filepath, request.Target, filepath.Join(mappedPath, "file"), 12)
	assert.NoError(t, err)
	err = os.WriteFile(filepath.Join(user.GetHomeDir(), "testfile1"), []byte("test content"), os.ModePerm)
	assert.NoError(t, err)
	request.Target = testFile1
	request.Filepath = path.Join("/vdir", "file")
	err = c.updateQuotaAfterRename(fs, request.Filepath, request.Target, filepath.Join(mappedPath, "file"), 12)
	assert.NoError(t, err)
	request.Target = path.Join("/vdir1", "file")
	request.Filepath = path.Join("/vdir", "file")
	err = c.updateQuotaAfterRename(fs, request.Filepath, request.Target, filepath.Join(mappedPath, "file"), 12)
	assert.NoError(t, err)

	err = os.RemoveAll(mappedPath)
	assert.NoError(t, err)
	err = os.RemoveAll(user.GetHomeDir())
	assert.NoError(t, err)
}

func TestErrorsMapping(t *testing.T) {
	fs := vfs.NewOsFs("", os.TempDir(), "")
	conn := NewBaseConnection("", ProtocolSFTP, "", dataprovider.User{HomeDir: os.TempDir()})
	for _, protocol := range supportedProtocols {
		conn.SetProtocol(protocol)
		err := conn.GetFsError(fs, os.ErrNotExist)
		if protocol == ProtocolSFTP {
			assert.EqualError(t, err, sftp.ErrSSHFxNoSuchFile.Error())
		} else if protocol == ProtocolWebDAV || protocol == ProtocolFTP || protocol == ProtocolHTTP {
			assert.EqualError(t, err, os.ErrNotExist.Error())
		} else {
			assert.EqualError(t, err, ErrNotExist.Error())
		}
		err = conn.GetFsError(fs, os.ErrPermission)
		if protocol == ProtocolSFTP {
			assert.EqualError(t, err, sftp.ErrSSHFxPermissionDenied.Error())
		} else {
			assert.EqualError(t, err, ErrPermissionDenied.Error())
		}
		err = conn.GetFsError(fs, os.ErrClosed)
		if protocol == ProtocolSFTP {
			assert.EqualError(t, err, sftp.ErrSSHFxFailure.Error())
		} else {
			assert.EqualError(t, err, ErrGenericFailure.Error())
		}
		err = conn.GetFsError(fs, ErrPermissionDenied)
		if protocol == ProtocolSFTP {
			assert.EqualError(t, err, sftp.ErrSSHFxFailure.Error())
		} else {
			assert.EqualError(t, err, ErrPermissionDenied.Error())
		}
		err = conn.GetFsError(fs, vfs.ErrVfsUnsupported)
		if protocol == ProtocolSFTP {
			assert.EqualError(t, err, sftp.ErrSSHFxOpUnsupported.Error())
		} else {
			assert.EqualError(t, err, ErrOpUnsupported.Error())
		}
		err = conn.GetFsError(fs, vfs.ErrStorageSizeUnavailable)
		if protocol == ProtocolSFTP {
			assert.EqualError(t, err, sftp.ErrSSHFxOpUnsupported.Error())
		} else {
			assert.EqualError(t, err, vfs.ErrStorageSizeUnavailable.Error())
		}
		err = conn.GetFsError(fs, nil)
		assert.NoError(t, err)
		err = conn.GetOpUnsupportedError()
		if protocol == ProtocolSFTP {
			assert.EqualError(t, err, sftp.ErrSSHFxOpUnsupported.Error())
		} else {
			assert.EqualError(t, err, ErrOpUnsupported.Error())
		}
	}
}

func TestMaxWriteSize(t *testing.T) {
	permissions := make(map[string][]string)
	permissions["/"] = []string{dataprovider.PermAny}
	user := dataprovider.User{
		Username:    userTestUsername,
		Permissions: permissions,
		HomeDir:     filepath.Clean(os.TempDir()),
	}
	fs, err := user.GetFilesystem("123")
	assert.NoError(t, err)
	conn := NewBaseConnection("", ProtocolFTP, "", user)
	quotaResult := vfs.QuotaCheckResult{
		HasSpace: true,
	}
	size, err := conn.GetMaxWriteSize(quotaResult, false, 0, fs.IsUploadResumeSupported())
	assert.NoError(t, err)
	assert.Equal(t, int64(0), size)

	conn.User.Filters.MaxUploadFileSize = 100
	size, err = conn.GetMaxWriteSize(quotaResult, false, 0, fs.IsUploadResumeSupported())
	assert.NoError(t, err)
	assert.Equal(t, int64(100), size)

	quotaResult.QuotaSize = 1000
	size, err = conn.GetMaxWriteSize(quotaResult, false, 50, fs.IsUploadResumeSupported())
	assert.NoError(t, err)
	assert.Equal(t, int64(100), size)

	quotaResult.QuotaSize = 1000
	quotaResult.UsedSize = 990
	size, err = conn.GetMaxWriteSize(quotaResult, false, 50, fs.IsUploadResumeSupported())
	assert.NoError(t, err)
	assert.Equal(t, int64(60), size)

	quotaResult.QuotaSize = 0
	quotaResult.UsedSize = 0
	size, err = conn.GetMaxWriteSize(quotaResult, true, 100, fs.IsUploadResumeSupported())
	assert.EqualError(t, err, ErrQuotaExceeded.Error())
	assert.Equal(t, int64(0), size)

	size, err = conn.GetMaxWriteSize(quotaResult, true, 10, fs.IsUploadResumeSupported())
	assert.NoError(t, err)
	assert.Equal(t, int64(90), size)

	fs = newMockOsFs(true, fs.ConnectionID(), user.GetHomeDir())
	size, err = conn.GetMaxWriteSize(quotaResult, true, 100, fs.IsUploadResumeSupported())
	assert.EqualError(t, err, ErrOpUnsupported.Error())
	assert.Equal(t, int64(0), size)
}
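
// The quota arithmetic exercised above, as a worked example: with
// QuotaSize=1000 and UsedSize=990 there are 10 bytes of quota left, and
// overwriting an existing 50 byte file returns those 50 bytes to the budget,
// so the max write size is 10+50 = 60, still below the user's
// MaxUploadFileSize of 100. With only MaxUploadFileSize=100 set and a resumed
// upload starting at offset 10, the remaining allowance is 100-10 = 90.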

@@ -1,573 +0,0 @@
package common

import (
	"encoding/hex"
	"encoding/json"
	"fmt"
	"net"
	"os"
	"sort"
	"sync"
	"time"

	"github.com/yl2chen/cidranger"

	"github.com/drakkan/sftpgo/dataprovider"
	"github.com/drakkan/sftpgo/logger"
	"github.com/drakkan/sftpgo/utils"
)

// HostEvent is the enumerable for the supported host events
type HostEvent int

// Supported host events
const (
	HostEventLoginFailed HostEvent = iota
	HostEventUserNotFound
	HostEventNoLoginTried
	HostEventLimitExceeded
)
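
// Each event contributes a configurable score to the source host; the exact
// mapping lives in the defender's AddEvent implementation, which is not part
// of this diff. A plausible sketch of that mapping, based only on the
// DefenderConfig fields defined below (the method name is hypothetical):
//
//	func (c *DefenderConfig) scoreForEvent(event HostEvent) int {
//		switch event {
//		case HostEventLoginFailed:
//			return c.ScoreValid // an existing account failed to authenticate
//		case HostEventLimitExceeded:
//			return c.ScoreLimitExceeded // rate limit or per-host connection limit hit
//		default: // HostEventUserNotFound, HostEventNoLoginTried
//			return c.ScoreInvalid
//		}
//	}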

// DefenderEntry defines a defender entry
type DefenderEntry struct {
	IP      string    `json:"ip"`
	Score   int       `json:"score,omitempty"`
	BanTime time.Time `json:"ban_time,omitempty"`
}

// GetID returns a unique ID for a defender entry
func (d *DefenderEntry) GetID() string {
	return hex.EncodeToString([]byte(d.IP))
}

// GetBanTime returns the ban time for a defender entry as string
func (d *DefenderEntry) GetBanTime() string {
	if d.BanTime.IsZero() {
		return ""
	}
	return d.BanTime.UTC().Format(time.RFC3339)
}

// MarshalJSON returns the JSON encoding of a DefenderEntry.
func (d *DefenderEntry) MarshalJSON() ([]byte, error) {
	return json.Marshal(&struct {
		ID      string `json:"id"`
		IP      string `json:"ip"`
		Score   int    `json:"score,omitempty"`
		BanTime string `json:"ban_time,omitempty"`
	}{
		ID:      d.GetID(),
		IP:      d.IP,
		Score:   d.Score,
		BanTime: d.GetBanTime(),
	})
}
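
// MarshalJSON uses the common Go pattern of marshaling through an anonymous
// struct, so the wire format (hex ID, RFC 3339 ban time) can differ from the
// in-memory representation. For example (the expected output matches the
// assertion in TestDefenderIntegration above):
//
//	entry := DefenderEntry{IP: "127.1.1.1", Score: 2}
//	data, _ := json.Marshal(&entry)
//	// data == `{"id":"3132372e312e312e31","ip":"127.1.1.1","score":2}`
//	// "3132372e312e312e31" is the hex encoding of the ASCII bytes of "127.1.1.1"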

// Defender defines the interface that a defender must implement
type Defender interface {
	GetHosts() []*DefenderEntry
	GetHost(ip string) (*DefenderEntry, error)
	AddEvent(ip string, event HostEvent)
	IsBanned(ip string) bool
	GetBanTime(ip string) *time.Time
	GetScore(ip string) int
	DeleteHost(ip string) bool
	Reload() error
}

// DefenderConfig defines the "defender" configuration
type DefenderConfig struct {
	// Set to true to enable the defender
	Enabled bool `json:"enabled" mapstructure:"enabled"`
	// BanTime is the number of minutes that a host is banned
	BanTime int `json:"ban_time" mapstructure:"ban_time"`
	// Percentage increase of the ban time if a banned host tries to connect again
	BanTimeIncrement int `json:"ban_time_increment" mapstructure:"ban_time_increment"`
	// Threshold value for banning a client
	Threshold int `json:"threshold" mapstructure:"threshold"`
	// Score for invalid login attempts, e.g. non-existent user accounts or
	// clients disconnected for inactivity without authentication attempts
	ScoreInvalid int `json:"score_invalid" mapstructure:"score_invalid"`
	// Score for valid login attempts, e.g. user accounts that exist
	ScoreValid int `json:"score_valid" mapstructure:"score_valid"`
	// Score for limit exceeded events, generated by the rate limiters or when the
	// max connections per host limit is exceeded
	ScoreLimitExceeded int `json:"score_limit_exceeded" mapstructure:"score_limit_exceeded"`
	// Defines the time window, in minutes, for tracking client errors.
	// A host is banned if it has exceeded the defined threshold during
	// the last observation time minutes
	ObservationTime int `json:"observation_time" mapstructure:"observation_time"`
	// The number of banned IPs and host scores kept in memory will vary between the
	// soft and hard limit
	EntriesSoftLimit int `json:"entries_soft_limit" mapstructure:"entries_soft_limit"`
	EntriesHardLimit int `json:"entries_hard_limit" mapstructure:"entries_hard_limit"`
	// Path to a file containing a list of IP addresses and/or networks to never ban
	SafeListFile string `json:"safelist_file" mapstructure:"safelist_file"`
	// Path to a file containing a list of IP addresses and/or networks to always ban
	BlockListFile string `json:"blocklist_file" mapstructure:"blocklist_file"`
}
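
// A minimal configuration sketch that satisfies the validate() checks further
// down (each score strictly below the threshold, positive times, hard limit
// above the soft limit); the numbers here are illustrative only:
//
//	cfg := DefenderConfig{
//		Enabled:            true,
//		BanTime:            30, // minutes
//		BanTimeIncrement:   50, // +50% ban time for repeat offenders
//		Threshold:          15,
//		ScoreInvalid:       2,
//		ScoreValid:         1,
//		ScoreLimitExceeded: 3,
//		ObservationTime:    30, // minutes
//		EntriesSoftLimit:   100,
//		EntriesHardLimit:   150,
//	}
//	defender, err := newInMemoryDefender(&cfg)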

type memoryDefender struct {
	config *DefenderConfig
	sync.RWMutex
	// IP addresses of the clients trying to connect are stored inside hosts;
	// they are added to banned once the threshold is reached.
	// A violation from a banned host will increase the ban time
	// based on the configured BanTimeIncrement
	hosts     map[string]hostScore // the key is the host IP
	banned    map[string]time.Time // the key is the host IP
	safeList  *HostList
	blockList *HostList
}

// HostListFile defines the structure expected for safe/block list files
type HostListFile struct {
	IPAddresses  []string `json:"addresses"`
	CIDRNetworks []string `json:"networks"`
}

// HostList defines the structure used to keep the HostListFile in memory
type HostList struct {
	IPAddresses map[string]bool
	Ranges      cidranger.Ranger
}

func (h *HostList) isListed(ip string) bool {
	if _, ok := h.IPAddresses[ip]; ok {
		return true
	}

	ok, err := h.Ranges.Contains(net.ParseIP(ip))
	if err != nil {
		return false
	}

	return ok
}
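
// isListed first does an O(1) exact-IP map lookup, then falls back to a CIDR
// lookup via the cidranger trie. A sketch of how such a HostList could be
// built by hand (loadHostListFromFile presumably does the equivalent when
// parsing a HostListFile):
//
//	ranges := cidranger.NewPCTrieRanger()
//	_, network, err := net.ParseCIDR("192.168.1.0/24")
//	if err == nil {
//		ranges.Insert(cidranger.NewBasicRangerEntry(*network)) //nolint:errcheck
//	}
//	list := &HostList{
//		IPAddresses: map[string]bool{"10.0.0.1": true},
//		Ranges:      ranges,
//	}
//	fmt.Println(list.isListed("192.168.1.10")) // true, via the CIDR range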

type hostEvent struct {
	dateTime time.Time
	score    int
}

type hostScore struct {
	TotalScore int
	Events     []hostEvent
}
|
|
||||||
|
|
||||||
// validate returns an error if the configuration is invalid
func (c *DefenderConfig) validate() error {
	if !c.Enabled {
		return nil
	}
	if c.ScoreInvalid >= c.Threshold {
		return fmt.Errorf("score_invalid %v must be lower than threshold %v", c.ScoreInvalid, c.Threshold)
	}
	if c.ScoreValid >= c.Threshold {
		return fmt.Errorf("score_valid %v must be lower than threshold %v", c.ScoreValid, c.Threshold)
	}
	if c.ScoreLimitExceeded >= c.Threshold {
		return fmt.Errorf("score_limit_exceeded %v must be lower than threshold %v", c.ScoreLimitExceeded, c.Threshold)
	}
	if c.BanTime <= 0 {
		return fmt.Errorf("invalid ban_time %v", c.BanTime)
	}
	if c.BanTimeIncrement <= 0 {
		return fmt.Errorf("invalid ban_time_increment %v", c.BanTimeIncrement)
	}
	if c.ObservationTime <= 0 {
		return fmt.Errorf("invalid observation_time %v", c.ObservationTime)
	}
	if c.EntriesSoftLimit <= 0 {
		return fmt.Errorf("invalid entries_soft_limit %v", c.EntriesSoftLimit)
	}
	if c.EntriesHardLimit <= c.EntriesSoftLimit {
		return fmt.Errorf("invalid entries_hard_limit %v, must be greater than entries_soft_limit %v", c.EntriesHardLimit, c.EntriesSoftLimit)
	}

	return nil
}

func newInMemoryDefender(config *DefenderConfig) (Defender, error) {
	err := config.validate()
	if err != nil {
		return nil, err
	}
	defender := &memoryDefender{
		config: config,
		hosts:  make(map[string]hostScore),
		banned: make(map[string]time.Time),
	}

	if err := defender.Reload(); err != nil {
		return nil, err
	}

	return defender, nil
}

// Reload reloads block and safe lists
func (d *memoryDefender) Reload() error {
	blockList, err := loadHostListFromFile(d.config.BlockListFile)
	if err != nil {
		return err
	}

	d.Lock()
	d.blockList = blockList
	d.Unlock()

	safeList, err := loadHostListFromFile(d.config.SafeListFile)
	if err != nil {
		return err
	}

	d.Lock()
	d.safeList = safeList
	d.Unlock()

	return nil
}

// GetHosts returns hosts that are banned or for which some violations have been detected
func (d *memoryDefender) GetHosts() []*DefenderEntry {
	d.RLock()
	defer d.RUnlock()

	var result []*DefenderEntry
	for k, v := range d.banned {
		result = append(result, &DefenderEntry{
			IP:      k,
			BanTime: v,
		})
	}
	for k, v := range d.hosts {
		result = append(result, &DefenderEntry{
			IP:    k,
			Score: v.TotalScore,
		})
	}

	return result
}

// GetHost returns a defender host by ip, if any
func (d *memoryDefender) GetHost(ip string) (*DefenderEntry, error) {
	d.RLock()
	defer d.RUnlock()

	if banTime, ok := d.banned[ip]; ok {
		return &DefenderEntry{
			IP:      ip,
			BanTime: banTime,
		}, nil
	}

	if ev, ok := d.hosts[ip]; ok {
		return &DefenderEntry{
			IP:    ip,
			Score: ev.TotalScore,
		}, nil
	}

	return nil, dataprovider.NewRecordNotFoundError("host not found")
}

// IsBanned returns true if the specified IP is banned
// and increases the ban time if the IP is found.
// This method must be called as soon as the client connects
func (d *memoryDefender) IsBanned(ip string) bool {
	d.RLock()

	if banTime, ok := d.banned[ip]; ok {
		if banTime.After(time.Now()) {
			increment := d.config.BanTime * d.config.BanTimeIncrement / 100
			if increment == 0 {
				increment++
			}

			d.RUnlock()

			// we could save an earlier ban time if there are concurrent updates,
			// but this should not make much difference. We prefer to hold the read
			// lock for as long as possible for performance reasons: this method is
			// called each time a new client connects and it must be as fast as possible
			d.Lock()
			d.banned[ip] = banTime.Add(time.Duration(increment) * time.Minute)
			d.Unlock()

			return true
		}
	}

	defer d.RUnlock()

	if d.blockList != nil && d.blockList.isListed(ip) {
		// permanent ban
		return true
	}

	return false
}

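As a worked example of the increment arithmetic above (illustrative values, not defaults): with BanTime 10 and BanTimeIncrement 50, each connection attempt from a banned host extends the ban by 10 * 50 / 100 = 5 minutes. With a very small percentage the integer division can yield 0, which is why the code bumps the increment to at least one minute.
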
// DeleteHost removes the specified IP from the defender lists
func (d *memoryDefender) DeleteHost(ip string) bool {
	d.Lock()
	defer d.Unlock()

	if _, ok := d.banned[ip]; ok {
		delete(d.banned, ip)
		return true
	}

	if _, ok := d.hosts[ip]; ok {
		delete(d.hosts, ip)
		return true
	}

	return false
}

// AddEvent adds an event for the given IP.
// This method must be called for clients not yet banned
func (d *memoryDefender) AddEvent(ip string, event HostEvent) {
	d.Lock()
	defer d.Unlock()

	if d.safeList != nil && d.safeList.isListed(ip) {
		return
	}

	// ignore events for already banned hosts
	if _, ok := d.banned[ip]; ok {
		return
	}

	var score int

	switch event {
	case HostEventLoginFailed:
		// failed login for an existing user account
		score = d.config.ScoreValid
	case HostEventLimitExceeded:
		score = d.config.ScoreLimitExceeded
	case HostEventUserNotFound, HostEventNoLoginTried:
		score = d.config.ScoreInvalid
	}

	ev := hostEvent{
		dateTime: time.Now(),
		score:    score,
	}

	if hs, ok := d.hosts[ip]; ok {
		hs.Events = append(hs.Events, ev)
		hs.TotalScore = 0

		// keep only the events within the observation window and recompute the score
		idx := 0
		for _, event := range hs.Events {
			if event.dateTime.Add(time.Duration(d.config.ObservationTime) * time.Minute).After(time.Now()) {
				hs.Events[idx] = event
				hs.TotalScore += event.score
				idx++
			}
		}

		hs.Events = hs.Events[:idx]
		if hs.TotalScore >= d.config.Threshold {
			d.banned[ip] = time.Now().Add(time.Duration(d.config.BanTime) * time.Minute)
			delete(d.hosts, ip)
			d.cleanupBanned()
		} else {
			d.hosts[ip] = hs
		}
	} else {
		d.hosts[ip] = hostScore{
			TotalScore: ev.score,
			Events:     []hostEvent{ev},
		}
		d.cleanupHosts()
	}
}

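A package-internal sketch of the scoring flow above (the IP, the event sequence and the configuration values are illustrative): events accumulate within the observation window and the host is banned only once the total reaches the threshold.

	d, _ := newInMemoryDefender(&DefenderConfig{
		Enabled: true, BanTime: 30, BanTimeIncrement: 50, Threshold: 8,
		ScoreInvalid: 2, ScoreValid: 1, ScoreLimitExceeded: 3,
		ObservationTime: 30, EntriesSoftLimit: 50, EntriesHardLimit: 100,
	})
	m := d.(*memoryDefender)
	m.AddEvent("203.0.113.7", HostEventUserNotFound)  // +2 (ScoreInvalid)
	m.AddEvent("203.0.113.7", HostEventNoLoginTried)  // +2 (ScoreInvalid)
	m.AddEvent("203.0.113.7", HostEventLimitExceeded) // +3 (ScoreLimitExceeded)
	// total score 7, still below the threshold of 8: tracked but not banned
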
func (d *memoryDefender) countBanned() int {
	d.RLock()
	defer d.RUnlock()

	return len(d.banned)
}

func (d *memoryDefender) countHosts() int {
	d.RLock()
	defer d.RUnlock()

	return len(d.hosts)
}

// GetBanTime returns the ban time for the given IP or nil if the IP is not banned
func (d *memoryDefender) GetBanTime(ip string) *time.Time {
	d.RLock()
	defer d.RUnlock()

	if banTime, ok := d.banned[ip]; ok {
		return &banTime
	}

	return nil
}

// GetScore returns the score for the given IP
func (d *memoryDefender) GetScore(ip string) int {
	d.RLock()
	defer d.RUnlock()

	score := 0

	if hs, ok := d.hosts[ip]; ok {
		for _, event := range hs.Events {
			if event.dateTime.Add(time.Duration(d.config.ObservationTime) * time.Minute).After(time.Now()) {
				score += event.score
			}
		}
	}

	return score
}

func (d *memoryDefender) cleanupBanned() {
	if len(d.banned) > d.config.EntriesHardLimit {
		kvList := make(kvList, 0, len(d.banned))

		for k, v := range d.banned {
			if v.Before(time.Now()) {
				delete(d.banned, k)
			}

			kvList = append(kvList, kv{
				Key:   k,
				Value: v.UnixNano(),
			})
		}

		// we removed expired IP addresses above, if any; this could be enough
		numToRemove := len(d.banned) - d.config.EntriesSoftLimit

		if numToRemove <= 0 {
			return
		}

		sort.Sort(kvList)

		for idx, kv := range kvList {
			if idx >= numToRemove {
				break
			}

			delete(d.banned, kv.Key)
		}
	}
}

func (d *memoryDefender) cleanupHosts() {
	if len(d.hosts) > d.config.EntriesHardLimit {
		kvList := make(kvList, 0, len(d.hosts))

		for k, v := range d.hosts {
			value := int64(0)
			if len(v.Events) > 0 {
				value = v.Events[len(v.Events)-1].dateTime.UnixNano()
			}
			kvList = append(kvList, kv{
				Key:   k,
				Value: value,
			})
		}

		sort.Sort(kvList)

		numToRemove := len(d.hosts) - d.config.EntriesSoftLimit

		for idx, kv := range kvList {
			if idx >= numToRemove {
				break
			}

			delete(d.hosts, kv.Key)
		}
	}
}

func loadHostListFromFile(name string) (*HostList, error) {
	if name == "" {
		return nil, nil
	}
	if !utils.IsFileInputValid(name) {
		return nil, fmt.Errorf("invalid host list file name %#v", name)
	}

	info, err := os.Stat(name)
	if err != nil {
		return nil, err
	}

	// opinionated max size, you should avoid big host lists
	if info.Size() > 1048576*5 { // 5MB
		return nil, fmt.Errorf("host list file %#v is too big: %v bytes", name, info.Size())
	}

	content, err := os.ReadFile(name)
	if err != nil {
		return nil, fmt.Errorf("unable to read input file %#v: %v", name, err)
	}

	var hostList HostListFile

	err = json.Unmarshal(content, &hostList)
	if err != nil {
		return nil, err
	}

	if len(hostList.CIDRNetworks) > 0 || len(hostList.IPAddresses) > 0 {
		result := &HostList{
			IPAddresses: make(map[string]bool),
			Ranges:      cidranger.NewPCTrieRanger(),
		}
		ipCount := 0
		cdrCount := 0
		for _, ip := range hostList.IPAddresses {
			if net.ParseIP(ip) == nil {
				logger.Warn(logSender, "", "unable to parse IP %#v", ip)
				continue
			}
			result.IPAddresses[ip] = true
			ipCount++
		}
		for _, cidrNet := range hostList.CIDRNetworks {
			_, network, err := net.ParseCIDR(cidrNet)
			if err != nil {
				logger.Warn(logSender, "", "unable to parse CIDR network %#v", cidrNet)
				continue
			}
			err = result.Ranges.Insert(cidranger.NewBasicRangerEntry(*network))
			if err == nil {
				cdrCount++
			}
		}

		logger.Info(logSender, "", "list %#v loaded, ip addresses loaded: %v/%v, networks loaded: %v/%v",
			name, ipCount, len(hostList.IPAddresses), cdrCount, len(hostList.CIDRNetworks))
		return result, nil
	}

	return nil, nil
}

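For reference, a sketch of a safe/block list file that matches the HostListFile JSON tags above (the addresses, the network and the path are all illustrative):

	data := []byte(`{"addresses": ["192.0.2.10", "198.51.100.7"], "networks": ["203.0.113.0/24"]}`)
	if err := os.WriteFile("/tmp/safelist.json", data, 0600); err == nil {
		list, err := loadHostListFromFile("/tmp/safelist.json")
		_ = list
		_ = err
	}
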
type kv struct {
	Key   string
	Value int64
}

type kvList []kv

func (p kvList) Len() int           { return len(p) }
func (p kvList) Less(i, j int) bool { return p[i].Value < p[j].Value }
func (p kvList) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }

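kvList sorts ascending by Value, so after sort.Sort the entries with the smallest timestamps, i.e. the oldest bans or the hosts with the oldest last event, come first and are evicted first by the cleanup loops above. A tiny illustration (keys and values are made up):

	l := kvList{{Key: "b", Value: 20}, {Key: "a", Value: 10}}
	sort.Sort(l)
	// l[0].Key == "a": the entry with the smallest (oldest) value leads
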
@@ -1,556 +0,0 @@
package common

import (
	"crypto/rand"
	"encoding/hex"
	"encoding/json"
	"fmt"
	"net"
	"os"
	"path/filepath"
	"runtime"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/yl2chen/cidranger"
)

func TestBasicDefender(t *testing.T) {
	bl := HostListFile{
		IPAddresses:  []string{"172.16.1.1", "172.16.1.2"},
		CIDRNetworks: []string{"10.8.0.0/24"},
	}
	sl := HostListFile{
		IPAddresses:  []string{"172.16.1.3", "172.16.1.4"},
		CIDRNetworks: []string{"192.168.8.0/24"},
	}
	blFile := filepath.Join(os.TempDir(), "bl.json")
	slFile := filepath.Join(os.TempDir(), "sl.json")

	data, err := json.Marshal(bl)
	assert.NoError(t, err)

	err = os.WriteFile(blFile, data, os.ModePerm)
	assert.NoError(t, err)

	data, err = json.Marshal(sl)
	assert.NoError(t, err)

	err = os.WriteFile(slFile, data, os.ModePerm)
	assert.NoError(t, err)

	config := &DefenderConfig{
		Enabled:            true,
		BanTime:            10,
		BanTimeIncrement:   2,
		Threshold:          5,
		ScoreInvalid:       2,
		ScoreValid:         1,
		ScoreLimitExceeded: 3,
		ObservationTime:    15,
		EntriesSoftLimit:   1,
		EntriesHardLimit:   2,
		SafeListFile:       "slFile",
		BlockListFile:      "blFile",
	}

	_, err = newInMemoryDefender(config)
	assert.Error(t, err)
	config.BlockListFile = blFile
	_, err = newInMemoryDefender(config)
	assert.Error(t, err)
	config.SafeListFile = slFile
	d, err := newInMemoryDefender(config)
	assert.NoError(t, err)

	defender := d.(*memoryDefender)
	assert.True(t, defender.IsBanned("172.16.1.1"))
	assert.False(t, defender.IsBanned("172.16.1.10"))
	assert.False(t, defender.IsBanned("10.8.2.3"))
	assert.True(t, defender.IsBanned("10.8.0.3"))
	assert.False(t, defender.IsBanned("invalid ip"))
	assert.Equal(t, 0, defender.countBanned())
	assert.Equal(t, 0, defender.countHosts())
	assert.Len(t, defender.GetHosts(), 0)
	_, err = defender.GetHost("10.8.0.4")
	assert.Error(t, err)

	defender.AddEvent("172.16.1.4", HostEventLoginFailed)
	defender.AddEvent("192.168.8.4", HostEventUserNotFound)
	defender.AddEvent("172.16.1.3", HostEventLimitExceeded)
	assert.Equal(t, 0, defender.countHosts())

	testIP := "12.34.56.78"
	defender.AddEvent(testIP, HostEventLoginFailed)
	assert.Equal(t, 1, defender.countHosts())
	assert.Equal(t, 0, defender.countBanned())
	assert.Equal(t, 1, defender.GetScore(testIP))
	if assert.Len(t, defender.GetHosts(), 1) {
		assert.Equal(t, 1, defender.GetHosts()[0].Score)
		assert.True(t, defender.GetHosts()[0].BanTime.IsZero())
		assert.Empty(t, defender.GetHosts()[0].GetBanTime())
	}
	host, err := defender.GetHost(testIP)
	assert.NoError(t, err)
	assert.Equal(t, 1, host.Score)
	assert.Empty(t, host.GetBanTime())
	assert.Nil(t, defender.GetBanTime(testIP))
	defender.AddEvent(testIP, HostEventLimitExceeded)
	assert.Equal(t, 1, defender.countHosts())
	assert.Equal(t, 0, defender.countBanned())
	assert.Equal(t, 4, defender.GetScore(testIP))
	if assert.Len(t, defender.GetHosts(), 1) {
		assert.Equal(t, 4, defender.GetHosts()[0].Score)
	}
	defender.AddEvent(testIP, HostEventNoLoginTried)
	defender.AddEvent(testIP, HostEventNoLoginTried)
	assert.Equal(t, 0, defender.countHosts())
	assert.Equal(t, 1, defender.countBanned())
	assert.Equal(t, 0, defender.GetScore(testIP))
	assert.NotNil(t, defender.GetBanTime(testIP))
	if assert.Len(t, defender.GetHosts(), 1) {
		assert.Equal(t, 0, defender.GetHosts()[0].Score)
		assert.False(t, defender.GetHosts()[0].BanTime.IsZero())
		assert.NotEmpty(t, defender.GetHosts()[0].GetBanTime())
		assert.Equal(t, hex.EncodeToString([]byte(testIP)), defender.GetHosts()[0].GetID())
	}
	host, err = defender.GetHost(testIP)
	assert.NoError(t, err)
	assert.Equal(t, 0, host.Score)
	assert.NotEmpty(t, host.GetBanTime())

	// now test cleanup, testIP is already banned
	testIP1 := "12.34.56.79"
	testIP2 := "12.34.56.80"
	testIP3 := "12.34.56.81"

	defender.AddEvent(testIP1, HostEventNoLoginTried)
	defender.AddEvent(testIP2, HostEventNoLoginTried)
	assert.Equal(t, 2, defender.countHosts())
	time.Sleep(20 * time.Millisecond)
	defender.AddEvent(testIP3, HostEventNoLoginTried)
	assert.Equal(t, defender.config.EntriesSoftLimit, defender.countHosts())
	// testIP1 and testIP2 should be removed
	assert.Equal(t, defender.config.EntriesSoftLimit, defender.countHosts())
	assert.Equal(t, 0, defender.GetScore(testIP1))
	assert.Equal(t, 0, defender.GetScore(testIP2))
	assert.Equal(t, 2, defender.GetScore(testIP3))

	defender.AddEvent(testIP3, HostEventNoLoginTried)
	defender.AddEvent(testIP3, HostEventNoLoginTried)
	// IP3 is now banned
	assert.NotNil(t, defender.GetBanTime(testIP3))
	assert.Equal(t, 0, defender.countHosts())

	time.Sleep(20 * time.Millisecond)
	for i := 0; i < 3; i++ {
		defender.AddEvent(testIP1, HostEventNoLoginTried)
	}
	assert.Equal(t, 0, defender.countHosts())
	assert.Equal(t, config.EntriesSoftLimit, defender.countBanned())
	assert.Nil(t, defender.GetBanTime(testIP))
	assert.Nil(t, defender.GetBanTime(testIP3))
	assert.NotNil(t, defender.GetBanTime(testIP1))

	for i := 0; i < 3; i++ {
		defender.AddEvent(testIP, HostEventNoLoginTried)
		time.Sleep(10 * time.Millisecond)
		defender.AddEvent(testIP3, HostEventNoLoginTried)
	}
	assert.Equal(t, 0, defender.countHosts())
	assert.Equal(t, defender.config.EntriesSoftLimit, defender.countBanned())

	banTime := defender.GetBanTime(testIP3)
	if assert.NotNil(t, banTime) {
		assert.True(t, defender.IsBanned(testIP3))
		// ban time should increase
		newBanTime := defender.GetBanTime(testIP3)
		assert.True(t, newBanTime.After(*banTime))
	}

	assert.True(t, defender.DeleteHost(testIP3))
	assert.False(t, defender.DeleteHost(testIP3))

	err = os.Remove(slFile)
	assert.NoError(t, err)
	err = os.Remove(blFile)
	assert.NoError(t, err)
}

func TestLoadHostListFromFile(t *testing.T) {
	_, err := loadHostListFromFile(".")
	assert.Error(t, err)

	hostsFilePath := filepath.Join(os.TempDir(), "hostfile")
	content := make([]byte, 1048576*6)
	_, err = rand.Read(content)
	assert.NoError(t, err)

	err = os.WriteFile(hostsFilePath, content, os.ModePerm)
	assert.NoError(t, err)

	_, err = loadHostListFromFile(hostsFilePath)
	assert.Error(t, err)

	hl := HostListFile{
		IPAddresses:  []string{},
		CIDRNetworks: []string{},
	}

	asJSON, err := json.Marshal(hl)
	assert.NoError(t, err)
	err = os.WriteFile(hostsFilePath, asJSON, os.ModePerm)
	assert.NoError(t, err)

	hostList, err := loadHostListFromFile(hostsFilePath)
	assert.NoError(t, err)
	assert.Nil(t, hostList)

	hl.IPAddresses = append(hl.IPAddresses, "invalidip")
	asJSON, err = json.Marshal(hl)
	assert.NoError(t, err)
	err = os.WriteFile(hostsFilePath, asJSON, os.ModePerm)
	assert.NoError(t, err)

	hostList, err = loadHostListFromFile(hostsFilePath)
	assert.NoError(t, err)
	assert.Len(t, hostList.IPAddresses, 0)

	hl.IPAddresses = nil
	hl.CIDRNetworks = append(hl.CIDRNetworks, "invalid net")

	asJSON, err = json.Marshal(hl)
	assert.NoError(t, err)
	err = os.WriteFile(hostsFilePath, asJSON, os.ModePerm)
	assert.NoError(t, err)

	hostList, err = loadHostListFromFile(hostsFilePath)
	assert.NoError(t, err)
	assert.NotNil(t, hostList)
	assert.Len(t, hostList.IPAddresses, 0)
	assert.Equal(t, 0, hostList.Ranges.Len())

	if runtime.GOOS != "windows" {
		err = os.Chmod(hostsFilePath, 0111)
		assert.NoError(t, err)

		_, err = loadHostListFromFile(hostsFilePath)
		assert.Error(t, err)

		err = os.Chmod(hostsFilePath, 0644)
		assert.NoError(t, err)
	}

	err = os.WriteFile(hostsFilePath, []byte("non json content"), os.ModePerm)
	assert.NoError(t, err)
	_, err = loadHostListFromFile(hostsFilePath)
	assert.Error(t, err)

	err = os.Remove(hostsFilePath)
	assert.NoError(t, err)
}

func TestDefenderCleanup(t *testing.T) {
	d := memoryDefender{
		banned: make(map[string]time.Time),
		hosts:  make(map[string]hostScore),
		config: &DefenderConfig{
			ObservationTime:  1,
			EntriesSoftLimit: 2,
			EntriesHardLimit: 3,
		},
	}

	d.banned["1.1.1.1"] = time.Now().Add(-24 * time.Hour)
	d.banned["1.1.1.2"] = time.Now().Add(-24 * time.Hour)
	d.banned["1.1.1.3"] = time.Now().Add(-24 * time.Hour)
	d.banned["1.1.1.4"] = time.Now().Add(-24 * time.Hour)

	d.cleanupBanned()
	assert.Equal(t, 0, d.countBanned())

	d.banned["2.2.2.2"] = time.Now().Add(2 * time.Minute)
	d.banned["2.2.2.3"] = time.Now().Add(1 * time.Minute)
	d.banned["2.2.2.4"] = time.Now().Add(3 * time.Minute)
	d.banned["2.2.2.5"] = time.Now().Add(4 * time.Minute)

	d.cleanupBanned()
	assert.Equal(t, d.config.EntriesSoftLimit, d.countBanned())
	assert.Nil(t, d.GetBanTime("2.2.2.3"))

	d.hosts["3.3.3.3"] = hostScore{
		TotalScore: 0,
		Events: []hostEvent{
			{
				dateTime: time.Now().Add(-5 * time.Minute),
				score:    1,
			},
			{
				dateTime: time.Now().Add(-3 * time.Minute),
				score:    1,
			},
			{
				dateTime: time.Now(),
				score:    1,
			},
		},
	}
	d.hosts["3.3.3.4"] = hostScore{
		TotalScore: 1,
		Events: []hostEvent{
			{
				dateTime: time.Now().Add(-3 * time.Minute),
				score:    1,
			},
		},
	}
	d.hosts["3.3.3.5"] = hostScore{
		TotalScore: 1,
		Events: []hostEvent{
			{
				dateTime: time.Now().Add(-2 * time.Minute),
				score:    1,
			},
		},
	}
	d.hosts["3.3.3.6"] = hostScore{
		TotalScore: 1,
		Events: []hostEvent{
			{
				dateTime: time.Now().Add(-1 * time.Minute),
				score:    1,
			},
		},
	}

	assert.Equal(t, 1, d.GetScore("3.3.3.3"))

	d.cleanupHosts()
	assert.Equal(t, d.config.EntriesSoftLimit, d.countHosts())
	assert.Equal(t, 0, d.GetScore("3.3.3.4"))
}

func TestDefenderConfig(t *testing.T) {
	c := DefenderConfig{}
	err := c.validate()
	require.NoError(t, err)

	c.Enabled = true
	c.Threshold = 10
	c.ScoreInvalid = 10
	err = c.validate()
	require.Error(t, err)

	c.ScoreInvalid = 2
	c.ScoreLimitExceeded = 10
	err = c.validate()
	require.Error(t, err)

	c.ScoreLimitExceeded = 2
	c.ScoreValid = 10
	err = c.validate()
	require.Error(t, err)

	c.ScoreValid = 1
	c.BanTime = 0
	err = c.validate()
	require.Error(t, err)

	c.BanTime = 30
	c.BanTimeIncrement = 0
	err = c.validate()
	require.Error(t, err)

	c.BanTimeIncrement = 50
	c.ObservationTime = 0
	err = c.validate()
	require.Error(t, err)

	c.ObservationTime = 30
	err = c.validate()
	require.Error(t, err)

	c.EntriesSoftLimit = 10
	err = c.validate()
	require.Error(t, err)

	c.EntriesHardLimit = 10
	err = c.validate()
	require.Error(t, err)

	c.EntriesHardLimit = 20
	err = c.validate()
	require.NoError(t, err)
}

func BenchmarkDefenderBannedSearch(b *testing.B) {
	d := getDefenderForBench()

	ip, ipnet, err := net.ParseCIDR("10.8.0.0/12") // 1048574 ip addresses
	if err != nil {
		panic(err)
	}

	for ip := ip.Mask(ipnet.Mask); ipnet.Contains(ip); inc(ip) {
		d.banned[ip.String()] = time.Now().Add(10 * time.Minute)
	}

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		d.IsBanned("192.168.1.1")
	}
}

func BenchmarkCleanup(b *testing.B) {
	d := getDefenderForBench()

	ip, ipnet, err := net.ParseCIDR("192.168.4.0/24")
	if err != nil {
		panic(err)
	}

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		for ip := ip.Mask(ipnet.Mask); ipnet.Contains(ip); inc(ip) {
			d.AddEvent(ip.String(), HostEventLoginFailed)
			if d.countHosts() > d.config.EntriesHardLimit {
				panic("too many hosts")
			}
			if d.countBanned() > d.config.EntriesSoftLimit {
				panic("too many ip banned")
			}
		}
	}
}

func BenchmarkDefenderBannedSearchWithBlockList(b *testing.B) {
	d := getDefenderForBench()

	d.blockList = &HostList{
		IPAddresses: make(map[string]bool),
		Ranges:      cidranger.NewPCTrieRanger(),
	}

	ip, ipnet, err := net.ParseCIDR("129.8.0.0/12") // 1048574 ip addresses
	if err != nil {
		panic(err)
	}

	for ip := ip.Mask(ipnet.Mask); ipnet.Contains(ip); inc(ip) {
		d.banned[ip.String()] = time.Now().Add(10 * time.Minute)
		d.blockList.IPAddresses[ip.String()] = true
	}

	for i := 0; i < 255; i++ {
		cidr := fmt.Sprintf("10.8.%v.1/24", i)
		_, network, _ := net.ParseCIDR(cidr)
		if err := d.blockList.Ranges.Insert(cidranger.NewBasicRangerEntry(*network)); err != nil {
			panic(err)
		}
	}

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		d.IsBanned("192.168.1.1")
	}
}

func BenchmarkHostListSearch(b *testing.B) {
	hostlist := &HostList{
		IPAddresses: make(map[string]bool),
		Ranges:      cidranger.NewPCTrieRanger(),
	}

	ip, ipnet, _ := net.ParseCIDR("172.16.0.0/16")

	for ip := ip.Mask(ipnet.Mask); ipnet.Contains(ip); inc(ip) {
		hostlist.IPAddresses[ip.String()] = true
	}

	for i := 0; i < 255; i++ {
		cidr := fmt.Sprintf("10.8.%v.1/24", i)
		_, network, _ := net.ParseCIDR(cidr)
		if err := hostlist.Ranges.Insert(cidranger.NewBasicRangerEntry(*network)); err != nil {
			panic(err)
		}
	}

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		if hostlist.isListed("192.167.1.2") {
			panic("should not be listed")
		}
	}
}

func BenchmarkCIDRanger(b *testing.B) {
	ranger := cidranger.NewPCTrieRanger()
	for i := 0; i < 255; i++ {
		cidr := fmt.Sprintf("192.168.%v.1/24", i)
		_, network, _ := net.ParseCIDR(cidr)
		if err := ranger.Insert(cidranger.NewBasicRangerEntry(*network)); err != nil {
			panic(err)
		}
	}

	ipToMatch := net.ParseIP("192.167.1.2")

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if _, err := ranger.Contains(ipToMatch); err != nil {
			panic(err)
		}
	}
}

func BenchmarkNetContains(b *testing.B) {
	var nets []*net.IPNet
	for i := 0; i < 255; i++ {
		cidr := fmt.Sprintf("192.168.%v.1/24", i)
		_, network, _ := net.ParseCIDR(cidr)
		nets = append(nets, network)
	}

	ipToMatch := net.ParseIP("192.167.1.1")

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		for _, n := range nets {
			n.Contains(ipToMatch)
		}
	}
}

func getDefenderForBench() *memoryDefender {
	config := &DefenderConfig{
		Enabled:          true,
		BanTime:          30,
		BanTimeIncrement: 50,
		Threshold:        10,
		ScoreInvalid:     2,
		ScoreValid:       2,
		ObservationTime:  30,
		EntriesSoftLimit: 50,
		EntriesHardLimit: 100,
	}
	return &memoryDefender{
		config: config,
		hosts:  make(map[string]hostScore),
		banned: make(map[string]time.Time),
	}
}

func inc(ip net.IP) {
	for j := len(ip) - 1; j >= 0; j-- {
		ip[j]++
		if ip[j] > 0 {
			break
		}
	}
}

@@ -1,200 +0,0 @@
package common

import (
	"crypto/tls"
	"crypto/x509"
	"crypto/x509/pkix"
	"fmt"
	"os"
	"path/filepath"
	"sync"
	"time"

	"github.com/drakkan/sftpgo/logger"
	"github.com/drakkan/sftpgo/utils"
)

// CertManager defines a TLS certificate manager
type CertManager struct {
	certPath  string
	keyPath   string
	configDir string
	logSender string
	sync.RWMutex
	caCertificates    []string
	caRevocationLists []string
	cert              *tls.Certificate
	rootCAs           *x509.CertPool
	crls              []*pkix.CertificateList
}

// Reload tries to reload certificate and CRLs
func (m *CertManager) Reload() error {
	errCrt := m.loadCertificate()
	errCRLs := m.LoadCRLs()

	if errCrt != nil {
		return errCrt
	}
	return errCRLs
}

// loadCertificate loads the configured x509 key pair
func (m *CertManager) loadCertificate() error {
	newCert, err := tls.LoadX509KeyPair(m.certPath, m.keyPath)
	if err != nil {
		logger.Warn(m.logSender, "", "unable to load X509 key pair, cert file %#v key file %#v error: %v",
			m.certPath, m.keyPath, err)
		return err
	}
	logger.Debug(m.logSender, "", "TLS certificate %#v successfully loaded", m.certPath)

	m.Lock()
	defer m.Unlock()

	m.cert = &newCert
	return nil
}

// GetCertificateFunc returns the loaded certificate
func (m *CertManager) GetCertificateFunc() func(*tls.ClientHelloInfo) (*tls.Certificate, error) {
	return func(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) {
		m.RLock()
		defer m.RUnlock()

		return m.cert, nil
	}
}

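GetCertificateFunc plugs directly into the standard library's tls.Config, so a Reload takes effect without restarting the listener. A minimal sketch (the address is illustrative and m is assumed to be an initialized *CertManager):

	tlsConfig := &tls.Config{
		GetCertificate: m.GetCertificateFunc(),
		MinVersion:     tls.VersionTLS12,
	}
	listener, err := tls.Listen("tcp", ":8443", tlsConfig)
	if err != nil {
		panic(err) // sketch only
	}
	defer listener.Close()
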
// IsRevoked returns true if the specified certificate has been revoked
func (m *CertManager) IsRevoked(crt *x509.Certificate, caCrt *x509.Certificate) bool {
	m.RLock()
	defer m.RUnlock()

	if crt == nil || caCrt == nil {
		logger.Warn(m.logSender, "", "unable to verify crt %v ca crt %v", crt, caCrt)
		return len(m.crls) > 0
	}

	for _, crl := range m.crls {
		if !crl.HasExpired(time.Now()) && caCrt.CheckCRLSignature(crl) == nil {
			for _, rc := range crl.TBSCertList.RevokedCertificates {
				if rc.SerialNumber.Cmp(crt.SerialNumber) == 0 {
					return true
				}
			}
		}
	}

	return false
}

// LoadCRLs tries to load certificate revocation lists from the given paths
func (m *CertManager) LoadCRLs() error {
	if len(m.caRevocationLists) == 0 {
		return nil
	}

	var crls []*pkix.CertificateList

	for _, revocationList := range m.caRevocationLists {
		if !utils.IsFileInputValid(revocationList) {
			return fmt.Errorf("invalid root CA revocation list %#v", revocationList)
		}
		if revocationList != "" && !filepath.IsAbs(revocationList) {
			revocationList = filepath.Join(m.configDir, revocationList)
		}
		crlBytes, err := os.ReadFile(revocationList)
		if err != nil {
			logger.Warn(m.logSender, "", "unable to read revocation list %#v", revocationList)
			return err
		}
		crl, err := x509.ParseCRL(crlBytes)
		if err != nil {
			logger.Warn(m.logSender, "", "unable to parse revocation list %#v", revocationList)
			return err
		}

		logger.Debug(m.logSender, "", "CRL %#v successfully loaded", revocationList)
		crls = append(crls, crl)
	}

	m.Lock()
	defer m.Unlock()

	m.crls = crls

	return nil
}

// GetRootCAs returns the set of root certificate authorities that servers
// use if required to verify a client certificate
func (m *CertManager) GetRootCAs() *x509.CertPool {
	m.RLock()
	defer m.RUnlock()

	return m.rootCAs
}

// LoadRootCAs tries to load root CA certificate authorities from the given paths
func (m *CertManager) LoadRootCAs() error {
	if len(m.caCertificates) == 0 {
		return nil
	}

	rootCAs := x509.NewCertPool()

	for _, rootCA := range m.caCertificates {
		if !utils.IsFileInputValid(rootCA) {
			return fmt.Errorf("invalid root CA certificate %#v", rootCA)
		}
		if rootCA != "" && !filepath.IsAbs(rootCA) {
			rootCA = filepath.Join(m.configDir, rootCA)
		}
		crt, err := os.ReadFile(rootCA)
		if err != nil {
			return err
		}
		if rootCAs.AppendCertsFromPEM(crt) {
			logger.Debug(m.logSender, "", "TLS certificate authority %#v successfully loaded", rootCA)
		} else {
			err := fmt.Errorf("unable to load TLS certificate authority %#v", rootCA)
			logger.Warn(m.logSender, "", "%v", err)
			return err
		}
	}

	m.Lock()
	defer m.Unlock()

	m.rootCAs = rootCAs
	return nil
}

// SetCACertificates sets the root CA authorities file paths.
// This should not be changed at runtime
func (m *CertManager) SetCACertificates(caCertificates []string) {
	m.caCertificates = caCertificates
}

// SetCARevocationLists sets the CA revocation lists file paths.
// This should not be changed at runtime
func (m *CertManager) SetCARevocationLists(caRevocationLists []string) {
	m.caRevocationLists = caRevocationLists
}

// NewCertManager creates a new certificate manager
func NewCertManager(certificateFile, certificateKeyFile, configDir, logSender string) (*CertManager, error) {
	manager := &CertManager{
		cert:      nil,
		certPath:  certificateFile,
		keyPath:   certificateKeyFile,
		configDir: configDir,
		logSender: logSender,
	}
	err := manager.loadCertificate()
	if err != nil {
		return nil, err
	}
	return manager, nil
}

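A usage sketch for the manager above (the paths and the sender name are illustrative): the key pair is loaded at construction time, optional root CAs and CRLs afterwards.

	certMgr, err := NewCertManager("/etc/sftpgo/server.crt", "/etc/sftpgo/server.key", "/etc/sftpgo", "certs")
	if err != nil {
		panic(err) // sketch only
	}
	certMgr.SetCACertificates([]string{"ca.crt"}) // relative paths resolve against configDir
	if err := certMgr.LoadRootCAs(); err != nil {
		panic(err)
	}
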
@@ -1,386 +0,0 @@
package common

import (
	"crypto/tls"
	"crypto/x509"
	"os"
	"path/filepath"
	"testing"

	"github.com/stretchr/testify/assert"
)

const (
serverCert = `-----BEGIN CERTIFICATE-----
|
|
||||||
MIIEIDCCAgigAwIBAgIRAPOR9zTkX35vSdeyGpF8Rn8wDQYJKoZIhvcNAQELBQAw
|
|
||||||
EzERMA8GA1UEAxMIQ2VydEF1dGgwHhcNMjEwMTAyMjEyMjU1WhcNMjIwNzAyMjEz
|
|
||||||
MDUxWjARMQ8wDQYDVQQDEwZzZXJ2ZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw
|
|
||||||
ggEKAoIBAQCte0PJhCTNqTiqdwk/s4JanKIMKUVWr2u94a+JYy5gJ9xYXrQ49SeN
|
|
||||||
m+fwhTAOqctP5zNVkFqxlBytJZg3pqCKqRoOOl1qVgL3F3o7JdhZGi67aw8QMLPx
|
|
||||||
tLPpYWnnrlUQoXRJdTlqkDqO8lOZl9HO5oZeidPZ7r5BVD6ZiujAC6Zg0jIc+EPt
|
|
||||||
qhaUJ1CStoAeRf1rNWKmDsLv5hEaDWoaHF9sNVzDQg6atZ3ici00qQj+uvEZo8mL
|
|
||||||
k6egg3rqsTv9ml2qlrRgFumt99J60hTt3tuQaAruHY80O9nGy3SCXC11daa7gszH
|
|
||||||
ElCRvhUVoOxRtB54YBEtJ0gEpFnTO9J1AgMBAAGjcTBvMA4GA1UdDwEB/wQEAwID
|
|
||||||
uDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwHQYDVR0OBBYEFAgDXwPV
|
|
||||||
nhztNz+H20iNWgoIx8adMB8GA1UdIwQYMBaAFO1yCNAGr/zQTJIi8lw3w5OiuBvM
|
|
||||||
MA0GCSqGSIb3DQEBCwUAA4ICAQCR5kgIb4vAtrtsXD24n6RtU1yIXHPLNmDStVrH
|
|
||||||
uaMYNnHlLhRlQFCjHhjWvZ89FQC7FeNOITc3FpibJySyw7JfnsyEOGxEbcAS4uLB
|
|
||||||
2pdAiJPqdQtxIVcyi5vu53m1T5tm0sy8sBrGxU466aDQ8VGqjcjfTwNIyoFMd3p/
|
|
||||||
ezFRvg2BudwU9hqApgfHfLi4WCuI3hLO2tbmgDinyH0HI0YYNNweGpiBYbTLF4Tx
|
|
||||||
H6vHgD9USMZeu4+HX0IIsBiHQD7TTIe5ceREkPcNPd5qTpIvT3zKQ/KwwT90/zjP
|
|
||||||
aWmz6pLxBfjRu7MY/bDfxfRUqsrLYJCVBoaDVRWR9rhiPIFkC5JzoWD/4hdj2iis
|
|
||||||
N0+OOaJ77L+/ArFprE+7Fu3cSdYlfiNjV8R5kE29cAxKLI92CjAiTKrEuxKcQPKO
|
|
||||||
+taWNKIYYjEDZwVnzlkTIl007X0RBuzu9gh4w5NwJdt8ZOJAp0JV0Cq+UvG+FC/v
|
|
||||||
lYk82E6j1HKhf4CXmrjsrD1Fyu41mpVFOpa2ATiFGvms913MkXuyO8g99IllmDw1
|
|
||||||
D7/PN4Qe9N6Zm7yoKZM0IUw2v+SUMIdOAZ7dptO9ZjtYOfiAIYN3jM8R4JYgPiuD
|
|
||||||
DGSM9LJBJxCxI/DiO1y1Z3n9TcdDQYut8Gqdi/aYXw2YeqyHXosX5Od3vcK/O5zC
|
|
||||||
pOJTYQ==
|
|
||||||
-----END CERTIFICATE-----`
|
|
||||||
serverKey = `-----BEGIN RSA PRIVATE KEY-----
|
|
||||||
MIIEowIBAAKCAQEArXtDyYQkzak4qncJP7OCWpyiDClFVq9rveGviWMuYCfcWF60
|
|
||||||
OPUnjZvn8IUwDqnLT+czVZBasZQcrSWYN6agiqkaDjpdalYC9xd6OyXYWRouu2sP
|
|
||||||
EDCz8bSz6WFp565VEKF0SXU5apA6jvJTmZfRzuaGXonT2e6+QVQ+mYrowAumYNIy
|
|
||||||
HPhD7aoWlCdQkraAHkX9azVipg7C7+YRGg1qGhxfbDVcw0IOmrWd4nItNKkI/rrx
|
|
||||||
GaPJi5OnoIN66rE7/Zpdqpa0YBbprffSetIU7d7bkGgK7h2PNDvZxst0glwtdXWm
|
|
||||||
u4LMxxJQkb4VFaDsUbQeeGARLSdIBKRZ0zvSdQIDAQABAoIBAF4sI8goq7HYwqIG
|
|
||||||
rEagM4rsrCrd3H4KC/qvoJJ7/JjGCp8OCddBfY8pquat5kCPe4aMgxlXm2P6evaj
|
|
||||||
CdZr5Ypf8Xz3we4PctyfKgMhsCfuRqAGpc6sIYJ8DY4LC2pxAExe2LlnoRtv39np
|
|
||||||
QeiGuaYPDbIUL6SGLVFZYgIHngFhbDYfL83q3Cb/PnivUGFvUVQCfRBUKO2d8KYq
|
|
||||||
TrVB5BWD2GrHor24ApQmci1OOqfbkIevkK6bk8HUfSZiZGI9LUQiPHMxi5k2x43J
|
|
||||||
nIwhZnW2N28dorKnWHg2vh7viGvinVRZ3MEyX150oCw/L6SYM4fqR6t2ZSBgNQHT
|
|
||||||
ZNoDtwECgYEA4lXMgtYqKuSlZ3TKfxAj03tJ/gbRdKcUCEGXEbdpY70tTu6KESZS
|
|
||||||
etid4Ut/sWEoPTJsgYiGbgJl571t1O8oR1UZYgh9hBGHLV6UEIt9n2PbExhE2vL3
|
|
||||||
SB7+LfO+tMvM4qKUBN+uy4GpU0NiyEEecw4x4S7MRSyHFRIDR7B6RV0CgYEAxDgS
|
|
||||||
mDaNUfSdfB5mXekLUJAwqeKRdL9RjXYaHbnoZ5kIwQ73tFikRwyTsLQwMhjE1l3z
|
|
||||||
MItTzIAyTf/BlK3dsp6bHTaT7hXIjHBsuKATN5qAuUpzTrg9+QaCawVSlQgNeF3a
|
|
||||||
iyfD4dVp66Bzn3gO757TWqmroBZ2e1owbAQvF/kCgYAKT/Jze6KMNcK7hfy78VZQ
|
|
||||||
imuCoXjlob8t6R8i9YJdwv7Pe9rakS5s3nXDEBePU2fr8eIzvK6zUHSoLF9WtlbV
|
|
||||||
eTEg4FYnsEzCam7AmjptCrWulwp8F1ng9ViLa3Gi9y4snU+1MSPbrdqzKnzTtvPW
|
|
||||||
Ni1bnzA7bp3w/dMcbxQDGQKBgB50hY5SiUS7LuZg4YqZ7UOn3aXAoMr6FvJZ7lvG
|
|
||||||
yyepPQ6aACBh0b2lWhcHIKPl7EdJdcGHHo6TJzusAqPNCKf8rh6upe9COkpx+K3/
|
|
||||||
SnxK4sffol4JgrTwKbXqsZKoGU8hYhZPKbwXn8UOtmN+AvN2N1/PDfBfDCzBJtrd
|
|
||||||
G2IhAoGBAN19976xAMDjKb2+wd/mQYA2fR7E8lodxdX3LDnblYmndTKY67nVo94M
|
|
||||||
FHPKZSN590HkFJ+wmChnOrqjtosY+N25CKMS7939EUIDrq+B+bYTWM/gcwdLXNUk
|
|
||||||
Rygw/078Z3ZDJamXmyez5WpeLFrrbmI8sLnBBmSjQvMb6vCEtQ2Z
|
|
||||||
-----END RSA PRIVATE KEY-----`
|
|
||||||
caCRT = `-----BEGIN CERTIFICATE-----
|
|
||||||
MIIE5jCCAs6gAwIBAgIBATANBgkqhkiG9w0BAQsFADATMREwDwYDVQQDEwhDZXJ0
|
|
||||||
QXV0aDAeFw0yMTAxMDIyMTIwNTVaFw0yMjA3MDIyMTMwNTJaMBMxETAPBgNVBAMT
|
|
||||||
CENlcnRBdXRoMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA4Tiho5xW
|
|
||||||
AC15JRkMwfp3/TJwI2As7MY5dele5cmdr5bHAE+sRKqC+Ti88OJWCV5saoyax/1S
|
|
||||||
CjxJlQMZMl169P1QYJskKjdG2sdv6RLWLMgwSNRRjxp/Bw9dHdiEb9MjLgu28Jro
|
|
||||||
9peQkHcRHeMf5hM9WvlIJGrdzbC4hUehmqggcqgARainBkYjf0SwuWxHeu4nMqkp
|
|
||||||
Ak5tcSTLCjHfEFHZ9Te0TIPG5YkWocQKyeLgu4lvuU+DD2W2lym+YVUtRMGs1Env
|
|
||||||
k7p+N0DcGU26qfzZ2sF5ZXkqm7dBsGQB9pIxwc2Q8T1dCIyP9OQCKVILdc5aVFf1
|
|
||||||
cryQFHYzYNNZXFlIBims5VV5Mgfp8ESHQSue+v6n6ykecLEyKt1F1Y/MWY/nWUSI
|
|
||||||
8zdq83jdBAZVjo9MSthxVn57/06s/hQca65IpcTZV2gX0a+eRlAVqaRbAhL3LaZe
|
|
||||||
bYsW3WHKoUOftwemuep3nL51TzlXZVL7Oz/ClGaEOsnGG9KFO6jh+W768qC0zLQI
|
|
||||||
CdE7v2Zex98sZteHCg9fGJHIaYoF0aJG5P3WI5oZf2fy7UIYN9ADLFZiorCXAZEh
|
|
||||||
CSU6mDoRViZ4RGR9GZxbDZ9KYn7O8M/KCR72bkQg73TlMsk1zSXEw0MKLUjtsw6c
|
|
||||||
rZ0Jt8t3sRatHO3JrYHALMt9vZfyNCZp0IsCAwEAAaNFMEMwDgYDVR0PAQH/BAQD
|
|
||||||
AgEGMBIGA1UdEwEB/wQIMAYBAf8CAQAwHQYDVR0OBBYEFO1yCNAGr/zQTJIi8lw3
|
|
||||||
w5OiuBvMMA0GCSqGSIb3DQEBCwUAA4ICAQA6gCNuM7r8mnx674dm31GxBjQy5ZwB
|
|
||||||
7CxDzYEvL/oiZ3Tv3HlPfN2LAAsJUfGnghh9DOytenL2CTZWjl/emP5eijzmlP+9
|
|
||||||
zva5I6CIMCf/eDDVsRdO244t0o4uG7+At0IgSDM3bpVaVb4RHZNjEziYChsEYY8d
|
|
||||||
HK6iwuRSvFniV6yhR/Vj1Ymi9yZ5xclqseLXiQnUB0PkfIk23+7s42cXB16653fH
|
|
||||||
O/FsPyKBLiKJArizLYQc12aP3QOrYoYD9+fAzIIzew7A5C0aanZCGzkuFpO6TRlD
|
|
||||||
Tb7ry9Gf0DfPpCgxraH8tOcmnqp/ka3hjqo/SRnnTk0IFrmmLdarJvjD46rKwBo4
|
|
||||||
MjyAIR1mQ5j8GTlSFBmSgETOQ/EYvO3FPLmra1Fh7L+DvaVzTpqI9fG3TuyyY+Ri
|
|
||||||
Fby4ycTOGSZOe5Fh8lqkX5Y47mCUJ3zHzOA1vUJy2eTlMRGpu47Eb1++Vm6EzPUP
|
|
||||||
2EF5aD+zwcssh+atZvQbwxpgVqVcyLt91RSkKkmZQslh0rnlTb68yxvUnD3zw7So
|
|
||||||
o6TAf9UvwVMEvdLT9NnFd6hwi2jcNte/h538GJwXeBb8EkfpqLKpTKyicnOdkamZ
|
|
||||||
7E9zY8SHNRYMwB9coQ/W8NvufbCgkvOoLyMXk5edbXofXl3PhNGOlraWbghBnzf5
|
|
||||||
r3rwjFsQOoZotA==
|
|
||||||
-----END CERTIFICATE-----`
|
|
||||||
caKey = `-----BEGIN RSA PRIVATE KEY-----
|
|
||||||
MIIJKQIBAAKCAgEA4Tiho5xWAC15JRkMwfp3/TJwI2As7MY5dele5cmdr5bHAE+s
|
|
||||||
RKqC+Ti88OJWCV5saoyax/1SCjxJlQMZMl169P1QYJskKjdG2sdv6RLWLMgwSNRR
|
|
||||||
jxp/Bw9dHdiEb9MjLgu28Jro9peQkHcRHeMf5hM9WvlIJGrdzbC4hUehmqggcqgA
|
|
||||||
RainBkYjf0SwuWxHeu4nMqkpAk5tcSTLCjHfEFHZ9Te0TIPG5YkWocQKyeLgu4lv
|
|
||||||
uU+DD2W2lym+YVUtRMGs1Envk7p+N0DcGU26qfzZ2sF5ZXkqm7dBsGQB9pIxwc2Q
|
|
||||||
8T1dCIyP9OQCKVILdc5aVFf1cryQFHYzYNNZXFlIBims5VV5Mgfp8ESHQSue+v6n
|
|
||||||
6ykecLEyKt1F1Y/MWY/nWUSI8zdq83jdBAZVjo9MSthxVn57/06s/hQca65IpcTZ
|
|
||||||
V2gX0a+eRlAVqaRbAhL3LaZebYsW3WHKoUOftwemuep3nL51TzlXZVL7Oz/ClGaE
|
|
||||||
OsnGG9KFO6jh+W768qC0zLQICdE7v2Zex98sZteHCg9fGJHIaYoF0aJG5P3WI5oZ
|
|
||||||
f2fy7UIYN9ADLFZiorCXAZEhCSU6mDoRViZ4RGR9GZxbDZ9KYn7O8M/KCR72bkQg
|
|
||||||
73TlMsk1zSXEw0MKLUjtsw6crZ0Jt8t3sRatHO3JrYHALMt9vZfyNCZp0IsCAwEA
|
|
||||||
AQKCAgAV+ElERYbaI5VyufvVnFJCH75ypPoc6sVGLEq2jbFVJJcq/5qlZCC8oP1F
|
|
||||||
Xj7YUR6wUiDzK1Hqb7EZ2SCHGjlZVrCVi+y+NYAy7UuMZ+r+mVSkdhmypPoJPUVv
|
|
||||||
GOTqZ6VB46Cn3eSl0WknvoWr7bD555yPmEuiSc5zNy74yWEJTidEKAFGyknowcTK
|
|
||||||
sG+w1tAuPLcUKQ44DGB+rgEkcHL7C5EAa7upzx0C3RmZFB+dTAVyJdkBMbFuOhTS
|
|
||||||
sB7DLeTplR7/4mp9da7EQw51ZXC1DlZOEZt++4/desXsqATNAbva1OuzrLG7mMKe
|
|
||||||
N/PCBh/aERQcsCvgUmaXqGQgqN1Jhw8kbXnjZnVd9iE7TAh7ki3VqNy1OMgTwOex
|
|
||||||
bBYWaCqHuDYIxCjeW0qLJcn0cKQ13FVYrxgInf4Jp82SQht5b/zLL3IRZEyKcLJF
|
|
||||||
kL6g1wlmTUTUX0z8eZzlM0ZCrqtExjgElMO/rV971nyNV5WU8Og3NmE8/slqMrmJ
|
|
||||||
DlrQr9q0WJsDKj1IMe46EUM6ix7bbxC5NIfJ96dgdxZDn6ghjca6iZYqqUACvmUj
|
|
||||||
cq08s3R4Ouw9/87kn11wwGBx2yDueCwrjKEGc0RKjweGbwu0nBxOrkJ8JXz6bAv7
|
|
||||||
1OKfYaX3afI9B8x4uaiuRs38oBQlg9uAYFfl4HNBPuQikGLmsQKCAQEA8VjFOsaz
|
|
||||||
y6NMZzKXi7WZ48uu3ed5x3Kf6RyDr1WvQ1jkBMv9b6b8Gp1CRnPqviRBto9L8QAg
|
|
||||||
bCXZTqnXzn//brskmW8IZgqjAlf89AWa53piucu9/hgidrHRZobs5gTqev28uJdc
|
|
||||||
zcuw1g8c3nCpY9WeTjHODzX5NXYRLFpkazLfYa6c8Q9jZR4KKrpdM+66fxL0JlOd
|
|
||||||
7dN0oQtEqEAugsd3cwkZgvWhY4oM7FGErrZoDLy273ZdJzi/vU+dThyVzfD8Ab8u
|
|
||||||
VxxuobVMT/S608zbe+uaiUdov5s96OkCl87403UNKJBH+6LNb3rjBBLE9NPN5ET9
|
|
||||||
JLQMrYd+zj8jQwKCAQEA7uU5I9MOufo9bIgJqjY4Ie1+Ex9DZEMUYFAvGNCJCVcS
|
|
||||||
mwOdGF8AWzIavTLACmEDJO7t/OrBdoo4L7IEsCNjgA3WiIwIMiWUVqveAGUMEXr6
|
|
||||||
TRI5EolV6FTqqIP6AS+BAeBq7G1ELgsTrWNHh11rW3+3kBMuOCn77PUQ8WHwcq/r
|
|
||||||
teZcZn4Ewcr6P7cBODgVvnBPhe/J8xHS0HFVCeS1CvaiNYgees5yA80Apo9IPjDJ
|
|
||||||
YWawLjmH5wUBI5yDFVp067wjqJnoKPSoKwWkZXqUk+zgFXx5KT0gh/c5yh1frASp
|
|
||||||
q6oaYnHEVC5qj2SpT1GFLonTcrQUXiSkiUudvNu1GQKCAQEAmko+5GFtRe0ihgLQ
|
|
||||||
4S76r6diJli6AKil1Fg3U1r6zZpBQ1PJtJxTJQyN9w5Z7q6tF/GqAesrzxevQdvQ
|
|
||||||
rCImAPtA3ZofC2UXawMnIjWHHx6diNvYnV1+gtUQ4nO1dSOFZ5VZFcUmPiZO6boF
|
|
||||||
oaryj3FcX+71JcJCjEvrlKhA9Es0hXUkvfMxfs5if4he1zlyHpTWYr4oA4egUugq
|
|
||||||
P0mwskikc3VIyvEO+NyjgFxo72yLPkFSzemkidN8uKDyFqKtnlfGM7OuA2CY1WZa
|
|
||||||
3+67lXWshx9KzyJIs92iCYkU8EoPxtdYzyrV6efdX7x27v60zTOut5TnJJS6WiF6
|
|
||||||
Do5MkwKCAQAxoR9IyP0DN/BwzqYrXU42Bi+t603F04W1KJNQNWpyrUspNwv41yus
|
|
||||||
xnD1o0hwH41Wq+h3JZIBfV+E0RfWO9Pc84MBJQ5C1LnHc7cQH+3s575+Km3+4tcd
|
|
||||||
CB8j2R8kBeloKWYtLdn/Mr/ownpGreqyvIq2/LUaZ+Z1aMgXTYB1YwS16mCBzmZQ
|
|
||||||
mEl62RsAwe4KfSyYJ6OtwqMoOJMxFfliiLBULK4gVykqjvk2oQeiG+KKQJoTUFJi
|
|
||||||
dRCyhD5bPkqR+qjxyt+HOqSBI4/uoROi05AOBqjpH1DVzk+MJKQOiX1yM0l98CKY
|
|
||||||
Vng+x+vAla/0Zh+ucajVkgk4mKPxazdpAoIBAQC17vWk4KYJpF2RC3pKPcQ0PdiX
|
|
||||||
bN35YNlvyhkYlSfDNdyH3aDrGiycUyW2mMXUgEDFsLRxHMTL+zPC6efqO6sTAJDY
|
|
||||||
cBptsW4drW/qo8NTx3dNOisLkW+mGGJOR/w157hREFr29ymCVMYu/Z7fVWIeSpCq
|
|
||||||
p3u8YX8WTljrxwSczlGjvpM7uJx3SfYRM4TUoy+8wU8bK74LywLa5f60bQY6Dye0
|
|
||||||
Gqd9O6OoPfgcQlwjC5MiAofeqwPJvU0hQOPoehZyNLAmOCWXTYWaTP7lxO1r6+NE
|
|
||||||
M3hGYqW3W8Ixua71OskCypBZg/HVlIP/lzjRzdx+VOB2hbWVth2Iup/Z1egW
|
|
||||||
-----END RSA PRIVATE KEY-----`
|
|
||||||
caCRL = `-----BEGIN X509 CRL-----
|
|
||||||
MIICpzCBkAIBATANBgkqhkiG9w0BAQsFADATMREwDwYDVQQDEwhDZXJ0QXV0aBcN
|
|
||||||
MjEwMTAyMjEzNDA1WhcNMjMwMTAyMjEzNDA1WjAkMCICEQC+l04DbHWMyC3fG09k
|
|
||||||
VXf+Fw0yMTAxMDIyMTM0MDVaoCMwITAfBgNVHSMEGDAWgBTtcgjQBq/80EySIvJc
|
|
||||||
N8OTorgbzDANBgkqhkiG9w0BAQsFAAOCAgEAEJ7z+uNc8sqtxlOhSdTGDzX/xput
|
|
||||||
E857kFQkSlMnU2whQ8c+XpYrBLA5vIZJNSSwohTpM4+zVBX/bJpmu3wqqaArRO9/
|
|
||||||
YcW5mQk9Anvb4WjQW1cHmtNapMTzoC9AiYt/OWPfy+P6JCgCr4Hy6LgQyIRL6bM9
|
|
||||||
VYTalolOm1qa4Y5cIeT7iHq/91mfaqo8/6MYRjLl8DOTROpmw8OS9bCXkzGKdCat
|
|
||||||
AbAzwkQUSauyoCQ10rpX+Y64w9ng3g4Dr20aCqPf5osaqplEJ2HTK8ljDTidlslv
|
|
||||||
9anQj8ax3Su89vI8+hK+YbfVQwrThabgdSjQsn+veyx8GlP8WwHLAQ379KjZjWg+
|
|
||||||
OlOSwBeU1vTdP0QcB8X5C2gVujAyuQekbaV86xzIBOj7vZdfHZ6ee30TZ2FKiMyg
|
|
||||||
7/N2OqW0w77ChsjB4MSHJCfuTgIeg62GzuZXLM+Q2Z9LBdtm4Byg+sm/P52adOEg
|
|
||||||
gVb2Zf4KSvsAmA0PIBlu449/QXUFcMxzLFy7mwTeZj2B4Ln0Hm0szV9f9R8MwMtB
|
|
||||||
SyLYxVH+mgqaR6Jkk22Q/yYyLPaELfafX5gp/AIXG8n0zxfVaTvK3auSgb1Q6ZLS
|
|
||||||
5QH9dSIsmZHlPq7GoSXmKpMdjUL8eaky/IMteioyXgsBiATzl5L2dsw6MTX3MDF0
|
|
||||||
QbDK+MzhmbKfDxs=
|
|
||||||
-----END X509 CRL-----`
|
|
||||||
client1Crt = `-----BEGIN CERTIFICATE-----
|
|
||||||
MIIEITCCAgmgAwIBAgIRAIppZHoj1hM80D7WzTEKLuAwDQYJKoZIhvcNAQELBQAw
|
|
||||||
EzERMA8GA1UEAxMIQ2VydEF1dGgwHhcNMjEwMTAyMjEyMzEwWhcNMjIwNzAyMjEz
|
|
||||||
MDUxWjASMRAwDgYDVQQDEwdjbGllbnQxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
|
|
||||||
MIIBCgKCAQEAoKbYY9MdF2kF/nhBESIiZTdVYtA8XL9xrIZyDj9EnCiTxHiVbJtH
|
|
||||||
XVwszqSl5TRrotPmnmAQcX3r8OCk+z+RQZ0QQj257P3kG6q4rNnOcWCS5xEd20jP
|
|
||||||
yhQ3m+hMGfZsotNTQze1ochuQgLUN6IPyPxZkH22ia3jX4iu1eo/QxeLYHj1UHw4
|
|
||||||
3Cii9yE+j5kPUC21xmnrGKdUrB55NYLXHx6yTIqYR5znSOVB8oJi18/hwdZmH859
|
|
||||||
DHhm0Hx1HrS+jbjI3+CMorZJ3WUyNf+CkiVLD3xYutPbxzEpwiqkG/XYzLH0habT
|
|
||||||
cDcILo18n+o3jvem2KWBrDhyairjIDscwQIDAQABo3EwbzAOBgNVHQ8BAf8EBAMC
|
|
||||||
A7gwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBSJ5GIv
|
|
||||||
zIrE4ZSQt2+CGblKTDswizAfBgNVHSMEGDAWgBTtcgjQBq/80EySIvJcN8OTorgb
|
|
||||||
zDANBgkqhkiG9w0BAQsFAAOCAgEALh4f5GhvNYNou0Ab04iQBbLEdOu2RlbK1B5n
|
|
||||||
K9P/umYenBHMY/z6HT3+6tpcHsDuqE8UVdq3f3Gh4S2Gu9m8PRitT+cJ3gdo9Plm
|
|
||||||
3rD4ufn/s6rGg3ppydXcedm17492tbccUDWOBZw3IO/ASVq13WPgT0/Kev7cPq0k
|
|
||||||
sSdSNhVeXqx8Myc2/d+8GYyzbul2Kpfa7h9i24sK49E9ftnSmsIvngONo08eT1T0
|
|
||||||
3wAOyK2981LIsHaAWcneShKFLDB6LeXIT9oitOYhiykhFlBZ4M1GNlSNfhQ8IIQP
|
|
||||||
xbqMNXCLkW4/BtLhGEEcg0QVso6Kudl9rzgTfQknrdF7pHp6rS46wYUjoSyIY6dl
|
|
||||||
oLmnoAVJX36J3QPWelePI9e07X2wrTfiZWewwgw3KNRWjd6/zfPLe7GoqXnK1S2z
|
|
||||||
PT8qMfCaTwKTtUkzXuTFvQ8bAo2My/mS8FOcpkt2oQWeOsADHAUX7fz5BCoa2DL3
|
|
||||||
k/7Mh4gVT+JYZEoTwCFuYHgMWFWe98naqHi9lB4yR981p1QgXgxO7qBeipagKY1F
|
|
||||||
LlH1iwXUqZ3MZnkNA+4e1Fglsw3sa/rC+L98HnznJ/YbTfQbCP6aQ1qcOymrjMud
|
|
||||||
7MrFwqZjtd/SK4Qx1VpK6jGEAtPgWBTUS3p9ayg6lqjMBjsmySWfvRsDQbq6P5Ct
|
|
||||||
O/e3EH8=
|
|
||||||
-----END CERTIFICATE-----`
|
|
||||||
client1Key = `-----BEGIN RSA PRIVATE KEY-----
|
|
||||||
MIIEpAIBAAKCAQEAoKbYY9MdF2kF/nhBESIiZTdVYtA8XL9xrIZyDj9EnCiTxHiV
|
|
||||||
bJtHXVwszqSl5TRrotPmnmAQcX3r8OCk+z+RQZ0QQj257P3kG6q4rNnOcWCS5xEd
|
|
||||||
20jPyhQ3m+hMGfZsotNTQze1ochuQgLUN6IPyPxZkH22ia3jX4iu1eo/QxeLYHj1
|
|
||||||
UHw43Cii9yE+j5kPUC21xmnrGKdUrB55NYLXHx6yTIqYR5znSOVB8oJi18/hwdZm
|
|
||||||
H859DHhm0Hx1HrS+jbjI3+CMorZJ3WUyNf+CkiVLD3xYutPbxzEpwiqkG/XYzLH0
|
|
||||||
habTcDcILo18n+o3jvem2KWBrDhyairjIDscwQIDAQABAoIBAEBSjVFqtbsp0byR
|
|
||||||
aXvyrtLX1Ng7h++at2jca85Ihq//jyqbHTje8zPuNAKI6eNbmb0YGr5OuEa4pD9N
|
|
||||||
ssDmMsKSoG/lRwwcm7h4InkSvBWpFShvMgUaohfHAHzsBYxfnh+TfULsi0y7c2n6
|
|
||||||
t/2OZcOTRkkUDIITnXYiw93ibHHv2Mv2bBDu35kGrcK+c2dN5IL5ZjTjMRpbJTe2
|
|
||||||
44RBJbdTxHBVSgoGBnugF+s2aEma6Ehsj70oyfoVpM6Aed5kGge0A5zA1JO7WCn9
Ay/DzlULRXHjJIoRWd2NKvx5n3FNppUc9vJh2plRHalRooZ2+MjSf8HmXlvG2Hpb
ScvmWgECgYEA1G+A/2KnxWsr/7uWIJ7ClcGCiNLdk17Pv3DZ3G4qUsU2ITftfIbb
tU0Q/b19na1IY8Pjy9ptP7t74/hF5kky97cf1FA8F+nMj/k4+wO8QDI8OJfzVzh9
PwielA5vbE+xmvis5Hdp8/od1Yrc/rPSy2TKtPFhvsqXjqoUmOAjDP8CgYEAwZjH
9dt1sc2lx/rMxihlWEzQ3JPswKW9/LJAmbRBoSWF9FGNjbX7uhWtXRKJkzb8ZAwa
88azluNo2oftbDD/+jw8b2cDgaJHlLAkSD4O1D1RthW7/LKD15qZ/oFsRb13NV85
ZNKtwslXGbfVNyGKUVFm7fVA8vBAOUey+LKDFj8CgYEAg8WWstOzVdYguMTXXuyb
ruEV42FJaDyLiSirOvxq7GTAKuLSQUg1yMRBIeQEo2X1XU0JZE3dLodRVhuO4EXP
g7Dn4X7Th9HSvgvNuIacowWGLWSz4Qp9RjhGhXhezUSx2nseY6le46PmFavJYYSR
4PBofMyt4PcyA6Cknh+KHmkCgYEAnTriG7ETE0a7v4DXUpB4TpCEiMCy5Xs2o8Z5
ZNva+W+qLVUWq+MDAIyechqeFSvxK6gRM69LJ96lx+XhU58wJiFJzAhT9rK/g+jS
bsHH9WOfu0xHkuHA5hgvvV2Le9B2wqgFyva4HJy82qxMxCu/VG/SMqyfBS9OWbb7
ibQhdq0CgYAl53LUWZsFSZIth1vux2LVOsI8C3X1oiXDGpnrdlQ+K7z57hq5EsRq
GC+INxwXbvKNqp5h0z2MvmKYPDlGVTgw8f8JjM7TkN17ERLcydhdRrMONUryZpo8
1xTob+8blyJgfxZUIAKbMbMbIiU0WAF0rfD/eJJwS4htOW/Hfv4TGA==
-----END RSA PRIVATE KEY-----`
	// client 2 crt is revoked
	client2Crt = `-----BEGIN CERTIFICATE-----
MIIEITCCAgmgAwIBAgIRAL6XTgNsdYzILd8bT2RVd/4wDQYJKoZIhvcNAQELBQAw
EzERMA8GA1UEAxMIQ2VydEF1dGgwHhcNMjEwMTAyMjEyMzIwWhcNMjIwNzAyMjEz
MDUxWjASMRAwDgYDVQQDEwdjbGllbnQyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
MIIBCgKCAQEA6xjW5KQR3/OFQtV5M75WINqQ4AzXSu6DhSz/yumaaQZP/UxY+6hi
jcrFzGo9MMie/Sza8DhkXOFAl2BelUubrOeB2cl+/Gr8OCyRi2Gv6j3zCsuN/4jQ
tNaoez/IbkDvI3l/ZpzBtnuNY2RiemGgHuORXHRVf3qVlsw+npBIRW5rM2HkO/xG
oZjeBErWVu390Lyn+Gvk2TqQDnkutWnxUC60/zPlHhXZ4BwaFAekbSnjsSDB1YFM
s8HwW4oBryoxdj3/+/qLrBHt75IdLw3T7/V1UDJQM3EvSQOr12w4egpldhtsC871
nnBQZeY6qA5feffIwwg/6lJm70o6S6OX6wIDAQABo3EwbzAOBgNVHQ8BAf8EBAMC
A7gwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBTB84v5
t9HqhLhMODbn6oYkEQt3KzAfBgNVHSMEGDAWgBTtcgjQBq/80EySIvJcN8OTorgb
zDANBgkqhkiG9w0BAQsFAAOCAgEALGtBCve5k8tToL3oLuXp/oSik6ovIB/zq4I/
4zNMYPU31+ZWz6aahysgx1JL1yqTa3Qm8o2tu52MbnV10dM7CIw7c/cYa+c+OPcG
5LF97kp13X+r2axy+CmwM86b4ILaDGs2Qyai6VB6k7oFUve+av5o7aUrNFpqGCJz
HWdtHZSVA3JMATzy0TfWanwkzreqfdw7qH0yZ9bDURlBKAVWrqnCstva9jRuv+AI
eqxr/4Ro986TFjJdoAP3Vr16CPg7/B6GA/KmsBWJrpeJdPWq4i2gpLKvYZoy89qD
mUZf34RbzcCtV4NvV1DadGnt4us0nvLrvS5rL2+2uWD09kZYq9RbLkvgzF/cY0fz
i7I1bi5XQ+alWe0uAk5ZZL/D+GTRYUX1AWwCqwJxmHrMxcskMyO9pXvLyuSWRDLo
YNBrbX9nLcfJzVCp+X+9sntTHjs4l6Cw+fLepJIgtgqdCHtbhTiv68vSM6cgb4br
6n2xrXRKuioiWFOrTSRr+oalZh8dGJ/xvwY8IbWknZAvml9mf1VvfE7Ma5P777QM
fsbYVTq0Y3R/5hIWsC3HA5z6MIM8L1oRe/YyhP3CTmrCHkVKyDOosGXpGz+JVcyo
cfYkY5A3yFKB2HaCwZSfwFmRhxkrYWGEbHv3Cd9YkZs1J3hNhGFZyVMC9Uh0S85a
6zdDidU=
-----END CERTIFICATE-----`
	client2Key = `-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEA6xjW5KQR3/OFQtV5M75WINqQ4AzXSu6DhSz/yumaaQZP/UxY
+6hijcrFzGo9MMie/Sza8DhkXOFAl2BelUubrOeB2cl+/Gr8OCyRi2Gv6j3zCsuN
/4jQtNaoez/IbkDvI3l/ZpzBtnuNY2RiemGgHuORXHRVf3qVlsw+npBIRW5rM2Hk
O/xGoZjeBErWVu390Lyn+Gvk2TqQDnkutWnxUC60/zPlHhXZ4BwaFAekbSnjsSDB
1YFMs8HwW4oBryoxdj3/+/qLrBHt75IdLw3T7/V1UDJQM3EvSQOr12w4egpldhts
C871nnBQZeY6qA5feffIwwg/6lJm70o6S6OX6wIDAQABAoIBAFatstVb1KdQXsq0
cFpui8zTKOUiduJOrDkWzTygAmlEhYtrccdfXu7OWz0x0lvBLDVGK3a0I/TGrAzj
4BuFY+FM/egxTVt9in6fmA3et4BS1OAfCryzUdfK6RV//8L+t+zJZ/qKQzWnugpy
QYjDo8ifuMFwtvEoXizaIyBNLAhEp9hnrv+Tyi2O2gahPvCHsD48zkyZRCHYRstD
NH5cIrwz9/RJgPO1KI+QsJE7Nh7stR0sbr+5TPU4fnsL2mNhMUF2TJrwIPrc1yp+
YIUjdnh3SO88j4TQT3CIrWi8i4pOy6N0dcVn3gpCRGaqAKyS2ZYUj+yVtLO4KwxZ
SZ1lNvECgYEA78BrF7f4ETfWSLcBQ3qxfLs7ibB6IYo2x25685FhZjD+zLXM1AKb
FJHEXUm3mUYrFJK6AFEyOQnyGKBOLs3S6oTAswMPbTkkZeD1Y9O6uv0AHASLZnK6
pC6ub0eSRF5LUyTQ55Jj8D7QsjXJueO8v+G5ihWhNSN9tB2UA+8NBmkCgYEA+weq
cvoeMIEMBQHnNNLy35bwfqrceGyPIRBcUIvzQfY1vk7KW6DYOUzC7u+WUzy/hA52
DjXVVhua2eMQ9qqtOav7djcMc2W9RbLowxvno7K5qiCss013MeWk64TCWy+WMp5A
AVAtOliC3hMkIKqvR2poqn+IBTh1449agUJQqTMCgYEAu06IHGq1GraV6g9XpGF5
wqoAlMzUTdnOfDabRilBf/YtSr+J++ThRcuwLvXFw7CnPZZ4TIEjDJ7xjj3HdxeE
fYYjineMmNd40UNUU556F1ZLvJfsVKizmkuCKhwvcMx+asGrmA+tlmds4p3VMS50
KzDtpKzLWlmU/p/RINWlRmkCgYBy0pHTn7aZZx2xWKqCDg+L2EXPGqZX6wgZDpu7
OBifzlfM4ctL2CmvI/5yPmLbVgkgBWFYpKUdiujsyyEiQvWTUKhn7UwjqKDHtcsk
G6p7xS+JswJrzX4885bZJ9Oi1AR2yM3sC9l0O7I4lDbNPmWIXBLeEhGMmcPKv/Kc
91Ff4wKBgQCF3ur+Vt0PSU0ucrPVHjCe7tqazm0LJaWbPXL1Aw0pzdM2EcNcW/MA
w0kqpr7MgJ94qhXCBcVcfPuFN9fBOadM3UBj1B45Cz3pptoK+ScI8XKno6jvVK/p
xr5cb9VBRBtB9aOKVfuRhpatAfS2Pzm2Htae9lFn7slGPUmu2hkjDw==
-----END RSA PRIVATE KEY-----`
)

func TestLoadCertificate(t *testing.T) {
	caCrtPath := filepath.Join(os.TempDir(), "testca.crt")
	caCrlPath := filepath.Join(os.TempDir(), "testcrl.crt")
	certPath := filepath.Join(os.TempDir(), "test.crt")
	keyPath := filepath.Join(os.TempDir(), "test.key")
	err := os.WriteFile(caCrtPath, []byte(caCRT), os.ModePerm)
	assert.NoError(t, err)
	err = os.WriteFile(caCrlPath, []byte(caCRL), os.ModePerm)
	assert.NoError(t, err)
	err = os.WriteFile(certPath, []byte(serverCert), os.ModePerm)
	assert.NoError(t, err)
	err = os.WriteFile(keyPath, []byte(serverKey), os.ModePerm)
	assert.NoError(t, err)
	certManager, err := NewCertManager(certPath, keyPath, configDir, logSenderTest)
	assert.NoError(t, err)
	certFunc := certManager.GetCertificateFunc()
	if assert.NotNil(t, certFunc) {
		hello := &tls.ClientHelloInfo{
			ServerName:   "localhost",
			CipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305},
		}
		cert, err := certFunc(hello)
		assert.NoError(t, err)
		assert.Equal(t, certManager.cert, cert)
	}

	certManager.SetCACertificates(nil)
	err = certManager.LoadRootCAs()
	assert.NoError(t, err)

	certManager.SetCACertificates([]string{""})
	err = certManager.LoadRootCAs()
	assert.Error(t, err)

	certManager.SetCACertificates([]string{"invalid"})
	err = certManager.LoadRootCAs()
	assert.Error(t, err)

	// loading the key as root CA must fail
	certManager.SetCACertificates([]string{keyPath})
	err = certManager.LoadRootCAs()
	assert.Error(t, err)

	certManager.SetCACertificates([]string{certPath})
	err = certManager.LoadRootCAs()
	assert.NoError(t, err)

	rootCa := certManager.GetRootCAs()
	assert.NotNil(t, rootCa)

	err = certManager.Reload()
	assert.NoError(t, err)

	certManager.SetCARevocationLists(nil)
	err = certManager.LoadCRLs()
	assert.NoError(t, err)

	certManager.SetCARevocationLists([]string{""})
	err = certManager.LoadCRLs()
	assert.Error(t, err)

	certManager.SetCARevocationLists([]string{"invalid crl"})
	err = certManager.LoadCRLs()
	assert.Error(t, err)

	// this is not a CRL and must fail
	certManager.SetCARevocationLists([]string{caCrtPath})
	err = certManager.LoadCRLs()
	assert.Error(t, err)

	certManager.SetCARevocationLists([]string{caCrlPath})
	err = certManager.LoadCRLs()
	assert.NoError(t, err)

	crt, err := tls.X509KeyPair([]byte(caCRT), []byte(caKey))
	assert.NoError(t, err)

	x509CAcrt, err := x509.ParseCertificate(crt.Certificate[0])
	assert.NoError(t, err)

	crt, err = tls.X509KeyPair([]byte(client1Crt), []byte(client1Key))
	assert.NoError(t, err)
	x509crt, err := x509.ParseCertificate(crt.Certificate[0])
	if assert.NoError(t, err) {
		assert.False(t, certManager.IsRevoked(x509crt, x509CAcrt))
	}

	crt, err = tls.X509KeyPair([]byte(client2Crt), []byte(client2Key))
	assert.NoError(t, err)
	x509crt, err = x509.ParseCertificate(crt.Certificate[0])
	if assert.NoError(t, err) {
		assert.True(t, certManager.IsRevoked(x509crt, x509CAcrt))
	}

	assert.True(t, certManager.IsRevoked(nil, nil))

	err = os.Remove(caCrlPath)
	assert.NoError(t, err)
	err = certManager.Reload()
	assert.Error(t, err)

	err = os.Remove(certPath)
	assert.NoError(t, err)
	err = os.Remove(keyPath)
	assert.NoError(t, err)
	err = certManager.Reload()
	assert.Error(t, err)

	err = os.Remove(caCrtPath)
	assert.NoError(t, err)
}

func TestLoadInvalidCert(t *testing.T) {
	certManager, err := NewCertManager("test.crt", "test.key", configDir, logSenderTest)
	assert.Error(t, err)
	assert.Nil(t, certManager)
}
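
The certFunc exercised above pairs naturally with crypto/tls: a GetCertificate callback lets the manager swap certificates on Reload without restarting the listener. A minimal, self-contained sketch of that wiring, assuming only the callback signature the test uses; the stub certFunc stands in for a real manager and is not SFTPGo code:

package main

import (
	"crypto/tls"
	"fmt"
)

func main() {
	// stub standing in for something like certManager.GetCertificateFunc()
	certFunc := func(hello *tls.ClientHelloInfo) (*tls.Certificate, error) {
		// a real manager would return the currently loaded certificate here
		return &tls.Certificate{}, nil
	}
	cfg := &tls.Config{
		GetCertificate: certFunc, // invoked once per TLS handshake
		MinVersion:     tls.VersionTLS12,
	}
	fmt.Println(cfg.MinVersion == tls.VersionTLS12)
}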

@ -1,306 +0,0 @@
package common

import (
	"errors"
	"path"
	"sync"
	"sync/atomic"
	"time"

	"github.com/drakkan/sftpgo/dataprovider"
	"github.com/drakkan/sftpgo/logger"
	"github.com/drakkan/sftpgo/metrics"
	"github.com/drakkan/sftpgo/vfs"
)

var (
	// ErrTransferClosed defines the error returned for a closed transfer
	ErrTransferClosed = errors.New("transfer already closed")
)

// BaseTransfer contains the transfer details common to all protocols,
// for an upload or a download
type BaseTransfer struct { //nolint:maligned
	ID              uint64
	BytesSent       int64
	BytesReceived   int64
	Fs              vfs.Fs
	File            vfs.File
	Connection      *BaseConnection
	cancelFn        func()
	fsPath          string
	effectiveFsPath string
	requestPath     string
	start           time.Time
	MaxWriteSize    int64
	MinWriteOffset  int64
	InitialSize     int64
	isNewFile       bool
	transferType    int
	AbortTransfer   int32
	sync.Mutex
	ErrTransfer error
}

// NewBaseTransfer returns a new BaseTransfer and adds it to the given connection
func NewBaseTransfer(file vfs.File, conn *BaseConnection, cancelFn func(), fsPath, effectiveFsPath, requestPath string,
	transferType int, minWriteOffset, initialSize, maxWriteSize int64, isNewFile bool, fs vfs.Fs) *BaseTransfer {
	t := &BaseTransfer{
		ID:              conn.GetTransferID(),
		File:            file,
		Connection:      conn,
		cancelFn:        cancelFn,
		fsPath:          fsPath,
		effectiveFsPath: effectiveFsPath,
		start:           time.Now(),
		transferType:    transferType,
		MinWriteOffset:  minWriteOffset,
		InitialSize:     initialSize,
		isNewFile:       isNewFile,
		requestPath:     requestPath,
		BytesSent:       0,
		BytesReceived:   0,
		MaxWriteSize:    maxWriteSize,
		AbortTransfer:   0,
		Fs:              fs,
	}

	conn.AddTransfer(t)
	return t
}

// GetID returns the transfer ID
func (t *BaseTransfer) GetID() uint64 {
	return t.ID
}

// GetType returns the transfer type
func (t *BaseTransfer) GetType() int {
	return t.transferType
}

// GetSize returns the transferred size
func (t *BaseTransfer) GetSize() int64 {
	if t.transferType == TransferDownload {
		return atomic.LoadInt64(&t.BytesSent)
	}
	return atomic.LoadInt64(&t.BytesReceived)
}

// GetStartTime returns the start time
func (t *BaseTransfer) GetStartTime() time.Time {
	return t.start
}

// SignalClose signals that the transfer should be closed.
// For some protocols, for example WebDAV, we have no
// access to the network connection, so we use this method
// to make the next read or write fail
func (t *BaseTransfer) SignalClose() {
	atomic.StoreInt32(&(t.AbortTransfer), 1)
}
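
A sketch of how a consumer might honor the flag set by SignalClose, assuming it lives in the same package so BaseTransfer and ErrTransferClosed are in scope; the read callback and the loop itself are illustrative only, not the actual protocol handlers:

// readLoop is a hypothetical consumer of the AbortTransfer flag: the flag is
// checked with an atomic load before every chunk, so a concurrent SignalClose
// makes the next iteration return an error instead of touching the connection.
func readLoop(t *BaseTransfer, buf []byte, read func([]byte) (int, error)) error {
	for {
		if atomic.LoadInt32(&t.AbortTransfer) == 1 {
			return ErrTransferClosed
		}
		n, err := read(buf)
		if err != nil {
			return err // includes io.EOF at end of stream
		}
		atomic.AddInt64(&t.BytesSent, int64(n))
	}
}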

// GetVirtualPath returns the transfer virtual path
func (t *BaseTransfer) GetVirtualPath() string {
	return t.requestPath
}

// GetFsPath returns the transfer filesystem path
func (t *BaseTransfer) GetFsPath() string {
	return t.fsPath
}

// GetRealFsPath returns the real transfer filesystem path.
// If atomic uploads are enabled this differs from fsPath
func (t *BaseTransfer) GetRealFsPath(fsPath string) string {
	if fsPath == t.GetFsPath() {
		if t.File != nil {
			return t.File.Name()
		}
		return t.fsPath
	}
	return ""
}

// SetCancelFn sets the cancel function for the transfer
func (t *BaseTransfer) SetCancelFn(cancelFn func()) {
	t.cancelFn = cancelFn
}

// Truncate changes the size of the opened file.
// Supported for local fs only
func (t *BaseTransfer) Truncate(fsPath string, size int64) (int64, error) {
	if fsPath == t.GetFsPath() {
		if t.File != nil {
			initialSize := t.InitialSize
			err := t.File.Truncate(size)
			if err == nil {
				t.Lock()
				t.InitialSize = size
				if t.MaxWriteSize > 0 {
					sizeDiff := initialSize - size
					t.MaxWriteSize += sizeDiff
					metrics.TransferCompleted(atomic.LoadInt64(&t.BytesSent), atomic.LoadInt64(&t.BytesReceived), t.transferType, t.ErrTransfer)
					atomic.StoreInt64(&t.BytesReceived, 0)
				}
				t.Unlock()
			}
			t.Connection.Log(logger.LevelDebug, "file %#v truncated to size %v max write size %v new initial size %v err: %v",
				fsPath, size, t.MaxWriteSize, t.InitialSize, err)
			return initialSize, err
		}
		if size == 0 && atomic.LoadInt64(&t.BytesSent) == 0 {
			// for cloud providers the file is always truncated to zero, we don't support append/resume for uploads.
			// For buffered SFTP we can have buffered bytes, so we return an error
			if !vfs.IsBufferedSFTPFs(t.Fs) {
				return 0, nil
			}
		}
		return 0, vfs.ErrVfsUnsupported
	}
	return 0, errTransferMismatch
}
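
The MaxWriteSize bookkeeping above is easiest to follow with concrete numbers, the same ones TestTruncate later asserts on: a file with InitialSize 5 and MaxWriteSize 100, truncated to 2, leaves 103 writable bytes because the 3 freed bytes are credited back to the write budget. A standalone sketch of just that arithmetic:

package main

import "fmt"

func main() {
	initialSize, maxWriteSize := int64(5), int64(100)
	size := int64(2) // requested truncate size

	// same adjustment Truncate performs when MaxWriteSize > 0
	sizeDiff := initialSize - size
	maxWriteSize += sizeDiff
	fmt.Println(maxWriteSize) // 103, matching TestTruncate's assertion
}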

// TransferError is called if there is an unexpected error,
// for example a network or client issue
func (t *BaseTransfer) TransferError(err error) {
	t.Lock()
	defer t.Unlock()
	if t.ErrTransfer != nil {
		return
	}
	t.ErrTransfer = err
	if t.cancelFn != nil {
		t.cancelFn()
	}
	elapsed := time.Since(t.start).Nanoseconds() / 1000000
	t.Connection.Log(logger.LevelWarn, "Unexpected error for transfer, path: %#v, error: \"%v\" bytes sent: %v, "+
		"bytes received: %v transfer running since %v ms", t.fsPath, t.ErrTransfer, atomic.LoadInt64(&t.BytesSent),
		atomic.LoadInt64(&t.BytesReceived), elapsed)
}

func (t *BaseTransfer) getUploadFileSize() (int64, error) {
	var fileSize int64
	info, err := t.Fs.Stat(t.fsPath)
	if err == nil {
		fileSize = info.Size()
	}
	if vfs.IsCryptOsFs(t.Fs) && t.ErrTransfer != nil {
		errDelete := t.Fs.Remove(t.fsPath, false)
		if errDelete != nil {
			t.Connection.Log(logger.LevelWarn, "error removing partial crypto file %#v: %v", t.fsPath, errDelete)
		}
	}
	return fileSize, err
}

// Close is called when the transfer is completed.
// It logs the transfer info, updates the user quota (for uploads)
// and executes any defined action.
// If there is an error, no action is executed and, in atomic mode,
// we try to delete the temporary file
func (t *BaseTransfer) Close() error {
	defer t.Connection.RemoveTransfer(t)

	var err error
	numFiles := 0
	if t.isNewFile {
		numFiles = 1
	}
	metrics.TransferCompleted(atomic.LoadInt64(&t.BytesSent), atomic.LoadInt64(&t.BytesReceived), t.transferType, t.ErrTransfer)
	if t.ErrTransfer == ErrQuotaExceeded && t.File != nil {
		// if quota is exceeded we try to remove the partial file for uploads to the local filesystem
		err = t.Fs.Remove(t.File.Name(), false)
		if err == nil {
			numFiles--
			atomic.StoreInt64(&t.BytesReceived, 0)
			t.MinWriteOffset = 0
		}
		t.Connection.Log(logger.LevelWarn, "upload denied due to space limit, delete temporary file: %#v, deletion error: %v",
			t.File.Name(), err)
	} else if t.transferType == TransferUpload && t.effectiveFsPath != t.fsPath {
		if t.ErrTransfer == nil || Config.UploadMode == UploadModeAtomicWithResume {
			err = t.Fs.Rename(t.effectiveFsPath, t.fsPath)
			t.Connection.Log(logger.LevelDebug, "atomic upload completed, rename: %#v -> %#v, error: %v",
				t.effectiveFsPath, t.fsPath, err)
		} else {
			err = t.Fs.Remove(t.effectiveFsPath, false)
			t.Connection.Log(logger.LevelWarn, "atomic upload completed with error: \"%v\", delete temporary file: %#v, "+
				"deletion error: %v", t.ErrTransfer, t.effectiveFsPath, err)
			if err == nil {
				numFiles--
				atomic.StoreInt64(&t.BytesReceived, 0)
				t.MinWriteOffset = 0
			}
		}
	}
	elapsed := time.Since(t.start).Nanoseconds() / 1000000
	if t.transferType == TransferDownload {
		logger.TransferLog(downloadLogSender, t.fsPath, elapsed, atomic.LoadInt64(&t.BytesSent), t.Connection.User.Username,
			t.Connection.ID, t.Connection.protocol, t.Connection.remoteAddr)
		ExecuteActionNotification(&t.Connection.User, operationDownload, t.fsPath, t.requestPath, "", "", t.Connection.protocol,
			atomic.LoadInt64(&t.BytesSent), t.ErrTransfer)
	} else {
		fileSize := atomic.LoadInt64(&t.BytesReceived) + t.MinWriteOffset
		if statSize, err := t.getUploadFileSize(); err == nil {
			fileSize = statSize
		}
		t.Connection.Log(logger.LevelDebug, "uploaded file size %v", fileSize)
		t.updateQuota(numFiles, fileSize)
		logger.TransferLog(uploadLogSender, t.fsPath, elapsed, atomic.LoadInt64(&t.BytesReceived), t.Connection.User.Username,
			t.Connection.ID, t.Connection.protocol, t.Connection.remoteAddr)
		ExecuteActionNotification(&t.Connection.User, operationUpload, t.fsPath, t.requestPath, "", "", t.Connection.protocol, fileSize,
			t.ErrTransfer)
	}
	if t.ErrTransfer != nil {
		t.Connection.Log(logger.LevelWarn, "transfer error: %v, path: %#v", t.ErrTransfer, t.fsPath)
		if err == nil {
			err = t.ErrTransfer
		}
	}
	return err
}

func (t *BaseTransfer) updateQuota(numFiles int, fileSize int64) bool {
	// S3 uploads are atomic: if there is an error nothing is uploaded
	if t.File == nil && t.ErrTransfer != nil {
		return false
	}
	sizeDiff := fileSize - t.InitialSize
	if t.transferType == TransferUpload && (numFiles != 0 || sizeDiff > 0) {
		vfolder, err := t.Connection.User.GetVirtualFolderForPath(path.Dir(t.requestPath))
		if err == nil {
			dataprovider.UpdateVirtualFolderQuota(&vfolder.BaseVirtualFolder, numFiles, //nolint:errcheck
				sizeDiff, false)
			if vfolder.IsIncludedInUserQuota() {
				dataprovider.UpdateUserQuota(&t.Connection.User, numFiles, sizeDiff, false) //nolint:errcheck
			}
		} else {
			dataprovider.UpdateUserQuota(&t.Connection.User, numFiles, sizeDiff, false) //nolint:errcheck
		}
		return true
	}
	return false
}

// HandleThrottle manages bandwidth throttling
func (t *BaseTransfer) HandleThrottle() {
	var wantedBandwidth int64
	var transferredBytes int64
	if t.transferType == TransferDownload {
		wantedBandwidth = t.Connection.User.DownloadBandwidth
		transferredBytes = atomic.LoadInt64(&t.BytesSent)
	} else {
		wantedBandwidth = t.Connection.User.UploadBandwidth
		transferredBytes = atomic.LoadInt64(&t.BytesReceived)
	}
	if wantedBandwidth > 0 {
		// real and wanted elapsed as milliseconds, bytes as kilobytes
		realElapsed := time.Since(t.start).Nanoseconds() / 1000000
		// transferredBytes / 1024 gives KB; multiplying by 1000 and dividing by
		// the wanted bandwidth (KB/s) gives the expected elapsed milliseconds
		wantedElapsed := 1000 * (transferredBytes / 1024) / wantedBandwidth
		if wantedElapsed > realElapsed {
			toSleep := time.Duration(wantedElapsed - realElapsed)
			time.Sleep(toSleep * time.Millisecond)
		}
	}
}
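
A worked example of the throttle formula, using the same values TestTransferThrottling below feeds in: at a 50 KB/s limit with 131072 bytes already moved, wantedElapsed is 1000 * (131072 / 1024) / 50 = 2560 ms, so a transfer that has only been running for 500 ms sleeps for the remaining 2060 ms. A standalone sketch of just that calculation:

package main

import (
	"fmt"
	"time"
)

func main() {
	transferredBytes := int64(131072) // bytes moved so far
	wantedBandwidth := int64(50)      // configured limit in KB/s
	realElapsed := int64(500)         // milliseconds since the transfer started

	// same formula HandleThrottle uses
	wantedElapsed := 1000 * (transferredBytes / 1024) / wantedBandwidth
	if wantedElapsed > realElapsed {
		toSleep := time.Duration(wantedElapsed-realElapsed) * time.Millisecond
		fmt.Println(toSleep) // 2.06s
	}
}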

@ -1,275 +0,0 @@
package common

import (
	"errors"
	"os"
	"path/filepath"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/drakkan/sftpgo/dataprovider"
	"github.com/drakkan/sftpgo/kms"
	"github.com/drakkan/sftpgo/vfs"
)

func TestTransferUpdateQuota(t *testing.T) {
	conn := NewBaseConnection("", ProtocolSFTP, "", dataprovider.User{})
	transfer := BaseTransfer{
		Connection:    conn,
		transferType:  TransferUpload,
		BytesReceived: 123,
		Fs:            vfs.NewOsFs("", os.TempDir(), ""),
	}
	errFake := errors.New("fake error")
	transfer.TransferError(errFake)
	assert.False(t, transfer.updateQuota(1, 0))
	err := transfer.Close()
	if assert.Error(t, err) {
		assert.EqualError(t, err, errFake.Error())
	}
	mappedPath := filepath.Join(os.TempDir(), "vdir")
	vdirPath := "/vdir"
	conn.User.VirtualFolders = append(conn.User.VirtualFolders, vfs.VirtualFolder{
		BaseVirtualFolder: vfs.BaseVirtualFolder{
			MappedPath: mappedPath,
		},
		VirtualPath: vdirPath,
		QuotaFiles:  -1,
		QuotaSize:   -1,
	})
	transfer.ErrTransfer = nil
	transfer.BytesReceived = 1
	transfer.requestPath = "/vdir/file"
	assert.True(t, transfer.updateQuota(1, 0))
	err = transfer.Close()
	assert.NoError(t, err)
}

func TestTransferThrottling(t *testing.T) {
	u := dataprovider.User{
		Username:          "test",
		UploadBandwidth:   50,
		DownloadBandwidth: 40,
	}
	fs := vfs.NewOsFs("", os.TempDir(), "")
	testFileSize := int64(131072)
	wantedUploadElapsed := 1000 * (testFileSize / 1024) / u.UploadBandwidth
	wantedDownloadElapsed := 1000 * (testFileSize / 1024) / u.DownloadBandwidth
	// some tolerance
	wantedUploadElapsed -= wantedDownloadElapsed / 10
	wantedDownloadElapsed -= wantedDownloadElapsed / 10
	conn := NewBaseConnection("id", ProtocolSCP, "", u)
	transfer := NewBaseTransfer(nil, conn, nil, "", "", "", TransferUpload, 0, 0, 0, true, fs)
	transfer.BytesReceived = testFileSize
	transfer.Connection.UpdateLastActivity()
	startTime := transfer.Connection.GetLastActivity()
	transfer.HandleThrottle()
	elapsed := time.Since(startTime).Nanoseconds() / 1000000
	assert.GreaterOrEqual(t, elapsed, wantedUploadElapsed, "upload bandwidth throttling not respected")
	err := transfer.Close()
	assert.NoError(t, err)

	transfer = NewBaseTransfer(nil, conn, nil, "", "", "", TransferDownload, 0, 0, 0, true, fs)
	transfer.BytesSent = testFileSize
	transfer.Connection.UpdateLastActivity()
	startTime = transfer.Connection.GetLastActivity()

	transfer.HandleThrottle()
	elapsed = time.Since(startTime).Nanoseconds() / 1000000
	assert.GreaterOrEqual(t, elapsed, wantedDownloadElapsed, "download bandwidth throttling not respected")
	err = transfer.Close()
	assert.NoError(t, err)
}

func TestRealPath(t *testing.T) {
	testFile := filepath.Join(os.TempDir(), "afile.txt")
	fs := vfs.NewOsFs("123", os.TempDir(), "")
	u := dataprovider.User{
		Username: "user",
		HomeDir:  os.TempDir(),
	}
	u.Permissions = make(map[string][]string)
	u.Permissions["/"] = []string{dataprovider.PermAny}
	file, err := os.Create(testFile)
	require.NoError(t, err)
	conn := NewBaseConnection(fs.ConnectionID(), ProtocolSFTP, "", u)
	transfer := NewBaseTransfer(file, conn, nil, testFile, testFile, "/transfer_test_file", TransferUpload, 0, 0, 0, true, fs)
	rPath := transfer.GetRealFsPath(testFile)
	assert.Equal(t, testFile, rPath)
	rPath = conn.getRealFsPath(testFile)
	assert.Equal(t, testFile, rPath)
	err = transfer.Close()
	assert.NoError(t, err)
	err = file.Close()
	assert.NoError(t, err)
	transfer.File = nil
	rPath = transfer.GetRealFsPath(testFile)
	assert.Equal(t, testFile, rPath)
	rPath = transfer.GetRealFsPath("")
	assert.Empty(t, rPath)
	err = os.Remove(testFile)
	assert.NoError(t, err)
	assert.Len(t, conn.GetTransfers(), 0)
}

func TestTruncate(t *testing.T) {
	testFile := filepath.Join(os.TempDir(), "transfer_test_file")
	fs := vfs.NewOsFs("123", os.TempDir(), "")
	u := dataprovider.User{
		Username: "user",
		HomeDir:  os.TempDir(),
	}
	u.Permissions = make(map[string][]string)
	u.Permissions["/"] = []string{dataprovider.PermAny}
	file, err := os.Create(testFile)
	if !assert.NoError(t, err) {
		assert.FailNow(t, "unable to open test file")
	}
	_, err = file.Write([]byte("hello"))
	assert.NoError(t, err)
	conn := NewBaseConnection(fs.ConnectionID(), ProtocolSFTP, "", u)
	transfer := NewBaseTransfer(file, conn, nil, testFile, testFile, "/transfer_test_file", TransferUpload, 0, 5, 100, false, fs)

	err = conn.SetStat("/transfer_test_file", &StatAttributes{
		Size:  2,
		Flags: StatAttrSize,
	})
	assert.NoError(t, err)
	assert.Equal(t, int64(103), transfer.MaxWriteSize)
	err = transfer.Close()
	assert.NoError(t, err)
	err = file.Close()
	assert.NoError(t, err)
	fi, err := os.Stat(testFile)
	if assert.NoError(t, err) {
		assert.Equal(t, int64(2), fi.Size())
	}

	transfer = NewBaseTransfer(file, conn, nil, testFile, testFile, "/transfer_test_file", TransferUpload, 0, 0, 100, true, fs)
	// file.Stat will fail on a closed file
	err = conn.SetStat("/transfer_test_file", &StatAttributes{
		Size:  2,
		Flags: StatAttrSize,
	})
	assert.Error(t, err)
	err = transfer.Close()
	assert.NoError(t, err)

	transfer = NewBaseTransfer(nil, conn, nil, testFile, testFile, "", TransferUpload, 0, 0, 0, true, fs)
	_, err = transfer.Truncate("mismatch", 0)
	assert.EqualError(t, err, errTransferMismatch.Error())
	_, err = transfer.Truncate(testFile, 0)
	assert.NoError(t, err)
	_, err = transfer.Truncate(testFile, 1)
	assert.EqualError(t, err, vfs.ErrVfsUnsupported.Error())

	err = transfer.Close()
	assert.NoError(t, err)

	err = os.Remove(testFile)
	assert.NoError(t, err)

	assert.Len(t, conn.GetTransfers(), 0)
}

func TestTransferErrors(t *testing.T) {
	isCancelled := false
	cancelFn := func() {
		isCancelled = true
	}
	testFile := filepath.Join(os.TempDir(), "transfer_test_file")
	fs := vfs.NewOsFs("id", os.TempDir(), "")
	u := dataprovider.User{
		Username: "test",
		HomeDir:  os.TempDir(),
	}
	err := os.WriteFile(testFile, []byte("test data"), os.ModePerm)
	assert.NoError(t, err)
	file, err := os.Open(testFile)
	if !assert.NoError(t, err) {
		assert.FailNow(t, "unable to open test file")
	}
	conn := NewBaseConnection("id", ProtocolSFTP, "", u)
	transfer := NewBaseTransfer(file, conn, nil, testFile, testFile, "/transfer_test_file", TransferUpload, 0, 0, 0, true, fs)
	assert.Nil(t, transfer.cancelFn)
	assert.Equal(t, testFile, transfer.GetFsPath())
	transfer.SetCancelFn(cancelFn)
	errFake := errors.New("err fake")
	transfer.BytesReceived = 9
	transfer.TransferError(ErrQuotaExceeded)
	assert.True(t, isCancelled)
	transfer.TransferError(errFake)
	assert.Error(t, transfer.ErrTransfer, ErrQuotaExceeded.Error())
	// the file is closed from the embedding struct before calling Close
	err = file.Close()
	assert.NoError(t, err)
	err = transfer.Close()
	if assert.Error(t, err) {
		assert.Error(t, err, ErrQuotaExceeded.Error())
	}
	assert.NoFileExists(t, testFile)

	err = os.WriteFile(testFile, []byte("test data"), os.ModePerm)
	assert.NoError(t, err)
	file, err = os.Open(testFile)
	if !assert.NoError(t, err) {
		assert.FailNow(t, "unable to open test file")
	}
	fsPath := filepath.Join(os.TempDir(), "test_file")
	transfer = NewBaseTransfer(file, conn, nil, fsPath, file.Name(), "/test_file", TransferUpload, 0, 0, 0, true, fs)
	transfer.BytesReceived = 9
	transfer.TransferError(errFake)
	assert.Error(t, transfer.ErrTransfer, errFake.Error())
	// the file is closed from the embedding struct before calling Close
	err = file.Close()
	assert.NoError(t, err)
	err = transfer.Close()
	if assert.Error(t, err) {
		assert.Error(t, err, errFake.Error())
	}
	assert.NoFileExists(t, testFile)

	err = os.WriteFile(testFile, []byte("test data"), os.ModePerm)
	assert.NoError(t, err)
	file, err = os.Open(testFile)
	if !assert.NoError(t, err) {
		assert.FailNow(t, "unable to open test file")
	}
	transfer = NewBaseTransfer(file, conn, nil, fsPath, file.Name(), "/test_file", TransferUpload, 0, 0, 0, true, fs)
	transfer.BytesReceived = 9
	// the file is closed from the embedding struct before calling Close
	err = file.Close()
	assert.NoError(t, err)
	err = transfer.Close()
	assert.NoError(t, err)
	assert.NoFileExists(t, testFile)
	assert.FileExists(t, fsPath)
	err = os.Remove(fsPath)
	assert.NoError(t, err)

	assert.Len(t, conn.GetTransfers(), 0)
}

func TestRemovePartialCryptoFile(t *testing.T) {
	testFile := filepath.Join(os.TempDir(), "transfer_test_file")
	fs, err := vfs.NewCryptFs("id", os.TempDir(), "", vfs.CryptFsConfig{Passphrase: kms.NewPlainSecret("secret")})
	require.NoError(t, err)
	u := dataprovider.User{
		Username: "test",
		HomeDir:  os.TempDir(),
	}
	conn := NewBaseConnection(fs.ConnectionID(), ProtocolSFTP, "", u)
	transfer := NewBaseTransfer(nil, conn, nil, testFile, testFile, "/transfer_test_file", TransferUpload, 0, 0, 0, true, fs)
	transfer.ErrTransfer = errors.New("test error")
	_, err = transfer.getUploadFileSize()
	assert.Error(t, err)
	err = os.WriteFile(testFile, []byte("test data"), os.ModePerm)
	assert.NoError(t, err)
	size, err := transfer.getUploadFileSize()
	assert.NoError(t, err)
	assert.Equal(t, int64(9), size)
	assert.NoFileExists(t, testFile)
}

config/config.go
@ -1,11 +0,0 @@
// +build linux

package config

import "github.com/spf13/viper"

// Linux-specific config search paths
func setViperAdditionalConfigPaths() {
	viper.AddConfigPath("$HOME/.config/sftpgo")
	viper.AddConfigPath("/etc/sftpgo")
}
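
For context, a minimal sketch of how these search paths are typically consumed: viper resolves the first matching config file across all registered directories. The base name "sftpgo" and the working-directory entry are assumptions for this sketch, not taken from the file above:

package main

import (
	"fmt"

	"github.com/spf13/viper"
)

func main() {
	// mirror of the search-path setup above; the base config name is an
	// assumption for this sketch
	viper.SetConfigName("sftpgo")
	viper.AddConfigPath(".")                    // working directory first
	viper.AddConfigPath("$HOME/.config/sftpgo") // per-user config
	viper.AddConfigPath("/etc/sftpgo")          // system-wide config

	// the first sftpgo.{json,yaml,toml,...} found in those directories wins
	if err := viper.ReadInConfig(); err != nil {
		fmt.Println("no config file found:", err)
	}
}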

@ -1,7 +0,0 @@
// +build !linux

package config

func setViperAdditionalConfigPaths() {
}

@ -1,859 +0,0 @@
package config_test

import (
	"encoding/json"
	"os"
	"path/filepath"
	"strings"
	"testing"

	"github.com/spf13/viper"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/drakkan/sftpgo/common"
	"github.com/drakkan/sftpgo/config"
	"github.com/drakkan/sftpgo/dataprovider"
	"github.com/drakkan/sftpgo/ftpd"
	"github.com/drakkan/sftpgo/httpclient"
	"github.com/drakkan/sftpgo/httpd"
	"github.com/drakkan/sftpgo/sftpd"
	"github.com/drakkan/sftpgo/utils"
	"github.com/drakkan/sftpgo/webdavd"
)

const (
	tempConfigName = "temp"
)

func reset() {
	viper.Reset()
	config.Init()
}

func TestLoadConfigTest(t *testing.T) {
	reset()

	configDir := ".."
	err := config.LoadConfig(configDir, "")
	assert.NoError(t, err)
	assert.NotEqual(t, httpd.Conf{}, config.GetHTTPConfig())
	assert.NotEqual(t, dataprovider.Config{}, config.GetProviderConf())
	assert.NotEqual(t, sftpd.Configuration{}, config.GetSFTPDConfig())
	assert.NotEqual(t, httpclient.Config{}, config.GetHTTPConfig())
	confName := tempConfigName + ".json"
	configFilePath := filepath.Join(configDir, confName)
	err = config.LoadConfig(configDir, confName)
	assert.NoError(t, err)
	err = os.WriteFile(configFilePath, []byte("{invalid json}"), os.ModePerm)
	assert.NoError(t, err)
	err = config.LoadConfig(configDir, confName)
	assert.NoError(t, err)
	err = os.WriteFile(configFilePath, []byte("{\"sftpd\": {\"bind_port\": \"a\"}}"), os.ModePerm)
	assert.NoError(t, err)
	err = config.LoadConfig(configDir, confName)
	assert.Error(t, err)
	err = os.Remove(configFilePath)
	assert.NoError(t, err)
}

func TestLoadConfigFileNotFound(t *testing.T) {
	reset()

	viper.SetConfigName("configfile")
	err := config.LoadConfig(os.TempDir(), "")
	assert.NoError(t, err)
}

func TestEmptyBanner(t *testing.T) {
	reset()

	configDir := ".."
	confName := tempConfigName + ".json"
	configFilePath := filepath.Join(configDir, confName)
	err := config.LoadConfig(configDir, "")
	assert.NoError(t, err)
	sftpdConf := config.GetSFTPDConfig()
	sftpdConf.Banner = " "
	c := make(map[string]sftpd.Configuration)
	c["sftpd"] = sftpdConf
	jsonConf, _ := json.Marshal(c)
	err = os.WriteFile(configFilePath, jsonConf, os.ModePerm)
	assert.NoError(t, err)
	err = config.LoadConfig(configDir, confName)
	assert.NoError(t, err)
	sftpdConf = config.GetSFTPDConfig()
	assert.NotEmpty(t, strings.TrimSpace(sftpdConf.Banner))
	err = os.Remove(configFilePath)
	assert.NoError(t, err)

	ftpdConf := config.GetFTPDConfig()
	ftpdConf.Banner = " "
	c1 := make(map[string]ftpd.Configuration)
	c1["ftpd"] = ftpdConf
	jsonConf, _ = json.Marshal(c1)
	err = os.WriteFile(configFilePath, jsonConf, os.ModePerm)
	assert.NoError(t, err)
	err = config.LoadConfig(configDir, confName)
	assert.NoError(t, err)
	ftpdConf = config.GetFTPDConfig()
	assert.NotEmpty(t, strings.TrimSpace(ftpdConf.Banner))
	err = os.Remove(configFilePath)
	assert.NoError(t, err)
}

func TestInvalidUploadMode(t *testing.T) {
	reset()

	configDir := ".."
	confName := tempConfigName + ".json"
	configFilePath := filepath.Join(configDir, confName)
	err := config.LoadConfig(configDir, "")
	assert.NoError(t, err)
	commonConf := config.GetCommonConfig()
	commonConf.UploadMode = 10
	c := make(map[string]common.Configuration)
	c["common"] = commonConf
	jsonConf, err := json.Marshal(c)
	assert.NoError(t, err)
	err = os.WriteFile(configFilePath, jsonConf, os.ModePerm)
	assert.NoError(t, err)
	err = config.LoadConfig(configDir, confName)
	assert.NoError(t, err)
	assert.Equal(t, 0, config.GetCommonConfig().UploadMode)
	err = os.Remove(configFilePath)
	assert.NoError(t, err)
}

func TestInvalidExternalAuthScope(t *testing.T) {
	reset()

	configDir := ".."
	confName := tempConfigName + ".json"
	configFilePath := filepath.Join(configDir, confName)
	err := config.LoadConfig(configDir, "")
	assert.NoError(t, err)
	providerConf := config.GetProviderConf()
	providerConf.ExternalAuthScope = 100
	c := make(map[string]dataprovider.Config)
	c["data_provider"] = providerConf
	jsonConf, err := json.Marshal(c)
	assert.NoError(t, err)
	err = os.WriteFile(configFilePath, jsonConf, os.ModePerm)
	assert.NoError(t, err)
	err = config.LoadConfig(configDir, confName)
	assert.NoError(t, err)
	assert.Equal(t, 0, config.GetProviderConf().ExternalAuthScope)
	err = os.Remove(configFilePath)
	assert.NoError(t, err)
}

func TestInvalidCredentialsPath(t *testing.T) {
	reset()

	configDir := ".."
	confName := tempConfigName + ".json"
	configFilePath := filepath.Join(configDir, confName)
	err := config.LoadConfig(configDir, "")
	assert.NoError(t, err)
	providerConf := config.GetProviderConf()
	providerConf.CredentialsPath = ""
	c := make(map[string]dataprovider.Config)
	c["data_provider"] = providerConf
	jsonConf, err := json.Marshal(c)
	assert.NoError(t, err)
	err = os.WriteFile(configFilePath, jsonConf, os.ModePerm)
	assert.NoError(t, err)
	err = config.LoadConfig(configDir, confName)
	assert.NoError(t, err)
	assert.Equal(t, "credentials", config.GetProviderConf().CredentialsPath)
	err = os.Remove(configFilePath)
	assert.NoError(t, err)
}

func TestInvalidProxyProtocol(t *testing.T) {
	reset()

	configDir := ".."
	confName := tempConfigName + ".json"
	configFilePath := filepath.Join(configDir, confName)
	err := config.LoadConfig(configDir, "")
	assert.NoError(t, err)
	commonConf := config.GetCommonConfig()
	commonConf.ProxyProtocol = 10
	c := make(map[string]common.Configuration)
	c["common"] = commonConf
	jsonConf, err := json.Marshal(c)
	assert.NoError(t, err)
	err = os.WriteFile(configFilePath, jsonConf, os.ModePerm)
	assert.NoError(t, err)
	err = config.LoadConfig(configDir, confName)
	assert.NoError(t, err)
	assert.Equal(t, 0, config.GetCommonConfig().ProxyProtocol)
	err = os.Remove(configFilePath)
	assert.NoError(t, err)
}

func TestInvalidUsersBaseDir(t *testing.T) {
	reset()

	configDir := ".."
	confName := tempConfigName + ".json"
	configFilePath := filepath.Join(configDir, confName)
	err := config.LoadConfig(configDir, "")
	assert.NoError(t, err)
	providerConf := config.GetProviderConf()
	providerConf.UsersBaseDir = "."
	c := make(map[string]dataprovider.Config)
	c["data_provider"] = providerConf
	jsonConf, err := json.Marshal(c)
	assert.NoError(t, err)
	err = os.WriteFile(configFilePath, jsonConf, os.ModePerm)
	assert.NoError(t, err)
	err = config.LoadConfig(configDir, confName)
	assert.NoError(t, err)
	assert.Empty(t, config.GetProviderConf().UsersBaseDir)
	err = os.Remove(configFilePath)
	assert.NoError(t, err)
}

func TestSetGetConfig(t *testing.T) {
	reset()

	sftpdConf := config.GetSFTPDConfig()
	sftpdConf.MaxAuthTries = 10
	config.SetSFTPDConfig(sftpdConf)
	assert.Equal(t, sftpdConf.MaxAuthTries, config.GetSFTPDConfig().MaxAuthTries)
	dataProviderConf := config.GetProviderConf()
	dataProviderConf.Host = "test host"
	config.SetProviderConf(dataProviderConf)
	assert.Equal(t, dataProviderConf.Host, config.GetProviderConf().Host)
	httpdConf := config.GetHTTPDConfig()
	httpdConf.Bindings = append(httpdConf.Bindings, httpd.Binding{Address: "0.0.0.0"})
	config.SetHTTPDConfig(httpdConf)
	assert.Equal(t, httpdConf.Bindings[0].Address, config.GetHTTPDConfig().Bindings[0].Address)
	commonConf := config.GetCommonConfig()
	commonConf.IdleTimeout = 10
	config.SetCommonConfig(commonConf)
	assert.Equal(t, commonConf.IdleTimeout, config.GetCommonConfig().IdleTimeout)
	ftpdConf := config.GetFTPDConfig()
	ftpdConf.CertificateFile = "cert"
	ftpdConf.CertificateKeyFile = "key"
	config.SetFTPDConfig(ftpdConf)
	assert.Equal(t, ftpdConf.CertificateFile, config.GetFTPDConfig().CertificateFile)
	assert.Equal(t, ftpdConf.CertificateKeyFile, config.GetFTPDConfig().CertificateKeyFile)
	webDavConf := config.GetWebDAVDConfig()
	webDavConf.CertificateFile = "dav_cert"
	webDavConf.CertificateKeyFile = "dav_key"
	config.SetWebDAVDConfig(webDavConf)
	assert.Equal(t, webDavConf.CertificateFile, config.GetWebDAVDConfig().CertificateFile)
	assert.Equal(t, webDavConf.CertificateKeyFile, config.GetWebDAVDConfig().CertificateKeyFile)
	kmsConf := config.GetKMSConfig()
	kmsConf.Secrets.MasterKeyPath = "apath"
	kmsConf.Secrets.URL = "aurl"
	config.SetKMSConfig(kmsConf)
	assert.Equal(t, kmsConf.Secrets.MasterKeyPath, config.GetKMSConfig().Secrets.MasterKeyPath)
	assert.Equal(t, kmsConf.Secrets.URL, config.GetKMSConfig().Secrets.URL)
	telemetryConf := config.GetTelemetryConfig()
	telemetryConf.BindPort = 10001
	telemetryConf.BindAddress = "0.0.0.0"
	config.SetTelemetryConfig(telemetryConf)
	assert.Equal(t, telemetryConf.BindPort, config.GetTelemetryConfig().BindPort)
	assert.Equal(t, telemetryConf.BindAddress, config.GetTelemetryConfig().BindAddress)
}

func TestServiceToStart(t *testing.T) {
	reset()

	configDir := ".."
	err := config.LoadConfig(configDir, "")
	assert.NoError(t, err)
	assert.True(t, config.HasServicesToStart())
	sftpdConf := config.GetSFTPDConfig()
	sftpdConf.Bindings[0].Port = 0
	config.SetSFTPDConfig(sftpdConf)
	assert.False(t, config.HasServicesToStart())
	ftpdConf := config.GetFTPDConfig()
	ftpdConf.Bindings[0].Port = 2121
	config.SetFTPDConfig(ftpdConf)
	assert.True(t, config.HasServicesToStart())
	ftpdConf.Bindings[0].Port = 0
	config.SetFTPDConfig(ftpdConf)
	webdavdConf := config.GetWebDAVDConfig()
	webdavdConf.Bindings[0].Port = 9000
	config.SetWebDAVDConfig(webdavdConf)
	assert.True(t, config.HasServicesToStart())
	webdavdConf.Bindings[0].Port = 0
	config.SetWebDAVDConfig(webdavdConf)
	assert.False(t, config.HasServicesToStart())
	sftpdConf.Bindings[0].Port = 2022
	config.SetSFTPDConfig(sftpdConf)
	assert.True(t, config.HasServicesToStart())
}

func TestSFTPDBindingsCompatibility(t *testing.T) {
	reset()

	configDir := ".."
	confName := tempConfigName + ".json"
	configFilePath := filepath.Join(configDir, confName)
	err := config.LoadConfig(configDir, "")
	assert.NoError(t, err)
	sftpdConf := config.GetSFTPDConfig()
	require.Len(t, sftpdConf.Bindings, 1)
	sftpdConf.Bindings = nil
	sftpdConf.BindPort = 9022           //nolint:staticcheck
	sftpdConf.BindAddress = "127.0.0.1" //nolint:staticcheck
	c := make(map[string]sftpd.Configuration)
	c["sftpd"] = sftpdConf
	jsonConf, err := json.Marshal(c)
	assert.NoError(t, err)
	err = os.WriteFile(configFilePath, jsonConf, os.ModePerm)
	assert.NoError(t, err)
	err = config.LoadConfig(configDir, confName)
	assert.NoError(t, err)
	sftpdConf = config.GetSFTPDConfig()
	// the default binding should be replaced with the deprecated configuration
	require.Len(t, sftpdConf.Bindings, 1)
	require.Equal(t, 9022, sftpdConf.Bindings[0].Port)
	require.Equal(t, "127.0.0.1", sftpdConf.Bindings[0].Address)
	require.True(t, sftpdConf.Bindings[0].ApplyProxyConfig)

	err = config.LoadConfig(configDir, confName)
	assert.NoError(t, err)
	sftpdConf = config.GetSFTPDConfig()
	require.Len(t, sftpdConf.Bindings, 1)
	require.Equal(t, 9022, sftpdConf.Bindings[0].Port)
	require.Equal(t, "127.0.0.1", sftpdConf.Bindings[0].Address)
	require.True(t, sftpdConf.Bindings[0].ApplyProxyConfig)
	err = os.Remove(configFilePath)
	assert.NoError(t, err)
}

func TestFTPDBindingsCompatibility(t *testing.T) {
	reset()

	configDir := ".."
	confName := tempConfigName + ".json"
	configFilePath := filepath.Join(configDir, confName)
	err := config.LoadConfig(configDir, "")
	assert.NoError(t, err)
	ftpdConf := config.GetFTPDConfig()
	require.Len(t, ftpdConf.Bindings, 1)
	ftpdConf.Bindings = nil
	ftpdConf.BindPort = 9022              //nolint:staticcheck
	ftpdConf.BindAddress = "127.1.0.1"    //nolint:staticcheck
	ftpdConf.ForcePassiveIP = "127.1.1.1" //nolint:staticcheck
	ftpdConf.TLSMode = 2                  //nolint:staticcheck
	c := make(map[string]ftpd.Configuration)
	c["ftpd"] = ftpdConf
	jsonConf, err := json.Marshal(c)
	assert.NoError(t, err)
	err = os.WriteFile(configFilePath, jsonConf, os.ModePerm)
	assert.NoError(t, err)
	err = config.LoadConfig(configDir, confName)
	assert.NoError(t, err)
	ftpdConf = config.GetFTPDConfig()
	// the default binding should be replaced with the deprecated configuration
	require.Len(t, ftpdConf.Bindings, 1)
	require.Equal(t, 9022, ftpdConf.Bindings[0].Port)
	require.Equal(t, "127.1.0.1", ftpdConf.Bindings[0].Address)
	require.True(t, ftpdConf.Bindings[0].ApplyProxyConfig)
	require.Equal(t, 2, ftpdConf.Bindings[0].TLSMode)
	require.Equal(t, "127.1.1.1", ftpdConf.Bindings[0].ForcePassiveIP)
	err = os.Remove(configFilePath)
	assert.NoError(t, err)
}

func TestWebDAVDBindingsCompatibility(t *testing.T) {
	reset()

	configDir := ".."
	confName := tempConfigName + ".json"
	configFilePath := filepath.Join(configDir, confName)
	err := config.LoadConfig(configDir, "")
	assert.NoError(t, err)
	webdavConf := config.GetWebDAVDConfig()
	require.Len(t, webdavConf.Bindings, 1)
	webdavConf.Bindings = nil
	webdavConf.BindPort = 9080           //nolint:staticcheck
	webdavConf.BindAddress = "127.0.0.1" //nolint:staticcheck
	c := make(map[string]webdavd.Configuration)
	c["webdavd"] = webdavConf
	jsonConf, err := json.Marshal(c)
	assert.NoError(t, err)
	err = os.WriteFile(configFilePath, jsonConf, os.ModePerm)
	assert.NoError(t, err)
	err = config.LoadConfig(configDir, confName)
	assert.NoError(t, err)
	webdavConf = config.GetWebDAVDConfig()
	// the default binding should be replaced with the deprecated configuration
	require.Len(t, webdavConf.Bindings, 1)
	require.Equal(t, 9080, webdavConf.Bindings[0].Port)
	require.Equal(t, "127.0.0.1", webdavConf.Bindings[0].Address)
	require.False(t, webdavConf.Bindings[0].EnableHTTPS)
	err = os.Remove(configFilePath)
	assert.NoError(t, err)
}

func TestHTTPDBindingsCompatibility(t *testing.T) {
	reset()

	configDir := ".."
	confName := tempConfigName + ".json"
	configFilePath := filepath.Join(configDir, confName)
	err := config.LoadConfig(configDir, "")
	assert.NoError(t, err)
	httpdConf := config.GetHTTPDConfig()
	require.Len(t, httpdConf.Bindings, 1)
	httpdConf.Bindings = nil
	httpdConf.BindPort = 9080           //nolint:staticcheck
	httpdConf.BindAddress = "127.1.1.1" //nolint:staticcheck
	c := make(map[string]httpd.Conf)
	c["httpd"] = httpdConf
	jsonConf, err := json.Marshal(c)
	assert.NoError(t, err)
	err = os.WriteFile(configFilePath, jsonConf, os.ModePerm)
	assert.NoError(t, err)
	err = config.LoadConfig(configDir, confName)
	assert.NoError(t, err)
	httpdConf = config.GetHTTPDConfig()
	// the default binding should be replaced with the deprecated configuration
	require.Len(t, httpdConf.Bindings, 1)
	require.Equal(t, 9080, httpdConf.Bindings[0].Port)
	require.Equal(t, "127.1.1.1", httpdConf.Bindings[0].Address)
	require.False(t, httpdConf.Bindings[0].EnableHTTPS)
	require.True(t, httpdConf.Bindings[0].EnableWebAdmin)
	err = os.Remove(configFilePath)
	assert.NoError(t, err)
}

func TestRateLimitersFromEnv(t *testing.T) {
	reset()

	os.Setenv("SFTPGO_COMMON__RATE_LIMITERS__0__AVERAGE", "100")
	os.Setenv("SFTPGO_COMMON__RATE_LIMITERS__0__PERIOD", "2000")
	os.Setenv("SFTPGO_COMMON__RATE_LIMITERS__0__BURST", "10")
	os.Setenv("SFTPGO_COMMON__RATE_LIMITERS__0__TYPE", "2")
	os.Setenv("SFTPGO_COMMON__RATE_LIMITERS__0__PROTOCOLS", "SSH, FTP")
	os.Setenv("SFTPGO_COMMON__RATE_LIMITERS__0__GENERATE_DEFENDER_EVENTS", "1")
	os.Setenv("SFTPGO_COMMON__RATE_LIMITERS__0__ENTRIES_SOFT_LIMIT", "50")
	os.Setenv("SFTPGO_COMMON__RATE_LIMITERS__0__ENTRIES_HARD_LIMIT", "100")
	os.Setenv("SFTPGO_COMMON__RATE_LIMITERS__8__AVERAGE", "50")
	t.Cleanup(func() {
		os.Unsetenv("SFTPGO_COMMON__RATE_LIMITERS__0__AVERAGE")
		os.Unsetenv("SFTPGO_COMMON__RATE_LIMITERS__0__PERIOD")
		os.Unsetenv("SFTPGO_COMMON__RATE_LIMITERS__0__BURST")
		os.Unsetenv("SFTPGO_COMMON__RATE_LIMITERS__0__TYPE")
		os.Unsetenv("SFTPGO_COMMON__RATE_LIMITERS__0__PROTOCOLS")
		os.Unsetenv("SFTPGO_COMMON__RATE_LIMITERS__0__GENERATE_DEFENDER_EVENTS")
		os.Unsetenv("SFTPGO_COMMON__RATE_LIMITERS__0__ENTRIES_SOFT_LIMIT")
		os.Unsetenv("SFTPGO_COMMON__RATE_LIMITERS__0__ENTRIES_HARD_LIMIT")
		os.Unsetenv("SFTPGO_COMMON__RATE_LIMITERS__8__AVERAGE")
	})

	configDir := ".."
	err := config.LoadConfig(configDir, "")
	assert.NoError(t, err)
	limiters := config.GetCommonConfig().RateLimitersConfig
	require.Len(t, limiters, 2)
	require.Equal(t, int64(100), limiters[0].Average)
	require.Equal(t, int64(2000), limiters[0].Period)
	require.Equal(t, 10, limiters[0].Burst)
	require.Equal(t, 2, limiters[0].Type)
	protocols := limiters[0].Protocols
	require.Len(t, protocols, 2)
	require.True(t, utils.IsStringInSlice(common.ProtocolFTP, protocols))
	require.True(t, utils.IsStringInSlice(common.ProtocolSSH, protocols))
	require.True(t, limiters[0].GenerateDefenderEvents)
	require.Equal(t, 50, limiters[0].EntriesSoftLimit)
	require.Equal(t, 100, limiters[0].EntriesHardLimit)
	require.Equal(t, int64(50), limiters[1].Average)
	// we check the default values here
	require.Equal(t, int64(1000), limiters[1].Period)
	require.Equal(t, 1, limiters[1].Burst)
	require.Equal(t, 2, limiters[1].Type)
	protocols = limiters[1].Protocols
	require.Len(t, protocols, 4)
	require.True(t, utils.IsStringInSlice(common.ProtocolFTP, protocols))
	require.True(t, utils.IsStringInSlice(common.ProtocolSSH, protocols))
	require.True(t, utils.IsStringInSlice(common.ProtocolWebDAV, protocols))
	require.True(t, utils.IsStringInSlice(common.ProtocolHTTP, protocols))
	require.False(t, limiters[1].GenerateDefenderEvents)
	require.Equal(t, 100, limiters[1].EntriesSoftLimit)
	require.Equal(t, 150, limiters[1].EntriesHardLimit)
}
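
The SFTPGO_<SECTION>__<KEY> names above follow viper's env-prefix-plus-replacer convention for scalar keys; indexed keys such as RATE_LIMITERS__0__AVERAGE need additional binding logic in the loader, which is out of scope here. A minimal sketch of the scalar case, assuming a common.upload_mode key exists in the configuration:

package main

import (
	"fmt"
	"os"
	"strings"

	"github.com/spf13/viper"
)

func main() {
	os.Setenv("SFTPGO_COMMON__UPLOAD_MODE", "2")

	v := viper.New()
	v.SetEnvPrefix("sftpgo")
	// "common.upload_mode" -> "COMMON__UPLOAD_MODE" after the prefix
	v.SetEnvKeyReplacer(strings.NewReplacer(".", "__"))
	v.AutomaticEnv()

	fmt.Println(v.GetInt("common.upload_mode")) // 2
}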
|
|
||||||
|
|
||||||
func TestSFTPDBindingsFromEnv(t *testing.T) {
|
|
||||||
reset()
|
|
||||||
|
|
||||||
os.Setenv("SFTPGO_SFTPD__BINDINGS__0__ADDRESS", "127.0.0.1")
|
|
||||||
os.Setenv("SFTPGO_SFTPD__BINDINGS__0__PORT", "2200")
|
|
||||||
os.Setenv("SFTPGO_SFTPD__BINDINGS__0__APPLY_PROXY_CONFIG", "false")
|
|
||||||
os.Setenv("SFTPGO_SFTPD__BINDINGS__3__ADDRESS", "127.0.1.1")
|
|
||||||
os.Setenv("SFTPGO_SFTPD__BINDINGS__3__PORT", "2203")
|
|
||||||
t.Cleanup(func() {
|
|
||||||
os.Unsetenv("SFTPGO_SFTPD__BINDINGS__0__ADDRESS")
|
|
||||||
os.Unsetenv("SFTPGO_SFTPD__BINDINGS__0__PORT")
|
|
||||||
os.Unsetenv("SFTPGO_SFTPD__BINDINGS__0__APPLY_PROXY_CONFIG")
|
|
||||||
os.Unsetenv("SFTPGO_SFTPD__BINDINGS__3__ADDRESS")
|
|
||||||
os.Unsetenv("SFTPGO_SFTPD__BINDINGS__3__PORT")
|
|
||||||
})
|
|
||||||
|
|
||||||
configDir := ".."
|
|
||||||
err := config.LoadConfig(configDir, "")
|
|
||||||
assert.NoError(t, err)
|
|
||||||
bindings := config.GetSFTPDConfig().Bindings
|
|
||||||
require.Len(t, bindings, 2)
|
|
||||||
require.Equal(t, 2200, bindings[0].Port)
|
|
||||||
require.Equal(t, "127.0.0.1", bindings[0].Address)
|
|
||||||
require.False(t, bindings[0].ApplyProxyConfig)
|
|
||||||
require.Equal(t, 2203, bindings[1].Port)
|
|
||||||
require.Equal(t, "127.0.1.1", bindings[1].Address)
|
|
||||||
require.True(t, bindings[1].ApplyProxyConfig) // default value
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestFTPDBindingsFromEnv(t *testing.T) {
    reset()

    os.Setenv("SFTPGO_FTPD__BINDINGS__0__ADDRESS", "127.0.0.1")
    os.Setenv("SFTPGO_FTPD__BINDINGS__0__PORT", "2200")
    os.Setenv("SFTPGO_FTPD__BINDINGS__0__APPLY_PROXY_CONFIG", "f")
    os.Setenv("SFTPGO_FTPD__BINDINGS__0__TLS_MODE", "2")
    os.Setenv("SFTPGO_FTPD__BINDINGS__0__FORCE_PASSIVE_IP", "127.0.1.2")
    os.Setenv("SFTPGO_FTPD__BINDINGS__0__TLS_CIPHER_SUITES", "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256")
    os.Setenv("SFTPGO_FTPD__BINDINGS__9__ADDRESS", "127.0.1.1")
    os.Setenv("SFTPGO_FTPD__BINDINGS__9__PORT", "2203")
    os.Setenv("SFTPGO_FTPD__BINDINGS__9__TLS_MODE", "1")
    os.Setenv("SFTPGO_FTPD__BINDINGS__9__FORCE_PASSIVE_IP", "127.0.1.1")
    os.Setenv("SFTPGO_FTPD__BINDINGS__9__CLIENT_AUTH_TYPE", "2")

    t.Cleanup(func() {
        os.Unsetenv("SFTPGO_FTPD__BINDINGS__0__ADDRESS")
        os.Unsetenv("SFTPGO_FTPD__BINDINGS__0__PORT")
        os.Unsetenv("SFTPGO_FTPD__BINDINGS__0__APPLY_PROXY_CONFIG")
        os.Unsetenv("SFTPGO_FTPD__BINDINGS__0__TLS_MODE")
        os.Unsetenv("SFTPGO_FTPD__BINDINGS__0__FORCE_PASSIVE_IP")
        os.Unsetenv("SFTPGO_FTPD__BINDINGS__0__TLS_CIPHER_SUITES")
        os.Unsetenv("SFTPGO_FTPD__BINDINGS__9__ADDRESS")
        os.Unsetenv("SFTPGO_FTPD__BINDINGS__9__PORT")
        os.Unsetenv("SFTPGO_FTPD__BINDINGS__9__TLS_MODE")
        os.Unsetenv("SFTPGO_FTPD__BINDINGS__9__FORCE_PASSIVE_IP")
        os.Unsetenv("SFTPGO_FTPD__BINDINGS__9__CLIENT_AUTH_TYPE")
    })

    configDir := ".."
    err := config.LoadConfig(configDir, "")
    assert.NoError(t, err)
    bindings := config.GetFTPDConfig().Bindings
    require.Len(t, bindings, 2)
    require.Equal(t, 2200, bindings[0].Port)
    require.Equal(t, "127.0.0.1", bindings[0].Address)
    require.False(t, bindings[0].ApplyProxyConfig)
    require.Equal(t, 2, bindings[0].TLSMode)
    require.Equal(t, "127.0.1.2", bindings[0].ForcePassiveIP)
    require.Equal(t, 0, bindings[0].ClientAuthType)
    require.Len(t, bindings[0].TLSCipherSuites, 2)
    require.Equal(t, "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", bindings[0].TLSCipherSuites[0])
    require.Equal(t, "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", bindings[0].TLSCipherSuites[1])
    require.Equal(t, 2203, bindings[1].Port)
    require.Equal(t, "127.0.1.1", bindings[1].Address)
    require.True(t, bindings[1].ApplyProxyConfig) // default value
    require.Equal(t, 1, bindings[1].TLSMode)
    require.Equal(t, "127.0.1.1", bindings[1].ForcePassiveIP)
    require.Equal(t, 2, bindings[1].ClientAuthType)
    require.Nil(t, bindings[1].TLSCipherSuites)
}

func TestWebDAVBindingsFromEnv(t *testing.T) {
    reset()

    os.Setenv("SFTPGO_WEBDAVD__BINDINGS__1__ADDRESS", "127.0.0.1")
    os.Setenv("SFTPGO_WEBDAVD__BINDINGS__1__PORT", "8000")
    os.Setenv("SFTPGO_WEBDAVD__BINDINGS__1__ENABLE_HTTPS", "0")
    os.Setenv("SFTPGO_WEBDAVD__BINDINGS__1__TLS_CIPHER_SUITES", "TLS_RSA_WITH_AES_128_CBC_SHA ")
    os.Setenv("SFTPGO_WEBDAVD__BINDINGS__1__PROXY_ALLOWED", "192.168.10.1")
    os.Setenv("SFTPGO_WEBDAVD__BINDINGS__2__ADDRESS", "127.0.1.1")
    os.Setenv("SFTPGO_WEBDAVD__BINDINGS__2__PORT", "9000")
    os.Setenv("SFTPGO_WEBDAVD__BINDINGS__2__ENABLE_HTTPS", "1")
    os.Setenv("SFTPGO_WEBDAVD__BINDINGS__2__CLIENT_AUTH_TYPE", "1")
    os.Setenv("SFTPGO_WEBDAVD__BINDINGS__2__PREFIX", "/dav2")
    t.Cleanup(func() {
        os.Unsetenv("SFTPGO_WEBDAVD__BINDINGS__1__ADDRESS")
        os.Unsetenv("SFTPGO_WEBDAVD__BINDINGS__1__PORT")
        os.Unsetenv("SFTPGO_WEBDAVD__BINDINGS__1__ENABLE_HTTPS")
        os.Unsetenv("SFTPGO_WEBDAVD__BINDINGS__1__TLS_CIPHER_SUITES")
        os.Unsetenv("SFTPGO_WEBDAVD__BINDINGS__1__PROXY_ALLOWED")
        os.Unsetenv("SFTPGO_WEBDAVD__BINDINGS__2__ADDRESS")
        os.Unsetenv("SFTPGO_WEBDAVD__BINDINGS__2__PORT")
        os.Unsetenv("SFTPGO_WEBDAVD__BINDINGS__2__ENABLE_HTTPS")
        os.Unsetenv("SFTPGO_WEBDAVD__BINDINGS__2__CLIENT_AUTH_TYPE")
        os.Unsetenv("SFTPGO_WEBDAVD__BINDINGS__2__PREFIX")
    })

    configDir := ".."
    err := config.LoadConfig(configDir, "")
    assert.NoError(t, err)
    bindings := config.GetWebDAVDConfig().Bindings
    require.Len(t, bindings, 3)
    require.Equal(t, 0, bindings[0].Port)
    require.Empty(t, bindings[0].Address)
    require.False(t, bindings[0].EnableHTTPS)
    require.Len(t, bindings[0].TLSCipherSuites, 0)
    require.Empty(t, bindings[0].Prefix)
    require.Equal(t, 8000, bindings[1].Port)
    require.Equal(t, "127.0.0.1", bindings[1].Address)
    require.False(t, bindings[1].EnableHTTPS)
    require.Equal(t, 0, bindings[1].ClientAuthType)
    require.Len(t, bindings[1].TLSCipherSuites, 1)
    require.Equal(t, "TLS_RSA_WITH_AES_128_CBC_SHA", bindings[1].TLSCipherSuites[0])
    require.Equal(t, "192.168.10.1", bindings[1].ProxyAllowed[0])
    require.Empty(t, bindings[1].Prefix)
    require.Equal(t, 9000, bindings[2].Port)
    require.Equal(t, "127.0.1.1", bindings[2].Address)
    require.True(t, bindings[2].EnableHTTPS)
    require.Equal(t, 1, bindings[2].ClientAuthType)
    require.Nil(t, bindings[2].TLSCipherSuites)
    require.Equal(t, "/dav2", bindings[2].Prefix)
}

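Note that the cipher suite value above is deliberately set with a trailing space ("TLS_RSA_WITH_AES_128_CBC_SHA ") and the assertion expects the trimmed string back, so comma-separated list values are evidently whitespace-trimmed on load. A minimal sketch of that handling (trimmedList is a hypothetical helper, not SFTPGo code):

package main

import (
    "fmt"
    "strings"
)

// trimmedList mirrors the whitespace handling the test above expects:
// split on commas, trim each entry, drop empty entries.
func trimmedList(raw string) []string {
    var out []string
    for _, v := range strings.Split(raw, ",") {
        if v = strings.TrimSpace(v); v != "" {
            out = append(out, v)
        }
    }
    return out
}

func main() {
    fmt.Println(trimmedList(" TLS_AES_256_GCM_SHA384 , TLS_CHACHA20_POLY1305_SHA256"))
    // [TLS_AES_256_GCM_SHA384 TLS_CHACHA20_POLY1305_SHA256]
}
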
func TestHTTPDBindingsFromEnv(t *testing.T) {
    reset()

    sockPath := filepath.Clean(os.TempDir())

    os.Setenv("SFTPGO_HTTPD__BINDINGS__0__ADDRESS", sockPath)
    os.Setenv("SFTPGO_HTTPD__BINDINGS__0__PORT", "0")
    os.Setenv("SFTPGO_HTTPD__BINDINGS__0__TLS_CIPHER_SUITES", " TLS_AES_128_GCM_SHA256")
    os.Setenv("SFTPGO_HTTPD__BINDINGS__1__ADDRESS", "127.0.0.1")
    os.Setenv("SFTPGO_HTTPD__BINDINGS__1__PORT", "8000")
    os.Setenv("SFTPGO_HTTPD__BINDINGS__1__ENABLE_HTTPS", "0")
    os.Setenv("SFTPGO_HTTPD__BINDINGS__2__ADDRESS", "127.0.1.1")
    os.Setenv("SFTPGO_HTTPD__BINDINGS__2__PORT", "9000")
    os.Setenv("SFTPGO_HTTPD__BINDINGS__2__ENABLE_WEB_ADMIN", "0")
    os.Setenv("SFTPGO_HTTPD__BINDINGS__2__ENABLE_WEB_CLIENT", "0")
    os.Setenv("SFTPGO_HTTPD__BINDINGS__2__ENABLE_HTTPS", "1")
    os.Setenv("SFTPGO_HTTPD__BINDINGS__2__CLIENT_AUTH_TYPE", "1")
    os.Setenv("SFTPGO_HTTPD__BINDINGS__2__TLS_CIPHER_SUITES", " TLS_AES_256_GCM_SHA384 , TLS_CHACHA20_POLY1305_SHA256")
    os.Setenv("SFTPGO_HTTPD__BINDINGS__2__PROXY_ALLOWED", " 192.168.9.1 , 172.16.25.0/24")
    t.Cleanup(func() {
        os.Unsetenv("SFTPGO_HTTPD__BINDINGS__0__ADDRESS")
        os.Unsetenv("SFTPGO_HTTPD__BINDINGS__0__PORT")
        os.Unsetenv("SFTPGO_HTTPD__BINDINGS__0__TLS_CIPHER_SUITES")
        os.Unsetenv("SFTPGO_HTTPD__BINDINGS__1__ADDRESS")
        os.Unsetenv("SFTPGO_HTTPD__BINDINGS__1__PORT")
        os.Unsetenv("SFTPGO_HTTPD__BINDINGS__1__ENABLE_HTTPS")
        os.Unsetenv("SFTPGO_HTTPD__BINDINGS__2__ADDRESS")
        os.Unsetenv("SFTPGO_HTTPD__BINDINGS__2__PORT")
        os.Unsetenv("SFTPGO_HTTPD__BINDINGS__2__ENABLE_HTTPS")
        os.Unsetenv("SFTPGO_HTTPD__BINDINGS__2__ENABLE_WEB_ADMIN")
        os.Unsetenv("SFTPGO_HTTPD__BINDINGS__2__ENABLE_WEB_CLIENT")
        os.Unsetenv("SFTPGO_HTTPD__BINDINGS__2__CLIENT_AUTH_TYPE")
        os.Unsetenv("SFTPGO_HTTPD__BINDINGS__2__TLS_CIPHER_SUITES")
        os.Unsetenv("SFTPGO_HTTPD__BINDINGS__2__PROXY_ALLOWED")
    })

    configDir := ".."
    err := config.LoadConfig(configDir, "")
    assert.NoError(t, err)
    bindings := config.GetHTTPDConfig().Bindings
    require.Len(t, bindings, 3)
    require.Equal(t, 0, bindings[0].Port)
    require.Equal(t, sockPath, bindings[0].Address)
    require.False(t, bindings[0].EnableHTTPS)
    require.True(t, bindings[0].EnableWebAdmin)
    require.True(t, bindings[0].EnableWebClient)
    require.Len(t, bindings[0].TLSCipherSuites, 1)
    require.Equal(t, "TLS_AES_128_GCM_SHA256", bindings[0].TLSCipherSuites[0])
    require.Equal(t, 8000, bindings[1].Port)
    require.Equal(t, "127.0.0.1", bindings[1].Address)
    require.False(t, bindings[1].EnableHTTPS)
    require.True(t, bindings[1].EnableWebAdmin)
    require.True(t, bindings[1].EnableWebClient)
    require.Nil(t, bindings[1].TLSCipherSuites)

    require.Equal(t, 9000, bindings[2].Port)
    require.Equal(t, "127.0.1.1", bindings[2].Address)
    require.True(t, bindings[2].EnableHTTPS)
    require.False(t, bindings[2].EnableWebAdmin)
    require.False(t, bindings[2].EnableWebClient)
    require.Equal(t, 1, bindings[2].ClientAuthType)
    require.Len(t, bindings[2].TLSCipherSuites, 2)
    require.Equal(t, "TLS_AES_256_GCM_SHA384", bindings[2].TLSCipherSuites[0])
    require.Equal(t, "TLS_CHACHA20_POLY1305_SHA256", bindings[2].TLSCipherSuites[1])
    require.Len(t, bindings[2].ProxyAllowed, 2)
    require.Equal(t, "192.168.9.1", bindings[2].ProxyAllowed[0])
    require.Equal(t, "172.16.25.0/24", bindings[2].ProxyAllowed[1])
}

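The PROXY_ALLOWED list above mixes a plain IP ("192.168.9.1") with a CIDR network ("172.16.25.0/24"), and the test only checks that the trimmed strings survive loading; validating such a list with the standard library would presumably look something like the sketch below (parseAllowed is a hypothetical helper, not SFTPGo's implementation):

package main

import (
    "fmt"
    "net"
    "strings"
)

// parseAllowed accepts entries that are either bare IPs or CIDR networks;
// bare IPs get a /32 (or /128 for IPv6) suffix before net.ParseCIDR.
func parseAllowed(entries []string) ([]*net.IPNet, error) {
    var nets []*net.IPNet
    for _, e := range entries {
        e = strings.TrimSpace(e)
        if !strings.Contains(e, "/") {
            if strings.Contains(e, ":") {
                e += "/128"
            } else {
                e += "/32"
            }
        }
        _, n, err := net.ParseCIDR(e)
        if err != nil {
            return nil, err
        }
        nets = append(nets, n)
    }
    return nets, nil
}

func main() {
    nets, err := parseAllowed([]string{" 192.168.9.1 ", "172.16.25.0/24"})
    fmt.Println(nets, err)
}
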
func TestHTTPClientCertificatesFromEnv(t *testing.T) {
    reset()

    configDir := ".."
    confName := tempConfigName + ".json"
    configFilePath := filepath.Join(configDir, confName)
    err := config.LoadConfig(configDir, "")
    assert.NoError(t, err)
    httpConf := config.GetHTTPConfig()
    httpConf.Certificates = append(httpConf.Certificates, httpclient.TLSKeyPair{
        Cert: "cert",
        Key:  "key",
    })
    c := make(map[string]httpclient.Config)
    c["http"] = httpConf
    jsonConf, err := json.Marshal(c)
    require.NoError(t, err)
    err = os.WriteFile(configFilePath, jsonConf, os.ModePerm)
    require.NoError(t, err)
    err = config.LoadConfig(configDir, confName)
    require.NoError(t, err)
    require.Len(t, config.GetHTTPConfig().Certificates, 1)
    require.Equal(t, "cert", config.GetHTTPConfig().Certificates[0].Cert)
    require.Equal(t, "key", config.GetHTTPConfig().Certificates[0].Key)

    os.Setenv("SFTPGO_HTTP__CERTIFICATES__0__CERT", "cert0")
    os.Setenv("SFTPGO_HTTP__CERTIFICATES__0__KEY", "key0")
    os.Setenv("SFTPGO_HTTP__CERTIFICATES__8__CERT", "cert8")
    os.Setenv("SFTPGO_HTTP__CERTIFICATES__9__CERT", "cert9")
    os.Setenv("SFTPGO_HTTP__CERTIFICATES__9__KEY", "key9")

    t.Cleanup(func() {
        os.Unsetenv("SFTPGO_HTTP__CERTIFICATES__0__CERT")
        os.Unsetenv("SFTPGO_HTTP__CERTIFICATES__0__KEY")
        os.Unsetenv("SFTPGO_HTTP__CERTIFICATES__8__CERT")
        os.Unsetenv("SFTPGO_HTTP__CERTIFICATES__9__CERT")
        os.Unsetenv("SFTPGO_HTTP__CERTIFICATES__9__KEY")
    })

    err = config.LoadConfig(configDir, confName)
    require.NoError(t, err)
    require.Len(t, config.GetHTTPConfig().Certificates, 2)
    require.Equal(t, "cert0", config.GetHTTPConfig().Certificates[0].Cert)
    require.Equal(t, "key0", config.GetHTTPConfig().Certificates[0].Key)
    require.Equal(t, "cert9", config.GetHTTPConfig().Certificates[1].Cert)
    require.Equal(t, "key9", config.GetHTTPConfig().Certificates[1].Key)

    err = os.Remove(configFilePath)
    assert.NoError(t, err)

    config.Init()

    err = config.LoadConfig(configDir, "")
    require.NoError(t, err)
    require.Len(t, config.GetHTTPConfig().Certificates, 2)
    require.Equal(t, "cert0", config.GetHTTPConfig().Certificates[0].Cert)
    require.Equal(t, "key0", config.GetHTTPConfig().Certificates[0].Key)
    require.Equal(t, "cert9", config.GetHTTPConfig().Certificates[1].Cert)
    require.Equal(t, "key9", config.GetHTTPConfig().Certificates[1].Key)
}

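Worth noticing in the test above: index 8 sets only a CERT with no matching KEY, and the loaded config still contains exactly two certificates (indexes 0 and 9), so an incomplete cert/key pair is evidently dropped. A hypothetical sketch of that probe-and-pair pattern (loadCertPairs is illustrative, not SFTPGo's code):

package main

import (
    "fmt"
    "os"
)

// loadCertPairs probes numbered env vars and keeps an entry only when
// both halves of the pair are set, mirroring what the test observes.
func loadCertPairs(maxIdx int) [][2]string {
    var pairs [][2]string
    for i := 0; i <= maxIdx; i++ {
        cert, okC := os.LookupEnv(fmt.Sprintf("SFTPGO_HTTP__CERTIFICATES__%d__CERT", i))
        key, okK := os.LookupEnv(fmt.Sprintf("SFTPGO_HTTP__CERTIFICATES__%d__KEY", i))
        if okC && okK {
            pairs = append(pairs, [2]string{cert, key})
        }
    }
    return pairs
}

func main() {
    os.Setenv("SFTPGO_HTTP__CERTIFICATES__0__CERT", "cert0")
    os.Setenv("SFTPGO_HTTP__CERTIFICATES__0__KEY", "key0")
    os.Setenv("SFTPGO_HTTP__CERTIFICATES__8__CERT", "cert8") // no key: skipped
    fmt.Println(loadCertPairs(9)) // [[cert0 key0]]
}
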
func TestHTTPClientHeadersFromEnv(t *testing.T) {
    reset()

    configDir := ".."
    confName := tempConfigName + ".json"
    configFilePath := filepath.Join(configDir, confName)
    err := config.LoadConfig(configDir, "")
    assert.NoError(t, err)
    httpConf := config.GetHTTPConfig()
    httpConf.Headers = append(httpConf.Headers, httpclient.Header{
        Key:   "key",
        Value: "value",
        URL:   "url",
    })
    c := make(map[string]httpclient.Config)
    c["http"] = httpConf
    jsonConf, err := json.Marshal(c)
    require.NoError(t, err)
    err = os.WriteFile(configFilePath, jsonConf, os.ModePerm)
    require.NoError(t, err)
    err = config.LoadConfig(configDir, confName)
    require.NoError(t, err)
    require.Len(t, config.GetHTTPConfig().Headers, 1)
    require.Equal(t, "key", config.GetHTTPConfig().Headers[0].Key)
    require.Equal(t, "value", config.GetHTTPConfig().Headers[0].Value)
    require.Equal(t, "url", config.GetHTTPConfig().Headers[0].URL)

    os.Setenv("SFTPGO_HTTP__HEADERS__0__KEY", "key0")
    os.Setenv("SFTPGO_HTTP__HEADERS__0__VALUE", "value0")
    os.Setenv("SFTPGO_HTTP__HEADERS__0__URL", "url0")
    os.Setenv("SFTPGO_HTTP__HEADERS__8__KEY", "key8")
    os.Setenv("SFTPGO_HTTP__HEADERS__9__KEY", "key9")
    os.Setenv("SFTPGO_HTTP__HEADERS__9__VALUE", "value9")
    os.Setenv("SFTPGO_HTTP__HEADERS__9__URL", "url9")

    t.Cleanup(func() {
        os.Unsetenv("SFTPGO_HTTP__HEADERS__0__KEY")
        os.Unsetenv("SFTPGO_HTTP__HEADERS__0__VALUE")
        os.Unsetenv("SFTPGO_HTTP__HEADERS__0__URL")
        os.Unsetenv("SFTPGO_HTTP__HEADERS__8__KEY")
        os.Unsetenv("SFTPGO_HTTP__HEADERS__9__KEY")
        os.Unsetenv("SFTPGO_HTTP__HEADERS__9__VALUE")
        os.Unsetenv("SFTPGO_HTTP__HEADERS__9__URL")
    })

    err = config.LoadConfig(configDir, confName)
    require.NoError(t, err)
    require.Len(t, config.GetHTTPConfig().Headers, 2)
    require.Equal(t, "key0", config.GetHTTPConfig().Headers[0].Key)
    require.Equal(t, "value0", config.GetHTTPConfig().Headers[0].Value)
    require.Equal(t, "url0", config.GetHTTPConfig().Headers[0].URL)
    require.Equal(t, "key9", config.GetHTTPConfig().Headers[1].Key)
    require.Equal(t, "value9", config.GetHTTPConfig().Headers[1].Value)
    require.Equal(t, "url9", config.GetHTTPConfig().Headers[1].URL)

    err = os.Remove(configFilePath)
    assert.NoError(t, err)

    config.Init()

    err = config.LoadConfig(configDir, "")
    require.NoError(t, err)
    require.Len(t, config.GetHTTPConfig().Headers, 2)
    require.Equal(t, "key0", config.GetHTTPConfig().Headers[0].Key)
    require.Equal(t, "value0", config.GetHTTPConfig().Headers[0].Value)
    require.Equal(t, "url0", config.GetHTTPConfig().Headers[0].URL)
    require.Equal(t, "key9", config.GetHTTPConfig().Headers[1].Key)
    require.Equal(t, "value9", config.GetHTTPConfig().Headers[1].Value)
    require.Equal(t, "url9", config.GetHTTPConfig().Headers[1].URL)
}

func TestConfigFromEnv(t *testing.T) {
    reset()

    os.Setenv("SFTPGO_SFTPD__BINDINGS__0__ADDRESS", "127.0.0.1")
    os.Setenv("SFTPGO_WEBDAVD__BINDINGS__0__PORT", "12000")
    os.Setenv("SFTPGO_DATA_PROVIDER__PASSWORD_HASHING__ARGON2_OPTIONS__ITERATIONS", "41")
    os.Setenv("SFTPGO_DATA_PROVIDER__POOL_SIZE", "10")
    os.Setenv("SFTPGO_DATA_PROVIDER__ACTIONS__EXECUTE_ON", "add")
    os.Setenv("SFTPGO_KMS__SECRETS__URL", "local")
    os.Setenv("SFTPGO_KMS__SECRETS__MASTER_KEY_PATH", "path")
    os.Setenv("SFTPGO_TELEMETRY__TLS_CIPHER_SUITES", "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA")
    t.Cleanup(func() {
        os.Unsetenv("SFTPGO_SFTPD__BINDINGS__0__ADDRESS")
        os.Unsetenv("SFTPGO_WEBDAVD__BINDINGS__0__PORT")
        os.Unsetenv("SFTPGO_DATA_PROVIDER__PASSWORD_HASHING__ARGON2_OPTIONS__ITERATIONS")
        os.Unsetenv("SFTPGO_DATA_PROVIDER__POOL_SIZE")
        os.Unsetenv("SFTPGO_DATA_PROVIDER__ACTIONS__EXECUTE_ON")
        os.Unsetenv("SFTPGO_KMS__SECRETS__URL")
        os.Unsetenv("SFTPGO_KMS__SECRETS__MASTER_KEY_PATH")
        os.Unsetenv("SFTPGO_TELEMETRY__TLS_CIPHER_SUITES")
    })
    err := config.LoadConfig(".", "invalid config")
    assert.NoError(t, err)
    sftpdConfig := config.GetSFTPDConfig()
    assert.Equal(t, "127.0.0.1", sftpdConfig.Bindings[0].Address)
    assert.Equal(t, 12000, config.GetWebDAVDConfig().Bindings[0].Port)
    dataProviderConf := config.GetProviderConf()
    assert.Equal(t, uint32(41), dataProviderConf.PasswordHashing.Argon2Options.Iterations)
    assert.Equal(t, 10, dataProviderConf.PoolSize)
    assert.Len(t, dataProviderConf.Actions.ExecuteOn, 1)
    assert.Contains(t, dataProviderConf.Actions.ExecuteOn, "add")
    kmsConfig := config.GetKMSConfig()
    assert.Equal(t, "local", kmsConfig.Secrets.URL)
    assert.Equal(t, "path", kmsConfig.Secrets.MasterKeyPath)
    telemetryConfig := config.GetTelemetryConfig()
    assert.Len(t, telemetryConfig.TLSCipherSuites, 2)
    assert.Equal(t, "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", telemetryConfig.TLSCipherSuites[0])
    assert.Equal(t, "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", telemetryConfig.TLSCipherSuites[1])
}

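SFTPGo's config loading is built on spf13/viper, and the prefix-plus-replacer pattern above is the standard viper idiom for scalar keys; the array-style bindings in the earlier tests need extra code on top of it. A minimal standalone sketch under that assumption (the key name is illustrative):

package main

import (
    "fmt"
    "os"
    "strings"

    "github.com/spf13/viper"
)

func main() {
    v := viper.New()
    v.SetEnvPrefix("sftpgo")
    // map config keys like "data_provider.pool_size" to
    // SFTPGO_DATA_PROVIDER__POOL_SIZE
    v.SetEnvKeyReplacer(strings.NewReplacer(".", "__"))
    v.AutomaticEnv()
    v.SetDefault("data_provider.pool_size", 0)

    os.Setenv("SFTPGO_DATA_PROVIDER__POOL_SIZE", "10")
    fmt.Println(v.GetInt("data_provider.pool_size")) // 10
}
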
crowdin.yml (new file, 6 lines)
@@ -0,0 +1,6 @@
project_id_env: CROWDIN_PROJECT_ID
api_token_env: CROWDIN_PERSONAL_TOKEN
files:
  - source: /static/locales/en/translation.json
    translation: /static/locales/%two_letters_code%/%original_file_name%
    type: i18next_json

@@ -1,253 +0,0 @@
package dataprovider

import (
    "crypto/sha256"
    "encoding/base64"
    "errors"
    "fmt"
    "net"
    "regexp"
    "strings"

    "github.com/alexedwards/argon2id"
    "golang.org/x/crypto/bcrypt"

    "github.com/drakkan/sftpgo/utils"
)

// Available permissions for SFTPGo admins
const (
    PermAdminAny              = "*"
    PermAdminAddUsers         = "add_users"
    PermAdminChangeUsers      = "edit_users"
    PermAdminDeleteUsers      = "del_users"
    PermAdminViewUsers        = "view_users"
    PermAdminViewConnections  = "view_conns"
    PermAdminCloseConnections = "close_conns"
    PermAdminViewServerStatus = "view_status"
    PermAdminManageAdmins     = "manage_admins"
    PermAdminQuotaScans       = "quota_scans"
    PermAdminManageSystem     = "manage_system"
    PermAdminManageDefender   = "manage_defender"
    PermAdminViewDefender     = "view_defender"
)

var (
emailRegex = regexp.MustCompile("^(?:(?:(?:(?:[a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(?:\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|(?:(?:\\x22)(?:(?:(?:(?:\\x20|\\x09)*(?:\\x0d\\x0a))?(?:\\x20|\\x09)+)?(?:(?:[\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(?:(?:(?:\\x20|\\x09)*(?:\\x0d\\x0a))?(\\x20|\\x09)+)?(?:\\x22))))@(?:(?:(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])(?:[a-zA-Z]|\\d|-|\\.|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(?:(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])(?:[a-zA-Z]|\\d|-|\\.|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$")
    validAdminPerms = []string{PermAdminAny, PermAdminAddUsers, PermAdminChangeUsers, PermAdminDeleteUsers,
        PermAdminViewUsers, PermAdminViewConnections, PermAdminCloseConnections, PermAdminViewServerStatus,
        PermAdminManageAdmins, PermAdminQuotaScans, PermAdminManageSystem, PermAdminManageDefender,
        PermAdminViewDefender}
)

// AdminFilters defines additional restrictions for SFTPGo admins
// TODO: rename to AdminOptions in v3
type AdminFilters struct {
    // only clients connecting from these IP/Mask are allowed.
    // IP/Mask must be in CIDR notation as defined in RFC 4632 and RFC 4291
    // for example "192.0.2.0/24" or "2001:db8::/32"
    AllowList []string `json:"allow_list,omitempty"`
}

// Admin defines a SFTPGo admin
type Admin struct {
    // Database unique identifier
    ID int64 `json:"id"`
    // 1 enabled, 0 disabled (login is not allowed)
    Status int `json:"status"`
    // Username
    Username       string       `json:"username"`
    Password       string       `json:"password,omitempty"`
    Email          string       `json:"email"`
    Permissions    []string     `json:"permissions"`
    Filters        AdminFilters `json:"filters,omitempty"`
    Description    string       `json:"description,omitempty"`
    AdditionalInfo string       `json:"additional_info,omitempty"`
}

func (a *Admin) checkPassword() error {
    if a.Password != "" && !utils.IsStringPrefixInSlice(a.Password, internalHashPwdPrefixes) {
        if config.PasswordHashing.Algo == HashingAlgoBcrypt {
            pwd, err := bcrypt.GenerateFromPassword([]byte(a.Password), config.PasswordHashing.BcryptOptions.Cost)
            if err != nil {
                return err
            }
            a.Password = string(pwd)
        } else {
            pwd, err := argon2id.CreateHash(a.Password, argon2Params)
            if err != nil {
                return err
            }
            a.Password = pwd
        }
    }
    return nil
}
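checkPassword above hashes a plaintext admin password with either bcrypt or argon2id, depending on the configured algorithm. A standalone sketch of the bcrypt path using the same golang.org/x/crypto/bcrypt library (here with the library default cost; SFTPGo takes the cost from its config):

package main

import (
    "fmt"

    "golang.org/x/crypto/bcrypt"
)

func main() {
    // hash the password; the result embeds the salt and cost
    hash, err := bcrypt.GenerateFromPassword([]byte("secret"), bcrypt.DefaultCost)
    if err != nil {
        panic(err)
    }
    // verification: a nil error means the password matches
    fmt.Println(bcrypt.CompareHashAndPassword(hash, []byte("secret")) == nil) // true
    fmt.Println(bcrypt.CompareHashAndPassword(hash, []byte("wrong")) == nil)  // false
}
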
func (a *Admin) validate() error {
    if a.Username == "" {
        return &ValidationError{err: "username is mandatory"}
    }
    if a.Password == "" {
        return &ValidationError{err: "please set a password"}
    }
    if !config.SkipNaturalKeysValidation && !usernameRegex.MatchString(a.Username) {
        return &ValidationError{err: fmt.Sprintf("username %#v is not valid, the following characters are allowed: a-zA-Z0-9-_.~", a.Username)}
    }
    if err := a.checkPassword(); err != nil {
        return err
    }
    a.Permissions = utils.RemoveDuplicates(a.Permissions)
    if len(a.Permissions) == 0 {
        return &ValidationError{err: "please grant some permissions to this admin"}
    }
    if utils.IsStringInSlice(PermAdminAny, a.Permissions) {
        a.Permissions = []string{PermAdminAny}
    }
    for _, perm := range a.Permissions {
        if !utils.IsStringInSlice(perm, validAdminPerms) {
            return &ValidationError{err: fmt.Sprintf("invalid permission: %#v", perm)}
        }
    }
    if a.Email != "" && !emailRegex.MatchString(a.Email) {
        return &ValidationError{err: fmt.Sprintf("email %#v is not valid", a.Email)}
    }
    for _, IPMask := range a.Filters.AllowList {
        _, _, err := net.ParseCIDR(IPMask)
        if err != nil {
            return &ValidationError{err: fmt.Sprintf("could not parse allow list entry %#v : %v", IPMask, err)}
        }
    }

    return nil
}

// CheckPassword verifies the admin password
func (a *Admin) CheckPassword(password string) (bool, error) {
    if strings.HasPrefix(a.Password, bcryptPwdPrefix) {
        if err := bcrypt.CompareHashAndPassword([]byte(a.Password), []byte(password)); err != nil {
            return false, ErrInvalidCredentials
        }
        return true, nil
    }
    return argon2id.ComparePasswordAndHash(password, a.Password)
}

// CanLoginFromIP returns true if login from the given IP is allowed
func (a *Admin) CanLoginFromIP(ip string) bool {
    if len(a.Filters.AllowList) == 0 {
        return true
    }
    parsedIP := net.ParseIP(ip)
    if parsedIP == nil {
        return len(a.Filters.AllowList) == 0
    }

    for _, ipMask := range a.Filters.AllowList {
        _, network, err := net.ParseCIDR(ipMask)
        if err != nil {
            continue
        }
        if network.Contains(parsedIP) {
            return true
        }
    }
    return false
}
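The allow-list check in CanLoginFromIP is the standard net.ParseCIDR / Contains pattern from the standard library. The same logic as a self-contained sketch:

package main

import (
    "fmt"
    "net"
)

// allowed reports whether ip falls inside any of the CIDR masks,
// mirroring the CanLoginFromIP logic above.
func allowed(ip string, masks []string) bool {
    if len(masks) == 0 {
        return true // an empty allow list means no restriction, as above
    }
    parsed := net.ParseIP(ip)
    if parsed == nil {
        return false
    }
    for _, m := range masks {
        _, network, err := net.ParseCIDR(m)
        if err != nil {
            continue // invalid entries are skipped, as above
        }
        if network.Contains(parsed) {
            return true
        }
    }
    return false
}

func main() {
    fmt.Println(allowed("192.0.2.10", []string{"192.0.2.0/24"}))   // true
    fmt.Println(allowed("198.51.100.1", []string{"192.0.2.0/24"})) // false
}
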
func (a *Admin) checkUserAndPass(password, ip string) error {
    if a.Status != 1 {
        return fmt.Errorf("admin %#v is disabled", a.Username)
    }
    if a.Password == "" || password == "" {
        return errors.New("credentials cannot be null or empty")
    }
    match, err := a.CheckPassword(password)
    if err != nil {
        return err
    }
    if !match {
        return ErrInvalidCredentials
    }
    if !a.CanLoginFromIP(ip) {
        return fmt.Errorf("login from IP %v not allowed", ip)
    }
    return nil
}

// HideConfidentialData hides admin confidential data
func (a *Admin) HideConfidentialData() {
    a.Password = ""
}

// HasPermission returns true if the admin has the specified permission
func (a *Admin) HasPermission(perm string) bool {
    if utils.IsStringInSlice(PermAdminAny, a.Permissions) {
        return true
    }
    return utils.IsStringInSlice(perm, a.Permissions)
}

// GetPermissionsAsString returns the admin permissions as a comma separated string
func (a *Admin) GetPermissionsAsString() string {
    return strings.Join(a.Permissions, ", ")
}

// GetAllowedIPAsString returns the allowed IP/Mask list as a comma separated string
func (a *Admin) GetAllowedIPAsString() string {
    return strings.Join(a.Filters.AllowList, ",")
}

// GetValidPerms returns the allowed admin permissions
func (a *Admin) GetValidPerms() []string {
    return validAdminPerms
}

// GetInfoString returns admin's info as string.
func (a *Admin) GetInfoString() string {
    var result string
    if a.Email != "" {
        result = fmt.Sprintf("Email: %v. ", a.Email)
    }
    if len(a.Filters.AllowList) > 0 {
        result += fmt.Sprintf("Allowed IP/Mask: %v. ", len(a.Filters.AllowList))
    }
    return result
}

// GetSignature returns a signature for this admin.
// It could change after an update
func (a *Admin) GetSignature() string {
    data := []byte(a.Username)
    data = append(data, []byte(a.Password)...)
    signature := sha256.Sum256(data)
    return base64.StdEncoding.EncodeToString(signature[:])
}
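GetSignature concatenates the username and the stored password hash and returns the base64-encoded SHA-256 of the result, so the signature changes whenever either one changes (which, per the comment, is why it "could change after an update"). The same computation, standalone (the input strings here are illustrative):

package main

import (
    "crypto/sha256"
    "encoding/base64"
    "fmt"
)

func main() {
    data := append([]byte("admin"), []byte("storedPasswordHash")...)
    sum := sha256.Sum256(data) // [32]byte
    fmt.Println(base64.StdEncoding.EncodeToString(sum[:]))
}
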
func (a *Admin) getACopy() Admin {
    permissions := make([]string, len(a.Permissions))
    copy(permissions, a.Permissions)
    filters := AdminFilters{}
    filters.AllowList = make([]string, len(a.Filters.AllowList))
    copy(filters.AllowList, a.Filters.AllowList)

    return Admin{
        ID:             a.ID,
        Status:         a.Status,
        Username:       a.Username,
        Password:       a.Password,
        Email:          a.Email,
        Permissions:    permissions,
        Filters:        filters,
        AdditionalInfo: a.AdditionalInfo,
        Description:    a.Description,
    }
}

// setDefaults sets the appropriate values for the default admin
func (a *Admin) setDefaults() {
    a.Username = "admin"
    a.Password = "password"
    a.Status = 1
    a.Permissions = []string{PermAdminAny}
}

dataprovider/bolt.go (1375 changes)
@@ -1,17 +0,0 @@
// +build nobolt

package dataprovider

import (
    "errors"

    "github.com/drakkan/sftpgo/version"
)

func init() {
    version.AddFeature("-bolt")
}

func initializeBoltProvider(basePath string) error {
    return errors.New("bolt disabled at build time")
}
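The stub above is compiled only when the nobolt build tag is set: it replaces the real bolt provider with one that always errors, and registers "-bolt" in the version's feature list. The enabled counterpart presumably carries the inverse tag and the positive flag; the sketch below is an assumption about that counterpart's layout, not a quote of the deleted file:

// +build !nobolt

package dataprovider

import "github.com/drakkan/sftpgo/version"

func init() {
    // assumption: the bolt-backed implementation registers the positive flag
    version.AddFeature("+bolt")
}
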
@@ -1,62 +0,0 @@
package dataprovider

import (
    "sync"
)

var cachedPasswords passwordsCache

func init() {
    cachedPasswords = passwordsCache{
        cache: make(map[string]string),
    }
}

type passwordsCache struct {
    sync.RWMutex
    cache map[string]string
}

func (c *passwordsCache) Add(username, password string) {
    if !config.PasswordCaching || username == "" || password == "" {
        return
    }

    c.Lock()
    defer c.Unlock()

    c.cache[username] = password
}

func (c *passwordsCache) Remove(username string) {
    if !config.PasswordCaching {
        return
    }

    c.Lock()
    defer c.Unlock()

    delete(c.cache, username)
}

// Check returns whether the user is found and whether the password matches
func (c *passwordsCache) Check(username, password string) (bool, bool) {
    if username == "" || password == "" {
        return false, false
    }

    c.RLock()
    defer c.RUnlock()

    pwd, ok := c.cache[username]
    if !ok {
        return false, false
    }

    return true, pwd == password
}

// CheckCachedPassword is a utility method used only in test cases
func CheckCachedPassword(username, password string) (bool, bool) {
    return cachedPasswords.Check(username, password)
}
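Check deliberately returns two booleans, found and match, so a caller can distinguish a cache miss (fall back to full hash verification) from a cached-but-wrong password. A sketch of how a caller might consume that pair via the exported test helper (the verify wrapper and the fallback are hypothetical; only CheckCachedPassword is from the code above):

package main

import "github.com/drakkan/sftpgo/dataprovider"

func verify(username, password string) bool {
    found, match := dataprovider.CheckCachedPassword(username, password)
    if found {
        return match // cache hit: skip the expensive bcrypt/argon2 check
    }
    // cache miss: fall back to full credential verification (elided here)
    return false
}

func main() {
    _ = verify("alice", "secret")
}
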
@@ -1,118 +0,0 @@
package dataprovider

import (
    "github.com/drakkan/sftpgo/kms"
    "github.com/drakkan/sftpgo/vfs"
)

type compatAzBlobFsConfigV9 struct {
    Container         string      `json:"container,omitempty"`
    AccountName       string      `json:"account_name,omitempty"`
    AccountKey        *kms.Secret `json:"account_key,omitempty"`
    Endpoint          string      `json:"endpoint,omitempty"`
    SASURL            string      `json:"sas_url,omitempty"`
    KeyPrefix         string      `json:"key_prefix,omitempty"`
    UploadPartSize    int64       `json:"upload_part_size,omitempty"`
    UploadConcurrency int         `json:"upload_concurrency,omitempty"`
    UseEmulator       bool        `json:"use_emulator,omitempty"`
    AccessTier        string      `json:"access_tier,omitempty"`
}

type compatFilesystemV9 struct {
    Provider     vfs.FilesystemProvider `json:"provider"`
    S3Config     vfs.S3FsConfig         `json:"s3config,omitempty"`
    GCSConfig    vfs.GCSFsConfig        `json:"gcsconfig,omitempty"`
    AzBlobConfig compatAzBlobFsConfigV9 `json:"azblobconfig,omitempty"`
    CryptConfig  vfs.CryptFsConfig      `json:"cryptconfig,omitempty"`
    SFTPConfig   vfs.SFTPFsConfig       `json:"sftpconfig,omitempty"`
}

type compatBaseFolderV9 struct {
    ID              int64              `json:"id"`
    Name            string             `json:"name"`
    MappedPath      string             `json:"mapped_path,omitempty"`
    Description     string             `json:"description,omitempty"`
    UsedQuotaSize   int64              `json:"used_quota_size"`
    UsedQuotaFiles  int                `json:"used_quota_files"`
    LastQuotaUpdate int64              `json:"last_quota_update"`
    Users           []string           `json:"users,omitempty"`
    FsConfig        compatFilesystemV9 `json:"filesystem"`
}

type compatFolderV9 struct {
    compatBaseFolderV9
    VirtualPath string `json:"virtual_path"`
    QuotaSize   int64  `json:"quota_size"`
    QuotaFiles  int    `json:"quota_files"`
}

type compatUserV9 struct {
    ID       int64              `json:"id"`
    Username string             `json:"username"`
    FsConfig compatFilesystemV9 `json:"filesystem"`
}

func convertFsConfigFromV9(compatFs compatFilesystemV9, aead string) (vfs.Filesystem, error) {
    fsConfig := vfs.Filesystem{
        Provider:    compatFs.Provider,
        S3Config:    compatFs.S3Config,
        GCSConfig:   compatFs.GCSConfig,
        CryptConfig: compatFs.CryptConfig,
        SFTPConfig:  compatFs.SFTPConfig,
    }
    azSASURL := kms.NewEmptySecret()
    if compatFs.Provider == vfs.AzureBlobFilesystemProvider && compatFs.AzBlobConfig.SASURL != "" {
        azSASURL = kms.NewPlainSecret(compatFs.AzBlobConfig.SASURL)
    }
    if compatFs.AzBlobConfig.AccountKey == nil {
        compatFs.AzBlobConfig.AccountKey = kms.NewEmptySecret()
    }
    fsConfig.AzBlobConfig = vfs.AzBlobFsConfig{
        Container:         compatFs.AzBlobConfig.Container,
        AccountName:       compatFs.AzBlobConfig.AccountName,
        AccountKey:        compatFs.AzBlobConfig.AccountKey,
        Endpoint:          compatFs.AzBlobConfig.Endpoint,
        SASURL:            azSASURL,
        KeyPrefix:         compatFs.AzBlobConfig.KeyPrefix,
        UploadPartSize:    compatFs.AzBlobConfig.UploadPartSize,
        UploadConcurrency: compatFs.AzBlobConfig.UploadConcurrency,
        UseEmulator:       compatFs.AzBlobConfig.UseEmulator,
        AccessTier:        compatFs.AzBlobConfig.AccessTier,
    }
    err := fsConfig.AzBlobConfig.EncryptCredentials(aead)
    return fsConfig, err
}

func convertFsConfigToV9(fs vfs.Filesystem) (compatFilesystemV9, error) {
    azSASURL := ""
    if fs.Provider == vfs.AzureBlobFilesystemProvider {
        if fs.AzBlobConfig.SASURL != nil && fs.AzBlobConfig.SASURL.IsEncrypted() {
            err := fs.AzBlobConfig.SASURL.Decrypt()
            if err != nil {
                return compatFilesystemV9{}, err
            }
            azSASURL = fs.AzBlobConfig.SASURL.GetPayload()
        }
    }
    azFsCompat := compatAzBlobFsConfigV9{
        Container:         fs.AzBlobConfig.Container,
        AccountName:       fs.AzBlobConfig.AccountName,
        AccountKey:        fs.AzBlobConfig.AccountKey,
        Endpoint:          fs.AzBlobConfig.Endpoint,
        SASURL:            azSASURL,
        KeyPrefix:         fs.AzBlobConfig.KeyPrefix,
        UploadPartSize:    fs.AzBlobConfig.UploadPartSize,
        UploadConcurrency: fs.AzBlobConfig.UploadConcurrency,
        UseEmulator:       fs.AzBlobConfig.UseEmulator,
        AccessTier:        fs.AzBlobConfig.AccessTier,
    }
    fsV9 := compatFilesystemV9{
        Provider:     fs.Provider,
        S3Config:     fs.S3Config,
        GCSConfig:    fs.GCSConfig,
        AzBlobConfig: azFsCompat,
        CryptConfig:  fs.CryptConfig,
        SFTPConfig:   fs.SFTPConfig,
    }
    return fsV9, nil
}
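The point of this compat layer: in the v9 schema the Azure SAS URL was a plain JSON string, while the current schema stores it as a kms.Secret, so convertFsConfigFromV9 wraps the legacy value with kms.NewPlainSecret and immediately seals it via EncryptCredentials, and convertFsConfigToV9 decrypts it back to a plain string for downgrades. A small standalone rendering of the upgrade step, using only the kms calls shown above (the wrapper function and sample URL are hypothetical):

package main

import (
    "fmt"

    "github.com/drakkan/sftpgo/kms"
)

// upgradeSASURL mirrors the conversion above: an empty legacy value stays an
// empty secret, a non-empty one becomes a plain secret awaiting encryption.
func upgradeSASURL(legacy string) *kms.Secret {
    if legacy == "" {
        return kms.NewEmptySecret()
    }
    return kms.NewPlainSecret(legacy)
}

func main() {
    s := upgradeSASURL("https://example.invalid/container?sig=xyz")
    fmt.Println(s.GetPayload())
}
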
@@ -1,941 +0,0 @@
package dataprovider

import (
    "crypto/x509"
    "errors"
    "fmt"
    "os"
    "path/filepath"
    "sort"
    "sync"
    "time"

    "github.com/drakkan/sftpgo/logger"
    "github.com/drakkan/sftpgo/utils"
    "github.com/drakkan/sftpgo/vfs"
)

var (
    errMemoryProviderClosed = errors.New("memory provider is closed")
)

type memoryProviderHandle struct {
    // configuration file to use for loading users
    configFile string
    sync.Mutex
    isClosed bool
    // slice with ordered usernames
    usernames []string
    // map for users, username is the key
    users map[string]User
    // map for virtual folders, folder name is the key
    vfolders map[string]vfs.BaseVirtualFolder
    // slice with ordered folder names
    vfoldersNames []string
    // map for admins, username is the key
    admins map[string]Admin
    // slice with ordered admin usernames
    adminsUsernames []string
}

// MemoryProvider defines the auth provider for an in-memory store
type MemoryProvider struct {
    dbHandle *memoryProviderHandle
}

func initializeMemoryProvider(basePath string) {
    configFile := ""
    if utils.IsFileInputValid(config.Name) {
        configFile = config.Name
        if !filepath.IsAbs(configFile) {
            configFile = filepath.Join(basePath, configFile)
        }
    }
    provider = &MemoryProvider{
        dbHandle: &memoryProviderHandle{
            isClosed:        false,
            usernames:       []string{},
            users:           make(map[string]User),
            vfolders:        make(map[string]vfs.BaseVirtualFolder),
            vfoldersNames:   []string{},
            admins:          make(map[string]Admin),
            adminsUsernames: []string{},
            configFile:      configFile,
        },
    }
    if err := provider.reloadConfig(); err != nil {
        logger.Error(logSender, "", "unable to load initial data: %v", err)
        logger.ErrorToConsole("unable to load initial data: %v", err)
    }
}
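The handle above pairs each map (O(1) lookup by name) with a sorted slice of names, so listings are deterministic and pageable. A generic sketch of that layout (the miniStore type and its contents are illustrative, not SFTPGo code):

package main

import (
    "fmt"
    "sort"
)

// miniStore mirrors the memory provider's layout: a map for lookups plus
// a sorted name slice that keeps iteration order stable.
type miniStore struct {
    names []string
    items map[string]int
}

func (s *miniStore) add(name string, v int) {
    if _, ok := s.items[name]; !ok {
        s.names = append(s.names, name)
        sort.Strings(s.names)
    }
    s.items[name] = v
}

func main() {
    s := &miniStore{items: make(map[string]int)}
    s.add("bob", 2)
    s.add("alice", 1)
    fmt.Println(s.names) // [alice bob]
}
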
func (p *MemoryProvider) checkAvailability() error {
    p.dbHandle.Lock()
    defer p.dbHandle.Unlock()
    if p.dbHandle.isClosed {
        return errMemoryProviderClosed
    }
    return nil
}

func (p *MemoryProvider) close() error {
    p.dbHandle.Lock()
    defer p.dbHandle.Unlock()
    if p.dbHandle.isClosed {
        return errMemoryProviderClosed
    }
    p.dbHandle.isClosed = true
    return nil
}

func (p *MemoryProvider) validateUserAndTLSCert(username, protocol string, tlsCert *x509.Certificate) (User, error) {
    var user User
    if tlsCert == nil {
        return user, errors.New("TLS certificate cannot be null or empty")
    }
    user, err := p.userExists(username)
    if err != nil {
        providerLog(logger.LevelWarn, "error authenticating user %#v: %v", username, err)
        return user, err
    }
    return checkUserAndTLSCertificate(&user, protocol, tlsCert)
}

func (p *MemoryProvider) validateUserAndPass(username, password, ip, protocol string) (User, error) {
    var user User
    if password == "" {
        return user, errors.New("credentials cannot be null or empty")
    }
    user, err := p.userExists(username)
    if err != nil {
        providerLog(logger.LevelWarn, "error authenticating user %#v: %v", username, err)
        return user, err
    }
    return checkUserAndPass(&user, password, ip, protocol)
}

func (p *MemoryProvider) validateUserAndPubKey(username string, pubKey []byte) (User, string, error) {
    var user User
    if len(pubKey) == 0 {
        return user, "", errors.New("credentials cannot be null or empty")
    }
    user, err := p.userExists(username)
    if err != nil {
        providerLog(logger.LevelWarn, "error authenticating user %#v: %v", username, err)
        return user, "", err
    }
    return checkUserAndPubKey(&user, pubKey)
}

func (p *MemoryProvider) validateAdminAndPass(username, password, ip string) (Admin, error) {
    admin, err := p.adminExists(username)
    if err != nil {
        providerLog(logger.LevelWarn, "error authenticating admin %#v: %v", username, err)
        return admin, ErrInvalidCredentials
    }
    err = admin.checkUserAndPass(password, ip)
    return admin, err
}

func (p *MemoryProvider) updateLastLogin(username string) error {
    p.dbHandle.Lock()
    defer p.dbHandle.Unlock()
    if p.dbHandle.isClosed {
        return errMemoryProviderClosed
    }
    user, err := p.userExistsInternal(username)
    if err != nil {
        return err
    }
    user.LastLogin = utils.GetTimeAsMsSinceEpoch(time.Now())
    p.dbHandle.users[user.Username] = user
    return nil
}

func (p *MemoryProvider) updateQuota(username string, filesAdd int, sizeAdd int64, reset bool) error {
    p.dbHandle.Lock()
    defer p.dbHandle.Unlock()
    if p.dbHandle.isClosed {
        return errMemoryProviderClosed
    }
    user, err := p.userExistsInternal(username)
    if err != nil {
        providerLog(logger.LevelWarn, "unable to update quota for user %#v error: %v", username, err)
        return err
    }
    if reset {
        user.UsedQuotaSize = sizeAdd
        user.UsedQuotaFiles = filesAdd
    } else {
        user.UsedQuotaSize += sizeAdd
        user.UsedQuotaFiles += filesAdd
    }
    user.LastQuotaUpdate = utils.GetTimeAsMsSinceEpoch(time.Now())
    providerLog(logger.LevelDebug, "quota updated for user %#v, files increment: %v size increment: %v is reset? %v",
        username, filesAdd, sizeAdd, reset)
    p.dbHandle.users[user.Username] = user
    return nil
}

func (p *MemoryProvider) getUsedQuota(username string) (int, int64, error) {
    p.dbHandle.Lock()
    defer p.dbHandle.Unlock()
    if p.dbHandle.isClosed {
        return 0, 0, errMemoryProviderClosed
    }
    user, err := p.userExistsInternal(username)
    if err != nil {
        providerLog(logger.LevelWarn, "unable to get quota for user %#v error: %v", username, err)
        return 0, 0, err
    }
    return user.UsedQuotaFiles, user.UsedQuotaSize, err
}

func (p *MemoryProvider) addUser(user *User) error {
    // we can query virtual folders while validating a user,
    // so we have to check without holding the lock
    err := ValidateUser(user)
    if err != nil {
        return err
    }

    p.dbHandle.Lock()
    defer p.dbHandle.Unlock()
    if p.dbHandle.isClosed {
        return errMemoryProviderClosed
    }

    _, err = p.userExistsInternal(user.Username)
    if err == nil {
        return fmt.Errorf("username %#v already exists", user.Username)
    }
    user.ID = p.getNextID()
    user.LastQuotaUpdate = 0
    user.UsedQuotaSize = 0
    user.UsedQuotaFiles = 0
    user.LastLogin = 0
    user.VirtualFolders = p.joinVirtualFoldersFields(user)
    p.dbHandle.users[user.Username] = user.getACopy()
    p.dbHandle.usernames = append(p.dbHandle.usernames, user.Username)
    sort.Strings(p.dbHandle.usernames)
    return nil
}

func (p *MemoryProvider) updateUser(user *User) error {
    // we can query virtual folders while validating a user,
    // so we have to check without holding the lock
    err := ValidateUser(user)
    if err != nil {
        return err
    }

    p.dbHandle.Lock()
    defer p.dbHandle.Unlock()
    if p.dbHandle.isClosed {
        return errMemoryProviderClosed
    }

    u, err := p.userExistsInternal(user.Username)
    if err != nil {
        return err
    }
    for _, oldFolder := range u.VirtualFolders {
        p.removeUserFromFolderMapping(oldFolder.Name, u.Username)
    }
    user.VirtualFolders = p.joinVirtualFoldersFields(user)
    user.LastQuotaUpdate = u.LastQuotaUpdate
    user.UsedQuotaSize = u.UsedQuotaSize
    user.UsedQuotaFiles = u.UsedQuotaFiles
    user.LastLogin = u.LastLogin
    user.ID = u.ID
    // the pre-login and external auth hooks will use the passed *user, so save a copy
    p.dbHandle.users[user.Username] = user.getACopy()
    return nil
}

func (p *MemoryProvider) deleteUser(user *User) error {
    p.dbHandle.Lock()
    defer p.dbHandle.Unlock()
    if p.dbHandle.isClosed {
        return errMemoryProviderClosed
    }
    u, err := p.userExistsInternal(user.Username)
    if err != nil {
        return err
    }
    for _, oldFolder := range u.VirtualFolders {
        p.removeUserFromFolderMapping(oldFolder.Name, u.Username)
    }
    delete(p.dbHandle.users, user.Username)
    // this could be more efficient
    p.dbHandle.usernames = make([]string, 0, len(p.dbHandle.users))
    for username := range p.dbHandle.users {
        p.dbHandle.usernames = append(p.dbHandle.usernames, username)
    }
    sort.Strings(p.dbHandle.usernames)
    return nil
}

func (p *MemoryProvider) dumpUsers() ([]User, error) {
    p.dbHandle.Lock()
    defer p.dbHandle.Unlock()
    users := make([]User, 0, len(p.dbHandle.usernames))
    var err error
    if p.dbHandle.isClosed {
        return users, errMemoryProviderClosed
    }
    for _, username := range p.dbHandle.usernames {
        u := p.dbHandle.users[username]
        user := u.getACopy()
        err = addCredentialsToUser(&user)
        if err != nil {
            return users, err
        }
        users = append(users, user)
    }
    return users, err
}

func (p *MemoryProvider) dumpFolders() ([]vfs.BaseVirtualFolder, error) {
    p.dbHandle.Lock()
    defer p.dbHandle.Unlock()
    folders := make([]vfs.BaseVirtualFolder, 0, len(p.dbHandle.vfoldersNames))
    if p.dbHandle.isClosed {
        return folders, errMemoryProviderClosed
    }
    for _, f := range p.dbHandle.vfolders {
        folders = append(folders, f)
    }
    return folders, nil
}

func (p *MemoryProvider) getUsers(limit int, offset int, order string) ([]User, error) {
    users := make([]User, 0, limit)
    var err error
    p.dbHandle.Lock()
    defer p.dbHandle.Unlock()
    if p.dbHandle.isClosed {
        return users, errMemoryProviderClosed
    }
    if limit <= 0 {
        return users, err
    }
    itNum := 0
    if order == OrderASC {
        for _, username := range p.dbHandle.usernames {
            itNum++
            if itNum <= offset {
                continue
            }
            u := p.dbHandle.users[username]
            user := u.getACopy()
            user.PrepareForRendering()
            users = append(users, user)
            if len(users) >= limit {
                break
            }
        }
    } else {
        for i := len(p.dbHandle.usernames) - 1; i >= 0; i-- {
            itNum++
            if itNum <= offset {
                continue
            }
            username := p.dbHandle.usernames[i]
            u := p.dbHandle.users[username]
            user := u.getACopy()
            user.PrepareForRendering()
            users = append(users, user)
            if len(users) >= limit {
                break
            }
        }
    }
    return users, err
}
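getUsers above implements offset/limit paging over the sorted username slice, walking it forward or backward depending on the requested order. The same walk, compacted into a standalone sketch:

package main

import "fmt"

// page mirrors the offset/limit loop in getUsers above: skip `offset`
// entries, then collect at most `limit`, optionally in reverse order.
func page(sorted []string, limit, offset int, asc bool) []string {
    out := make([]string, 0, limit)
    if limit <= 0 {
        return out
    }
    n := len(sorted)
    for i := 0; i < n; i++ {
        idx := i
        if !asc {
            idx = n - 1 - i
        }
        if i < offset {
            continue
        }
        out = append(out, sorted[idx])
        if len(out) >= limit {
            break
        }
    }
    return out
}

func main() {
    names := []string{"a", "b", "c", "d"}
    fmt.Println(page(names, 2, 1, true))  // [b c]
    fmt.Println(page(names, 2, 1, false)) // [c b]
}
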
func (p *MemoryProvider) userExists(username string) (User, error) {
|
|
||||||
p.dbHandle.Lock()
|
|
||||||
defer p.dbHandle.Unlock()
|
|
||||||
if p.dbHandle.isClosed {
|
|
||||||
return User{}, errMemoryProviderClosed
|
|
||||||
}
|
|
||||||
return p.userExistsInternal(username)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *MemoryProvider) userExistsInternal(username string) (User, error) {
|
|
||||||
if val, ok := p.dbHandle.users[username]; ok {
|
|
||||||
return val.getACopy(), nil
|
|
||||||
}
|
|
||||||
return User{}, &RecordNotFoundError{err: fmt.Sprintf("username %#v does not exist", username)}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *MemoryProvider) addAdmin(admin *Admin) error {
|
|
||||||
p.dbHandle.Lock()
|
|
||||||
defer p.dbHandle.Unlock()
|
|
||||||
if p.dbHandle.isClosed {
|
|
||||||
return errMemoryProviderClosed
|
|
||||||
}
|
|
||||||
err := admin.validate()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
_, err = p.adminExistsInternal(admin.Username)
|
|
||||||
if err == nil {
|
|
||||||
return fmt.Errorf("admin %#v already exists", admin.Username)
|
|
||||||
}
|
|
||||||
admin.ID = p.getNextAdminID()
|
|
||||||
p.dbHandle.admins[admin.Username] = admin.getACopy()
|
|
||||||
p.dbHandle.adminsUsernames = append(p.dbHandle.adminsUsernames, admin.Username)
|
|
||||||
sort.Strings(p.dbHandle.adminsUsernames)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *MemoryProvider) updateAdmin(admin *Admin) error {
|
|
||||||
p.dbHandle.Lock()
|
|
||||||
defer p.dbHandle.Unlock()
|
|
||||||
if p.dbHandle.isClosed {
|
|
||||||
return errMemoryProviderClosed
|
|
||||||
}
|
|
||||||
err := admin.validate()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
a, err := p.adminExistsInternal(admin.Username)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
admin.ID = a.ID
|
|
||||||
p.dbHandle.admins[admin.Username] = admin.getACopy()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *MemoryProvider) deleteAdmin(admin *Admin) error {
|
|
||||||
p.dbHandle.Lock()
|
|
||||||
defer p.dbHandle.Unlock()
|
|
||||||
if p.dbHandle.isClosed {
|
|
||||||
return errMemoryProviderClosed
|
|
||||||
}
|
|
||||||
_, err := p.adminExistsInternal(admin.Username)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
delete(p.dbHandle.admins, admin.Username)
|
|
||||||
// this could be more efficient
|
|
||||||
p.dbHandle.adminsUsernames = make([]string, 0, len(p.dbHandle.admins))
|
|
||||||
for username := range p.dbHandle.admins {
|
|
||||||
p.dbHandle.adminsUsernames = append(p.dbHandle.adminsUsernames, username)
|
|
||||||
}
|
|
||||||
sort.Strings(p.dbHandle.adminsUsernames)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *MemoryProvider) adminExists(username string) (Admin, error) {
|
|
||||||
p.dbHandle.Lock()
|
|
||||||
defer p.dbHandle.Unlock()
|
|
||||||
if p.dbHandle.isClosed {
|
|
||||||
return Admin{}, errMemoryProviderClosed
|
|
||||||
}
|
|
||||||
return p.adminExistsInternal(username)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *MemoryProvider) adminExistsInternal(username string) (Admin, error) {
|
|
||||||
if val, ok := p.dbHandle.admins[username]; ok {
|
|
||||||
return val.getACopy(), nil
|
|
||||||
}
|
|
||||||
return Admin{}, &RecordNotFoundError{err: fmt.Sprintf("admin %#v does not exist", username)}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *MemoryProvider) dumpAdmins() ([]Admin, error) {
|
|
||||||
p.dbHandle.Lock()
|
|
||||||
defer p.dbHandle.Unlock()
|
|
||||||
|
|
||||||
admins := make([]Admin, 0, len(p.dbHandle.admins))
|
|
||||||
if p.dbHandle.isClosed {
|
|
||||||
return admins, errMemoryProviderClosed
|
|
||||||
}
|
|
||||||
for _, admin := range p.dbHandle.admins {
|
|
||||||
admins = append(admins, admin)
|
|
||||||
}
|
|
||||||
return admins, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *MemoryProvider) getAdmins(limit int, offset int, order string) ([]Admin, error) {
|
|
||||||
admins := make([]Admin, 0, limit)
|
|
||||||
|
|
||||||
p.dbHandle.Lock()
|
|
||||||
defer p.dbHandle.Unlock()
|
|
||||||
|
|
||||||
if p.dbHandle.isClosed {
|
|
||||||
return admins, errMemoryProviderClosed
|
|
||||||
}
|
|
||||||
if limit <= 0 {
|
|
||||||
return admins, nil
|
|
||||||
}
|
|
||||||
itNum := 0
|
|
||||||
if order == OrderASC {
|
|
||||||
for _, username := range p.dbHandle.adminsUsernames {
|
|
||||||
itNum++
|
|
||||||
if itNum <= offset {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
a := p.dbHandle.admins[username]
|
|
||||||
admin := a.getACopy()
|
|
||||||
admin.HideConfidentialData()
|
|
||||||
admins = append(admins, admin)
|
|
||||||
if len(admins) >= limit {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
for i := len(p.dbHandle.adminsUsernames) - 1; i >= 0; i-- {
|
|
||||||
itNum++
|
|
||||||
if itNum <= offset {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
username := p.dbHandle.adminsUsernames[i]
|
|
||||||
a := p.dbHandle.admins[username]
|
|
||||||
admin := a.getACopy()
|
|
||||||
admin.HideConfidentialData()
|
|
||||||
admins = append(admins, admin)
|
|
||||||
if len(admins) >= limit {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return admins, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *MemoryProvider) updateFolderQuota(name string, filesAdd int, sizeAdd int64, reset bool) error {
|
|
||||||
p.dbHandle.Lock()
|
|
||||||
defer p.dbHandle.Unlock()
|
|
||||||
if p.dbHandle.isClosed {
|
|
||||||
return errMemoryProviderClosed
|
|
||||||
}
|
|
||||||
folder, err := p.folderExistsInternal(name)
|
|
||||||
if err != nil {
|
|
||||||
providerLog(logger.LevelWarn, "unable to update quota for folder %#v error: %v", name, err)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if reset {
|
|
||||||
folder.UsedQuotaSize = sizeAdd
|
|
||||||
folder.UsedQuotaFiles = filesAdd
|
|
||||||
} else {
|
|
||||||
folder.UsedQuotaSize += sizeAdd
|
|
||||||
folder.UsedQuotaFiles += filesAdd
|
|
||||||
}
|
|
||||||
folder.LastQuotaUpdate = utils.GetTimeAsMsSinceEpoch(time.Now())
|
|
||||||
p.dbHandle.vfolders[name] = folder
|
|
||||||
return nil
|
|
||||||
}

func (p *MemoryProvider) getUsedFolderQuota(name string) (int, int64, error) {
    p.dbHandle.Lock()
    defer p.dbHandle.Unlock()
    if p.dbHandle.isClosed {
        return 0, 0, errMemoryProviderClosed
    }
    folder, err := p.folderExistsInternal(name)
    if err != nil {
        providerLog(logger.LevelWarn, "unable to get quota for folder %#v error: %v", name, err)
        return 0, 0, err
    }
    return folder.UsedQuotaFiles, folder.UsedQuotaSize, err
}

func (p *MemoryProvider) joinVirtualFoldersFields(user *User) []vfs.VirtualFolder {
    var folders []vfs.VirtualFolder
    for idx := range user.VirtualFolders {
        folder := &user.VirtualFolders[idx]
        f, err := p.addOrUpdateFolderInternal(&folder.BaseVirtualFolder, user.Username, 0, 0, 0)
        if err == nil {
            folder.BaseVirtualFolder = f
            folders = append(folders, *folder)
        }
    }
    return folders
}

func (p *MemoryProvider) removeUserFromFolderMapping(folderName, username string) {
    folder, err := p.folderExistsInternal(folderName)
    if err == nil {
        var usernames []string
        for _, user := range folder.Users {
            if user != username {
                usernames = append(usernames, user)
            }
        }
        folder.Users = usernames
        p.dbHandle.vfolders[folder.Name] = folder
    }
}

func (p *MemoryProvider) updateFoldersMappingInternal(folder vfs.BaseVirtualFolder) {
    p.dbHandle.vfolders[folder.Name] = folder
    if !utils.IsStringInSlice(folder.Name, p.dbHandle.vfoldersNames) {
        p.dbHandle.vfoldersNames = append(p.dbHandle.vfoldersNames, folder.Name)
        sort.Strings(p.dbHandle.vfoldersNames)
    }
}

func (p *MemoryProvider) addOrUpdateFolderInternal(baseFolder *vfs.BaseVirtualFolder, username string, usedQuotaSize int64,
    usedQuotaFiles int, lastQuotaUpdate int64) (vfs.BaseVirtualFolder, error) {
    folder, err := p.folderExistsInternal(baseFolder.Name)
    if err == nil {
        // the folder already exists: update it and make sure the user is mapped to it
        folder.MappedPath = baseFolder.MappedPath
        folder.Description = baseFolder.Description
        folder.FsConfig = baseFolder.FsConfig.GetACopy()
        if !utils.IsStringInSlice(username, folder.Users) {
            folder.Users = append(folder.Users, username)
        }
        p.updateFoldersMappingInternal(folder)
        return folder, nil
    }
    if _, ok := err.(*RecordNotFoundError); ok {
        folder = baseFolder.GetACopy()
        folder.ID = p.getNextFolderID()
        folder.UsedQuotaSize = usedQuotaSize
        folder.UsedQuotaFiles = usedQuotaFiles
        folder.LastQuotaUpdate = lastQuotaUpdate
        folder.Users = []string{username}
        p.updateFoldersMappingInternal(folder)
        return folder, nil
    }
    return folder, err
}
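
// Editor's note: a minimal sketch (hypothetical helper, not part of SFTPGo)
// of the error-type branching used in addOrUpdateFolderInternal above: a nil
// error means the folder exists and is updated in place, a *RecordNotFoundError
// means it is created, and any other error is propagated unchanged.
func exampleUpsertBranch(err error) string {
    if err == nil {
        return "update"
    }
    if _, ok := err.(*RecordNotFoundError); ok {
        return "create"
    }
    return "fail"
}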

func (p *MemoryProvider) folderExistsInternal(name string) (vfs.BaseVirtualFolder, error) {
    if val, ok := p.dbHandle.vfolders[name]; ok {
        return val, nil
    }
    return vfs.BaseVirtualFolder{}, &RecordNotFoundError{err: fmt.Sprintf("folder %#v does not exist", name)}
}

func (p *MemoryProvider) getFolders(limit, offset int, order string) ([]vfs.BaseVirtualFolder, error) {
    folders := make([]vfs.BaseVirtualFolder, 0, limit)
    var err error
    p.dbHandle.Lock()
    defer p.dbHandle.Unlock()
    if p.dbHandle.isClosed {
        return folders, errMemoryProviderClosed
    }
    if limit <= 0 {
        return folders, err
    }
    itNum := 0
    if order == OrderASC {
        for _, name := range p.dbHandle.vfoldersNames {
            itNum++
            if itNum <= offset {
                continue
            }
            f := p.dbHandle.vfolders[name]
            folder := f.GetACopy()
            folder.PrepareForRendering()
            folders = append(folders, folder)
            if len(folders) >= limit {
                break
            }
        }
    } else {
        for i := len(p.dbHandle.vfoldersNames) - 1; i >= 0; i-- {
            itNum++
            if itNum <= offset {
                continue
            }
            name := p.dbHandle.vfoldersNames[i]
            f := p.dbHandle.vfolders[name]
            folder := f.GetACopy()
            folder.PrepareForRendering()
            folders = append(folders, folder)
            if len(folders) >= limit {
                break
            }
        }
    }
    return folders, err
}

func (p *MemoryProvider) getFolderByName(name string) (vfs.BaseVirtualFolder, error) {
    p.dbHandle.Lock()
    defer p.dbHandle.Unlock()
    if p.dbHandle.isClosed {
        return vfs.BaseVirtualFolder{}, errMemoryProviderClosed
    }
    folder, err := p.folderExistsInternal(name)
    if err != nil {
        return vfs.BaseVirtualFolder{}, err
    }
    return folder.GetACopy(), nil
}

func (p *MemoryProvider) addFolder(folder *vfs.BaseVirtualFolder) error {
    err := ValidateFolder(folder)
    if err != nil {
        return err
    }

    p.dbHandle.Lock()
    defer p.dbHandle.Unlock()
    if p.dbHandle.isClosed {
        return errMemoryProviderClosed
    }

    _, err = p.folderExistsInternal(folder.Name)
    if err == nil {
        return fmt.Errorf("folder %#v already exists", folder.Name)
    }
    folder.ID = p.getNextFolderID()
    folder.Users = nil
    p.dbHandle.vfolders[folder.Name] = folder.GetACopy()
    p.dbHandle.vfoldersNames = append(p.dbHandle.vfoldersNames, folder.Name)
    sort.Strings(p.dbHandle.vfoldersNames)
    return nil
}

func (p *MemoryProvider) updateFolder(folder *vfs.BaseVirtualFolder) error {
    err := ValidateFolder(folder)
    if err != nil {
        return err
    }

    p.dbHandle.Lock()
    defer p.dbHandle.Unlock()
    if p.dbHandle.isClosed {
        return errMemoryProviderClosed
    }
    f, err := p.folderExistsInternal(folder.Name)
    if err != nil {
        return err
    }
    folder.ID = f.ID
    folder.LastQuotaUpdate = f.LastQuotaUpdate
    folder.UsedQuotaFiles = f.UsedQuotaFiles
    folder.UsedQuotaSize = f.UsedQuotaSize
    folder.Users = f.Users
    p.dbHandle.vfolders[folder.Name] = folder.GetACopy()
    // now update the related users
    for _, username := range folder.Users {
        user, err := p.userExistsInternal(username)
        if err == nil {
            var folders []vfs.VirtualFolder
            for idx := range user.VirtualFolders {
                userFolder := &user.VirtualFolders[idx]
                if folder.Name == userFolder.Name {
                    userFolder.BaseVirtualFolder = folder.GetACopy()
                }
                folders = append(folders, *userFolder)
            }
            user.VirtualFolders = folders
            p.dbHandle.users[user.Username] = user
        }
    }
    return nil
}

func (p *MemoryProvider) deleteFolder(folder *vfs.BaseVirtualFolder) error {
    p.dbHandle.Lock()
    defer p.dbHandle.Unlock()
    if p.dbHandle.isClosed {
        return errMemoryProviderClosed
    }

    _, err := p.folderExistsInternal(folder.Name)
    if err != nil {
        return err
    }
    for _, username := range folder.Users {
        user, err := p.userExistsInternal(username)
        if err == nil {
            var folders []vfs.VirtualFolder
            for idx := range user.VirtualFolders {
                userFolder := &user.VirtualFolders[idx]
                if folder.Name != userFolder.Name {
                    folders = append(folders, *userFolder)
                }
            }
            user.VirtualFolders = folders
            p.dbHandle.users[user.Username] = user
        }
    }
    delete(p.dbHandle.vfolders, folder.Name)
    p.dbHandle.vfoldersNames = []string{}
    for name := range p.dbHandle.vfolders {
        p.dbHandle.vfoldersNames = append(p.dbHandle.vfoldersNames, name)
    }
    sort.Strings(p.dbHandle.vfoldersNames)
    return nil
}

func (p *MemoryProvider) getNextID() int64 {
    nextID := int64(1)
    for _, v := range p.dbHandle.users {
        if v.ID >= nextID {
            nextID = v.ID + 1
        }
    }
    return nextID
}

func (p *MemoryProvider) getNextFolderID() int64 {
    nextID := int64(1)
    for _, v := range p.dbHandle.vfolders {
        if v.ID >= nextID {
            nextID = v.ID + 1
        }
    }
    return nextID
}

func (p *MemoryProvider) getNextAdminID() int64 {
    nextID := int64(1)
    for _, a := range p.dbHandle.admins {
        if a.ID >= nextID {
            nextID = a.ID + 1
        }
    }
    return nextID
}
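
// Editor's note: getNextID, getNextFolderID and getNextAdminID above all share
// the same strategy: scan every record and return max(ID)+1. This is O(n) per
// insert, which is acceptable for the data sizes the in-memory provider is
// meant for. A minimal generic sketch (hypothetical helper):
func exampleNextID(ids []int64) int64 {
    nextID := int64(1)
    for _, id := range ids {
        if id >= nextID {
            nextID = id + 1
        }
    }
    return nextID
}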

func (p *MemoryProvider) clear() {
    p.dbHandle.Lock()
    defer p.dbHandle.Unlock()
    p.dbHandle.usernames = []string{}
    p.dbHandle.users = make(map[string]User)
    p.dbHandle.vfoldersNames = []string{}
    p.dbHandle.vfolders = make(map[string]vfs.BaseVirtualFolder)
    p.dbHandle.admins = make(map[string]Admin)
    p.dbHandle.adminsUsernames = []string{}
}

func (p *MemoryProvider) reloadConfig() error {
    if p.dbHandle.configFile == "" {
        providerLog(logger.LevelDebug, "no dump configuration file defined")
        return nil
    }
    providerLog(logger.LevelDebug, "loading dump from file: %#v", p.dbHandle.configFile)
    fi, err := os.Stat(p.dbHandle.configFile)
    if err != nil {
        providerLog(logger.LevelWarn, "error loading dump: %v", err)
        return err
    }
    if fi.Size() == 0 {
        err = errors.New("dump configuration file is invalid, its size must be > 0")
        providerLog(logger.LevelWarn, "error loading dump: %v", err)
        return err
    }
    if fi.Size() > 10485760 {
        err = errors.New("dump configuration file is invalid, its size must be <= 10485760 bytes")
        providerLog(logger.LevelWarn, "error loading dump: %v", err)
        return err
    }
    content, err := os.ReadFile(p.dbHandle.configFile)
    if err != nil {
        providerLog(logger.LevelWarn, "error loading dump: %v", err)
        return err
    }
    dump, err := ParseDumpData(content)
    if err != nil {
        providerLog(logger.LevelWarn, "error loading dump: %v", err)
        return err
    }
    p.clear()

    if err := p.restoreFolders(&dump); err != nil {
        return err
    }

    if err := p.restoreUsers(&dump); err != nil {
        return err
    }

    if err := p.restoreAdmins(&dump); err != nil {
        return err
    }

    providerLog(logger.LevelDebug, "config loaded from file: %#v", p.dbHandle.configFile)
    return nil
}

func (p *MemoryProvider) restoreAdmins(dump *BackupData) error {
    for _, admin := range dump.Admins {
        a, err := p.adminExists(admin.Username)
        admin := admin // pin
        if err == nil {
            admin.ID = a.ID
            err = p.updateAdmin(&admin)
            if err != nil {
                providerLog(logger.LevelWarn, "error updating admin %#v: %v", admin.Username, err)
                return err
            }
        } else {
            err = p.addAdmin(&admin)
            if err != nil {
                providerLog(logger.LevelWarn, "error adding admin %#v: %v", admin.Username, err)
                return err
            }
        }
    }
    return nil
}
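
// Editor's note: the "admin := admin // pin" line above (and the analogous
// pins in restoreFolders and restoreUsers below) is the classic pre-Go 1.22
// loop-variable idiom: without it, every &admin passed to updateAdmin or
// addAdmin would alias the single shared loop variable. A minimal sketch
// (hypothetical helper) of the pitfall the pin avoids:
func exampleLoopVarPin() []*int {
    var ptrs []*int
    for _, v := range []int{1, 2, 3} {
        v := v // pin: give each iteration its own copy
        ptrs = append(ptrs, &v)
    }
    return ptrs // points at 1, 2, 3; without the pin (pre-Go 1.22): 3, 3, 3
}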

func (p *MemoryProvider) restoreFolders(dump *BackupData) error {
    for _, folder := range dump.Folders {
        folder := folder // pin
        f, err := p.getFolderByName(folder.Name)
        if err == nil {
            folder.ID = f.ID
            err = p.updateFolder(&folder)
            if err != nil {
                providerLog(logger.LevelWarn, "error updating folder %#v: %v", folder.Name, err)
                return err
            }
        } else {
            folder.Users = nil
            err = p.addFolder(&folder)
            if err != nil {
                providerLog(logger.LevelWarn, "error adding folder %#v: %v", folder.Name, err)
                return err
            }
        }
    }
    return nil
}

func (p *MemoryProvider) restoreUsers(dump *BackupData) error {
    for _, user := range dump.Users {
        user := user // pin
        u, err := p.userExists(user.Username)
        if err == nil {
            user.ID = u.ID
            err = p.updateUser(&user)
            if err != nil {
                providerLog(logger.LevelWarn, "error updating user %#v: %v", user.Username, err)
                return err
            }
        } else {
            err = p.addUser(&user)
            if err != nil {
                providerLog(logger.LevelWarn, "error adding user %#v: %v", user.Username, err)
                return err
            }
        }
    }
    return nil
}

// initializeDatabase does nothing, no initialization is needed for the memory provider
func (p *MemoryProvider) initializeDatabase() error {
    return ErrNoInitRequired
}

func (p *MemoryProvider) migrateDatabase() error {
    return ErrNoInitRequired
}

func (p *MemoryProvider) revertDatabase(targetVersion int) error {
    return errors.New("memory provider does not store data, revert not possible")
}
@@ -1,332 +0,0 @@
// +build !nomysql

package dataprovider

import (
    "context"
    "crypto/x509"
    "database/sql"
    "errors"
    "fmt"
    "strings"
    "time"

    // we import go-sql-driver/mysql here to be able to disable MySQL support using a build tag
    _ "github.com/go-sql-driver/mysql"

    "github.com/drakkan/sftpgo/logger"
    "github.com/drakkan/sftpgo/version"
    "github.com/drakkan/sftpgo/vfs"
)

const (
    mysqlInitialSQL = "CREATE TABLE `{{schema_version}}` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, `version` integer NOT NULL);" +
        "CREATE TABLE `{{admins}}` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, `username` varchar(255) NOT NULL UNIQUE, " +
        "`password` varchar(255) NOT NULL, `email` varchar(255) NULL, `status` integer NOT NULL, `permissions` longtext NOT NULL, " +
        "`filters` longtext NULL, `additional_info` longtext NULL);" +
        "CREATE TABLE `{{folders}}` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, `name` varchar(255) NOT NULL UNIQUE, " +
        "`path` varchar(512) NULL, `used_quota_size` bigint NOT NULL, `used_quota_files` integer NOT NULL, " +
        "`last_quota_update` bigint NOT NULL);" +
        "CREATE TABLE `{{users}}` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, `status` integer NOT NULL, " +
        "`expiration_date` bigint NOT NULL, `username` varchar(255) NOT NULL UNIQUE, `password` longtext NULL, " +
        "`public_keys` longtext NULL, `home_dir` varchar(512) NOT NULL, `uid` integer NOT NULL, `gid` integer NOT NULL, " +
        "`max_sessions` integer NOT NULL, `quota_size` bigint NOT NULL, `quota_files` integer NOT NULL, " +
        "`permissions` longtext NOT NULL, `used_quota_size` bigint NOT NULL, `used_quota_files` integer NOT NULL, " +
        "`last_quota_update` bigint NOT NULL, `upload_bandwidth` integer NOT NULL, `download_bandwidth` integer NOT NULL, " +
        "`last_login` bigint NOT NULL, `filters` longtext NULL, `filesystem` longtext NULL, `additional_info` longtext NULL);" +
        "CREATE TABLE `{{folders_mapping}}` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, `virtual_path` varchar(512) NOT NULL, " +
        "`quota_size` bigint NOT NULL, `quota_files` integer NOT NULL, `folder_id` integer NOT NULL, `user_id` integer NOT NULL);" +
        "ALTER TABLE `{{folders_mapping}}` ADD CONSTRAINT `{{prefix}}unique_mapping` UNIQUE (`user_id`, `folder_id`);" +
        "ALTER TABLE `{{folders_mapping}}` ADD CONSTRAINT `{{prefix}}folders_mapping_folder_id_fk_folders_id` FOREIGN KEY (`folder_id`) REFERENCES `{{folders}}` (`id`) ON DELETE CASCADE;" +
        "ALTER TABLE `{{folders_mapping}}` ADD CONSTRAINT `{{prefix}}folders_mapping_user_id_fk_users_id` FOREIGN KEY (`user_id`) REFERENCES `{{users}}` (`id`) ON DELETE CASCADE;" +
        "INSERT INTO {{schema_version}} (version) VALUES (8);"
    mysqlV9SQL = "ALTER TABLE `{{admins}}` ADD COLUMN `description` varchar(512) NULL;" +
        "ALTER TABLE `{{folders}}` ADD COLUMN `description` varchar(512) NULL;" +
        "ALTER TABLE `{{folders}}` ADD COLUMN `filesystem` longtext NULL;" +
        "ALTER TABLE `{{users}}` ADD COLUMN `description` varchar(512) NULL;"
    mysqlV9DownSQL = "ALTER TABLE `{{users}}` DROP COLUMN `description`;" +
        "ALTER TABLE `{{folders}}` DROP COLUMN `filesystem`;" +
        "ALTER TABLE `{{folders}}` DROP COLUMN `description`;" +
        "ALTER TABLE `{{admins}}` DROP COLUMN `description`;"
)

// MySQLProvider auth provider for MySQL/MariaDB database
type MySQLProvider struct {
    dbHandle *sql.DB
}

func init() {
    version.AddFeature("+mysql")
}

func initializeMySQLProvider() error {
    var err error

    dbHandle, err := sql.Open("mysql", getMySQLConnectionString(false))
    if err == nil {
        providerLog(logger.LevelDebug, "mysql database handle created, connection string: %#v, pool size: %v",
            getMySQLConnectionString(true), config.PoolSize)
        dbHandle.SetMaxOpenConns(config.PoolSize)
        if config.PoolSize > 0 {
            dbHandle.SetMaxIdleConns(config.PoolSize)
        } else {
            dbHandle.SetMaxIdleConns(2)
        }
        dbHandle.SetConnMaxLifetime(240 * time.Second)
        provider = &MySQLProvider{dbHandle: dbHandle}
    } else {
        providerLog(logger.LevelWarn, "error creating mysql database handler, connection string: %#v, error: %v",
            getMySQLConnectionString(true), err)
    }
    return err
}

func getMySQLConnectionString(redactedPwd bool) string {
    var connectionString string
    if config.ConnectionString == "" {
        password := config.Password
        if redactedPwd {
            password = "[redacted]"
        }
        connectionString = fmt.Sprintf("%v:%v@tcp([%v]:%v)/%v?charset=utf8&interpolateParams=true&timeout=10s&tls=%v&writeTimeout=10s&readTimeout=10s",
            config.Username, password, config.Host, config.Port, config.Name, getSSLMode())
    } else {
        connectionString = config.ConnectionString
    }
    return connectionString
}
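
// Editor's note: a hedged example (made-up credentials, hypothetical helper)
// of the DSN that getMySQLConnectionString builds when no explicit connection
// string is configured:
func exampleMySQLDSN() string {
    // yields: "sftpgo:secret@tcp([127.0.0.1]:3306)/sftpgo?charset=utf8&interpolateParams=true&timeout=10s&tls=false&writeTimeout=10s&readTimeout=10s"
    return fmt.Sprintf("%v:%v@tcp([%v]:%v)/%v?charset=utf8&interpolateParams=true&timeout=10s&tls=%v&writeTimeout=10s&readTimeout=10s",
        "sftpgo", "secret", "127.0.0.1", 3306, "sftpgo", "false")
}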

func (p *MySQLProvider) checkAvailability() error {
    return sqlCommonCheckAvailability(p.dbHandle)
}

func (p *MySQLProvider) validateUserAndPass(username, password, ip, protocol string) (User, error) {
    return sqlCommonValidateUserAndPass(username, password, ip, protocol, p.dbHandle)
}

func (p *MySQLProvider) validateUserAndTLSCert(username, protocol string, tlsCert *x509.Certificate) (User, error) {
    return sqlCommonValidateUserAndTLSCertificate(username, protocol, tlsCert, p.dbHandle)
}

func (p *MySQLProvider) validateUserAndPubKey(username string, publicKey []byte) (User, string, error) {
    return sqlCommonValidateUserAndPubKey(username, publicKey, p.dbHandle)
}

func (p *MySQLProvider) updateQuota(username string, filesAdd int, sizeAdd int64, reset bool) error {
    return sqlCommonUpdateQuota(username, filesAdd, sizeAdd, reset, p.dbHandle)
}

func (p *MySQLProvider) getUsedQuota(username string) (int, int64, error) {
    return sqlCommonGetUsedQuota(username, p.dbHandle)
}

func (p *MySQLProvider) updateLastLogin(username string) error {
    return sqlCommonUpdateLastLogin(username, p.dbHandle)
}

func (p *MySQLProvider) userExists(username string) (User, error) {
    return sqlCommonGetUserByUsername(username, p.dbHandle)
}

func (p *MySQLProvider) addUser(user *User) error {
    return sqlCommonAddUser(user, p.dbHandle)
}

func (p *MySQLProvider) updateUser(user *User) error {
    return sqlCommonUpdateUser(user, p.dbHandle)
}

func (p *MySQLProvider) deleteUser(user *User) error {
    return sqlCommonDeleteUser(user, p.dbHandle)
}

func (p *MySQLProvider) dumpUsers() ([]User, error) {
    return sqlCommonDumpUsers(p.dbHandle)
}

func (p *MySQLProvider) getUsers(limit int, offset int, order string) ([]User, error) {
    return sqlCommonGetUsers(limit, offset, order, p.dbHandle)
}

func (p *MySQLProvider) dumpFolders() ([]vfs.BaseVirtualFolder, error) {
    return sqlCommonDumpFolders(p.dbHandle)
}

func (p *MySQLProvider) getFolders(limit, offset int, order string) ([]vfs.BaseVirtualFolder, error) {
    return sqlCommonGetFolders(limit, offset, order, p.dbHandle)
}

func (p *MySQLProvider) getFolderByName(name string) (vfs.BaseVirtualFolder, error) {
    ctx, cancel := context.WithTimeout(context.Background(), defaultSQLQueryTimeout)
    defer cancel()
    return sqlCommonGetFolderByName(ctx, name, p.dbHandle)
}

func (p *MySQLProvider) addFolder(folder *vfs.BaseVirtualFolder) error {
    return sqlCommonAddFolder(folder, p.dbHandle)
}

func (p *MySQLProvider) updateFolder(folder *vfs.BaseVirtualFolder) error {
    return sqlCommonUpdateFolder(folder, p.dbHandle)
}

func (p *MySQLProvider) deleteFolder(folder *vfs.BaseVirtualFolder) error {
    return sqlCommonDeleteFolder(folder, p.dbHandle)
}

func (p *MySQLProvider) updateFolderQuota(name string, filesAdd int, sizeAdd int64, reset bool) error {
    return sqlCommonUpdateFolderQuota(name, filesAdd, sizeAdd, reset, p.dbHandle)
}

func (p *MySQLProvider) getUsedFolderQuota(name string) (int, int64, error) {
    return sqlCommonGetFolderUsedQuota(name, p.dbHandle)
}

func (p *MySQLProvider) adminExists(username string) (Admin, error) {
    return sqlCommonGetAdminByUsername(username, p.dbHandle)
}

func (p *MySQLProvider) addAdmin(admin *Admin) error {
    return sqlCommonAddAdmin(admin, p.dbHandle)
}

func (p *MySQLProvider) updateAdmin(admin *Admin) error {
    return sqlCommonUpdateAdmin(admin, p.dbHandle)
}

func (p *MySQLProvider) deleteAdmin(admin *Admin) error {
    return sqlCommonDeleteAdmin(admin, p.dbHandle)
}

func (p *MySQLProvider) getAdmins(limit int, offset int, order string) ([]Admin, error) {
    return sqlCommonGetAdmins(limit, offset, order, p.dbHandle)
}

func (p *MySQLProvider) dumpAdmins() ([]Admin, error) {
    return sqlCommonDumpAdmins(p.dbHandle)
}

func (p *MySQLProvider) validateAdminAndPass(username, password, ip string) (Admin, error) {
    return sqlCommonValidateAdminAndPass(username, password, ip, p.dbHandle)
}

func (p *MySQLProvider) close() error {
    return p.dbHandle.Close()
}

func (p *MySQLProvider) reloadConfig() error {
    return nil
}

// initializeDatabase creates the initial database structure
func (p *MySQLProvider) initializeDatabase() error {
    dbVersion, err := sqlCommonGetDatabaseVersion(p.dbHandle, false)
    if err == nil && dbVersion.Version > 0 {
        return ErrNoInitRequired
    }
    initialSQL := strings.ReplaceAll(mysqlInitialSQL, "{{schema_version}}", sqlTableSchemaVersion)
    initialSQL = strings.ReplaceAll(initialSQL, "{{admins}}", sqlTableAdmins)
    initialSQL = strings.ReplaceAll(initialSQL, "{{folders}}", sqlTableFolders)
    initialSQL = strings.ReplaceAll(initialSQL, "{{users}}", sqlTableUsers)
    initialSQL = strings.ReplaceAll(initialSQL, "{{folders_mapping}}", sqlTableFoldersMapping)
    initialSQL = strings.ReplaceAll(initialSQL, "{{prefix}}", config.SQLTablesPrefix)

    return sqlCommonExecSQLAndUpdateDBVersion(p.dbHandle, strings.Split(initialSQL, ";"), 8)
}

func (p *MySQLProvider) migrateDatabase() error {
    dbVersion, err := sqlCommonGetDatabaseVersion(p.dbHandle, true)
    if err != nil {
        return err
    }

    switch version := dbVersion.Version; {
    case version == sqlDatabaseVersion:
        providerLog(logger.LevelDebug, "sql database is up to date, current version: %v", version)
        return ErrNoInitRequired
    case version < 8:
        err = fmt.Errorf("database version %v is too old, please see the upgrading docs", version)
        providerLog(logger.LevelError, "%v", err)
        logger.ErrorToConsole("%v", err)
        return err
    case version == 8:
        return updateMySQLDatabaseFromV8(p.dbHandle)
    case version == 9:
        return updateMySQLDatabaseFromV9(p.dbHandle)
    default:
        if version > sqlDatabaseVersion {
            providerLog(logger.LevelWarn, "database version %v is newer than the supported one: %v", version,
                sqlDatabaseVersion)
            logger.WarnToConsole("database version %v is newer than the supported one: %v", version,
                sqlDatabaseVersion)
            return nil
        }
        return fmt.Errorf("database version not handled: %v", version)
    }
}
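
// Editor's note: a minimal sketch (hypothetical helper, not part of SFTPGo)
// of the chained migration pattern used above: each updater moves the schema
// one version forward and then delegates to the next, so starting at version
// 8 runs 8->9 followed by 9->10.
func exampleChainedMigration(current, target int, steps map[int]func() error) error {
    for v := current; v < target; v++ {
        step, ok := steps[v]
        if !ok {
            return fmt.Errorf("database version not handled: %v", v)
        }
        if err := step(); err != nil {
            return err
        }
    }
    return nil
}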

func (p *MySQLProvider) revertDatabase(targetVersion int) error {
    dbVersion, err := sqlCommonGetDatabaseVersion(p.dbHandle, true)
    if err != nil {
        return err
    }
    if dbVersion.Version == targetVersion {
        return errors.New("the current version matches the target version, nothing to do")
    }

    switch dbVersion.Version {
    case 9:
        return downgradeMySQLDatabaseFromV9(p.dbHandle)
    case 10:
        return downgradeMySQLDatabaseFromV10(p.dbHandle)
    default:
        return fmt.Errorf("database version not handled: %v", dbVersion.Version)
    }
}

func updateMySQLDatabaseFromV8(dbHandle *sql.DB) error {
    if err := updateMySQLDatabaseFrom8To9(dbHandle); err != nil {
        return err
    }
    return updateMySQLDatabaseFromV9(dbHandle)
}

func updateMySQLDatabaseFromV9(dbHandle *sql.DB) error {
    return updateMySQLDatabaseFrom9To10(dbHandle)
}

func downgradeMySQLDatabaseFromV9(dbHandle *sql.DB) error {
    return downgradeMySQLDatabaseFrom9To8(dbHandle)
}

func downgradeMySQLDatabaseFromV10(dbHandle *sql.DB) error {
    if err := downgradeMySQLDatabaseFrom10To9(dbHandle); err != nil {
        return err
    }
    return downgradeMySQLDatabaseFromV9(dbHandle)
}

func updateMySQLDatabaseFrom8To9(dbHandle *sql.DB) error {
    logger.InfoToConsole("updating database version: 8 -> 9")
    providerLog(logger.LevelInfo, "updating database version: 8 -> 9")
    sql := strings.ReplaceAll(mysqlV9SQL, "{{users}}", sqlTableUsers)
    sql = strings.ReplaceAll(sql, "{{admins}}", sqlTableAdmins)
    sql = strings.ReplaceAll(sql, "{{folders}}", sqlTableFolders)
    return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, strings.Split(sql, ";"), 9)
}

func downgradeMySQLDatabaseFrom9To8(dbHandle *sql.DB) error {
    logger.InfoToConsole("downgrading database version: 9 -> 8")
    providerLog(logger.LevelInfo, "downgrading database version: 9 -> 8")
    sql := strings.ReplaceAll(mysqlV9DownSQL, "{{users}}", sqlTableUsers)
    sql = strings.ReplaceAll(sql, "{{admins}}", sqlTableAdmins)
    sql = strings.ReplaceAll(sql, "{{folders}}", sqlTableFolders)
    return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, strings.Split(sql, ";"), 8)
}

func updateMySQLDatabaseFrom9To10(dbHandle *sql.DB) error {
    return sqlCommonUpdateDatabaseFrom9To10(dbHandle)
}

func downgradeMySQLDatabaseFrom10To9(dbHandle *sql.DB) error {
    return sqlCommonDowngradeDatabaseFrom10To9(dbHandle)
}

@@ -1,17 +0,0 @@
// +build nomysql

package dataprovider

import (
    "errors"

    "github.com/drakkan/sftpgo/version"
)

func init() {
    version.AddFeature("-mysql")
}

func initializeMySQLProvider() error {
    return errors.New("MySQL disabled at build time")
}
@@ -1,345 +0,0 @@
// +build !nopgsql

package dataprovider

import (
    "context"
    "crypto/x509"
    "database/sql"
    "errors"
    "fmt"
    "strings"
    "time"

    // we import lib/pq here to be able to disable PostgreSQL support using a build tag
    _ "github.com/lib/pq"

    "github.com/drakkan/sftpgo/logger"
    "github.com/drakkan/sftpgo/version"
    "github.com/drakkan/sftpgo/vfs"
)

const (
    pgsqlInitial = `CREATE TABLE "{{schema_version}}" ("id" serial NOT NULL PRIMARY KEY, "version" integer NOT NULL);
CREATE TABLE "{{admins}}" ("id" serial NOT NULL PRIMARY KEY, "username" varchar(255) NOT NULL UNIQUE,
"password" varchar(255) NOT NULL, "email" varchar(255) NULL, "status" integer NOT NULL, "permissions" text NOT NULL,
"filters" text NULL, "additional_info" text NULL);
CREATE TABLE "{{folders}}" ("id" serial NOT NULL PRIMARY KEY, "name" varchar(255) NOT NULL UNIQUE,
"path" varchar(512) NULL, "used_quota_size" bigint NOT NULL, "used_quota_files" integer NOT NULL,
"last_quota_update" bigint NOT NULL);
CREATE TABLE "{{users}}" ("id" serial NOT NULL PRIMARY KEY, "status" integer NOT NULL, "expiration_date" bigint NOT NULL,
"username" varchar(255) NOT NULL UNIQUE, "password" text NULL, "public_keys" text NULL, "home_dir" varchar(512) NOT NULL,
"uid" integer NOT NULL, "gid" integer NOT NULL, "max_sessions" integer NOT NULL, "quota_size" bigint NOT NULL,
"quota_files" integer NOT NULL, "permissions" text NOT NULL, "used_quota_size" bigint NOT NULL,
"used_quota_files" integer NOT NULL, "last_quota_update" bigint NOT NULL, "upload_bandwidth" integer NOT NULL,
"download_bandwidth" integer NOT NULL, "last_login" bigint NOT NULL, "filters" text NULL, "filesystem" text NULL,
"additional_info" text NULL);
CREATE TABLE "{{folders_mapping}}" ("id" serial NOT NULL PRIMARY KEY, "virtual_path" varchar(512) NOT NULL,
"quota_size" bigint NOT NULL, "quota_files" integer NOT NULL, "folder_id" integer NOT NULL, "user_id" integer NOT NULL);
ALTER TABLE "{{folders_mapping}}" ADD CONSTRAINT "{{prefix}}unique_mapping" UNIQUE ("user_id", "folder_id");
ALTER TABLE "{{folders_mapping}}" ADD CONSTRAINT "{{prefix}}folders_mapping_folder_id_fk_folders_id"
FOREIGN KEY ("folder_id") REFERENCES "{{folders}}" ("id") MATCH SIMPLE ON UPDATE NO ACTION ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED;
ALTER TABLE "{{folders_mapping}}" ADD CONSTRAINT "{{prefix}}folders_mapping_user_id_fk_users_id"
FOREIGN KEY ("user_id") REFERENCES "{{users}}" ("id") MATCH SIMPLE ON UPDATE NO ACTION ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED;
CREATE INDEX "{{prefix}}folders_mapping_folder_id_idx" ON "{{folders_mapping}}" ("folder_id");
CREATE INDEX "{{prefix}}folders_mapping_user_id_idx" ON "{{folders_mapping}}" ("user_id");
INSERT INTO {{schema_version}} (version) VALUES (8);
`
    pgsqlV9SQL = `ALTER TABLE "{{admins}}" ADD COLUMN "description" varchar(512) NULL;
ALTER TABLE "{{folders}}" ADD COLUMN "description" varchar(512) NULL;
ALTER TABLE "{{folders}}" ADD COLUMN "filesystem" text NULL;
ALTER TABLE "{{users}}" ADD COLUMN "description" varchar(512) NULL;
`
    pgsqlV9DownSQL = `ALTER TABLE "{{users}}" DROP COLUMN "description" CASCADE;
ALTER TABLE "{{folders}}" DROP COLUMN "filesystem" CASCADE;
ALTER TABLE "{{folders}}" DROP COLUMN "description" CASCADE;
ALTER TABLE "{{admins}}" DROP COLUMN "description" CASCADE;
`
)

// PGSQLProvider auth provider for PostgreSQL database
type PGSQLProvider struct {
    dbHandle *sql.DB
}

func init() {
    version.AddFeature("+pgsql")
}

func initializePGSQLProvider() error {
    var err error
    dbHandle, err := sql.Open("postgres", getPGSQLConnectionString(false))
    if err == nil {
        providerLog(logger.LevelDebug, "postgres database handle created, connection string: %#v, pool size: %v",
            getPGSQLConnectionString(true), config.PoolSize)
        dbHandle.SetMaxOpenConns(config.PoolSize)
        if config.PoolSize > 0 {
            dbHandle.SetMaxIdleConns(config.PoolSize)
        } else {
            dbHandle.SetMaxIdleConns(2)
        }
        dbHandle.SetConnMaxLifetime(240 * time.Second)
        provider = &PGSQLProvider{dbHandle: dbHandle}
    } else {
        providerLog(logger.LevelWarn, "error creating postgres database handler, connection string: %#v, error: %v",
            getPGSQLConnectionString(true), err)
    }
    return err
}

func getPGSQLConnectionString(redactedPwd bool) string {
    var connectionString string
    if config.ConnectionString == "" {
        password := config.Password
        if redactedPwd {
            password = "[redacted]"
        }
        connectionString = fmt.Sprintf("host='%v' port=%v dbname='%v' user='%v' password='%v' sslmode=%v connect_timeout=10",
            config.Host, config.Port, config.Name, config.Username, password, getSSLMode())
    } else {
        connectionString = config.ConnectionString
    }
    return connectionString
}

func (p *PGSQLProvider) checkAvailability() error {
    return sqlCommonCheckAvailability(p.dbHandle)
}

func (p *PGSQLProvider) validateUserAndPass(username, password, ip, protocol string) (User, error) {
    return sqlCommonValidateUserAndPass(username, password, ip, protocol, p.dbHandle)
}

func (p *PGSQLProvider) validateUserAndTLSCert(username, protocol string, tlsCert *x509.Certificate) (User, error) {
    return sqlCommonValidateUserAndTLSCertificate(username, protocol, tlsCert, p.dbHandle)
}

func (p *PGSQLProvider) validateUserAndPubKey(username string, publicKey []byte) (User, string, error) {
    return sqlCommonValidateUserAndPubKey(username, publicKey, p.dbHandle)
}

func (p *PGSQLProvider) updateQuota(username string, filesAdd int, sizeAdd int64, reset bool) error {
    return sqlCommonUpdateQuota(username, filesAdd, sizeAdd, reset, p.dbHandle)
}

func (p *PGSQLProvider) getUsedQuota(username string) (int, int64, error) {
    return sqlCommonGetUsedQuota(username, p.dbHandle)
}

func (p *PGSQLProvider) updateLastLogin(username string) error {
    return sqlCommonUpdateLastLogin(username, p.dbHandle)
}

func (p *PGSQLProvider) userExists(username string) (User, error) {
    return sqlCommonGetUserByUsername(username, p.dbHandle)
}

func (p *PGSQLProvider) addUser(user *User) error {
    return sqlCommonAddUser(user, p.dbHandle)
}

func (p *PGSQLProvider) updateUser(user *User) error {
    return sqlCommonUpdateUser(user, p.dbHandle)
}

func (p *PGSQLProvider) deleteUser(user *User) error {
    return sqlCommonDeleteUser(user, p.dbHandle)
}

func (p *PGSQLProvider) dumpUsers() ([]User, error) {
    return sqlCommonDumpUsers(p.dbHandle)
}

func (p *PGSQLProvider) getUsers(limit int, offset int, order string) ([]User, error) {
    return sqlCommonGetUsers(limit, offset, order, p.dbHandle)
}

func (p *PGSQLProvider) dumpFolders() ([]vfs.BaseVirtualFolder, error) {
    return sqlCommonDumpFolders(p.dbHandle)
}

func (p *PGSQLProvider) getFolders(limit, offset int, order string) ([]vfs.BaseVirtualFolder, error) {
    return sqlCommonGetFolders(limit, offset, order, p.dbHandle)
}

func (p *PGSQLProvider) getFolderByName(name string) (vfs.BaseVirtualFolder, error) {
    ctx, cancel := context.WithTimeout(context.Background(), defaultSQLQueryTimeout)
    defer cancel()
    return sqlCommonGetFolderByName(ctx, name, p.dbHandle)
}

func (p *PGSQLProvider) addFolder(folder *vfs.BaseVirtualFolder) error {
    return sqlCommonAddFolder(folder, p.dbHandle)
}

func (p *PGSQLProvider) updateFolder(folder *vfs.BaseVirtualFolder) error {
    return sqlCommonUpdateFolder(folder, p.dbHandle)
}

func (p *PGSQLProvider) deleteFolder(folder *vfs.BaseVirtualFolder) error {
    return sqlCommonDeleteFolder(folder, p.dbHandle)
}

func (p *PGSQLProvider) updateFolderQuota(name string, filesAdd int, sizeAdd int64, reset bool) error {
    return sqlCommonUpdateFolderQuota(name, filesAdd, sizeAdd, reset, p.dbHandle)
}

func (p *PGSQLProvider) getUsedFolderQuota(name string) (int, int64, error) {
    return sqlCommonGetFolderUsedQuota(name, p.dbHandle)
}

func (p *PGSQLProvider) adminExists(username string) (Admin, error) {
    return sqlCommonGetAdminByUsername(username, p.dbHandle)
}

func (p *PGSQLProvider) addAdmin(admin *Admin) error {
    return sqlCommonAddAdmin(admin, p.dbHandle)
}

func (p *PGSQLProvider) updateAdmin(admin *Admin) error {
    return sqlCommonUpdateAdmin(admin, p.dbHandle)
}

func (p *PGSQLProvider) deleteAdmin(admin *Admin) error {
    return sqlCommonDeleteAdmin(admin, p.dbHandle)
}

func (p *PGSQLProvider) getAdmins(limit int, offset int, order string) ([]Admin, error) {
    return sqlCommonGetAdmins(limit, offset, order, p.dbHandle)
}

func (p *PGSQLProvider) dumpAdmins() ([]Admin, error) {
    return sqlCommonDumpAdmins(p.dbHandle)
}

func (p *PGSQLProvider) validateAdminAndPass(username, password, ip string) (Admin, error) {
    return sqlCommonValidateAdminAndPass(username, password, ip, p.dbHandle)
}

func (p *PGSQLProvider) close() error {
    return p.dbHandle.Close()
}

func (p *PGSQLProvider) reloadConfig() error {
    return nil
}

// initializeDatabase creates the initial database structure
func (p *PGSQLProvider) initializeDatabase() error {
    dbVersion, err := sqlCommonGetDatabaseVersion(p.dbHandle, false)
    if err == nil && dbVersion.Version > 0 {
        return ErrNoInitRequired
    }
    initialSQL := strings.ReplaceAll(pgsqlInitial, "{{schema_version}}", sqlTableSchemaVersion)
    initialSQL = strings.ReplaceAll(initialSQL, "{{admins}}", sqlTableAdmins)
    initialSQL = strings.ReplaceAll(initialSQL, "{{folders}}", sqlTableFolders)
    initialSQL = strings.ReplaceAll(initialSQL, "{{users}}", sqlTableUsers)
    initialSQL = strings.ReplaceAll(initialSQL, "{{folders_mapping}}", sqlTableFoldersMapping)
    initialSQL = strings.ReplaceAll(initialSQL, "{{prefix}}", config.SQLTablesPrefix)
    if config.Driver == CockroachDataProviderName {
        // Cockroach does not support deferrable constraint validation, we don't need it,
        // we keep these definitions for the PostgreSQL driver to avoid changes for users
        // upgrading from old SFTPGo versions
        initialSQL = strings.ReplaceAll(initialSQL, "DEFERRABLE INITIALLY DEFERRED", "")
    }

    return sqlCommonExecSQLAndUpdateDBVersion(p.dbHandle, []string{initialSQL}, 8)
}
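
// Editor's note: a minimal sketch (made-up DDL, hypothetical helper) of the
// CockroachDB compatibility tweak in initializeDatabase above: CockroachDB
// does not support deferrable constraint validation, so the clause is simply
// stripped from the otherwise shared PostgreSQL DDL before execution.
func exampleStripDeferrable() string {
    ddl := `FOREIGN KEY ("folder_id") REFERENCES "folders" ("id") ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED;`
    return strings.ReplaceAll(ddl, "DEFERRABLE INITIALLY DEFERRED", "")
}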

func (p *PGSQLProvider) migrateDatabase() error {
    dbVersion, err := sqlCommonGetDatabaseVersion(p.dbHandle, true)
    if err != nil {
        return err
    }

    switch version := dbVersion.Version; {
    case version == sqlDatabaseVersion:
        providerLog(logger.LevelDebug, "sql database is up to date, current version: %v", version)
        return ErrNoInitRequired
    case version < 8:
        err = fmt.Errorf("database version %v is too old, please see the upgrading docs", version)
        providerLog(logger.LevelError, "%v", err)
        logger.ErrorToConsole("%v", err)
        return err
    case version == 8:
        return updatePGSQLDatabaseFromV8(p.dbHandle)
    case version == 9:
        return updatePGSQLDatabaseFromV9(p.dbHandle)
    default:
        if version > sqlDatabaseVersion {
            providerLog(logger.LevelWarn, "database version %v is newer than the supported one: %v", version,
                sqlDatabaseVersion)
            logger.WarnToConsole("database version %v is newer than the supported one: %v", version,
                sqlDatabaseVersion)
            return nil
        }
        return fmt.Errorf("database version not handled: %v", version)
    }
}

func (p *PGSQLProvider) revertDatabase(targetVersion int) error {
    dbVersion, err := sqlCommonGetDatabaseVersion(p.dbHandle, true)
    if err != nil {
        return err
    }
    if dbVersion.Version == targetVersion {
        return errors.New("the current version matches the target version, nothing to do")
    }

    switch dbVersion.Version {
    case 9:
        return downgradePGSQLDatabaseFromV9(p.dbHandle)
    case 10:
        return downgradePGSQLDatabaseFromV10(p.dbHandle)
    default:
        return fmt.Errorf("database version not handled: %v", dbVersion.Version)
    }
}

func updatePGSQLDatabaseFromV8(dbHandle *sql.DB) error {
    if err := updatePGSQLDatabaseFrom8To9(dbHandle); err != nil {
        return err
    }
    return updatePGSQLDatabaseFromV9(dbHandle)
}

func updatePGSQLDatabaseFromV9(dbHandle *sql.DB) error {
    return updatePGSQLDatabaseFrom9To10(dbHandle)
}

func downgradePGSQLDatabaseFromV9(dbHandle *sql.DB) error {
    return downgradePGSQLDatabaseFrom9To8(dbHandle)
}

func downgradePGSQLDatabaseFromV10(dbHandle *sql.DB) error {
    if err := downgradePGSQLDatabaseFrom10To9(dbHandle); err != nil {
        return err
    }
    return downgradePGSQLDatabaseFromV9(dbHandle)
}

func updatePGSQLDatabaseFrom8To9(dbHandle *sql.DB) error {
    logger.InfoToConsole("updating database version: 8 -> 9")
    providerLog(logger.LevelInfo, "updating database version: 8 -> 9")
    sql := strings.ReplaceAll(pgsqlV9SQL, "{{users}}", sqlTableUsers)
    sql = strings.ReplaceAll(sql, "{{admins}}", sqlTableAdmins)
    sql = strings.ReplaceAll(sql, "{{folders}}", sqlTableFolders)
    return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, []string{sql}, 9)
}

func downgradePGSQLDatabaseFrom9To8(dbHandle *sql.DB) error {
    logger.InfoToConsole("downgrading database version: 9 -> 8")
    providerLog(logger.LevelInfo, "downgrading database version: 9 -> 8")
    sql := strings.ReplaceAll(pgsqlV9DownSQL, "{{users}}", sqlTableUsers)
    sql = strings.ReplaceAll(sql, "{{admins}}", sqlTableAdmins)
    sql = strings.ReplaceAll(sql, "{{folders}}", sqlTableFolders)
    return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, []string{sql}, 8)
}

func updatePGSQLDatabaseFrom9To10(dbHandle *sql.DB) error {
    return sqlCommonUpdateDatabaseFrom9To10(dbHandle)
}

func downgradePGSQLDatabaseFrom10To9(dbHandle *sql.DB) error {
    return sqlCommonDowngradeDatabaseFrom10To9(dbHandle)
}

@@ -1,17 +0,0 @@
// +build nopgsql

package dataprovider

import (
    "errors"

    "github.com/drakkan/sftpgo/version"
)

func init() {
    version.AddFeature("-pgsql")
}

func initializePGSQLProvider() error {
    return errors.New("PostgreSQL disabled at build time")
}
@@ -1,369 +0,0 @@
// +build !nosqlite

package dataprovider

import (
    "context"
    "crypto/x509"
    "database/sql"
    "errors"
    "fmt"
    "path/filepath"
    "strings"

    // we import go-sqlite3 here to be able to disable SQLite support using a build tag
    _ "github.com/mattn/go-sqlite3"

    "github.com/drakkan/sftpgo/logger"
    "github.com/drakkan/sftpgo/utils"
    "github.com/drakkan/sftpgo/version"
    "github.com/drakkan/sftpgo/vfs"
)

const (
    sqliteInitialSQL = `CREATE TABLE "{{schema_version}}" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "version" integer NOT NULL);
CREATE TABLE "{{admins}}" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "username" varchar(255) NOT NULL UNIQUE,
"password" varchar(255) NOT NULL, "email" varchar(255) NULL, "status" integer NOT NULL, "permissions" text NOT NULL,
"filters" text NULL, "additional_info" text NULL);
CREATE TABLE "{{folders}}" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "name" varchar(255) NOT NULL UNIQUE,
"path" varchar(512) NULL, "used_quota_size" bigint NOT NULL, "used_quota_files" integer NOT NULL,
"last_quota_update" bigint NOT NULL);
CREATE TABLE "{{users}}" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "username" varchar(255) NOT NULL UNIQUE,
"password" text NULL, "public_keys" text NULL, "home_dir" varchar(512) NOT NULL, "uid" integer NOT NULL, "gid" integer NOT NULL,
"max_sessions" integer NOT NULL, "quota_size" bigint NOT NULL, "quota_files" integer NOT NULL, "permissions" text NOT NULL,
"used_quota_size" bigint NOT NULL, "used_quota_files" integer NOT NULL, "last_quota_update" bigint NOT NULL,
"upload_bandwidth" integer NOT NULL, "download_bandwidth" integer NOT NULL, "expiration_date" bigint NOT NULL,
"last_login" bigint NOT NULL, "status" integer NOT NULL, "filters" text NULL, "filesystem" text NULL,
"additional_info" text NULL);
CREATE TABLE "{{folders_mapping}}" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "virtual_path" varchar(512) NOT NULL,
"quota_size" bigint NOT NULL, "quota_files" integer NOT NULL, "folder_id" integer NOT NULL REFERENCES "{{folders}}" ("id")
ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED, "user_id" integer NOT NULL REFERENCES "{{users}}" ("id") ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
CONSTRAINT "{{prefix}}unique_mapping" UNIQUE ("user_id", "folder_id"));
CREATE INDEX "{{prefix}}folders_mapping_folder_id_idx" ON "{{folders_mapping}}" ("folder_id");
CREATE INDEX "{{prefix}}folders_mapping_user_id_idx" ON "{{folders_mapping}}" ("user_id");
INSERT INTO {{schema_version}} (version) VALUES (8);
`
    sqliteV9SQL = `ALTER TABLE "{{admins}}" ADD COLUMN "description" varchar(512) NULL;
ALTER TABLE "{{folders}}" ADD COLUMN "description" varchar(512) NULL;
ALTER TABLE "{{folders}}" ADD COLUMN "filesystem" text NULL;
ALTER TABLE "{{users}}" ADD COLUMN "description" varchar(512) NULL;
`
    sqliteV9DownSQL = `CREATE TABLE "new__users" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "status" integer NOT NULL,
"expiration_date" bigint NOT NULL, "username" varchar(255) NOT NULL UNIQUE, "password" text NULL, "public_keys" text NULL,
"home_dir" varchar(512) NOT NULL, "uid" integer NOT NULL, "gid" integer NOT NULL, "max_sessions" integer NOT NULL,
"quota_size" bigint NOT NULL, "quota_files" integer NOT NULL, "permissions" text NOT NULL, "used_quota_size" bigint NOT NULL,
"used_quota_files" integer NOT NULL, "last_quota_update" bigint NOT NULL, "upload_bandwidth" integer NOT NULL,
"download_bandwidth" integer NOT NULL, "last_login" bigint NOT NULL, "filters" text NULL, "filesystem" text NULL,
"additional_info" text NULL);
INSERT INTO "new__users" ("id", "status", "expiration_date", "username", "password", "public_keys", "home_dir", "uid", "gid",
"max_sessions", "quota_size", "quota_files", "permissions", "used_quota_size", "used_quota_files", "last_quota_update",
"upload_bandwidth", "download_bandwidth", "last_login", "filters", "filesystem", "additional_info")
SELECT "id", "status", "expiration_date", "username", "password", "public_keys", "home_dir", "uid", "gid", "max_sessions",
"quota_size", "quota_files", "permissions", "used_quota_size", "used_quota_files", "last_quota_update", "upload_bandwidth",
"download_bandwidth", "last_login", "filters", "filesystem", "additional_info" FROM "{{users}}";
DROP TABLE "{{users}}";
ALTER TABLE "new__users" RENAME TO "{{users}}";
CREATE TABLE "new__admins" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "username" varchar(255) NOT NULL UNIQUE,
"password" varchar(255) NOT NULL, "email" varchar(255) NULL, "status" integer NOT NULL, "permissions" text NOT NULL,
"filters" text NULL, "additional_info" text NULL);
INSERT INTO "new__admins" ("id", "username", "password", "email", "status", "permissions", "filters", "additional_info")
SELECT "id", "username", "password", "email", "status", "permissions", "filters", "additional_info" FROM "{{admins}}";
DROP TABLE "{{admins}}";
ALTER TABLE "new__admins" RENAME TO "{{admins}}";
CREATE TABLE "new__folders" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "name" varchar(255) NOT NULL UNIQUE,
"path" varchar(512) NULL, "used_quota_size" bigint NOT NULL, "used_quota_files" integer NOT NULL, "last_quota_update" bigint NOT NULL);
INSERT INTO "new__folders" ("id", "name", "path", "used_quota_size", "used_quota_files", "last_quota_update")
SELECT "id", "name", "path", "used_quota_size", "used_quota_files", "last_quota_update" FROM "{{folders}}";
DROP TABLE "{{folders}}";
ALTER TABLE "new__folders" RENAME TO "{{folders}}";
`
)

// SQLiteProvider auth provider for SQLite database
type SQLiteProvider struct {
    dbHandle *sql.DB
}

func init() {
    version.AddFeature("+sqlite")
}

func initializeSQLiteProvider(basePath string) error {
    var err error
    var connectionString string

    if config.ConnectionString == "" {
        dbPath := config.Name
        if !utils.IsFileInputValid(dbPath) {
            return fmt.Errorf("invalid database path: %#v", dbPath)
        }
        if !filepath.IsAbs(dbPath) {
            dbPath = filepath.Join(basePath, dbPath)
        }
        connectionString = fmt.Sprintf("file:%v?cache=shared&_foreign_keys=1", dbPath)
    } else {
        connectionString = config.ConnectionString
    }
    dbHandle, err := sql.Open("sqlite3", connectionString)
    if err == nil {
        providerLog(logger.LevelDebug, "sqlite database handle created, connection string: %#v", connectionString)
        dbHandle.SetMaxOpenConns(1)
        provider = &SQLiteProvider{dbHandle: dbHandle}
    } else {
        providerLog(logger.LevelWarn, "error creating sqlite database handler, connection string: %#v, error: %v",
            connectionString, err)
    }
    return err
}
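
// Editor's note: a hedged example (made-up paths, hypothetical helper) of the
// SQLite connection string built above. Note also that SetMaxOpenConns(1)
// serializes all access through a single connection, which sidesteps SQLite's
// single-writer locking.
func exampleSQLiteDSN() string {
    dbPath := "sftpgo.db"
    if !filepath.IsAbs(dbPath) {
        dbPath = filepath.Join("/var/lib/sftpgo", dbPath)
    }
    // yields: "file:/var/lib/sftpgo/sftpgo.db?cache=shared&_foreign_keys=1"
    return fmt.Sprintf("file:%v?cache=shared&_foreign_keys=1", dbPath)
}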

func (p *SQLiteProvider) checkAvailability() error {
    return sqlCommonCheckAvailability(p.dbHandle)
}

func (p *SQLiteProvider) validateUserAndPass(username, password, ip, protocol string) (User, error) {
    return sqlCommonValidateUserAndPass(username, password, ip, protocol, p.dbHandle)
}

func (p *SQLiteProvider) validateUserAndTLSCert(username, protocol string, tlsCert *x509.Certificate) (User, error) {
    return sqlCommonValidateUserAndTLSCertificate(username, protocol, tlsCert, p.dbHandle)
}

func (p *SQLiteProvider) validateUserAndPubKey(username string, publicKey []byte) (User, string, error) {
    return sqlCommonValidateUserAndPubKey(username, publicKey, p.dbHandle)
}

func (p *SQLiteProvider) updateQuota(username string, filesAdd int, sizeAdd int64, reset bool) error {
    return sqlCommonUpdateQuota(username, filesAdd, sizeAdd, reset, p.dbHandle)
}

func (p *SQLiteProvider) getUsedQuota(username string) (int, int64, error) {
    return sqlCommonGetUsedQuota(username, p.dbHandle)
}

func (p *SQLiteProvider) updateLastLogin(username string) error {
    return sqlCommonUpdateLastLogin(username, p.dbHandle)
}

func (p *SQLiteProvider) userExists(username string) (User, error) {
    return sqlCommonGetUserByUsername(username, p.dbHandle)
}

func (p *SQLiteProvider) addUser(user *User) error {
    return sqlCommonAddUser(user, p.dbHandle)
}

func (p *SQLiteProvider) updateUser(user *User) error {
    return sqlCommonUpdateUser(user, p.dbHandle)
}

func (p *SQLiteProvider) deleteUser(user *User) error {
    return sqlCommonDeleteUser(user, p.dbHandle)
}

func (p *SQLiteProvider) dumpUsers() ([]User, error) {
    return sqlCommonDumpUsers(p.dbHandle)
}

func (p *SQLiteProvider) getUsers(limit int, offset int, order string) ([]User, error) {
    return sqlCommonGetUsers(limit, offset, order, p.dbHandle)
}

func (p *SQLiteProvider) dumpFolders() ([]vfs.BaseVirtualFolder, error) {
    return sqlCommonDumpFolders(p.dbHandle)
}

func (p *SQLiteProvider) getFolders(limit, offset int, order string) ([]vfs.BaseVirtualFolder, error) {
    return sqlCommonGetFolders(limit, offset, order, p.dbHandle)
}

func (p *SQLiteProvider) getFolderByName(name string) (vfs.BaseVirtualFolder, error) {
    ctx, cancel := context.WithTimeout(context.Background(), defaultSQLQueryTimeout)
    defer cancel()
    return sqlCommonGetFolderByName(ctx, name, p.dbHandle)
}

func (p *SQLiteProvider) addFolder(folder *vfs.BaseVirtualFolder) error {
    return sqlCommonAddFolder(folder, p.dbHandle)
}

func (p *SQLiteProvider) updateFolder(folder *vfs.BaseVirtualFolder) error {
    return sqlCommonUpdateFolder(folder, p.dbHandle)
}

func (p *SQLiteProvider) deleteFolder(folder *vfs.BaseVirtualFolder) error {
    return sqlCommonDeleteFolder(folder, p.dbHandle)
}

func (p *SQLiteProvider) updateFolderQuota(name string, filesAdd int, sizeAdd int64, reset bool) error {
    return sqlCommonUpdateFolderQuota(name, filesAdd, sizeAdd, reset, p.dbHandle)
}

func (p *SQLiteProvider) getUsedFolderQuota(name string) (int, int64, error) {
    return sqlCommonGetFolderUsedQuota(name, p.dbHandle)
}

func (p *SQLiteProvider) adminExists(username string) (Admin, error) {
    return sqlCommonGetAdminByUsername(username, p.dbHandle)
}

func (p *SQLiteProvider) addAdmin(admin *Admin) error {
    return sqlCommonAddAdmin(admin, p.dbHandle)
}

func (p *SQLiteProvider) updateAdmin(admin *Admin) error {
    return sqlCommonUpdateAdmin(admin, p.dbHandle)
}

func (p *SQLiteProvider) deleteAdmin(admin *Admin) error {
    return sqlCommonDeleteAdmin(admin, p.dbHandle)
}

func (p *SQLiteProvider) getAdmins(limit int, offset int, order string) ([]Admin, error) {
    return sqlCommonGetAdmins(limit, offset, order, p.dbHandle)
}

func (p *SQLiteProvider) dumpAdmins() ([]Admin, error) {
    return sqlCommonDumpAdmins(p.dbHandle)
}

func (p *SQLiteProvider) validateAdminAndPass(username, password, ip string) (Admin, error) {
    return sqlCommonValidateAdminAndPass(username, password, ip, p.dbHandle)
}

func (p *SQLiteProvider) close() error {
    return p.dbHandle.Close()
}

func (p *SQLiteProvider) reloadConfig() error {
    return nil
}

// initializeDatabase creates the initial database structure
func (p *SQLiteProvider) initializeDatabase() error {
    dbVersion, err := sqlCommonGetDatabaseVersion(p.dbHandle, false)
    if err == nil && dbVersion.Version > 0 {
        return ErrNoInitRequired
    }
    initialSQL := strings.ReplaceAll(sqliteInitialSQL, "{{schema_version}}", sqlTableSchemaVersion)
    initialSQL = strings.ReplaceAll(initialSQL, "{{admins}}", sqlTableAdmins)
    initialSQL = strings.ReplaceAll(initialSQL, "{{folders}}", sqlTableFolders)
    initialSQL = strings.ReplaceAll(initialSQL, "{{users}}", sqlTableUsers)
    initialSQL = strings.ReplaceAll(initialSQL, "{{folders_mapping}}", sqlTableFoldersMapping)
    initialSQL = strings.ReplaceAll(initialSQL, "{{prefix}}", config.SQLTablesPrefix)

    return sqlCommonExecSQLAndUpdateDBVersion(p.dbHandle, []string{initialSQL}, 8)
}

func (p *SQLiteProvider) migrateDatabase() error {
    dbVersion, err := sqlCommonGetDatabaseVersion(p.dbHandle, true)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
switch version := dbVersion.Version; {
|
|
||||||
case version == sqlDatabaseVersion:
|
|
||||||
providerLog(logger.LevelDebug, "sql database is up to date, current version: %v", version)
|
|
||||||
return ErrNoInitRequired
|
|
||||||
case version < 8:
|
|
||||||
err = fmt.Errorf("database version %v is too old, please see the upgrading docs", version)
|
|
||||||
providerLog(logger.LevelError, "%v", err)
|
|
||||||
logger.ErrorToConsole("%v", err)
|
|
||||||
return err
|
|
||||||
case version == 8:
|
|
||||||
return updateSQLiteDatabaseFromV8(p.dbHandle)
|
|
||||||
case version == 9:
|
|
||||||
return updateSQLiteDatabaseFromV9(p.dbHandle)
|
|
||||||
default:
|
|
||||||
if version > sqlDatabaseVersion {
|
|
||||||
providerLog(logger.LevelWarn, "database version %v is newer than the supported one: %v", version,
|
|
||||||
sqlDatabaseVersion)
|
|
||||||
logger.WarnToConsole("database version %v is newer than the supported one: %v", version,
|
|
||||||
sqlDatabaseVersion)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return fmt.Errorf("database version not handled: %v", version)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *SQLiteProvider) revertDatabase(targetVersion int) error {
|
|
||||||
dbVersion, err := sqlCommonGetDatabaseVersion(p.dbHandle, true)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
	if dbVersion.Version == targetVersion {
		return errors.New("current version matches the target version, nothing to do")
	}

	switch dbVersion.Version {
	case 9:
		return downgradeSQLiteDatabaseFromV9(p.dbHandle)
	case 10:
		return downgradeSQLiteDatabaseFromV10(p.dbHandle)
	default:
		return fmt.Errorf("database version not handled: %v", dbVersion.Version)
	}
}

func updateSQLiteDatabaseFromV8(dbHandle *sql.DB) error {
	if err := updateSQLiteDatabaseFrom8To9(dbHandle); err != nil {
		return err
	}
	return updateSQLiteDatabaseFromV9(dbHandle)
}

func updateSQLiteDatabaseFromV9(dbHandle *sql.DB) error {
	return updateSQLiteDatabaseFrom9To10(dbHandle)
}

func downgradeSQLiteDatabaseFromV9(dbHandle *sql.DB) error {
	return downgradeSQLiteDatabaseFrom9To8(dbHandle)
}

func downgradeSQLiteDatabaseFromV10(dbHandle *sql.DB) error {
	if err := downgradeSQLiteDatabaseFrom10To9(dbHandle); err != nil {
		return err
	}
	return downgradeSQLiteDatabaseFromV9(dbHandle)
}

func updateSQLiteDatabaseFrom8To9(dbHandle *sql.DB) error {
	logger.InfoToConsole("updating database version: 8 -> 9")
	providerLog(logger.LevelInfo, "updating database version: 8 -> 9")
	sql := strings.ReplaceAll(sqliteV9SQL, "{{users}}", sqlTableUsers)
	sql = strings.ReplaceAll(sql, "{{admins}}", sqlTableAdmins)
	sql = strings.ReplaceAll(sql, "{{folders}}", sqlTableFolders)
	return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, []string{sql}, 9)
}

func downgradeSQLiteDatabaseFrom9To8(dbHandle *sql.DB) error {
	logger.InfoToConsole("downgrading database version: 9 -> 8")
	providerLog(logger.LevelInfo, "downgrading database version: 9 -> 8")
	if err := setPragmaFK(dbHandle, "OFF"); err != nil {
		return err
	}
	sql := strings.ReplaceAll(sqliteV9DownSQL, "{{users}}", sqlTableUsers)
	sql = strings.ReplaceAll(sql, "{{admins}}", sqlTableAdmins)
	sql = strings.ReplaceAll(sql, "{{folders}}", sqlTableFolders)
	if err := sqlCommonExecSQLAndUpdateDBVersion(dbHandle, []string{sql}, 8); err != nil {
		return err
	}
	return setPragmaFK(dbHandle, "ON")
}

func updateSQLiteDatabaseFrom9To10(dbHandle *sql.DB) error {
	return sqlCommonUpdateDatabaseFrom9To10(dbHandle)
}

func downgradeSQLiteDatabaseFrom10To9(dbHandle *sql.DB) error {
	return sqlCommonDowngradeDatabaseFrom10To9(dbHandle)
}

func setPragmaFK(dbHandle *sql.DB, value string) error {
	ctx, cancel := context.WithTimeout(context.Background(), longSQLQueryTimeout)
	defer cancel()

	sql := fmt.Sprintf("PRAGMA foreign_keys=%v;", value)

	_, err := dbHandle.ExecContext(ctx, sql)
	return err
}

@@ -1,17 +0,0 @@
// +build nosqlite

package dataprovider

import (
	"errors"

	"github.com/drakkan/sftpgo/version"
)

func init() {
	version.AddFeature("-sqlite")
}

func initializeSQLiteProvider(basePath string) error {
	return errors.New("SQLite disabled at build time")
}

@@ -1,228 +0,0 @@
package dataprovider

import (
	"fmt"
	"strconv"
	"strings"

	"github.com/drakkan/sftpgo/vfs"
)

const (
	selectUserFields = "id,username,password,public_keys,home_dir,uid,gid,max_sessions,quota_size,quota_files,permissions,used_quota_size," +
		"used_quota_files,last_quota_update,upload_bandwidth,download_bandwidth,expiration_date,last_login,status,filters,filesystem," +
		"additional_info,description"
	selectFolderFields = "id,path,used_quota_size,used_quota_files,last_quota_update,name,description,filesystem"
	selectAdminFields  = "id,username,password,status,email,permissions,filters,additional_info,description"
)

func getSQLPlaceholders() []string {
	var placeholders []string
	for i := 1; i <= 20; i++ {
		if config.Driver == PGSQLDataProviderName || config.Driver == CockroachDataProviderName {
			placeholders = append(placeholders, fmt.Sprintf("$%v", i))
		} else {
			placeholders = append(placeholders, "?")
		}
	}
	return placeholders
}

func getAdminByUsernameQuery() string {
	return fmt.Sprintf(`SELECT %v FROM %v WHERE username = %v`, selectAdminFields, sqlTableAdmins, sqlPlaceholders[0])
}

func getAdminsQuery(order string) string {
	return fmt.Sprintf(`SELECT %v FROM %v ORDER BY username %v LIMIT %v OFFSET %v`, selectAdminFields, sqlTableAdmins,
		order, sqlPlaceholders[0], sqlPlaceholders[1])
}

func getDumpAdminsQuery() string {
	return fmt.Sprintf(`SELECT %v FROM %v`, selectAdminFields, sqlTableAdmins)
}

func getAddAdminQuery() string {
	return fmt.Sprintf(`INSERT INTO %v (username,password,status,email,permissions,filters,additional_info,description)
VALUES (%v,%v,%v,%v,%v,%v,%v,%v)`, sqlTableAdmins, sqlPlaceholders[0], sqlPlaceholders[1],
		sqlPlaceholders[2], sqlPlaceholders[3], sqlPlaceholders[4], sqlPlaceholders[5], sqlPlaceholders[6], sqlPlaceholders[7])
}

func getUpdateAdminQuery() string {
	return fmt.Sprintf(`UPDATE %v SET password=%v,status=%v,email=%v,permissions=%v,filters=%v,additional_info=%v,description=%v
WHERE username = %v`, sqlTableAdmins, sqlPlaceholders[0], sqlPlaceholders[1], sqlPlaceholders[2],
		sqlPlaceholders[3], sqlPlaceholders[4], sqlPlaceholders[5], sqlPlaceholders[6], sqlPlaceholders[7])
}

func getDeleteAdminQuery() string {
	return fmt.Sprintf(`DELETE FROM %v WHERE username = %v`, sqlTableAdmins, sqlPlaceholders[0])
}

func getUserByUsernameQuery() string {
	return fmt.Sprintf(`SELECT %v FROM %v WHERE username = %v`, selectUserFields, sqlTableUsers, sqlPlaceholders[0])
}

func getUsersQuery(order string) string {
	return fmt.Sprintf(`SELECT %v FROM %v ORDER BY username %v LIMIT %v OFFSET %v`, selectUserFields, sqlTableUsers,
		order, sqlPlaceholders[0], sqlPlaceholders[1])
}

func getDumpUsersQuery() string {
	return fmt.Sprintf(`SELECT %v FROM %v`, selectUserFields, sqlTableUsers)
}

func getDumpFoldersQuery() string {
	return fmt.Sprintf(`SELECT %v FROM %v`, selectFolderFields, sqlTableFolders)
}

func getUpdateQuotaQuery(reset bool) string {
	if reset {
		return fmt.Sprintf(`UPDATE %v SET used_quota_size = %v,used_quota_files = %v,last_quota_update = %v
WHERE username = %v`, sqlTableUsers, sqlPlaceholders[0], sqlPlaceholders[1], sqlPlaceholders[2], sqlPlaceholders[3])
	}
	return fmt.Sprintf(`UPDATE %v SET used_quota_size = used_quota_size + %v,used_quota_files = used_quota_files + %v,last_quota_update = %v
WHERE username = %v`, sqlTableUsers, sqlPlaceholders[0], sqlPlaceholders[1], sqlPlaceholders[2], sqlPlaceholders[3])
}

func getUpdateLastLoginQuery() string {
	return fmt.Sprintf(`UPDATE %v SET last_login = %v WHERE username = %v`, sqlTableUsers, sqlPlaceholders[0], sqlPlaceholders[1])
}

func getQuotaQuery() string {
	return fmt.Sprintf(`SELECT used_quota_size,used_quota_files FROM %v WHERE username = %v`, sqlTableUsers,
		sqlPlaceholders[0])
}

func getAddUserQuery() string {
	return fmt.Sprintf(`INSERT INTO %v (username,password,public_keys,home_dir,uid,gid,max_sessions,quota_size,quota_files,permissions,
used_quota_size,used_quota_files,last_quota_update,upload_bandwidth,download_bandwidth,status,last_login,expiration_date,filters,
filesystem,additional_info,description)
VALUES (%v,%v,%v,%v,%v,%v,%v,%v,%v,%v,0,0,0,%v,%v,%v,0,%v,%v,%v,%v,%v)`, sqlTableUsers, sqlPlaceholders[0], sqlPlaceholders[1],
		sqlPlaceholders[2], sqlPlaceholders[3], sqlPlaceholders[4], sqlPlaceholders[5], sqlPlaceholders[6], sqlPlaceholders[7],
		sqlPlaceholders[8], sqlPlaceholders[9], sqlPlaceholders[10], sqlPlaceholders[11], sqlPlaceholders[12], sqlPlaceholders[13],
		sqlPlaceholders[14], sqlPlaceholders[15], sqlPlaceholders[16], sqlPlaceholders[17])
}

func getUpdateUserQuery() string {
	return fmt.Sprintf(`UPDATE %v SET password=%v,public_keys=%v,home_dir=%v,uid=%v,gid=%v,max_sessions=%v,quota_size=%v,
quota_files=%v,permissions=%v,upload_bandwidth=%v,download_bandwidth=%v,status=%v,expiration_date=%v,filters=%v,filesystem=%v,
additional_info=%v,description=%v WHERE id = %v`, sqlTableUsers, sqlPlaceholders[0], sqlPlaceholders[1], sqlPlaceholders[2], sqlPlaceholders[3],
		sqlPlaceholders[4], sqlPlaceholders[5], sqlPlaceholders[6], sqlPlaceholders[7], sqlPlaceholders[8], sqlPlaceholders[9],
		sqlPlaceholders[10], sqlPlaceholders[11], sqlPlaceholders[12], sqlPlaceholders[13], sqlPlaceholders[14], sqlPlaceholders[15],
		sqlPlaceholders[16], sqlPlaceholders[17])
}

func getDeleteUserQuery() string {
	return fmt.Sprintf(`DELETE FROM %v WHERE id = %v`, sqlTableUsers, sqlPlaceholders[0])
}

func getFolderByNameQuery() string {
	return fmt.Sprintf(`SELECT %v FROM %v WHERE name = %v`, selectFolderFields, sqlTableFolders, sqlPlaceholders[0])
}

func checkFolderNameQuery() string {
	return fmt.Sprintf(`SELECT name FROM %v WHERE name = %v`, sqlTableFolders, sqlPlaceholders[0])
}

func getAddFolderQuery() string {
	return fmt.Sprintf(`INSERT INTO %v (path,used_quota_size,used_quota_files,last_quota_update,name,description,filesystem)
VALUES (%v,%v,%v,%v,%v,%v,%v)`, sqlTableFolders, sqlPlaceholders[0], sqlPlaceholders[1], sqlPlaceholders[2],
		sqlPlaceholders[3], sqlPlaceholders[4], sqlPlaceholders[5], sqlPlaceholders[6])
}

func getUpdateFolderQuery() string {
	return fmt.Sprintf(`UPDATE %v SET path=%v,description=%v,filesystem=%v WHERE name = %v`, sqlTableFolders, sqlPlaceholders[0],
		sqlPlaceholders[1], sqlPlaceholders[2], sqlPlaceholders[3])
}

func getDeleteFolderQuery() string {
	return fmt.Sprintf(`DELETE FROM %v WHERE id = %v`, sqlTableFolders, sqlPlaceholders[0])
}

func getClearFolderMappingQuery() string {
	return fmt.Sprintf(`DELETE FROM %v WHERE user_id = (SELECT id FROM %v WHERE username = %v)`, sqlTableFoldersMapping,
		sqlTableUsers, sqlPlaceholders[0])
}

func getAddFolderMappingQuery() string {
	return fmt.Sprintf(`INSERT INTO %v (virtual_path,quota_size,quota_files,folder_id,user_id)
VALUES (%v,%v,%v,%v,(SELECT id FROM %v WHERE username = %v))`, sqlTableFoldersMapping, sqlPlaceholders[0],
		sqlPlaceholders[1], sqlPlaceholders[2], sqlPlaceholders[3], sqlTableUsers, sqlPlaceholders[4])
}

func getFoldersQuery(order string) string {
	return fmt.Sprintf(`SELECT %v FROM %v ORDER BY name %v LIMIT %v OFFSET %v`, selectFolderFields, sqlTableFolders,
		order, sqlPlaceholders[0], sqlPlaceholders[1])
}

func getUpdateFolderQuotaQuery(reset bool) string {
	if reset {
		return fmt.Sprintf(`UPDATE %v SET used_quota_size = %v,used_quota_files = %v,last_quota_update = %v
WHERE name = %v`, sqlTableFolders, sqlPlaceholders[0], sqlPlaceholders[1], sqlPlaceholders[2], sqlPlaceholders[3])
	}
	return fmt.Sprintf(`UPDATE %v SET used_quota_size = used_quota_size + %v,used_quota_files = used_quota_files + %v,last_quota_update = %v
WHERE name = %v`, sqlTableFolders, sqlPlaceholders[0], sqlPlaceholders[1], sqlPlaceholders[2], sqlPlaceholders[3])
}

func getQuotaFolderQuery() string {
	return fmt.Sprintf(`SELECT used_quota_size,used_quota_files FROM %v WHERE name = %v`, sqlTableFolders,
		sqlPlaceholders[0])
}

func getRelatedFoldersForUsersQuery(users []User) string {
	var sb strings.Builder
	for _, u := range users {
		if sb.Len() == 0 {
			sb.WriteString("(")
		} else {
			sb.WriteString(",")
		}
		sb.WriteString(strconv.FormatInt(u.ID, 10))
	}
	if sb.Len() > 0 {
		sb.WriteString(")")
	}
	return fmt.Sprintf(`SELECT f.id,f.name,f.path,f.used_quota_size,f.used_quota_files,f.last_quota_update,fm.virtual_path,
fm.quota_size,fm.quota_files,fm.user_id,f.filesystem,f.description FROM %v f INNER JOIN %v fm ON f.id = fm.folder_id WHERE
fm.user_id IN %v ORDER BY fm.user_id`, sqlTableFolders, sqlTableFoldersMapping, sb.String())
}

func getRelatedUsersForFoldersQuery(folders []vfs.BaseVirtualFolder) string {
	var sb strings.Builder
	for _, f := range folders {
		if sb.Len() == 0 {
			sb.WriteString("(")
		} else {
			sb.WriteString(",")
		}
		sb.WriteString(strconv.FormatInt(f.ID, 10))
	}
	if sb.Len() > 0 {
		sb.WriteString(")")
	}
	return fmt.Sprintf(`SELECT fm.folder_id,u.username FROM %v fm INNER JOIN %v u ON fm.user_id = u.id
WHERE fm.folder_id IN %v ORDER BY fm.folder_id`, sqlTableFoldersMapping, sqlTableUsers, sb.String())
}

func getDatabaseVersionQuery() string {
	return fmt.Sprintf("SELECT version from %v LIMIT 1", sqlTableSchemaVersion)
}

func getUpdateDBVersionQuery() string {
	return fmt.Sprintf(`UPDATE %v SET version=%v`, sqlTableSchemaVersion, sqlPlaceholders[0])
}

func getCompatUserV10FsConfigQuery() string {
	return fmt.Sprintf(`SELECT id,username,filesystem FROM %v`, sqlTableUsers)
}

func updateCompatUserV10FsConfigQuery() string {
	return fmt.Sprintf(`UPDATE %v SET filesystem=%v WHERE id=%v`, sqlTableUsers, sqlPlaceholders[0], sqlPlaceholders[1])
}

func getCompatFolderV10FsConfigQuery() string {
	return fmt.Sprintf(`SELECT id,name,filesystem FROM %v`, sqlTableFolders)
}

func updateCompatFolderV10FsConfigQuery() string {
	return fmt.Sprintf(`UPDATE %v SET filesystem=%v WHERE id=%v`, sqlTableFolders, sqlPlaceholders[0], sqlPlaceholders[1])
}

1200	dataprovider/user.go
189	docker/README.md

@@ -1,189 +0,0 @@
# Official Docker image

SFTPGo provides an official Docker image; it is available on both [Docker Hub](https://hub.docker.com/r/drakkan/sftpgo) and [GitHub Container Registry](https://github.com/users/drakkan/packages/container/package/sftpgo).

## Supported tags and respective Dockerfile links

- [v2.1.0, v2.1, v2, latest](https://github.com/drakkan/sftpgo/blob/v2.1.0/Dockerfile)
- [v2.1.0-alpine, v2.1-alpine, v2-alpine, alpine](https://github.com/drakkan/sftpgo/blob/v2.1.0/Dockerfile.alpine)
- [v2.1.0-slim, v2.1-slim, v2-slim, slim](https://github.com/drakkan/sftpgo/blob/v2.1.0/Dockerfile)
- [v2.1.0-alpine-slim, v2.1-alpine-slim, v2-alpine-slim, alpine-slim](https://github.com/drakkan/sftpgo/blob/v2.1.0/Dockerfile.alpine)
- [edge](../Dockerfile)
- [edge-alpine](../Dockerfile.alpine)
- [edge-slim](../Dockerfile)
- [edge-alpine-slim](../Dockerfile.alpine)

## How to use the SFTPGo image

### Start a `sftpgo` server instance

Starting a SFTPGo instance is simple:

```shell
docker run --name some-sftpgo -p 127.0.0.1:8080:8080 -p 2022:2022 -d "drakkan/sftpgo:tag"
```

... where `some-sftpgo` is the name you want to assign to your container, and `tag` is the tag specifying the SFTPGo version you want. See the list above for relevant tags.

Now visit [http://localhost:8080/web/admin](http://localhost:8080/web/admin), create the first admin and then log in and create a new SFTPGo user. The SFTP service is available on port 2022.

If you don't want to persist any files, for example for testing purposes, you can run an SFTPGo instance like this:

```shell
docker run --rm --name some-sftpgo -p 8080:8080 -p 2022:2022 -d "drakkan/sftpgo:tag"
```

If you prefer GitHub Container Registry to Docker Hub, replace `drakkan/sftpgo:tag` with `ghcr.io/drakkan/sftpgo:tag`.

### Enable FTP service

FTP is disabled by default; you can enable the FTP service by starting the SFTPGo instance in this way:

```shell
docker run --name some-sftpgo \
  -p 8080:8080 \
  -p 2022:2022 \
  -p 2121:2121 \
  -p 50000-50100:50000-50100 \
  -e SFTPGO_FTPD__BINDINGS__0__PORT=2121 \
  -e SFTPGO_FTPD__BINDINGS__0__FORCE_PASSIVE_IP=<your external ip here> \
  -d "drakkan/sftpgo:tag"
```

The FTP service is now available on port 2121 and SFTP on port 2022.

You can change the passive ports range (`50000-50100` by default) by setting the environment variables `SFTPGO_FTPD__PASSIVE_PORT_RANGE__START` and `SFTPGO_FTPD__PASSIVE_PORT_RANGE__END`.
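
For example, a minimal sketch that moves the passive range (the specific port values are illustrative; remember to publish the same range you configure):

```shell
docker run --name some-sftpgo \
  -p 2022:2022 \
  -p 2121:2121 \
  -p 51000-51050:51000-51050 \
  -e SFTPGO_FTPD__BINDINGS__0__PORT=2121 \
  -e SFTPGO_FTPD__PASSIVE_PORT_RANGE__START=51000 \
  -e SFTPGO_FTPD__PASSIVE_PORT_RANGE__END=51050 \
  -d "drakkan/sftpgo:tag"
```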

It is recommended that you provide a certificate and key file to expose FTP over TLS. You should prefer SFTP to FTP even if you configure TLS; please don't blindly enable the old FTP protocol.
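
As a sketch, TLS could be enabled by pointing SFTPGo at a mounted certificate/key pair; the paths below are illustrative, and the `SFTPGO_FTPD__CERTIFICATE_FILE`/`SFTPGO_FTPD__CERTIFICATE_KEY_FILE` variable names follow the ones used in the Dockerfiles later in this changeset:

```shell
docker run --name some-sftpgo \
  -p 2022:2022 \
  -p 2121:2121 \
  -p 50000-50100:50000-50100 \
  -e SFTPGO_FTPD__BINDINGS__0__PORT=2121 \
  -e SFTPGO_FTPD__CERTIFICATE_FILE=/srv/sftpgo/mycert.crt \
  -e SFTPGO_FTPD__CERTIFICATE_KEY_FILE=/srv/sftpgo/mycert.key \
  --mount type=bind,source=/my/own/sftpgodata,target=/srv/sftpgo \
  -d "drakkan/sftpgo:tag"
```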

### Enable WebDAV service

WebDAV is disabled by default; you can enable the WebDAV service by starting the SFTPGo instance in this way:

```shell
docker run --name some-sftpgo \
  -p 8080:8080 \
  -p 2022:2022 \
  -p 10080:10080 \
  -e SFTPGO_WEBDAVD__BINDINGS__0__PORT=10080 \
  -d "drakkan/sftpgo:tag"
```

The WebDAV service is now available on port 10080 and SFTP on port 2022.

It is recommended that you provide a certificate and key file to expose WebDAV over HTTPS.

### Container shell access and viewing SFTPGo logs

The `docker exec` command allows you to run commands inside a Docker container. The following command line will give you a shell inside your `sftpgo` container:

```shell
docker exec -it some-sftpgo sh
```

The logs are available through Docker's container log:

```shell
docker logs some-sftpgo
```

### Where to Store Data

Important note: There are several ways to store data used by applications that run in Docker containers. We encourage users of the SFTPGo images to familiarize themselves with the options available, including:

- Let Docker manage the storage for SFTPGo data by [writing them to disk on the host system using its own internal volume management](https://docs.docker.com/engine/tutorials/dockervolumes/#adding-a-data-volume). This is the default and is easy and fairly transparent to the user. The downside is that the files may be hard to locate for tools and applications that run directly on the host system, i.e. outside containers.
- Create a data directory on the host system (outside the container) and [mount this to a directory visible from inside the container](https://docs.docker.com/engine/tutorials/dockervolumes/#mount-a-host-directory-as-a-data-volume). This places the SFTPGo files in a known location on the host system, and makes it easy for tools and applications on the host system to access the files. The downside is that the user needs to make sure that the directory exists, and that e.g. directory permissions and other security mechanisms on the host system are set up correctly. The SFTPGo image runs using `1000` as UID/GID by default.

The Docker documentation is a good starting point for understanding the different storage options and variations, and there are multiple blogs and forum postings that discuss and give advice in this area. We will simply show the basic procedure here for the latter option above:

1. Create a data directory on a suitable volume on your host system, e.g. `/my/own/sftpgodata`.
2. Create a home directory for the sftpgo container user on your host system, e.g. `/my/own/sftpgohome`.
3. Start your SFTPGo container like this:

```shell
docker run --name some-sftpgo \
  -p 127.0.0.1:8080:8090 \
  -p 2022:2022 \
  --mount type=bind,source=/my/own/sftpgodata,target=/srv/sftpgo \
  --mount type=bind,source=/my/own/sftpgohome,target=/var/lib/sftpgo \
  -e SFTPGO_HTTPD__BINDINGS__0__PORT=8090 \
  -d "drakkan/sftpgo:tag"
```

As you can see, SFTPGo uses two main volumes:

- `/srv/sftpgo` to handle persistent data. The default home directory for SFTP/FTP/WebDAV users is `/srv/sftpgo/data/<username>`. Backups are stored in `/srv/sftpgo/backups`.
- `/var/lib/sftpgo` is the home directory for the sftpgo system user defined inside the container. This is the container working directory too; host keys will be created here when using the default configuration.

If you want to get fine-grained control, you can also mount `/srv/sftpgo/data` and `/srv/sftpgo/backups` as separate volumes instead of mounting `/srv/sftpgo`, as in the sketch below.
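
For example (the host paths here are illustrative):

```shell
docker run --name some-sftpgo \
  -p 127.0.0.1:8080:8080 \
  -p 2022:2022 \
  --mount type=bind,source=/my/own/sftpgodata,target=/srv/sftpgo/data \
  --mount type=bind,source=/my/own/sftpgobackups,target=/srv/sftpgo/backups \
  --mount type=bind,source=/my/own/sftpgohome,target=/var/lib/sftpgo \
  -d "drakkan/sftpgo:tag"
```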

### Configuration

The runtime configuration can be customized via environment variables that you can set passing the `-e` option to the `docker run` command or inside the `environment` section if you are using [docker stack deploy](https://docs.docker.com/engine/reference/commandline/stack_deploy/) or [docker-compose](https://github.com/docker/compose).

Please take a look [here](../docs/full-configuration.md#environment-variables) to learn how to configure SFTPGo via environment variables.
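
As a quick sketch, each configuration key maps to a `SFTPGO_<section>__<key>` variable; the specific keys below are illustrative assumptions, so check the linked documentation for the authoritative names:

```shell
docker run --name some-sftpgo \
  -p 127.0.0.1:8080:8080 \
  -p 2022:2022 \
  -e SFTPGO_COMMON__IDLE_TIMEOUT=30 \
  -e SFTPGO_LOG_FILE_PATH= \
  -d "drakkan/sftpgo:tag"
```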

Alternatively, you can mount your custom configuration file to `/var/lib/sftpgo` or `/var/lib/sftpgo/.config/sftpgo`.

### Loading initial data

Initial data can be loaded in the following ways:

- via the `--loaddata-from` flag or the `SFTPGO_LOADDATA_FROM` environment variable
- by providing a dump file to the memory provider

Please take a look [here](../docs/full-configuration.md) for more details.
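
For example, a sketch that loads a previously exported dump at startup (the dump path is an illustrative assumption and must point to a file visible inside the container):

```shell
docker run --name some-sftpgo \
  -p 8080:8080 \
  -p 2022:2022 \
  --mount type=bind,source=/my/own/sftpgodata,target=/srv/sftpgo \
  -e SFTPGO_LOADDATA_FROM=/srv/sftpgo/backups/dump.json \
  -d "drakkan/sftpgo:tag"
```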

### Running as an arbitrary user

The SFTPGo image runs using `1000` as UID/GID by default. If you know the permissions of your data and/or configuration directory are already set appropriately or you have need of running SFTPGo with a specific UID/GID, it is possible to invoke this image with `--user` set to any value (other than `root/0`) in order to achieve the desired access/configuration:

```shell
$ ls -lnd data
drwxr-xr-x 2 1100 1100 6  7 nov 09.09 data
$ ls -lnd config
drwxr-xr-x 2 1100 1100 6  7 nov 09.19 config
```

With the above directory permissions, you can start a SFTPGo instance like this:

```shell
docker run --name some-sftpgo \
  --user 1100:1100 \
  -p 127.0.0.1:8080:8080 \
  -p 2022:2022 \
  --mount type=bind,source="${PWD}/data",target=/srv/sftpgo \
  --mount type=bind,source="${PWD}/config",target=/var/lib/sftpgo \
  -d "drakkan/sftpgo:tag"
```

Alternatively, build your own image using the official one as a base; here is a sample Dockerfile:

```Dockerfile
FROM drakkan/sftpgo:tag
USER root
RUN chown -R 1100:1100 /etc/sftpgo && chown 1100:1100 /var/lib/sftpgo /srv/sftpgo
USER 1100:1100
```

## Image Variants

The `sftpgo` image comes in many flavors, each designed for a specific use case. The `edge` and `edge-alpine` tags are updated after each new commit.

### `sftpgo:<version>`

This is the de facto image, based on [Debian](https://www.debian.org/), available in [the `debian` official image](https://hub.docker.com/_/debian). If you are unsure about what your needs are, you probably want to use this one.

### `sftpgo:<version>-alpine`

This image is based on the popular [Alpine Linux project](https://alpinelinux.org/), available in [the `alpine` official image](https://hub.docker.com/_/alpine). Alpine Linux is much smaller than most distribution base images (~5MB), and thus leads to much slimmer images in general.

This variant is highly recommended when you want the final image size to be as small as possible. The main caveat to note is that it does use [musl libc](https://musl.libc.org/) instead of [glibc and friends](https://www.etalabs.net/compare_libcs.html), so certain software might run into issues depending on the depth of their libc requirements. However, most software doesn't have an issue with this, so this variant is usually a very safe choice. See [this Hacker News comment thread](https://news.ycombinator.com/item?id=10782897) for more discussion of the issues that might arise and some pro/con comparisons of using Alpine-based images.

### `sftpgo:<suite>-slim`

These tags provide a slimmer image that does not include the optional `git` and `rsync` dependencies.

## Helm Chart

A Helm chart is [available](https://artifacthub.io/packages/helm/sagikazarmark/sftpgo). You can find the source code [here](https://github.com/sagikazarmark/helm-charts/tree/master/charts/sftpgo).

25	docker/scripts/download-plugins.sh (executable file)

@@ -0,0 +1,25 @@
#!/usr/bin/env bash
set -e

ARCH=`uname -m`

case ${ARCH} in
  "x86_64")
    SUFFIX=amd64
    ;;
  "aarch64")
    SUFFIX=arm64
    ;;
  *)
    SUFFIX=ppc64le
    ;;
esac

echo "download plugins for arch ${SUFFIX}"

for PLUGIN in geoipfilter kms pubsub eventstore eventsearch auth
do
  echo "download plugin from https://github.com/sftpgo/sftpgo-plugin-${PLUGIN}/releases/latest/download/sftpgo-plugin-${PLUGIN}-linux-${SUFFIX}"
  curl -L "https://github.com/sftpgo/sftpgo-plugin-${PLUGIN}/releases/latest/download/sftpgo-plugin-${PLUGIN}-linux-${SUFFIX}" --output "/usr/local/bin/sftpgo-plugin-${PLUGIN}"
  chmod 755 "/usr/local/bin/sftpgo-plugin-${PLUGIN}"
done

@@ -1,28 +0,0 @@
#!/usr/bin/env bash

SFTPGO_PUID=${SFTPGO_PUID:-1000}
SFTPGO_PGID=${SFTPGO_PGID:-1000}

if [ "$1" = 'sftpgo' ]; then
	if [ "$(id -u)" = '0' ]; then
		for DIR in "/etc/sftpgo" "/var/lib/sftpgo" "/srv/sftpgo"
		do
			DIR_UID=$(stat -c %u ${DIR})
			DIR_GID=$(stat -c %g ${DIR})
			if [ ${DIR_UID} != ${SFTPGO_PUID} ] || [ ${DIR_GID} != ${SFTPGO_PGID} ]; then
				echo '{"level":"info","time":"'`date +%Y-%m-%dT%H:%M:%S.000`'","sender":"entrypoint","message":"change owner for \"'${DIR}'\" UID: '${SFTPGO_PUID}' GID: '${SFTPGO_PGID}'"}'
				if [ ${DIR} = "/etc/sftpgo" ]; then
					chown -R ${SFTPGO_PUID}:${SFTPGO_PGID} ${DIR}
				else
					chown ${SFTPGO_PUID}:${SFTPGO_PGID} ${DIR}
				fi
			fi
		done
		echo '{"level":"info","time":"'`date +%Y-%m-%dT%H:%M:%S.000`'","sender":"entrypoint","message":"run as UID: '${SFTPGO_PUID}' GID: '${SFTPGO_PGID}'"}'
		exec su-exec ${SFTPGO_PUID}:${SFTPGO_PGID} "$@"
	fi

	exec "$@"
fi

exec "$@"

@@ -1,32 +0,0 @@
#!/usr/bin/env bash

SFTPGO_PUID=${SFTPGO_PUID:-1000}
SFTPGO_PGID=${SFTPGO_PGID:-1000}

if [ "$1" = 'sftpgo' ]; then
	if [ "$(id -u)" = '0' ]; then
		getent passwd ${SFTPGO_PUID} > /dev/null
		HAS_PUID=$?
		getent group ${SFTPGO_PGID} > /dev/null
		HAS_PGID=$?
		if [ ${HAS_PUID} -ne 0 ] || [ ${HAS_PGID} -ne 0 ]; then
			echo '{"level":"info","time":"'`date +%Y-%m-%dT%H:%M:%S.%3N`'","sender":"entrypoint","message":"prepare to run as UID: '${SFTPGO_PUID}' GID: '${SFTPGO_PGID}'"}'
			if [ ${HAS_PGID} -ne 0 ]; then
				echo '{"level":"info","time":"'`date +%Y-%m-%dT%H:%M:%S.%3N`'","sender":"entrypoint","message":"set GID to: '${SFTPGO_PGID}'"}'
				groupmod -g ${SFTPGO_PGID} sftpgo
			fi
			if [ ${HAS_PUID} -ne 0 ]; then
				echo '{"level":"info","time":"'`date +%Y-%m-%dT%H:%M:%S.%3N`'","sender":"entrypoint","message":"set UID to: '${SFTPGO_PUID}'"}'
				usermod -u ${SFTPGO_PUID} sftpgo
			fi
			chown -R ${SFTPGO_PUID}:${SFTPGO_PGID} /etc/sftpgo
			chown ${SFTPGO_PUID}:${SFTPGO_PGID} /var/lib/sftpgo /srv/sftpgo
		fi
		echo '{"level":"info","time":"'`date +%Y-%m-%dT%H:%M:%S.%3N`'","sender":"entrypoint","message":"run as UID: '${SFTPGO_PUID}' GID: '${SFTPGO_PGID}'"}'
		exec gosu ${SFTPGO_PUID}:${SFTPGO_PGID} "$@"
	fi

	exec "$@"
fi

exec "$@"

@@ -1,50 +0,0 @@
FROM golang:alpine as builder

RUN apk add --no-cache git gcc g++ ca-certificates \
	&& go get -v -d github.com/drakkan/sftpgo
WORKDIR /go/src/github.com/drakkan/sftpgo
ARG TAG
ARG FEATURES
# Use --build-arg TAG=LATEST for the latest tag. Use e.g. --build-arg TAG=v1.0.0 for a specific tag/commit. Otherwise HEAD (master) is built.
RUN git checkout $(if [ "${TAG}" = LATEST ]; then echo `git rev-list --tags --max-count=1`; elif [ -n "${TAG}" ]; then echo "${TAG}"; else echo HEAD; fi)
RUN go build $(if [ -n "${FEATURES}" ]; then echo "-tags ${FEATURES}"; fi) -ldflags "-s -w -X github.com/drakkan/sftpgo/version.commit=`git describe --always --dirty` -X github.com/drakkan/sftpgo/version.date=`date -u +%FT%TZ`" -v -o /go/bin/sftpgo

FROM alpine:latest

RUN apk add --no-cache ca-certificates su-exec \
	&& mkdir -p /data /etc/sftpgo /srv/sftpgo/config /srv/sftpgo/web /srv/sftpgo/backups

# git and rsync are optional, uncomment the next line to add support for them if needed.
#RUN apk add --no-cache git rsync

COPY --from=builder /go/bin/sftpgo /bin/
COPY --from=builder /go/src/github.com/drakkan/sftpgo/sftpgo.json /etc/sftpgo/sftpgo.json
COPY --from=builder /go/src/github.com/drakkan/sftpgo/templates /srv/sftpgo/web/templates
COPY --from=builder /go/src/github.com/drakkan/sftpgo/static /srv/sftpgo/web/static
COPY docker-entrypoint.sh /bin/entrypoint.sh
RUN chmod +x /bin/entrypoint.sh

VOLUME [ "/data", "/srv/sftpgo/config", "/srv/sftpgo/backups" ]
EXPOSE 2022 8080

# uncomment the following settings to enable FTP support
#ENV SFTPGO_FTPD__BIND_PORT=2121
#ENV SFTPGO_FTPD__FORCE_PASSIVE_IP=<your FTP visible IP here>
#EXPOSE 2121

# we need to expose the passive ports range too
#EXPOSE 50000-50100

# it is a good idea to provide certificates to enable FTPS too
#ENV SFTPGO_FTPD__CERTIFICATE_FILE=/srv/sftpgo/config/mycert.crt
#ENV SFTPGO_FTPD__CERTIFICATE_KEY_FILE=/srv/sftpgo/config/mycert.key

# uncomment the following setting to enable WebDAV support
#ENV SFTPGO_WEBDAVD__BIND_PORT=8090

# it is a good idea to provide certificates to enable WebDAV over HTTPS
#ENV SFTPGO_WEBDAVD__CERTIFICATE_FILE=/srv/sftpgo/config/mycert.crt
#ENV SFTPGO_WEBDAVD__CERTIFICATE_KEY_FILE=/srv/sftpgo/config/mycert.key

ENTRYPOINT ["/bin/entrypoint.sh"]
CMD ["serve"]

@@ -1,61 +0,0 @@
# SFTPGo with Docker and Alpine

:warning: The recommended way to run SFTPGo on Docker is to use the official [images](https://hub.docker.com/r/drakkan/sftpgo). The documentation here is now obsolete.

This Dockerfile is made to build an image that hosts multiple instances of SFTPGo started with different users.

## Example

> 1003 is a custom uid:gid for this instance of SFTPGo

```bash
# Prereq on docker host
sudo groupadd -g 1003 sftpgrp && \
  sudo useradd -u 1003 -g 1003 sftpuser -d /home/sftpuser/ && \
  sudo -u sftpuser mkdir /home/sftpuser/{conf,data} && \
  curl https://raw.githubusercontent.com/drakkan/sftpgo/master/sftpgo.json -o /home/sftpuser/conf/sftpgo.json

# Edit sftpgo.json as you need

# Get and build SFTPGo image.
# Add --build-arg TAG=LATEST to build the latest tag or e.g. TAG=v1.0.0 for a specific tag/commit.
# Add --build-arg FEATURES=<build features comma separated> to specify the features to build.
git clone https://github.com/drakkan/sftpgo.git && \
  cd sftpgo && \
  sudo docker build -t sftpgo docker/sftpgo/alpine/

# Initialize the configured provider. For PostgreSQL and MySQL providers you need to create the configured database and the "initprovider" command will create the required tables.
sudo docker run --name sftpgo \
  -e PUID=1003 \
  -e GUID=1003 \
  -v /home/sftpuser/conf/:/srv/sftpgo/config \
  sftpgo initprovider -c /srv/sftpgo/config

# Start the image
sudo docker rm sftpgo && sudo docker run --name sftpgo \
  -e SFTPGO_LOG_FILE_PATH= \
  -e SFTPGO_CONFIG_DIR=/srv/sftpgo/config \
  -e SFTPGO_HTTPD__TEMPLATES_PATH=/srv/sftpgo/web/templates \
  -e SFTPGO_HTTPD__STATIC_FILES_PATH=/srv/sftpgo/web/static \
  -e SFTPGO_HTTPD__BACKUPS_PATH=/srv/sftpgo/backups \
  -p 8080:8080 \
  -p 2022:2022 \
  -e PUID=1003 \
  -e GUID=1003 \
  -v /home/sftpuser/conf/:/srv/sftpgo/config \
  -v /home/sftpuser/data:/data \
  -v /home/sftpuser/backups:/srv/sftpgo/backups \
  sftpgo
```

If you want to enable FTP/S you also need to publish the FTP port and the FTP passive port range, defined in your `Dockerfile`, by adding, for example, the following options to the `docker run` command: `-p 2121:2121 -p 50000-50100:50000-50100`. The same goes for WebDAV: you need to publish the configured port.

The script `entrypoint.sh` makes sure to correct the permissions of the directories and to start the process with the right user.

Several instances can be run with different parameters.

## Custom systemd script

An example systemd unit is available [here](sftpgo.service), with `Environment` parameters to set `PUID` and `GUID`.

The configured `WorkingDirectory` must exist and contain one environment file per instance, named like `sftpgo-${PUID}.env`, holding the variables for that SFTPGo instance, as in the sketch below.
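
A minimal sketch of such an environment file (the variable names follow the `SFTPGO_` convention used above; the specific keys and values are illustrative assumptions):

```shell
# /etc/sftpgo/sftpgo-1003.env
SFTPGO_SFTPD__MAX_AUTH_TRIES=3
SFTPGO_DATA_PROVIDER__DRIVER=sqlite
```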

@@ -1,7 +0,0 @@
#!/bin/sh

set -eu

chown -R "${PUID}:${GUID}" /data /etc/sftpgo /srv/sftpgo/config /srv/sftpgo/backups \
	&& exec su-exec "${PUID}:${GUID}" \
	/bin/sftpgo "$@"

@@ -1,35 +0,0 @@
[Unit]
Description=SFTPGo server
After=docker.service

[Service]
User=root
Group=root
WorkingDirectory=/etc/sftpgo
Environment=PUID=1003
Environment=GUID=1003
EnvironmentFile=-/etc/sysconfig/sftpgo.env
ExecStartPre=-docker kill sftpgo
ExecStartPre=-docker rm sftpgo
ExecStart=docker run --name sftpgo \
  --env-file sftpgo-${PUID}.env \
  -e PUID=${PUID} \
  -e GUID=${GUID} \
  -e SFTPGO_LOG_FILE_PATH= \
  -e SFTPGO_CONFIG_DIR=/srv/sftpgo/config \
  -e SFTPGO_HTTPD__TEMPLATES_PATH=/srv/sftpgo/web/templates \
  -e SFTPGO_HTTPD__STATIC_FILES_PATH=/srv/sftpgo/web/static \
  -e SFTPGO_HTTPD__BACKUPS_PATH=/srv/sftpgo/backups \
  -p 8080:8080 \
  -p 2022:2022 \
  -v /home/sftpuser/conf/:/srv/sftpgo/config \
  -v /home/sftpuser/data:/data \
  -v /home/sftpuser/backups:/srv/sftpgo/backups \
  sftpgo
ExecStop=docker stop sftpgo
SyslogIdentifier=sftpgo
Restart=always
RestartSec=10s

[Install]
WantedBy=multi-user.target

@@ -1,93 +0,0 @@
# we use a multi stage build to have a separate build and run env
FROM golang:latest as buildenv
LABEL maintainer="nicola.murino@gmail.com"
RUN go get -v -d github.com/drakkan/sftpgo
WORKDIR /go/src/github.com/drakkan/sftpgo
ARG TAG
ARG FEATURES
# Use --build-arg TAG=LATEST for the latest tag. Use e.g. --build-arg TAG=v1.0.0 for a specific tag/commit. Otherwise HEAD (master) is built.
RUN git checkout $(if [ "${TAG}" = LATEST ]; then echo `git rev-list --tags --max-count=1`; elif [ -n "${TAG}" ]; then echo "${TAG}"; else echo HEAD; fi)
RUN go build $(if [ -n "${FEATURES}" ]; then echo "-tags ${FEATURES}"; fi) -ldflags "-s -w -X github.com/drakkan/sftpgo/version.commit=`git describe --always --dirty` -X github.com/drakkan/sftpgo/version.date=`date -u +%FT%TZ`" -v -o sftpgo

# now define the run environment
FROM debian:latest

# ca-certificates is needed for Cloud Storage support and for HTTPS/FTPS.
RUN apt-get update && apt-get install -y ca-certificates && apt-get clean

# git and rsync are optional, uncomment the next line to add support for them if needed.
#RUN apt-get update && apt-get install -y git rsync && apt-get clean

ARG BASE_DIR=/app
ARG DATA_REL_DIR=data
ARG CONFIG_REL_DIR=config
ARG BACKUP_REL_DIR=backups
ARG USERNAME=sftpgo
ARG GROUPNAME=sftpgo
ARG UID=515
ARG GID=515
ARG WEB_REL_PATH=web

# HOME_DIR for sftpgo itself
ENV HOME_DIR=${BASE_DIR}/${USERNAME}
# DATA_DIR, this is a volume that you can use to hold the users' home dirs
ENV DATA_DIR=${BASE_DIR}/${DATA_REL_DIR}
# CONFIG_DIR, this is a volume to persist the daemon private keys, configuration file etc.
ENV CONFIG_DIR=${BASE_DIR}/${CONFIG_REL_DIR}
# BACKUPS_DIR, this is a volume to store backups done using the "dumpdata" REST API
ENV BACKUPS_DIR=${BASE_DIR}/${BACKUP_REL_DIR}
ENV WEB_DIR=${BASE_DIR}/${WEB_REL_PATH}

RUN mkdir -p ${DATA_DIR} ${CONFIG_DIR} ${WEB_DIR} ${BACKUPS_DIR}
RUN groupadd --system -g ${GID} ${GROUPNAME}
RUN useradd --system --create-home --no-log-init --home-dir ${HOME_DIR} --comment "SFTPGo user" --shell /usr/sbin/nologin --gid ${GID} --uid ${UID} ${USERNAME}

WORKDIR ${HOME_DIR}
RUN mkdir -p bin .config/sftpgo
ENV PATH ${HOME_DIR}/bin:$PATH
COPY --from=buildenv /go/src/github.com/drakkan/sftpgo/sftpgo bin/sftpgo
# default config file to use if no config file is found inside the CONFIG_DIR volume.
# You can override each configuration option via env vars too
COPY --from=buildenv /go/src/github.com/drakkan/sftpgo/sftpgo.json .config/sftpgo/
COPY --from=buildenv /go/src/github.com/drakkan/sftpgo/templates ${WEB_DIR}/templates
COPY --from=buildenv /go/src/github.com/drakkan/sftpgo/static ${WEB_DIR}/static
RUN chown -R ${UID}:${GID} ${DATA_DIR} ${BACKUPS_DIR}

# run as non root user
USER ${USERNAME}

EXPOSE 2022 8080

# the defined volumes must have write access for the UID and GID defined above
VOLUME [ "$DATA_DIR", "$CONFIG_DIR", "$BACKUPS_DIR" ]

# override some default configuration options using env vars
ENV SFTPGO_CONFIG_DIR=${CONFIG_DIR}
# setting SFTPGO_LOG_FILE_PATH to an empty string will log to stdout
ENV SFTPGO_LOG_FILE_PATH=""
ENV SFTPGO_HTTPD__BIND_ADDRESS=""
ENV SFTPGO_HTTPD__TEMPLATES_PATH=${WEB_DIR}/templates
ENV SFTPGO_HTTPD__STATIC_FILES_PATH=${WEB_DIR}/static
ENV SFTPGO_DATA_PROVIDER__USERS_BASE_DIR=${DATA_DIR}
ENV SFTPGO_HTTPD__BACKUPS_PATH=${BACKUPS_DIR}

# uncomment the following settings to enable FTP support
#ENV SFTPGO_FTPD__BIND_PORT=2121
#ENV SFTPGO_FTPD__FORCE_PASSIVE_IP=<your FTP visible IP here>
#EXPOSE 2121
# we need to expose the passive ports range too
#EXPOSE 50000-50100

# it is a good idea to provide certificates to enable FTPS too
#ENV SFTPGO_FTPD__CERTIFICATE_FILE=${CONFIG_DIR}/mycert.crt
#ENV SFTPGO_FTPD__CERTIFICATE_KEY_FILE=${CONFIG_DIR}/mycert.key

# uncomment the following setting to enable WebDAV support
#ENV SFTPGO_WEBDAVD__BIND_PORT=8090

# it is a good idea to provide certificates to enable WebDAV over HTTPS
#ENV SFTPGO_WEBDAVD__CERTIFICATE_FILE=${CONFIG_DIR}/mycert.crt
#ENV SFTPGO_WEBDAVD__CERTIFICATE_KEY_FILE=${CONFIG_DIR}/mycert.key

ENTRYPOINT ["sftpgo"]
CMD ["serve"]

@@ -1,59 +0,0 @@
# Dockerfile based on Debian stable

:warning: The recommended way to run SFTPGo on Docker is to use the official [images](https://hub.docker.com/r/drakkan/sftpgo). The documentation here is now obsolete.

Please read the comments inside the `Dockerfile` to learn how to customize things for your setup.

You can build the container image using `docker build`, for example:

```bash
docker build -t="drakkan/sftpgo" .
```

This will build master of github.com/drakkan/sftpgo.

To build the latest tag you can add `--build-arg TAG=LATEST` and to build a specific tag/commit you can use for example `TAG=v1.0.0`, like this:

```bash
docker build -t="drakkan/sftpgo" --build-arg TAG=v1.0.0 .
```

To specify the features to build you can add `--build-arg FEATURES=<build features comma separated>`. For example you can disable SQLite and S3 support like this:

```bash
docker build -t="drakkan/sftpgo" --build-arg FEATURES=nosqlite,nos3 .
```

Please take a look at the [build from source](./../../../docs/build-from-source.md) documentation for the complete list of the features that can be disabled.

Now create the required folders on the host system, for example:

```bash
sudo mkdir -p /srv/sftpgo/data /srv/sftpgo/config /srv/sftpgo/backups
```

and give write access to them to the UID/GID defined inside the `Dockerfile`. You can choose to create a new user, on the host system, with a matching UID/GID pair, or simply do something like this:

```bash
sudo chown -R <UID>:<GID> /srv/sftpgo/data /srv/sftpgo/config /srv/sftpgo/backups
```

Download the default configuration file and edit it as you need:

```bash
sudo curl https://raw.githubusercontent.com/drakkan/sftpgo/master/sftpgo.json -o /srv/sftpgo/config/sftpgo.json
```

Initialize the configured provider. For PostgreSQL and MySQL providers you need to create the configured database, and the `initprovider` command will create the required tables:

```bash
docker run --name sftpgo --mount type=bind,source=/srv/sftpgo/config,target=/app/config drakkan/sftpgo initprovider -c /app/config
```

and finally you can run the image using something like this:

```bash
docker rm sftpgo && docker run --name sftpgo -p 8080:8080 -p 2022:2022 --mount type=bind,source=/srv/sftpgo/data,target=/app/data --mount type=bind,source=/srv/sftpgo/config,target=/app/config --mount type=bind,source=/srv/sftpgo/backups,target=/app/backups drakkan/sftpgo
```

If you want to enable FTP/S you also need to publish the FTP port and the FTP passive port range, defined in your `Dockerfile`, by adding, for example, the following options to the `docker run` command: `-p 2121:2121 -p 50000-50100:50000-50100`. The same goes for WebDAV: you need to publish the configured port.

@@ -1,22 +0,0 @@
# Account's configuration properties

Please take a look at the [OpenAPI schema](../httpd/schema/openapi.yaml) for the exact definitions of user, folder and admin fields.
If you need an example you can export a dump using the Web Admin or by invoking the `dumpdata` endpoint directly; you need to obtain an access token first, for example:

```shell
$ curl "http://admin:password@127.0.0.1:8080/api/v2/token"
{"access_token":"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOlsiQVBJIl0sImV4cCI6MTYxMzMzNTI2MSwianRpIjoiYzBrb2gxZmNkcnBjaHNzMGZwZmciLCJuYmYiOjE2MTMzMzQ2MzEsInBlcm1pc3Npb25zIjpbIioiXSwic3ViIjoiYUJ0SHUwMHNBUmxzZ29yeEtLQ1pZZWVqSTRKVTlXbThHSGNiVWtWVmc1TT0iLCJ1c2VybmFtZSI6ImFkbWluIn0.WiyqvUF-92zCr--y4Q_sxn-tPnISFzGZd_exsG-K7ME","expires_at":"2021-02-14T20:41:01Z"}

curl -H "Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOlsiQVBJIl0sImV4cCI6MTYxMzMzNTI2MSwianRpIjoiYzBrb2gxZmNkcnBjaHNzMGZwZmciLCJuYmYiOjE2MTMzMzQ2MzEsInBlcm1pc3Npb25zIjpbIioiXSwic3ViIjoiYUJ0SHUwMHNBUmxzZ29yeEtLQ1pZZWVqSTRKVTlXbThHSGNiVWtWVmc1TT0iLCJ1c2VybmFtZSI6ImFkbWluIn0.WiyqvUF-92zCr--y4Q_sxn-tPnISFzGZd_exsG-K7ME" "http://127.0.0.1:8080/api/v2/dumpdata?output-data=1"
```

The dump is a JSON file with users, folders and admins.

These properties are stored inside the configured data provider.

SFTPGo supports checking passwords stored with bcrypt, pbkdf2, md5crypt and sha512crypt too:

- For pbkdf2 the supported format is `$<algo>$<iterations>$<salt>$<hashed pwd base64 encoded>`, where algo is `pbkdf2-sha1` or `pbkdf2-sha256` or `pbkdf2-sha512` or `$pbkdf2-b64salt-sha256$`. For example, the pbkdf2-sha256 of the word `password` using 150000 iterations and `E86a9YMX3zC7` as salt must be stored as `$pbkdf2-sha256$150000$E86a9YMX3zC7$R5J62hsSq+pYw00hLLPKBbcGXmq7fj5+/M0IFoYtZbo=`. In the pbkdf2 variant with b64salt the salt is base64 encoded.
- For bcrypt the format must be the one supported by golang's crypto/bcrypt package; for example, the password `secret` with cost 14 must be stored as `$2a$14$ajq8Q7fbtFRQvXpdCq7Jcuy.Rx1h/L4J60Otx.gyNLbAYctGMJ9tK`.
- For md5crypt and sha512crypt we support the format used in `/etc/shadow` with the `$1$` and `$6$` prefix; this is useful if you are migrating from Unix system user accounts. We support Apache md5crypt (`$apr1$` prefix) too.

Using the REST API you can send a password hashed as bcrypt, pbkdf2, md5crypt or sha512crypt and it will be stored as is.
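
For example, a sketch of generating `/etc/shadow`-style hashes with OpenSSL (assuming OpenSSL 1.1.1 or later, where the `-6` option is available):

```shell
# md5crypt, $1$ prefix
openssl passwd -1 "yourpassword"
# sha512crypt, $6$ prefix
openssl passwd -6 "yourpassword"
```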

If you want to use your existing accounts, you have these options:

- you can import your users inside SFTPGo. Take a look at the [convert users](../examples/convertusers) script: it can convert and import users from Linux system users and Pure-FTPd/ProFTPD virtual users
- you can use an external authentication program

@@ -1,20 +0,0 @@
# Azure Blob Storage backend

To connect SFTPGo to Azure Blob Storage, you need to specify the access credentials. Azure Blob Storage has different options for credentials; we support:

1. Providing an account name and account key.
2. Providing a shared access signature (SAS).

If you authenticate using account and key you also need to specify a container. The endpoint can generally be left blank; the default is `blob.core.windows.net`.

If you provide a SAS URL the container is optional, and if given it must match the one inside the shared access signature.

If you want to connect to an emulator such as [Azurite](https://github.com/Azure/Azurite) you need to provide the account name/key pair and an endpoint prefixed with the protocol, for example `http://127.0.0.1:10000`.
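
For local testing, a sketch of starting Azurite with Docker (the image name, command and well-known development account are assumptions based on the Azurite project documentation):

```shell
# blob endpoint listens on port 10000; the default development account
# is usually devstoreaccount1 with a published well-known key
docker run -p 10000:10000 mcr.microsoft.com/azure-storage/azurite \
  azurite-blob --blobHost 0.0.0.0
```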
Specifying a different `key_prefix`, you can assign different "folders" of the same container to different users. This is similar to a chroot directory for local filesystem. Each SFTPGo user can only access the assigned folder and its contents. The folder identified by `key_prefix` does not need to be pre-created.

For multipart uploads you can customize the part size and the upload concurrency. Please note that if the upload bandwidth between the client and SFTPGo is greater than the upload bandwidth between SFTPGo and the Azure Blob service, then the client will have to wait for the last parts to be uploaded to Azure after it finishes uploading the file to SFTPGo, and it may time out. Keep this in mind if you customize these parameters.

The configured container must exist.

This backend is very similar to the [S3](./s3.md) backend, and it has the same limitations.
|
|
|
@ -1,43 +0,0 @@
# Build SFTPGo from source

Download the sources and use `go build`.

The following build tags are available:

- `nogcs`, disable Google Cloud Storage backend, default enabled
- `nos3`, disable S3 Compatible Object Storage backends, default enabled
- `noazblob`, disable Azure Blob Storage backend, default enabled
- `nobolt`, disable Bolt data provider, default enabled
- `nomysql`, disable MySQL data provider, default enabled
- `nopgsql`, disable PostgreSQL data provider, default enabled
- `nosqlite`, disable SQLite data provider, default enabled
- `noportable`, disable portable mode, default enabled
- `nometrics`, disable Prometheus metrics, default enabled
- `novaultkms`, disable Vault transit secret engine, default enabled
- `noawskms`, disable AWS KMS, default enabled
- `nogcpkms`, disable GCP KMS, default enabled

If no build tag is specified the build will include the default features.

The optional [SQLite driver](https://github.com/mattn/go-sqlite3 "go-sqlite3") is a `CGO` package and so it requires a `C` compiler at build time.
On Linux and macOS, a compiler is easy to install or already installed. On Windows, you need to download [MinGW-w64](https://sourceforge.net/projects/mingw-w64/files/) and build SFTPGo from its command prompt.

The compiler is a build time only dependency; it is not required at runtime.

Version info, such as git commit and build date, can be embedded by setting the following string variables at build time:

- `github.com/drakkan/sftpgo/version.commit`
- `github.com/drakkan/sftpgo/version.date`

For example, you can build using the following command:

```bash
go build -tags nogcs,nos3,nosqlite -ldflags "-s -w -X github.com/drakkan/sftpgo/version.commit=`git describe --always --dirty` -X github.com/drakkan/sftpgo/version.date=`date -u +%FT%TZ`" -o sftpgo
```

You should get a version that includes git commit, build date and available features like this one:

```bash
$ ./sftpgo -v
SFTPGo 0.9.6-dev-b30614e-dirty-2020-06-19T11:04:56Z +metrics -gcs -s3 +bolt +mysql +pgsql -sqlite +portable
```
@ -1,47 +0,0 @@
# Check password hook

This hook allows you to externally check the provided password. Its main use case is to easily support things like password+OTP for protocols without keyboard interactive support, such as FTP and WebDAV. You can ask your users to login using a string consisting of a fixed password and a One Time Token; you can verify the token inside the hook and ask SFTPGo to verify the fixed part.

The same thing can be achieved using [External authentication](./external-auth.md) but using this hook is simpler in some use cases.

The `check password hook` can be defined as the absolute path of your program or an HTTP URL.

The expected response is a JSON serialized struct containing the following keys:

- `status` integer. 0 means KO, 1 means OK, 2 means partial success
- `to_verify` string. For `status` = 2 SFTPGo will check this password against the one stored inside the SFTPGo data provider

If the hook defines an external program it can read the following environment variables:

- `SFTPGO_AUTHD_USERNAME`
- `SFTPGO_AUTHD_PASSWORD`
- `SFTPGO_AUTHD_IP`
- `SFTPGO_AUTHD_PROTOCOL`, possible values are `SSH`, `FTP`, `DAV`, `HTTP`

Previous global environment variables aren't cleared when the script is called. The content of these variables is _not_ quoted. They may contain special characters. They are under the control of a possibly malicious remote user.

The program must write, on its standard output, the expected JSON serialized response described above.
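
As a sketch, a program hook splitting a login string made of a fixed password followed by a 6 digit one time token could look like this (`verify_otp` is a hypothetical helper you would implement; a real hook must also JSON-escape the password):

```shell
#!/bin/bash

# The client is expected to send "<fixed password><6 digit OTP>".
FIXED_PART="${SFTPGO_AUTHD_PASSWORD%??????}"
OTP="${SFTPGO_AUTHD_PASSWORD: -6}"

# verify_otp is a hypothetical helper that validates the token for the user.
if verify_otp "$SFTPGO_AUTHD_USERNAME" "$OTP"
then
  # Partial success: ask SFTPGo to verify the fixed part against its data provider.
  echo "{\"status\":2,\"to_verify\":\"$FIXED_PART\"}"
else
  echo '{"status":0}'
fi
```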

If the hook is an HTTP URL then it will be invoked as HTTP POST. The request body will contain a JSON serialized struct with the following fields:

- `username`
- `password`
- `ip`
- `protocol`, possible values are `SSH`, `FTP`, `DAV`

If authentication succeeds the HTTP response code must be 200 and the response body must contain the expected JSON serialized response described above.

The program hook must finish within 30 seconds; the HTTP hook timeout will use the global configuration for HTTP clients.

You can also restrict the hook scope using the `check_password_scope` configuration key:

- `0` means all supported protocols.
- `1` means SSH only
- `2` means FTP only
- `4` means WebDAV only

You can combine the scopes. For example, 6 means FTP and WebDAV.

You can disable the hook on a per-user basis.

An example check password program allowing 2FA using password + one time token can be found inside the source tree [checkpwd](../examples/OTP/authy/checkpwd) directory.
@ -1,97 +0,0 @@
# Custom Actions

The `actions` struct inside the `common` configuration section allows you to configure the actions for file operations and SSH commands.
The `hook` can be defined as the absolute path of your program or an HTTP URL.

The following `actions` are supported:

- `download`
- `pre-download`
- `upload`
- `pre-upload`
- `delete`
- `pre-delete`
- `rename`
- `ssh_cmd`

The `upload` condition includes both uploads to new files and overwrites of existing files. If an upload is aborted due to quota limits, SFTPGo tries to remove the partial file, so if the notification reports a zero size file and a quota exceeded error the file has been deleted. The `ssh_cmd` condition will be triggered after a command is successfully executed via SSH. `scp` will trigger the `download` and `upload` conditions and not `ssh_cmd`.

The notification will indicate whether an error was detected and so, for example, whether a partial file was uploaded.

The `pre-delete` action, if defined, will be called just before file deletion. If the external command completes with a zero exit status or the HTTP notification response code is `200` then SFTPGo will assume that the file was already deleted/moved, so it will not try to remove the file and it will not execute the hook defined for the `delete` action.

The `pre-download` and `pre-upload` actions will be called before downloads and uploads. If the external command completes with a zero exit status or the HTTP notification response code is `200` then SFTPGo allows the operation, otherwise the client will get a permission denied error.

If the `hook` defines a path to an external program, then this program is invoked with the following arguments:

- `action`, string, supported action
- `username`
- `path` is the full filesystem path, can be empty for some SSH commands
- `target_path`, non-empty for `rename` action and for `sftpgo-copy` SSH command
- `ssh_cmd`, non-empty for `ssh_cmd` action

The external program can also read the following environment variables:

- `SFTPGO_ACTION`
- `SFTPGO_ACTION_USERNAME`
- `SFTPGO_ACTION_PATH`
- `SFTPGO_ACTION_TARGET`, non-empty for `rename` `SFTPGO_ACTION`
- `SFTPGO_ACTION_SSH_CMD`, non-empty for `ssh_cmd` `SFTPGO_ACTION`
- `SFTPGO_ACTION_FILE_SIZE`, non-zero for `pre-upload`, `upload`, `download` and `delete` actions if the file size is greater than `0`
- `SFTPGO_ACTION_FS_PROVIDER`, `0` for local filesystem, `1` for S3 backend, `2` for Google Cloud Storage (GCS) backend, `3` for Azure Blob Storage backend, `4` for local encrypted backend, `5` for SFTP backend
- `SFTPGO_ACTION_BUCKET`, non-empty for S3, GCS and Azure backends
- `SFTPGO_ACTION_ENDPOINT`, non-empty for S3, SFTP and Azure backends if an endpoint is configured
- `SFTPGO_ACTION_STATUS`, integer. Status for `upload`, `download` and `ssh_cmd` actions. 0 means a generic error occurred. 1 means no error, 2 means quota exceeded error
- `SFTPGO_ACTION_PROTOCOL`, string. Possible values are `SSH`, `SFTP`, `SCP`, `FTP`, `DAV`, `HTTP`
- `SFTPGO_ACTION_OPEN_FLAGS`, integer. File open flags, can be non-zero for `pre-upload` action. If `SFTPGO_ACTION_FILE_SIZE` is greater than zero and `SFTPGO_ACTION_OPEN_FLAGS&512 == 0` the target file will not be truncated

Previous global environment variables aren't cleared when the script is called.
The program must finish within 30 seconds.
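
As a sketch, a minimal program hook appending completed uploads to a log file could look like this (the log path is arbitrary):

```shell
#!/bin/bash

# Log successful uploads and ignore the other actions.
# Status 1 means no error, see SFTPGO_ACTION_STATUS above.
if [ "$SFTPGO_ACTION" = "upload" ] && [ "$SFTPGO_ACTION_STATUS" = "1" ]
then
  echo "$(date -u +%FT%TZ) user=$SFTPGO_ACTION_USERNAME path=$SFTPGO_ACTION_PATH size=$SFTPGO_ACTION_FILE_SIZE" >> /var/log/sftpgo-uploads.log
fi
```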

If the `hook` defines an HTTP URL then this URL will be invoked as HTTP POST. The request body will contain a JSON serialized struct with the following fields:

- `action`
- `username`
- `path`
- `target_path`, included for `rename` action
- `ssh_cmd`, included for `ssh_cmd` action
- `file_size`, included for `pre-upload`, `upload`, `download`, `delete` actions if the file size is greater than `0`
- `fs_provider`, `0` for local filesystem, `1` for S3 backend, `2` for Google Cloud Storage (GCS) backend, `3` for Azure Blob Storage backend, `4` for local encrypted backend, `5` for SFTP backend
- `bucket`, included for S3, GCS and Azure backends
- `endpoint`, included for S3, SFTP and Azure backends if an endpoint is configured
- `status`, integer. Status for `upload`, `download` and `ssh_cmd` actions. 0 means a generic error occurred. 1 means no error, 2 means quota exceeded error
- `protocol`, string. Possible values are `SSH`, `SFTP`, `SCP`, `FTP`, `DAV`, `HTTP`
- `open_flags`, integer. File open flags, can be non-zero for `pre-upload` action. If `file_size` is greater than zero and `open_flags&512 == 0` the target file will not be truncated
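
For example, the notification for a successfully completed upload to the local filesystem might look like this (illustrative values):

```json
{
  "action": "upload",
  "username": "user1",
  "path": "/srv/sftpgo/data/user1/report.txt",
  "file_size": 65536,
  "fs_provider": 0,
  "status": 1,
  "protocol": "SFTP"
}
```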

The HTTP hook will use the global configuration for HTTP clients and will respect the retry configurations.

The `pre-*` actions are always executed synchronously while the other ones are asynchronous. You can specify the actions to run synchronously via the `execute_sync` configuration key. Executing an action synchronously means that SFTPGo will not return a result code to the client (which is waiting for it) until your hook has completed its execution. If your hook takes a long time to complete, the client will not receive the server response in a timely manner; this could cause a timeout on the client side and the client may eventually drop the connection.

The `actions` struct inside the `data_provider` configuration section allows you to configure actions on user add, update, delete.

Actions will not be fired for internal updates, such as the last login or the user quota fields, or after external authentication.

If the `hook` defines a path to an external program, then this program is invoked with the following arguments:

- `action`, string, possible values are: `add`, `update`, `delete`
- `username`
- `ID`
- `status`
- `expiration_date`
- `home_dir`
- `uid`
- `gid`

The external program can also read the following environment variables:

- `SFTPGO_USER_ACTION`
- `SFTPGO_USER`, user serialized as JSON with sensitive fields removed

Previous global environment variables aren't cleared when the script is called.
The program must finish within 15 seconds.

If the `hook` defines an HTTP URL then this URL will be invoked as HTTP POST. The action is added to the query string, for example `<hook>?action=update`, and the user is sent serialized as JSON inside the POST body with sensitive fields removed.

The HTTP hook will use the global configuration for HTTP clients and will respect the retry configurations.

The structure for SFTPGo users can be found within the [OpenAPI schema](../httpd/schema/openapi.yaml).
docs/dare.md
@ -1,18 +0,0 @@
# Data At Rest Encryption (DARE)

SFTPGo supports data at-rest encryption via its `cryptfs` virtual file system. In this mode SFTPGo transparently encrypts and decrypts data (to/from the disk) on-the-fly during uploads and/or downloads, making sure that the files at rest on the server side are always encrypted.

Because of the way it works, when you set up an encrypted filesystem for a user you need to make sure it points to an empty path/directory (one that has no files in it). Otherwise SFTPGo would try to decrypt existing files that are not encrypted in the first place and fail.

SFTPGo's `cryptfs` is a tiny wrapper around [sio](https://github.com/minio/sio), therefore data is encrypted and authenticated using `AES-256-GCM` or `ChaCha20-Poly1305`. AES-GCM will be used if the CPU provides hardware support for it.

The only required configuration parameter is a `passphrase`: each file will be encrypted using a unique, randomly generated secret key derived from the given passphrase using the HMAC-based Extract-and-Expand Key Derivation Function (HKDF) as defined in [RFC 5869](http://tools.ietf.org/html/rfc5869). It is important to note that the per-object encryption key is never stored anywhere: it is derived from your `passphrase` and a randomly generated initialization vector just before encryption/decryption. The initialization vector is stored with the file.

The passphrase is stored encrypted itself according to your [KMS configuration](./kms.md) and is required to decrypt any file encrypted using an encryption key derived from it.

The encrypted filesystem has some limitations compared to the local, unencrypted one:

- Resuming uploads is not supported.
- Opening a file for both reading and writing at the same time is not supported, so clients that require advanced filesystem-like features such as `sshfs` are not supported either.
- Truncate is not supported.
- System commands such as `git` or `rsync` are not supported: they will store data unencrypted.
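
For reference, a user's filesystem section enabling `cryptfs` might look like the following sketch (provider `4` is the local encrypted backend; double-check the field names against the OpenAPI schema):

```json
{
  "filesystem": {
    "provider": 4,
    "cryptconfig": {
      "passphrase": {"status": "Plain", "payload": "your strong passphrase"}
    }
  }
}
```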
@ -1,61 +0,0 @@
# Defender

The built-in `defender` allows you to configure an auto-blocking policy for SFTPGo and thus helps to prevent DoS (Denial of Service) and brute force password guessing.

If enabled it will protect SFTP, FTP and WebDAV services and it will automatically block hosts (IP addresses) that continually fail to log in or attempt to connect.

You can configure a score for each event type:

- `score_valid`, defines the score for valid login attempts, e.g. user accounts that exist. Default `1`.
- `score_invalid`, defines the score for invalid login attempts, e.g. non-existent user accounts or client disconnected for inactivity without authentication attempts. Default `2`.
- `score_limit_exceeded`, defines the score for hosts that exceeded the configured rate limits or the configured max connections per host. Default `3`.

And then you can configure:

- `observation_time`, defines the time window, in minutes, for tracking client errors.
- `threshold`, defines the threshold value before banning a host.
- `ban_time`, defines the time to ban a client, in minutes

So a host is banned, for `ban_time` minutes, if it has exceeded the defined threshold within the last `observation_time` minutes.

A banned IP has no score: it makes no sense to accumulate host events in memory for an already banned IP address.

If an already banned client tries to log in again, its ban time will be incremented according to the `ban_time_increment` configuration.

The `ban_time_increment` is calculated as a percentage of `ban_time`, so if `ban_time` is 30 minutes and `ban_time_increment` is 50 the host will be banned for an additional 15 minutes. You can also specify values greater than 100 for `ban_time_increment` if you want to increase the penalty for already banned hosts.

The `defender` will keep both the host scores and the banned hosts in memory; you can limit the memory usage using the `entries_soft_limit` and `entries_hard_limit` configuration keys.

Using the REST API you can:

- list hosts within the defender's lists
- remove hosts from the defender's lists
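
As a sketch (please check the OpenAPI schema for the exact paths and parameters):

```bash
# list the hosts the defender is tracking or has banned
curl -H "Authorization: Bearer $TOKEN" "http://127.0.0.1:8080/api/v2/defender/hosts"
# remove a host from the defender's lists
curl -X DELETE -H "Authorization: Bearer $TOKEN" "http://127.0.0.1:8080/api/v2/defender/hosts/<host id>"
```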

The `defender` can also load a permanent block list and/or a safe list of IP addresses/networks from a file:

- `safelist_file`, defines the path to a file containing a list of IP addresses and/or networks to never ban.
- `blocklist_file`, defines the path to a file containing a list of IP addresses and/or networks to always ban.

These lists must be stored as JSON conforming to the following schema:

- `addresses`, list of strings. Each string must be a valid IPv4/IPv6 address.
- `networks`, list of strings. Each string must be a valid IPv4/IPv6 CIDR address.

Here is a small example:

```json
{
  "addresses":[
    "192.0.2.1",
    "2001:db8::68"
  ],
  "networks":[
    "192.0.2.0/24",
    "2001:db8:1234::/48"
  ]
}
```

These lists will be loaded in memory for faster lookups. The REST API queries "live" data and not these lists.

The `defender` is optimized for fast, constant-time lookups; however, as it keeps all the lists and entries in memory, you should carefully measure the memory requirements for your use case.
@ -1,59 +0,0 @@
# Dynamic user creation or modification

Dynamic user creation or modification is supported via an external program or an HTTP URL that can be invoked just before the user login.
To enable dynamic user modification, you must set the absolute path of your program or an HTTP URL using the `pre_login_hook` key in your configuration file.

The external program can read the following environment variables to get info about the user trying to login:

- `SFTPGO_LOGIND_USER`, it contains the user trying to login serialized as JSON. A JSON serialized user id equal to zero means the user does not exist inside SFTPGo
- `SFTPGO_LOGIND_METHOD`, possible values are: `password`, `publickey`, `keyboard-interactive`, `TLSCertificate`
- `SFTPGO_LOGIND_IP`, IP address of the user trying to login
- `SFTPGO_LOGIND_PROTOCOL`, possible values are `SSH`, `FTP`, `DAV`, `HTTP`

The program must write, on its standard output:

- an empty string (or no response at all) if the user should not be created/updated
- or the SFTPGo user, JSON serialized, if you want to create or update the given user

If the hook is an HTTP URL then it will be invoked as HTTP POST. The login method, the protocol used and the IP address of the user trying to login are added to the query string, for example `<http_url>?login_method=password&ip=1.2.3.4&protocol=SSH`.
The request body will contain the user trying to login serialized as JSON. If no modification is needed the HTTP response code must be 204, otherwise the response code must be 200 and the response body a valid SFTPGo user serialized as JSON.

Actions defined for user updates will not be executed in this case, and an already logged in user with the same username will not be disconnected; you have to handle these things yourself.

The JSON response can include only the fields to update instead of the full user. For example, if you want to disable the user, you can return a response like this:

```json
{"status": 0}
```

Please note that if you want to create a new user, the pre-login hook response must include all the mandatory user fields.

The program hook must finish within 30 seconds; the HTTP hook will use the global configuration for HTTP clients.

If an error happens while executing the hook then login will be denied.

"Dynamic user creation or modification" and "External Authentication" are mutually exclusive. They are quite similar; the difference is that "External Authentication" returns an already authenticated user, while with "Dynamic users modification" you simply create or update a user and the authentication is then checked inside SFTPGo.
In other words, with "External Authentication" the external program receives the credentials of the user trying to login (for example the cleartext password) and it needs to validate them. With "Dynamic users modification" the pre-login program receives the user stored inside the data provider (including the hashed password, if any) and it can modify it; after the modification, SFTPGo will check the credentials of the user trying to login.

You can disable the hook on a per-user basis.

Let's see a very basic example. Our sample program will grant access to the existing user `test_user` only in the time range 10:00-18:00. Other users will not be modified since the program will terminate with no output.

```shell
#!/bin/bash

CURRENT_TIME=`date +%H:%M`
if [[ "$SFTPGO_LOGIND_USER" =~ "\"test_user\"" ]]
then
  if [[ $CURRENT_TIME > "18:00" || $CURRENT_TIME < "10:00" ]]
  then
    echo '{"status":0}'
  else
    echo '{"status":1}'
  fi
fi
```

Please note that this is a demo program and it might not work in all cases. For example, the username should be obtained by parsing the JSON serialized user and not by searching the username inside the JSON as shown here.

The structure for SFTPGo users can be found within the [OpenAPI schema](../httpd/schema/openapi.yaml).
@ -1,77 +0,0 @@
# External Authentication

To enable external authentication, you must set the absolute path of your authentication program or an HTTP URL using the `external_auth_hook` key in your configuration file.

The external program can read the following environment variables to get info about the user trying to authenticate:

- `SFTPGO_AUTHD_USERNAME`
- `SFTPGO_AUTHD_USER`, SFTPGo user serialized as JSON, empty if the user does not exist within the data provider
- `SFTPGO_AUTHD_IP`
- `SFTPGO_AUTHD_PROTOCOL`, possible values are `SSH`, `FTP`, `DAV`, `HTTP`
- `SFTPGO_AUTHD_PASSWORD`, not empty for password authentication
- `SFTPGO_AUTHD_PUBLIC_KEY`, not empty for public key authentication
- `SFTPGO_AUTHD_KEYBOARD_INTERACTIVE`, not empty for keyboard interactive authentication
- `SFTPGO_AUTHD_TLS_CERT`, TLS client certificate PEM encoded. Not empty for TLS certificate authentication

Previous global environment variables aren't cleared when the script is called. The content of these variables is _not_ quoted. They may contain special characters. They are under the control of a possibly malicious remote user.
The program can inspect the SFTPGo user, if it exists, using the `SFTPGO_AUTHD_USER` environment variable.
The program must write, on its standard output:

- a valid SFTPGo user serialized as JSON if the authentication succeeds. The user will be added/updated within the defined data provider
- an empty string, or no response at all, if authentication succeeds and the existing SFTPGo user does not need to be updated. Please note that in versions 2.0.x and earlier an empty response was interpreted as an authentication error
- a user with an empty username if the authentication fails

If the hook is an HTTP URL then it will be invoked as HTTP POST. The request body will contain a JSON serialized struct with the following fields:

- `username`
- `ip`
- `user`, SFTPGo user serialized as JSON, omitted if the user does not exist within the data provider
- `protocol`, possible values are `SSH`, `FTP`, `DAV`, `HTTP`
- `password`, not empty for password authentication
- `public_key`, not empty for public key authentication
- `keyboard_interactive`, not empty for keyboard interactive authentication
- `tls_cert`, TLS client certificate PEM encoded. Not empty for TLS certificate authentication

If authentication succeeds the HTTP response code must be 200 and the response body can be:

- a valid SFTPGo user serialized as JSON. The user will be added/updated within the defined data provider
- empty, the existing SFTPGo user does not need to be updated. Please note that in versions 2.0.x and earlier an empty response was interpreted as an authentication error

If the authentication fails the HTTP response code must be != 200 or the returned SFTPGo user must have an empty username.

Actions defined for users added/updated will not be executed in this case and an already logged in user with the same username will not be disconnected.

The program hook must finish within 30 seconds; the HTTP hook timeout will use the global configuration for HTTP clients.

This method is slower than built-in authentication, but it's very flexible as anyone can easily write their own authentication hooks.
You can also restrict the authentication scope for the hook using the `external_auth_scope` configuration key:

- `0` means all supported authentication scopes. The external hook will be used for password, public key, keyboard interactive and TLS certificate authentication
- `1` means passwords only
- `2` means public keys only
- `4` means keyboard interactive only
- `8` means TLS certificate only

You can combine the scopes. For example, 3 means password and public key, 5 means password and keyboard interactive, and so on.

Let's see a very basic example. Our sample authentication program will only accept user `test_user` with any password or public key.

```shell
#!/bin/sh

if test "$SFTPGO_AUTHD_USERNAME" = "test_user"; then
  echo '{"status":1,"username":"test_user","expiration_date":0,"home_dir":"/tmp/test_user","uid":0,"gid":0,"max_sessions":0,"quota_size":0,"quota_files":100000,"permissions":{"/":["*"],"/somedir":["list","download"]},"upload_bandwidth":0,"download_bandwidth":0,"filters":{"allowed_ip":[],"denied_ip":[]},"public_keys":[]}'
else
  echo '{"username":""}'
fi
```

The structure for SFTPGo users can be found within the [OpenAPI schema](../httpd/schema/openapi.yaml).

You can disable the hook on a per-user basis so that you can mix external and internal users.

An example authentication program that authenticates against an LDAP server can be found inside the source tree [ldapauth](../examples/ldapauth) directory.

An example server, usable as an HTTP authentication hook, that authenticates against an LDAP server can be found inside the source tree [ldapauthserver](../examples/ldapauthserver) directory.

If you have an external authentication hook that could be useful to others too, please let us know and/or please send a pull request.
@ -1,313 +0,0 @@
# Configuring SFTPGo

## Command line options

The SFTPGo executable can be used this way:

```console
Usage:
  sftpgo [command]

Available Commands:
  gen          A collection of useful generators
  help         Help about any command
  initprovider Initializes and/or updates the configured data provider
  portable     Serve a single directory
  serve        Start the SFTP Server

Flags:
  -h, --help      help for sftpgo
  -v, --version

Use "sftpgo [command] --help" for more information about a command
```

The `serve` command supports the following flags:

- `--config-dir` string. Location of the config dir. This directory is used as the base for files with a relative path, e.g. the private keys for the SFTP server or the SQLite database if you use SQLite as data provider. The configuration file, if not explicitly set, is looked for in this dir. We support reading from JSON, TOML, YAML, HCL, envfile and Java properties config files. The default config file name is `sftpgo` and therefore `sftpgo.json`, `sftpgo.yaml` and so on are searched. The default value is the working directory (".") or the value of `SFTPGO_CONFIG_DIR` environment variable.
- `--config-file` string. This flag explicitly defines the path, name and extension of the config file. It must be an absolute path or a path relative to the configuration directory. The specified file name must have a supported extension (JSON, YAML, TOML, HCL or Java properties). The default value is empty or the value of `SFTPGO_CONFIG_FILE` environment variable.
- `--loaddata-from` string. Load users and folders from this file. The file must be specified as an absolute path and it must contain a backup obtained using the `dumpdata` REST API or compatible content. The default value is empty or the value of `SFTPGO_LOADDATA_FROM` environment variable.
- `--loaddata-clean` boolean. Determine if the loaddata-from file should be removed after a successful load. Default `false` or the value of `SFTPGO_LOADDATA_CLEAN` environment variable (1 or `true`, 0 or `false`).
- `--loaddata-mode`, integer. Restore mode for data to load. 0 means new users are added, existing users are updated. 1 means new users are added, existing users are not modified. Default 1 or the value of `SFTPGO_LOADDATA_MODE` environment variable.
- `--loaddata-scan`, integer. Quota scan mode after data load. 0 means no quota scan. 1 means quota scan. 2 means scan quota if the user has quota restrictions. Default 0 or the value of `SFTPGO_LOADDATA_QUOTA_SCAN` environment variable.
- `--log-compress` boolean. Determine if the rotated log files should be compressed using gzip. Default `false` or the value of `SFTPGO_LOG_COMPRESS` environment variable (1 or `true`, 0 or `false`). It is unused if `log-file-path` is empty.
- `--log-file-path` string. Location for the log file, default "sftpgo.log" or the value of `SFTPGO_LOG_FILE_PATH` environment variable. Leave empty to write logs to the standard error.
- `--log-max-age` int. Maximum number of days to retain old log files. Default 28 or the value of `SFTPGO_LOG_MAX_AGE` environment variable. It is unused if `log-file-path` is empty.
- `--log-max-backups` int. Maximum number of old log files to retain. Default 5 or the value of `SFTPGO_LOG_MAX_BACKUPS` environment variable. It is unused if `log-file-path` is empty.
- `--log-max-size` int. Maximum size in megabytes of the log file before it gets rotated. Default 10 or the value of `SFTPGO_LOG_MAX_SIZE` environment variable. It is unused if `log-file-path` is empty.
- `--log-verbose` boolean. Enable verbose logs. Default `true` or the value of `SFTPGO_LOG_VERBOSE` environment variable (1 or `true`, 0 or `false`).
- `--profiler` boolean. Enable the built-in profiler. The profiler will be accessible via HTTP/HTTPS using the base URL "/debug/pprof/". Default `false` or the value of `SFTPGO_PROFILER` environment variable (1 or `true`, 0 or `false`).
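
For example, a typical invocation overriding a few of the defaults listed above might be (paths are illustrative):

```console
$ sftpgo serve --config-dir /etc/sftpgo --log-file-path /var/log/sftpgo/sftpgo.log --log-verbose=false
```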

Log file can be rotated on demand sending a `SIGUSR1` signal on Unix based systems and using the command `sftpgo service rotatelogs` on Windows.

If you don't configure any private host key, the daemon will use `id_rsa`, `id_ecdsa` and `id_ed25519` in the configuration directory. If these files don't exist, the daemon will attempt to autogenerate them. The server supports any private key format supported by [`crypto/ssh`](https://github.com/golang/crypto/blob/master/ssh/keys.go#L33).

The `gen` command allows you to generate completion scripts for your shell and man pages.
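
For example (the exact subcommands and flags can be listed with `sftpgo gen --help`):

```console
$ sftpgo gen completion bash > /usr/share/bash-completion/completions/sftpgo
$ sftpgo gen man -d /usr/share/man/man1
```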

## Configuration file

The configuration file contains the following sections:

- **"common"**, configuration parameters shared among all the supported protocols
  - `idle_timeout`, integer. Time in minutes after which an idle client will be disconnected. 0 means disabled. Default: 15
  - `upload_mode` integer. 0 means standard: the files are uploaded directly to the requested path. 1 means atomic: files are uploaded to a temporary path and renamed to the requested path when the client ends the upload. Atomic mode avoids problems such as a web server that serves partial files when the files are being uploaded. In atomic mode, if there is an upload error, the temporary file is deleted and so the requested upload path will not contain a partial file. 2 means atomic with resume support: same as atomic but if there is an upload error, the temporary file is renamed to the requested path and not deleted. This way, a client can reconnect and resume the upload.
  - `actions`, struct. It contains the command to execute and/or the HTTP URL to notify and the trigger conditions. See [Custom Actions](./custom-actions.md) for more details
    - `execute_on`, list of strings. Valid values are `pre-download`, `download`, `pre-upload`, `upload`, `pre-delete`, `delete`, `rename`, `ssh_cmd`. Leave empty to disable actions.
    - `execute_sync`, list of strings. Actions to be performed synchronously. The `pre-delete` action is always executed synchronously while the other ones are asynchronous. Executing an action synchronously means that SFTPGo will not return a result code to the client (which is waiting for it) until your hook has completed its execution. Leave empty to execute only the `pre-delete` hook synchronously
    - `hook`, string. Absolute path to the command to execute or HTTP URL to notify.
  - `setstat_mode`, integer. 0 means "normal mode": requests for changing permissions, owner/group and access/modification times are executed. 1 means "ignore mode": requests for changing permissions, owner/group and access/modification times are silently ignored. 2 means "ignore mode for cloud based filesystems": requests for changing permissions, owner/group and access/modification times are silently ignored for cloud filesystems and executed for local filesystem.
  - `temp_path`, string. Defines the path for temporary files such as those used for atomic uploads or file pipes. If you set this option you must make sure that the defined path exists, is accessible for writing by the user running SFTPGo, and is on the same filesystem as the users' home directories, otherwise the renaming for atomic uploads will become a copy and therefore may take a long time. The temporary files are not namespaced. The default is generally fine. Leave empty for the default.
  - `proxy_protocol`, integer. Support for [HAProxy PROXY protocol](https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt). If you are running SFTPGo behind a proxy server such as HAProxy, AWS ELB or NGINX, you can enable the proxy protocol. It provides a convenient way to safely transport connection information such as a client's address across multiple layers of NAT or TCP proxies to get the real client IP address instead of the proxy IP. Both protocol versions 1 and 2 are supported. If the proxy protocol is enabled in SFTPGo then you have to enable the protocol in your proxy configuration too. For example, for HAProxy, add `send-proxy` or `send-proxy-v2` to each server configuration line. The following modes are supported:
    - 0, disabled
    - 1, enabled. Proxy header will be used and requests without proxy header will be accepted
    - 2, required. Proxy header will be used and requests without proxy header will be rejected
  - `proxy_allowed`, list of IP addresses and IP ranges allowed to send the proxy header:
    - If `proxy_protocol` is set to 1 and we receive a proxy header from an IP that is not in the list then the connection will be accepted and the header will be ignored
    - If `proxy_protocol` is set to 2 and we receive a proxy header from an IP that is not in the list then the connection will be rejected
  - `startup_hook`, string. Absolute path to an external program or an HTTP URL to invoke as soon as SFTPGo starts. If you define an HTTP URL it will be invoked using a `GET` request. Please note that SFTPGo services may not yet be available when this hook is run. Leave empty to disable
  - `post_connect_hook`, string. Absolute path to the command to execute or HTTP URL to notify. See [Post connect hook](./post-connect-hook.md) for more details. Leave empty to disable
  - `max_total_connections`, integer. Maximum number of concurrent client connections. 0 means unlimited. Default: 0.
  - `max_per_host_connections`, integer. Maximum number of concurrent client connections from the same host (IP). If the defender is enabled, exceeding this limit will generate `score_limit_exceeded` events and thus hosts that repeatedly exceed the max allowed connections can be automatically blocked. 0 means unlimited. Default: 20.
  - `defender`, struct containing the defender configuration. See [Defender](./defender.md) for more details.
    - `enabled`, boolean. Default `false`.
    - `ban_time`, integer. Ban time in minutes.
    - `ban_time_increment`, integer. Ban time increment, as a percentage, if a banned host tries to connect again.
    - `threshold`, integer. Threshold value for banning a client.
    - `score_invalid`, integer. Score for invalid login attempts, e.g. non-existent user accounts or client disconnected for inactivity without authentication attempts.
    - `score_valid`, integer. Score for valid login attempts, e.g. user accounts that exist.
    - `score_limit_exceeded`, integer. Score for hosts that exceeded the configured rate limits or the maximum, per-host, allowed connections.
    - `observation_time`, integer. Defines the time window, in minutes, for tracking client errors. A host is banned if it has exceeded the defined threshold during the last observation time minutes.
    - `entries_soft_limit`, integer.
    - `entries_hard_limit`, integer. The number of banned IPs and host scores kept in memory will vary between the soft and hard limit.
    - `safelist_file`, string. Path to a file containing a list of IP addresses and/or networks to never ban.
    - `blocklist_file`, string. Path to a file containing a list of IP addresses and/or networks to always ban. The lists can be reloaded on demand sending a `SIGHUP` signal on Unix based systems and a `paramchange` request to the running service on Windows. A host that is already banned will not be automatically unbanned if you put it inside the safe list; you have to unban it using the REST API.
  - `rate_limiters`, list of structs containing the rate limiters configuration. Take a look [here](./rate-limiting.md) for more details. Each struct has the following fields:
    - `average`, integer. Average defines the maximum rate allowed. 0 means disabled. Default: 0
    - `period`, integer. Period defines the period in milliseconds. The rate is actually defined by dividing `average` by `period`. Default: 1000 (1 second).
    - `burst`, integer. Burst defines the maximum number of requests allowed to go through in the same arbitrarily small period of time. Default: 1
    - `type`, integer. 1 means a global rate limiter, independent from the source host. 2 means a per-ip rate limiter. Default: 2
    - `protocols`, list of strings. Available protocols are `SSH`, `FTP`, `DAV`, `HTTP`. By default all supported protocols are enabled
    - `generate_defender_events`, boolean. If `true`, the defender is enabled, and this is not a global rate limiter, a new defender event will be generated each time the configured limit is exceeded. Default `false`
    - `entries_soft_limit`, integer.
    - `entries_hard_limit`, integer. The number of per-ip rate limiters kept in memory will vary between the soft and hard limit
- **"sftpd"**, the configuration for the SFTP server
  - `bindings`, list of structs. Each struct has the following fields:
    - `port`, integer. The port used for serving SFTP requests. 0 means disabled. Default: 2022
    - `address`, string. Leave blank to listen on all available network interfaces. Default: ""
    - `apply_proxy_config`, boolean. If enabled the common proxy configuration, if any, will be applied. Default `true`
  - `bind_port`, integer. Deprecated, please use `bindings`
  - `bind_address`, string. Deprecated, please use `bindings`
  - `idle_timeout`, integer. Deprecated, please use the same key in `common` section.
  - `max_auth_tries` integer. Maximum number of authentication attempts permitted per connection. If set to a negative number, the number of attempts is unlimited. If set to zero, the number of attempts is limited to 6.
  - `banner`, string. Identification string used by the server. Leave empty to use the default banner. Default `SFTPGo_<version>`, for example `SSH-2.0-SFTPGo_0.9.5`
  - `upload_mode` integer. Deprecated, please use the same key in `common` section.
  - `actions`, struct. Deprecated, please use the same key in `common` section.
  - `keys`, struct array. Deprecated, please use `host_keys`.
    - `private_key`, path to the private key file. It can be a path relative to the config dir or an absolute one.
  - `host_keys`, list of strings. It contains the daemon's private host keys. Each host key can be defined as a path relative to the configuration directory or an absolute one. If empty, the daemon will search or try to generate `id_rsa`, `id_ecdsa` and `id_ed25519` keys inside the configuration directory. If you configure absolute paths to files named `id_rsa`, `id_ecdsa` and/or `id_ed25519` then SFTPGo will try to generate these keys using the default settings.
  - `kex_algorithms`, list of strings. Available KEX (Key Exchange) algorithms in preference order. Leave empty to use default values. The supported values can be found here: [`crypto/ssh`](https://github.com/golang/crypto/blob/master/ssh/common.go#L46 "Supported kex algos")
  - `ciphers`, list of strings. Allowed ciphers. Leave empty to use default values. The supported values can be found here: [crypto/ssh](https://github.com/golang/crypto/blob/master/ssh/common.go#L28 "Supported ciphers")
  - `macs`, list of strings. Available MAC (message authentication code) algorithms in preference order. Leave empty to use default values. The supported values can be found here: [crypto/ssh](https://github.com/golang/crypto/blob/master/ssh/common.go#L84 "Supported MACs")
  - `trusted_user_ca_keys`, list of public keys paths of certificate authorities that are trusted to sign user certificates for authentication. The paths can be absolute or relative to the configuration directory.
  - `login_banner_file`, path to the login banner file. The contents of the specified file, if any, are sent to the remote user before authentication is allowed. It can be a path relative to the config dir or an absolute one. Leave empty to disable login banner.
  - `setstat_mode`, integer. Deprecated, please use the same key in `common` section.
  - `enabled_ssh_commands`, list of enabled SSH commands. `*` enables all supported commands. More information can be found [here](./ssh-commands.md).
  - `keyboard_interactive_auth_hook`, string. Absolute path to an external program or an HTTP URL to invoke for keyboard interactive authentication. See [Keyboard Interactive Authentication](./keyboard-interactive.md) for more details.
  - `password_authentication`, boolean. Set to false to disable password authentication. This setting will disable multi-step authentication method using public key + password too. It is useful for public key only configurations if you need to manage old clients that will not attempt to authenticate with public keys if the password login method is advertised. Default: true.
  - `proxy_protocol`, integer. Deprecated, please use the same key in `common` section.
  - `proxy_allowed`, list of strings. Deprecated, please use the same key in `common` section.
- **"ftpd"**, the configuration for the FTP server
  - `bindings`, list of structs. Each struct has the following fields:
    - `port`, integer. The port used for serving FTP requests. 0 means disabled. Default: 0.
    - `address`, string. Leave blank to listen on all available network interfaces. Default: "".
    - `apply_proxy_config`, boolean. If enabled the common proxy configuration, if any, will be applied. Default `true`.
    - `tls_mode`, integer. 0 means accept both cleartext and encrypted sessions. 1 means TLS is required for both control and data connection. 2 means implicit TLS. Do not enable this blindly; please check that a proper TLS config is in place if you set `tls_mode` to a value different from 0.
    - `force_passive_ip`, IP address. External IP address to expose for passive connections. Leave empty to autodetect. If not empty, it must be a valid IPv4 address. Default: "".
    - `client_auth_type`, integer. Set to `1` to require a client certificate and verify it. Set to `2` to request a client certificate during the TLS handshake and verify it if given, in this mode the client is allowed not to send a certificate. At least one certification authority must be defined in order to verify client certificates. If no certification authority is defined, this setting is ignored. Default: 0.
    - `tls_cipher_suites`, list of strings. List of supported cipher suites for TLS version 1.2. If empty, a default list of secure cipher suites is used, with a preference order based on hardware performance. Note that TLS 1.3 ciphersuites are not configurable. The supported ciphersuites names are defined [here](https://github.com/golang/go/blob/master/src/crypto/tls/cipher_suites.go#L52). Any invalid name will be silently ignored. The order matters, the ciphers listed first will be the preferred ones. Default: empty.
  - `bind_port`, integer. Deprecated, please use `bindings`
  - `bind_address`, string. Deprecated, please use `bindings`
  - `banner`, string. Greeting banner displayed when a connection first comes in. Leave empty to use the default banner. Default `SFTPGo <version> ready`, for example `SFTPGo 1.0.0-dev ready`.
  - `banner_file`, path to the banner file. The contents of the specified file, if any, are displayed when someone connects to the server. It can be a path relative to the config dir or an absolute one. If set, it overrides the banner string provided by the `banner` option. Leave empty to disable.
  - `active_transfers_port_non_20`, boolean. Do not impose the port 20 for active data transfers. Enabling this option allows running SFTPGo with fewer privileges. Default: false.
  - `force_passive_ip`, IP address. Deprecated, please use `bindings`
  - `passive_port_range`, struct containing the keys `start` and `end`. Port range for data connections. Random if not specified. Default range is 50000-50100.
  - `disable_active_mode`, boolean. Set to `true` to disable active FTP, default `false`.
  - `enable_site`, boolean. Set to true to enable the FTP SITE command. We support `chmod` and `symlink` if SITE support is enabled. Default `false`
  - `hash_support`, integer. Set to `1` to enable FTP commands that allow calculating the hash value of files. These FTP commands will be enabled: `HASH`, `XCRC`, `MD5/XMD5`, `XSHA/XSHA1`, `XSHA256`, `XSHA512`. Please keep in mind that to calculate the hash we need to read the whole file: for remote backends this means downloading the file, for the encrypted backend this means decrypting the file. Default `0`.
  - `combine_support`, integer. Set to 1 to enable support for the non-standard `COMB` FTP command. Combine is only supported for the local filesystem; for cloud backends it has no advantage as it will download the partial files and will upload the combined one. Cloud backends natively support multipart uploads. Default `0`.
  - `certificate_file`, string. Certificate for FTPS. This can be an absolute path or a path relative to the config dir.
  - `certificate_key_file`, string. Private key matching the above certificate. This can be an absolute path or a path relative to the config dir. A certificate and the private key are required to enable explicit and implicit TLS. Certificate and key files can be reloaded on demand sending a `SIGHUP` signal on Unix based systems and a `paramchange` request to the running service on Windows.
  - `ca_certificates`, list of strings. Set of root certificate authorities to be used to verify client certificates.
  - `ca_revocation_lists`, list of strings. Set of revocation lists, one for each root CA, to be used to check if a client certificate has been revoked. The revocation lists can be reloaded on demand sending a `SIGHUP` signal on Unix based systems and a `paramchange` request to the running service on Windows.
  - `tls_mode`, integer. Deprecated, please use `bindings`
- **"webdavd"**, the configuration for the WebDAV server, more info [here](./webdav.md)
  - `bindings`, list of structs. Each struct has the following fields:
    - `port`, integer. The port used for serving WebDAV requests. 0 means disabled. Default: 0.
    - `address`, string. Leave blank to listen on all available network interfaces. Default: "".
    - `enable_https`, boolean. Set to `true` and provide both a certificate and a key file to enable HTTPS connection for this binding. Default `false`.
    - `client_auth_type`, integer. Set to `1` to require a client certificate and verify it. Set to `2` to request a client certificate during the TLS handshake and verify it if given, in this mode the client is allowed not to send a certificate. At least one certification authority must be defined in order to verify client certificates. If no certification authority is defined, this setting is ignored. Default: 0.
    - `tls_cipher_suites`, list of strings. List of supported cipher suites for TLS version 1.2. If empty, a default list of secure cipher suites is used, with a preference order based on hardware performance. Note that TLS 1.3 ciphersuites are not configurable. The supported ciphersuites names are defined [here](https://github.com/golang/go/blob/master/src/crypto/tls/cipher_suites.go#L52). Any invalid name will be silently ignored. The order matters, the ciphers listed first will be the preferred ones. Default: empty.
    - `prefix`, string. Prefix for WebDAV resources, if empty WebDAV resources will be available at the `/` URI. If defined it must be an absolute URI, for example `/dav`. Default: "".
    - `proxy_allowed`, list of IP addresses and IP ranges allowed to set `X-Forwarded-For`, `X-Real-IP`, `CF-Connecting-IP`, `True-Client-IP` headers. Any of the indicated headers, if set on requests from a connection address not in this list, will be silently ignored. Default: empty.
  - `bind_port`, integer. Deprecated, please use `bindings`.
  - `bind_address`, string. Deprecated, please use `bindings`.
  - `certificate_file`, string. Certificate for WebDAV over HTTPS. This can be an absolute path or a path relative to the config dir.
  - `certificate_key_file`, string. Private key matching the above certificate. This can be an absolute path or a path relative to the config dir. A certificate and a private key are required to enable HTTPS connections. Certificate and key files can be reloaded on demand sending a `SIGHUP` signal on Unix based systems and a `paramchange` request to the running service on Windows.
  - `ca_certificates`, list of strings. Set of root certificate authorities to be used to verify client certificates.
  - `ca_revocation_lists`, list of strings. Set of revocation lists, one for each root CA, to be used to check if a client certificate has been revoked. The revocation lists can be reloaded on demand sending a `SIGHUP` signal on Unix based systems and a `paramchange` request to the running service on Windows.
  - `cors`, struct containing CORS configuration. SFTPGo uses [Go CORS handler](https://github.com/rs/cors), please refer to upstream documentation for fields meaning and their default values.
    - `enabled`, boolean, set to true to enable CORS.
    - `allowed_origins`, list of strings.
    - `allowed_methods`, list of strings.
    - `allowed_headers`, list of strings.
    - `exposed_headers`, list of strings.
    - `allow_credentials` boolean.
    - `max_age`, integer.
  - `cache`, struct containing cache configuration for the authenticated users.
    - `enabled`, boolean, set to true to enable user caching. Default: true.
    - `expiration_time`, integer. Expiration time, in minutes, for the cached users. 0 means unlimited. Default: 0.
    - `max_size`, integer. Maximum number of users to cache. 0 means unlimited. Default: 50.
|
|
||||||
- **"data_provider"**, the configuration for the data provider
|
|
||||||
- `driver`, string. Supported drivers are `sqlite`, `mysql`, `postgresql`, `cockroachdb`, `bolt`, `memory`
|
|
||||||
- `name`, string. Database name. For driver `sqlite` this can be the database name relative to the config dir or the absolute path to the SQLite database. For driver `memory` this is the (optional) path relative to the config dir or the absolute path to the provider dump, obtained using the `dumpdata` REST API, to load. This dump will be loaded at startup and can be reloaded on demand sending a `SIGHUP` signal on Unix based systems and a `paramchange` request to the running service on Windows. The `memory` provider will not modify the provided file so quota usage and last login will not be persisted. If you plan to use a SQLite database over a `cifs` network share (this is not recommended in general) you must use the `nobrl` mount option otherwise you will get the `database is locked` error. Some users reported that the `bolt` provider works fine over `cifs` shares.
|
|
||||||
- `host`, string. Database host. Leave empty for drivers `sqlite`, `bolt` and `memory`
|
|
||||||
- `port`, integer. Database port. Leave empty for drivers `sqlite`, `bolt` and `memory`
|
|
||||||
- `username`, string. Database user. Leave empty for drivers `sqlite`, `bolt` and `memory`
|
|
||||||
- `password`, string. Database password. Leave empty for drivers `sqlite`, `bolt` and `memory`
|
|
||||||
- `sslmode`, integer. Used for drivers `mysql` and `postgresql`. 0 disable SSL/TLS connections, 1 require ssl, 2 set ssl mode to `verify-ca` for driver `postgresql` and `skip-verify` for driver `mysql`, 3 set ssl mode to `verify-full` for driver `postgresql` and `preferred` for driver `mysql`
|
|
||||||
- `connection_string`, string. Provide a custom database connection string. If not empty, this connection string will be used instead of building one using the previous parameters. Leave empty for drivers `bolt` and `memory`
|
|
||||||
- `sql_tables_prefix`, string. Prefix for SQL tables
|
|
||||||
  - `track_quota`, integer. Set the preferred mode to track users quota between the following choices:
    - 0, disable quota tracking. The REST API to scan users' home directories/virtual folders and update quota will do nothing.
    - 1, quota is updated each time a user uploads or deletes a file, even if the user has no quota restrictions.
    - 2, quota is updated each time a user uploads or deletes a file, but only for users with quota restrictions and for virtual folders. With this configuration, the `quota scan` and `folder_quota_scan` REST APIs can still be used to periodically update space usage for users without quota restrictions and for folders.
  - `delayed_quota_update`, integer. The number of seconds to accumulate quota updates. If there are many close uploads, accumulating quota updates can save many queries to the data provider. If you want to track quotas, a scheduled quota update is recommended in any case: the stored quota may be incorrect for several reasons, such as an unexpected shutdown while uploading files, temporary provider failures, files copied outside of SFTPGo, and so on. You could use the [quotascan example](../examples/quotascan) as a starting point. 0 means immediate quota update.
  - `pool_size`, integer. Sets the maximum number of open connections for the `mysql` and `postgresql` drivers. Default: 0 (unlimited).
  - `users_base_dir`, string. Users' default base directory. If no home dir is defined while adding a new user, and this value is a valid absolute path, then the user home dir will be automatically defined as the path obtained by joining the base dir and the username.
  - `actions`, struct. It contains the command to execute and/or the HTTP URL to notify and the trigger conditions. See [Custom Actions](./custom-actions.md) for more details (a sketch follows this list).
    - `execute_on`, list of strings. Valid values are `add`, `update`, `delete`. The `update` action will not be fired for internal updates such as the last login or the user quota fields.
    - `hook`, string. Absolute path to the command to execute or HTTP URL to notify.
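
    As a sketch, an `actions` configuration that notifies a hypothetical HTTP endpoint (the URL is a placeholder) on every user change could look like this:

    ```json
    "actions": {
      "execute_on": ["add", "update", "delete"],
      "hook": "https://hooks.example.com/sftpgo/users"
    }
    ```
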
  - `external_auth_program`, string. Deprecated, please use `external_auth_hook`.
  - `external_auth_hook`, string. Absolute path to an external program or an HTTP URL to invoke for users authentication. See [External Authentication](./external-auth.md) for more details. Leave empty to disable.
  - `external_auth_scope`, integer. 0 means all supported authentication scopes (passwords, public keys and keyboard interactive). 1 means passwords only. 2 means public keys only. 4 means keyboard interactive only. 8 means TLS certificate. The flags can be combined; for example 6 means public keys and keyboard interactive.
  - `credentials_path`, string. Defines the directory for storing user provided credential files such as Google Cloud Storage credentials. This can be an absolute path or a path relative to the config dir.
  - `prefer_database_credentials`, boolean. When `true`, users' Google Cloud Storage credentials will be written to the data provider instead of disk, though pre-existing credentials on disk will be used as a fallback. When `false`, they will be written to the directory specified by `credentials_path`.
  - `pre_login_program`, string. Deprecated, please use `pre_login_hook`.
  - `pre_login_hook`, string. Absolute path to an external program or an HTTP URL to invoke to modify user details just before the login. See [Dynamic user modification](./dynamic-user-mod.md) for more details. Leave empty to disable.
  - `post_login_hook`, string. Absolute path to an external program or an HTTP URL to invoke to notify a successful or failed login. See [Post-login hook](./post-login-hook.md) for more details. Leave empty to disable.
  - `post_login_scope`, integer. Defines the scope for the post-login hook. 0 means notify both failed and successful logins. 1 means notify failed logins. 2 means notify successful logins.
  - `check_password_hook`, string. Absolute path to an external program or an HTTP URL to invoke to check the user provided password. See [Check password hook](./check-password-hook.md) for more details. Leave empty to disable.
  - `check_password_scope`, integer. Defines the scope for the check password hook. 0 means all protocols, 1 means SSH, 2 means FTP, 4 means WebDAV. You can combine the scopes; for example 6 means FTP and WebDAV.
  - `password_hashing`, struct. It contains the configuration parameters used to generate password hashes. SFTPGo can verify passwords in several formats and uses, by default, the `bcrypt` algorithm to hash plain-text passwords before storing them inside the data provider. These options allow you to customize how the hash is generated.
    - `argon2_options`, struct containing the options for the argon2id hashing algorithm. The `memory` and `iterations` parameters control the computational cost of hashing the password: the higher these figures are, the greater the cost of generating the hash and the longer the runtime, and therefore the greater the cost for any attacker trying to guess the password. If the code is running on a machine with multiple cores, you can decrease the runtime without reducing the cost by increasing the `parallelism` parameter, which controls the number of threads that the work is spread across.
      - `memory`, unsigned integer. The amount of memory used by the algorithm (in kibibytes). Default: 65536.
      - `iterations`, unsigned integer. The number of iterations over the memory. Default: 1.
      - `parallelism`, unsigned 8-bit integer. The number of threads (or lanes) used by the algorithm. Default: 2.
    - `bcrypt_options`, struct containing the options for the bcrypt hashing algorithm.
      - `cost`, integer between 4 and 31. Default: 10.
    - `algo`, string. Algorithm to use for hashing passwords. Available algorithms: `argon2id`, `bcrypt`. For bcrypt hashing we use the `$2a$` prefix. Default: `bcrypt`.
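
    Putting the documented defaults together, the `password_hashing` section corresponds to this JSON sketch:

    ```json
    "password_hashing": {
      "argon2_options": {
        "memory": 65536,
        "iterations": 1,
        "parallelism": 2
      },
      "bcrypt_options": {
        "cost": 10
      },
      "algo": "bcrypt"
    }
    ```
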
  - `password_caching`, boolean. Verifying argon2id passwords has a high memory and computational cost, and verifying bcrypt passwords has a high computational cost; enabling in-memory password caching reduces these costs. Default: `true`.
  - `update_mode`, integer. Defines how the database will be initialized/updated. 0 means automatically. 1 means manually using the `initprovider` sub-command.
  - `skip_natural_keys_validation`, boolean. If `true` you can use any UTF-8 character for natural keys such as username, admin name, folder name. These keys are used in URIs for the REST API and the web admin. If `false` only unreserved URI characters are allowed: ALPHA / DIGIT / "-" / "." / "_" / "~". Default: `false`.
  - `create_default_admin`, boolean. If enabled, a default admin user with username `admin` and password `password` will be created on first start. You can also create the first admin user by using the web interface or by loading initial data. Default: `false`.
- **"httpd"**, the configuration for the HTTP server used to serve REST API and to expose the built-in web interface
|
|
||||||
- `bindings`, list of structs. Each struct has the following fields:
|
|
||||||
- `port`, integer. The port used for serving HTTP requests. Default: 8080.
|
|
||||||
- `address`, string. Leave blank to listen on all available network interfaces. On *NIX you can specify an absolute path to listen on a Unix-domain socket Default: "127.0.0.1".
|
|
||||||
- `enable_web_admin`, boolean. Set to `false` to disable the built-in web admin for this binding. You also need to define `templates_path` and `static_files_path` to use the built-in web admin interface. Default `true`.
|
|
||||||
- `enable_web_client`, boolean. Set to `false` to disable the built-in web client for this binding. You also need to define `templates_path` and `static_files_path` to use the built-in web client interface. Default `true`.
|
|
||||||
- `enable_https`, boolean. Set to `true` and provide both a certificate and a key file to enable HTTPS connection for this binding. Default `false`.
|
|
||||||
- `client_auth_type`, integer. Set to `1` to require client certificate authentication in addition to JWT/Web authentication. You need to define at least a certificate authority for this to work. Default: 0.
|
|
||||||
- `tls_cipher_suites`, list of strings. List of supported cipher suites for TLS version 1.2. If empty, a default list of secure cipher suites is used, with a preference order based on hardware performance. Note that TLS 1.3 ciphersuites are not configurable. The supported ciphersuites names are defined [here](https://github.com/golang/go/blob/master/src/crypto/tls/cipher_suites.go#L52). Any invalid name will be silently ignored. The order matters, the ciphers listed first will be the preferred ones. Default: empty.
|
|
||||||
- `proxy_allowed`, list of IP addresses and IP ranges allowed to set `X-Forwarded-For`, `X-Real-IP`, `X-Forwarded-Proto`, `CF-Connecting-IP`, `True-Client-IP` headers. Any of the indicated headers, if set on requests from a connection address not in this list, will be silently ignored. Default: empty.
|
|
||||||
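
    As a sketch, a binding using the documented defaults looks like this:

    ```json
    "bindings": [
      {
        "port": 8080,
        "address": "127.0.0.1",
        "enable_web_admin": true,
        "enable_web_client": true,
        "enable_https": false,
        "client_auth_type": 0,
        "tls_cipher_suites": [],
        "proxy_allowed": []
      }
    ]
    ```
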
  - `bind_port`, integer. Deprecated, please use `bindings`.
  - `bind_address`, string. Deprecated, please use `bindings`. Leave blank to listen on all available network interfaces. On \*NIX you can specify an absolute path to listen on a Unix-domain socket. Default: "".
  - `templates_path`, string. Path to the HTML web templates. This can be an absolute path or a path relative to the config dir.
  - `static_files_path`, string. Path to the static files for the web interface. This can be an absolute path or a path relative to the config dir. If both `templates_path` and `static_files_path` are empty the built-in web interface will be disabled.
  - `backups_path`, string. Path to the backup directory. This can be an absolute path or a path relative to the config dir. We don't allow backups in arbitrary paths for security reasons.
  - `web_root`, string. Defines a base URL for the web admin and client interfaces. If empty, web admin and client resources will be available at the root ("/") URI. If defined it must be an absolute URI or it will be ignored.
  - `certificate_file`, string. Certificate for HTTPS. This can be an absolute path or a path relative to the config dir.
  - `certificate_key_file`, string. Private key matching the above certificate. This can be an absolute path or a path relative to the config dir. If both the certificate and the private key are provided, the server will expect HTTPS connections. Certificate and key files can be reloaded on demand by sending a `SIGHUP` signal on Unix based systems and a `paramchange` request to the running service on Windows.
  - `ca_certificates`, list of strings. Set of root certificate authorities to be used to verify client certificates.
  - `ca_revocation_lists`, list of strings. Set of revocation lists, one for each root CA, to be used to check if a client certificate has been revoked. The revocation lists can be reloaded on demand by sending a `SIGHUP` signal on Unix based systems and a `paramchange` request to the running service on Windows.
- **"telemetry"**, the configuration for the telemetry server, more details [below](#telemetry-server)
|
|
||||||
- `bind_port`, integer. The port used for serving HTTP requests. Set to 0 to disable HTTP server. Default: 10000
|
|
||||||
- `bind_address`, string. Leave blank to listen on all available network interfaces. On \*NIX you can specify an absolute path to listen on a Unix-domain socket. Default: "127.0.0.1"
|
|
||||||
- `enable_profiler`, boolean. Enable the built-in profiler. Default `false`
|
|
||||||
- `auth_user_file`, string. Path to a file used to store usernames and passwords for basic authentication. This can be an absolute path or a path relative to the config dir. We support HTTP basic authentication, and the file format must conform to the one generated using the Apache `htpasswd` tool. The supported password formats are bcrypt (`$2y$` prefix) and md5 crypt (`$apr1$` prefix). If empty, HTTP authentication is disabled. Authentication will be always disabled for the `/healthz` endpoint.
|
|
||||||
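
    As a sketch, assuming the Apache `htpasswd` tool is installed, you can generate a compatible file with bcrypt hashed passwords like this (the path and username are placeholders):

    ```shell
    htpasswd -B -c /etc/sftpgo/telemetry_auth myuser
    ```
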
  - `certificate_file`, string. Certificate for HTTPS. This can be an absolute path or a path relative to the config dir.
  - `certificate_key_file`, string. Private key matching the above certificate. This can be an absolute path or a path relative to the config dir. If both the certificate and the private key are provided, the server will expect HTTPS connections. Certificate and key files can be reloaded on demand by sending a `SIGHUP` signal on Unix based systems and a `paramchange` request to the running service on Windows.
  - `tls_cipher_suites`, list of strings. List of supported cipher suites for TLS version 1.2. If empty, a default list of secure cipher suites is used, with a preference order based on hardware performance. Note that TLS 1.3 cipher suites are not configurable. The supported cipher suite names are defined [here](https://github.com/golang/go/blob/master/src/crypto/tls/cipher_suites.go#L52). Any invalid name is silently ignored. The order matters: the ciphers listed first are the preferred ones. Default: empty.
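
  Putting the documented defaults together, a telemetry section sketch looks like this:

  ```json
  "telemetry": {
    "bind_port": 10000,
    "bind_address": "127.0.0.1",
    "enable_profiler": false,
    "auth_user_file": "",
    "certificate_file": "",
    "certificate_key_file": "",
    "tls_cipher_suites": []
  }
  ```
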
- **"http"**, the configuration for HTTP clients. HTTP clients are used for executing hooks. Some hooks use a retryable HTTP client, for these hooks you can configure the time between retries and the number of retries. Please check the hook specific documentation to understand which hooks use a retryable HTTP client.
|
|
||||||
- `timeout`, float. Timeout specifies a time limit, in seconds, for requests. For requests with retries this is the timeout for a single request
|
|
||||||
- `retry_wait_min`, integer. Defines the minimum waiting time between attempts in seconds.
|
|
||||||
- `retry_wait_max`, integer. Defines the maximum waiting time between attempts in seconds. The backoff algorithm will perform exponential backoff based on the attempt number and limited by the provided minimum and maximum durations.
|
|
||||||
- `retry_max`, integer. Defines the maximum number of retries if the first request fails.
|
|
||||||
- `ca_certificates`, list of strings. List of paths to extra CA certificates to trust. The paths can be absolute or relative to the config dir. Adding trusted CA certificates is a convenient way to use self-signed certificates without defeating the purpose of using TLS.
|
|
||||||
- `certificates`, list of certificate for mutual TLS. Each certificate is a struct with the following fields:
|
|
||||||
- `cert`, string. Path to the certificate file. The path can be absolute or relative to the config dir.
|
|
||||||
- `key`, string. Path to the key file. The path can be absolute or relative to the config dir.
|
|
||||||
- `skip_tls_verify`, boolean. if enabled the HTTP client accepts any TLS certificate presented by the server and any host name in that certificate. In this mode, TLS is susceptible to man-in-the-middle attacks. This should be used only for testing.
|
|
||||||
- `headers`, list of structs. You can define a list of http headers to add to each hook. Each struct has the following fields:
|
|
||||||
- `key`, string
|
|
||||||
- `value`, string. The header is silently ignored if `key` or `value` are empty
|
|
||||||
- `url`, string, optional. If not empty, the header will be added only if the request URL starts with the one specified here
|
|
||||||
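
  As an illustrative sketch (the timing values and the header contents are placeholders, not documented defaults), an `http` section that adds an API key header only for hooks sent to a specific endpoint could look like this:

  ```json
  "http": {
    "timeout": 20,
    "retry_wait_min": 2,
    "retry_wait_max": 30,
    "retry_max": 3,
    "ca_certificates": [],
    "certificates": [],
    "skip_tls_verify": false,
    "headers": [
      {
        "key": "X-API-Key",
        "value": "changeme",
        "url": "https://hooks.example.com"
      }
    ]
  }
  ```
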
- **kms**, configuration for the Key Management Service, more details can be found [here](./kms.md).
  - `secrets`
    - `url`
    - `master_key_path`
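
  As a sketch, the shape of this section with empty values is:

  ```json
  "kms": {
    "secrets": {
      "url": "",
      "master_key_path": ""
    }
  }
  ```
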

A full example showing the default config (in JSON format) can be found [here](../sftpgo.json).

If you want to use a private host key that uses an algorithm/setting different from the auto generated RSA/ECDSA keys, or more than two private keys, you can generate your own keys and replace the empty `keys` array with something like this:

```json
"host_keys": [
  "id_rsa",
  "id_ecdsa",
  "id_ed25519"
]
```

where `id_rsa`, `id_ecdsa` and `id_ed25519`, in this example, are files containing your generated keys. You can use absolute paths or paths relative to the configuration directory specified via the `--config-dir` serve flag. By default the configuration directory is the working directory.

If you want the default host keys to be generated in a directory different from the config dir, specify absolute paths to files named `id_rsa`, `id_ecdsa` or `id_ed25519`, like this:

```json
"host_keys": [
  "/etc/sftpgo/keys/id_rsa",
  "/etc/sftpgo/keys/id_ecdsa",
  "/etc/sftpgo/keys/id_ed25519"
]
```

SFTPGo will then try to create `id_rsa`, `id_ecdsa` and `id_ed25519`, if they are missing, inside the directory `/etc/sftpgo/keys`.

The configuration can be read from JSON, TOML, YAML, HCL, envfile and Java properties config files. If your `config-file` flag is set to `sftpgo` (the default value), you need to create a configuration file called `sftpgo.json`, `sftpgo.yaml` and so on inside the `config-dir`.

## Environment variables

You can also override all of the available configuration options using environment variables. SFTPGo checks for environment variables with a name matching the configuration key uppercased and prefixed with `SFTPGO_`. You need to use `__` to traverse a struct.

Let's see some examples:

- To set the `port` for the first sftpd binding, you need to define the env var `SFTPGO_SFTPD__BINDINGS__0__PORT`
- To set the `execute_on` actions, you need to define the env var `SFTPGO_COMMON__ACTIONS__EXECUTE_ON`. For example `SFTPGO_COMMON__ACTIONS__EXECUTE_ON=upload,download`
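
As a minimal sketch, on Linux you could start the service with such overrides like this (the port value is arbitrary):

```shell
export SFTPGO_SFTPD__BINDINGS__0__PORT=2222
export SFTPGO_COMMON__ACTIONS__EXECUTE_ON=upload,download
sftpgo serve
```
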
On some hardware you can get faster SFTP performance by replacing the Go `crypto/sha256` implementation with [sha256-simd](https://github.com/minio/sha256-simd).

The performance of SHA256 is relevant for clients using AES CTR ciphers and `hmac-sha2-256` as the Message Authentication Code (MAC).

Up to the 2.0.x versions SFTPGo automatically used `sha256-simd`, but over time the standard Go implementation improved a lot and it is now faster than `sha256-simd` on some CPUs.

You can select `sha256-simd` by setting the environment variable `SFTPGO_MINIO_SHA256_SIMD` to `1`.

`sha256-simd` is particularly useful if you have an Intel CPU with SHA extensions or an ARM CPU with Cryptography Extensions.
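
For example, to start the service with `sha256-simd` enabled:

```shell
export SFTPGO_MINIO_SHA256_SIMD=1
sftpgo serve
```
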
## Telemetry Server

The telemetry server exposes the following endpoints:

- `/healthz`, health information (for health checks)
- `/metrics`, Prometheus metrics
- `/debug/pprof`, if enabled via the `enable_profiler` configuration key, for profiling, more details [here](./profiling.md)
# Google Cloud Storage backend

To connect SFTPGo to Google Cloud Storage you can use the Application Default Credentials (ADC) strategy to find your application's credentials automatically, or you can explicitly provide a JSON credentials file obtained from the Google Cloud Console. Take a look [here](https://cloud.google.com/docs/authentication/production#providing_credentials_to_your_application) for details.

By specifying a different `key_prefix` you can assign different "folders" of the same bucket to different users. This is similar to a chroot directory for the local filesystem. Each SFTP/SCP user can only access the assigned folder and its contents. The folder identified by `key_prefix` does not need to be pre-created.

You can optionally specify a [storage class](https://cloud.google.com/storage/docs/storage-classes) too. Leave it blank to use the default storage class.

The configured bucket must exist.

This backend is very similar to the [S3](./s3.md) backend, and it has the same limitations.
# Tutorials

Here we collect step-by-step tutorials. SFTPGo users are encouraged to contribute!

- [Getting Started](./getting-started.md)
- [SFTPGo with PostgreSQL data provider and S3 backend](./postgresql-s3.md)
- [SFTPGo on Windows with Active Directory Integration + Caddy Static File Server](https://www.youtube.com/watch?v=M5UcJI8t4AI)
- [Securing SFTPGo with a free Let's Encrypt TLS Certificate](./lets-encrypt-certificate.md)
# Getting Started

SFTPGo allows you to securely share your files over SFTP and, optionally, FTP/S and WebDAV too.
Several storage backends are supported and they are configurable per user, so you can serve a local directory for one user and an S3 bucket (or part of it) for another.
SFTPGo also supports virtual folders: a virtual folder can use any of the supported storage backends, so you can have, for example, an S3 user that exposes a GCS bucket (or part of it) on a specified path and an encrypted local filesystem on another one.
Virtual folders can be private or shared among multiple users; for shared virtual folders you can define different quota limits for each user.

In this tutorial we explore the main features and concepts using the built-in web admin interface. Advanced users can also use the SFTPGo [REST API](https://sftpgo.stoplight.io/docs/sftpgo/openapi.yaml).

- [Installation](#Installation)
- [Initial configuration](#Initial-configuration)
- [Creating users](#Creating-users)
  - [Creating users with a Cloud Storage backend](#Creating-users-with-a-Cloud-Storage-backend)
  - [Creating users with a local encrypted backend (Data At Rest Encryption)](#Creating-users-with-a-local-encrypted-backend-Data-At-Rest-Encryption)
- [Virtual permissions](#Virtual-permissions)
- [Virtual folders](#Virtual-folders)
- [Configuration parameters](#Configuration-parameters)
  - [Use PostgreSQL data provider](#Use-PostgreSQL-data-provider)
  - [Use MySQL/MariaDB data provider](#Use-MySQLMariaDB-data-provider)
  - [Use CockroachDB data provider](#Use-CockroachDB-data-provider)
  - [Enable FTP service](#Enable-FTP-service)
  - [Enable WebDAV service](#Enable-WebDAV-service)

## Installation

You can easily install SFTPGo by downloading the appropriate package for your operating system and architecture. Please visit the [releases](https://github.com/drakkan/sftpgo/releases "releases") page.

An official Docker image is available. Documentation is [here](./../../docker/README.md).

In this guide, we assume that SFTPGo is already installed and running using the default configuration.

## Initial configuration

Before you can use SFTPGo you need to create an admin account, so open [http://127.0.0.1:8080/web/admin](http://127.0.0.1:8080/web/admin) in your web browser, replacing `127.0.0.1` with the appropriate IP address if SFTPGo is not running on localhost.

![Setup](./img/setup.png)

After creating the admin account you will be automatically logged in.

![Users list](./img/initial-screen.png)

The web admin is now available at the following URL:

[http://127.0.0.1:8080/web/admin](http://127.0.0.1:8080/web/admin)

From the `Status` page you can see the active services.

![Status](./img/status.png)

The default configuration enables the SFTP service on port `2022` and uses `SQLite` as the data provider.

## Creating users

Let's create our first local user:

- from the users page click the `+` icon to open the Add user page
- the only required fields are the `Username`, a `Password` or a `Public key`, and the default `Permissions`
- if you are on Windows, or you installed SFTPGo manually and no `users_base_dir` is defined in your configuration file, you also have to set a `Home Dir`. It must be an absolute path, for example `/srv/sftpgo/data/username` on Linux or `C:\sftpgo\data\username` on Windows. SFTPGo will try to automatically create the home directory, if missing, when the user logs in. Each user can only access files and folders inside its home directory.
- click `Submit`

![Add user](./img/add-user.png)

Now test the new user. We use the `sftp` CLI here; you can use any SFTP client.

```shell
$ sftp -P 2022 nicola@127.0.0.1
nicola@127.0.0.1's password:
Connected to 127.0.0.1.
sftp> ls
sftp> put file.txt
Uploading file.txt to /file.txt
file.txt                                still100% 4034     3.9MB/s   00:00
sftp> ls
file.txt
sftp> mkdir adir
sftp> cd adir/
sftp> put file.txt
Uploading file.txt to /adir/file.txt
file.txt                                    100% 4034     4.0MB/s   00:00
sftp> ls
file.txt
sftp> get file.txt
Fetching /adir/file.txt to file.txt
/adir/file.txt                              100% 4034     1.9MB/s   00:00
```

It worked! We can upload/download files and create directories.

Each user can browse and download their files, and change their credentials, using the web client interface available at the following URL:

[http://127.0.0.1:8080/web/client](http://127.0.0.1:8080/web/client)

![Web client files](./img/web-client-files.png)

![Web client credentials](./img/web-client-credentials.png)

### Creating users with a Cloud Storage backend

The procedure is similar to the one described for local users; you only have to specify the Cloud Storage backend and its credentials.

The screenshot below shows an example configuration for an S3 backend.

![S3 user](./img/s3-user.png)

The screenshot below shows an example configuration for an Azure Blob Storage backend.

![Azure Blob user](./img/az-user.png)

The screenshot below shows an example configuration for a Google Cloud Storage backend.

![Google Cloud user](./img/gcs-user.png)

The screenshot below shows an example configuration for an SFTP server as storage backend.

![User using another SFTP server as storage backend](./img/sftp-user.png)

Setting a `Key Prefix` restricts the user to a specific "folder" in the bucket, so that the same bucket can be shared among different users by assigning each user a specific portion of the bucket.

### Creating users with a local encrypted backend (Data At Rest Encryption)

The procedure is similar to the one described for local users; you only have to specify the encryption passphrase.
The screenshot below shows an example configuration.

![User with cryptfs backend](./img/local-encrypted.png)

You can find more details about Data At Rest Encryption [here](../dare.md).

## Virtual permissions

SFTPGo supports per-directory virtual permissions. For each user you have to specify global permissions, which you can then override on a per-directory basis.

Take a look at the following screens.

![Virtual permissions](./img/virtual-permissions.png)

![Per-directory permissions](./img/dir-permissions.png)

This user has full access as default (`*`), can only list and download from the `/read-only` path, and has no permissions at all for the `/subdir` path.

Let's test it. We use the `sftp` CLI here; you can use any SFTP client.

```shell
$ sftp -P 2022 nicola@127.0.0.1
Connected to 127.0.0.1.
sftp> ls
adir        file.txt    read-only   subdir
sftp> put file.txt
Uploading file.txt to /file.txt
file.txt                                    100% 4034    19.4MB/s   00:00
sftp> rm file.txt
Removing /file.txt
sftp> ls
adir        read-only   subdir
sftp> cd read-only/
sftp> ls
file.txt
sftp> put file1.txt
Uploading file1.txt to /read-only/file1.txt
remote open("/read-only/file1.txt"): Permission denied
sftp> get file.txt
Fetching /read-only/file.txt to file.txt
/read-only/file.txt                         100% 4034     2.2MB/s   00:00
sftp> cd ..
sftp> ls
adir        read-only   subdir
sftp> cd /subdir
sftp> ls
remote readdir("/subdir"): Permission denied
```

As you can see, it worked as expected.

## Virtual folders

From the web admin interface click `Folders` and then the `+` icon.

![Add folder](./img/add-folder.png)

To create a local folder you need to specify a `Name` and an `Absolute path`. For other backends you have to specify the backend type and its credentials; this is the same procedure already detailed for creating users with cloud backends.

Suppose we created two virtual folders named `localfolder` and `minio`, as you can see in the following screen.

![Folders](./img/folders.png)

- `localfolder` uses the local filesystem as storage backend
- `minio` uses MinIO (S3 compatible) as storage backend

Now click `Users` in the left menu, select a user and click the `Edit` icon to update the user and associate the virtual folders.

Virtual folders must be referenced using their unique name and you can expose them on a configurable virtual path. Take a look at the following screenshot.

![Virtual Folders](./img/virtual-folders.png)

We exposed the folder named `localfolder` on the path `/vdirlocal` (this must be an absolute UNIX path, on Windows too) and the folder named `minio` on the path `/vdirminio`. For `localfolder` the quota usage is included within the user quota, while for the `minio` folder we defined separate quota limits: at most 2 files and at most 100MB, whichever is reached first.

The folder `minio` can be shared with other users and we can define different quota limits on a per-user basis. The folder `localfolder` is considered private since we have included its quota limits within those of the user; if we shared it with other users we would break quota calculation.

Let's test these virtual folders. We use the `sftp` CLI here; you can use any SFTP client.

```shell
$ sftp -P 2022 nicola@127.0.0.1
nicola@127.0.0.1's password:
Connected to 127.0.0.1.
sftp> ls
adir        read-only   subdir      vdirlocal   vdirminio
sftp> cd vdirlocal
sftp> put file.txt
Uploading file.txt to /vdirlocal/file.txt
file.txt                                    100% 4034    17.3MB/s   00:00
sftp> ls
file.txt
sftp> cd ..
sftp> cd vdirminio/
sftp> put file.txt
Uploading file.txt to /vdirminio/file.txt
file.txt                                    100% 4034     4.8MB/s   00:00
sftp> ls
file.txt
sftp> put file.txt file1.txt
Uploading file.txt to /vdirminio/file1.txt
file.txt                                    100% 4034     2.8MB/s   00:00
sftp> put file.txt file2.txt
Uploading file.txt to /vdirminio/file2.txt
remote open("/vdirminio/file2.txt"): Failure
sftp> quit
```

The last upload failed since we exceeded the quota limit on the number of files.

## Configuration parameters

Until now we used the default configuration. To change the global service parameters you have to edit the configuration file, or set the appropriate environment variables, and restart SFTPGo to apply the changes.

A full explanation of all configuration methods can be found [here](./../full-configuration.md); here we explore some common use cases. Please keep in mind that SFTPGo can also be configured via [environment variables](../full-configuration.md#environment-variables); this is very convenient if you are using Docker.

The default configuration file is `sftpgo.json` and it can be found within the `/etc/sftpgo` directory if you installed from Linux distro packages. On Windows the configuration file can be found within the `{commonappdata}\SFTPGo` directory, where `{commonappdata}` is typically `C:\ProgramData`. SFTPGo also supports reading from TOML and YAML configuration files.

The following snippets assume you are running SFTPGo on Linux, but they can be easily adapted for other operating systems.

### Use PostgreSQL data provider

Create a PostgreSQL database named `sftpgo` and a PostgreSQL user with the correct permissions, for example using the `psql` CLI.

```shell
sudo -i -u postgres psql
CREATE DATABASE "sftpgo" WITH ENCODING='UTF8' CONNECTION LIMIT=-1;
create user "sftpgo" with encrypted password 'your password here';
grant all privileges on database "sftpgo" to "sftpgo";
\q
```

Open the SFTPGo configuration file, search for the `data_provider` section and change it as follows.

```json
"data_provider": {
  "driver": "postgresql",
  "name": "sftpgo",
  "host": "127.0.0.1",
  "port": 5432,
  "username": "sftpgo",
  "password": "your password here",
  ...
}
```

Confirm that the database connection works by initializing the data provider.

```shell
$ sudo su - sftpgo -s /bin/bash -c 'sftpgo initprovider -c /etc/sftpgo'
2021-05-19T22:21:54.000 INF Initializing provider: "postgresql" config file: "/etc/sftpgo/sftpgo.json"
2021-05-19T22:21:54.000 INF updating database version: 8 -> 9
2021-05-19T22:21:54.000 INF Data provider successfully initialized/updated
```

Ensure that SFTPGo starts after the database service.

```shell
sudo systemctl edit sftpgo.service
```

And override the unit definition with the following snippet.

```shell
[Unit]
After=postgresql.service
```

Restart SFTPGo to apply the changes.

### Use MySQL/MariaDB data provider

Create a MySQL database named `sftpgo` and a MySQL user with the correct permissions, for example using the `mysql` CLI.

```shell
$ mysql -u root
MariaDB [(none)]> CREATE DATABASE sftpgo CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
Query OK, 1 row affected (0.000 sec)

MariaDB [(none)]> grant all privileges on sftpgo.* to sftpgo@localhost identified by 'your password here';
Query OK, 0 rows affected (0.027 sec)

MariaDB [(none)]> quit
Bye
```

Open the SFTPGo configuration file, search for the `data_provider` section and change it as follows.

```json
"data_provider": {
  "driver": "mysql",
  "name": "sftpgo",
  "host": "127.0.0.1",
  "port": 3306,
  "username": "sftpgo",
  "password": "your password here",
  ...
}
```

Confirm that the database connection works by initializing the data provider.

```shell
$ sudo su - sftpgo -s /bin/bash -c 'sftpgo initprovider -c /etc/sftpgo'
2021-05-19T22:29:30.000 INF Initializing provider: "mysql" config file: "/etc/sftpgo/sftpgo.json"
2021-05-19T22:29:30.000 INF updating database version: 8 -> 9
2021-05-19T22:29:30.000 INF Data provider successfully initialized/updated
```

Ensure that SFTPGo starts after the database service.

```shell
sudo systemctl edit sftpgo.service
```

And override the unit definition with the following snippet.

```shell
[Unit]
After=mariadb.service
```

Restart SFTPGo to apply the changes.

### Use CockroachDB data provider

We assume you have installed CockroachDB this way:

```shell
sudo su
export CRDB_VERSION=21.1.2 # set the latest available version here
wget -qO- https://binaries.cockroachdb.com/cockroach-v${CRDB_VERSION}.linux-amd64.tgz | tar xvz
cp -i cockroach-v${CRDB_VERSION}.linux-amd64/cockroach /usr/local/bin/
mkdir -p /usr/local/lib/cockroach
cp -i cockroach-v${CRDB_VERSION}.linux-amd64/lib/libgeos.so /usr/local/lib/cockroach/
cp -i cockroach-v${CRDB_VERSION}.linux-amd64/lib/libgeos_c.so /usr/local/lib/cockroach/
mkdir /var/lib/cockroach
chown sftpgo:sftpgo /var/lib/cockroach
mkdir -p /etc/cockroach/{certs,ca}
chmod 700 /etc/cockroach/ca
/usr/local/bin/cockroach cert create-ca --certs-dir=/etc/cockroach/certs --ca-key=/etc/cockroach/ca/ca.key
/usr/local/bin/cockroach cert create-node localhost $(hostname) --certs-dir=/etc/cockroach/certs --ca-key=/etc/cockroach/ca/ca.key
/usr/local/bin/cockroach cert create-client root --certs-dir=/etc/cockroach/certs --ca-key=/etc/cockroach/ca/ca.key
chown -R sftpgo:sftpgo /etc/cockroach/certs
exit
```

and that you are running it using a systemd unit like this one:

```shell
[Unit]
Description=Cockroach Database single node
Requires=network.target

[Service]
Type=notify
WorkingDirectory=/var/lib/cockroach
ExecStart=/usr/local/bin/cockroach start-single-node --certs-dir=/etc/cockroach/certs --http-addr 127.0.0.1:8888 --listen-addr 127.0.0.1:26257 --cache=.25 --max-sql-memory=.25 --store=path=/var/lib/cockroach
TimeoutStopSec=60
Restart=always
RestartSec=10
StandardOutput=syslog
StandardError=syslog
SyslogIdentifier=cockroach
User=sftpgo

[Install]
WantedBy=default.target
```

Create a CockroachDB database named `sftpgo`.

```shell
$ sudo /usr/local/bin/cockroach sql --certs-dir=/etc/cockroach/certs -e 'create database "sftpgo"'
CREATE DATABASE

Time: 13ms
```

Open the SFTPGo configuration file, search for the `data_provider` section and change it as follows.

```json
"data_provider": {
  "driver": "cockroachdb",
  "name": "",
  "host": "",
  "port": 0,
  "username": "",
  "password": "",
  "sslmode": 0,
  "connection_string": "postgresql://root@localhost:26257/sftpgo?sslcert=%2Fetc%2Fcockroach%2Fcerts%2Fclient.root.crt&sslkey=%2Fetc%2Fcockroach%2Fcerts%2Fclient.root.key&sslmode=verify-full&sslrootcert=%2Fetc%2Fcockroach%2Fcerts%2Fca.crt&connect_timeout=10",
  ...
}
```

Confirm that the database connection works by initializing the data provider.

```shell
$ sudo su - sftpgo -s /bin/bash -c 'sftpgo initprovider -c /etc/sftpgo'
2021-05-19T22:41:53.000 INF Initializing provider: "cockroachdb" config file: "/etc/sftpgo/sftpgo.json"
2021-05-19T22:41:53.000 INF updating database version: 8 -> 9
2021-05-19T22:41:53.000 INF Data provider successfully initialized/updated
```

Ensure that SFTPGo starts after the database service.

```shell
sudo systemctl edit sftpgo.service
```

And override the unit definition with the following snippet.

```shell
[Unit]
After=cockroachdb.service
```

Restart SFTPGo to apply the changes.

### Enable FTP service

Open the SFTPGo configuration file, search for the `ftpd` section and change it as follows.

```json
"ftpd": {
  "bindings": [
    {
      "port": 2121,
      "address": "",
      "apply_proxy_config": true,
      "tls_mode": 0,
      "force_passive_ip": "",
      "client_auth_type": 0,
      "tls_cipher_suites": []
    }
  ],
  "banner": "",
  "banner_file": "",
  "active_transfers_port_non_20": true,
  "passive_port_range": {
    "start": 50000,
    "end": 50100
  },
  ...
}
```

Restart SFTPGo to apply the changes. The FTP service is now available on port `2121`.

You can also configure the passive port range (`50000-50100` by default); these ports must be reachable for passive FTP to work. If your FTP server is on the private network side of a NAT configuration you have to set `force_passive_ip` to your external IP address. You may also need to open the passive port range on your firewall.

It is recommended that you provide a certificate and key file to expose FTP over TLS. You should prefer SFTP to FTP even if you configure TLS; please don't blindly enable the old FTP protocol.

### Enable WebDAV service

Open the SFTPGo configuration file, search for the `webdavd` section and change it as follows.

```json
"webdavd": {
  "bindings": [
    {
      "port": 10080,
      "address": "",
      "enable_https": false,
      "client_auth_type": 0,
      "tls_cipher_suites": [],
      "prefix": "",
      "proxy_allowed": []
    }
  ],
  ...
}
```

Restart SFTPGo to apply the changes. The WebDAV service is now available on port `10080`. It is recommended that you provide a certificate and key file to expose WebDAV over HTTPS.