Mirror of https://github.com/mjl-/mox.git, synced 2025-06-28 12:18:16 +03:00.

Compare commits: 891 commits.
@@ -6,3 +6,4 @@
/cover.*
/.go/
/tmp/
/.git/

.github/workflows/build-test.yml (vendored, new file, 39 lines)

@@ -0,0 +1,39 @@
name: Build and test

on: [push, pull_request, workflow_dispatch]

jobs:
  build-test:
    runs-on: ubuntu-latest
    strategy:
      max-parallel: 1 # cannot run tests concurrently, files are created
      matrix:
        go-version: ['stable', 'oldstable']
    steps:
      - uses: actions/checkout@v3

      # Trigger rebuilding frontends, should be the same as committed.
      - uses: actions/setup-node@v3
        with:
          node-version: 16
          cache: 'npm'
      - run: 'touch */*.ts'

      - uses: actions/setup-go@v4
        with:
          go-version: ${{ matrix.go-version }}
      - run: make build

      # Need to run tests with a temp dir on same file system for os.Rename to succeed.
      - run: 'mkdir -p tmp && TMPDIR=$PWD/tmp make test'

      - uses: actions/upload-artifact@v4
        with:
          name: coverage-${{ matrix.go-version }}
          path: cover.html

      # Format code, we check below if nothing changed.
      - run: 'make fmt'

      # Enforce the steps above didn't make any changes.
      - run: git diff --exit-code

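The run steps above can also be reproduced locally. A minimal sketch using only the commands already present in the workflow (it assumes a Go and Node toolchain and a writable ./tmp in the repository root):

    touch */*.ts                                 # force the frontends to be rebuilt
    make build
    mkdir -p tmp && TMPDIR=$PWD/tmp make test    # temp dir on the same filesystem so os.Rename works
    make fmt && git diff --exit-code             # fail if formatting or generated files changed
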
.gitignore (vendored, 19 changed lines)

@@ -1,25 +1,28 @@
/mox
/mox.exe
/rfc/[0-9][0-9]*
/rfc/xr/
/local/
/testdata/check/
/testdata/*/data/
/testdata/ctl/config/dkim/
/testdata/empty/
/testdata/exportmaildir/
/testdata/exportmbox/
/testdata/httpaccount/data/
/testdata/imap/data/
/testdata/imaptest/data/
/testdata/integration/data/
/testdata/junk/*.bloom
/testdata/junk/*.db
/testdata/queue/data/
/testdata/sent/
/testdata/smtp/data/
/testdata/smtp/datajunk/
/testdata/store/data/
/testdata/smtp/postmaster/
/testdata/train/
/testdata/upgradetest.mbox.gz
/testdata/integration/example-integration.zone
/testdata/integration/tmp-pebble-ca.pem
/cover.out
/cover.html
/.go/
/node_modules/
/package.json
/package-lock.json
/upgrade*-verifydata.*.pprof
/upgrade*-openaccounts.*.pprof
/website/html/

@@ -8,6 +8,8 @@ FROM alpine:latest
WORKDIR /mox
COPY --from=build /build/mox /bin/mox

RUN apk add --no-cache tzdata

# SMTP for incoming message delivery.
EXPOSE 25/tcp
# SMTP/submission with TLS.

Makefile (179 changed lines)

@@ -1,79 +1,182 @@
default: build

build:
build: build0 frontend build1

build0:
    # build early to catch syntax errors
    CGO_ENABLED=0 go build
    CGO_ENABLED=0 go vet -tags integration ./...
    CGO_ENABLED=0 go vet ./...
    ./gendoc.sh
    (cd http && CGO_ENABLED=0 go run ../vendor/github.com/mjl-/sherpadoc/cmd/sherpadoc/*.go -adjust-function-names none Admin) >http/adminapi.json
    (cd http && CGO_ENABLED=0 go run ../vendor/github.com/mjl-/sherpadoc/cmd/sherpadoc/*.go -adjust-function-names none Account) >http/accountapi.json
    # build again, files above are embedded
    ./genapidoc.sh
    ./gents.sh webadmin/api.json webadmin/api.ts
    ./gents.sh webaccount/api.json webaccount/api.ts
    ./gents.sh webmail/api.json webmail/api.ts

build1:
    # build again, api json files above are embedded and new frontend code generated
    CGO_ENABLED=0 go build

install: build0 frontend
    CGO_ENABLED=0 go install

race: build0
    go build -race

test:
    CGO_ENABLED=0 go test -shuffle=on -coverprofile cover.out ./...
    CGO_ENABLED=0 go test -fullpath -shuffle=on -coverprofile cover.out ./...
    go tool cover -html=cover.out -o cover.html

test-race:
    CGO_ENABLED=1 go test -race -shuffle=on -covermode atomic -coverprofile cover.out ./...
    CGO_ENABLED=1 go test -fullpath -race -shuffle=on -covermode atomic -coverprofile cover.out ./...
    go tool cover -html=cover.out -o cover.html

test-more:
    TZ= CGO_ENABLED=0 go test -fullpath -shuffle=on -count 2 ./...

# note: if testdata/upgradetest.mbox.gz exists, its messages will be imported
# during tests. helpful for performance/resource consumption tests.
test-upgrade: build
    nice ./test-upgrade.sh

# needed for "check" target
install-staticcheck:
    CGO_ENABLED=0 go install honnef.co/go/tools/cmd/staticcheck@latest

install-ineffassign:
    CGO_ENABLED=0 go install github.com/gordonklaus/ineffassign@v0.1.0

check:
    staticcheck ./...
    staticcheck -tags integration
    CGO_ENABLED=0 go vet -tags integration
    CGO_ENABLED=0 go vet -tags website website/website.go
    CGO_ENABLED=0 go vet -tags link rfc/link.go
    CGO_ENABLED=0 go vet -tags errata rfc/errata.go
    CGO_ENABLED=0 go vet -tags xr rfc/xr.go
    GOARCH=386 CGO_ENABLED=0 go vet ./...
    CGO_ENABLED=0 ineffassign ./...
    CGO_ENABLED=0 staticcheck ./...
    CGO_ENABLED=0 staticcheck -tags integration
    CGO_ENABLED=0 staticcheck -tags website website/website.go
    CGO_ENABLED=0 staticcheck -tags link rfc/link.go
    CGO_ENABLED=0 staticcheck -tags errata rfc/errata.go
    CGO_ENABLED=0 staticcheck -tags xr rfc/xr.go

# needed for check-shadow
install-shadow:
    CGO_ENABLED=0 go install golang.org/x/tools/go/analysis/passes/shadow/cmd/shadow@latest

# having "err" shadowed is common, best to not have others
check-shadow:
    go vet -vettool=$$(which shadow) ./... 2>&1 | grep -v '"err"'
    CGO_ENABLED=0 go vet -vettool=$$(which shadow) ./... 2>&1 | grep -v '"err"'
    CGO_ENABLED=0 go vet -tags integration -vettool=$$(which shadow) 2>&1 | grep -v '"err"'
    CGO_ENABLED=0 go vet -tags website -vettool=$$(which shadow) website/website.go 2>&1 | grep -v '"err"'
    CGO_ENABLED=0 go vet -tags link -vettool=$$(which shadow) rfc/link.go 2>&1 | grep -v '"err"'
    CGO_ENABLED=0 go vet -tags errata -vettool=$$(which shadow) rfc/errata.go 2>&1 | grep -v '"err"'
    CGO_ENABLED=0 go vet -tags xr -vettool=$$(which shadow) rfc/xr.go 2>&1 | grep -v '"err"'

fuzz:
    go test -fuzz FuzzParseSignature -fuzztime 5m ./dkim
    go test -fuzz FuzzParseRecord -fuzztime 5m ./dkim
    go test -fuzz . -fuzztime 5m ./dmarc
    go test -fuzz . -fuzztime 5m ./dmarcrpt
    go test -fuzz . -parallel 1 -fuzztime 5m ./imapserver
    go test -fuzz . -parallel 1 -fuzztime 5m ./junk
    go test -fuzz FuzzParseRecord -fuzztime 5m ./mtasts
    go test -fuzz FuzzParsePolicy -fuzztime 5m ./mtasts
    go test -fuzz . -parallel 1 -fuzztime 5m ./smtpserver
    go test -fuzz . -fuzztime 5m ./spf
    go test -fuzz FuzzParseRecord -fuzztime 5m ./tlsrpt
    go test -fuzz FuzzParseMessage -fuzztime 5m ./tlsrpt
    go test -fullpath -fuzz FuzzParseSignature -fuzztime 5m ./dkim
    go test -fullpath -fuzz FuzzParseRecord -fuzztime 5m ./dkim
    go test -fullpath -fuzz . -fuzztime 5m ./dmarc
    go test -fullpath -fuzz . -fuzztime 5m ./dmarcrpt
    go test -fullpath -fuzz . -parallel 1 -fuzztime 5m ./imapserver
    go test -fullpath -fuzz . -fuzztime 5m ./imapclient
    go test -fullpath -fuzz . -parallel 1 -fuzztime 5m ./junk
    go test -fullpath -fuzz FuzzParseRecord -fuzztime 5m ./mtasts
    go test -fullpath -fuzz FuzzParsePolicy -fuzztime 5m ./mtasts
    go test -fullpath -fuzz . -fuzztime 5m ./smtp
    go test -fullpath -fuzz . -parallel 1 -fuzztime 5m ./smtpserver
    go test -fullpath -fuzz . -fuzztime 5m ./spf
    go test -fullpath -fuzz FuzzParseRecord -fuzztime 5m ./tlsrpt
    go test -fullpath -fuzz FuzzParseMessage -fuzztime 5m ./tlsrpt

integration-build:
    docker-compose -f docker-compose-integration.yml build --no-cache moxmail
govendor:
    go mod tidy
    go mod vendor
    ./genlicenses.sh

integration-start:
    -rm -r testdata/integration/data
    -docker-compose -f docker-compose-integration.yml run moxmail /bin/bash
    docker-compose -f docker-compose-integration.yml down
test-integration:
    -docker compose -f docker-compose-integration.yml kill
    -docker compose -f docker-compose-integration.yml down
    docker image build --pull --no-cache -f Dockerfile -t mox_integration_moxmail .
    docker image build --pull --no-cache -f testdata/integration/Dockerfile.test -t mox_integration_test testdata/integration
    -rm -rf testdata/integration/moxacmepebble/data
    -rm -rf testdata/integration/moxmail2/data
    -rm -f testdata/integration/tmp-pebble-ca.pem
    MOX_UID=$$(id -u) docker compose -f docker-compose-integration.yml run test
    docker compose -f docker-compose-integration.yml kill

# run from within "make integration-start"
integration-test:
    CGO_ENABLED=0 go test -tags integration

imaptest-build:
    -docker-compose -f docker-compose-imaptest.yml build --no-cache mox
    -docker compose -f docker-compose-imaptest.yml build --no-cache --pull mox

imaptest-run:
    -rm -r testdata/imaptest/data
    mkdir testdata/imaptest/data
    docker-compose -f docker-compose-imaptest.yml run --entrypoint /usr/local/bin/imaptest imaptest host=mox port=1143 user=mjl@mox.example pass=testtest mbox=imaptest.mbox
    docker-compose -f docker-compose-imaptest.yml down
    docker compose -f docker-compose-imaptest.yml run --entrypoint /usr/local/bin/imaptest imaptest host=mox port=1143 user=mjl@mox.example pass=testtest mbox=imaptest.mbox
    docker compose -f docker-compose-imaptest.yml down

fmt:
    go fmt ./...
    gofmt -w -s *.go */*.go

jswatch:
    inotifywait -m -e close_write http/admin.html http/account.html | xargs -n2 sh -c 'echo changed; ./checkhtmljs http/admin.html http/account.html'
tswatch:
    bash -c 'while true; do inotifywait -q -e close_write *.ts webadmin/*.ts webaccount/*.ts webmail/*.ts; make frontend; done'

jsinstall:
node_modules/.bin/tsc:
    -mkdir -p node_modules/.bin
    npm install jshint@2.13.2
    npm ci --ignore-scripts

install-js: node_modules/.bin/tsc

install-js0:
    -mkdir -p node_modules/.bin
    npm install --ignore-scripts --save-dev --save-exact typescript@5.1.6

webmail/webmail.js: lib.ts webmail/api.ts webmail/lib.ts webmail/webmail.ts
    ./tsc.sh $@ lib.ts webmail/api.ts webmail/lib.ts webmail/webmail.ts

webmail/msg.js: lib.ts webmail/api.ts webmail/lib.ts webmail/msg.ts
    ./tsc.sh $@ lib.ts webmail/api.ts webmail/lib.ts webmail/msg.ts

webmail/text.js: lib.ts webmail/api.ts webmail/lib.ts webmail/text.ts
    ./tsc.sh $@ lib.ts webmail/api.ts webmail/lib.ts webmail/text.ts

webadmin/admin.js: lib.ts webadmin/api.ts webadmin/admin.ts
    ./tsc.sh $@ lib.ts webadmin/api.ts webadmin/admin.ts

webaccount/account.js: lib.ts webaccount/api.ts webaccount/account.ts
    ./tsc.sh $@ lib.ts webaccount/api.ts webaccount/account.ts

frontend: node_modules/.bin/tsc webadmin/admin.js webaccount/account.js webmail/webmail.js webmail/msg.js webmail/text.js

install-apidiff:
    CGO_ENABLED=0 go install golang.org/x/exp/cmd/apidiff@v0.0.0-20231206192017-f3f8817b8deb

genapidiff:
    ./apidiff.sh

docker:
    docker build -t mox:dev .

docker-release:
    ./docker-release.sh

genwebsite:
    ./genwebsite.sh

buildall:
    CGO_ENABLED=0 GOOS=linux GOARCH=arm go build
    CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build
    CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build
    CGO_ENABLED=0 GOOS=linux GOARCH=386 go build
    CGO_ENABLED=0 GOOS=openbsd GOARCH=amd64 go build
    CGO_ENABLED=0 GOOS=freebsd GOARCH=amd64 go build
    CGO_ENABLED=0 GOOS=netbsd GOARCH=amd64 go build
    CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 go build
    CGO_ENABLED=0 GOOS=dragonfly GOARCH=amd64 go build
    CGO_ENABLED=0 GOOS=illumos GOARCH=amd64 go build
    CGO_ENABLED=0 GOOS=solaris GOARCH=amd64 go build
    CGO_ENABLED=0 GOOS=aix GOARCH=ppc64 go build
    CGO_ENABLED=0 GOOS=windows GOARCH=amd64 go build
    # no plan9 for now

README.md (549 changed lines)

@@ -1,101 +1,48 @@

Mox is a modern full-featured open source secure mail server for low-maintenance self-hosted email.

For more details, see the mox website, https://www.xmox.nl.

See Quickstart below to get started.

## Features

- Quick and easy to start/maintain mail server, for your own domain(s).
- SMTP (with extensions) for receiving and submitting email.
- SMTP (with extensions) for receiving, submitting and delivering email.
- IMAP4 (with extensions) for giving email clients access to email.
- Automatic TLS with ACME, for use with Let's Encrypt and other CA's.
- SPF, verifying that a remote host is allowed to send email for a domain.
- DKIM, verifying that a message is signed by the claimed sender domain,
  and for signing emails sent by mox for others to verify.
- DMARC, for enforcing SPF/DKIM policies set by domains. Incoming DMARC
  aggregate reports are analyzed.
- Reputation tracking, learning (per user) host- and domain-based reputation from
  (Non-)Junk email.
- Webmail for reading/sending email from the browser.
- SPF/DKIM/DMARC for authenticating messages/delivery, also DMARC aggregate
  reports.
- Reputation tracking, learning (per user) host-, domain- and
  sender address-based reputation from (Non-)Junk email classification.
- Bayesian spam filtering that learns (per user) from (Non-)Junk email.
- Slowing down senders with no/low reputation or questionable email content
  (similar to greylisting). Rejected emails are stored in a mailbox called Rejects
  for a short period, helping with misclassified legitimate synchronous
  signup/login/transactional emails.
- Internationalized email, with unicode names in domains and usernames
  ("localparts").
- TLSRPT, parsing reports about TLS usage and issues.
- MTA-STS, for ensuring TLS is used whenever it is required. Both serving of
  policies, and tracking and applying policies of remote servers.
- Web admin interface that helps you set up your domains and accounts
  (instructions to create DNS records, configure
  SPF/DKIM/DMARC/TLSRPT/MTA-STS), for status information, managing
  accounts/domains, and modifying the configuration file.
- Autodiscovery (with SRV records, Microsoft-style and Thunderbird-style) for
  easy account setup (though not many clients support it).
- Internationalized email (EIA), with unicode in email address usernames
  ("localparts"), and in domain names (IDNA).
- Automatic TLS with ACME, for use with Let's Encrypt and other CA's.
- DANE and MTA-STS for inbound and outbound delivery over SMTP with STARTTLS,
  including REQUIRETLS and with incoming/outgoing TLSRPT reporting.
- Web admin interface that helps you set up your domains, accounts and list
  aliases (instructions to create DNS records, configure
  SPF/DKIM/DMARC/TLSRPT/MTA-STS), for status information, and modifying the
  configuration file.
- Account autodiscovery (with SRV records, Microsoft-style, Thunderbird-style,
  and Apple device management profiles) for easy account setup (though client
  support is limited).
- Webserver with serving static files and forwarding requests (reverse
  proxy), so port 443 can also be used to serve websites.
- Simple HTTP/JSON API for sending transaction email and receiving delivery
  events and incoming messages (webapi and webhooks).
- Prometheus metrics and structured logging for operational insight.
- "mox localserve" subcommand for running mox locally for email-related
  testing/developing, including pedantic mode.
- Most non-server Go packages mox consists of are written to be reusable.

Mox is available under the MIT-license and was created by Mechiel Lukkien,
mechiel@ueber.net. Mox includes the Public Suffix List by Mozilla, under Mozilla
Public License, v2.0.


# Download

You can easily (cross) compile mox if you have a recent Go toolchain installed
(see "go version", it must be >= 1.19; otherwise, see https://go.dev/dl/ or
https://go.dev/doc/manage-install and $HOME/go/bin):

    GOBIN=$PWD CGO_ENABLED=0 go install github.com/mjl-/mox@latest

Or you can download a binary built with the latest Go toolchain from
https://beta.gobuilds.org/github.com/mjl-/mox, and symlink or rename it to
"mox".

Verify you have a working mox binary:

    ./mox version

Note: Mox only compiles for/works on unix systems, not on Plan 9 or Windows.

You can also run mox with docker image "docker.io/moxmail/mox", with tags like
"latest", "0.0.1" and "0.0.1-go1.20.1-alpine3.17.2", see
https://hub.docker.com/r/moxmail/mox. See docker-compose.yml in this
repository for instructions on starting. You must run docker with host
networking, because mox needs to find your actual public IP's and get the
remote IPs for incoming connections, not a local/internal NAT IP.


# Quickstart

The easiest way to get started with serving email for your domain is to get a
vm/machine dedicated to serving email, name it [host].[domain] (e.g.
mail.example.com), login as root, and run:

    # Create mox user and homedir (or pick another name or homedir):
    useradd -m -d /home/mox mox

    cd /home/mox
    ... compile or download mox to this directory, see above ...

    # Generate config files for your address/domain:
    ./mox quickstart you@example.com

The quickstart creates an account, generates a password and configuration
files, prints the DNS records you need to manually create and prints commands
to start mox and optionally install mox as a service.

A dedicated machine is highly recommended because modern email requires HTTPS,
and mox currently needs it for automatic TLS. You could combine mox with an
existing webserver, but it requires more configuration. If you want to serve
websites on the same machine, consider using the webserver built into mox. If
you want to run an existing webserver on port 443/80, see "mox help quickstart",
it'll tell you to run "./mox quickstart -existing-webserver you@example.com".

After starting, you can access the admin web interface on internal IPs.

# Future/development
mechiel@ueber.net. Mox includes BSD-3-clause licensed code from the Go Authors, and the
Public Suffix List by Mozilla under Mozilla Public License, v2.0.

Mox has automated tests, including for interoperability with Postfix for SMTP.
Mox is manually tested with email clients: Mozilla Thunderbird, mutt, iOS Mail,
@@ -105,45 +52,137 @@ proton.me.

The code is heavily cross-referenced with the RFCs for readability/maintainability.

## Roadmap
# Quickstart

- Strict vs lax mode, defaulting to lax when receiving from the internet, and
  strict when sending.
- "developer server" mode, to easily launch a local SMTP/IMAP server to test
  your apps mail sending capabilities.
- Rate limiting and spam detection for submitted/outgoing messages, to reduce
  impact when an account gets compromised.
- Privilege separation, isolating parts of the application to more restricted
  sandbox (e.g. new unauthenticated connections).
- DANE and DNSSEC.
- Sending DMARC and TLS reports (currently only receiving).
- OAUTH2 support, for single sign on.
- ACME verification over HTTP (in addition to current tls-alpn01).
The easiest way to get started with serving email for your domain is to get a
(virtual) machine dedicated to serving email, name it `[host].[domain]` (e.g.
mail.example.com). Having a DNSSEC-verifying resolver installed, such as
unbound, is highly recommended. Run as root:

    # Create mox user and homedir (or pick another name or homedir):
    useradd -m -d /home/mox mox

    cd /home/mox
    ... compile or download mox to this directory, see below ...

    # Generate config files for your address/domain:
    ./mox quickstart you@example.com

The quickstart:

- Creates configuration files mox.conf and domains.conf.
- Adds the domain and an account for the email address to domains.conf
- Generates an admin and account password.
- Prints the DNS records you need to add, for the machine and domain.
- Prints commands to start mox, and optionally install mox as a service.

A machine that doesn't already run a webserver is highly recommended because
modern email requires HTTPS, and mox currently needs to run a webserver for
automatic TLS with ACME. You could combine mox with an existing webserver, but
it requires a lot more configuration. If you want to serve websites on the same
machine, consider using the webserver built into mox. It's pretty good! If you
want to run an existing webserver on port 443/80, see `mox help quickstart`.

After starting, you can access the admin web interface on internal IPs.

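As an illustration of that last step: after the quickstart finishes, running mox in the foreground is a single command (the quickstart output also includes the commands for installing it as a service, which may differ per system):

    ./mox serve
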
# Download

Download a mox binary from
https://beta.gobuilds.org/github.com/mjl-/mox@latest/linux-amd64-latest/.

Symlink or rename it to "mox".

The URL above always resolves to the latest release for linux/amd64 built with
the latest Go toolchain. See the links at the bottom of that page for binaries
for other platforms.

# Compiling

You can easily (cross) compile mox yourself. You need a recent Go toolchain
installed. Run `go version`, it must be >= 1.23. Download the latest version
from https://go.dev/dl/ or see https://go.dev/doc/manage-install.

To download the source code of the latest release, and compile it to binary "mox":

    GOBIN=$PWD CGO_ENABLED=0 go install github.com/mjl-/mox@latest

Mox only compiles for and fully works on unix systems. Mox also compiles for
Windows, but "mox serve" does not yet work, though "mox localserve" (for a
local test instance) and most other subcommands do. Mox does not compile for
Plan 9.

# Docker

Although not recommended, you can also run mox with docker image
`r.xmox.nl/mox`, with tags like `v0.0.1` and `v0.0.1-go1.20.1-alpine3.17.2`, see
https://r.xmox.nl/r/mox/. See
https://github.com/mjl-/mox/blob/main/docker-compose.yml to get started.

New docker images aren't (automatically) generated for new Go runtime/compile
releases.

It is important to run with docker host networking, so mox can use the public
IPs and has correct remote IP information for incoming connections (important
for junk filtering and rate-limiting).

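For a rough idea of what host networking looks like in practice, a minimal sketch (the bind-mount paths under /mox are an assumption here; the repository's docker-compose.yml is the authoritative reference):

    docker run --network host \
        -v "$PWD/config":/mox/config -v "$PWD/data":/mox/data \
        r.xmox.nl/mox:v0.0.1
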
# Development

See develop.txt for instructions/tips for developing on mox.

# Sponsors

Thanks to NLnet foundation, the European Commission's NGI programme, and the
Netherlands Ministry of the Interior and Kingdom Relations for financial
support:

- 2024/2025, NLnet NGI0 Zero Core, https://nlnet.nl/project/Mox-Automation/
- 2024, NLnet e-Commons Fund, https://nlnet.nl/project/Mox-API/
- 2023/2024, NLnet NGI0 Entrust, https://nlnet.nl/project/Mox/

# Roadmap

- "mox setup" command, using admin web interface for interactive setup
- Automate DNS management, for setup and maintenance, such as DANE/DKIM key rotation
- Config options for "transactional email domains", for which mox will only
  send messages
- Encrypted storage of files (email messages, TLS keys), also with per account keys
- Recognize common deliverability issues and help postmasters solve them
- JMAP, IMAP OBJECTID extension, IMAP JMAPACCESS extension
- Calendaring with CalDAV/iCal
- Introbox, to which first-time senders are delivered
- Add special IMAP mailbox ("Queue?") that contains queued but
  not-yet-delivered messages.
  undelivered messages, updated with IMAP flags/keywords/tags and message headers.
- External addresses in aliases/lists.
- Autoresponder (out of office/vacation)
- Mailing list manager
- IMAP extensions for "online"/non-syncing/webmail clients (SORT (including
  DISPLAYFROM, DISPLAYTO), THREAD, PARTIAL, CONTEXT=SEARCH CONTEXT=SORT ESORT,
  FILTERS)
- IMAP ACL support, for account sharing (interacts with many extensions and code)
- Improve support for mobile clients with extensions: IMAP URLAUTH, SMTP
  CHUNKING and BINARYMIME, IMAP CATENATE
- Privilege separation, isolating parts of the application to more restricted
  sandbox (e.g. new unauthenticated connections)
- Using mox as backup MX
- Sieve for filtering (for now see Rulesets in the account config)
- Calendaring
- IMAP CONDSTORE and QRESYNC extensions
- IMAP THREAD extension
- Using mox as backup MX.
- Old-style internationalization in messages.
- JMAP
- Webmail
- ARC, with forwarded email from trusted source
- Milter support, for integration with external tools
- SMTP DSN extension
- IMAP Sieve extension, to run Sieve scripts after message changes (not only
  new deliveries)
- OAUTH2 support, for single sign on
- Forwarding (to an external address)

There are many smaller improvements to make as well, search for "todo" in the code.

## Not supported
## Not supported/planned

But perhaps in the future...
There is currently no plan to implement the following. Though this may
change in the future.

- HTTP-based API for sending messages and receiving delivery feedback
- Functioning as SMTP relay
- Forwarding (to an external address)
- Autoresponders
- Functioning as an SMTP relay without authentication
- POP3
- Delivery to (unix) OS system users
- PGP or S/MIME
- Mailing list manager
- Delivery to (unix) OS system users (mbox/Maildir)
- Support for pluggable delivery mechanisms

@@ -152,18 +191,26 @@ But perhaps in the future...

## Why a new mail server implementation?

Mox aims to make "running a mail server" easy and nearly effortless. Excellent
quality mail server software exists, but getting a working setup typically
requires you configure half a dozen services (SMTP, IMAP, SPF/DKIM/DMARC, spam
filtering). That seems to lead to people no longer running their own mail
servers, instead switching to one of the few centralized email providers. Email
with SMTP is a long-time decentralized messaging protocol. To keep it
decentralized, people need to run their own mail server. Mox aims to make that
easy.
quality (open source) mail server software exists, but getting a working setup
typically requires you configure half a dozen services (SMTP, IMAP,
SPF/DKIM/DMARC, spam filtering), which are often written in C (where small bugs
often have large consequences). That seems to lead to people no longer running
their own mail servers, instead switching to one of the few centralized email
providers. Email with SMTP is a long-time decentralized messaging protocol. To
keep it decentralized, people need to run their own mail server. Mox aims to
make that easy.

## Where is the documentation?

See all commands and help text at https://pkg.go.dev/github.com/mjl-/mox/, and
example config files at https://pkg.go.dev/github.com/mjl-/mox/config/.
To keep mox as a project maintainable, documentation is integrated into, and
generated from the code.

A list of mox commands, and their help output, are at
https://www.xmox.nl/commands/.

Mox is configured through configuration files, and each field comes with
documentation. See https://www.xmox.nl/config/ for config files containing all
fields and their documentation.

You can get the same information by running "mox" without arguments to list its
subcommands and usage, and "mox help [subcommand]" for more details.

@@ -171,9 +218,44 @@ subcommands and usage, and "mox help [subcommand]" for more details.

The example config files are printed by "mox config describe-static" and "mox
config describe-dynamic".

Mox is still in early stages, and documentation is still limited. Please create
an issue describing what is unclear or confusing, and we'll try to improve the
documentation.
If you're missing some documentation, please create an issue describing what is
unclear or confusing, and we'll try to improve the documentation.

## Is Mox affected by SMTP smuggling?

Mox itself is not affected: it only treats "\r\n.\r\n" as SMTP end-of-message.
But read on for caveats.

SMTP smuggling exploits differences in handling by SMTP servers of: carriage
returns (CR, or "\r"), newlines (line feeds, LF, "\n") in the context of "dot
stuffing". SMTP is a text-based protocol. An SMTP transaction to send a
message is finalized with a "\r\n.\r\n" sequence. This sequence could occur in
the message being transferred, so any verbatim "." at the start of a line in a
message is "escaped" with another dot ("dot stuffing"), to not trigger the SMTP
end-of-message. SMTP smuggling takes advantage of bugs in some mail servers
that interpret other sequences than "\r\n.\r\n" as SMTP end-of-message. For
example "\n.\n" or even "\r.\r", and perhaps even other magic character
combinations.

Before v0.0.9, mox accepted SMTP transactions with bare carriage returns
(without newline) for compatibility with real-world email messages, considering
them meaningless and therefore innocuous.

Since v0.0.9, SMTP transactions with bare carriage returns are rejected.
Sending messages with bare carriage returns to buggy mail servers can cause
those mail servers to materialize non-existent messages. Now that mox rejects
messages with bare carriage returns, sending a message through mox can no
longer be used to trigger those bugs.

Mox can still handle bare carriage returns in email messages, e.g. those
imported from mbox files or Maildirs, or from messages added over IMAP. Mox
still fixes up messages with bare newlines by adding the missing carriage
returns.

Before v0.0.9, an SMTP transaction for a message containing "\n.\n" would
result in a non-specific error message, and "\r\n.\n" would result in the dot
being dropped. Since v0.0.9, these sequences are rejected with a message
mentioning SMTP smuggling.

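As a small illustration of the dot stuffing described above (an example written for this text, not code from mox; message.txt is a placeholder for an already CRLF-terminated message body):

    # Escape every line that starts with a dot, then send the end-of-message sequence.
    sed 's/^\./../' message.txt
    printf '\r\n.\r\n'
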
## How do I import/export email?

@@ -185,6 +267,10 @@ and copy or move messages from one account to the other.

Similarly, see the export functionality on the accounts web page and the "mox
export maildir" and "mox export mbox" subcommands to export email.

Importing large mailboxes may require a lot of memory (a limitation of the
current database). Splitting up mailboxes in smaller parts (e.g. 100k messages)
would help.

## How can I help?

Mox needs users and testing in real-life setups! So just give it a try, send

@@ -200,24 +286,34 @@ compatibility issues, limitations, anti-spam measures, specification
violations, that would be interesting to hear about.

Pull requests for bug fixes and new code are welcome too. If the changes are
large, it helps to start a discussion (create a ticket) before doing all the
work.
large, it helps to start a discussion (create an "issue") before doing all the
work. In practice, starting with a small contribution and growing from there has
the highest chance of success.

By contributing (e.g. code), you agree your contributions are licensed under the
MIT license (like mox), and have the rights to do so.

## Where can I discuss mox?

Join #mox on irc.oftc.net, or #mox on the "Gopher slack".
Join #mox on irc.oftc.net, or #mox:matrix.org (https://matrix.to/#/#mox:matrix.org),
or #mox on the "Gopher slack".

For bug reports, please file an issue at https://github.com/mjl-/mox/issues/new.

## How do I change my password?

Regular users (doing IMAP/SMTP with authentication) can change their password
at the account page, e.g. http://127.0.0.1/. Or you can set a password with "mox
at the account page, e.g. `http://localhost/`. Or you can set a password with "mox
setaccountpassword".

The admin can change the password of any account through the admin page, at
`http://localhost/admin/` by default (leave username empty when logging in).

The account and admin pages are served on localhost for configs created with
the quickstart. To access these from your browser, run
`ssh -L 8080:localhost:80 you@yourmachine` locally and open
`http://localhost:8080/[...]`.

The admin password can be changed with "mox setadminpassword".

## How do I configure a second mox instance as a backup MX?

@ -225,8 +321,13 @@ The admin password can be changed with "mox setadminpassword".
|
||||
Unfortunately, mox does not yet provide an option for that. Mox does spam
|
||||
filtering based on reputation of received messages. It will take a good amount
|
||||
of work to share that information with a backup MX. Without that information,
|
||||
spammers could use a backup MX to get their spam accepted.
|
||||
|
||||
Until mox has a proper solution, you can simply run a single SMTP server. The
|
||||
author has run a single mail server for over a decade without issues. Machines
|
||||
and network connectivity are stable nowadays, and email delivery will be
|
||||
retried for many hours during temporary errors (e.g. when rebooting a machine
|
||||
after updates).
|
||||
|
||||
## How do I stay up to date?
|
||||
|
||||
@ -235,14 +336,52 @@ through a DNS TXT request for `_updates.xmox.nl` once per 24h. Only if a new
|
||||
version is published will the changelog be fetched and delivered to the
|
||||
postmaster mailbox.
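You can inspect the published version record yourself with a plain DNS query,
for example:

    dig +short TXT _updates.xmox.nl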
|
||||
|
||||
The changelog, including latest update instructions, is at
|
||||
https://updates.xmox.nl/changelog.
|
||||
|
||||
You can also monitor newly added releases on this repository with the github
|
||||
"watch" feature, or use the github RSS feed for tags
|
||||
(https://github.com/mjl-/mox/tags.atom) or releases
|
||||
(https://github.com/mjl-/mox/releases.atom), or monitor the docker images.
|
||||
|
||||
Keep in mind you have a responsibility to keep the internet-connected software
|
||||
you run up to date and secure.
|
||||
|
||||
## How do I upgrade my mox installation?
|
||||
|
||||
We try to make upgrades effortless and you can typically just put a new binary
|
||||
in place and restart. If manual actions are required, the release notes mention
|
||||
them. Check the release notes of all versions between your current installation
|
||||
and the release you're upgrading to.
|
||||
|
||||
Before upgrading, make a backup of the config & data directory with `mox backup
|
||||
<destdir>`. This copies all files from the config directory to
|
||||
`<destdir>/config`, and creates `<destdir>/data` with a consistent snapshot of
|
||||
the database files, and message files from the outgoing queue and accounts.
|
||||
Using the new mox binary, run `mox verifydata <destdir>/data` (do NOT use the
|
||||
"live" data directory!) for a dry run. If this fails, an upgrade will probably
|
||||
fail too.
|
||||
|
||||
Important: verifydata with the new mox binary can modify the database files
|
||||
(due to automatic schema upgrades). So make a fresh backup again before the
|
||||
actual upgrade. See the help output of the "backup" and "verifydata" commands
|
||||
for more details.
|
||||
|
||||
During backup, message files are hardlinked if possible, and copied otherwise.
|
||||
Using a destination directory like `data/tmp/backup` increases the odds
|
||||
hardlinking succeeds: the default mox systemd service file mounts
|
||||
the data directory separately, so hardlinks to outside the data directory are
|
||||
cross-device and will fail.
|
||||
|
||||
If an upgrade fails and you have to restore (parts) of the data directory, you
|
||||
should run `mox verifydata <datadir>` (with the original binary) on the
|
||||
restored directory before starting mox again. If problematic files are found,
|
||||
for example queue or account message files that are not in the database, run
|
||||
`mox verifydata -fix <datadir>` to move away those files. After a restore, you may
|
||||
also want to run `mox bumpuidvalidity <account>` for each account for which
|
||||
messages in a mailbox changed, to force IMAP clients to synchronize mailbox
|
||||
state.
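A sketch of those restore checks, assuming the account is named "john" and the
restored data directory is ./data (both placeholders), with mox stopped:

    ./mox verifydata data          # with the original binary
    ./mox verifydata -fix data     # only if problematic files were reported
    ./mox bumpuidvalidity john     # per account whose mailbox messages changed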
|
||||
|
||||
## How secure is mox?
|
||||
|
||||
Security is high on the priority list for mox. Mox is young, so don't expect no
|
||||
@ -265,3 +404,149 @@ can start with a machine with 512MB RAM, any CPU will do. For storage you
|
||||
should account for the size of the email messages (no compression currently),
|
||||
an additional 15% overhead for the metadata, and some more headroom.
|
||||
Expand as necessary.
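For example, with these numbers, 20 GB of stored messages needs roughly 20 GB
plus 15% metadata overhead, about 23 GB, before adding headroom for growth.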
|
||||
|
||||
## Won't the big email providers block my email?
|
||||
|
||||
It is a common misconception that it is impossible to run your own email server
|
||||
nowadays. The claim is that the handful of big email providers will simply block
|
||||
your email. However, you can run your own email server just fine, and your
|
||||
email will be accepted, provided you are doing it right.
|
||||
|
||||
If your email is rejected, it is often because your IP address has a bad email
|
||||
sending reputation. Email servers often use IP blocklists to reject email from
|
||||
networks with a bad email sending reputation. These blocklists often work at
|
||||
the level of whole network ranges. So if you try to run an email server from a
|
||||
hosting provider with a bad reputation (which happens if they don't monitor
|
||||
their network or don't act on abuse/spam reports), your IP too will have a bad
|
||||
reputation and other mail servers (both large and small) may reject messages
|
||||
coming from you. During the quickstart, mox checks if your IPs are on a few
|
||||
often-used blocklists. It's typically not a good idea to host an email server
|
||||
on the cheapest or largest cloud providers: They often don't spend the
|
||||
resources necessary for a good reputation, or they simply block all outgoing
|
||||
SMTP traffic. It's better to look for a technically-focused local provider.
|
||||
They too may initially block outgoing SMTP connections on new machines to
|
||||
prevent spam from their networks. But they will either automatically open up
|
||||
outgoing SMTP traffic after a cool down period (e.g. 24 hours), or after you've
|
||||
contacted their support.
|
||||
|
||||
After you get past the IP blocklist checks, email servers use many more signals
|
||||
to determine if your email message could be spam and should be rejected. Mox
|
||||
helps you set up a system that doesn't trigger most of the technical signals
|
||||
(e.g. with SPF/DKIM/DMARC). But there are more signals, for example: Sending to
|
||||
a mail server or address for the first time. Sending from a newly registered
|
||||
domain (especially if you're sending automated messages, and if you send more
|
||||
messages after previous messages were rejected), domains that existed for a few
|
||||
weeks to a month are treated more leniently. Sending messages with content that
|
||||
resembles known spam messages.
|
||||
|
||||
Should your email be rejected, you will typically get an error message during
|
||||
the SMTP transaction that explains why. In the case of big email providers the
|
||||
error message often has instructions on how to prove to them you are a
|
||||
legitimate sender.
|
||||
|
||||
## Can mox deliver through a smarthost?
|
||||
|
||||
Yes, you can configure a "Transport" in mox.conf and configure "Routes" in
|
||||
domains.conf to send some or all messages through the transport. A transport
|
||||
can be an SMTP relay or authenticated submission, or mox can make outgoing
|
||||
connections through a SOCKS proxy.
|
||||
|
||||
For an example, see https://www.xmox.nl/config/#hdr-example-transport. For
|
||||
details about Transports and Routes, see
|
||||
https://www.xmox.nl/config/#cfg-mox-conf-Transports and
|
||||
https://www.xmox.nl/config/#cfg-domains-conf-Routes.
|
||||
|
||||
Remember to add the IP addresses of the transport to the SPF records of your
|
||||
domains. Keep in mind some 3rd party submission servers may mishandle your
|
||||
messages, for example by replacing your Message-Id header and thereby
|
||||
invalidating your DKIM-signatures, or rejecting messages with more than one
|
||||
DKIM-signature.
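For the SPF note above: if the smarthost sends from 203.0.113.25 (a placeholder
documentation address), the domain's SPF TXT record could look like:

    example.org.    TXT    "v=spf1 ip4:203.0.113.25 mx ~all"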
|
||||
|
||||
## Can I use mox to send transactional email?
|
||||
|
||||
Yes. While you can use SMTP submission to send messages you've composed
|
||||
yourself, and monitor a mailbox for DSNs, a more convenient option is to use
|
||||
the mox HTTP/JSON-based webapi and webhooks.
|
||||
|
||||
The mox webapi can be used to send outgoing messages that mox composes. The web
|
||||
api can also be used to deal with messages stored in an account, like changing
|
||||
message flags, retrieving messages in parsed form or individual parts of
|
||||
multipart messages, or moving messages to another mailbox or deleting messages
|
||||
altogether.
|
||||
|
||||
Mox webhooks can be used to receive updates about incoming and outgoing
|
||||
deliveries. Mox can automatically manage per account suppression lists.
|
||||
|
||||
See https://www.xmox.nl/features/#hdr-webapi-and-webhooks for details.
|
||||
|
||||
## Can I use existing TLS certificates/keys?
|
||||
|
||||
Yes. The quickstart command creates a config that uses ACME with Let's Encrypt,
|
||||
but you can change the config file to use existing certificate and key files.
|
||||
|
||||
You'll see "ACME: letsencrypt" in the "TLS" section of the "public" Listener.
|
||||
Remove or comment out the ACME-line, and add a "KeyCerts" section, see
|
||||
https://www.xmox.nl/config/#cfg-mox-conf-Listeners-x-TLS-KeyCerts
|
||||
|
||||
You can have multiple certificates and keys: The line with the "-" (dash) is
|
||||
the start of a list item. Duplicate that line up to and including the line with
|
||||
KeyFile for each certificate/key you have. Mox makes a TLS config that holds
|
||||
all specified certificates/keys, and uses it for all services for that Listener
|
||||
(including a webserver), choosing the correct certificate for incoming
|
||||
requests.
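As a sketch of that structure (the file paths are placeholders, and the exact
field names should be checked against the KeyCerts documentation linked above):

    TLS:
        KeyCerts:
            -
                CertFile: /path/to/mail.example.org-chain.pem
                KeyFile: /path/to/mail.example.org-key.pem
            -
                CertFile: /path/to/autoconfig.example.org-chain.pem
                KeyFile: /path/to/autoconfig.example.org-key.pem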
|
||||
|
||||
Keep in mind that for each email domain you host, you will need a certificate
|
||||
for `mta-sts.<domain>`, `autoconfig.<domain>` and `mail.<domain>`, unless you
|
||||
disable MTA-STS, autoconfig and the client-settings-domain for that domain.
|
||||
|
||||
Mox opens the key and certificate files during initial startup, as root (and
|
||||
passes file descriptors to the unprivileged process). No special permissions
|
||||
are needed on the key and certificate files.
|
||||
|
||||
## Can I directly access mailboxes through the file system?
|
||||
|
||||
No, mox only provides access to email through protocols like IMAP.
|
||||
|
||||
While it can be convenient for users/email clients to access email through
|
||||
conventions like Maildir, providing such access puts quite a burden on the
|
||||
server: The server has to continuously watch for changes made to the mail store
|
||||
by external programs, and sync its internal state. By only providing access to
|
||||
emails through mox, the storage/state management is simpler and easier to
|
||||
implement reliably.
|
||||
|
||||
Not providing direct file system access also allows future improvements in the
|
||||
storage mechanism, such as encryption of all stored messages. Programs won't be
|
||||
able to access such messages directly.
|
||||
|
||||
Mox stores metadata about delivered messages in its per-account message index
|
||||
database, more than fits in a simple (filename-based) format like Maildir: the
|
||||
IP address of the remote SMTP server during delivery, SPF/DKIM/DMARC domains
|
||||
and validation status, and more...
|
||||
|
||||
For efficiency, mox doesn't prepend message headers generated during delivery
|
||||
(e.g. Authentication-Results) to the on-disk message file, but only stores them
|
||||
in the database. This prevents a rewrite of the entire message file. When
|
||||
reading a message, mox combines the prepended headers from the database with
|
||||
the message file.
|
||||
|
||||
Mox user accounts have no relation to operating system user accounts. Multiple
|
||||
system users reading their email on a single machine is not very common
|
||||
anymore. All data (for all accounts) stored by mox is accessible only by the
|
||||
mox process. Messages are currently stored as individual files in standard
|
||||
Internet Message Format (IMF), at `data/accounts/<account>/msg/<dir>/<msgid>`:
|
||||
`msgid` is a consecutive unique integer id assigned by the per-account message
|
||||
index database; `dir` groups 8k consecutive message ids into a directory,
|
||||
ensuring they don't become too large. The message index database file for an
|
||||
account is at `data/accounts/<account>/index.db`, accessed with the bstore
|
||||
database library, which uses bbolt (formerly BoltDB) for storage, a
|
||||
transactional key/value library/file format inspired by LMDB.
|
||||
|
||||
## How do I block IPs with authentication failures with fail2ban?
|
||||
|
||||
Mox includes a rate limiter for IPs/networks that cause too many authentication
|
||||
failures. It automatically unblocks such IPs/networks after a while. So you may
|
||||
not need fail2ban. If you want to use fail2ban, you could use this snippet:
|
||||
|
||||
[Definition]
|
||||
failregex = .*failed authentication attempt.*remote=<HOST>
|
||||
ignoreregex =
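If you do use fail2ban, you would save the filter above as e.g.
/etc/fail2ban/filter.d/mox.conf and reference it from a jail, for example in
/etc/fail2ban/jail.d/mox.local. A minimal sketch; the log path is an assumption
and depends on how you run and log mox:

[mox]
enabled = true
filter = mox
logpath = /home/mox/mox.log
maxretry = 10
findtime = 10m
bantime = 1h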
|
||||
|
1158
admin/admin.go
Normal file
File diff suppressed because it is too large
175
admin/clientconfig.go
Normal file
@ -0,0 +1,175 @@
|
||||
package admin
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"maps"
|
||||
"slices"
|
||||
|
||||
"github.com/mjl-/mox/config"
|
||||
"github.com/mjl-/mox/dns"
|
||||
"github.com/mjl-/mox/mox-"
|
||||
)
|
||||
|
||||
type TLSMode uint8
|
||||
|
||||
const (
|
||||
TLSModeImmediate TLSMode = 0
|
||||
TLSModeSTARTTLS TLSMode = 1
|
||||
TLSModeNone TLSMode = 2
|
||||
)
|
||||
|
||||
type ProtocolConfig struct {
|
||||
Host dns.Domain
|
||||
Port int
|
||||
TLSMode TLSMode
|
||||
EnabledOnHTTPS bool
|
||||
}
|
||||
|
||||
type ClientConfig struct {
|
||||
IMAP ProtocolConfig
|
||||
Submission ProtocolConfig
|
||||
}
|
||||
|
||||
// ClientConfigDomain returns a single IMAP and Submission client configuration for
|
||||
// a domain.
|
||||
func ClientConfigDomain(d dns.Domain) (rconfig ClientConfig, rerr error) {
|
||||
var haveIMAP, haveSubmission bool
|
||||
|
||||
domConf, ok := mox.Conf.Domain(d)
|
||||
if !ok {
|
||||
return ClientConfig{}, fmt.Errorf("%w: unknown domain", ErrRequest)
|
||||
}
|
||||
|
||||
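// gather collects IMAP and Submission settings from a single listener,
// preferring implicit TLS (IMAPS/Submissions) over the STARTTLS variants, and
// reports whether settings for both protocols have been found.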
gather := func(l config.Listener) (done bool) {
|
||||
host := mox.Conf.Static.HostnameDomain
|
||||
if l.Hostname != "" {
|
||||
host = l.HostnameDomain
|
||||
}
|
||||
if domConf.ClientSettingsDomain != "" {
|
||||
host = domConf.ClientSettingsDNSDomain
|
||||
}
|
||||
if !haveIMAP && l.IMAPS.Enabled {
|
||||
rconfig.IMAP.Host = host
|
||||
rconfig.IMAP.Port = config.Port(l.IMAPS.Port, 993)
|
||||
rconfig.IMAP.TLSMode = TLSModeImmediate
|
||||
rconfig.IMAP.EnabledOnHTTPS = l.IMAPS.EnabledOnHTTPS
|
||||
haveIMAP = true
|
||||
}
|
||||
if !haveIMAP && l.IMAP.Enabled {
|
||||
rconfig.IMAP.Host = host
|
||||
rconfig.IMAP.Port = config.Port(l.IMAP.Port, 143)
|
||||
rconfig.IMAP.TLSMode = TLSModeSTARTTLS
|
||||
if l.TLS == nil {
|
||||
rconfig.IMAP.TLSMode = TLSModeNone
|
||||
}
|
||||
haveIMAP = true
|
||||
}
|
||||
if !haveSubmission && l.Submissions.Enabled {
|
||||
rconfig.Submission.Host = host
|
||||
rconfig.Submission.Port = config.Port(l.Submissions.Port, 465)
|
||||
rconfig.Submission.TLSMode = TLSModeImmediate
|
||||
rconfig.Submission.EnabledOnHTTPS = l.Submissions.EnabledOnHTTPS
|
||||
haveSubmission = true
|
||||
}
|
||||
if !haveSubmission && l.Submission.Enabled {
|
||||
rconfig.Submission.Host = host
|
||||
rconfig.Submission.Port = config.Port(l.Submission.Port, 587)
|
||||
rconfig.Submission.TLSMode = TLSModeSTARTTLS
|
||||
if l.TLS == nil {
|
||||
rconfig.Submission.TLSMode = TLSModeNone
|
||||
}
|
||||
haveSubmission = true
|
||||
}
|
||||
return haveIMAP && haveSubmission
|
||||
}
|
||||
|
||||
// Look at the public listener first. Most likely the intended configuration.
|
||||
if public, ok := mox.Conf.Static.Listeners["public"]; ok {
|
||||
if gather(public) {
|
||||
return
|
||||
}
|
||||
}
|
||||
// Go through the other listeners in consistent order.
|
||||
names := slices.Sorted(maps.Keys(mox.Conf.Static.Listeners))
|
||||
for _, name := range names {
|
||||
if gather(mox.Conf.Static.Listeners[name]) {
|
||||
return
|
||||
}
|
||||
}
|
||||
return ClientConfig{}, fmt.Errorf("%w: no listeners found for imap and/or submission", ErrRequest)
|
||||
}
|
||||
|
||||
// ClientConfigs holds the client configuration for IMAP/Submission for a
|
||||
// domain.
|
||||
type ClientConfigs struct {
|
||||
Entries []ClientConfigsEntry
|
||||
}
|
||||
|
||||
type ClientConfigsEntry struct {
|
||||
Protocol string
|
||||
Host dns.Domain
|
||||
Port int
|
||||
Listener string
|
||||
Note string
|
||||
}
|
||||
|
||||
// ClientConfigsDomain returns the client configs for IMAP/Submission for a
|
||||
// domain.
|
||||
func ClientConfigsDomain(d dns.Domain) (ClientConfigs, error) {
|
||||
domConf, ok := mox.Conf.Domain(d)
|
||||
if !ok {
|
||||
return ClientConfigs{}, fmt.Errorf("%w: unknown domain", ErrRequest)
|
||||
}
|
||||
|
||||
c := ClientConfigs{}
|
||||
c.Entries = []ClientConfigsEntry{}
|
||||
var listeners []string
|
||||
|
||||
for name := range mox.Conf.Static.Listeners {
|
||||
listeners = append(listeners, name)
|
||||
}
|
||||
slices.Sort(listeners)
|
||||
|
||||
note := func(tls bool, requiretls bool) string {
|
||||
if !tls {
|
||||
return "plain text, no STARTTLS configured"
|
||||
}
|
||||
if requiretls {
|
||||
return "STARTTLS required"
|
||||
}
|
||||
return "STARTTLS optional"
|
||||
}
|
||||
|
||||
for _, name := range listeners {
|
||||
l := mox.Conf.Static.Listeners[name]
|
||||
host := mox.Conf.Static.HostnameDomain
|
||||
if l.Hostname != "" {
|
||||
host = l.HostnameDomain
|
||||
}
|
||||
if domConf.ClientSettingsDomain != "" {
|
||||
host = domConf.ClientSettingsDNSDomain
|
||||
}
|
||||
if l.Submissions.Enabled {
|
||||
note := "with TLS"
|
||||
if l.Submissions.EnabledOnHTTPS {
|
||||
note += "; also served on port 443 with TLS ALPN \"smtp\""
|
||||
}
|
||||
c.Entries = append(c.Entries, ClientConfigsEntry{"Submission (SMTP)", host, config.Port(l.Submissions.Port, 465), name, note})
|
||||
}
|
||||
if l.IMAPS.Enabled {
|
||||
note := "with TLS"
|
||||
if l.IMAPS.EnabledOnHTTPS {
|
||||
note += "; also served on port 443 with TLS ALPN \"imap\""
|
||||
}
|
||||
c.Entries = append(c.Entries, ClientConfigsEntry{"IMAP", host, config.Port(l.IMAPS.Port, 993), name, note})
|
||||
}
|
||||
if l.Submission.Enabled {
|
||||
c.Entries = append(c.Entries, ClientConfigsEntry{"Submission (SMTP)", host, config.Port(l.Submission.Port, 587), name, note(l.TLS != nil, !l.Submission.NoRequireSTARTTLS)})
|
||||
}
|
||||
if l.IMAP.Enabled {
|
||||
c.Entries = append(c.Entries, ClientConfigsEntry{"IMAP", host, config.Port(l.IMAP.Port, 143), name, note(l.TLS != nil, !l.IMAP.NoRequireSTARTTLS)})
|
||||
}
|
||||
}
|
||||
|
||||
return c, nil
|
||||
}
|
318
admin/dnsrecords.go
Normal file
@ -0,0 +1,318 @@
|
||||
package admin
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/ed25519"
|
||||
"crypto/rsa"
|
||||
"crypto/sha256"
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"github.com/mjl-/adns"
|
||||
|
||||
"github.com/mjl-/mox/config"
|
||||
"github.com/mjl-/mox/dkim"
|
||||
"github.com/mjl-/mox/dmarc"
|
||||
"github.com/mjl-/mox/dns"
|
||||
"github.com/mjl-/mox/mox-"
|
||||
"github.com/mjl-/mox/smtp"
|
||||
"github.com/mjl-/mox/spf"
|
||||
"github.com/mjl-/mox/tlsrpt"
|
||||
"slices"
|
||||
)
|
||||
|
||||
// todo: find a way to automatically create the dns records as it would greatly simplify setting up email for a domain. we could also dynamically make changes, e.g. providing grace periods after disabling a dkim key, only automatically removing the dkim dns key after a few days. but this requires some kind of api and authentication to the dns server. there doesn't appear to be a single commonly used api for dns management. each of the numerous cloud providers have their own APIs and rather large SDKs to use them. we don't want to link all of them in.
|
||||
|
||||
// DomainRecords returns text lines describing DNS records required for configuring
|
||||
// a domain.
|
||||
//
|
||||
// If certIssuerDomainName is set, CAA records to limit TLS certificate issuance to
|
||||
// that caID will be suggested. If acmeAccountURI is also set, CAA records also
|
||||
// restricting issuance to that account ID will be suggested.
|
||||
func DomainRecords(domConf config.Domain, domain dns.Domain, hasDNSSEC bool, certIssuerDomainName, acmeAccountURI string) ([]string, error) {
|
||||
d := domain.ASCII
|
||||
h := mox.Conf.Static.HostnameDomain.ASCII
|
||||
|
||||
// The first line with ";" is used by ../testdata/integration/moxacmepebble.sh and
|
||||
// ../testdata/integration/moxmail2.sh for selecting DNS records
|
||||
records := []string{
|
||||
"; Time To Live of 5 minutes, may be recognized if importing as a zone file.",
|
||||
"; Once your setup is working, you may want to increase the TTL.",
|
||||
"$TTL 300",
|
||||
"",
|
||||
}
|
||||
|
||||
if public, ok := mox.Conf.Static.Listeners["public"]; ok && public.TLS != nil && (len(public.TLS.HostPrivateRSA2048Keys) > 0 || len(public.TLS.HostPrivateECDSAP256Keys) > 0) {
|
||||
records = append(records,
|
||||
`; DANE: These records indicate that a remote mail server trying to deliver email`,
|
||||
`; with SMTP (TCP port 25) must verify the TLS certificate with DANE-EE (3), based`,
|
||||
`; on the certificate public key ("SPKI", 1) that is SHA2-256-hashed (1) to the`,
|
||||
`; hexadecimal hash. DANE-EE verification means only the certificate or public`,
|
||||
`; key is verified, not whether the certificate is signed by a (centralized)`,
|
||||
`; certificate authority (CA), is expired, or matches the host name.`,
|
||||
`;`,
|
||||
`; NOTE: Create the records below only once: They are for the machine, and apply`,
|
||||
`; to all hosted domains.`,
|
||||
)
|
||||
if !hasDNSSEC {
|
||||
records = append(records,
|
||||
";",
|
||||
"; WARNING: Domain does not appear to be DNSSEC-signed. To enable DANE, first",
|
||||
"; enable DNSSEC on your domain, then add the TLSA records. Records below have been",
|
||||
"; commented out.",
|
||||
)
|
||||
}
|
||||
addTLSA := func(privKey crypto.Signer) error {
|
||||
spkiBuf, err := x509.MarshalPKIXPublicKey(privKey.Public())
|
||||
if err != nil {
|
||||
return fmt.Errorf("marshal SubjectPublicKeyInfo for DANE record: %v", err)
|
||||
}
|
||||
sum := sha256.Sum256(spkiBuf)
|
||||
tlsaRecord := adns.TLSA{
|
||||
Usage: adns.TLSAUsageDANEEE,
|
||||
Selector: adns.TLSASelectorSPKI,
|
||||
MatchType: adns.TLSAMatchTypeSHA256,
|
||||
CertAssoc: sum[:],
|
||||
}
|
||||
var s string
|
||||
if hasDNSSEC {
|
||||
s = fmt.Sprintf("_25._tcp.%-*s TLSA %s", 20+len(d)-len("_25._tcp."), h+".", tlsaRecord.Record())
|
||||
} else {
|
||||
s = fmt.Sprintf(";; _25._tcp.%-*s TLSA %s", 20+len(d)-len(";; _25._tcp."), h+".", tlsaRecord.Record())
|
||||
}
|
||||
records = append(records, s)
|
||||
return nil
|
||||
}
|
||||
for _, privKey := range public.TLS.HostPrivateECDSAP256Keys {
|
||||
if err := addTLSA(privKey); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
for _, privKey := range public.TLS.HostPrivateRSA2048Keys {
|
||||
if err := addTLSA(privKey); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
records = append(records, "")
|
||||
}
|
||||
|
||||
if d != h {
|
||||
records = append(records,
|
||||
"; For the machine, only needs to be created once, for the first domain added:",
|
||||
"; ",
|
||||
"; SPF-allow host for itself, resulting in relaxed DMARC pass for (postmaster)",
|
||||
"; messages (DSNs) sent from host:",
|
||||
fmt.Sprintf(`%-*s TXT "v=spf1 a -all"`, 20+len(d), h+"."), // ../rfc/7208:2263 ../rfc/7208:2287
|
||||
"",
|
||||
)
|
||||
}
|
||||
if d != h && mox.Conf.Static.HostTLSRPT.ParsedLocalpart != "" {
|
||||
uri := url.URL{
|
||||
Scheme: "mailto",
|
||||
Opaque: smtp.NewAddress(mox.Conf.Static.HostTLSRPT.ParsedLocalpart, mox.Conf.Static.HostnameDomain).Pack(false),
|
||||
}
|
||||
tlsrptr := tlsrpt.Record{Version: "TLSRPTv1", RUAs: [][]tlsrpt.RUA{{tlsrpt.RUA(uri.String())}}}
|
||||
records = append(records,
|
||||
"; For the machine, only needs to be created once, for the first domain added:",
|
||||
"; ",
|
||||
"; Request reporting about success/failures of TLS connections to (MX) host, for DANE.",
|
||||
fmt.Sprintf(`_smtp._tls.%-*s TXT "%s"`, 20+len(d)-len("_smtp._tls."), h+".", tlsrptr.String()),
|
||||
"",
|
||||
)
|
||||
}
|
||||
|
||||
records = append(records,
|
||||
"; Deliver email for the domain to this host.",
|
||||
fmt.Sprintf("%s. MX 10 %s.", d, h),
|
||||
"",
|
||||
|
||||
"; Outgoing messages will be signed with the first two DKIM keys. The other two",
|
||||
"; configured for backup, switching to them is just a config change.",
|
||||
)
|
||||
var selectors []string
|
||||
for name := range domConf.DKIM.Selectors {
|
||||
selectors = append(selectors, name)
|
||||
}
|
||||
slices.Sort(selectors)
|
||||
for _, name := range selectors {
|
||||
sel := domConf.DKIM.Selectors[name]
|
||||
dkimr := dkim.Record{
|
||||
Version: "DKIM1",
|
||||
Hashes: []string{"sha256"},
|
||||
PublicKey: sel.Key.Public(),
|
||||
}
|
||||
if _, ok := sel.Key.(ed25519.PrivateKey); ok {
|
||||
dkimr.Key = "ed25519"
|
||||
} else if _, ok := sel.Key.(*rsa.PrivateKey); !ok {
|
||||
return nil, fmt.Errorf("unrecognized private key for DKIM selector %q: %T", name, sel.Key)
|
||||
}
|
||||
txt, err := dkimr.Record()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("making DKIM DNS TXT record: %v", err)
|
||||
}
|
||||
|
||||
if len(txt) > 100 {
|
||||
records = append(records,
|
||||
"; NOTE: The following is a single long record split over several lines for use",
|
||||
"; in zone files. When adding through a DNS operator web interface, combine the",
|
||||
"; strings into a single string, without ().",
|
||||
)
|
||||
}
|
||||
s := fmt.Sprintf("%s._domainkey.%s. TXT %s", name, d, mox.TXTStrings(txt))
|
||||
records = append(records, s)
|
||||
|
||||
}
|
||||
dmarcr := dmarc.DefaultRecord
|
||||
dmarcr.Policy = "reject"
|
||||
if domConf.DMARC != nil {
|
||||
uri := url.URL{
|
||||
Scheme: "mailto",
|
||||
Opaque: smtp.NewAddress(domConf.DMARC.ParsedLocalpart, domConf.DMARC.DNSDomain).Pack(false),
|
||||
}
|
||||
dmarcr.AggregateReportAddresses = []dmarc.URI{
|
||||
{Address: uri.String(), MaxSize: 10, Unit: "m"},
|
||||
}
|
||||
}
|
||||
dspfr := spf.Record{Version: "spf1"}
|
||||
for _, ip := range mox.DomainSPFIPs() {
|
||||
mech := "ip4"
|
||||
if ip.To4() == nil {
|
||||
mech = "ip6"
|
||||
}
|
||||
dspfr.Directives = append(dspfr.Directives, spf.Directive{Mechanism: mech, IP: ip})
|
||||
}
|
||||
dspfr.Directives = append(dspfr.Directives,
|
||||
spf.Directive{Mechanism: "mx"},
|
||||
spf.Directive{Qualifier: "~", Mechanism: "all"},
|
||||
)
|
||||
dspftxt, err := dspfr.Record()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("making domain spf record: %v", err)
|
||||
}
|
||||
records = append(records,
|
||||
"",
|
||||
|
||||
"; Specify the MX host is allowed to send for our domain and for itself (for DSNs).",
|
||||
"; ~all means softfail for anything else, which is done instead of -all to prevent older",
|
||||
"; mail servers from rejecting the message because they never get to looking for a dkim/dmarc pass.",
|
||||
fmt.Sprintf(`%s. TXT "%s"`, d, dspftxt),
|
||||
"",
|
||||
|
||||
"; Emails that fail the DMARC check (without aligned DKIM and without aligned SPF)",
|
||||
"; should be rejected, and request reports. If you email through mailing lists that",
|
||||
"; strip DKIM-Signature headers and don't rewrite the From header, you may want to",
|
||||
"; set the policy to p=none.",
|
||||
fmt.Sprintf(`_dmarc.%s. TXT "%s"`, d, dmarcr.String()),
|
||||
"",
|
||||
)
|
||||
|
||||
if sts := domConf.MTASTS; sts != nil {
|
||||
records = append(records,
|
||||
"; Remote servers can use MTA-STS to verify our TLS certificate with the",
|
||||
"; WebPKI pool of CA's (certificate authorities) when delivering over SMTP with",
|
||||
"; STARTTLS.",
|
||||
fmt.Sprintf(`mta-sts.%s. CNAME %s.`, d, h),
|
||||
fmt.Sprintf(`_mta-sts.%s. TXT "v=STSv1; id=%s"`, d, sts.PolicyID),
|
||||
"",
|
||||
)
|
||||
} else {
|
||||
records = append(records,
|
||||
"; Note: No MTA-STS to indicate TLS should be used. Either because disabled for the",
|
||||
"; domain or because mox.conf does not have a listener with MTA-STS configured.",
|
||||
"",
|
||||
)
|
||||
}
|
||||
|
||||
if domConf.TLSRPT != nil {
|
||||
uri := url.URL{
|
||||
Scheme: "mailto",
|
||||
Opaque: smtp.NewAddress(domConf.TLSRPT.ParsedLocalpart, domConf.TLSRPT.DNSDomain).Pack(false),
|
||||
}
|
||||
tlsrptr := tlsrpt.Record{Version: "TLSRPTv1", RUAs: [][]tlsrpt.RUA{{tlsrpt.RUA(uri.String())}}}
|
||||
records = append(records,
|
||||
"; Request reporting about TLS failures.",
|
||||
fmt.Sprintf(`_smtp._tls.%s. TXT "%s"`, d, tlsrptr.String()),
|
||||
"",
|
||||
)
|
||||
}
|
||||
|
||||
if domConf.ClientSettingsDomain != "" && domConf.ClientSettingsDNSDomain != mox.Conf.Static.HostnameDomain {
|
||||
records = append(records,
|
||||
"; Client settings will reference a subdomain of the hosted domain, making it",
|
||||
"; easier to migrate to a different server in the future by not requiring settings",
|
||||
"; in all clients to be updated.",
|
||||
fmt.Sprintf(`%-*s CNAME %s.`, 20+len(d), domConf.ClientSettingsDNSDomain.ASCII+".", h),
|
||||
"",
|
||||
)
|
||||
}
|
||||
|
||||
records = append(records,
|
||||
"; Autoconfig is used by Thunderbird. Autodiscover is (in theory) used by Microsoft.",
|
||||
fmt.Sprintf(`autoconfig.%s. CNAME %s.`, d, h),
|
||||
fmt.Sprintf(`_autodiscover._tcp.%s. SRV 0 1 443 %s.`, d, h),
|
||||
"",
|
||||
|
||||
// ../rfc/6186:133 ../rfc/8314:692
|
||||
"; For secure IMAP and submission autoconfig, point to mail host.",
|
||||
fmt.Sprintf(`_imaps._tcp.%s. SRV 0 1 993 %s.`, d, h),
|
||||
fmt.Sprintf(`_submissions._tcp.%s. SRV 0 1 465 %s.`, d, h),
|
||||
"",
|
||||
// ../rfc/6186:242
|
||||
"; Next records specify POP3 and non-TLS ports are not to be used.",
|
||||
"; These are optional and safe to leave out (e.g. if you have to click a lot in a",
|
||||
"; DNS admin web interface).",
|
||||
fmt.Sprintf(`_imap._tcp.%s. SRV 0 0 0 .`, d),
|
||||
fmt.Sprintf(`_submission._tcp.%s. SRV 0 0 0 .`, d),
|
||||
fmt.Sprintf(`_pop3._tcp.%s. SRV 0 0 0 .`, d),
|
||||
fmt.Sprintf(`_pop3s._tcp.%s. SRV 0 0 0 .`, d),
|
||||
)
|
||||
|
||||
if certIssuerDomainName != "" {
|
||||
// ../rfc/8659:18 for CAA records.
|
||||
records = append(records,
|
||||
"",
|
||||
"; Optional:",
|
||||
"; You could mark Let's Encrypt as the only Certificate Authority allowed to",
|
||||
"; sign TLS certificates for your domain.",
|
||||
fmt.Sprintf(`%s. CAA 0 issue "%s"`, d, certIssuerDomainName),
|
||||
)
|
||||
if acmeAccountURI != "" {
|
||||
// ../rfc/8657:99 for accounturi.
|
||||
// ../rfc/8657:147 for validationmethods.
|
||||
records = append(records,
|
||||
";",
|
||||
"; Optionally limit certificates for this domain to the account ID and methods used by mox.",
|
||||
fmt.Sprintf(`;; %s. CAA 0 issue "%s; accounturi=%s; validationmethods=tls-alpn-01,http-01"`, d, certIssuerDomainName, acmeAccountURI),
|
||||
";",
|
||||
"; Or alternatively only limit for email-specific subdomains, so you can use",
|
||||
"; other accounts/methods for other subdomains.",
|
||||
fmt.Sprintf(`;; autoconfig.%s. CAA 0 issue "%s; accounturi=%s; validationmethods=tls-alpn-01,http-01"`, d, certIssuerDomainName, acmeAccountURI),
|
||||
fmt.Sprintf(`;; mta-sts.%s. CAA 0 issue "%s; accounturi=%s; validationmethods=tls-alpn-01,http-01"`, d, certIssuerDomainName, acmeAccountURI),
|
||||
)
|
||||
if domConf.ClientSettingsDomain != "" && domConf.ClientSettingsDNSDomain != mox.Conf.Static.HostnameDomain {
|
||||
records = append(records,
|
||||
fmt.Sprintf(`;; %-*s CAA 0 issue "%s; accounturi=%s; validationmethods=tls-alpn-01,http-01"`, 20-3+len(d), domConf.ClientSettingsDNSDomain.ASCII, certIssuerDomainName, acmeAccountURI),
|
||||
)
|
||||
}
|
||||
if strings.HasSuffix(h, "."+d) {
|
||||
records = append(records,
|
||||
";",
|
||||
"; And the mail hostname.",
|
||||
fmt.Sprintf(`;; %-*s CAA 0 issue "%s; accounturi=%s; validationmethods=tls-alpn-01,http-01"`, 20-3+len(d), h+".", certIssuerDomainName, acmeAccountURI),
|
||||
)
|
||||
}
|
||||
} else {
|
||||
// The string "will be suggested" is used by
|
||||
// ../testdata/integration/moxacmepebble.sh and ../testdata/integration/moxmail2.sh
|
||||
// as end of DNS records.
|
||||
records = append(records,
|
||||
";",
|
||||
"; Note: After starting up, once an ACME account has been created, CAA records",
|
||||
"; that restrict issuance to the account will be suggested.",
|
||||
)
|
||||
}
|
||||
}
|
||||
return records, nil
|
||||
}
|
38
apidiff.sh
Executable file
@ -0,0 +1,38 @@
|
||||
#!/bin/sh
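# Compare the exported Go API of the packages listed in apidiff/packages.txt
# between the previous released version and the current working tree, and
# write any incompatible changes to apidiff/next.txt.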
|
||||
set -e
|
||||
|
||||
prevversion=$(go list -mod=readonly -m -f '{{ .Version }}' github.com/mjl-/mox@latest)
|
||||
if ! test -d tmp/mox-$prevversion; then
|
||||
mkdir -p tmp/mox-$prevversion
|
||||
git archive --format=tar $prevversion | tar -C tmp/mox-$prevversion -xf -
|
||||
fi
|
||||
(rm -r tmp/apidiff || exit 0)
|
||||
mkdir -p tmp/apidiff/$prevversion tmp/apidiff/next
|
||||
(rm apidiff/next.txt.new 2>/dev/null || exit 0)
|
||||
touch apidiff/next.txt.new
|
||||
for p in $(cat apidiff/packages.txt); do
|
||||
if ! test -d tmp/mox-$prevversion/$p; then
|
||||
continue
|
||||
fi
|
||||
(cd tmp/mox-$prevversion && apidiff -w ../apidiff/$prevversion/$p.api ./$p)
|
||||
apidiff -w tmp/apidiff/next/$p.api ./$p
|
||||
apidiff -incompatible tmp/apidiff/$prevversion/$p.api tmp/apidiff/next/$p.api >$p.diff
|
||||
if test -s $p.diff; then
|
||||
(
|
||||
echo '#' $p
|
||||
cat $p.diff
|
||||
echo
|
||||
) >>apidiff/next.txt.new
|
||||
fi
|
||||
rm $p.diff
|
||||
done
|
||||
if test -s apidiff/next.txt.new; then
|
||||
(
|
||||
echo "Below are the incompatible changes between $prevversion and next, per package."
|
||||
echo
|
||||
cat apidiff/next.txt.new
|
||||
) >apidiff/next.txt
|
||||
rm apidiff/next.txt.new
|
||||
else
|
||||
mv apidiff/next.txt.new apidiff/next.txt
|
||||
fi
|
10
apidiff/README.txt
Normal file
@ -0,0 +1,10 @@
|
||||
This directory lists incompatible changes between released versions for packages
|
||||
intended for reuse by third party projects, as listed in packages.txt. These
|
||||
files are generated using golang.org/x/exp/cmd/apidiff (see
|
||||
https://pkg.go.dev/golang.org/x/exp/apidiff) and ../apidiff.sh.
|
||||
|
||||
There is no guarantee that there will be no breaking changes. With Go's
|
||||
dependency versioning approach (minimal version selection), Go code will never
|
||||
unexpectedly stop compiling. Incompatibilities will show when explicitly
|
||||
updating a dependency. Making the required changes is typically fairly
|
||||
straightforward.
|
5
apidiff/next.txt
Normal file
@ -0,0 +1,5 @@
|
||||
Below are the incompatible changes between v0.0.15 and next, per package.
|
||||
|
||||
# smtpclient
|
||||
- GatherDestinations: changed from func(context.Context, *log/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.IPDomain) (bool, bool, bool, github.com/mjl-/mox/dns.Domain, []github.com/mjl-/mox/dns.IPDomain, bool, error) to func(context.Context, *log/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.IPDomain) (bool, bool, bool, github.com/mjl-/mox/dns.Domain, []HostPref, bool, error)
|
||||
|
20
apidiff/packages.txt
Normal file
@ -0,0 +1,20 @@
|
||||
dane
|
||||
dmarc
|
||||
dmarcrpt
|
||||
dns
|
||||
dnsbl
|
||||
iprev
|
||||
message
|
||||
mtasts
|
||||
publicsuffix
|
||||
ratelimit
|
||||
sasl
|
||||
scram
|
||||
smtp
|
||||
smtpclient
|
||||
spf
|
||||
subjectpass
|
||||
tlsrpt
|
||||
updates
|
||||
webapi
|
||||
webhook
|
79
apidiff/v0.0.10.txt
Normal file
@ -0,0 +1,79 @@
|
||||
Below are the incompatible changes between v0.0.9 and v0.0.10, per package.
|
||||
|
||||
# dane
|
||||
- Dial: changed from func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, string, string, []github.com/mjl-/adns.TLSAUsage, *crypto/x509.CertPool) (net.Conn, github.com/mjl-/adns.TLSA, error) to func(context.Context, *log/slog.Logger, github.com/mjl-/mox/dns.Resolver, string, string, []github.com/mjl-/adns.TLSAUsage, *crypto/x509.CertPool) (net.Conn, github.com/mjl-/adns.TLSA, error)
|
||||
- TLSClientConfig: changed from func(*golang.org/x/exp/slog.Logger, []github.com/mjl-/adns.TLSA, github.com/mjl-/mox/dns.Domain, []github.com/mjl-/mox/dns.Domain, *github.com/mjl-/adns.TLSA, *crypto/x509.CertPool) crypto/tls.Config to func(*log/slog.Logger, []github.com/mjl-/adns.TLSA, github.com/mjl-/mox/dns.Domain, []github.com/mjl-/mox/dns.Domain, *github.com/mjl-/adns.TLSA, *crypto/x509.CertPool) crypto/tls.Config
|
||||
- Verify: changed from func(*golang.org/x/exp/slog.Logger, []github.com/mjl-/adns.TLSA, crypto/tls.ConnectionState, github.com/mjl-/mox/dns.Domain, []github.com/mjl-/mox/dns.Domain, *crypto/x509.CertPool) (bool, github.com/mjl-/adns.TLSA, error) to func(*log/slog.Logger, []github.com/mjl-/adns.TLSA, crypto/tls.ConnectionState, github.com/mjl-/mox/dns.Domain, []github.com/mjl-/mox/dns.Domain, *crypto/x509.CertPool) (bool, github.com/mjl-/adns.TLSA, error)
|
||||
|
||||
# dmarc
|
||||
- Lookup: changed from func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain) (Status, github.com/mjl-/mox/dns.Domain, *Record, string, bool, error) to func(context.Context, *log/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain) (Status, github.com/mjl-/mox/dns.Domain, *Record, string, bool, error)
|
||||
- LookupExternalReportsAccepted: changed from func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain, github.com/mjl-/mox/dns.Domain) (bool, Status, []*Record, []string, bool, error) to func(context.Context, *log/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain, github.com/mjl-/mox/dns.Domain) (bool, Status, []*Record, []string, bool, error)
|
||||
- Verify: changed from func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain, []github.com/mjl-/mox/dkim.Result, github.com/mjl-/mox/spf.Status, *github.com/mjl-/mox/dns.Domain, bool) (bool, Result) to func(context.Context, *log/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain, []github.com/mjl-/mox/dkim.Result, github.com/mjl-/mox/spf.Status, *github.com/mjl-/mox/dns.Domain, bool) (bool, Result)
|
||||
|
||||
# dmarcrpt
|
||||
- ParseMessageReport: changed from func(*golang.org/x/exp/slog.Logger, io.ReaderAt) (*Feedback, error) to func(*log/slog.Logger, io.ReaderAt) (*Feedback, error)
|
||||
|
||||
# dns
|
||||
- StrictResolver.Log: changed from *golang.org/x/exp/slog.Logger to *log/slog.Logger
|
||||
|
||||
# dnsbl
|
||||
- CheckHealth: changed from func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain) error to func(context.Context, *log/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain) error
|
||||
- Lookup: changed from func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain, net.IP) (Status, string, error) to func(context.Context, *log/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain, net.IP) (Status, string, error)
|
||||
|
||||
# iprev
|
||||
|
||||
# message
|
||||
- (*Part).ParseNextPart: changed from func(*golang.org/x/exp/slog.Logger) (*Part, error) to func(*log/slog.Logger) (*Part, error)
|
||||
- (*Part).Walk: changed from func(*golang.org/x/exp/slog.Logger, *Part) error to func(*log/slog.Logger, *Part) error
|
||||
- EnsurePart: changed from func(*golang.org/x/exp/slog.Logger, bool, io.ReaderAt, int64) (Part, error) to func(*log/slog.Logger, bool, io.ReaderAt, int64) (Part, error)
|
||||
- From: changed from func(*golang.org/x/exp/slog.Logger, bool, io.ReaderAt) (github.com/mjl-/mox/smtp.Address, *Envelope, net/textproto.MIMEHeader, error) to func(*log/slog.Logger, bool, io.ReaderAt) (github.com/mjl-/mox/smtp.Address, *Envelope, net/textproto.MIMEHeader, error)
|
||||
- Parse: changed from func(*golang.org/x/exp/slog.Logger, bool, io.ReaderAt) (Part, error) to func(*log/slog.Logger, bool, io.ReaderAt) (Part, error)
|
||||
|
||||
# mtasts
|
||||
- FetchPolicy: changed from func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Domain) (*Policy, string, error) to func(context.Context, *log/slog.Logger, github.com/mjl-/mox/dns.Domain) (*Policy, string, error)
|
||||
- Get: changed from func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain) (*Record, *Policy, string, error) to func(context.Context, *log/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain) (*Record, *Policy, string, error)
|
||||
- HTTPClientObserve: changed from func(context.Context, *golang.org/x/exp/slog.Logger, string, string, int, error, time.Time) to func(context.Context, *log/slog.Logger, string, string, int, error, time.Time)
|
||||
- LookupRecord: changed from func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain) (*Record, string, error) to func(context.Context, *log/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain) (*Record, string, error)
|
||||
|
||||
# publicsuffix
|
||||
- List.Lookup: changed from func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Domain) github.com/mjl-/mox/dns.Domain to func(context.Context, *log/slog.Logger, github.com/mjl-/mox/dns.Domain) github.com/mjl-/mox/dns.Domain
|
||||
- Lookup: changed from func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Domain) github.com/mjl-/mox/dns.Domain to func(context.Context, *log/slog.Logger, github.com/mjl-/mox/dns.Domain) github.com/mjl-/mox/dns.Domain
|
||||
- ParseList: changed from func(*golang.org/x/exp/slog.Logger, io.Reader) (List, error) to func(*log/slog.Logger, io.Reader) (List, error)
|
||||
|
||||
# ratelimit
|
||||
|
||||
# sasl
|
||||
|
||||
# scram
|
||||
|
||||
# smtp
|
||||
- SePol7ARCFail: removed
|
||||
- SePol7MissingReqTLS: removed
|
||||
|
||||
# smtpclient
|
||||
- Dial: changed from func(context.Context, *golang.org/x/exp/slog.Logger, Dialer, github.com/mjl-/mox/dns.IPDomain, []net.IP, int, map[string][]net.IP, []net.IP) (net.Conn, net.IP, error) to func(context.Context, *log/slog.Logger, Dialer, github.com/mjl-/mox/dns.IPDomain, []net.IP, int, map[string][]net.IP, []net.IP) (net.Conn, net.IP, error)
|
||||
- Error: old is comparable, new is not
|
||||
- GatherDestinations: changed from func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.IPDomain) (bool, bool, bool, github.com/mjl-/mox/dns.Domain, []github.com/mjl-/mox/dns.IPDomain, bool, error) to func(context.Context, *log/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.IPDomain) (bool, bool, bool, github.com/mjl-/mox/dns.Domain, []github.com/mjl-/mox/dns.IPDomain, bool, error)
|
||||
- GatherIPs: changed from func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.IPDomain, map[string][]net.IP) (bool, bool, github.com/mjl-/mox/dns.Domain, []net.IP, bool, error) to func(context.Context, *log/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.IPDomain, map[string][]net.IP) (bool, bool, github.com/mjl-/mox/dns.Domain, []net.IP, bool, error)
|
||||
- GatherTLSA: changed from func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain, bool, github.com/mjl-/mox/dns.Domain) (bool, []github.com/mjl-/adns.TLSA, github.com/mjl-/mox/dns.Domain, error) to func(context.Context, *log/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain, bool, github.com/mjl-/mox/dns.Domain) (bool, []github.com/mjl-/adns.TLSA, github.com/mjl-/mox/dns.Domain, error)
|
||||
- New: changed from func(context.Context, *golang.org/x/exp/slog.Logger, net.Conn, TLSMode, bool, github.com/mjl-/mox/dns.Domain, github.com/mjl-/mox/dns.Domain, Opts) (*Client, error) to func(context.Context, *log/slog.Logger, net.Conn, TLSMode, bool, github.com/mjl-/mox/dns.Domain, github.com/mjl-/mox/dns.Domain, Opts) (*Client, error)
|
||||
|
||||
# spf
|
||||
- Evaluate: changed from func(context.Context, *golang.org/x/exp/slog.Logger, *Record, github.com/mjl-/mox/dns.Resolver, Args) (Status, string, string, bool, error) to func(context.Context, *log/slog.Logger, *Record, github.com/mjl-/mox/dns.Resolver, Args) (Status, string, string, bool, error)
|
||||
- Lookup: changed from func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain) (Status, string, *Record, bool, error) to func(context.Context, *log/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain) (Status, string, *Record, bool, error)
|
||||
- Verify: changed from func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, Args) (Received, github.com/mjl-/mox/dns.Domain, string, bool, error) to func(context.Context, *log/slog.Logger, github.com/mjl-/mox/dns.Resolver, Args) (Received, github.com/mjl-/mox/dns.Domain, string, bool, error)
|
||||
|
||||
# subjectpass
|
||||
- Generate: changed from func(*golang.org/x/exp/slog.Logger, github.com/mjl-/mox/smtp.Address, []byte, time.Time) string to func(*log/slog.Logger, github.com/mjl-/mox/smtp.Address, []byte, time.Time) string
|
||||
- Verify: changed from func(*golang.org/x/exp/slog.Logger, io.ReaderAt, []byte, time.Duration) error to func(*log/slog.Logger, io.ReaderAt, []byte, time.Duration) error
|
||||
|
||||
# tlsrpt
|
||||
- Lookup: changed from func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain) (*Record, string, error) to func(context.Context, *log/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain) (*Record, string, error)
|
||||
- ParseMessage: changed from func(*golang.org/x/exp/slog.Logger, io.ReaderAt) (*ReportJSON, error) to func(*log/slog.Logger, io.ReaderAt) (*ReportJSON, error)
|
||||
|
||||
# updates
|
||||
- Check: changed from func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain, Version, string, []byte) (Version, *Record, *Changelog, error) to func(context.Context, *log/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain, Version, string, []byte) (Version, *Record, *Changelog, error)
|
||||
- FetchChangelog: changed from func(context.Context, *golang.org/x/exp/slog.Logger, string, Version, []byte) (*Changelog, error) to func(context.Context, *log/slog.Logger, string, Version, []byte) (*Changelog, error)
|
||||
- HTTPClientObserve: changed from func(context.Context, *golang.org/x/exp/slog.Logger, string, string, int, error, time.Time) to func(context.Context, *log/slog.Logger, string, string, int, error, time.Time)
|
||||
- Lookup: changed from func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain) (Version, *Record, error) to func(context.Context, *log/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain) (Version, *Record, error)
|
||||
|
45
apidiff/v0.0.11.txt
Normal file
@ -0,0 +1,45 @@
|
||||
Below are the incompatible changes between v0.0.10 and v0.0.11, per package.
|
||||
|
||||
# dane
|
||||
|
||||
# dmarc
|
||||
- DMARCPolicy: removed
|
||||
|
||||
# dmarcrpt
|
||||
|
||||
# dns
|
||||
|
||||
# dnsbl
|
||||
|
||||
# iprev
|
||||
|
||||
# message
|
||||
- (*Composer).TextPart: changed from func(string) ([]byte, string, string) to func(string, string) ([]byte, string, string)
|
||||
- From: changed from func(*log/slog.Logger, bool, io.ReaderAt) (github.com/mjl-/mox/smtp.Address, *Envelope, net/textproto.MIMEHeader, error) to func(*log/slog.Logger, bool, io.ReaderAt, *Part) (github.com/mjl-/mox/smtp.Address, *Envelope, net/textproto.MIMEHeader, error)
|
||||
- NewComposer: changed from func(io.Writer, int64) *Composer to func(io.Writer, int64, bool) *Composer
|
||||
|
||||
# mtasts
|
||||
- STSMX: removed
|
||||
|
||||
# publicsuffix
|
||||
|
||||
# ratelimit
|
||||
|
||||
# sasl
|
||||
|
||||
# scram
|
||||
|
||||
# smtp
|
||||
- SeMsg6ConversoinUnsupported3: removed
|
||||
|
||||
# smtpclient
|
||||
- GatherIPs: changed from func(context.Context, *log/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.IPDomain, map[string][]net.IP) (bool, bool, github.com/mjl-/mox/dns.Domain, []net.IP, bool, error) to func(context.Context, *log/slog.Logger, github.com/mjl-/mox/dns.Resolver, string, github.com/mjl-/mox/dns.IPDomain, map[string][]net.IP) (bool, bool, github.com/mjl-/mox/dns.Domain, []net.IP, bool, error)
|
||||
|
||||
# spf
|
||||
|
||||
# subjectpass
|
||||
|
||||
# tlsrpt
|
||||
|
||||
# updates
|
||||
|
43
apidiff/v0.0.12.txt
Normal file
@ -0,0 +1,43 @@
|
||||
Below are the incompatible changes between v0.0.11 and next, per package.
|
||||
|
||||
# dane
|
||||
|
||||
# dmarc
|
||||
|
||||
# dmarcrpt
|
||||
|
||||
# dns
|
||||
|
||||
# dnsbl
|
||||
|
||||
# iprev
|
||||
|
||||
# message
|
||||
- (*HeaderWriter).AddWrap: changed from func([]byte) to func([]byte, bool)
|
||||
|
||||
# mtasts
|
||||
|
||||
# publicsuffix
|
||||
|
||||
# ratelimit
|
||||
|
||||
# sasl
|
||||
|
||||
# scram
|
||||
|
||||
# smtp
|
||||
|
||||
# smtpclient
|
||||
|
||||
# spf
|
||||
|
||||
# subjectpass
|
||||
|
||||
# tlsrpt
|
||||
|
||||
# updates
|
||||
|
||||
# webapi
|
||||
|
||||
# webhook
|
||||
|
5
apidiff/v0.0.13.txt
Normal file
@ -0,0 +1,5 @@
|
||||
Below are the incompatible changes between v0.0.13 and next, per package.
|
||||
|
||||
# webhook
|
||||
- PartStructure: removed
|
||||
|
7
apidiff/v0.0.15.txt
Normal file
@ -0,0 +1,7 @@
|
||||
Below are the incompatible changes between v0.0.14 and next, per package.
|
||||
|
||||
# message
|
||||
- Part.ContentDescription: changed from string to *string
|
||||
- Part.ContentID: changed from string to *string
|
||||
- Part.ContentTransferEncoding: changed from string to *string
|
||||
|
83
apidiff/v0.0.9.txt
Normal file
@ -0,0 +1,83 @@
|
||||
Below are the incompatible changes between v0.0.8 and v0.0.9, per package.
|
||||
|
||||
# dane
|
||||
- Dial: changed from func(context.Context, github.com/mjl-/mox/dns.Resolver, string, string, []github.com/mjl-/adns.TLSAUsage) (net.Conn, github.com/mjl-/adns.TLSA, error) to func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, string, string, []github.com/mjl-/adns.TLSAUsage, *crypto/x509.CertPool) (net.Conn, github.com/mjl-/adns.TLSA, error)
|
||||
- TLSClientConfig: changed from func(*github.com/mjl-/mox/mlog.Log, []github.com/mjl-/adns.TLSA, github.com/mjl-/mox/dns.Domain, []github.com/mjl-/mox/dns.Domain, *github.com/mjl-/adns.TLSA) crypto/tls.Config to func(*golang.org/x/exp/slog.Logger, []github.com/mjl-/adns.TLSA, github.com/mjl-/mox/dns.Domain, []github.com/mjl-/mox/dns.Domain, *github.com/mjl-/adns.TLSA, *crypto/x509.CertPool) crypto/tls.Config
|
||||
- Verify: changed from func(*github.com/mjl-/mox/mlog.Log, []github.com/mjl-/adns.TLSA, crypto/tls.ConnectionState, github.com/mjl-/mox/dns.Domain, []github.com/mjl-/mox/dns.Domain) (bool, github.com/mjl-/adns.TLSA, error) to func(*golang.org/x/exp/slog.Logger, []github.com/mjl-/adns.TLSA, crypto/tls.ConnectionState, github.com/mjl-/mox/dns.Domain, []github.com/mjl-/mox/dns.Domain, *crypto/x509.CertPool) (bool, github.com/mjl-/adns.TLSA, error)
|
||||
|
||||
# dmarc
|
||||
- Lookup: changed from func(context.Context, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain) (Status, github.com/mjl-/mox/dns.Domain, *Record, string, bool, error) to func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain) (Status, github.com/mjl-/mox/dns.Domain, *Record, string, bool, error)
|
||||
- LookupExternalReportsAccepted: changed from func(context.Context, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain, github.com/mjl-/mox/dns.Domain) (bool, Status, []*Record, []string, bool, error) to func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain, github.com/mjl-/mox/dns.Domain) (bool, Status, []*Record, []string, bool, error)
|
||||
- Verify: changed from func(context.Context, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain, []github.com/mjl-/mox/dkim.Result, github.com/mjl-/mox/spf.Status, *github.com/mjl-/mox/dns.Domain, bool) (bool, Result) to func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain, []github.com/mjl-/mox/dkim.Result, github.com/mjl-/mox/spf.Status, *github.com/mjl-/mox/dns.Domain, bool) (bool, Result)
|
||||
|
||||
# dmarcrpt
|
||||
- ParseMessageReport: changed from func(*github.com/mjl-/mox/mlog.Log, io.ReaderAt) (*Feedback, error) to func(*golang.org/x/exp/slog.Logger, io.ReaderAt) (*Feedback, error)
|
||||
|
||||
# dns
|
||||
|
||||
# dnsbl
|
||||
- CheckHealth: changed from func(context.Context, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain) error to func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain) error
|
||||
- Lookup: changed from func(context.Context, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain, net.IP) (Status, string, error) to func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain, net.IP) (Status, string, error)
|
||||
|
||||
# iprev
|
||||
|
||||
# message
|
||||
- (*Part).ParseNextPart: changed from func(*github.com/mjl-/mox/mlog.Log) (*Part, error) to func(*golang.org/x/exp/slog.Logger) (*Part, error)
|
||||
- (*Part).Walk: changed from func(*github.com/mjl-/mox/mlog.Log, *Part) error to func(*golang.org/x/exp/slog.Logger, *Part) error
|
||||
- EnsurePart: changed from func(*github.com/mjl-/mox/mlog.Log, bool, io.ReaderAt, int64) (Part, error) to func(*golang.org/x/exp/slog.Logger, bool, io.ReaderAt, int64) (Part, error)
|
||||
- From: changed from func(*github.com/mjl-/mox/mlog.Log, bool, io.ReaderAt) (github.com/mjl-/mox/smtp.Address, net/textproto.MIMEHeader, error) to func(*golang.org/x/exp/slog.Logger, bool, io.ReaderAt) (github.com/mjl-/mox/smtp.Address, *Envelope, net/textproto.MIMEHeader, error)
|
||||
- Parse: changed from func(*github.com/mjl-/mox/mlog.Log, bool, io.ReaderAt) (Part, error) to func(*golang.org/x/exp/slog.Logger, bool, io.ReaderAt) (Part, error)
|
||||
- TLSReceivedComment: removed
|
||||
|
||||
# mtasts
|
||||
- FetchPolicy: changed from func(context.Context, github.com/mjl-/mox/dns.Domain) (*Policy, string, error) to func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Domain) (*Policy, string, error)
|
||||
- Get: changed from func(context.Context, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain) (*Record, *Policy, string, error) to func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain) (*Record, *Policy, string, error)
|
||||
- LookupRecord: changed from func(context.Context, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain) (*Record, string, error) to func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain) (*Record, string, error)
|
||||
|
||||
# publicsuffix
|
||||
- List.Lookup: changed from func(context.Context, github.com/mjl-/mox/dns.Domain) github.com/mjl-/mox/dns.Domain to func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Domain) github.com/mjl-/mox/dns.Domain
|
||||
- Lookup: changed from func(context.Context, github.com/mjl-/mox/dns.Domain) github.com/mjl-/mox/dns.Domain to func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Domain) github.com/mjl-/mox/dns.Domain
|
||||
- ParseList: changed from func(io.Reader) (List, error) to func(*golang.org/x/exp/slog.Logger, io.Reader) (List, error)
|
||||
|
||||
# ratelimit
|
||||
|
||||
# sasl
|
||||
- NewClientSCRAMSHA1: changed from func(string, string) Client to func(string, string, bool) Client
|
||||
- NewClientSCRAMSHA256: changed from func(string, string) Client to func(string, string, bool) Client
|
||||
|
||||
# scram
|
||||
- HMAC: removed
|
||||
- NewClient: changed from func(func() hash.Hash, string, string) *Client to func(func() hash.Hash, string, string, bool, *crypto/tls.ConnectionState) *Client
|
||||
- NewServer: changed from func(func() hash.Hash, []byte) (*Server, error) to func(func() hash.Hash, []byte, *crypto/tls.ConnectionState, bool) (*Server, error)
|
||||
|
||||
# smtp
|
||||
|
||||
# smtpclient
|
||||
- (*Client).TLSEnabled: removed
|
||||
- Dial: changed from func(context.Context, *github.com/mjl-/mox/mlog.Log, Dialer, github.com/mjl-/mox/dns.IPDomain, []net.IP, int, map[string][]net.IP) (net.Conn, net.IP, error) to func(context.Context, *golang.org/x/exp/slog.Logger, Dialer, github.com/mjl-/mox/dns.IPDomain, []net.IP, int, map[string][]net.IP, []net.IP) (net.Conn, net.IP, error)
|
||||
- GatherDestinations: changed from func(context.Context, *github.com/mjl-/mox/mlog.Log, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.IPDomain) (bool, bool, bool, github.com/mjl-/mox/dns.Domain, []github.com/mjl-/mox/dns.IPDomain, bool, error) to func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.IPDomain) (bool, bool, bool, github.com/mjl-/mox/dns.Domain, []github.com/mjl-/mox/dns.IPDomain, bool, error)
|
||||
- GatherIPs: changed from func(context.Context, *github.com/mjl-/mox/mlog.Log, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.IPDomain, map[string][]net.IP) (bool, bool, github.com/mjl-/mox/dns.Domain, []net.IP, bool, error) to func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.IPDomain, map[string][]net.IP) (bool, bool, github.com/mjl-/mox/dns.Domain, []net.IP, bool, error)
|
||||
- GatherTLSA: changed from func(context.Context, *github.com/mjl-/mox/mlog.Log, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain, bool, github.com/mjl-/mox/dns.Domain) (bool, []github.com/mjl-/adns.TLSA, github.com/mjl-/mox/dns.Domain, error) to func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain, bool, github.com/mjl-/mox/dns.Domain) (bool, []github.com/mjl-/adns.TLSA, github.com/mjl-/mox/dns.Domain, error)
|
||||
- New: changed from func(context.Context, *github.com/mjl-/mox/mlog.Log, net.Conn, TLSMode, bool, github.com/mjl-/mox/dns.Domain, github.com/mjl-/mox/dns.Domain, Opts) (*Client, error) to func(context.Context, *golang.org/x/exp/slog.Logger, net.Conn, TLSMode, bool, github.com/mjl-/mox/dns.Domain, github.com/mjl-/mox/dns.Domain, Opts) (*Client, error)
|
||||
- Opts.Auth: changed from []github.com/mjl-/mox/sasl.Client to func([]string, *crypto/tls.ConnectionState) (github.com/mjl-/mox/sasl.Client, error)
|
||||
|
||||
# spf
|
||||
- Evaluate: changed from func(context.Context, *Record, github.com/mjl-/mox/dns.Resolver, Args) (Status, string, string, bool, error) to func(context.Context, *golang.org/x/exp/slog.Logger, *Record, github.com/mjl-/mox/dns.Resolver, Args) (Status, string, string, bool, error)
|
||||
- Lookup: changed from func(context.Context, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain) (Status, string, *Record, bool, error) to func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain) (Status, string, *Record, bool, error)
|
||||
- Verify: changed from func(context.Context, github.com/mjl-/mox/dns.Resolver, Args) (Received, github.com/mjl-/mox/dns.Domain, string, bool, error) to func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, Args) (Received, github.com/mjl-/mox/dns.Domain, string, bool, error)
|
||||
|
||||
# subjectpass
|
||||
- Generate: changed from func(github.com/mjl-/mox/smtp.Address, []byte, time.Time) string to func(*golang.org/x/exp/slog.Logger, github.com/mjl-/mox/smtp.Address, []byte, time.Time) string
|
||||
- Verify: changed from func(*github.com/mjl-/mox/mlog.Log, io.ReaderAt, []byte, time.Duration) error to func(*golang.org/x/exp/slog.Logger, io.ReaderAt, []byte, time.Duration) error
|
||||
|
||||
# tlsrpt
|
||||
- (*TLSRPTDateRange).UnmarshalJSON: removed
|
||||
- Lookup: changed from func(context.Context, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain) (*Record, string, error) to func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain) (*Record, string, error)
|
||||
- Parse: changed from func(io.Reader) (*Report, error) to func(io.Reader) (*ReportJSON, error)
|
||||
- ParseMessage: changed from func(*github.com/mjl-/mox/mlog.Log, io.ReaderAt) (*Report, error) to func(*golang.org/x/exp/slog.Logger, io.ReaderAt) (*ReportJSON, error)
|
||||
|
||||
# updates
|
||||
- Check: changed from func(context.Context, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain, Version, string, []byte) (Version, *Record, *Changelog, error) to func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain, Version, string, []byte) (Version, *Record, *Changelog, error)
|
||||
- FetchChangelog: changed from func(context.Context, string, Version, []byte) (*Changelog, error) to func(context.Context, *golang.org/x/exp/slog.Logger, string, Version, []byte) (*Changelog, error)
|
||||
- Lookup: changed from func(context.Context, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain) (Version, *Record, error) to func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain) (Version, *Record, error)
|
||||
|
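Nearly all of the signature changes listed above follow one pattern: the logger is no longer an implicit *mlog.Log but an explicit structured logger passed as an extra (usually second) parameter, printed above as *golang.org/x/exp/slog.Logger; the in-tree code in this range imports the standard library log/slog. A minimal sketch of what adapting a caller looks like, using the dnsbl.Lookup signature listed above; the handler setup and the DNSBL zone are illustrative, not taken from mox:

```go
package main

import (
	"context"
	"log/slog"
	"net"
	"os"

	"github.com/mjl-/mox/dns"
	"github.com/mjl-/mox/dnsbl"
)

func main() {
	// Any slog handler works; mox itself constructs its loggers through mlog.
	log := slog.New(slog.NewTextHandler(os.Stderr, nil))

	resolver := dns.StrictResolver{}
	zone := dns.Domain{ASCII: "dnsbl.example.org"} // hypothetical DNSBL zone

	// Before: dnsbl.Lookup(ctx, resolver, zone, ip)
	// After: the logger is passed explicitly as the second argument.
	status, explanation, err := dnsbl.Lookup(context.Background(), log, resolver, zone, net.ParseIP("198.51.100.1"))
	log.Info("dnsbl result", "status", status, "explanation", explanation, "err", err)
}
```

The same mechanical change applies to the dmarc, dmarcrpt, message, mtasts, publicsuffix, smtpclient, spf, subjectpass, tlsrpt and updates functions above.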
@ -2,11 +2,9 @@
|
||||
// requesting certificates with ACME, typically from Let's Encrypt.
|
||||
package autotls
|
||||
|
||||
// We only do tls-alpn-01. For http-01, we would have to start another
|
||||
// listener. For DNS we would need a third party tool with an API that can make
|
||||
// the DNS changes, as we don't want to link in dozens of bespoke API's for DNS
|
||||
// record manipulation into mox. We can do http-01 relatively easily. It could
|
||||
// be useful to not depend on a single mechanism.
|
||||
// We do tls-alpn-01, and also http-01. For DNS we would need a third party tool
|
||||
// with an API that can make the DNS changes, as we don't want to link in dozens of
|
||||
// bespoke API's for DNS record manipulation into mox.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
@ -22,6 +20,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log/slog"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
@ -30,19 +29,37 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/crypto/acme"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"golang.org/x/crypto/acme"
|
||||
"golang.org/x/crypto/acme/autocert"
|
||||
|
||||
"github.com/mjl-/autocert"
|
||||
|
||||
"github.com/mjl-/mox/dns"
|
||||
"github.com/mjl-/mox/mlog"
|
||||
"github.com/mjl-/mox/moxvar"
|
||||
)
|
||||
|
||||
var xlog = mlog.New("autotls")
|
||||
|
||||
var (
|
||||
metricMissingServerName = promauto.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Name: "mox_autotls_missing_servername_total",
|
||||
Help: "Number of failed TLS connection attempts with missing SNI where no fallback hostname was configured.",
|
||||
},
|
||||
)
|
||||
metricUnknownServerName = promauto.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Name: "mox_autotls_unknown_servername_total",
|
||||
Help: "Number of failed TLS connection attempts with an unrecognized SNI name where no fallback hostname was configured.",
|
||||
},
|
||||
)
|
||||
metricCertRequestErrors = promauto.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Name: "mox_autotls_cert_request_errors_total",
|
||||
Help: "Number of errors trying to retrieve a certificate for a hostname, possibly ACME verification errors.",
|
||||
},
|
||||
)
|
||||
metricCertput = promauto.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Name: "mox_autotls_certput_total",
|
||||
@ -55,7 +72,6 @@ var (
|
||||
// certificates for allowlisted hosts.
|
||||
type Manager struct {
|
||||
ACMETLSConfig *tls.Config // For serving HTTPS on port 443, which is required for certificate requests to succeed.
|
||||
TLSConfig *tls.Config // For all TLS servers not used for validating ACME requests. Like SMTP and IMAP (including with STARTTLS) and HTTPS on ports other than 443.
|
||||
Manager *autocert.Manager
|
||||
|
||||
shutdown <-chan struct{}
|
||||
@ -66,10 +82,19 @@ type Manager struct {
|
||||
|
||||
// Load returns an initialized autotls manager for "name" (used for the ACME key
|
||||
// file and requested certs and their keys). All files are stored within acmeDir.
|
||||
//
|
||||
// contactEmail must be a valid email address to which notifications about ACME can
|
||||
// be sent. directoryURL is the ACME starting point. When shutdown is closed, no
|
||||
// new TLS connections can be created.
|
||||
func Load(name, acmeDir, contactEmail, directoryURL string, shutdown <-chan struct{}) (*Manager, error) {
|
||||
// be sent. directoryURL is the ACME starting point.
|
||||
//
|
||||
// eabKeyID and eabKey are for external account binding when making a new account,
|
||||
// which some ACME providers require.
|
||||
//
|
||||
// getPrivateKey is called to get the private key for the host and key type. It
|
||||
// can be used to deliver a specific (e.g. always the same) private key for a
|
||||
// host, or a newly generated key.
|
||||
//
|
||||
// When shutdown is closed, no new TLS connections can be created.
|
||||
func Load(log mlog.Log, name, acmeDir, contactEmail, directoryURL string, eabKeyID string, eabKey []byte, getPrivateKey func(host string, keyType autocert.KeyType) (crypto.Signer, error), shutdown <-chan struct{}) (*Manager, error) {
|
||||
if directoryURL == "" {
|
||||
return nil, fmt.Errorf("empty ACME directory URL")
|
||||
}
|
||||
@ -78,11 +103,14 @@ func Load(name, acmeDir, contactEmail, directoryURL string, shutdown <-chan stru
|
||||
}
|
||||
|
||||
// Load identity key if it exists. Otherwise, create a new key.
|
||||
p := filepath.Join(acmeDir + "/" + name + ".key")
|
||||
p := filepath.Join(acmeDir, name+".key")
|
||||
var key crypto.Signer
|
||||
f, err := os.Open(p)
|
||||
if f != nil {
|
||||
defer f.Close()
|
||||
defer func() {
|
||||
err := f.Close()
|
||||
log.Check(err, "closing identity key file")
|
||||
}()
|
||||
}
|
||||
if err != nil && os.IsNotExist(err) {
|
||||
key, err = ecdsa.GenerateKey(elliptic.P256(), cryptorand.Reader)
|
||||
@ -130,7 +158,7 @@ func Load(name, acmeDir, contactEmail, directoryURL string, shutdown <-chan stru
|
||||
}
|
||||
|
||||
m := &autocert.Manager{
|
||||
Cache: dirCache(acmeDir + "/keycerts/" + name),
|
||||
Cache: dirCache(filepath.Join(acmeDir, "keycerts", name)),
|
||||
Prompt: autocert.AcceptTOS,
|
||||
Email: contactEmail,
|
||||
Client: &acme.Client{
|
||||
@ -138,57 +166,163 @@ func Load(name, acmeDir, contactEmail, directoryURL string, shutdown <-chan stru
|
||||
Key: key,
|
||||
UserAgent: "mox/" + moxvar.Version,
|
||||
},
|
||||
GetPrivateKey: getPrivateKey,
|
||||
// HostPolicy set below.
|
||||
}
|
||||
|
||||
loggingGetCertificate := func(hello *tls.ClientHelloInfo) (*tls.Certificate, error) {
|
||||
log := xlog.WithContext(hello.Context())
|
||||
|
||||
// Handle missing SNI to prevent logging an error below.
|
||||
// At startup, during config initialization, we already adjust the tls config to
|
||||
// inject the listener hostname if there isn't one in the TLS client hello. This is
|
||||
// common for SMTP STARTTLS connections, which often do not care about the
|
||||
// validation of the certificate.
|
||||
if hello.ServerName == "" {
|
||||
log.Debug("tls request without sni servername, rejecting", mlog.Field("localaddr", hello.Conn.LocalAddr()), mlog.Field("supportedprotos", hello.SupportedProtos))
|
||||
return nil, fmt.Errorf("sni server name required")
|
||||
// If external account binding key is provided, use it for registering a new account.
|
||||
// todo: ideally the key and its id are provided temporarily by the admin when registering a new account. but we don't do that interactive setup yet. in the future, an interactive setup/quickstart would ask for the key once to register a new acme account.
|
||||
if eabKeyID != "" {
|
||||
m.ExternalAccountBinding = &acme.ExternalAccountBinding{
|
||||
KID: eabKeyID,
|
||||
Key: eabKey,
|
||||
}
|
||||
|
||||
cert, err := m.GetCertificate(hello)
|
||||
if err != nil {
|
||||
if errors.Is(err, errHostNotAllowed) {
|
||||
log.Debugx("requesting certificate", err, mlog.Field("host", hello.ServerName))
|
||||
} else {
|
||||
log.Errorx("requesting certificate", err, mlog.Field("host", hello.ServerName))
|
||||
}
|
||||
}
|
||||
return cert, err
|
||||
}
|
||||
|
||||
acmeTLSConfig := *m.TLSConfig()
|
||||
acmeTLSConfig.GetCertificate = loggingGetCertificate
|
||||
|
||||
tlsConfig := tls.Config{
|
||||
GetCertificate: loggingGetCertificate,
|
||||
}
|
||||
|
||||
a := &Manager{
|
||||
ACMETLSConfig: &acmeTLSConfig,
|
||||
TLSConfig: &tlsConfig,
|
||||
Manager: m,
|
||||
shutdown: shutdown,
|
||||
hosts: map[dns.Domain]struct{}{},
|
||||
Manager: m,
|
||||
shutdown: shutdown,
|
||||
hosts: map[dns.Domain]struct{}{},
|
||||
}
|
||||
m.HostPolicy = a.HostPolicy
|
||||
acmeTLSConfig := *m.TLSConfig()
|
||||
acmeTLSConfig.GetCertificate = func(hello *tls.ClientHelloInfo) (*tls.Certificate, error) {
|
||||
return a.loggingGetCertificate(hello, dns.Domain{}, false, false)
|
||||
}
|
||||
a.ACMETLSConfig = &acmeTLSConfig
|
||||
return a, nil
|
||||
}
|
||||
|
||||
// loggingGetCertificate is a helper to implement crypto/tls.Config.GetCertificate,
|
||||
// optionally falling back to a certificate for fallbackHostname in case SNI is
|
||||
// absent or for an unknown hostname.
|
||||
func (m *Manager) loggingGetCertificate(hello *tls.ClientHelloInfo, fallbackHostname dns.Domain, fallbackNoSNI, fallbackUnknownSNI bool) (*tls.Certificate, error) {
|
||||
log := mlog.New("autotls", nil).WithContext(hello.Context()).With(
|
||||
slog.Any("localaddr", hello.Conn.LocalAddr()),
|
||||
slog.Any("supportedprotos", hello.SupportedProtos),
|
||||
slog.String("servername", hello.ServerName),
|
||||
)
|
||||
|
||||
// If we can't find a certificate (depending on fallback parameters), we return a
|
||||
// nil certificate and nil error, which crypto/tls turns into a TLS alert
|
||||
// "unrecognized name", which can be interpreted by clients as a hint that they are
|
||||
// using the wrong hostname, or a certificate is missing. ../rfc/9325:578
|
||||
|
||||
// IP addresses for ServerName are not allowed, but happen in practice. If we
|
||||
// should be lenient (fallbackUnknownSNI), we switch to the fallback hostname,
|
||||
// otherwise we return an error. We don't want to pass IP addresses to
|
||||
// GetCertificate because it will return an error for IPv6 addresses.
|
||||
// ../rfc/6066:367 ../rfc/4366:535
|
||||
if net.ParseIP(hello.ServerName) != nil {
|
||||
if fallbackUnknownSNI {
|
||||
hello.ServerName = fallbackHostname.ASCII
|
||||
log = log.With(slog.String("servername", hello.ServerName))
|
||||
} else {
|
||||
log.Debug("tls request with ip for server name, rejecting")
|
||||
return nil, fmt.Errorf("invalid ip address for sni server name")
|
||||
}
|
||||
}
|
||||
|
||||
if hello.ServerName == "" && fallbackNoSNI {
|
||||
hello.ServerName = fallbackHostname.ASCII
|
||||
log = log.With(slog.String("servername", hello.ServerName))
|
||||
}
|
||||
|
||||
// Handle missing SNI to prevent logging an error below.
|
||||
if hello.ServerName == "" {
|
||||
metricMissingServerName.Inc()
|
||||
log.Debug("tls request without sni server name, rejecting")
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
cert, err := m.Manager.GetCertificate(hello)
|
||||
if err != nil && errors.Is(err, errHostNotAllowed) {
|
||||
if !fallbackUnknownSNI {
|
||||
metricUnknownServerName.Inc()
|
||||
log.Debugx("requesting certificate", err)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Some legitimate email deliveries over SMTP use an unknown SNI, e.g. a bare
|
||||
// domain instead of the MX hostname. We "should" return an error, but that would
|
||||
// break email delivery, so we use the fallback name if it is configured.
|
||||
// ../rfc/9325:589
|
||||
|
||||
log = log.With(slog.String("servername", hello.ServerName))
|
||||
log.Debug("certificate for unknown hostname, using fallback hostname")
|
||||
hello.ServerName = fallbackHostname.ASCII
|
||||
cert, err = m.Manager.GetCertificate(hello)
|
||||
if err != nil {
|
||||
metricCertRequestErrors.Inc()
|
||||
log.Errorx("requesting certificate for fallback hostname", err)
|
||||
} else {
|
||||
log.Debug("using certificate for fallback hostname")
|
||||
}
|
||||
return cert, err
|
||||
} else if err != nil {
|
||||
metricCertRequestErrors.Inc()
|
||||
log.Errorx("requesting certificate", err)
|
||||
}
|
||||
return cert, err
|
||||
}
|
||||
|
||||
// TLSConfig returns a TLS server config that optionally returns a certificate for
|
||||
// fallbackHostname if no SNI was done, or for an unknown hostname.
|
||||
//
|
||||
// If fallbackNoSNI is set, TLS connections without SNI will use a certificate for
|
||||
// fallbackHostname. Otherwise, connections without SNI will fail with a message
|
||||
// that no TLS certificate is available.
|
||||
//
|
||||
// If fallbackUnknownSNI is set, TLS connections with an SNI hostname that is not
|
||||
// allowlisted will instead use a certificate for fallbackHostname. Otherwise, such
|
||||
// TLS connections will fail.
|
||||
func (m *Manager) TLSConfig(fallbackHostname dns.Domain, fallbackNoSNI, fallbackUnknownSNI bool) *tls.Config {
|
||||
return &tls.Config{
|
||||
GetCertificate: func(hello *tls.ClientHelloInfo) (*tls.Certificate, error) {
|
||||
return m.loggingGetCertificate(hello, fallbackHostname, fallbackNoSNI, fallbackUnknownSNI)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// CertAvailable checks whether a non-expired ECDSA certificate is available in the
|
||||
// cache for host. No other checks than expiration are done.
|
||||
func (m *Manager) CertAvailable(ctx context.Context, log mlog.Log, host dns.Domain) (bool, error) {
|
||||
ck := host.ASCII // Would be "+rsa" for rsa keys.
|
||||
data, err := m.Manager.Cache.Get(ctx, ck)
|
||||
if err != nil && errors.Is(err, autocert.ErrCacheMiss) {
|
||||
return false, nil
|
||||
} else if err != nil {
|
||||
return false, fmt.Errorf("attempt to get certificate from cache: %v", err)
|
||||
}
|
||||
|
||||
// The cached keycert is of the form: private key, leaf certificate, intermediate certificates...
|
||||
privb, rem := pem.Decode(data)
|
||||
if privb == nil {
|
||||
return false, fmt.Errorf("missing private key in cached keycert file")
|
||||
}
|
||||
pubb, _ := pem.Decode(rem)
|
||||
if pubb == nil {
|
||||
return false, fmt.Errorf("missing certificate in cached keycert file")
|
||||
} else if pubb.Type != "CERTIFICATE" {
|
||||
return false, fmt.Errorf("second pem block is %q, expected CERTIFICATE", pubb.Type)
|
||||
}
|
||||
cert, err := x509.ParseCertificate(pubb.Bytes)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("parsing certificate from cached keycert file: %v", err)
|
||||
}
|
||||
// We assume the certificate has a matching hostname, and is properly CA-signed. We
|
||||
// only check the expiration time.
|
||||
if time.Until(cert.NotBefore) > 0 || time.Since(cert.NotAfter) > 0 {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// SetAllowedHostnames sets a new list of allowed hostnames for automatic TLS.
|
||||
// After setting the host names, a goroutine is started to check that new host names
|
||||
// are fully served by publicIPs (only if non-empty and there is no unspecified
|
||||
// address in the list). If not, an error is logged with a warning that ACME validation
|
||||
// may fail.
|
||||
func (m *Manager) SetAllowedHostnames(resolver dns.Resolver, hostnames map[dns.Domain]struct{}, publicIPs []string, checkHosts bool) {
|
||||
func (m *Manager) SetAllowedHostnames(log mlog.Log, resolver dns.Resolver, hostnames map[dns.Domain]struct{}, publicIPs []string, checkHosts bool) {
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
|
||||
@ -201,7 +335,7 @@ func (m *Manager) SetAllowedHostnames(resolver dns.Resolver, hostnames map[dns.D
|
||||
return l[i].Name() < l[j].Name()
|
||||
})
|
||||
|
||||
xlog.Debug("autotls setting allowed hostnames", mlog.Field("hostnames", l), mlog.Field("publicips", publicIPs))
|
||||
log.Debug("autotls setting allowed hostnames", slog.Any("hostnames", l), slog.Any("publicips", publicIPs))
|
||||
var added []dns.Domain
|
||||
for h := range hostnames {
|
||||
if _, ok := m.hosts[h]; !ok {
|
||||
@ -225,16 +359,20 @@ func (m *Manager) SetAllowedHostnames(resolver dns.Resolver, hostnames map[dns.D
|
||||
publicIPstrs[ip] = struct{}{}
|
||||
}
|
||||
|
||||
xlog.Debug("checking ips of hosts configured for acme tls cert validation")
|
||||
log.Debug("checking ips of hosts configured for acme tls cert validation")
|
||||
for _, h := range added {
|
||||
ips, err := resolver.LookupIP(ctx, "ip", h.ASCII+".")
|
||||
ips, _, err := resolver.LookupIP(ctx, "ip", h.ASCII+".")
|
||||
if err != nil {
|
||||
xlog.Errorx("warning: acme tls cert validation for host may fail due to dns lookup error", err, mlog.Field("host", h))
|
||||
log.Warnx("acme tls cert validation for host may fail due to dns lookup error", err, slog.Any("host", h))
|
||||
continue
|
||||
}
|
||||
for _, ip := range ips {
|
||||
if _, ok := publicIPstrs[ip.String()]; !ok {
|
||||
xlog.Error("warning: acme tls cert validation for host is likely to fail because not all its ips are being listened on", mlog.Field("hostname", h), mlog.Field("listenedips", publicIPs), mlog.Field("hostips", ips), mlog.Field("missingip", ip))
|
||||
log.Warn("acme tls cert validation for host is likely to fail because not all its ips are being listened on",
|
||||
slog.Any("hostname", h),
|
||||
slog.Any("listenedips", publicIPs),
|
||||
slog.Any("hostips", ips),
|
||||
slog.Any("missingip", ip))
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -257,12 +395,12 @@ var errHostNotAllowed = errors.New("autotls: host not in allowlist")
|
||||
|
||||
// HostPolicy decides if a host is allowed for use with ACME, i.e. whether a
|
||||
// certificate will be returned if present and/or will be requested if not yet
|
||||
// present. Only hosts added with AllowHostname are allowed. During shutdown, no
|
||||
// new connections are allowed.
|
||||
// present. Only hosts added with SetAllowedHostnames are allowed. During shutdown,
|
||||
// no new connections are allowed.
|
||||
func (m *Manager) HostPolicy(ctx context.Context, host string) (rerr error) {
|
||||
log := xlog.WithContext(ctx)
|
||||
log := mlog.New("autotls", nil).WithContext(ctx)
|
||||
defer func() {
|
||||
log.WithContext(ctx).Debugx("autotls hostpolicy result", rerr, mlog.Field("host", host))
|
||||
log.Debugx("autotls hostpolicy result", rerr, slog.String("host", host))
|
||||
}()
|
||||
|
||||
// Don't request new TLS certs when we are shutting down.
|
||||
@ -272,6 +410,12 @@ func (m *Manager) HostPolicy(ctx context.Context, host string) (rerr error) {
|
||||
default:
|
||||
}
|
||||
|
||||
xhost, _, err := net.SplitHostPort(host)
|
||||
if err == nil {
|
||||
// For http-01, host may include a port number.
|
||||
host = xhost
|
||||
}
|
||||
|
||||
d, err := dns.ParseDomain(host)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid host: %v", err)
|
||||
@ -288,46 +432,46 @@ func (m *Manager) HostPolicy(ctx context.Context, host string) (rerr error) {
|
||||
type dirCache autocert.DirCache
|
||||
|
||||
func (d dirCache) Delete(ctx context.Context, name string) (rerr error) {
|
||||
log := xlog.WithContext(ctx)
|
||||
log := mlog.New("autotls", nil).WithContext(ctx)
|
||||
defer func() {
|
||||
log.Debugx("dircache delete result", rerr, mlog.Field("name", name))
|
||||
log.Debugx("dircache delete result", rerr, slog.String("name", name))
|
||||
}()
|
||||
err := autocert.DirCache(d).Delete(ctx, name)
|
||||
if err != nil {
|
||||
log.Errorx("deleting cert from dir cache", err, mlog.Field("name", name))
|
||||
log.Errorx("deleting cert from dir cache", err, slog.String("name", name))
|
||||
} else if !strings.HasSuffix(name, "+token") {
|
||||
log.Info("autotls cert delete", mlog.Field("name", name))
|
||||
log.Info("autotls cert delete", slog.String("name", name))
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (d dirCache) Get(ctx context.Context, name string) (rbuf []byte, rerr error) {
|
||||
log := xlog.WithContext(ctx)
|
||||
log := mlog.New("autotls", nil).WithContext(ctx)
|
||||
defer func() {
|
||||
log.Debugx("dircache get result", rerr, mlog.Field("name", name))
|
||||
log.Debugx("dircache get result", rerr, slog.String("name", name))
|
||||
}()
|
||||
buf, err := autocert.DirCache(d).Get(ctx, name)
|
||||
if err != nil && errors.Is(err, autocert.ErrCacheMiss) {
|
||||
log.Infox("getting cert from dir cache", err, mlog.Field("name", name))
|
||||
log.Infox("getting cert from dir cache", err, slog.String("name", name))
|
||||
} else if err != nil {
|
||||
log.Errorx("getting cert from dir cache", err, mlog.Field("name", name))
|
||||
log.Errorx("getting cert from dir cache", err, slog.String("name", name))
|
||||
} else if !strings.HasSuffix(name, "+token") {
|
||||
log.Debug("autotls cert get", mlog.Field("name", name))
|
||||
log.Debug("autotls cert get", slog.String("name", name))
|
||||
}
|
||||
return buf, err
|
||||
}
|
||||
|
||||
func (d dirCache) Put(ctx context.Context, name string, data []byte) (rerr error) {
|
||||
log := xlog.WithContext(ctx)
|
||||
log := mlog.New("autotls", nil).WithContext(ctx)
|
||||
defer func() {
|
||||
log.Debugx("dircache put result", rerr, mlog.Field("name", name))
|
||||
log.Debugx("dircache put result", rerr, slog.String("name", name))
|
||||
}()
|
||||
metricCertput.Inc()
|
||||
err := autocert.DirCache(d).Put(ctx, name, data)
|
||||
if err != nil {
|
||||
log.Errorx("storing cert in dir cache", err, mlog.Field("name", name))
|
||||
log.Errorx("storing cert in dir cache", err, slog.String("name", name))
|
||||
} else if !strings.HasSuffix(name, "+token") {
|
||||
log.Info("autotls cert store", mlog.Field("name", name))
|
||||
log.Info("autotls cert store", slog.String("name", name))
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
@ -2,22 +2,30 @@ package autotls
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"golang.org/x/crypto/acme/autocert"
|
||||
"github.com/mjl-/autocert"
|
||||
|
||||
"github.com/mjl-/mox/dns"
|
||||
"github.com/mjl-/mox/mlog"
|
||||
)
|
||||
|
||||
func TestAutotls(t *testing.T) {
|
||||
log := mlog.New("autotls", nil)
|
||||
os.RemoveAll("../testdata/autotls")
|
||||
os.MkdirAll("../testdata/autotls", 0770)
|
||||
|
||||
shutdown := make(chan struct{})
|
||||
m, err := Load("test", "../testdata/autotls", "mox@localhost", "https://localhost/", shutdown)
|
||||
|
||||
getPrivateKey := func(host string, keyType autocert.KeyType) (crypto.Signer, error) {
|
||||
return nil, fmt.Errorf("not used")
|
||||
}
|
||||
m, err := Load(log, "test", "../testdata/autotls", "mox@localhost", "https://localhost/", "", nil, getPrivateKey, shutdown)
|
||||
if err != nil {
|
||||
t.Fatalf("load manager: %v", err)
|
||||
}
|
||||
@ -28,7 +36,7 @@ func TestAutotls(t *testing.T) {
|
||||
if err := m.HostPolicy(context.Background(), "mox.example"); err == nil || !errors.Is(err, errHostNotAllowed) {
|
||||
t.Fatalf("hostpolicy, got err %v, expected errHostNotAllowed", err)
|
||||
}
|
||||
m.SetAllowedHostnames(dns.StrictResolver{}, map[dns.Domain]struct{}{{ASCII: "mox.example"}: {}}, nil, false)
|
||||
m.SetAllowedHostnames(log, dns.MockResolver{}, map[dns.Domain]struct{}{{ASCII: "mox.example"}: {}}, nil, false)
|
||||
l = m.Hostnames()
|
||||
if !reflect.DeepEqual(l, []dns.Domain{{ASCII: "mox.example"}}) {
|
||||
t.Fatalf("hostnames, got %v, expected single mox.example", l)
|
||||
@ -36,6 +44,9 @@ func TestAutotls(t *testing.T) {
|
||||
if err := m.HostPolicy(context.Background(), "mox.example"); err != nil {
|
||||
t.Fatalf("hostpolicy, got err %v, expected no error", err)
|
||||
}
|
||||
if err := m.HostPolicy(context.Background(), "mox.example:80"); err != nil {
|
||||
t.Fatalf("hostpolicy, got err %v, expected no error", err)
|
||||
}
|
||||
if err := m.HostPolicy(context.Background(), "other.mox.example"); err == nil || !errors.Is(err, errHostNotAllowed) {
|
||||
t.Fatalf("hostpolicy, got err %v, expected errHostNotAllowed", err)
|
||||
}
|
||||
@ -71,7 +82,7 @@ func TestAutotls(t *testing.T) {
|
||||
|
||||
key0 := m.Manager.Client.Key
|
||||
|
||||
m, err = Load("test", "../testdata/autotls", "mox@localhost", "https://localhost/", shutdown)
|
||||
m, err = Load(log, "test", "../testdata/autotls", "mox@localhost", "https://localhost/", "", nil, getPrivateKey, shutdown)
|
||||
if err != nil {
|
||||
t.Fatalf("load manager again: %v", err)
|
||||
}
|
||||
@ -79,12 +90,12 @@ func TestAutotls(t *testing.T) {
|
||||
t.Fatalf("private key changed after reload")
|
||||
}
|
||||
m.shutdown = make(chan struct{})
|
||||
m.SetAllowedHostnames(dns.StrictResolver{}, map[dns.Domain]struct{}{{ASCII: "mox.example"}: {}}, nil, false)
|
||||
m.SetAllowedHostnames(log, dns.MockResolver{}, map[dns.Domain]struct{}{{ASCII: "mox.example"}: {}}, nil, false)
|
||||
if err := m.HostPolicy(context.Background(), "mox.example"); err != nil {
|
||||
t.Fatalf("hostpolicy, got err %v, expected no error", err)
|
||||
}
|
||||
|
||||
m2, err := Load("test2", "../testdata/autotls", "mox@localhost", "https://localhost/", shutdown)
|
||||
m2, err := Load(log, "test2", "../testdata/autotls", "mox@localhost", "https://localhost/", "", nil, nil, shutdown)
|
||||
if err != nil {
|
||||
t.Fatalf("load another manager: %v", err)
|
||||
}
|
||||
|
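Taken together, the autotls changes above add external account binding, a pluggable per-host private key callback, and SNI fallback handling. A rough usage sketch based only on the signatures shown in this diff; the import path for the autotls package, the directory, hostname and ACME URL values are illustrative, and error handling is trimmed:

```go
package example

import (
	"crypto"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"

	"github.com/mjl-/autocert"

	"github.com/mjl-/mox/autotls"
	"github.com/mjl-/mox/dns"
	"github.com/mjl-/mox/mlog"
)

func setupTLS() error {
	log := mlog.New("example", nil)
	shutdown := make(chan struct{})

	// getPrivateKey may return a fixed per-host key; here it just generates one.
	getPrivateKey := func(host string, keyType autocert.KeyType) (crypto.Signer, error) {
		return ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	}

	// Empty eabKeyID and nil eabKey: no external account binding.
	m, err := autotls.Load(log, "letsencrypt", "data/acme", "postmaster@mail.example.org",
		"https://acme-v02.api.letsencrypt.org/directory", "", nil, getPrivateKey, shutdown)
	if err != nil {
		return err
	}

	m.SetAllowedHostnames(log, dns.StrictResolver{}, map[dns.Domain]struct{}{{ASCII: "mail.example.org"}: {}}, nil, false)

	// Serve m.ACMETLSConfig on port 443 for tls-alpn-01. For other listeners, fall
	// back to the mail.example.org certificate when a client sends no SNI, or an
	// SNI name that is not in the allowlist.
	tlsConfig := m.TLSConfig(dns.Domain{ASCII: "mail.example.org"}, true, true)
	_ = tlsConfig
	return nil
}
```

CertAvailable, shown in the diff above, can then be used at startup to check whether a non-expired certificate for a host is already in the cache.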
698
backup.go
Normal file
698
backup.go
Normal file
@ -0,0 +1,698 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"log/slog"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/mjl-/bstore"
|
||||
|
||||
"github.com/mjl-/mox/dmarcdb"
|
||||
"github.com/mjl-/mox/mox-"
|
||||
"github.com/mjl-/mox/moxvar"
|
||||
"github.com/mjl-/mox/mtastsdb"
|
||||
"github.com/mjl-/mox/queue"
|
||||
"github.com/mjl-/mox/store"
|
||||
"github.com/mjl-/mox/tlsrptdb"
|
||||
)
|
||||
|
||||
func xbackupctl(ctx context.Context, xctl *ctl) {
|
||||
/* protocol:
|
||||
> "backup"
|
||||
> destdir
|
||||
> "verbose" or ""
|
||||
< stream
|
||||
< "ok" or error
|
||||
*/
|
||||
|
||||
// Convention in this function: variables containing "src" or "dst" are file system
|
||||
// paths that can be passed to os.Open and such. Variables with dirs/paths without
|
||||
// "src" or "dst" are incomplete paths relative to the source or destination data
|
||||
// directories.
|
||||
|
||||
dstDir := xctl.xread()
|
||||
verbose := xctl.xread() == "verbose"
|
||||
|
||||
// Set when an error is encountered. At the end, we warn if set.
|
||||
var incomplete bool
|
||||
|
||||
// We'll be writing output, and logging both to mox and the ctl stream.
|
||||
xwriter := xctl.writer()
|
||||
|
||||
// Format easily readable output for the user.
|
||||
formatLog := func(prefix, text string, err error, attrs ...slog.Attr) []byte {
|
||||
var b bytes.Buffer
|
||||
fmt.Fprint(&b, prefix)
|
||||
fmt.Fprint(&b, text)
|
||||
if err != nil {
|
||||
fmt.Fprint(&b, ": "+err.Error())
|
||||
}
|
||||
for _, a := range attrs {
|
||||
fmt.Fprintf(&b, "; %s=%v", a.Key, a.Value)
|
||||
}
|
||||
fmt.Fprint(&b, "\n")
|
||||
return b.Bytes()
|
||||
}
|
||||
|
||||
// Log an error to both the mox service and the user running "mox backup".
|
||||
pkglogx := func(prefix, text string, err error, attrs ...slog.Attr) {
|
||||
xctl.log.Errorx(text, err, attrs...)
|
||||
xwriter.Write(formatLog(prefix, text, err, attrs...))
|
||||
}
|
||||
|
||||
// Log an error but don't mark backup as failed.
|
||||
xwarnx := func(text string, err error, attrs ...slog.Attr) {
|
||||
pkglogx("warning: ", text, err, attrs...)
|
||||
}
|
||||
|
||||
// Log an error that causes the backup to be marked as failed. We typically
|
||||
// continue processing though.
|
||||
xerrx := func(text string, err error, attrs ...slog.Attr) {
|
||||
incomplete = true
|
||||
pkglogx("error: ", text, err, attrs...)
|
||||
}
|
||||
|
||||
// If verbose is enabled, log to the cli command. Always log as info level.
|
||||
xvlog := func(text string, attrs ...slog.Attr) {
|
||||
xctl.log.Info(text, attrs...)
|
||||
if verbose {
|
||||
xwriter.Write(formatLog("", text, nil, attrs...))
|
||||
}
|
||||
}
|
||||
|
||||
dstConfigDir := filepath.Join(dstDir, "config")
|
||||
dstDataDir := filepath.Join(dstDir, "data")
|
||||
|
||||
// Warn if directories already exist, will likely cause failures when trying to
|
||||
// write files that already exist.
|
||||
if _, err := os.Stat(dstConfigDir); err == nil {
|
||||
xwarnx("destination config directory already exists", nil, slog.String("configdir", dstConfigDir))
|
||||
}
|
||||
if _, err := os.Stat(dstDataDir); err == nil {
|
||||
xwarnx("destination data directory already exists", nil, slog.String("datadir", dstDataDir))
|
||||
}
|
||||
|
||||
os.MkdirAll(dstDir, 0770)
|
||||
os.MkdirAll(dstConfigDir, 0770)
|
||||
os.MkdirAll(dstDataDir, 0770)
|
||||
|
||||
// Copy all files in the config dir.
|
||||
srcConfigDir := filepath.Clean(mox.ConfigDirPath("."))
|
||||
err := filepath.WalkDir(srcConfigDir, func(srcPath string, d fs.DirEntry, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if srcConfigDir == srcPath {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Trim directory and separator.
|
||||
relPath := srcPath[len(srcConfigDir)+1:]
|
||||
|
||||
destPath := filepath.Join(dstConfigDir, relPath)
|
||||
|
||||
if d.IsDir() {
|
||||
if info, err := os.Stat(srcPath); err != nil {
|
||||
return fmt.Errorf("stat config dir %s: %v", srcPath, err)
|
||||
} else if err := os.Mkdir(destPath, info.Mode()&0777); err != nil {
|
||||
return fmt.Errorf("mkdir %s: %v", destPath, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if d.Type()&fs.ModeSymlink != 0 {
|
||||
linkDest, err := os.Readlink(srcPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("reading symlink %s: %v", srcPath, err)
|
||||
}
|
||||
if err := os.Symlink(linkDest, destPath); err != nil {
|
||||
return fmt.Errorf("creating symlink %s: %v", destPath, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if !d.Type().IsRegular() {
|
||||
xwarnx("skipping non-regular/dir/symlink file in config dir", nil, slog.String("path", srcPath))
|
||||
return nil
|
||||
}
|
||||
|
||||
sf, err := os.Open(srcPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("open config file %s: %v", srcPath, err)
|
||||
}
|
||||
info, err := sf.Stat()
|
||||
if err != nil {
|
||||
return fmt.Errorf("stat config file %s: %v", srcPath, err)
|
||||
}
|
||||
df, err := os.OpenFile(destPath, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0777&info.Mode())
|
||||
if err != nil {
|
||||
return fmt.Errorf("create destination config file %s: %v", destPath, err)
|
||||
}
|
||||
defer func() {
|
||||
if df != nil {
|
||||
err := df.Close()
|
||||
xctl.log.Check(err, "closing file")
|
||||
}
|
||||
}()
|
||||
defer func() {
|
||||
err := sf.Close()
|
||||
xctl.log.Check(err, "closing file")
|
||||
}()
|
||||
if _, err := io.Copy(df, sf); err != nil {
|
||||
return fmt.Errorf("copying config file %s to %s: %v", srcPath, destPath, err)
|
||||
}
|
||||
if err := df.Close(); err != nil {
|
||||
return fmt.Errorf("closing destination config file %s: %v", destPath, err)
|
||||
}
|
||||
df = nil
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
xerrx("storing config directory", err)
|
||||
}
|
||||
|
||||
srcDataDir := filepath.Clean(mox.DataDirPath("."))
|
||||
|
||||
// When creating a file in the destination, we first ensure its directory exists.
|
||||
// We track which directories we created, to prevent needless syscalls.
|
||||
createdDirs := map[string]struct{}{}
|
||||
ensureDestDir := func(dstpath string) {
|
||||
dstdir := filepath.Dir(dstpath)
|
||||
if _, ok := createdDirs[dstdir]; !ok {
|
||||
err := os.MkdirAll(dstdir, 0770)
|
||||
if err != nil {
|
||||
xerrx("creating directory", err)
|
||||
}
|
||||
createdDirs[dstdir] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
// Back up a single file by copying (never hardlinking, the file may change).
|
||||
backupFile := func(path string) {
|
||||
tmFile := time.Now()
|
||||
srcpath := filepath.Join(srcDataDir, path)
|
||||
dstpath := filepath.Join(dstDataDir, path)
|
||||
|
||||
sf, err := os.Open(srcpath)
|
||||
if err != nil {
|
||||
xerrx("open source file (not backed up)", err, slog.String("srcpath", srcpath), slog.String("dstpath", dstpath))
|
||||
return
|
||||
}
|
||||
defer func() {
|
||||
err := sf.Close()
|
||||
xctl.log.Check(err, "closing source file")
|
||||
}()
|
||||
|
||||
ensureDestDir(dstpath)
|
||||
df, err := os.OpenFile(dstpath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0660)
|
||||
if err != nil {
|
||||
xerrx("creating destination file (not backed up)", err, slog.String("srcpath", srcpath), slog.String("dstpath", dstpath))
|
||||
return
|
||||
}
|
||||
defer func() {
|
||||
if df != nil {
|
||||
err := df.Close()
|
||||
xctl.log.Check(err, "closing destination file")
|
||||
}
|
||||
}()
|
||||
if _, err := io.Copy(df, sf); err != nil {
|
||||
xerrx("copying file (not backed up properly)", err, slog.String("srcpath", srcpath), slog.String("dstpath", dstpath))
|
||||
return
|
||||
}
|
||||
err = df.Close()
|
||||
df = nil
|
||||
if err != nil {
|
||||
xerrx("closing destination file (not backed up properly)", err, slog.String("srcpath", srcpath), slog.String("dstpath", dstpath))
|
||||
return
|
||||
}
|
||||
xvlog("backed up file", slog.String("path", path), slog.Duration("duration", time.Since(tmFile)))
|
||||
}
|
||||
|
||||
// Back up the files in a directory (by copying).
|
||||
backupDir := func(dir string) {
|
||||
tmDir := time.Now()
|
||||
srcdir := filepath.Join(srcDataDir, dir)
|
||||
dstdir := filepath.Join(dstDataDir, dir)
|
||||
err := filepath.WalkDir(srcdir, func(srcpath string, d fs.DirEntry, err error) error {
|
||||
if err != nil {
|
||||
xerrx("walking file (not backed up)", err, slog.String("srcpath", srcpath))
|
||||
return nil
|
||||
}
|
||||
if d.IsDir() {
|
||||
return nil
|
||||
}
|
||||
backupFile(srcpath[len(srcDataDir)+1:])
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
xerrx("copying directory (not backed up properly)", err,
|
||||
slog.String("srcdir", srcdir),
|
||||
slog.String("dstdir", dstdir),
|
||||
slog.Duration("duration", time.Since(tmDir)))
|
||||
return
|
||||
}
|
||||
xvlog("backed up directory", slog.String("dir", dir), slog.Duration("duration", time.Since(tmDir)))
|
||||
}
|
||||
|
||||
// Back up a database by copying it in a readonly transaction. Wrapped by backupDB
|
||||
// which logs and returns just a bool.
|
||||
backupDB0 := func(db *bstore.DB, path string) error {
|
||||
dstpath := filepath.Join(dstDataDir, path)
|
||||
ensureDestDir(dstpath)
|
||||
df, err := os.OpenFile(dstpath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0660)
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating destination file: %v", err)
|
||||
}
|
||||
defer func() {
|
||||
if df != nil {
|
||||
err := df.Close()
|
||||
xctl.log.Check(err, "closing destination database file")
|
||||
}
|
||||
}()
|
||||
err = db.Read(ctx, func(tx *bstore.Tx) error {
|
||||
// Using regular WriteTo seems fine, and fast. It just copies pages.
|
||||
//
|
||||
// bolt.Compact is slower, it writes all key/value pairs, building up new data
|
||||
// structures. My compacted test database was ~60% of original size. Lz4 on the
|
||||
// uncompacted database got it to 14%. Lz4 on the compacted database got it to 13%.
|
||||
// Backups are likely archived somewhere with compression, so we don't compact.
|
||||
//
|
||||
// Tests with WriteTo and os.O_DIRECT were slower than without O_DIRECT, but
|
||||
// probably because everything fit in the page cache. It may be better to use
|
||||
// O_DIRECT when copying many large or inactive databases.
|
||||
_, err := tx.WriteTo(df)
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("copying database: %v", err)
|
||||
}
|
||||
err = df.Close()
|
||||
df = nil
|
||||
if err != nil {
|
||||
return fmt.Errorf("closing destination database after copy: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
backupDB := func(db *bstore.DB, path string) bool {
|
||||
start := time.Now()
|
||||
err := backupDB0(db, path)
|
||||
if err != nil {
|
||||
xerrx("backing up database", err, slog.String("path", path), slog.Duration("duration", time.Since(start)))
|
||||
return false
|
||||
}
|
||||
xvlog("backed up database file", slog.String("path", path), slog.Duration("duration", time.Since(start)))
|
||||
return true
|
||||
}
|
||||
|
||||
// Try to create a hardlink. Fall back to copying the file (e.g. when on different file system).
|
||||
warnedHardlink := false // We warn once about failing to hardlink.
|
||||
linkOrCopy := func(srcpath, dstpath string) (bool, error) {
|
||||
ensureDestDir(dstpath)
|
||||
|
||||
if err := os.Link(srcpath, dstpath); err == nil {
|
||||
return true, nil
|
||||
} else if os.IsNotExist(err) {
|
||||
// No point in trying with regular copy, we would warn twice.
|
||||
return false, err
|
||||
} else if !warnedHardlink {
|
||||
var hardlinkHint string
|
||||
if runtime.GOOS == "linux" && errors.Is(err, syscall.EXDEV) {
|
||||
hardlinkHint = " (hint: if running under systemd, ReadWritePaths in mox.service may cause multiple mountpoints; consider merging paths into a single parent directory to prevent cross-device/mountpoint hardlinks)"
|
||||
}
|
||||
xwarnx("creating hardlink to message failed, will be doing regular file copies and not warn again"+hardlinkHint, err, slog.String("srcpath", srcpath), slog.String("dstpath", dstpath))
|
||||
warnedHardlink = true
|
||||
}
|
||||
|
||||
// Fall back to copying.
|
||||
sf, err := os.Open(srcpath)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("open source path %s: %v", srcpath, err)
|
||||
}
|
||||
defer func() {
|
||||
err := sf.Close()
|
||||
xctl.log.Check(err, "closing copied source file")
|
||||
}()
|
||||
|
||||
df, err := os.OpenFile(dstpath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0660)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("create destination path %s: %v", dstpath, err)
|
||||
}
|
||||
defer func() {
|
||||
if df != nil {
|
||||
err := df.Close()
|
||||
xctl.log.Check(err, "closing partial destination file")
|
||||
}
|
||||
}()
|
||||
if _, err := io.Copy(df, sf); err != nil {
|
||||
return false, fmt.Errorf("copying: %v", err)
|
||||
}
|
||||
err = df.Close()
|
||||
df = nil
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("closing destination file: %v", err)
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Start making the backup.
|
||||
tmStart := time.Now()
|
||||
|
||||
xctl.log.Print("making backup", slog.String("destdir", dstDataDir))
|
||||
|
||||
if err := os.MkdirAll(dstDataDir, 0770); err != nil {
|
||||
xerrx("creating destination data directory", err)
|
||||
}
|
||||
|
||||
if err := os.WriteFile(filepath.Join(dstDataDir, "moxversion"), []byte(moxvar.Version), 0660); err != nil {
|
||||
xerrx("writing moxversion", err)
|
||||
}
|
||||
backupDB(store.AuthDB, "auth.db")
|
||||
backupDB(dmarcdb.ReportsDB, "dmarcrpt.db")
|
||||
backupDB(dmarcdb.EvalDB, "dmarceval.db")
|
||||
backupDB(mtastsdb.DB, "mtasts.db")
|
||||
backupDB(tlsrptdb.ReportDB, "tlsrpt.db")
|
||||
backupDB(tlsrptdb.ResultDB, "tlsrptresult.db")
|
||||
backupFile("receivedid.key")
|
||||
|
||||
// Acme directory is optional.
|
||||
srcAcmeDir := filepath.Join(srcDataDir, "acme")
|
||||
if _, err := os.Stat(srcAcmeDir); err == nil {
|
||||
backupDir("acme")
|
||||
} else if !os.IsNotExist(err) {
|
||||
xerrx("copying acme/", err)
|
||||
}
|
||||
|
||||
// Copy the queue database and all message files.
|
||||
backupQueue := func(path string) {
|
||||
tmQueue := time.Now()
|
||||
|
||||
if !backupDB(queue.DB, path) {
|
||||
return
|
||||
}
|
||||
|
||||
dstdbpath := filepath.Join(dstDataDir, path)
|
||||
opts := bstore.Options{MustExist: true, RegisterLogger: xctl.log.Logger}
|
||||
db, err := bstore.Open(ctx, dstdbpath, &opts, queue.DBTypes...)
|
||||
if err != nil {
|
||||
xerrx("open copied queue database", err, slog.String("dstpath", dstdbpath), slog.Duration("duration", time.Since(tmQueue)))
|
||||
return
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if db != nil {
|
||||
err := db.Close()
|
||||
xctl.log.Check(err, "closing new queue db")
|
||||
}
|
||||
}()
|
||||
|
||||
// Link/copy known message files. If a message has been removed while we read the
|
||||
// database, our backup is not consistent and the backup will be marked failed.
|
||||
tmMsgs := time.Now()
|
||||
seen := map[string]struct{}{}
|
||||
var nlinked, ncopied int
|
||||
var maxID int64
|
||||
err = bstore.QueryDB[queue.Msg](ctx, db).ForEach(func(m queue.Msg) error {
|
||||
if m.ID > maxID {
|
||||
maxID = m.ID
|
||||
}
|
||||
mp := store.MessagePath(m.ID)
|
||||
seen[mp] = struct{}{}
|
||||
srcpath := filepath.Join(srcDataDir, "queue", mp)
|
||||
dstpath := filepath.Join(dstDataDir, "queue", mp)
|
||||
if linked, err := linkOrCopy(srcpath, dstpath); err != nil {
|
||||
xerrx("linking/copying queue message", err, slog.String("srcpath", srcpath), slog.String("dstpath", dstpath))
|
||||
} else if linked {
|
||||
nlinked++
|
||||
} else {
|
||||
ncopied++
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
xerrx("processing queue messages (not backed up properly)", err, slog.Duration("duration", time.Since(tmMsgs)))
|
||||
} else {
|
||||
xvlog("queue message files linked/copied",
|
||||
slog.Int("linked", nlinked),
|
||||
slog.Int("copied", ncopied),
|
||||
slog.Duration("duration", time.Since(tmMsgs)))
|
||||
}
|
||||
|
||||
// Read through all files in queue directory and warn about anything we haven't
|
||||
// handled yet. Message files that are newer than we expect from our consistent
|
||||
// database snapshot are ignored.
|
||||
tmWalk := time.Now()
|
||||
srcqdir := filepath.Join(srcDataDir, "queue")
|
||||
err = filepath.WalkDir(srcqdir, func(srcqpath string, d fs.DirEntry, err error) error {
|
||||
if err != nil {
|
||||
xerrx("walking files in queue", err, slog.String("srcpath", srcqpath))
|
||||
return nil
|
||||
}
|
||||
if d.IsDir() {
|
||||
return nil
|
||||
}
|
||||
p := srcqpath[len(srcqdir)+1:]
|
||||
if _, ok := seen[p]; ok {
|
||||
return nil
|
||||
}
|
||||
if p == "index.db" {
|
||||
return nil
|
||||
}
|
||||
// Skip any messages that were added since we started on our consistent snapshot.
|
||||
// We don't want to cause spurious backup warnings.
|
||||
if id, err := strconv.ParseInt(filepath.Base(p), 10, 64); err == nil && maxID > 0 && id > maxID && p == store.MessagePath(id) {
|
||||
return nil
|
||||
}
|
||||
|
||||
qp := filepath.Join("queue", p)
|
||||
xwarnx("backing up unrecognized file in queue directory", nil, slog.String("path", qp))
|
||||
backupFile(qp)
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
xerrx("walking queue directory (not backed up properly)", err, slog.String("dir", "queue"), slog.Duration("duration", time.Since(tmWalk)))
|
||||
} else {
|
||||
xvlog("walked queue directory", slog.Duration("duration", time.Since(tmWalk)))
|
||||
}
|
||||
|
||||
xvlog("queue backup finished", slog.Duration("duration", time.Since(tmQueue)))
|
||||
}
|
||||
backupQueue(filepath.FromSlash("queue/index.db"))
|
||||
|
||||
backupAccount := func(acc *store.Account) {
|
||||
defer func() {
|
||||
err := acc.Close()
|
||||
xctl.log.Check(err, "closing account")
|
||||
}()
|
||||
|
||||
tmAccount := time.Now()
|
||||
|
||||
// Copy database file.
|
||||
dbpath := filepath.Join("accounts", acc.Name, "index.db")
|
||||
backupDB(acc.DB, dbpath)
|
||||
|
||||
// todo: should document/check not taking a rlock on account.
|
||||
|
||||
// Copy junkfilter files, if configured.
|
||||
if jf, _, err := acc.OpenJunkFilter(ctx, xctl.log); err != nil {
|
||||
if !errors.Is(err, store.ErrNoJunkFilter) {
|
||||
xerrx("opening junk filter for account (not backed up)", err)
|
||||
}
|
||||
} else {
|
||||
db := jf.DB()
|
||||
jfpath := filepath.Join("accounts", acc.Name, "junkfilter.db")
|
||||
backupDB(db, jfpath)
|
||||
bloompath := filepath.Join("accounts", acc.Name, "junkfilter.bloom")
|
||||
backupFile(bloompath)
|
||||
err := jf.Close()
|
||||
xctl.log.Check(err, "closing junkfilter")
|
||||
}
|
||||
|
||||
dstdbpath := filepath.Join(dstDataDir, dbpath)
|
||||
opts := bstore.Options{MustExist: true, RegisterLogger: xctl.log.Logger}
|
||||
db, err := bstore.Open(ctx, dstdbpath, &opts, store.DBTypes...)
|
||||
if err != nil {
|
||||
xerrx("open copied account database", err, slog.String("dstpath", dstdbpath), slog.Duration("duration", time.Since(tmAccount)))
|
||||
return
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if db != nil {
|
||||
err := db.Close()
|
||||
xctl.log.Check(err, "close account database")
|
||||
}
|
||||
}()
|
||||
|
||||
// Link/copy known message files.
|
||||
tmMsgs := time.Now()
|
||||
seen := map[string]struct{}{}
|
||||
var maxID int64
|
||||
var nlinked, ncopied int
|
||||
err = bstore.QueryDB[store.Message](ctx, db).FilterEqual("Expunged", false).ForEach(func(m store.Message) error {
|
||||
if m.ID > maxID {
|
||||
maxID = m.ID
|
||||
}
|
||||
mp := store.MessagePath(m.ID)
|
||||
seen[mp] = struct{}{}
|
||||
amp := filepath.Join("accounts", acc.Name, "msg", mp)
|
||||
srcpath := filepath.Join(srcDataDir, amp)
|
||||
dstpath := filepath.Join(dstDataDir, amp)
|
||||
if linked, err := linkOrCopy(srcpath, dstpath); err != nil {
|
||||
xerrx("linking/copying account message", err, slog.String("srcpath", srcpath), slog.String("dstpath", dstpath))
|
||||
} else if linked {
|
||||
nlinked++
|
||||
} else {
|
||||
ncopied++
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
xerrx("processing account messages (not backed up properly)", err, slog.Duration("duration", time.Since(tmMsgs)))
|
||||
} else {
|
||||
xvlog("account message files linked/copied",
|
||||
slog.Int("linked", nlinked),
|
||||
slog.Int("copied", ncopied),
|
||||
slog.Duration("duration", time.Since(tmMsgs)))
|
||||
}
|
||||
|
||||
eraseIDs := map[int64]struct{}{}
|
||||
err = bstore.QueryDB[store.MessageErase](ctx, db).ForEach(func(me store.MessageErase) error {
|
||||
eraseIDs[me.ID] = struct{}{}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
xerrx("listing erased messages", err)
|
||||
}
|
||||
|
||||
// Read through all files in the account directory and warn about anything we haven't
|
||||
// handled yet. Message files that are newer than we expect from our consistent
|
||||
// database snapshot are ignored.
|
||||
tmWalk := time.Now()
|
||||
srcadir := filepath.Join(srcDataDir, "accounts", acc.Name)
|
||||
err = filepath.WalkDir(srcadir, func(srcapath string, d fs.DirEntry, err error) error {
|
||||
if err != nil {
|
||||
xerrx("walking files in account", err, slog.String("srcpath", srcapath))
|
||||
return nil
|
||||
}
|
||||
if d.IsDir() {
|
||||
return nil
|
||||
}
|
||||
p := srcapath[len(srcadir)+1:]
|
||||
l := strings.Split(p, string(filepath.Separator))
|
||||
if l[0] == "msg" {
|
||||
mp := filepath.Join(l[1:]...)
|
||||
if _, ok := seen[mp]; ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Skip any messages that were added since we started on our consistent snapshot,
|
||||
// or messages that will be erased. We don't want to cause spurious backup
|
||||
// warnings.
|
||||
id, err := strconv.ParseInt(l[len(l)-1], 10, 64)
|
||||
if err == nil && id > maxID && mp == store.MessagePath(id) {
|
||||
return nil
|
||||
} else if _, ok := eraseIDs[id]; err == nil && ok {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
switch p {
|
||||
case "index.db", "junkfilter.db", "junkfilter.bloom":
|
||||
return nil
|
||||
}
|
||||
ap := filepath.Join("accounts", acc.Name, p)
|
||||
if strings.HasPrefix(p, "msg"+string(filepath.Separator)) {
|
||||
xwarnx("backing up unrecognized file in account message directory (should be moved away)", nil, slog.String("path", ap))
|
||||
} else {
|
||||
xwarnx("backing up unrecognized file in account directory", nil, slog.String("path", ap))
|
||||
}
|
||||
backupFile(ap)
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
xerrx("walking account directory (not backed up properly)", err, slog.String("srcdir", srcadir), slog.Duration("duration", time.Since(tmWalk)))
|
||||
} else {
|
||||
xvlog("walked account directory", slog.Duration("duration", time.Since(tmWalk)))
|
||||
}
|
||||
|
||||
xvlog("account backup finished", slog.String("dir", filepath.Join("accounts", acc.Name)), slog.Duration("duration", time.Since(tmAccount)))
|
||||
}
|
||||
|
||||
// For each configured account, open it, make a copy of the database and
|
||||
// hardlink/copy the messages. We track the accounts we handled, and skip the
|
||||
// account directories when handling "all other files" below.
|
||||
accounts := map[string]struct{}{}
|
||||
for _, accName := range mox.Conf.Accounts() {
|
||||
acc, err := store.OpenAccount(xctl.log, accName, false)
|
||||
if err != nil {
|
||||
xerrx("opening account for copying (will try to copy as regular files later)", err, slog.String("account", accName))
|
||||
continue
|
||||
}
|
||||
accounts[accName] = struct{}{}
|
||||
backupAccount(acc)
|
||||
}
|
||||
|
||||
// Copy all other files, that aren't part of the known files, databases, queue or accounts.
|
||||
tmWalk := time.Now()
|
||||
err = filepath.WalkDir(srcDataDir, func(srcpath string, d fs.DirEntry, err error) error {
|
||||
if err != nil {
|
||||
xerrx("walking path", err, slog.String("path", srcpath))
|
||||
return nil
|
||||
}
|
||||
|
||||
if srcpath == srcDataDir {
|
||||
return nil
|
||||
}
|
||||
p := srcpath[len(srcDataDir)+1:]
|
||||
if p == "queue" || p == "acme" || p == "tmp" {
|
||||
return fs.SkipDir
|
||||
}
|
||||
l := strings.Split(p, string(filepath.Separator))
|
||||
if len(l) >= 2 && l[0] == "accounts" {
|
||||
name := l[1]
|
||||
if _, ok := accounts[name]; ok {
|
||||
return fs.SkipDir
|
||||
}
|
||||
}
|
||||
|
||||
// Only files are explicitly backed up.
|
||||
if d.IsDir() {
|
||||
return nil
|
||||
}
|
||||
|
||||
switch p {
|
||||
case "auth.db", "dmarcrpt.db", "dmarceval.db", "mtasts.db", "tlsrpt.db", "tlsrptresult.db", "receivedid.key", "ctl":
|
||||
// Already handled.
|
||||
return nil
|
||||
case "lastknownversion": // Optional file, not yet handled.
|
||||
default:
|
||||
xwarnx("backing up unrecognized file", nil, slog.String("path", p))
|
||||
}
|
||||
backupFile(p)
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
xerrx("walking other files (not backed up properly)", err, slog.Duration("duration", time.Since(tmWalk)))
|
||||
} else {
|
||||
xvlog("walking other files finished", slog.Duration("duration", time.Since(tmWalk)))
|
||||
}
|
||||
|
||||
xvlog("backup finished", slog.Duration("duration", time.Since(tmStart)))
|
||||
|
||||
xwriter.xclose()
|
||||
|
||||
if incomplete {
|
||||
xctl.xwrite("errors were encountered during backup")
|
||||
} else {
|
||||
xctl.xwriteok()
|
||||
}
|
||||
}
|
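The protocol comment at the top of xbackupctl is the only description of the backup exchange in this diff. Purely as an illustration of that order of messages (the real mox ctl connection has its own framing for strings and streams, which this diff does not show; newline-delimited text is assumed below), a client-side sketch:

```go
package example

import (
	"bufio"
	"fmt"
	"io"
	"os"
)

// requestBackup follows the documented exchange: send "backup", the destination
// directory, and "verbose" or "", then read the progress stream and final status.
// The wire format used here is an assumption for illustration only.
func requestBackup(conn io.ReadWriter, destDir string, verbose bool) error {
	w := bufio.NewWriter(conn)
	fmt.Fprintln(w, "backup")
	fmt.Fprintln(w, destDir)
	if verbose {
		fmt.Fprintln(w, "verbose")
	} else {
		fmt.Fprintln(w, "")
	}
	if err := w.Flush(); err != nil {
		return err
	}
	// Progress lines, warnings, and the final "ok" or error message follow.
	_, err := io.Copy(os.Stdout, conn)
	return err
}
```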
@ -1,2 +0,0 @@
|
||||
#!/bin/sh
|
||||
exec ./node_modules/.bin/jshint --extract always $@ | fixjshintlines
|
480
config/config.go
@ -5,6 +5,7 @@ import (
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"regexp"
|
||||
@ -19,6 +20,10 @@ import (
|
||||
|
||||
// todo: better default values, so less has to be specified in the config file.
|
||||
|
||||
// DefaultMaxMsgSize is the maximum message size for incoming and outgoing
|
||||
// messages, in bytes. Can be overridden per listener.
|
||||
const DefaultMaxMsgSize = 100 * 1024 * 1024
|
||||
|
||||
// Port returns port if non-zero, and fallback otherwise.
|
||||
func Port(port, fallback int) int {
|
||||
if port == 0 {
|
||||
@@ -30,31 +35,46 @@ func Port(port, fallback int) int {
// Static is a parsed form of the mox.conf configuration file, before converting it
|
||||
// into a mox.Config after additional processing.
|
||||
type Static struct {
|
||||
DataDir string `sconf-doc:"Directory where all data is stored, e.g. queue, accounts and messages, ACME TLS certs/keys. If this is a relative path, it is relative to the directory of mox.conf."`
|
||||
DataDir string `sconf-doc:"NOTE: This config file is in 'sconf' format. Indent with tabs. Comments must be on their own line, they don't end a line. Do not escape or quote strings. Details: https://pkg.go.dev/github.com/mjl-/sconf.\n\n\nDirectory where all data is stored, e.g. queue, accounts and messages, ACME TLS certs/keys. If this is a relative path, it is relative to the directory of mox.conf."`
|
||||
LogLevel string `sconf-doc:"Default log level, one of: error, info, debug, trace, traceauth, tracedata. Trace logs SMTP and IMAP protocol transcripts, with traceauth also messages with passwords, and tracedata on top of that also the full data exchanges (full messages), which can be a large amount of data."`
|
||||
PackageLogLevels map[string]string `sconf:"optional" sconf-doc:"Overrides of log level per package (e.g. queue, smtpclient, smtpserver, imapserver, spf, dkim, dmarc, dmarcdb, autotls, junk, mtasts, tlsrpt)."`
|
||||
User string `sconf:"optional" sconf-doc:"User to switch to after binding to all sockets as root. Default: mox. If the value is not a known user, it is parsed as integer and used as uid and gid."`
|
||||
NoFixPermissions bool `sconf:"optional" sconf-doc:"If true, do not automatically fix file permissions when starting up. By default, mox will ensure reasonable owner/permissions on the working, data and config directories (and files), and mox binary (if present)."`
|
||||
Hostname string `sconf-doc:"Full hostname of system, e.g. mail.<domain>"`
|
||||
HostnameDomain dns.Domain `sconf:"-" json:"-"` // Parsed form of hostname.
|
||||
CheckUpdates bool `sconf:"optional" sconf-doc:"If enabled, a single DNS TXT lookup of _updates.xmox.nl is done every 24h to check for a new release. Each time a new release is found, a changelog is fetched from https://updates.xmox.nl and delivered to the postmaster mailbox."`
|
||||
CheckUpdates bool `sconf:"optional" sconf-doc:"If enabled, a single DNS TXT lookup of _updates.xmox.nl is done every 24h to check for a new release. Each time a new release is found, a changelog is fetched from https://updates.xmox.nl/changelog and delivered to the postmaster mailbox."`
|
||||
Pedantic bool `sconf:"optional" sconf-doc:"In pedantic mode protocol violations (that happen in the wild) for SMTP/IMAP/etc result in errors instead of accepting such behaviour."`
|
||||
TLS struct {
|
||||
CA *struct {
|
||||
AdditionalToSystem bool `sconf:"optional"`
|
||||
CertFiles []string `sconf:"optional"`
|
||||
} `sconf:"optional"`
|
||||
CertPool *x509.CertPool `sconf:"-" json:"-"`
|
||||
} `sconf:"optional" sconf-doc:"Global TLS configuration, e.g. for additional Certificate Authorities."`
|
||||
} `sconf:"optional" sconf-doc:"Global TLS configuration, e.g. for additional Certificate Authorities. Used for outgoing SMTP connections, HTTPS requests."`
|
||||
ACME map[string]ACME `sconf:"optional" sconf-doc:"Automatic TLS configuration with ACME, e.g. through Let's Encrypt. The key is a name referenced in TLS configs, e.g. letsencrypt."`
|
||||
AdminPasswordFile string `sconf:"optional" sconf-doc:"File containing hash of admin password, for authentication in the web admin pages (if enabled)."`
|
||||
Listeners map[string]Listener `sconf-doc:"Listeners are groups of IP addresses and services enabled on those IP addresses, such as SMTP/IMAP or internal endpoints for administration or Prometheus metrics. All listeners with SMTP/IMAP services enabled will serve all configured domains. If the listener is named 'public', it will get a few helpful additional configuration checks, for acme automatic tls certificates and monitoring of ips in dnsbls if those are configured."`
|
||||
Postmaster struct {
|
||||
Account string
|
||||
Mailbox string `sconf-doc:"E.g. Postmaster or Inbox."`
|
||||
} `sconf-doc:"Destination for emails delivered to postmaster address."`
|
||||
DefaultMailboxes []string `sconf:"optional" sconf-doc:"Mailboxes to create when adding an account. Inbox is always created. If no mailboxes are specified, the following are automatically created: Sent, Archive, Trash, Drafts and Junk."`
|
||||
} `sconf-doc:"Destination for emails delivered to postmaster addresses: a plain 'postmaster' without domain, 'postmaster@<hostname>' (also for each listener with SMTP enabled), and as fallback for each domain without explicitly configured postmaster destination."`
|
||||
HostTLSRPT struct {
|
||||
Account string `sconf-doc:"Account to deliver TLS reports to. Typically same account as for postmaster."`
|
||||
Mailbox string `sconf-doc:"Mailbox to deliver TLS reports to. Recommended value: TLSRPT."`
|
||||
Localpart string `sconf-doc:"Localpart at hostname to accept TLS reports at. Recommended value: tlsreports."`
|
||||
|
||||
// All IPs that were explicitly listen on for external SMTP. Only set when there
|
||||
ParsedLocalpart smtp.Localpart `sconf:"-"`
|
||||
} `sconf:"optional" sconf-doc:"Destination for per-host TLS reports (TLSRPT). TLS reports can be per recipient domain (for MTA-STS), or per MX host (for DANE). The per-domain TLS reporting configuration is in domains.conf. This is the TLS reporting configuration for this host. If absent, no host-based TLSRPT address is configured, and no host TLSRPT DNS record is suggested."`
|
||||
InitialMailboxes InitialMailboxes `sconf:"optional" sconf-doc:"Mailboxes to create for new accounts. Inbox is always created. Mailboxes can be given a 'special-use' role, which are understood by most mail clients. If absent/empty, the following additional mailboxes are created: Sent, Archive, Trash, Drafts and Junk."`
|
||||
DefaultMailboxes []string `sconf:"optional" sconf-doc:"Deprecated in favor of InitialMailboxes. Mailboxes to create when adding an account. Inbox is always created. If no mailboxes are specified, the following are automatically created: Sent, Archive, Trash, Drafts and Junk."`
|
||||
Transports map[string]Transport `sconf:"optional" sconf-doc:"Transport are mechanisms for delivering messages. Transports can be referenced from Routes in accounts, domains and the global configuration. There is always an implicit/fallback delivery transport doing direct delivery with SMTP from the outgoing message queue. Transports are typically only configured when using smarthosts, i.e. when delivering through another SMTP server. Zero or one transport methods must be set in a transport, never multiple. When using an external party to send email for a domain, keep in mind you may have to add their IP address to your domain's SPF record, and possibly additional DKIM records."`
|
||||
// Awkward naming of fields to get intended default behaviour for zero values.
|
||||
NoOutgoingDMARCReports bool `sconf:"optional" sconf-doc:"Do not send DMARC reports (aggregate only). By default, aggregate reports on DMARC evaluations are sent to domains if their DMARC policy requests them. Reports are sent at whole hours, with a minimum of 1 hour and maximum of 24 hours, rounded up so a whole number of intervals cover 24 hours, aligned at whole days in UTC. Reports are sent from the postmaster@<mailhostname> address."`
|
||||
NoOutgoingTLSReports bool `sconf:"optional" sconf-doc:"Do not send TLS reports. By default, reports about failed SMTP STARTTLS connections and related MTA-STS/DANE policies are sent to domains if their TLSRPT DNS record requests them. Reports covering a 24 hour UTC interval are sent daily. Reports are sent from the postmaster address of the configured domain the mailhostname is in. If there is no such domain, or it does not have DKIM configured, no reports are sent."`
|
||||
OutgoingTLSReportsForAllSuccess bool `sconf:"optional" sconf-doc:"Also send TLS reports if there were no SMTP STARTTLS connection failures. By default, reports are only sent when at least one failure occurred. If a report is sent, it does always include the successful connection counts as well."`
|
||||
QuotaMessageSize int64 `sconf:"optional" sconf-doc:"Default maximum total message size in bytes for each individual account, only applicable if greater than zero. Can be overridden per account. Attempting to add new messages to an account beyond its maximum total size will result in an error. Useful to prevent a single account from filling storage. The quota only applies to the email message files, not to any file system overhead and also not the message index database file (account for approximately 15% overhead)."`
|
||||
|
||||
// All IPs that were explicitly listened on for external SMTP. Only set when there
|
||||
// are no unspecified external SMTP listeners and there is at most one for IPv4 and
|
||||
// at most one for IPv6. Used for setting the local address when making outgoing
|
||||
// connections. Those IPs are assumed to be in an SPF record for the domain,
|
||||
@@ -68,39 +88,79 @@ type Static struct {
GID uint32 `sconf:"-" json:"-"`
}

// InitialMailboxes are mailboxes created for a new account.
type InitialMailboxes struct {
SpecialUse SpecialUseMailboxes `sconf:"optional" sconf-doc:"Special-use roles for mailboxes to create."`
Regular []string `sconf:"optional" sconf-doc:"Regular, non-special-use mailboxes to create."`
}

// SpecialUseMailboxes holds mailbox names for special-use roles. Mail clients
// recognize these special-use roles, e.g. appending sent messages to whichever
// mailbox has the Sent special-use flag.
type SpecialUseMailboxes struct {
Sent string `sconf:"optional"`
Archive string `sconf:"optional"`
Trash string `sconf:"optional"`
Draft string `sconf:"optional"`
Junk string `sconf:"optional"`
}

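
As a rough illustration of the documented fallback (Inbox always created; Sent, Archive, Trash, Drafts and Junk when nothing is configured), a hypothetical helper, not mox's actual code:

package main

import "fmt"

// Trimmed stand-ins for the structs above.
type SpecialUseMailboxes struct{ Sent, Archive, Trash, Draft, Junk string }
type InitialMailboxes struct {
	SpecialUse SpecialUseMailboxes
	Regular    []string
}

// initialMailboxNames is a hypothetical helper: collect configured names and
// fall back to the documented defaults when nothing is set. Inbox is created
// separately and therefore not listed here.
func initialMailboxNames(im InitialMailboxes) []string {
	var names []string
	for _, s := range []string{im.SpecialUse.Sent, im.SpecialUse.Archive, im.SpecialUse.Trash, im.SpecialUse.Draft, im.SpecialUse.Junk} {
		if s != "" {
			names = append(names, s)
		}
	}
	names = append(names, im.Regular...)
	if len(names) == 0 {
		return []string{"Sent", "Archive", "Trash", "Drafts", "Junk"}
	}
	return names
}

func main() {
	fmt.Println(initialMailboxNames(InitialMailboxes{})) // [Sent Archive Trash Drafts Junk]
}
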
// Dynamic is the parsed form of domains.conf, and is automatically reloaded when changed.
|
||||
type Dynamic struct {
|
||||
Domains map[string]Domain `sconf-doc:"Domains for which email is accepted. For internationalized domains, use their IDNA names in UTF-8."`
|
||||
Accounts map[string]Account `sconf-doc:"Accounts to which email can be delivered. An account can accept email for multiple domains, for multiple localparts, and deliver to multiple mailboxes."`
|
||||
Domains map[string]Domain `sconf-doc:"NOTE: This config file is in 'sconf' format. Indent with tabs. Comments must be on their own line, they don't end a line. Do not escape or quote strings. Details: https://pkg.go.dev/github.com/mjl-/sconf.\n\n\nDomains for which email is accepted. For internationalized domains, use their IDNA names in UTF-8."`
|
||||
Accounts map[string]Account `sconf-doc:"Accounts represent mox users, each with a password and email address(es) to which email can be delivered (possibly at different domains). Each account has its own on-disk directory holding its messages and index database. An account name is not an email address."`
|
||||
WebDomainRedirects map[string]string `sconf:"optional" sconf-doc:"Redirect all requests from domain (key) to domain (value). Always redirects to HTTPS. For plain HTTP redirects, use a WebHandler with a WebRedirect."`
|
||||
WebHandlers []WebHandler `sconf:"optional" sconf-doc:"Handle webserver requests by serving static files, redirecting or reverse-proxying HTTP(s). The first matching WebHandler will handle the request. Built-in handlers for autoconfig and mta-sts always run first. If no handler matches, the response status code is file not found (404). If functionality you need is missing, simply forward the requests to an application that can provide the needed functionality."`
WebHandlers []WebHandler `sconf:"optional" sconf-doc:"Handle webserver requests by serving static files, redirecting, reverse-proxying HTTP(s) or passing the request to an internal service. The first matching WebHandler will handle the request. Built-in system handlers, e.g. for ACME validation, autoconfig and mta-sts always run first. Built-in handlers for admin, account, webmail and webapi are evaluated after all handlers, including webhandlers (allowing for overrides of internal services for some domains). If no handler matches, the response status code is file not found (404). If webserver features are missing, forward the requests to an application that provides the needed functionality itself."`
|
||||
Routes []Route `sconf:"optional" sconf-doc:"Routes for delivering outgoing messages through the queue. Each delivery attempt evaluates account routes, domain routes and finally these global routes. The transport of the first matching route is used in the delivery attempt. If no routes match, which is the default with no configured routes, messages are delivered directly from the queue."`
|
||||
MonitorDNSBLs []string `sconf:"optional" sconf-doc:"DNS blocklists to periodically check for the presence of IPs we send from, without using them for checking incoming deliveries. Also see DNSBLs in SMTP listeners in mox.conf, which specifies DNSBLs to use both for incoming deliveries and for checking our IPs against. Example DNSBLs: sbl.spamhaus.org, bl.spamcop.net."`
|
||||
WebDNSDomainRedirects map[dns.Domain]dns.Domain `sconf:"-"`
|
||||
WebDNSDomainRedirects map[dns.Domain]dns.Domain `sconf:"-" json:"-"`
|
||||
MonitorDNSBLZones []dns.Domain `sconf:"-"`
|
||||
ClientSettingDomains map[dns.Domain]struct{} `sconf:"-" json:"-"`
|
||||
}
|
||||
|
||||
type ACME struct {
|
||||
DirectoryURL string `sconf-doc:"For letsencrypt, use https://acme-v02.api.letsencrypt.org/directory."`
|
||||
RenewBefore time.Duration `sconf:"optional" sconf-doc:"How long before expiration to renew the certificate. Default is 30 days."`
|
||||
ContactEmail string `sconf-doc:"Email address to register at ACME provider. The provider can email you when certificates are about to expire. If you configure an address for which email is delivered by this server, keep in mind that TLS misconfigurations could result in such notification emails not arriving."`
|
||||
Port int `sconf:"optional" sconf-doc:"TLS port for ACME validation, 443 by default. You should only override this if you cannot listen on port 443 directly. ACME will make requests to port 443, so you'll have to add an external mechanism to get the connection here, e.g. by configuring port forwarding."`
|
||||
DirectoryURL string `sconf-doc:"For letsencrypt, use https://acme-v02.api.letsencrypt.org/directory."`
|
||||
RenewBefore time.Duration `sconf:"optional" sconf-doc:"How long before expiration to renew the certificate. Default is 30 days."`
|
||||
ContactEmail string `sconf-doc:"Email address to register at ACME provider. The provider can email you when certificates are about to expire. If you configure an address for which email is delivered by this server, keep in mind that TLS misconfigurations could result in such notification emails not arriving."`
|
||||
Port int `sconf:"optional" sconf-doc:"TLS port for ACME validation, 443 by default. You should only override this if you cannot listen on port 443 directly. ACME will make requests to port 443, so you'll have to add an external mechanism to get the tls connection here, e.g. by configuring firewall-level port forwarding. Validation over the https port uses tls-alpn-01 with application-layer protocol negotiation, which essentially means the original tls connection must make it here unmodified, an https reverse proxy will not work."`
|
||||
IssuerDomainName string `sconf:"optional" sconf-doc:"If set, used for suggested CAA DNS records, for restricting TLS certificate issuance to a Certificate Authority. If empty and DirectoryURL is for Let's Encrypt, this value is set automatically to letsencrypt.org."`
ExternalAccountBinding *ExternalAccountBinding `sconf:"optional" sconf-doc:"ACME providers can require that a request for a new ACME account reference an existing non-ACME account known to the provider. External account binding references that account by a key id, and authorizes new ACME account requests by signing it with a key known both by the ACME client and ACME provider."`
|
||||
// ../rfc/8555:2111
|
||||
|
||||
Manager *autotls.Manager `sconf:"-" json:"-"`
|
||||
}
|
||||
|
||||
type ExternalAccountBinding struct {
|
||||
KeyID string `sconf-doc:"Key identifier, from ACME provider."`
|
||||
KeyFile string `sconf-doc:"File containing the base64url-encoded key used to sign account requests with external account binding. The ACME provider will verify the account request is correctly signed by the key. File is evaluated relative to the directory of mox.conf."`
|
||||
}
|
||||
|
||||
type Listener struct {
|
||||
IPs []string `sconf-doc:"Use 0.0.0.0 to listen on all IPv4 and/or :: to listen on all IPv6 addresses, but it is better to explicitly specify the IPs you want to use for email, as mox will make sure outgoing connections will only be made from one of those IPs."`
|
||||
Hostname string `sconf:"optional" sconf-doc:"If empty, the config global Hostname is used."`
|
||||
IPs []string `sconf-doc:"Use 0.0.0.0 to listen on all IPv4 and/or :: to listen on all IPv6 addresses, but it is better to explicitly specify the IPs you want to use for email, as mox will make sure outgoing connections will only be made from one of those IPs. If both outgoing IPv4 and IPv6 connectivity is possible, and only one family has explicitly configured addresses, both address families are still used for outgoing connections. Use the \"direct\" transport to limit address families for outgoing connections."`
|
||||
NATIPs []string `sconf:"optional" sconf-doc:"If set, the mail server is configured behind a NAT and field IPs are internal instead of the public IPs, while NATIPs lists the public IPs. Used during IP-related DNS self-checks, such as for iprev, mx, spf, autoconfig, autodiscover, and for autotls."`
|
||||
IPsNATed bool `sconf:"optional" sconf-doc:"Deprecated, use NATIPs instead. If set, IPs are not the public IPs, but are NATed. Skips IP-related DNS self-checks."`
|
||||
Hostname string `sconf:"optional" sconf-doc:"If empty, the config global Hostname is used. The internal services webadmin, webaccount, webmail and webapi only match requests to IPs, this hostname, \"localhost\". All except webadmin also match for any client settings domain."`
|
||||
HostnameDomain dns.Domain `sconf:"-" json:"-"` // Set when parsing config.
|
||||
|
||||
TLS *TLS `sconf:"optional" sconf-doc:"For SMTP/IMAP STARTTLS, direct TLS and HTTPS connections."`
|
||||
SMTPMaxMessageSize int64 `sconf:"optional" sconf-doc:"Maximum size in bytes accepted incoming and outgoing messages. Default is 100MB."`
|
||||
SMTPMaxMessageSize int64 `sconf:"optional" sconf-doc:"Maximum size in bytes for incoming and outgoing messages. Default is 100MB."`
|
||||
SMTP struct {
|
||||
Enabled bool
|
||||
Port int `sconf:"optional" sconf-doc:"Default 25."`
|
||||
NoSTARTTLS bool `sconf:"optional" sconf-doc:"Do not offer STARTTLS to secure the connection. Not recommended."`
|
||||
RequireSTARTTLS bool `sconf:"optional" sconf-doc:"Do not accept incoming messages if STARTTLS is not active. Can be used in combination with a strict MTA-STS policy. A remote SMTP server may not support TLS and may not be able to deliver messages."`
|
||||
DNSBLs []string `sconf:"optional" sconf-doc:"Addresses of DNS block lists for incoming messages. Block lists are only consulted for connections/messages without enough reputation to make an accept/reject decision. This prevents sending IPs of all communications to the block list provider. If any of the listed DNSBLs contains a requested IP address, the message is rejected as spam. The DNSBLs are checked for healthiness before use, at most once per 4 hours. Example DNSBLs: sbl.spamhaus.org, bl.spamcop.net"`
|
||||
DNSBLZones []dns.Domain `sconf:"-"`
|
||||
Port int `sconf:"optional" sconf-doc:"Default 25."`
|
||||
NoSTARTTLS bool `sconf:"optional" sconf-doc:"Do not offer STARTTLS to secure the connection. Not recommended."`
|
||||
RequireSTARTTLS bool `sconf:"optional" sconf-doc:"Do not accept incoming messages if STARTTLS is not active. Consider using in combination with an MTA-STS policy and/or DANE. A remote SMTP server may not support TLS and may not be able to deliver messages. Incoming messages for TLS reporting addresses ignore this setting and do not require TLS."`
|
||||
NoRequireTLS bool `sconf:"optional" sconf-doc:"Do not announce the REQUIRETLS SMTP extension. Messages delivered using the REQUIRETLS extension should only be distributed onwards to servers also implementing the REQUIRETLS extension. In some situations, such as hosting mailing lists, this may not be feasible due to lack of support for the extension by mailing list subscribers."`
|
||||
// Reoriginated messages (such as messages sent to mailing list subscribers) should
|
||||
// keep REQUIRETLS. ../rfc/8689:412
|
||||
|
||||
DNSBLs []string `sconf:"optional" sconf-doc:"Addresses of DNS block lists for incoming messages. Block lists are only consulted for connections/messages without enough reputation to make an accept/reject decision. This prevents sending IPs of all communications to the block list provider. If any of the listed DNSBLs contains a requested IP address, the message is rejected as spam. The DNSBLs are checked for healthiness before use, at most once per 4 hours. IPs we can send from are periodically checked for being in the configured DNSBLs. See MonitorDNSBLs in domains.conf to only monitor IPs we send from, without using those DNSBLs for incoming messages. Example DNSBLs: sbl.spamhaus.org, bl.spamcop.net. See https://www.spamhaus.org/sbl/ and https://www.spamcop.net/ for more information and terms of use."`
|
||||
|
||||
FirstTimeSenderDelay *time.Duration `sconf:"optional" sconf-doc:"Delay before accepting a message from a first-time sender for the destination account. Default: 15s."`
|
||||
|
||||
TLSSessionTicketsDisabled *bool `sconf:"optional" sconf-doc:"Override default setting for enabling TLS session tickets. Disabling session tickets may work around TLS interoperability issues."`
|
||||
|
||||
DNSBLZones []dns.Domain `sconf:"-"`
|
||||
} `sconf:"optional"`
|
||||
Submission struct {
|
||||
Enabled bool
|
||||
@@ -108,8 +168,9 @@ type Listener struct {
NoRequireSTARTTLS bool `sconf:"optional" sconf-doc:"Do not require STARTTLS. Since users must login, this means password may be sent without encryption. Not recommended."`
|
||||
} `sconf:"optional" sconf-doc:"SMTP for submitting email, e.g. by email applications. Starts out in plain text, can be upgraded to TLS with the STARTTLS command. Prefer using Submissions which is always a TLS connection."`
|
||||
Submissions struct {
|
||||
Enabled bool
|
||||
Port int `sconf:"optional" sconf-doc:"Default 465."`
|
||||
Enabled bool
|
||||
Port int `sconf:"optional" sconf-doc:"Default 465."`
|
||||
EnabledOnHTTPS bool `sconf:"optional" sconf-doc:"Additionally enable submission on HTTPS port 443 via TLS ALPN. TLS Application Layer Protocol Negotiation allows clients to request a specific protocol from the server as part of the TLS connection setup. When this setting is enabled and a client requests the 'smtp' protocol after TLS, it will be able to talk SMTP to Mox on port 443. This is meant to be useful as a censorship circumvention technique for Delta Chat."`
|
||||
} `sconf:"optional" sconf-doc:"SMTP over TLS for submitting email, by email applications. Requires a TLS config."`
|
||||
IMAP struct {
|
||||
Enabled bool
|
||||
@@ -117,26 +178,19 @@ type Listener struct {
NoRequireSTARTTLS bool `sconf:"optional" sconf-doc:"Enable this only when the connection is otherwise encrypted (e.g. through a VPN)."`
|
||||
} `sconf:"optional" sconf-doc:"IMAP for reading email, by email applications. Starts out in plain text, can be upgraded to TLS with the STARTTLS command. Prefer using IMAPS instead which is always a TLS connection."`
|
||||
IMAPS struct {
|
||||
Enabled bool
|
||||
Port int `sconf:"optional" sconf-doc:"Default 993."`
|
||||
Enabled bool
|
||||
Port int `sconf:"optional" sconf-doc:"Default 993."`
|
||||
EnabledOnHTTPS bool `sconf:"optional" sconf-doc:"Additionally enable IMAP on HTTPS port 443 via TLS ALPN. TLS Application Layer Protocol Negotiation allows clients to request a specific protocol from the server as part of the TLS connection setup. When this setting is enabled and a client requests the 'imap' protocol after TLS, it will be able to talk IMAP to Mox on port 443. This is meant to be useful as a censorship circumvention technique for Delta Chat."`
|
||||
} `sconf:"optional" sconf-doc:"IMAP over TLS for reading email, by email applications. Requires a TLS config."`
|
||||
AccountHTTP struct {
|
||||
Enabled bool
|
||||
Port int `sconf:"optional" sconf-doc:"Default 80."`
|
||||
} `sconf:"optional" sconf-doc:"Account web interface, for email users wanting to change their accounts, e.g. set new password, set new delivery rulesets. Served at /."`
|
||||
AccountHTTPS struct {
|
||||
Enabled bool
|
||||
Port int `sconf:"optional" sconf-doc:"Default 80."`
|
||||
} `sconf:"optional" sconf-doc:"Account web interface listener for HTTPS. Requires a TLS config."`
|
||||
AdminHTTP struct {
|
||||
Enabled bool
|
||||
Port int `sconf:"optional" sconf-doc:"Default 80."`
|
||||
} `sconf:"optional" sconf-doc:"Admin web interface, for managing domains, accounts, etc. Served at /admin/. Preferably only enable on non-public IPs. Hint: use 'ssh -L 8080:localhost:80 you@yourmachine' and open http://localhost:8080/admin/, or set up a tunnel (e.g. WireGuard) and add its IP to the mox 'internal' listener."`
AdminHTTPS struct {
|
||||
Enabled bool
|
||||
Port int `sconf:"optional" sconf-doc:"Default 443."`
|
||||
} `sconf:"optional" sconf-doc:"Admin web interface listener for HTTPS. Requires a TLS config. Preferably only enable on non-public IPs."`
MetricsHTTP struct {
|
||||
AccountHTTP WebService `sconf:"optional" sconf-doc:"Account web interface, for email users wanting to change their accounts, e.g. set new password, set new delivery rulesets. Default path is /."`
|
||||
AccountHTTPS WebService `sconf:"optional" sconf-doc:"Account web interface listener like AccountHTTP, but for HTTPS. Requires a TLS config."`
|
||||
AdminHTTP WebService `sconf:"optional" sconf-doc:"Admin web interface, for managing domains, accounts, etc. Default path is /admin/. Preferably only enable on non-public IPs. Hint: use 'ssh -L 8080:localhost:80 you@yourmachine' and open http://localhost:8080/admin/, or set up a tunnel (e.g. WireGuard) and add its IP to the mox 'internal' listener."`
|
||||
AdminHTTPS WebService `sconf:"optional" sconf-doc:"Admin web interface listener like AdminHTTP, but for HTTPS. Requires a TLS config."`
|
||||
WebmailHTTP WebService `sconf:"optional" sconf-doc:"Webmail client, for reading email. Default path is /webmail/."`
|
||||
WebmailHTTPS WebService `sconf:"optional" sconf-doc:"Webmail client, like WebmailHTTP, but for HTTPS. Requires a TLS config."`
|
||||
WebAPIHTTP WebService `sconf:"optional" sconf-doc:"Like WebAPIHTTPS, but with plain HTTP, without TLS."`
WebAPIHTTPS WebService `sconf:"optional" sconf-doc:"WebAPI, a simple HTTP/JSON-based API for email, with HTTPS (requires a TLS config). Default path is /webapi/."`
|
||||
MetricsHTTP struct {
|
||||
Enabled bool
|
||||
Port int `sconf:"optional" sconf-doc:"Default 8010."`
|
||||
} `sconf:"optional" sconf-doc:"Serve prometheus metrics, for monitoring. You should not enable this on a public IP."`
|
||||
@@ -155,64 +209,177 @@ type Listener struct {
NonTLS bool `sconf:"optional" sconf-doc:"If set, plain HTTP instead of HTTPS is spoken on the configured port. Can be useful when the mta-sts domain is reverse proxied."`
|
||||
} `sconf:"optional" sconf-doc:"Serve MTA-STS policies describing SMTP TLS requirements. Requires a TLS config."`
|
||||
WebserverHTTP struct {
|
||||
Enabled bool
|
||||
Port int `sconf:"optional" sconf-doc:"Port for plain HTTP (non-TLS) webserver."`
|
||||
Enabled bool
|
||||
Port int `sconf:"optional" sconf-doc:"Port for plain HTTP (non-TLS) webserver."`
|
||||
RateLimitDisabled bool `sconf:"optional" sconf-doc:"Disable rate limiting for all requests to this port."`
|
||||
} `sconf:"optional" sconf-doc:"All configured WebHandlers will serve on an enabled listener."`
|
||||
WebserverHTTPS struct {
|
||||
Enabled bool
|
||||
Port int `sconf:"optional" sconf-doc:"Port for HTTPS webserver."`
|
||||
Enabled bool
|
||||
Port int `sconf:"optional" sconf-doc:"Port for HTTPS webserver."`
|
||||
RateLimitDisabled bool `sconf:"optional" sconf-doc:"Disable rate limiting for all requests to this port."`
|
||||
} `sconf:"optional" sconf-doc:"All configured WebHandlers will serve on an enabled listener. Either ACME must be configured, or for each WebHandler domain a TLS certificate must be configured."`
|
||||
}
|
||||
|
||||
type Domain struct {
|
||||
Description string `sconf:"optional" sconf-doc:"Free-form description of domain."`
|
||||
LocalpartCatchallSeparator string `sconf:"optional" sconf-doc:"If not empty, only the string before the separator is used for email delivery decisions. For example, if set to \"+\", you+anything@example.com will be delivered to you@example.com."`
LocalpartCaseSensitive bool `sconf:"optional" sconf-doc:"If set, upper/lower case is relevant for email delivery."`
|
||||
DKIM DKIM `sconf:"optional" sconf-doc:"With DKIM signing, a domain is taking responsibility for (content of) emails it sends, letting receiving mail servers build up a (hopefully positive) reputation of the domain, which can help with mail delivery."`
|
||||
DMARC *DMARC `sconf:"optional" sconf-doc:"With DMARC, a domain publishes, in DNS, a policy on how other mail servers should handle incoming messages with the From-header matching this domain and/or subdomain (depending on the configured alignment). Receiving mail servers use this to build up a reputation of this domain, which can help with mail delivery. A domain can also publish an email address to which reports about DMARC verification results can be sent by verifying mail servers, useful for monitoring. Incoming DMARC reports are automatically parsed, validated, added to metrics and stored in the reporting database for later display in the admin web pages."`
|
||||
MTASTS *MTASTS `sconf:"optional" sconf-doc:"With MTA-STS a domain publishes, in DNS, presence of a policy for using/requiring TLS for SMTP connections. The policy is served over HTTPS."`
|
||||
TLSRPT *TLSRPT `sconf:"optional" sconf-doc:"With TLSRPT a domain specifies in DNS where reports about encountered SMTP TLS behaviour should be sent. Useful for monitoring. Incoming TLS reports are automatically parsed, validated, added to metrics and stored in the reporting database for later display in the admin web pages."`
|
||||
// WebService is an internal web interface: webmail, webaccount, webadmin, webapi.
|
||||
type WebService struct {
|
||||
Enabled bool
|
||||
Port int `sconf:"optional" sconf-doc:"Default 80 for HTTP and 443 for HTTPS. See Hostname at Listener for hostname matching behaviour."`
|
||||
Path string `sconf:"optional" sconf-doc:"Path to serve requests on. Should end with a slash, related to cookie paths."`
|
||||
Forwarded bool `sconf:"optional" sconf-doc:"If set, X-Forwarded-* headers are used for the remote IP address for rate limiting and for the \"secure\" status of cookies."`
|
||||
}
|
||||
|
||||
Domain dns.Domain `sconf:"-" json:"-"`
// Transport is a method to deliver a message. At most one of the fields can
// be non-nil. The non-nil field represents the type of transport. For a
// transport with all fields nil, regular email delivery is done.
type Transport struct {
Submissions *TransportSMTP `sconf:"optional" sconf-doc:"Submission SMTP over a TLS connection to submit email to a remote queue."`
Submission *TransportSMTP `sconf:"optional" sconf-doc:"Submission SMTP over a plain TCP connection (possibly with STARTTLS) to submit email to a remote queue."`
SMTP *TransportSMTP `sconf:"optional" sconf-doc:"SMTP over a plain connection (possibly with STARTTLS), typically for old-fashioned unauthenticated relaying to a remote queue."`
Socks *TransportSocks `sconf:"optional" sconf-doc:"Like regular direct delivery, but makes outgoing connections through a SOCKS proxy."`
Direct *TransportDirect `sconf:"optional" sconf-doc:"Like regular direct delivery, but allows tweaking outgoing connections."`
Fail *TransportFail `sconf:"optional" sconf-doc:"Immediately fails the delivery attempt."`
}

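
The comment above states that at most one transport method may be set. A minimal sketch of such a check, with trimmed stand-in types and a hypothetical validator name:

package main

import (
	"errors"
	"fmt"
)

// Trimmed stand-ins for the transport types above.
type TransportSMTP struct{ Host string }
type TransportSocks struct{ Address string }
type TransportDirect struct{ DisableIPv4, DisableIPv6 bool }
type TransportFail struct{ SMTPCode int }

type Transport struct {
	Submissions, Submission, SMTP *TransportSMTP
	Socks                         *TransportSocks
	Direct                        *TransportDirect
	Fail                          *TransportFail
}

// checkTransport is a hypothetical validator: zero or one method may be set.
func checkTransport(t Transport) error {
	n := 0
	for _, set := range []bool{t.Submissions != nil, t.Submission != nil, t.SMTP != nil, t.Socks != nil, t.Direct != nil, t.Fail != nil} {
		if set {
			n++
		}
	}
	if n > 1 {
		return errors.New("transport: at most one delivery method may be configured")
	}
	return nil
}

func main() {
	bad := Transport{SMTP: &TransportSMTP{Host: "smarthost.example"}, Fail: &TransportFail{}}
	fmt.Println(checkTransport(bad))
}
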
// TransportSMTP delivers messages by "submission" (SMTP, typically
|
||||
// authenticated) to the queue of a remote host (smarthost), or by relaying
|
||||
// (SMTP, typically unauthenticated).
|
||||
type TransportSMTP struct {
|
||||
Host string `sconf-doc:"Host name to connect to and for verifying its TLS certificate."`
|
||||
Port int `sconf:"optional" sconf-doc:"If unset or 0, the default port for submission(s)/smtp is used: 25 for SMTP, 465 for submissions (with TLS), 587 for submission (possibly with STARTTLS)."`
|
||||
STARTTLSInsecureSkipVerify bool `sconf:"optional" sconf-doc:"If set, an unverifiable remote TLS certificate during STARTTLS is accepted."`
NoSTARTTLS bool `sconf:"optional" sconf-doc:"If set for submission or smtp transport, do not attempt STARTTLS on the connection. Authentication credentials and messages will be transferred in clear text."`
|
||||
Auth *SMTPAuth `sconf:"optional" sconf-doc:"If set, authentication credentials for the remote server."`
|
||||
|
||||
DNSHost dns.Domain `sconf:"-" json:"-"`
|
||||
}
|
||||
|
||||
// SMTPAuth hold authentication credentials used when delivering messages
|
||||
// through a smarthost.
|
||||
type SMTPAuth struct {
|
||||
Username string
|
||||
Password string
|
||||
Mechanisms []string `sconf:"optional" sconf-doc:"Allowed authentication mechanisms. Defaults to SCRAM-SHA-256-PLUS, SCRAM-SHA-256, SCRAM-SHA-1-PLUS, SCRAM-SHA-1, CRAM-MD5. Not included by default: PLAIN. Specify the strongest mechanism known to be implemented by the server to prevent mechanism downgrade attacks."`
|
||||
|
||||
EffectiveMechanisms []string `sconf:"-" json:"-"`
|
||||
}
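
To prevent the downgrade the doc above warns about, an SMTP client would pick the strongest allowed mechanism that the server actually advertises. A hedged sketch with a hypothetical helper:

package main

import "fmt"

// defaultMechanisms mirrors the documented default preference order.
var defaultMechanisms = []string{"SCRAM-SHA-256-PLUS", "SCRAM-SHA-256", "SCRAM-SHA-1-PLUS", "SCRAM-SHA-1", "CRAM-MD5"}

// pickMechanism is a hypothetical helper: walk the allowed mechanisms in
// preference order and return the first one the server advertises.
func pickMechanism(allowed, advertised []string) string {
	offered := map[string]bool{}
	for _, m := range advertised {
		offered[m] = true
	}
	for _, m := range allowed {
		if offered[m] {
			return m
		}
	}
	return "" // nothing acceptable: fail rather than silently downgrade
}

func main() {
	fmt.Println(pickMechanism(defaultMechanisms, []string{"PLAIN", "SCRAM-SHA-256"})) // SCRAM-SHA-256
}
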
type TransportSocks struct {
|
||||
Address string `sconf-doc:"Address of SOCKS proxy, of the form host:port or ip:port."`
|
||||
RemoteIPs []string `sconf-doc:"IP addresses connections from the SOCKS server will originate from. These IP addresses should be configured in the SPF record (keep in mind DNS record time to live (TTL) when adding a SOCKS proxy). Reverse DNS should be set up for these addresses, resolving to RemoteHostname. These are typically the IPv4 and IPv6 addresses of the host in the Address field."`
RemoteHostname string `sconf-doc:"Hostname belonging to RemoteIPs. This name is used in the SMTP EHLO command. This is typically the hostname of the host in the Address field."`
|
||||
// todo: add authentication credentials?
|
||||
|
||||
IPs []net.IP `sconf:"-" json:"-"` // Parsed form of RemoteIPs.
|
||||
Hostname dns.Domain `sconf:"-" json:"-"` // Parsed form of RemoteHostname
|
||||
}
|
||||
|
||||
type TransportDirect struct {
|
||||
DisableIPv4 bool `sconf:"optional" sconf-doc:"If set, outgoing SMTP connections will *NOT* use IPv4 addresses to connect to remote SMTP servers."`
|
||||
DisableIPv6 bool `sconf:"optional" sconf-doc:"If set, outgoing SMTP connections will *NOT* use IPv6 addresses to connect to remote SMTP servers."`
|
||||
|
||||
IPFamily string `sconf:"-" json:"-"`
|
||||
}
|
||||
|
||||
// TransportFail is a transport that fails all delivery attempts.
|
||||
type TransportFail struct {
|
||||
SMTPCode int `sconf:"optional" sconf-doc:"SMTP error code and optional enhanced error code to use for the failure. If empty, 554 is used (transaction failed)."`
|
||||
SMTPMessage string `sconf:"optional" sconf-doc:"Message to include for the rejection. It will be shown in the DSN."`
|
||||
|
||||
// Effective values to use, set when parsing.
|
||||
Code int `sconf:"-"`
|
||||
Message string `sconf:"-"`
|
||||
}
|
||||
|
||||
type Domain struct {
|
||||
Disabled bool `sconf:"optional" sconf-doc:"Disabled domains can be useful during/before migrations. Domains that are disabled can still be configured like normal, including adding addresses using the domain to accounts. However, disabled domains: 1. Do not try to fetch ACME certificates. TLS connections to host names involving the email domain will fail. A TLS certificate for the hostname (that will be used as MX) itself will be requested. 2. Incoming deliveries over SMTP are rejected with a temporary error '450 4.2.1 recipient domain temporarily disabled'. 3. Submissions over SMTP using an (envelope) SMTP MAIL FROM address or message 'From' address of a disabled domain will be rejected with a temporary error '451 4.3.0 sender domain temporarily disabled'. Note that accounts with addresses at disabled domains can still log in and read email (unless the account itself is disabled)."`
Description string `sconf:"optional" sconf-doc:"Free-form description of domain."`
|
||||
ClientSettingsDomain string `sconf:"optional" sconf-doc:"Hostname for client settings instead of the mail server hostname. E.g. mail.<domain>. For future migration to another mail operator without requiring all clients to update their settings, it is convenient to have client settings that reference a subdomain of the hosted domain instead of the hostname of the server where the mail is currently hosted. If empty, the hostname of the mail server is used for client configurations. Unicode name."`
|
||||
LocalpartCatchallSeparator string `sconf:"optional" sconf-doc:"If not empty, only the string before the separator is used for email delivery decisions. For example, if set to \"+\", you+anything@example.com will be delivered to you@example.com."`
LocalpartCatchallSeparators []string `sconf:"optional" sconf-doc:"Similar to LocalpartCatchallSeparator, but in case multiple are needed. For example both \"+\" and \"-\". Only one of LocalpartCatchallSeparator or LocalpartCatchallSeparators can be set. If set, the first separator is used to make unique addresses for outgoing SMTP connections with FromIDLoginAddresses."`
LocalpartCaseSensitive bool `sconf:"optional" sconf-doc:"If set, upper/lower case is relevant for email delivery."`
|
||||
DKIM DKIM `sconf:"optional" sconf-doc:"With DKIM signing, a domain is taking responsibility for (content of) emails it sends, letting receiving mail servers build up a (hopefully positive) reputation of the domain, which can help with mail delivery."`
|
||||
DMARC *DMARC `sconf:"optional" sconf-doc:"With DMARC, a domain publishes, in DNS, a policy on how other mail servers should handle incoming messages with the From-header matching this domain and/or subdomain (depending on the configured alignment). Receiving mail servers use this to build up a reputation of this domain, which can help with mail delivery. A domain can also publish an email address to which reports about DMARC verification results can be sent by verifying mail servers, useful for monitoring. Incoming DMARC reports are automatically parsed, validated, added to metrics and stored in the reporting database for later display in the admin web pages."`
|
||||
MTASTS *MTASTS `sconf:"optional" sconf-doc:"MTA-STS is a mechanism that allows publishing a policy with requirements for WebPKI-verified SMTP STARTTLS connections for email delivered to a domain. Existence of a policy is announced in a DNS TXT record (often unprotected/unverified, MTA-STS's weak spot). If a policy exists, it is fetched with a WebPKI-verified HTTPS request. The policy can indicate that WebPKI-verified SMTP STARTTLS is required, and which MX hosts (optionally with a wildcard pattern) are allowed. MX hosts to deliver to are still taken from DNS (again, not necessarily protected/verified), but messages will only be delivered to domains matching the MX hosts from the published policy. Mail servers look up the MTA-STS policy when first delivering to a domain, then keep a cached copy, periodically checking the DNS record if a new policy is available, and fetching and caching it if so. To update a policy, first serve a new policy with an updated policy ID, then update the DNS record (not the other way around). To remove an enforced policy, publish an updated policy with mode \"none\" for a long enough period so all cached policies have been refreshed (taking DNS TTL and policy max age into account), then remove the policy from DNS, wait for TTL to expire, and stop serving the policy."`
TLSRPT *TLSRPT `sconf:"optional" sconf-doc:"With TLSRPT a domain specifies in DNS where reports about encountered SMTP TLS behaviour should be sent. Useful for monitoring. Incoming TLS reports are automatically parsed, validated, added to metrics and stored in the reporting database for later display in the admin web pages."`
|
||||
Routes []Route `sconf:"optional" sconf-doc:"Routes for delivering outgoing messages through the queue. Each delivery attempt evaluates account routes, these domain routes and finally global routes. The transport of the first matching route is used in the delivery attempt. If no routes match, which is the default with no configured routes, messages are delivered directly from the queue."`
|
||||
Aliases map[string]Alias `sconf:"optional" sconf-doc:"Aliases that cause messages to be delivered to one or more locally configured addresses. Keys are localparts (encoded, as they appear in email addresses)."`
|
||||
|
||||
Domain dns.Domain `sconf:"-"`
|
||||
ClientSettingsDNSDomain dns.Domain `sconf:"-" json:"-"`
|
||||
|
||||
// Set when DMARC and TLSRPT (when set) has an address with different domain (we're
|
||||
// hosting the reporting), and there are no destination addresses configured for
|
||||
// the domain. Disables some functionality related to hosting a domain.
|
||||
ReportsOnly bool `sconf:"-" json:"-"`
|
||||
LocalpartCatchallSeparatorsEffective []string `sconf:"-"` // Either LocalpartCatchallSeparators, the value of LocalpartCatchallSeparator, or empty.
|
||||
}
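
The catchall separator example in the doc above ("+": you+anything@example.com delivered to you@example.com) comes down to cutting the localpart at the first separator. A minimal sketch with a hypothetical helper name:

package main

import (
	"fmt"
	"strings"
)

// canonicalLocalpart is a hypothetical helper: with catchall separator "+",
// localpart "you+anything" is delivered as "you".
func canonicalLocalpart(localpart, separator string) string {
	if separator == "" {
		return localpart
	}
	if i := strings.Index(localpart, separator); i >= 0 {
		return localpart[:i]
	}
	return localpart
}

func main() {
	fmt.Println(canonicalLocalpart("you+anything", "+")) // you
}
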
// todo: allow external addresses as members of aliases. we would add messages for them to the queue for outgoing delivery. we should require an admin addresses to which delivery failures will be delivered (locally, and to use in smtp mail from, so dsns go there). also take care to evaluate smtputf8 (if external address requires utf8 and incoming transaction didn't).
|
||||
// todo: as alternative to PostPublic, allow specifying a list of addresses (dmarc-like verified) that are (the only addresses) allowed to post to the list. if msgfrom is an external address, require a valid dkim signature to prevent dmarc-policy-related issues when delivering to remote members.
|
||||
// todo: add option to require messages sent to an alias have that alias as From or Reply-To address?
|
||||
|
||||
type Alias struct {
|
||||
Addresses []string `sconf-doc:"Expanded addresses to deliver to. These must currently be addresses of local accounts. To prevent duplicate messages, a member address that is also an explicit recipient in the SMTP transaction will only have the message delivered once. If the address in the message From header is a member, that member also won't receive the message."`
PostPublic bool `sconf:"optional" sconf-doc:"If true, anyone can send messages to the list. Otherwise only members, based on message From address, which is assumed to be DMARC-like-verified."`
|
||||
ListMembers bool `sconf:"optional" sconf-doc:"If true, members can see addresses of members."`
|
||||
AllowMsgFrom bool `sconf:"optional" sconf-doc:"If true, members are allowed to send messages with this alias address in the message From header."`
|
||||
|
||||
LocalpartStr string `sconf:"-"` // In encoded form.
|
||||
Domain dns.Domain `sconf:"-"`
|
||||
ParsedAddresses []AliasAddress `sconf:"-"` // Matches addresses.
|
||||
}
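
The deduplication rules in the Addresses doc above can be illustrated with a small expansion sketch (hypothetical helper, simplified to plain strings):

package main

import "fmt"

// aliasRecipients is a hypothetical sketch of the documented expansion rules:
// members that were already explicit SMTP recipients are not delivered to
// again, and the member matching the message From address is skipped.
func aliasRecipients(members, explicitRcpts []string, msgFrom string) []string {
	skip := map[string]bool{msgFrom: true}
	for _, r := range explicitRcpts {
		skip[r] = true
	}
	var out []string
	for _, m := range members {
		if !skip[m] {
			out = append(out, m)
		}
	}
	return out
}

func main() {
	members := []string{"a@example.org", "b@example.org", "c@example.org"}
	fmt.Println(aliasRecipients(members, []string{"b@example.org"}, "a@example.org")) // [c@example.org]
}
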
type AliasAddress struct {
|
||||
Address smtp.Address // Parsed address.
|
||||
AccountName string // Looked up.
|
||||
Destination Destination // Belonging to address.
|
||||
}
|
||||
|
||||
type DMARC struct {
|
||||
Localpart string `sconf-doc:"Address-part before the @ that accepts DMARC reports. Must be non-internationalized. Recommended value: dmarc-reports."`
|
||||
Localpart string `sconf-doc:"Address-part before the @ that accepts DMARC reports. Must be non-internationalized. Recommended value: dmarcreports."`
|
||||
Domain string `sconf:"optional" sconf-doc:"Alternative domain for reporting address, for incoming reports. Typically empty, causing the domain wherein this config exists to be used. Can be used to receive reports for domains that aren't fully hosted on this server. Configure such a domain as a hosted domain without making all the DNS changes, and configure this field with a domain that is fully hosted on this server, so the localpart and the domain of this field form a reporting address. Then only update the DMARC DNS record for the not fully hosted domain, ensuring the reporting address is specified in its \"rua\" field as shown in the suggested DNS settings. Unicode name."`
|
||||
Account string `sconf-doc:"Account to deliver to."`
|
||||
Mailbox string `sconf-doc:"Mailbox to deliver to, e.g. DMARC."`
|
||||
|
||||
ParsedLocalpart smtp.Localpart `sconf:"-"`
|
||||
ParsedLocalpart smtp.Localpart `sconf:"-"` // Lower-case if case-sensitivity is not configured for domain. Not "canonical" for catchall separators for backwards compatibility.
|
||||
DNSDomain dns.Domain `sconf:"-"` // Effective domain, always set based on Domain field or Domain where this is configured.
|
||||
}
|
||||
|
||||
type MTASTS struct {
|
||||
PolicyID string `sconf-doc:"Policies are versioned. The version must be specified in the DNS record. If you change a policy, first change it in mox, then update the DNS record."`
|
||||
Mode mtasts.Mode `sconf-doc:"testing, enforce or none. If set to enforce, a remote SMTP server will not deliver email to us if it cannot make a TLS connection."`
|
||||
PolicyID string `sconf-doc:"Policies are versioned. The version must be specified in the DNS record. If you change a policy, first change it here to update the served policy, then update the DNS record with the updated policy ID."`
|
||||
Mode mtasts.Mode `sconf-doc:"If set to \"enforce\", a remote SMTP server will not deliver email to us if it cannot make a WebPKI-verified SMTP STARTTLS connection. In mode \"testing\", deliveries can be done without verified TLS, but errors will be reported through TLS reporting. In mode \"none\", verified TLS is not required, used for phasing out an MTA-STS policy."`
|
||||
MaxAge time.Duration `sconf-doc:"How long a remote mail server is allowed to cache a policy. Typically 1 or several weeks."`
|
||||
MX []string `sconf:"optional" sconf-doc:"List of server names allowed for SMTP. If empty, the configured hostname is set. Host names can contain a wildcard (*) as a leading label (matching a single label, e.g. *.example matches host.example, not sub.host.example)."`
|
||||
// todo: parse mx as valid mtasts.Policy.MX, with dns.ParseDomain but taking wildcard into account
|
||||
}
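
The MX wildcard rule above (a leading "*" label matches exactly one label) as a minimal matcher sketch; the function is illustrative, not mox's implementation:

package main

import (
	"fmt"
	"strings"
)

// matchMX reports whether host matches the allowed pattern. A leading "*"
// label matches exactly one label: "*.example" matches "host.example" but
// not "sub.host.example", as described above.
func matchMX(pattern, host string) bool {
	if strings.HasPrefix(pattern, "*.") {
		rest := pattern[2:]
		i := strings.Index(host, ".")
		return i > 0 && host[i+1:] == rest
	}
	return pattern == host
}

func main() {
	fmt.Println(matchMX("*.example", "host.example"))     // true
	fmt.Println(matchMX("*.example", "sub.host.example")) // false
}
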
type TLSRPT struct {
|
||||
Localpart string `sconf-doc:"Address-part before the @ that accepts TLSRPT reports. Recommended value: tls-reports."`
|
||||
Localpart string `sconf-doc:"Address-part before the @ that accepts TLSRPT reports. Recommended value: tlsreports."`
|
||||
Domain string `sconf:"optional" sconf-doc:"Alternative domain for reporting address, for incoming reports. Typically empty, causing the domain wherein this config exists to be used. Can be used to receive reports for domains that aren't fully hosted on this server. Configure such a domain as a hosted domain without making all the DNS changes, and configure this field with a domain that is fully hosted on this server, so the localpart and the domain of this field form a reporting address. Then only update the TLSRPT DNS record for the not fully hosted domain, ensuring the reporting address is specified in its \"rua\" field as shown in the suggested DNS settings. Unicode name."`
|
||||
Account string `sconf-doc:"Account to deliver to."`
|
||||
Mailbox string `sconf-doc:"Mailbox to deliver to, e.g. TLSRPT."`
|
||||
|
||||
ParsedLocalpart smtp.Localpart `sconf:"-"`
|
||||
ParsedLocalpart smtp.Localpart `sconf:"-"` // Lower-case if case-sensitivity is not configured for domain. Not "canonical" for catchall separators for backwards compatibility.
|
||||
DNSDomain dns.Domain `sconf:"-"` // Effective domain, always set based on Domain field or Domain where this is configured.
|
||||
}
|
||||
|
||||
type Canonicalization struct {
|
||||
HeaderRelaxed bool `sconf-doc:"If set, some modifications to the headers (mostly whitespace) are allowed."`
|
||||
BodyRelaxed bool `sconf-doc:"If set, some whitespace modifications to the message body are allowed."`
|
||||
}
|
||||
|
||||
type Selector struct {
|
||||
Hash string `sconf:"optional" sconf-doc:"sha256 (default) or (older, not recommended) sha1"`
|
||||
HashEffective string `sconf:"-"`
|
||||
Canonicalization struct {
|
||||
HeaderRelaxed bool `sconf-doc:"If set, some modifications to the headers (mostly whitespace) are allowed."`
|
||||
BodyRelaxed bool `sconf-doc:"If set, some whitespace modifications to the message body are allowed."`
|
||||
} `sconf:"optional"`
|
||||
Headers []string `sconf:"optional" sconf-doc:"Headers to sign with DKIM. If empty, a reasonable default set of headers is selected."`
|
||||
HeadersEffective []string `sconf:"-"`
|
||||
DontSealHeaders bool `sconf:"optional" sconf-doc:"If set, don't prevent duplicate headers from being added. Not recommended."`
|
||||
Expiration string `sconf:"optional" sconf-doc:"Period a signature is valid after signing, as duration, e.g. 72h. The period should be enough for delivery at the final destination, potentially with several hops/relays. In the order of days at least."`
|
||||
PrivateKeyFile string `sconf-doc:"Either an RSA or ed25519 private key file in PKCS8 PEM form."`
|
||||
Hash string `sconf:"optional" sconf-doc:"sha256 (default) or (older, not recommended) sha1."`
|
||||
HashEffective string `sconf:"-"`
|
||||
Canonicalization Canonicalization `sconf:"optional"`
|
||||
Headers []string `sconf:"optional" sconf-doc:"Headers to sign with DKIM. If empty, a reasonable default set of headers is selected."`
|
||||
HeadersEffective []string `sconf:"-"` // Used when signing. Based on Headers from config, or the reasonable default.
|
||||
DontSealHeaders bool `sconf:"optional" sconf-doc:"If set, don't prevent duplicate headers from being added. Not recommended."`
|
||||
Expiration string `sconf:"optional" sconf-doc:"Period a signature is valid after signing, as duration, e.g. 72h. The period should be enough for delivery at the final destination, potentially with several hops/relays. In the order of days at least."`
|
||||
PrivateKeyFile string `sconf-doc:"Either an RSA or ed25519 private key file in PKCS8 PEM form."`
|
||||
|
||||
Algorithm string `sconf:"-"` // "ed25519", "rsa-*", based on private key.
|
||||
ExpirationSeconds int `sconf:"-" json:"-"` // Parsed from Expiration.
|
||||
Key crypto.Signer `sconf:"-" json:"-"` // As parsed with x509.ParsePKCS8PrivateKey.
|
||||
Domain dns.Domain `sconf:"-" json:"-"` // Of selector only, not FQDN.
|
||||
@@ -223,26 +390,81 @@ type DKIM struct {
Sign []string `sconf:"optional" sconf-doc:"List of selectors that emails will be signed with."`
|
||||
}
|
||||
|
||||
type Account struct {
|
||||
Domain string `sconf-doc:"Default domain for addresses specified in Destinations. An address can specify a domain override."`
|
||||
Description string `sconf:"optional" sconf-doc:"Free form description, e.g. full name or alternative contact info."`
|
||||
Destinations map[string]Destination `sconf-doc:"Destinations, specified as (encoded) localpart for Domain, or a full address including domain override."`
|
||||
SubjectPass struct {
|
||||
Period time.Duration `sconf-doc:"How long unique values are accepted after generating, e.g. 12h."` // todo: have a reasonable default for this?
|
||||
} `sconf:"optional" sconf-doc:"If configured, messages classified as weakly spam are rejected with instructions to retry delivery, but this time with a signed token added to the subject. During the next delivery attempt, the signed token will bypass the spam filter. Messages with a clear spam signal, such as a known bad reputation, are rejected/delayed without a signed token."`
|
||||
RejectsMailbox string `sconf:"optional" sconf-doc:"Mail that looks like spam will be rejected, but a copy can be stored temporarily in a mailbox, e.g. Rejects. If mail isn't coming in when you expect, you can look there. The mail still isn't accepted, so the remote mail server may retry (hopefully, if legitimate), or give up (hopefully, if indeed a spammer). Messages are automatically removed from this mailbox, so do not set it to a mailbox that has messages you want to keep."`
|
||||
AutomaticJunkFlags struct {
|
||||
Enabled bool `sconf-doc:"If enabled, flags will be set automatically if they match a regular expression below. When two of the three mailbox regular expressions are set, the remaining one will match all unmatched messages. Messages are matched in the order specified and the search stops on the first match. Mailboxes are lowercased before matching."`
|
||||
JunkMailboxRegexp string `sconf:"optional" sconf-doc:"Example: ^(junk|spam)."`
|
||||
NeutralMailboxRegexp string `sconf:"optional" sconf-doc:"Example: ^(inbox|neutral|postmaster|dmarc|tlsrpt|rejects), and you may wish to add trash depending on how you use it, or leave this empty."`
|
||||
NotJunkMailboxRegexp string `sconf:"optional" sconf-doc:"Example: .* or an empty string."`
|
||||
} `sconf:"optional" sconf-doc:"Automatically set $Junk and $NotJunk flags based on mailbox messages are delivered/moved/copied to. Email clients typically have too limited functionality to conveniently set these flags, especially $NonJunk, but they can all move messages to a different mailbox, so this helps them."`
|
||||
JunkFilter *JunkFilter `sconf:"optional" sconf-doc:"Content-based filtering, using the junk-status of individual messages to rank words in such messages as spam or ham. It is recommended you always set the applicable (non)-junk status on messages, and that you do not empty your Trash because those messages contain valuable ham/spam training information."` // todo: sane defaults for junkfilter
|
||||
type Route struct {
|
||||
FromDomain []string `sconf:"optional" sconf-doc:"Matches if the envelope from domain matches one of the configured domains, or if the list is empty. If a domain starts with a dot, prefixes of the domain also match."`
|
||||
ToDomain []string `sconf:"optional" sconf-doc:"Like FromDomain, but matching against the envelope to domain."`
|
||||
MinimumAttempts int `sconf:"optional" sconf-doc:"Matches if at least this many deliveries have already been attempted. This can be used to attempt sending through a smarthost when direct delivery has failed several times."`
Transport string `sconf:"The transport used for delivering the message that matches requirements of the above fields."`
|
||||
|
||||
DNSDomain dns.Domain `sconf:"-"` // Parsed form of Domain.
|
||||
JunkMailbox *regexp.Regexp `sconf:"-" json:"-"`
|
||||
NeutralMailbox *regexp.Regexp `sconf:"-" json:"-"`
|
||||
NotJunkMailbox *regexp.Regexp `sconf:"-" json:"-"`
|
||||
// todo future: add ToMX, where we look up the MX record of the destination domain and check (the first, any, all?) mx host against the values in ToMX.
|
||||
|
||||
FromDomainASCII []string `sconf:"-"`
|
||||
ToDomainASCII []string `sconf:"-"`
|
||||
ResolvedTransport Transport `sconf:"-" json:"-"`
|
||||
}
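
The evaluation order described above (account routes, then domain routes, then global routes; the first match decides the transport, otherwise direct delivery) as a small sketch with hypothetical, trimmed types:

package main

import (
	"fmt"
	"slices"
)

type Route struct {
	ToDomain        []string
	MinimumAttempts int
	Transport       string
}

// firstTransport is a hypothetical illustration of the documented order:
// the first matching route across account, domain and global routes decides
// the transport; an empty result means direct delivery from the queue.
func firstTransport(toDomain string, attempts int, account, domain, global []Route) string {
	for _, routes := range [][]Route{account, domain, global} {
		for _, r := range routes {
			if attempts < r.MinimumAttempts {
				continue
			}
			if len(r.ToDomain) > 0 && !slices.Contains(r.ToDomain, toDomain) {
				continue
			}
			return r.Transport
		}
	}
	return ""
}

func main() {
	global := []Route{{MinimumAttempts: 3, Transport: "smarthost"}}
	fmt.Println(firstTransport("example.org", 1, nil, nil, global)) // "" (direct delivery)
	fmt.Println(firstTransport("example.org", 3, nil, nil, global)) // smarthost
}
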
// todo: move RejectsMailbox to store.Mailbox.SpecialUse, possibly with "X" prefix?
|
||||
|
||||
// note: outgoing hook events are in ../queue/hooks.go, ../mox-/config.go, ../queue.go and ../webapi/gendoc.sh. keep in sync.
|
||||
|
||||
type OutgoingWebhook struct {
|
||||
URL string `sconf-doc:"URL to POST webhooks."`
|
||||
Authorization string `sconf:"optional" sconf-doc:"If not empty, value of Authorization header to add to HTTP requests."`
|
||||
Events []string `sconf:"optional" sconf-doc:"Events to send outgoing delivery notifications for. If absent, all events are sent. Valid values: delivered, suppressed, delayed, failed, relayed, expanded, canceled, unrecognized."`
|
||||
}
|
||||
|
||||
type IncomingWebhook struct {
|
||||
URL string `sconf-doc:"URL to POST webhooks to for incoming deliveries over SMTP."`
|
||||
Authorization string `sconf:"optional" sconf-doc:"If not empty, value of Authorization header to add to HTTP requests."`
|
||||
}
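// Illustrative sketch only: a minimal HTTP endpoint that could receive the
// webhook POSTs configured with OutgoingWebhook/IncomingWebhook above, checking
// the configured Authorization value. The handler name and expectedAuth are
// assumptions, the payload format is not described here, and the net/http, io
// and log packages are assumed imported.
func webhookHandlerExample(expectedAuth string) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if expectedAuth != "" && r.Header.Get("Authorization") != expectedAuth {
			http.Error(w, "unauthorized", http.StatusUnauthorized)
			return
		}
		body, err := io.ReadAll(r.Body)
		if err != nil {
			http.Error(w, "reading body", http.StatusBadRequest)
			return
		}
		log.Printf("webhook received, %d bytes", len(body))
	})
}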
|
||||
|
||||
type SubjectPass struct {
|
||||
Period time.Duration `sconf-doc:"How long unique values are accepted after generating, e.g. 12h."` // todo: have a reasonable default for this?
|
||||
}
|
||||
|
||||
type AutomaticJunkFlags struct {
|
||||
Enabled bool `sconf-doc:"If enabled, junk/nonjunk flags will be set automatically if they match some of the regular expressions. When two of the three mailbox regular expressions are set, the remaining one will match all unmatched messages. Messages are matched in the order 'junk', 'neutral', 'not junk', and the search stops on the first match. Mailboxes are lowercased before matching."`
|
||||
JunkMailboxRegexp string `sconf:"optional" sconf-doc:"Example: ^(junk|spam)."`
|
||||
NeutralMailboxRegexp string `sconf:"optional" sconf-doc:"Example: ^(inbox|neutral|postmaster|dmarc|tlsrpt|rejects), and you may wish to add trash depending on how you use it, or leave this empty."`
|
||||
NotJunkMailboxRegexp string `sconf:"optional" sconf-doc:"Example: .* or an empty string."`
|
||||
}
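// Illustrative sketch only (not mox's implementation): how the regular
// expressions above are documented to be applied, with mailbox names lowercased
// and matched in the order junk, neutral, not-junk, stopping at the first
// match. The documented fallback where an unset third regexp matches all
// remaining messages is omitted for brevity; regexp and strings are assumed
// imported.
func classifyMailboxExample(f AutomaticJunkFlags, mailbox string) (junk, notJunk bool) {
	if !f.Enabled {
		return false, false
	}
	name := strings.ToLower(mailbox)
	match := func(expr string) bool {
		if expr == "" {
			return false
		}
		re, err := regexp.Compile(expr)
		return err == nil && re.MatchString(name)
	}
	switch {
	case match(f.JunkMailboxRegexp):
		return true, false // Set $Junk.
	case match(f.NeutralMailboxRegexp):
		return false, false // Neither flag.
	case match(f.NotJunkMailboxRegexp):
		return false, true // Set $NotJunk.
	}
	return false, false
}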
|
||||
|
||||
type Account struct {
|
||||
OutgoingWebhook *OutgoingWebhook `sconf:"optional" sconf-doc:"Webhooks for events about outgoing deliveries."`
|
||||
IncomingWebhook *IncomingWebhook `sconf:"optional" sconf-doc:"Webhooks for events about incoming deliveries over SMTP."`
|
||||
FromIDLoginAddresses []string `sconf:"optional" sconf-doc:"Login addresses that cause outgoing email to be sent with SMTP MAIL FROM addresses with a unique id after the localpart catchall separator (which must be enabled when addresses are specified here). Any delivery status notifications (DSN, e.g. for bounces), can be related to the original message and recipient with unique id's. You can login to an account with any valid email address, including variants with the localpart catchall separator. You can use this mechanism to both send outgoing messages with and without unique fromid for a given email address. With the webapi and webmail, a unique id will be generated. For submission, the id from the SMTP MAIL FROM command is used if present, and a unique id is generated otherwise."`
|
||||
KeepRetiredMessagePeriod time.Duration `sconf:"optional" sconf-doc:"Period to keep messages retired from the queue (delivered or failed) around. Keeping retired messages is useful for maintaining the suppression list for transactional email, for matching incoming DSNs to sent messages, and for debugging. The time at which to clean up (remove) is calculated at retire time. E.g. 168h (1 week)."`
|
||||
KeepRetiredWebhookPeriod time.Duration `sconf:"optional" sconf-doc:"Period to keep webhooks retired from the queue (delivered or failed) around. Useful for debugging. The time at which to clean up (remove) is calculated at retire time. E.g. 168h (1 week)."`
|
||||
|
||||
LoginDisabled string `sconf:"optional" sconf-doc:"If non-empty, login attempts on all protocols (e.g. SMTP/IMAP, web interfaces) are rejected with this error message. Useful during migrations. Incoming deliveries for addresses of this account are still accepted as normal."`
|
||||
Domain string `sconf-doc:"Default domain for account. Deprecated behaviour: If a destination is not a full address but only a localpart, this domain is added to form a full address."`
|
||||
Description string `sconf:"optional" sconf-doc:"Free form description, e.g. full name or alternative contact info."`
|
||||
FullName string `sconf:"optional" sconf-doc:"Full name, to use in message From header when composing messages in webmail. Can be overridden per destination."`
|
||||
Destinations map[string]Destination `sconf:"optional" sconf-doc:"Destinations, keys are email addresses (with IDNA domains). All destinations are allowed for logging in with IMAP/SMTP/webmail. If no destinations are configured, the account cannot log in. If the address is of the form '@domain', i.e. with localpart missing, it serves as a catchall for the domain, matching all messages that are not explicitly configured. Deprecated behaviour: If the address is not a full address but a localpart, it is combined with Domain to form a full address."`
|
||||
SubjectPass SubjectPass `sconf:"optional" sconf-doc:"If configured, messages classified as weakly spam are rejected with instructions to retry delivery, but this time with a signed token added to the subject. During the next delivery attempt, the signed token will bypass the spam filter. Messages with a clear spam signal, such as a known bad reputation, are rejected/delayed without a signed token."`
|
||||
QuotaMessageSize int64 `sconf:"optional" sconf-doc:"Default maximum total message size in bytes for the account, overriding any globally configured default maximum size if non-zero. A negative value can be used to have no limit in case there is a limit by default. Attempting to add new messages to an account beyond its maximum total size will result in an error. Useful to prevent a single account from filling storage."`
|
||||
RejectsMailbox string `sconf:"optional" sconf-doc:"Mail that looks like spam will be rejected, but a copy can be stored temporarily in a mailbox, e.g. Rejects. If mail isn't coming in when you expect, you can look there. The mail still isn't accepted, so the remote mail server may retry (hopefully, if legitimate), or give up (hopefully, if indeed a spammer). Messages are automatically removed from this mailbox, so do not set it to a mailbox that has messages you want to keep."`
|
||||
KeepRejects bool `sconf:"optional" sconf-doc:"Don't automatically delete mail in the RejectsMailbox listed above. This can be useful, e.g. for future spam training. It can also cause storage to fill up."`
|
||||
AutomaticJunkFlags AutomaticJunkFlags `sconf:"optional" sconf-doc:"Automatically set $Junk and $NotJunk flags based on the mailbox that messages are delivered/moved/copied to. Email clients typically have too limited functionality to conveniently set these flags, especially $NonJunk, but they can all move messages to a different mailbox, so this helps them."`
|
||||
JunkFilter *JunkFilter `sconf:"optional" sconf-doc:"Content-based filtering, using the junk-status of individual messages to rank words in such messages as spam or ham. It is recommended you always set the applicable (non)-junk status on messages, and that you do not empty your Trash because those messages contain valuable ham/spam training information."` // todo: sane defaults for junkfilter
|
||||
MaxOutgoingMessagesPerDay int `sconf:"optional" sconf-doc:"Maximum number of outgoing messages for this account in a 24 hour window. This limits the damage to recipients and the reputation of this mail server in case of account compromise. Default 1000."`
|
||||
MaxFirstTimeRecipientsPerDay int `sconf:"optional" sconf-doc:"Maximum number of first-time recipients in outgoing messages for this account in a 24 hour window. This limits the damage to recipients and the reputation of this mail server in case of account compromise. Default 200."`
|
||||
NoFirstTimeSenderDelay bool `sconf:"optional" sconf-doc:"Do not apply a delay to SMTP connections before accepting an incoming message from a first-time sender. Can be useful for accounts that send automated responses and want instant replies."`
|
||||
NoCustomPassword bool `sconf:"optional" sconf-doc:"If set, the user of this account cannot set a password of their own choice, but can only set a new randomly generated password, preventing password reuse across services and use of weak passwords. Custom account passwords can be set by the admin."`
|
||||
Routes []Route `sconf:"optional" sconf-doc:"Routes for delivering outgoing messages through the queue. Each delivery attempt evaluates these account routes, domain routes and finally global routes. The transport of the first matching route is used in the delivery attempt. If no routes match, which is the default with no configured routes, messages are delivered directly from the queue."`
|
||||
|
||||
DNSDomain dns.Domain `sconf:"-"` // Parsed form of Domain.
|
||||
JunkMailbox *regexp.Regexp `sconf:"-" json:"-"`
|
||||
NeutralMailbox *regexp.Regexp `sconf:"-" json:"-"`
|
||||
NotJunkMailbox *regexp.Regexp `sconf:"-" json:"-"`
|
||||
ParsedFromIDLoginAddresses []smtp.Address `sconf:"-" json:"-"`
|
||||
Aliases []AddressAlias `sconf:"-"`
|
||||
}
|
||||
|
||||
type AddressAlias struct {
|
||||
SubscriptionAddress string
|
||||
Alias Alias // Without members.
|
||||
MemberAddresses []string // Only if allowed to see.
|
||||
}
|
||||
|
||||
type JunkFilter struct {
|
||||
@ -251,11 +473,19 @@ type JunkFilter struct {
|
||||
}
|
||||
|
||||
type Destination struct {
|
||||
Mailbox string `sconf:"optional" sconf-doc:"Mailbox to deliver to if none of Rulesets match. Default: Inbox."`
|
||||
Rulesets []Ruleset `sconf:"optional" sconf-doc:"Delivery rules based on message and SMTP transaction. You may want to match each mailing list by SMTP MailFrom address, VerifiedDomain and/or List-ID header (typically <listname.example.org> if the list address is listname@example.org), delivering them to their own mailbox."`
|
||||
Mailbox string `sconf:"optional" sconf-doc:"Mailbox to deliver to if none of Rulesets match. Default: Inbox."`
|
||||
Rulesets []Ruleset `sconf:"optional" sconf-doc:"Delivery rules based on message and SMTP transaction. You may want to match each mailing list by SMTP MailFrom address, VerifiedDomain and/or List-ID header (typically <listname.example.org> if the list address is listname@example.org), delivering them to their own mailbox."`
|
||||
SMTPError string `sconf:"optional" sconf-doc:"If non-empty, incoming delivery attempts to this destination will be rejected during SMTP RCPT TO with this error response line. Useful when a catchall address is configured for the domain and messages to some addresses should be rejected. The response line must start with an error code. Currently the following error response codes are allowed: 421 (temporary local error), 550 (user not found). If the line consists of only an error code, an appropriate error message is added. Rejecting messages with a 4xx code invites later retries by the remote, while 5xx codes should prevent further delivery attempts."`
|
||||
MessageAuthRequiredSMTPError string `sconf:"optional" sconf-doc:"If non-empty, an additional DMARC-like message authentication check is done for incoming messages, validating the domain in the From-header of the message. Messages without either an aligned SPF or aligned DKIM pass are rejected during the SMTP DATA command with a permanent error code followed by the message in this field. The domain in the message 'From' header is matched in relaxed or strict mode according to the domain's DMARC policy if present, or relaxed mode (organizational instead of exact domain match) otherwise. Useful for autoresponders that don't want to accept messages they don't want to send an automated reply to."`
|
||||
FullName string `sconf:"optional" sconf-doc:"Full name to use in message From header when composing messages coming from this address with webmail."`
|
||||
|
||||
DMARCReports bool `sconf:"-" json:"-"`
|
||||
TLSReports bool `sconf:"-" json:"-"`
|
||||
HostTLSReports bool `sconf:"-" json:"-"`
|
||||
DomainTLSReports bool `sconf:"-" json:"-"`
|
||||
// Ready to use in SMTP responses.
|
||||
SMTPErrorCode int `sconf:"-" json:"-"`
|
||||
SMTPErrorSecode string `sconf:"-" json:"-"`
|
||||
SMTPErrorMsg string `sconf:"-" json:"-"`
|
||||
}
|
||||
|
||||
// Equal returns whether d and o are equal, only looking at their user-changeable fields.
|
||||
@ -272,16 +502,22 @@ func (d Destination) Equal(o Destination) bool {
|
||||
}
|
||||
|
||||
type Ruleset struct {
|
||||
SMTPMailFromRegexp string `sconf:"optional" sconf-doc:"Matches if this regular expression matches (a substring of) the SMTP MAIL FROM address (not the message From-header). E.g. user@example.org."`
|
||||
SMTPMailFromRegexp string `sconf:"optional" sconf-doc:"Matches if this regular expression matches (a substring of) the SMTP MAIL FROM address (not the message From-header). E.g. '^user@example\\.org$'."`
|
||||
MsgFromRegexp string `sconf:"optional" sconf-doc:"Matches if this regular expression matches (a substring of) the single address in the message From header."`
|
||||
VerifiedDomain string `sconf:"optional" sconf-doc:"Matches if this domain matches an SPF- and/or DKIM-verified (sub)domain."`
|
||||
HeadersRegexp map[string]string `sconf:"optional" sconf-doc:"Matches if these header field/value regular expressions all match (substrings of) the message headers. Header fields and values are converted to lower case before matching. Whitespace is trimmed from the value before matching. A header field can occur multiple times in a message, only one instance has to match. For mailing lists, you could match on ^list-id$ with the value typically the mailing list address in angled brackets with @ replaced with a dot, e.g. <name\\.lists\\.example\\.org>."`
|
||||
// todo: add a SMTPRcptTo check, and MessageFrom that works on a properly parsed From header.
|
||||
// todo: add a SMTPRcptTo check
|
||||
|
||||
ListAllowDomain string `sconf:"optional" sconf-doc:"Influence the spam filtering, this does not change whether this ruleset applies to a message. If this domain matches an SPF- and/or DKIM-verified (sub)domain, the message is accepted without further spam checks, such as a junk filter or DMARC reject evaluation. DMARC rejects should not apply for mailing lists that are not configured to rewrite the From-header of messages that don't have a passing DKIM signature of the From-domain. Otherwise, by rejecting messages, you may be automatically unsubscribed from the mailing list. The assumption is that mailing lists do their own spam filtering/moderation."`
|
||||
// todo: once we implement ARC, we can use dkim domains that we cannot verify but that the arc-verified forwarding mail server was able to verify.
|
||||
IsForward bool `sconf:"optional" sconf-doc:"Influences spam filtering only, this option does not change whether a message matches this ruleset. Can only be used together with SMTPMailFromRegexp and VerifiedDomain. SMTPMailFromRegexp must be set to the address used to deliver the forwarded message, e.g. '^user(|\\+.*)@forward\\.example$'. Changes to junk analysis: 1. Messages are not rejected for failing a DMARC policy, because a legitimate forwarded message without a valid/intact/aligned DKIM signature would otherwise be rejected: any verified SPF domain will be that of the forwarding mail server and therefore 'unaligned'. 2. The sending mail server IP address, the sending EHLO and MAIL FROM domains, and the matching DKIM domain aren't used in future reputation-based spam classifications (but other verified DKIM domains are), because the forwarding server is not a useful spam signal for future messages."`
|
||||
ListAllowDomain string `sconf:"optional" sconf-doc:"Influences spam filtering only, this option does not change whether a message matches this ruleset. If this domain matches an SPF- and/or DKIM-verified (sub)domain, the message is accepted without further spam checks, such as a junk filter or DMARC reject evaluation. DMARC rejects should not apply for mailing lists that are not configured to rewrite the From-header of messages that don't have a passing DKIM signature of the From-domain. Otherwise, by rejecting messages, you may be automatically unsubscribed from the mailing list. The assumption is that mailing lists do their own spam filtering/moderation."`
|
||||
AcceptRejectsToMailbox string `sconf:"optional" sconf-doc:"Influences spam filtering only, this option does not change whether a message matches this ruleset. If a message is classified as spam, it isn't rejected during the SMTP transaction (the normal behaviour), but accepted during the SMTP transaction and delivered to the specified mailbox. The specified mailbox is not automatically cleaned up like the account global Rejects mailbox, unless set to that Rejects mailbox."`
|
||||
|
||||
Mailbox string `sconf-doc:"Mailbox to deliver to if this ruleset matches."`
|
||||
Comment string `sconf:"optional" sconf-doc:"Free-form comments."`
|
||||
|
||||
SMTPMailFromRegexpCompiled *regexp.Regexp `sconf:"-" json:"-"`
|
||||
MsgFromRegexpCompiled *regexp.Regexp `sconf:"-" json:"-"`
|
||||
VerifiedDNSDomain dns.Domain `sconf:"-"`
|
||||
HeadersRegexpCompiled [][2]*regexp.Regexp `sconf:"-" json:"-"`
|
||||
ListAllowDNSDomain dns.Domain `sconf:"-"`
|
||||
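// Illustrative example only: a Ruleset value (written as a Go literal over the
// fields above) that delivers a mailing list to its own mailbox by matching the
// List-ID header. The list name, domain and mailbox are assumptions.
var exampleListRuleset = Ruleset{
	HeadersRegexp: map[string]string{
		"^list-id$": `<mylist\.lists\.example\.org>`,
	},
	ListAllowDomain: "lists.example.org",
	Mailbox:         "Lists/mylist",
	Comment:         "mylist@lists.example.org mailing list",
}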
@ -289,7 +525,7 @@ type Ruleset struct {
|
||||
|
||||
// Equal returns whether r and o are equal, only looking at their user-changeable fields.
|
||||
func (r Ruleset) Equal(o Ruleset) bool {
|
||||
if r.SMTPMailFromRegexp != o.SMTPMailFromRegexp || r.VerifiedDomain != o.VerifiedDomain || r.ListAllowDomain != o.ListAllowDomain || r.Mailbox != o.Mailbox {
|
||||
if r.SMTPMailFromRegexp != o.SMTPMailFromRegexp || r.MsgFromRegexp != o.MsgFromRegexp || r.VerifiedDomain != o.VerifiedDomain || r.IsForward != o.IsForward || r.ListAllowDomain != o.ListAllowDomain || r.AcceptRejectsToMailbox != o.AcceptRejectsToMailbox || r.Mailbox != o.Mailbox || r.Comment != o.Comment {
|
||||
return false
|
||||
}
|
||||
if !reflect.DeepEqual(r.HeadersRegexp, o.HeadersRegexp) {
|
||||
@ -304,22 +540,31 @@ type KeyCert struct {
|
||||
}
|
||||
|
||||
type TLS struct {
|
||||
ACME string `sconf:"optional" sconf-doc:"Name of provider from top-level configuration to use for ACME, e.g. letsencrypt."`
|
||||
KeyCerts []KeyCert `sconf:"optional"`
|
||||
MinVersion string `sconf:"optional" sconf-doc:"Minimum TLS version. Default: TLSv1.2."`
|
||||
ACME string `sconf:"optional" sconf-doc:"Name of provider from top-level configuration to use for ACME, e.g. letsencrypt."`
|
||||
KeyCerts []KeyCert `sconf:"optional" sconf-doc:"Keys and certificates to use for this listener. The files are opened by the privileged root process and passed to the unprivileged mox process, so no special permissions are required on the files. If the private key will not be replaced when refreshing certificates, also consider adding the private key to HostPrivateKeyFiles and configuring DANE TLSA DNS records."`
|
||||
MinVersion string `sconf:"optional" sconf-doc:"Minimum TLS version. Default: TLSv1.2."`
|
||||
HostPrivateKeyFiles []string `sconf:"optional" sconf-doc:"Private keys used for ACME certificates. Specified explicitly so DANE TLSA DNS records can be generated, even before the certificates are requested. DANE is a mechanism to authenticate remote TLS certificates based on a public key or certificate specified in DNS, protected with DNSSEC. DANE is opportunistic and attempted when delivering SMTP with STARTTLS. The private key files must be in PEM format. PKCS8 is recommended, but PKCS1 and EC private keys are recognized as well. Only RSA 2048 bit and ECDSA P-256 keys are currently used. The first of each is used when requesting new certificates through ACME."`
|
||||
ClientAuthDisabled bool `sconf:"optional" sconf-doc:"Disable TLS client authentication with certificates/keys, preventing the TLS server from requesting a TLS certificate from clients. Useful for working around clients that don't handle TLS client authentication well."`
|
||||
|
||||
Config *tls.Config `sconf:"-" json:"-"` // TLS config for non-ACME-verification connections, i.e. SMTP and IMAP, and not port 443.
|
||||
ACMEConfig *tls.Config `sconf:"-" json:"-"` // TLS config that handles ACME verification, for serving on port 443.
|
||||
Config *tls.Config `sconf:"-" json:"-"` // TLS config for non-ACME-verification connections, i.e. SMTP and IMAP, and not port 443. Connections without SNI will use a certificate for the hostname of the listener, connections with an SNI hostname that isn't allowed will be rejected.
|
||||
ConfigFallback *tls.Config `sconf:"-" json:"-"` // Like Config, but uses the certificate for the listener hostname when the requested SNI hostname is not allowed, instead of causing the connection to fail.
|
||||
ACMEConfig *tls.Config `sconf:"-" json:"-"` // TLS config that handles ACME verification, for serving on port 443.
|
||||
HostPrivateRSA2048Keys []crypto.Signer `sconf:"-" json:"-"` // Private keys for new TLS certificates for listener host name, for new certificates with ACME, and for DANE records.
|
||||
HostPrivateECDSAP256Keys []crypto.Signer `sconf:"-" json:"-"`
|
||||
}
|
||||
|
||||
// todo: we could implement matching WebHandler.Domain as IPs too
|
||||
|
||||
type WebHandler struct {
|
||||
LogName string `sconf:"optional" sconf-doc:"Name to use in logging and metrics."`
|
||||
Domain string `sconf-doc:"Both Domain and PathRegexp must match for this WebHandler to match a request. Exactly one of WebStatic, WebRedirect, WebForward must be set."`
|
||||
Domain string `sconf-doc:"Both Domain and PathRegexp must match for this WebHandler to match a request. Exactly one of WebStatic, WebRedirect, WebForward, WebInternal must be set."`
|
||||
PathRegexp string `sconf-doc:"Regular expression matched against request path, must always start with ^ to ensure matching from the start of the path. The matching prefix can optionally be stripped by WebForward. The regular expression does not have to end with $."`
|
||||
DontRedirectPlainHTTP bool `sconf:"optional" sconf-doc:"If set, plain HTTP requests are not automatically permanently redirected (308) to HTTPS. If you don't have a HTTPS webserver configured, set this to true."`
|
||||
Compress bool `sconf:"optional" sconf-doc:"Transparently compress responses (currently with gzip) if the client supports it, the status is 200 OK, no Content-Encoding is set on the response yet and the Content-Type of the response hints that the data is compressible (text/..., specific application/... and .../...+json and .../...+xml). For static files only, a cache with compressed files is kept."`
|
||||
WebStatic *WebStatic `sconf:"optional" sconf-doc:"Serve static files."`
|
||||
WebRedirect *WebRedirect `sconf:"optional" sconf-doc:"Redirect requests to configured URL."`
|
||||
WebForward *WebForward `sconf:"optional" sconf-doc:"Forward requests to another webserver, i.e. reverse proxy."`
|
||||
WebInternal *WebInternal `sconf:"optional" sconf-doc:"Pass request to internal service, like webmail, webapi, etc."`
|
||||
|
||||
Name string `sconf:"-"` // Either LogName, or numeric index if LogName was empty. Used instead of LogName in logging/metrics.
|
||||
DNSDomain dns.Domain `sconf:"-"`
|
||||
@ -335,6 +580,7 @@ func (wh WebHandler) Equal(o WebHandler) bool {
|
||||
x.WebStatic = nil
|
||||
x.WebRedirect = nil
|
||||
x.WebForward = nil
|
||||
x.WebInternal = nil
|
||||
return x
|
||||
}
|
||||
cwh := clean(wh)
|
||||
@ -342,7 +588,7 @@ func (wh WebHandler) Equal(o WebHandler) bool {
|
||||
if cwh != co {
|
||||
return false
|
||||
}
|
||||
if (wh.WebStatic == nil) != (o.WebStatic == nil) || (wh.WebRedirect == nil) != (o.WebRedirect == nil) || (wh.WebForward == nil) != (o.WebForward == nil) {
|
||||
if (wh.WebStatic == nil) != (o.WebStatic == nil) || (wh.WebRedirect == nil) != (o.WebRedirect == nil) || (wh.WebForward == nil) != (o.WebForward == nil) || (wh.WebInternal == nil) != (o.WebInternal == nil) {
|
||||
return false
|
||||
}
|
||||
if wh.WebStatic != nil {
|
||||
@ -354,6 +600,9 @@ func (wh WebHandler) Equal(o WebHandler) bool {
|
||||
if wh.WebForward != nil {
|
||||
return wh.WebForward.equal(*o.WebForward)
|
||||
}
|
||||
if wh.WebInternal != nil {
|
||||
return wh.WebInternal.equal(*o.WebInternal)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
@ -366,7 +615,7 @@ type WebStatic struct {
|
||||
}
|
||||
|
||||
type WebRedirect struct {
|
||||
BaseURL string `sconf:"optional" sconf-doc:"Base URL to redirect to. The path must be empty and will be replaced, either by the request URL path, or by OrigPathRegexp/ReplacePath. Scheme, host, port and fragment stay intact, and query strings are combined. If empty, the response redirects to a different path through OrigPathRegexp and ReplacePath, which must then be set. Use a URL without scheme to redirect without changing the protocol, e.g. //newdomain/."`
|
||||
BaseURL string `sconf:"optional" sconf-doc:"Base URL to redirect to. The path must be empty and will be replaced, either by the request URL path, or by OrigPathRegexp/ReplacePath. Scheme, host, port and fragment stay intact, and query strings are combined. If empty, the response redirects to a different path through OrigPathRegexp and ReplacePath, which must then be set. Use a URL without scheme to redirect without changing the protocol, e.g. //newdomain/. If a redirect would send a request to a URL with the same scheme, host and path, the WebRedirect does not match so a next WebHandler can be tried. This can be used to redirect all plain http traffic to https."`
|
||||
OrigPathRegexp string `sconf:"optional" sconf-doc:"Regular expression for matching path. If set and path does not match, a 404 is returned. The HTTP path used for matching always starts with a slash."`
|
||||
ReplacePath string `sconf:"optional" sconf-doc:"Replacement path for destination URL based on OrigPathRegexp. Implemented with Go's Regexp.ReplaceAllString: $1 is replaced with the text of the first submatch, etc. If both OrigPathRegexp and ReplacePath are empty, BaseURL must be set and all paths are redirected unaltered."`
|
||||
StatusCode int `sconf:"optional" sconf-doc:"Status code to use in redirect, e.g. 307. By default, a permanent redirect (308) is returned."`
|
||||
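// Illustrative sketch only: OrigPathRegexp and ReplacePath are documented as
// using Go's Regexp.ReplaceAllString, so a redirect rewrite behaves roughly
// like this; the patterns are assumptions, and regexp/fmt are assumed imported.
func replacePathExample() {
	origPathRegexp := regexp.MustCompile(`^/old/(.*)`)                  // OrigPathRegexp
	newPath := origPathRegexp.ReplaceAllString("/old/page", "/new/$1") // ReplacePath
	fmt.Println(newPath)                                               // Prints "/new/page".
}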
@ -385,7 +634,7 @@ func (wr WebRedirect) equal(o WebRedirect) bool {
|
||||
|
||||
type WebForward struct {
|
||||
StripPath bool `sconf:"optional" sconf-doc:"Strip the matching WebHandler path from the WebHandler before forwarding the request."`
|
||||
URL string `sconf-doc:"URL to forward HTTP requests to, e.g. http://127.0.0.1:8123/base. If StripPath is false the full request path is added to the URL. Host headers are sent unmodified. New X-Forwarded-{For,Host,Proto} headers are set. Any query string in the URL is ignored. Requests are made using Go's net/http.DefaultTransport that takes environment variables HTTP_PROXY and HTTPS_PROXY into account."`
|
||||
URL string `sconf-doc:"URL to forward HTTP requests to, e.g. http://127.0.0.1:8123/base. If StripPath is false the full request path is added to the URL. Host headers are sent unmodified. New X-Forwarded-{For,Host,Proto} headers are set. Any query string in the URL is ignored. Requests are made using Go's net/http.DefaultTransport that takes environment variables HTTP_PROXY and HTTPS_PROXY into account. Websocket connections are forwarded and data is copied between client and backend without looking at the framing. The websocket 'version' and 'key'/'accept' headers are verified during the handshake, but other websocket headers, including 'origin', 'protocol' and 'extensions' headers, are not inspected and the backend is responsible for verifying/interpreting them."`
|
||||
ResponseHeaders map[string]string `sconf:"optional" sconf-doc:"Headers to add to the response. Useful for adding security- and cache-related headers."`
|
||||
|
||||
TargetURL *url.URL `sconf:"-" json:"-"`
|
||||
@ -396,3 +645,16 @@ func (wf WebForward) equal(o WebForward) bool {
|
||||
o.TargetURL = nil
|
||||
return reflect.DeepEqual(wf, o)
|
||||
}
|
||||
|
||||
type WebInternal struct {
|
||||
BasePath string `sconf-doc:"Path to use as root of internal service, e.g. /webmail/."`
|
||||
Service string `sconf-doc:"Name of the service, values: admin, account, webmail, webapi."`
|
||||
|
||||
Handler http.Handler `sconf:"-" json:"-"`
|
||||
}
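// Illustrative example only: a WebHandler (written as a Go literal over the
// fields above) that serves the built-in webmail service under /webmail/. The
// domain is an assumption.
var exampleWebmailHandler = WebHandler{
	LogName:    "webmail",
	Domain:     "mail.example.org",
	PathRegexp: "^/webmail/",
	WebInternal: &WebInternal{
		BasePath: "/webmail/",
		Service:  "webmail",
	},
}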
|
||||
|
||||
func (wi WebInternal) equal(o WebInternal) bool {
|
||||
wi.Handler = nil
|
||||
o.Handler = nil
|
||||
return reflect.DeepEqual(wi, o)
|
||||
}
|
||||
|
config/doc.go: 1044 lines changed (file diff suppressed because it is too large).
ctl_test.go: new file, 558 lines.
@ -0,0 +1,558 @@
|
||||
//go:build !integration
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/ed25519"
|
||||
cryptorand "crypto/rand"
|
||||
"crypto/x509"
|
||||
"flag"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"math/big"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/mjl-/mox/config"
|
||||
"github.com/mjl-/mox/dmarcdb"
|
||||
"github.com/mjl-/mox/dns"
|
||||
"github.com/mjl-/mox/imapclient"
|
||||
"github.com/mjl-/mox/mlog"
|
||||
"github.com/mjl-/mox/mox-"
|
||||
"github.com/mjl-/mox/mtastsdb"
|
||||
"github.com/mjl-/mox/queue"
|
||||
"github.com/mjl-/mox/smtp"
|
||||
"github.com/mjl-/mox/store"
|
||||
"github.com/mjl-/mox/tlsrptdb"
|
||||
)
|
||||
|
||||
var ctxbg = context.Background()
|
||||
var pkglog = mlog.New("ctl", nil)
|
||||
|
||||
func tcheck(t *testing.T, err error, errmsg string) {
|
||||
if err != nil {
|
||||
t.Helper()
|
||||
t.Fatalf("%s: %v", errmsg, err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestCtl executes commands through ctl. At least the protocols (who sends
// what, and when) are tested. We often don't check the actual results, but
// unhandled errors would cause a panic.
|
||||
func TestCtl(t *testing.T) {
|
||||
os.RemoveAll("testdata/ctl/data")
|
||||
mox.ConfigStaticPath = filepath.FromSlash("testdata/ctl/config/mox.conf")
|
||||
mox.ConfigDynamicPath = filepath.FromSlash("testdata/ctl/config/domains.conf")
|
||||
if errs := mox.LoadConfig(ctxbg, pkglog, true, false); len(errs) > 0 {
|
||||
t.Fatalf("loading mox config: %v", errs)
|
||||
}
|
||||
err := store.Init(ctxbg)
|
||||
tcheck(t, err, "store init")
|
||||
defer store.Close()
|
||||
defer store.Switchboard()()
|
||||
|
||||
err = queue.Init()
|
||||
tcheck(t, err, "queue init")
|
||||
defer queue.Shutdown()
|
||||
|
||||
var cid int64
|
||||
|
||||
testctl := func(fn func(clientxctl *ctl)) {
|
||||
t.Helper()
|
||||
|
||||
cconn, sconn := net.Pipe()
|
||||
clientxctl := ctl{conn: cconn, log: pkglog}
|
||||
serverxctl := ctl{conn: sconn, log: pkglog}
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
cid++
|
||||
servectlcmd(ctxbg, &serverxctl, cid, func() {})
|
||||
close(done)
|
||||
}()
|
||||
fn(&clientxctl)
|
||||
cconn.Close()
|
||||
<-done
|
||||
sconn.Close()
|
||||
}
|
||||
|
||||
// "deliver"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdDeliver(xctl, "mjl@mox.example")
|
||||
})
|
||||
|
||||
// "setaccountpassword"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdSetaccountpassword(xctl, "mjl", "test4321")
|
||||
})
|
||||
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHoldrulesList(xctl)
|
||||
})
|
||||
|
||||
// All messages.
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHoldrulesAdd(xctl, "", "", "")
|
||||
})
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHoldrulesAdd(xctl, "mjl", "", "")
|
||||
})
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHoldrulesAdd(xctl, "", "☺.mox.example", "")
|
||||
})
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHoldrulesAdd(xctl, "mox", "☺.mox.example", "example.com")
|
||||
})
|
||||
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHoldrulesRemove(xctl, 1)
|
||||
})
|
||||
|
||||
// Queue a message to list/change/dump.
|
||||
msg := "Subject: subject\r\n\r\nbody\r\n"
|
||||
msgFile, err := store.CreateMessageTemp(pkglog, "queuedump-test")
|
||||
tcheck(t, err, "temp file")
|
||||
_, err = msgFile.Write([]byte(msg))
|
||||
tcheck(t, err, "write message")
|
||||
_, err = msgFile.Seek(0, 0)
|
||||
tcheck(t, err, "rewind message")
|
||||
defer os.Remove(msgFile.Name())
|
||||
defer msgFile.Close()
|
||||
addr, err := smtp.ParseAddress("mjl@mox.example")
|
||||
tcheck(t, err, "parse address")
|
||||
qml := []queue.Msg{queue.MakeMsg(addr.Path(), addr.Path(), false, false, int64(len(msg)), "<random@localhost>", nil, nil, time.Now(), "subject")}
|
||||
queue.Add(ctxbg, pkglog, "mjl", msgFile, qml...)
|
||||
qmid := qml[0].ID
|
||||
|
||||
// Has entries now.
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHoldrulesList(xctl)
|
||||
})
|
||||
|
||||
// "queuelist"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueList(xctl, queue.Filter{}, queue.Sort{})
|
||||
})
|
||||
|
||||
// "queueholdset"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHoldSet(xctl, queue.Filter{}, true)
|
||||
})
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHoldSet(xctl, queue.Filter{}, false)
|
||||
})
|
||||
|
||||
// "queueschedule"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueSchedule(xctl, queue.Filter{}, true, time.Minute)
|
||||
})
|
||||
|
||||
// "queuetransport"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueTransport(xctl, queue.Filter{}, "socks")
|
||||
})
|
||||
|
||||
// "queuerequiretls"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueRequireTLS(xctl, queue.Filter{}, nil)
|
||||
})
|
||||
|
||||
// "queuedump"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueDump(xctl, fmt.Sprintf("%d", qmid))
|
||||
})
|
||||
|
||||
// "queuefail"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueFail(xctl, queue.Filter{})
|
||||
})
|
||||
|
||||
// "queuedrop"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueDrop(xctl, queue.Filter{})
|
||||
})
|
||||
|
||||
// "queueholdruleslist"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHoldrulesList(xctl)
|
||||
})
|
||||
|
||||
// "queueholdrulesadd"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHoldrulesAdd(xctl, "mjl", "", "")
|
||||
})
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHoldrulesAdd(xctl, "mjl", "localhost", "")
|
||||
})
|
||||
|
||||
// "queueholdrulesremove"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHoldrulesRemove(xctl, 2)
|
||||
})
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHoldrulesList(xctl)
|
||||
})
|
||||
|
||||
// "queuesuppresslist"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueSuppressList(xctl, "mjl")
|
||||
})
|
||||
|
||||
// "queuesuppressadd"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueSuppressAdd(xctl, "mjl", "base@localhost")
|
||||
})
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueSuppressAdd(xctl, "mjl", "other@localhost")
|
||||
})
|
||||
|
||||
// "queuesuppresslookup"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueSuppressLookup(xctl, "mjl", "base@localhost")
|
||||
})
|
||||
|
||||
// "queuesuppressremove"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueSuppressRemove(xctl, "mjl", "base@localhost")
|
||||
})
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueSuppressList(xctl, "mjl")
|
||||
})
|
||||
|
||||
// "queueretiredlist"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueRetiredList(xctl, queue.RetiredFilter{}, queue.RetiredSort{})
|
||||
})
|
||||
|
||||
// "queueretiredprint"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueRetiredPrint(xctl, "1")
|
||||
})
|
||||
|
||||
// "queuehooklist"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHookList(xctl, queue.HookFilter{}, queue.HookSort{})
|
||||
})
|
||||
|
||||
// "queuehookschedule"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHookSchedule(xctl, queue.HookFilter{}, true, time.Minute)
|
||||
})
|
||||
|
||||
// "queuehookprint"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHookPrint(xctl, "1")
|
||||
})
|
||||
|
||||
// "queuehookcancel"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHookCancel(xctl, queue.HookFilter{})
|
||||
})
|
||||
|
||||
// "queuehookretiredlist"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHookRetiredList(xctl, queue.HookRetiredFilter{}, queue.HookRetiredSort{})
|
||||
})
|
||||
|
||||
// "queuehookretiredprint"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHookRetiredPrint(xctl, "1")
|
||||
})
|
||||
|
||||
// "importmbox"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdImport(xctl, true, "mjl", "inbox", "testdata/importtest.mbox")
|
||||
})
|
||||
|
||||
// "importmaildir"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdImport(xctl, false, "mjl", "inbox", "testdata/importtest.maildir")
|
||||
})
|
||||
|
||||
// "domainadd"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigDomainAdd(xctl, false, dns.Domain{ASCII: "mox2.example"}, "mjl", "")
|
||||
})
|
||||
|
||||
// "accountadd"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigAccountAdd(xctl, "mjl2", "mjl2@mox2.example")
|
||||
})
|
||||
|
||||
// "addressadd"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigAddressAdd(xctl, "mjl3@mox2.example", "mjl2")
|
||||
})
|
||||
|
||||
// Add a message.
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdDeliver(xctl, "mjl3@mox2.example")
|
||||
})
|
||||
// "retrain", retrain junk filter.
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdRetrain(xctl, "mjl2")
|
||||
})
|
||||
|
||||
// "addressrm"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigAddressRemove(xctl, "mjl3@mox2.example")
|
||||
})
|
||||
|
||||
// "accountdisabled"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigAccountDisabled(xctl, "mjl2", "testing")
|
||||
})
|
||||
|
||||
// "accountlist"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigAccountList(xctl)
|
||||
})
|
||||
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigAccountDisabled(xctl, "mjl2", "")
|
||||
})
|
||||
|
||||
// "accountrm"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigAccountRemove(xctl, "mjl2")
|
||||
})
|
||||
|
||||
// "domaindisabled"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigDomainDisabled(xctl, dns.Domain{ASCII: "mox2.example"}, true)
|
||||
})
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigDomainDisabled(xctl, dns.Domain{ASCII: "mox2.example"}, false)
|
||||
})
|
||||
|
||||
// "domainrm"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigDomainRemove(xctl, dns.Domain{ASCII: "mox2.example"})
|
||||
})
|
||||
|
||||
// "aliasadd"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigAliasAdd(xctl, "support@mox.example", config.Alias{Addresses: []string{"mjl@mox.example"}})
|
||||
})
|
||||
|
||||
// "aliaslist"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigAliasList(xctl, "mox.example")
|
||||
})
|
||||
|
||||
// "aliasprint"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigAliasPrint(xctl, "support@mox.example")
|
||||
})
|
||||
|
||||
// "aliasupdate"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigAliasUpdate(xctl, "support@mox.example", "true", "true", "true")
|
||||
})
|
||||
|
||||
// "aliasaddaddr"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigAliasAddaddr(xctl, "support@mox.example", []string{"mjl2@mox.example"})
|
||||
})
|
||||
|
||||
// "aliasrmaddr"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigAliasRmaddr(xctl, "support@mox.example", []string{"mjl2@mox.example"})
|
||||
})
|
||||
|
||||
// "aliasrm"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigAliasRemove(xctl, "support@mox.example")
|
||||
})
|
||||
|
||||
// accounttlspubkeyadd
|
||||
certDER := fakeCert(t)
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigTlspubkeyAdd(xctl, "mjl@mox.example", "testkey", false, certDER)
|
||||
})
|
||||
|
||||
// "accounttlspubkeylist"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigTlspubkeyList(xctl, "")
|
||||
})
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigTlspubkeyList(xctl, "mjl")
|
||||
})
|
||||
|
||||
tpkl, err := store.TLSPublicKeyList(ctxbg, "")
|
||||
tcheck(t, err, "list tls public keys")
|
||||
if len(tpkl) != 1 {
|
||||
t.Fatalf("got %d tls public keys, expected 1", len(tpkl))
|
||||
}
|
||||
fingerprint := tpkl[0].Fingerprint
|
||||
|
||||
// "accounttlspubkeyget"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigTlspubkeyGet(xctl, fingerprint)
|
||||
})
|
||||
|
||||
// "accounttlspubkeyrm"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigTlspubkeyRemove(xctl, fingerprint)
|
||||
})
|
||||
|
||||
tpkl, err = store.TLSPublicKeyList(ctxbg, "")
|
||||
tcheck(t, err, "list tls public keys")
|
||||
if len(tpkl) != 0 {
|
||||
t.Fatalf("got %d tls public keys, expected 0", len(tpkl))
|
||||
}
|
||||
|
||||
// "loglevels"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdLoglevels(xctl)
|
||||
})
|
||||
|
||||
// "setloglevels"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdSetLoglevels(xctl, "", "debug")
|
||||
})
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdSetLoglevels(xctl, "smtpserver", "debug")
|
||||
})
|
||||
|
||||
// Export data, import it again
|
||||
xcmdExport(true, false, []string{filepath.FromSlash("testdata/ctl/data/tmp/export/mbox/"), filepath.FromSlash("testdata/ctl/data/accounts/mjl")}, &cmd{log: pkglog})
|
||||
xcmdExport(false, false, []string{filepath.FromSlash("testdata/ctl/data/tmp/export/maildir/"), filepath.FromSlash("testdata/ctl/data/accounts/mjl")}, &cmd{log: pkglog})
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdImport(xctl, true, "mjl", "inbox", filepath.FromSlash("testdata/ctl/data/tmp/export/mbox/Inbox.mbox"))
|
||||
})
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdImport(xctl, false, "mjl", "inbox", filepath.FromSlash("testdata/ctl/data/tmp/export/maildir/Inbox"))
|
||||
})
|
||||
|
||||
// "recalculatemailboxcounts"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdRecalculateMailboxCounts(xctl, "mjl")
|
||||
})
|
||||
|
||||
// "fixmsgsize"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdFixmsgsize(xctl, "mjl")
|
||||
})
|
||||
testctl(func(xctl *ctl) {
|
||||
acc, err := store.OpenAccount(xctl.log, "mjl", false)
|
||||
tcheck(t, err, "open account")
|
||||
defer func() {
|
||||
acc.Close()
|
||||
acc.WaitClosed()
|
||||
}()
|
||||
|
||||
content := []byte("Subject: hi\r\n\r\nbody\r\n")
|
||||
|
||||
deliver := func(m *store.Message) {
|
||||
t.Helper()
|
||||
m.Size = int64(len(content))
|
||||
msgf, err := store.CreateMessageTemp(xctl.log, "ctltest")
|
||||
tcheck(t, err, "create temp file")
|
||||
defer os.Remove(msgf.Name())
|
||||
defer msgf.Close()
|
||||
_, err = msgf.Write(content)
|
||||
tcheck(t, err, "write message file")
|
||||
|
||||
acc.WithWLock(func() {
|
||||
err = acc.DeliverMailbox(xctl.log, "Inbox", m, msgf)
|
||||
tcheck(t, err, "deliver message")
|
||||
})
|
||||
}
|
||||
|
||||
var msgBadSize store.Message
|
||||
deliver(&msgBadSize)
|
||||
|
||||
msgBadSize.Size = 1
|
||||
err = acc.DB.Update(ctxbg, &msgBadSize)
|
||||
tcheck(t, err, "update message to bad size")
|
||||
mb := store.Mailbox{ID: msgBadSize.MailboxID}
|
||||
err = acc.DB.Get(ctxbg, &mb)
|
||||
tcheck(t, err, "get db")
|
||||
mb.Size -= int64(len(content))
|
||||
mb.Size += 1
|
||||
err = acc.DB.Update(ctxbg, &mb)
|
||||
tcheck(t, err, "update mailbox size")
|
||||
|
||||
// Fix up the size.
|
||||
ctlcmdFixmsgsize(xctl, "")
|
||||
|
||||
err = acc.DB.Get(ctxbg, &msgBadSize)
|
||||
tcheck(t, err, "get message")
|
||||
if msgBadSize.Size != int64(len(content)) {
|
||||
t.Fatalf("after fixing, message size is %d, should be %d", msgBadSize.Size, len(content))
|
||||
}
|
||||
})
|
||||
|
||||
// "reparse"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdReparse(xctl, "mjl")
|
||||
})
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdReparse(xctl, "")
|
||||
})
|
||||
|
||||
// "reassignthreads"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdReassignthreads(xctl, "mjl")
|
||||
})
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdReassignthreads(xctl, "")
|
||||
})
|
||||
|
||||
// "backup", backup account.
|
||||
err = dmarcdb.Init()
|
||||
tcheck(t, err, "dmarcdb init")
|
||||
defer dmarcdb.Close()
|
||||
err = mtastsdb.Init(false)
|
||||
tcheck(t, err, "mtastsdb init")
|
||||
defer mtastsdb.Close()
|
||||
err = tlsrptdb.Init()
|
||||
tcheck(t, err, "tlsrptdb init")
|
||||
defer tlsrptdb.Close()
|
||||
testctl(func(xctl *ctl) {
|
||||
os.RemoveAll("testdata/ctl/data/tmp/backup")
|
||||
err := os.WriteFile("testdata/ctl/data/receivedid.key", make([]byte, 16), 0600)
|
||||
tcheck(t, err, "writing receivedid.key")
|
||||
ctlcmdBackup(xctl, filepath.FromSlash("testdata/ctl/data/tmp/backup"), false)
|
||||
})
|
||||
|
||||
// Verify the backup.
|
||||
xcmd := cmd{
|
||||
flag: flag.NewFlagSet("", flag.ExitOnError),
|
||||
flagArgs: []string{filepath.FromSlash("testdata/ctl/data/tmp/backup/data")},
|
||||
}
|
||||
cmdVerifydata(&xcmd)
|
||||
|
||||
// IMAP connection.
|
||||
testctl(func(xctl *ctl) {
|
||||
a, b := net.Pipe()
|
||||
go func() {
|
||||
opts := imapclient.Opts{
|
||||
Logger: slog.Default().With("cid", mox.Cid()),
|
||||
Error: func(err error) { panic(err) },
|
||||
}
|
||||
client, err := imapclient.New(a, &opts)
|
||||
tcheck(t, err, "new imapclient")
|
||||
client.Select("inbox")
|
||||
client.Logout()
|
||||
defer a.Close()
|
||||
}()
|
||||
ctlcmdIMAPServe(xctl, "mjl@mox.example", b, b)
|
||||
})
|
||||
}
|
||||
|
||||
func fakeCert(t *testing.T) []byte {
|
||||
t.Helper()
|
||||
seed := make([]byte, ed25519.SeedSize)
|
||||
privKey := ed25519.NewKeyFromSeed(seed) // Fake key, don't use this for real!
|
||||
template := &x509.Certificate{
|
||||
SerialNumber: big.NewInt(1), // Required field...
|
||||
}
|
||||
localCertBuf, err := x509.CreateCertificate(cryptorand.Reader, template, template, privKey.Public(), privKey)
|
||||
tcheck(t, err, "making certificate")
|
||||
return localCertBuf
|
||||
}
|
curves.go: new file, 14 lines.
@ -0,0 +1,14 @@
|
||||
//go:build !go1.24
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
)
|
||||
|
||||
var curvesList = []tls.CurveID{
|
||||
tls.CurveP256,
|
||||
tls.CurveP384,
|
||||
tls.CurveP521,
|
||||
tls.X25519,
|
||||
}
|
curves_go124.go: new file, 15 lines.
@ -0,0 +1,15 @@
|
||||
//go:build go1.24
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
)
|
||||
|
||||
var curvesList = []tls.CurveID{
|
||||
tls.CurveP256,
|
||||
tls.CurveP384,
|
||||
tls.CurveP521,
|
||||
tls.X25519,
|
||||
tls.X25519MLKEM768,
|
||||
}
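// Illustrative sketch only: how curvesList could be applied to a TLS config via
// the standard crypto/tls CurvePreferences field. Whether mox wires it up
// exactly like this is an assumption not shown in this diff; the build tags
// above merely add X25519MLKEM768 when building with Go 1.24 or newer.
func exampleCurvesConfig() *tls.Config {
	return &tls.Config{CurvePreferences: curvesList}
}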
|
dane/dane.go: new file, 516 lines.
@ -0,0 +1,516 @@
|
||||
// Package dane verifies TLS certificates through DNSSEC-verified TLSA records.
|
||||
//
|
||||
// On the internet, TLS certificates are commonly verified by checking if they are
|
||||
// signed by one of many commonly trusted Certificate Authorities (CAs). This is
|
||||
// PKIX or WebPKI. With DANE, TLS certificates are verified through
|
||||
// DNSSEC-protected DNS records of type TLSA. These TLSA records specify the rules
|
||||
// for verification ("usage") and whether a full certificate ("selector" cert) is
|
||||
// checked or only its "subject public key info" ("selector" spki). The (hash of)
|
||||
// the certificate or "spki" is included in the TLSA record ("matchtype").
|
||||
//
|
||||
// DANE SMTP connections have two allowed "usages" (verification rules):
|
||||
// - DANE-EE, which only checks if the certificate or spki match, without the
|
||||
// WebPKI verification of expiration, name or signed-by-trusted-party verification.
|
||||
// - DANE-TA, which does verification similar to PKIX/WebPKI, but verifies against
|
||||
// a certificate authority ("trust anchor", or "TA") specified in the TLSA record
|
||||
// instead of the CA pool.
|
||||
//
|
||||
// DANE has two more "usages", that may be used with protocols other than SMTP:
|
||||
// - PKIX-EE, which matches the certificate or spki, and also verifies the
|
||||
// certificate against the CA pool.
|
||||
// - PKIX-TA, which verifies the certificate or spki against a "trust anchor"
|
||||
// specified in the TLSA record, that also has to be trusted by the CA pool.
|
||||
//
|
||||
// TLSA records are looked up for a specific port number, protocol (tcp/udp) and
|
||||
// host name. Each port can have different TLSA records. TLSA records must be
|
||||
// signed and verified with DNSSEC before they can be trusted and used.
|
||||
//
|
||||
// TLSA records are looked up under "TLSA candidate base domains". The domain
|
||||
// where the TLSA records are found is the "TLSA base domain". If the host to
|
||||
// connect to is a CNAME that can be followed with DNSSEC protection, it is the
|
||||
// first TLSA candidate base domain. If no protected records are found, the
|
||||
// original host name is the second TLSA candidate base domain.
|
||||
//
|
||||
// For TLS connections, the TLSA base domain is used with SNI during the
|
||||
// handshake.
|
||||
//
|
||||
// For TLS certificate verification that requires PKIX/WebPKI/trusted-anchor
|
||||
// verification (all except DANE-EE), the potential second TLSA candidate base
|
||||
// domain name is also a valid hostname. With SMTP, additionally for hosts found in
|
||||
// MX records for a "next-hop domain", the "original next-hop domain" (domain of an
|
||||
// email address to deliver to) is also a valid name, as is the "CNAME-expanded
|
||||
// original next-hop domain", bringing the potential total allowed names to four
|
||||
// (if CNAMEs are followed for the MX hosts).
|
||||
package dane
|
||||
|
||||
// todo: why is https://datatracker.ietf.org/doc/html/draft-barnes-dane-uks-00 not in use? sounds reasonable.
|
||||
// todo: add a DialSRV function that accepts a domain name, looks up srv records, dials the service, verifies dane certificate and returns the connection. for ../rfc/7673
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"crypto/sha512"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"net"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/mjl-/adns"
|
||||
|
||||
"github.com/mjl-/mox/dns"
|
||||
"github.com/mjl-/mox/mlog"
|
||||
"github.com/mjl-/mox/stub"
|
||||
"slices"
|
||||
)
|
||||
|
||||
var (
|
||||
MetricVerify stub.Counter = stub.CounterIgnore{}
|
||||
MetricVerifyErrors stub.Counter = stub.CounterIgnore{}
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrNoRecords means no TLSA records were found and host has not opted into DANE.
|
||||
ErrNoRecords = errors.New("dane: no tlsa records")
|
||||
|
||||
// ErrInsecure indicates insecure DNS responses were encountered while looking up
|
||||
// the host, CNAME records, or TLSA records.
|
||||
ErrInsecure = errors.New("dane: dns lookups insecure")
|
||||
|
||||
// ErrNoMatch means some TLSA records were found, but none can be verified against
|
||||
// the remote TLS certificate.
|
||||
ErrNoMatch = errors.New("dane: no match between certificate and tlsa records")
|
||||
)
|
||||
|
||||
// VerifyError is an error encountered while verifying a DANE TLSA record. For
|
||||
// example, an error encountered with x509 certificate trusted-anchor verification.
|
||||
// A TLSA record that does not match a TLS certificate is not a VerifyError.
|
||||
type VerifyError struct {
|
||||
Err error // Underlying error, possibly from crypto/x509.
|
||||
Record adns.TLSA // Cause of error.
|
||||
}
|
||||
|
||||
// Error returns a string explaining this is a dane verify error along with the
|
||||
// underlying error.
|
||||
func (e VerifyError) Error() string {
|
||||
return fmt.Sprintf("dane verify error: %s", e.Err)
|
||||
}
|
||||
|
||||
// Unwrap returns the underlying error.
|
||||
func (e VerifyError) Unwrap() error {
|
||||
return e.Err
|
||||
}
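// Illustrative sketch only: how a caller might distinguish the sentinel errors
// and VerifyError defined above, e.g. to decide whether to fall back to
// delivery without DANE. The classification strings are assumptions.
func describeDaneErrorExample(err error) string {
	var verr VerifyError
	switch {
	case errors.Is(err, ErrNoRecords):
		return "host has not opted into dane"
	case errors.Is(err, ErrInsecure):
		return "dns lookups were not dnssec-protected"
	case errors.As(err, &verr):
		return "tlsa record could not be verified: " + verr.Err.Error()
	case errors.Is(err, ErrNoMatch):
		return "no tlsa record matched the certificate"
	default:
		return "other error: " + err.Error()
	}
}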
|
||||
|
||||
// Dial looks up DNSSEC-protected DANE TLSA records for the domain name and
|
||||
// port/service in address, checks for allowed usages, makes a network connection
|
||||
// and verifies the remote certificate against the TLSA records. If verification
|
||||
// succeeds, the verified record is returned.
|
||||
//
|
||||
// Different protocols require different usages. For example, SMTP with STARTTLS
|
||||
// for delivery only allows usages DANE-TA and DANE-EE. If allowedUsages is
|
||||
// non-nil, only the specified usages are taken into account when verifying, and
|
||||
// any others ignored.
|
||||
//
|
||||
// Errors that can be returned, possibly in wrapped form:
|
||||
// - ErrNoRecords, also in case the DNS response indicates "not found".
|
||||
// - adns.DNSError, potentially wrapping adns.ExtendedError of which some can
|
||||
// indicate DNSSEC errors.
|
||||
// - ErrInsecure
|
||||
// - VerifyError, potentially wrapping errors from crypto/x509.
|
||||
func Dial(ctx context.Context, elog *slog.Logger, resolver dns.Resolver, network, address string, allowedUsages []adns.TLSAUsage, pkixRoots *x509.CertPool) (net.Conn, adns.TLSA, error) {
|
||||
log := mlog.New("dane", elog)
|
||||
|
||||
// Split host and port.
|
||||
host, portstr, err := net.SplitHostPort(address)
|
||||
if err != nil {
|
||||
return nil, adns.TLSA{}, fmt.Errorf("parsing address: %w", err)
|
||||
}
|
||||
port, err := resolver.LookupPort(ctx, network, portstr)
|
||||
if err != nil {
|
||||
return nil, adns.TLSA{}, fmt.Errorf("parsing port: %w", err)
|
||||
}
|
||||
|
||||
hostDom, err := dns.ParseDomain(strings.TrimSuffix(host, "."))
|
||||
if err != nil {
|
||||
return nil, adns.TLSA{}, fmt.Errorf("parsing host: %w", err)
|
||||
}
|
||||
|
||||
// ../rfc/7671:1015
|
||||
// First follow CNAMEs for host. If the path to the final name is secure, we must
|
||||
// lookup TLSA there first, then fallback to the original name. If the final name
|
||||
// is secure that's also the SNI server name we must use, with the original name as
|
||||
// allowed host during certificate name checks (for all TLSA usages other than
|
||||
// DANE-EE).
|
||||
cnameDom := hostDom
|
||||
cnameAuthentic := true
|
||||
for i := 0; ; i += 1 {
|
||||
if i == 10 {
|
||||
return nil, adns.TLSA{}, fmt.Errorf("too many cname lookups")
|
||||
}
|
||||
cname, cnameResult, err := resolver.LookupCNAME(ctx, cnameDom.ASCII+".")
|
||||
cnameAuthentic = cnameAuthentic && cnameResult.Authentic
|
||||
if !cnameResult.Authentic && i == 0 {
|
||||
return nil, adns.TLSA{}, fmt.Errorf("%w: cname lookup insecure", ErrInsecure)
|
||||
} else if dns.IsNotFound(err) {
|
||||
break
|
||||
} else if err != nil {
|
||||
return nil, adns.TLSA{}, fmt.Errorf("resolving cname %s: %w", cnameDom, err)
|
||||
} else if d, err := dns.ParseDomain(strings.TrimSuffix(cname, ".")); err != nil {
|
||||
return nil, adns.TLSA{}, fmt.Errorf("parsing cname: %w", err)
|
||||
} else {
|
||||
cnameDom = d
|
||||
}
|
||||
}
|
||||
|
||||
// We lookup the IP.
|
||||
ipnetwork := "ip"
|
||||
if strings.HasSuffix(network, "4") {
|
||||
ipnetwork += "4"
|
||||
} else if strings.HasSuffix(network, "6") {
|
||||
ipnetwork += "6"
|
||||
}
|
||||
ips, _, err := resolver.LookupIP(ctx, ipnetwork, cnameDom.ASCII+".")
|
||||
// note: For SMTP with opportunistic DANE we would stop here with an insecure
|
||||
// response. But as long as we have a verified original tlsa base name, we
|
||||
// can continue with regular DANE.
|
||||
if err != nil {
|
||||
return nil, adns.TLSA{}, fmt.Errorf("resolving ips: %w", err)
|
||||
} else if len(ips) == 0 {
|
||||
return nil, adns.TLSA{}, &adns.DNSError{Err: "no ips for host", Name: cnameDom.ASCII, IsNotFound: true}
|
||||
}
|
||||
|
||||
// Lookup TLSA records. If resolving CNAME was secure, we try that first. Otherwise
|
||||
// we try at the secure original domain.
|
||||
baseDom := hostDom
|
||||
if cnameAuthentic {
|
||||
baseDom = cnameDom
|
||||
}
|
||||
var records []adns.TLSA
|
||||
var result adns.Result
|
||||
for {
|
||||
var err error
|
||||
records, result, err = resolver.LookupTLSA(ctx, port, network, baseDom.ASCII+".")
|
||||
// If no (secure) records can be found at the final cname, and there is an original
|
||||
// name, try at original name.
|
||||
// ../rfc/7671:1015
|
||||
if baseDom != hostDom && (dns.IsNotFound(err) || !result.Authentic) {
|
||||
baseDom = hostDom
|
||||
continue
|
||||
}
|
||||
if !result.Authentic {
|
||||
return nil, adns.TLSA{}, ErrInsecure
|
||||
} else if dns.IsNotFound(err) {
|
||||
return nil, adns.TLSA{}, ErrNoRecords
|
||||
} else if err != nil {
|
||||
return nil, adns.TLSA{}, fmt.Errorf("lookup dane tlsa records: %w", err)
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
// Keep only the allowed usages.
|
||||
if allowedUsages != nil {
|
||||
o := 0
|
||||
for _, r := range records {
|
||||
if slices.Contains(allowedUsages, r.Usage) {
|
||||
records[o] = r
|
||||
o++
|
||||
}
|
||||
}
|
||||
records = records[:o]
|
||||
if len(records) == 0 {
|
||||
// No point in dialing when we know we won't be able to verify the remote TLS
|
||||
// certificate.
|
||||
return nil, adns.TLSA{}, fmt.Errorf("no usable tlsa records remaining: %w", ErrNoMatch)
|
||||
}
|
||||
}
|
||||
|
||||
// We use the base domain for SNI, allowing the original domain as well.
|
||||
// ../rfc/7671:1021
|
||||
var moreAllowedHosts []dns.Domain
|
||||
if baseDom != hostDom {
|
||||
moreAllowedHosts = []dns.Domain{hostDom}
|
||||
}
|
||||
|
||||
// Dial the remote host.
|
||||
timeout := 30 * time.Second
|
||||
if deadline, ok := ctx.Deadline(); ok && len(ips) > 0 {
|
||||
timeout = time.Until(deadline) / time.Duration(len(ips))
|
||||
}
|
||||
dialer := &net.Dialer{Timeout: timeout}
|
||||
var conn net.Conn
|
||||
var dialErrs []error
|
||||
for _, ip := range ips {
|
||||
addr := net.JoinHostPort(ip.String(), portstr)
|
||||
c, err := dialer.DialContext(ctx, network, addr)
|
||||
if err != nil {
|
||||
dialErrs = append(dialErrs, err)
|
||||
continue
|
||||
}
|
||||
conn = c
|
||||
break
|
||||
}
|
||||
if conn == nil {
|
||||
return nil, adns.TLSA{}, errors.Join(dialErrs...)
|
||||
}
|
||||
|
||||
var verifiedRecord adns.TLSA
|
||||
config := TLSClientConfig(log.Logger, records, baseDom, moreAllowedHosts, &verifiedRecord, pkixRoots)
|
||||
tlsConn := tls.Client(conn, &config)
|
||||
if err := tlsConn.HandshakeContext(ctx); err != nil {
|
||||
xerr := conn.Close()
|
||||
log.Check(xerr, "closing connection")
|
||||
return nil, adns.TLSA{}, err
|
||||
}
|
||||
return tlsConn, verifiedRecord, nil
|
||||
}
|
||||
|
||||
// TLSClientConfig returns a tls.Config to be used for dialing/handshaking a
|
||||
// TLS connection with DANE verification.
|
||||
//
|
||||
// Callers should only pass records that are allowed for the intended use. DANE
|
||||
// with SMTP only allows DANE-EE and DANE-TA usages, not the PKIX-usages.
|
||||
//
|
||||
// The config has InsecureSkipVerify set to true, with a custom VerifyConnection
|
||||
// function for verifying DANE. Its VerifyConnection can return ErrNoMatch and
|
||||
// additionally one or more (wrapped) errors of type VerifyError.
|
||||
//
|
||||
// The TLS config uses allowedHost for SNI.
|
||||
//
|
||||
// If verifiedRecord is not nil, it is set to the record that was successfully
|
||||
// verified, if any.
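//
// Example use (a sketch; conn, ctx, logger, records, host and roots are
// assumed to be set up by the caller):
//
//	var verified adns.TLSA
//	config := TLSClientConfig(logger, records, host, nil, &verified, roots)
//	tlsConn := tls.Client(conn, &config)
//	if err := tlsConn.HandshakeContext(ctx); err != nil {
//		// Handshake errors include DANE verification failures, e.g. ErrNoMatch.
//	}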
|
||||
func TLSClientConfig(elog *slog.Logger, records []adns.TLSA, allowedHost dns.Domain, moreAllowedHosts []dns.Domain, verifiedRecord *adns.TLSA, pkixRoots *x509.CertPool) tls.Config {
|
||||
log := mlog.New("dane", elog)
|
||||
return tls.Config{
|
||||
ServerName: allowedHost.ASCII, // For SNI.
|
||||
InsecureSkipVerify: true,
|
||||
VerifyConnection: func(cs tls.ConnectionState) error {
|
||||
verified, record, err := Verify(log.Logger, records, cs, allowedHost, moreAllowedHosts, pkixRoots)
|
||||
log.Debugx("dane verification", err, slog.Bool("verified", verified), slog.Any("record", record))
|
||||
if verified {
|
||||
if verifiedRecord != nil {
|
||||
*verifiedRecord = record
|
||||
}
|
||||
return nil
|
||||
} else if err == nil {
|
||||
return ErrNoMatch
|
||||
}
|
||||
return fmt.Errorf("%w, and error(s) encountered during verification: %w", ErrNoMatch, err)
|
||||
},
|
||||
MinVersion: tls.VersionTLS12, // ../rfc/8996:31 ../rfc/8997:66
|
||||
}
|
||||
}
|
||||
|
||||
// Verify checks if the TLS connection state can be verified against DANE TLSA
|
||||
// records.
|
||||
//
|
||||
// allowedHost along with the optional moreAllowedHosts are the host names that are
|
||||
// allowed during certificate verification (as used by PKIX-TA, PKIX-EE, DANE-TA,
|
||||
// but not DANE-EE). A typical connection would allow just one name, but some uses
|
||||
// of DANE allow multiple, like SMTP, which allows up to four valid names for a TLS
|
||||
// certificate based on MX/CNAME/TLSA/DNSSEC lookup results.
|
||||
//
|
||||
// When one of the records matches, Verify returns true, along with the matching
|
||||
// record and a nil error.
|
||||
// If there is no match, then in the typical case Verify returns: false, a zero
|
||||
// record value and a nil error.
|
||||
// If an error is encountered while verifying a record, e.g. for x509
|
||||
// trusted-anchor verification, an error may be returned, typically one or more
|
||||
// (wrapped) errors of type VerifyError.
|
||||
//
|
||||
// Verify is useful when DANE verification and its results have to be done
|
||||
// separately from other validation, e.g. for MTA-STS. The caller can create a
|
||||
// tls.Config with a VerifyConnection function that checks DANE and MTA-STS
|
||||
// separately.
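//
// Example, as the VerifyConnection function of a tls.Config (a sketch; logger,
// records, host and roots are assumed to be available to the caller):
//
//	VerifyConnection: func(cs tls.ConnectionState) error {
//		verified, _, err := Verify(logger, records, cs, host, nil, roots)
//		if verified {
//			return nil
//		} else if err == nil {
//			return ErrNoMatch
//		}
//		return err
//	},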
|
||||
func Verify(elog *slog.Logger, records []adns.TLSA, cs tls.ConnectionState, allowedHost dns.Domain, moreAllowedHosts []dns.Domain, pkixRoots *x509.CertPool) (verified bool, matching adns.TLSA, rerr error) {
|
||||
log := mlog.New("dane", elog)
|
||||
MetricVerify.Inc()
|
||||
if len(records) == 0 {
|
||||
MetricVerifyErrors.Inc()
|
||||
return false, adns.TLSA{}, fmt.Errorf("verify requires at least one tlsa record")
|
||||
}
|
||||
var errs []error
|
||||
for _, r := range records {
|
||||
ok, err := verifySingle(log, r, cs, allowedHost, moreAllowedHosts, pkixRoots)
|
||||
if err != nil {
|
||||
errs = append(errs, VerifyError{err, r})
|
||||
} else if ok {
|
||||
return true, r, nil
|
||||
}
|
||||
}
|
||||
MetricVerifyErrors.Inc()
|
||||
return false, adns.TLSA{}, errors.Join(errs...)
|
||||
}
|
||||
|
||||
// verifySingle verifies the TLS connection against a single DANE TLSA record.
|
||||
//
|
||||
// If the remote TLS certificate matches with the TLSA record, true is
|
||||
// returned. Errors may be encountered while verifying, e.g. when checking one
|
||||
// of the allowed hosts against a TLSA record. A typical non-matching/verified
|
||||
// TLSA record returns a nil error. But in some cases, e.g. when encountering
|
||||
// errors while verifying certificates against a trust-anchor, an error can be
|
||||
// returned with one or more underlying x509 verification errors. A non-nil error
// is only returned when verified is false.
|
||||
func verifySingle(log mlog.Log, tlsa adns.TLSA, cs tls.ConnectionState, allowedHost dns.Domain, moreAllowedHosts []dns.Domain, pkixRoots *x509.CertPool) (verified bool, rerr error) {
|
||||
if len(cs.PeerCertificates) == 0 {
|
||||
return false, fmt.Errorf("no server certificate")
|
||||
}
|
||||
|
||||
match := func(cert *x509.Certificate) bool {
|
||||
var buf []byte
|
||||
switch tlsa.Selector {
|
||||
case adns.TLSASelectorCert:
|
||||
buf = cert.Raw
|
||||
case adns.TLSASelectorSPKI:
|
||||
buf = cert.RawSubjectPublicKeyInfo
|
||||
default:
|
||||
return false
|
||||
}
|
||||
|
||||
switch tlsa.MatchType {
|
||||
case adns.TLSAMatchTypeFull:
|
||||
case adns.TLSAMatchTypeSHA256:
|
||||
d := sha256.Sum256(buf)
|
||||
buf = d[:]
|
||||
case adns.TLSAMatchTypeSHA512:
|
||||
d := sha512.Sum512(buf)
|
||||
buf = d[:]
|
||||
default:
|
||||
return false
|
||||
}
|
||||
|
||||
return bytes.Equal(buf, tlsa.CertAssoc)
|
||||
}
|
||||
|
||||
pkixVerify := func(host dns.Domain) ([][]*x509.Certificate, error) {
|
||||
// Default Verify checks for expiration. We pass the host name to check. And we
|
||||
// configure the intermediates. The roots are filled in by the x509 package.
|
||||
opts := x509.VerifyOptions{
|
||||
DNSName: host.ASCII,
|
||||
Intermediates: x509.NewCertPool(),
|
||||
Roots: pkixRoots,
|
||||
}
|
||||
for _, cert := range cs.PeerCertificates[1:] {
|
||||
opts.Intermediates.AddCert(cert)
|
||||
}
|
||||
chains, err := cs.PeerCertificates[0].Verify(opts)
|
||||
return chains, err
|
||||
}
|
||||
|
||||
switch tlsa.Usage {
|
||||
case adns.TLSAUsagePKIXTA:
|
||||
// We cannot get at the system trusted ca certificates to look for the trusted
|
||||
// anchor. So we just ask Go to verify, then see if any of the chains include the
|
||||
// ca certificate.
|
||||
var errs []error
|
||||
for _, host := range append([]dns.Domain{allowedHost}, moreAllowedHosts...) {
|
||||
chains, err := pkixVerify(host)
|
||||
log.Debugx("pkix-ta verify", err)
|
||||
if err != nil {
|
||||
errs = append(errs, err)
|
||||
continue
|
||||
}
|
||||
// The chains by x509's Verify should include the longest possible match, so it is
|
||||
// sure to include the trusted anchor. ../rfc/7671:835
|
||||
for _, chain := range chains {
|
||||
// If pkix verified, check if any of the certificates match.
|
||||
for i := len(chain) - 1; i >= 0; i-- {
|
||||
if match(chain[i]) {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return false, errors.Join(errs...)
|
||||
|
||||
case adns.TLSAUsagePKIXEE:
|
||||
// Check for a certificate match.
|
||||
if !match(cs.PeerCertificates[0]) {
|
||||
return false, nil
|
||||
}
|
||||
// And do regular pkix checks, ../rfc/7671:799
|
||||
var errs []error
|
||||
for _, host := range append([]dns.Domain{allowedHost}, moreAllowedHosts...) {
|
||||
_, err := pkixVerify(host)
|
||||
log.Debugx("pkix-ee verify", err)
|
||||
if err == nil {
|
||||
return true, nil
|
||||
}
|
||||
errs = append(errs, err)
|
||||
}
|
||||
return false, errors.Join(errs...)
|
||||
|
||||
case adns.TLSAUsageDANETA:
|
||||
// We set roots, so the system defaults don't get used. Verify checks the host name
|
||||
// (set below) and checks for expiration.
|
||||
opts := x509.VerifyOptions{
|
||||
Intermediates: x509.NewCertPool(),
|
||||
Roots: x509.NewCertPool(),
|
||||
}
|
||||
|
||||
// If the full certificate was included, we must add it to the valid roots, the TLS
|
||||
// server may not send it. ../rfc/7671:692
|
||||
var found bool
|
||||
if tlsa.Selector == adns.TLSASelectorCert && tlsa.MatchType == adns.TLSAMatchTypeFull {
|
||||
cert, err := x509.ParseCertificate(tlsa.CertAssoc)
|
||||
if err != nil {
|
||||
log.Debugx("parsing full exact certificate from tlsa record to use as root for usage dane-trusted-anchor", err)
|
||||
// Continue anyway, perhaps the server sends it again in a way that the tls package can parse? (unlikely)
|
||||
} else {
|
||||
opts.Roots.AddCert(cert)
|
||||
found = true
|
||||
}
|
||||
}
|
||||
|
||||
for i, cert := range cs.PeerCertificates {
|
||||
if match(cert) {
|
||||
opts.Roots.AddCert(cert)
|
||||
found = true
|
||||
break
|
||||
} else if i > 0 {
|
||||
opts.Intermediates.AddCert(cert)
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
// Trusted anchor was not found in TLS certificates so we won't be able to
|
||||
// verify.
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Trusted anchor was found, still need to verify.
|
||||
var errs []error
|
||||
for _, host := range append([]dns.Domain{allowedHost}, moreAllowedHosts...) {
|
||||
opts.DNSName = host.ASCII
|
||||
_, err := cs.PeerCertificates[0].Verify(opts)
|
||||
if err == nil {
|
||||
return true, nil
|
||||
}
|
||||
errs = append(errs, err)
|
||||
}
|
||||
return false, errors.Join(errs...)
|
||||
|
||||
case adns.TLSAUsageDANEEE:
|
||||
// ../rfc/7250 is about raw public keys instead of x.509 certificates in tls
|
||||
// handshakes. Go's crypto/tls does not implement the extension (see
|
||||
// crypto/tls/common.go, the extensions values don't appear in the
|
||||
// rfc, but have values 19 and 20 according to
|
||||
// https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#tls-extensiontype-values-1
|
||||
// ../rfc/7671:1148 mentions that raw public keys are allowed. It is
// questionable whether this is commonly implemented. For now the world can probably
|
||||
// live with an ignored certificate wrapped around the subject public key info.
|
||||
|
||||
// We don't verify host name in certificate, ../rfc/7671:489
|
||||
// And we don't check for expiration. ../rfc/7671:527
|
||||
// The whole point of this type is to have simple secure infrastructure that
|
||||
// doesn't automatically expire (at the most inconvenient times).
|
||||
return match(cs.PeerCertificates[0]), nil
|
||||
|
||||
default:
|
||||
// Unknown, perhaps defined in the future. Not an error.
|
||||
log.Debug("unrecognized tlsa usage, skipping", slog.Any("tlsausage", tlsa.Usage))
|
||||
return false, nil
|
||||
}
|
||||
}
|
476 dane/dane_test.go Normal file
@@ -0,0 +1,476 @@
|
||||
package dane
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
cryptorand "crypto/rand"
|
||||
"crypto/sha256"
|
||||
"crypto/sha512"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"net"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/mjl-/adns"
|
||||
|
||||
"github.com/mjl-/mox/dns"
|
||||
"github.com/mjl-/mox/mlog"
|
||||
)
|
||||
|
||||
func tcheckf(t *testing.T, err error, format string, args ...any) {
|
||||
t.Helper()
|
||||
if err != nil {
|
||||
t.Fatalf("%s: %s", fmt.Sprintf(format, args...), err)
|
||||
}
|
||||
}
|
||||
|
||||
// Test dialing and DANE TLS verification.
|
||||
func TestDial(t *testing.T) {
|
||||
log := mlog.New("dane", nil)
|
||||
|
||||
// Create fake CA/trusted-anchor certificate.
|
||||
taTempl := x509.Certificate{
|
||||
SerialNumber: big.NewInt(1), // Required field.
|
||||
Subject: pkix.Name{CommonName: "fake ca"},
|
||||
Issuer: pkix.Name{CommonName: "fake ca"},
|
||||
NotBefore: time.Now().Add(-1 * time.Hour),
|
||||
NotAfter: time.Now().Add(1 * time.Hour),
|
||||
KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
|
||||
ExtKeyUsage: []x509.ExtKeyUsage{
|
||||
x509.ExtKeyUsageServerAuth,
|
||||
x509.ExtKeyUsageClientAuth,
|
||||
},
|
||||
BasicConstraintsValid: true,
|
||||
IsCA: true,
|
||||
MaxPathLen: 1,
|
||||
}
|
||||
taPriv, err := ecdsa.GenerateKey(elliptic.P256(), cryptorand.Reader)
|
||||
tcheckf(t, err, "generating trusted-anchor ca private key")
|
||||
taCertBuf, err := x509.CreateCertificate(cryptorand.Reader, &taTempl, &taTempl, taPriv.Public(), taPriv)
|
||||
tcheckf(t, err, "create trusted-anchor ca certificate")
|
||||
taCert, err := x509.ParseCertificate(taCertBuf)
|
||||
tcheckf(t, err, "parsing generated trusted-anchor ca certificate")
|
||||
|
||||
tacertsha256 := sha256.Sum256(taCert.Raw)
|
||||
taCertSHA256 := tacertsha256[:]
|
||||
|
||||
// Generate leaf private key & 2 certs, one expired and one valid, both signed by
|
||||
// trusted-anchor cert.
|
||||
leafPriv, err := ecdsa.GenerateKey(elliptic.P256(), cryptorand.Reader)
|
||||
tcheckf(t, err, "generating leaf private key")
|
||||
|
||||
makeLeaf := func(expired bool) (tls.Certificate, []byte, []byte) {
|
||||
now := time.Now()
|
||||
if expired {
|
||||
now = now.Add(-2 * time.Hour)
|
||||
}
|
||||
leafTempl := x509.Certificate{
|
||||
SerialNumber: big.NewInt(1), // Required field.
|
||||
Issuer: taTempl.Subject,
|
||||
NotBefore: now.Add(-1 * time.Hour),
|
||||
NotAfter: now.Add(1 * time.Hour),
|
||||
DNSNames: []string{"localhost"},
|
||||
}
|
||||
leafCertBuf, err := x509.CreateCertificate(cryptorand.Reader, &leafTempl, taCert, leafPriv.Public(), taPriv)
|
||||
tcheckf(t, err, "create trusted-anchor ca certificate")
|
||||
leafCert, err := x509.ParseCertificate(leafCertBuf)
|
||||
tcheckf(t, err, "parsing generated trusted-anchor ca certificate")
|
||||
|
||||
leafSPKISHA256 := sha256.Sum256(leafCert.RawSubjectPublicKeyInfo)
|
||||
leafSPKISHA512 := sha512.Sum512(leafCert.RawSubjectPublicKeyInfo)
|
||||
|
||||
tlsLeafCert := tls.Certificate{
|
||||
Certificate: [][]byte{leafCertBuf, taCertBuf},
|
||||
PrivateKey: leafPriv, // .(crypto.PrivateKey),
|
||||
Leaf: leafCert,
|
||||
}
|
||||
return tlsLeafCert, leafSPKISHA256[:], leafSPKISHA512[:]
|
||||
}
|
||||
tlsLeafCert, leafSPKISHA256, leafSPKISHA512 := makeLeaf(false)
|
||||
tlsLeafCertExpired, _, _ := makeLeaf(true)
|
||||
|
||||
// Set up loopback tls server.
|
||||
listenConn, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
tcheckf(t, err, "listen for test server")
|
||||
addr := listenConn.Addr().String()
|
||||
_, portstr, err := net.SplitHostPort(addr)
|
||||
tcheckf(t, err, "get localhost port")
|
||||
uport, err := strconv.ParseUint(portstr, 10, 16)
|
||||
tcheckf(t, err, "parse localhost port")
|
||||
port := int(uport)
|
||||
|
||||
defer listenConn.Close()
|
||||
|
||||
// Config for server, replaced during tests.
|
||||
var tlsConfig atomic.Pointer[tls.Config]
|
||||
tlsConfig.Store(&tls.Config{
|
||||
Certificates: []tls.Certificate{tlsLeafCert},
|
||||
})
|
||||
|
||||
// Loop handling incoming TLS connections.
|
||||
go func() {
|
||||
for {
|
||||
conn, err := listenConn.Accept()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
tlsConn := tls.Server(conn, tlsConfig.Load())
|
||||
tlsConn.Handshake()
|
||||
tlsConn.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
dialHost := "localhost"
|
||||
var allowedUsages []adns.TLSAUsage
|
||||
|
||||
pkixRoots := x509.NewCertPool()
|
||||
|
||||
// Helper function for dialing with DANE.
|
||||
test := func(resolver dns.Resolver, expRecord adns.TLSA, expErr any) {
|
||||
t.Helper()
|
||||
|
||||
conn, record, err := Dial(context.Background(), log.Logger, resolver, "tcp", net.JoinHostPort(dialHost, portstr), allowedUsages, pkixRoots)
|
||||
if err == nil {
|
||||
conn.Close()
|
||||
}
|
||||
if (err == nil) != (expErr == nil) || err != nil && !errors.Is(err, expErr.(error)) && !errors.As(err, expErr) {
|
||||
t.Fatalf("got err %v (%#v), expected %#v", err, err, expErr)
|
||||
}
|
||||
if !reflect.DeepEqual(record, expRecord) {
|
||||
t.Fatalf("got verified record %v, expected %v", record, expRecord)
|
||||
}
|
||||
}
|
||||
|
||||
tlsaName := fmt.Sprintf("_%d._tcp.localhost.", port)
|
||||
|
||||
// Make all kinds of records, some invalid or non-matching.
|
||||
var zeroRecord adns.TLSA
|
||||
recordDANEEESPKISHA256 := adns.TLSA{
|
||||
Usage: adns.TLSAUsageDANEEE,
|
||||
Selector: adns.TLSASelectorSPKI,
|
||||
MatchType: adns.TLSAMatchTypeSHA256,
|
||||
CertAssoc: leafSPKISHA256,
|
||||
}
|
||||
recordDANEEESPKISHA512 := adns.TLSA{
|
||||
Usage: adns.TLSAUsageDANEEE,
|
||||
Selector: adns.TLSASelectorSPKI,
|
||||
MatchType: adns.TLSAMatchTypeSHA512,
|
||||
CertAssoc: leafSPKISHA512,
|
||||
}
|
||||
recordDANEEESPKIFull := adns.TLSA{
|
||||
Usage: adns.TLSAUsageDANEEE,
|
||||
Selector: adns.TLSASelectorSPKI,
|
||||
MatchType: adns.TLSAMatchTypeFull,
|
||||
CertAssoc: tlsLeafCert.Leaf.RawSubjectPublicKeyInfo,
|
||||
}
|
||||
mismatchRecordDANEEESPKISHA256 := adns.TLSA{
|
||||
Usage: adns.TLSAUsageDANEEE,
|
||||
Selector: adns.TLSASelectorSPKI,
|
||||
MatchType: adns.TLSAMatchTypeSHA256,
|
||||
CertAssoc: make([]byte, sha256.Size), // Zero, no match.
|
||||
}
|
||||
malformedRecordDANEEESPKISHA256 := adns.TLSA{
|
||||
Usage: adns.TLSAUsageDANEEE,
|
||||
Selector: adns.TLSASelectorSPKI,
|
||||
MatchType: adns.TLSAMatchTypeSHA256,
|
||||
CertAssoc: leafSPKISHA256[:16], // Too short.
|
||||
}
|
||||
unknownparamRecordDANEEESPKISHA256 := adns.TLSA{
|
||||
Usage: adns.TLSAUsage(10), // Unrecognized value.
|
||||
Selector: adns.TLSASelectorSPKI,
|
||||
MatchType: adns.TLSAMatchTypeSHA256,
|
||||
CertAssoc: leafSPKISHA256,
|
||||
}
|
||||
recordDANETACertSHA256 := adns.TLSA{
|
||||
Usage: adns.TLSAUsageDANETA,
|
||||
Selector: adns.TLSASelectorCert,
|
||||
MatchType: adns.TLSAMatchTypeSHA256,
|
||||
CertAssoc: taCertSHA256,
|
||||
}
|
||||
recordDANETACertFull := adns.TLSA{
|
||||
Usage: adns.TLSAUsageDANETA,
|
||||
Selector: adns.TLSASelectorCert,
|
||||
MatchType: adns.TLSAMatchTypeFull,
|
||||
CertAssoc: taCert.Raw,
|
||||
}
|
||||
malformedRecordDANETACertFull := adns.TLSA{
|
||||
Usage: adns.TLSAUsageDANETA,
|
||||
Selector: adns.TLSASelectorCert,
|
||||
MatchType: adns.TLSAMatchTypeFull,
|
||||
CertAssoc: taCert.Raw[1:], // Cannot parse certificate.
|
||||
}
|
||||
mismatchRecordDANETACertSHA256 := adns.TLSA{
|
||||
Usage: adns.TLSAUsageDANETA,
|
||||
Selector: adns.TLSASelectorCert,
|
||||
MatchType: adns.TLSAMatchTypeSHA256,
|
||||
CertAssoc: make([]byte, sha256.Size), // Zero, no match.
|
||||
}
|
||||
recordPKIXEESPKISHA256 := adns.TLSA{
|
||||
Usage: adns.TLSAUsagePKIXEE,
|
||||
Selector: adns.TLSASelectorSPKI,
|
||||
MatchType: adns.TLSAMatchTypeSHA256,
|
||||
CertAssoc: leafSPKISHA256,
|
||||
}
|
||||
recordPKIXTACertSHA256 := adns.TLSA{
|
||||
Usage: adns.TLSAUsagePKIXTA,
|
||||
Selector: adns.TLSASelectorCert,
|
||||
MatchType: adns.TLSAMatchTypeSHA256,
|
||||
CertAssoc: taCertSHA256,
|
||||
}
|
||||
|
||||
resolver := dns.MockResolver{
|
||||
A: map[string][]string{"localhost.": {"127.0.0.1"}},
|
||||
TLSA: map[string][]adns.TLSA{tlsaName: {recordDANEEESPKISHA256}},
|
||||
AllAuthentic: true,
|
||||
}
|
||||
|
||||
// DANE-EE SPKI SHA2-256 record.
|
||||
test(resolver, recordDANEEESPKISHA256, nil)
|
||||
|
||||
// Check that record isn't used if not allowed.
|
||||
allowedUsages = []adns.TLSAUsage{adns.TLSAUsagePKIXTA}
|
||||
test(resolver, zeroRecord, ErrNoMatch)
|
||||
allowedUsages = nil // Restore.
|
||||
|
||||
// Mixed allowed/not allowed usages are fine.
|
||||
resolver = dns.MockResolver{
|
||||
A: map[string][]string{"localhost.": {"127.0.0.1"}},
|
||||
TLSA: map[string][]adns.TLSA{tlsaName: {mismatchRecordDANETACertSHA256, recordDANEEESPKISHA256}},
|
||||
AllAuthentic: true,
|
||||
}
|
||||
allowedUsages = []adns.TLSAUsage{adns.TLSAUsageDANEEE}
|
||||
test(resolver, recordDANEEESPKISHA256, nil)
|
||||
allowedUsages = nil // Restore.
|
||||
|
||||
// DANE-TA CERT SHA2-256 record.
|
||||
resolver.TLSA = map[string][]adns.TLSA{
|
||||
tlsaName: {recordDANETACertSHA256},
|
||||
}
|
||||
test(resolver, recordDANETACertSHA256, nil)
|
||||
|
||||
// No TLSA record.
|
||||
resolver.TLSA = nil
|
||||
test(resolver, zeroRecord, ErrNoRecords)
|
||||
|
||||
// Insecure TLSA record.
|
||||
resolver.TLSA = map[string][]adns.TLSA{
|
||||
tlsaName: {recordDANEEESPKISHA256},
|
||||
}
|
||||
resolver.Inauthentic = []string{"tlsa " + tlsaName}
|
||||
test(resolver, zeroRecord, ErrInsecure)
|
||||
|
||||
// Insecure CNAME.
|
||||
resolver.Inauthentic = []string{"cname localhost."}
|
||||
test(resolver, zeroRecord, ErrInsecure)
|
||||
|
||||
// Insecure TLSA
|
||||
resolver.Inauthentic = []string{"tlsa " + tlsaName}
|
||||
test(resolver, zeroRecord, ErrInsecure)
|
||||
|
||||
// Insecure CNAME should not look at TLSA records under that name, only under original.
|
||||
// Initial name/cname is secure. And it has secure TLSA records. But the lookup for
|
||||
// example1 is not secure, though the final example2 records are.
|
||||
resolver = dns.MockResolver{
|
||||
A: map[string][]string{"example2.": {"127.0.0.1"}},
|
||||
CNAME: map[string]string{"localhost.": "example1.", "example1.": "example2."},
|
||||
TLSA: map[string][]adns.TLSA{
|
||||
fmt.Sprintf("_%d._tcp.example2.", port): {mismatchRecordDANETACertSHA256}, // Should be ignored.
|
||||
tlsaName: {recordDANEEESPKISHA256}, // Should match.
|
||||
},
|
||||
AllAuthentic: true,
|
||||
Inauthentic: []string{"cname example1."},
|
||||
}
|
||||
test(resolver, recordDANEEESPKISHA256, nil)
|
||||
|
||||
// Matching records after following cname.
|
||||
resolver = dns.MockResolver{
|
||||
A: map[string][]string{"example.": {"127.0.0.1"}},
|
||||
CNAME: map[string]string{"localhost.": "example."},
|
||||
TLSA: map[string][]adns.TLSA{fmt.Sprintf("_%d._tcp.example.", port): {recordDANETACertSHA256}},
|
||||
AllAuthentic: true,
|
||||
}
|
||||
test(resolver, recordDANETACertSHA256, nil)
|
||||
|
||||
// Fallback to original name for TLSA records if cname-expanded name doesn't have records.
|
||||
resolver = dns.MockResolver{
|
||||
A: map[string][]string{"example.": {"127.0.0.1"}},
|
||||
CNAME: map[string]string{"localhost.": "example."},
|
||||
TLSA: map[string][]adns.TLSA{tlsaName: {recordDANETACertSHA256}},
|
||||
AllAuthentic: true,
|
||||
}
|
||||
test(resolver, recordDANETACertSHA256, nil)
|
||||
|
||||
// Invalid DANE-EE record.
|
||||
resolver = dns.MockResolver{
|
||||
A: map[string][]string{
|
||||
"localhost.": {"127.0.0.1"},
|
||||
},
|
||||
TLSA: map[string][]adns.TLSA{
|
||||
tlsaName: {mismatchRecordDANEEESPKISHA256},
|
||||
},
|
||||
AllAuthentic: true,
|
||||
}
|
||||
test(resolver, zeroRecord, ErrNoMatch)
|
||||
|
||||
// DANE-EE SPKI SHA2-512 record.
|
||||
resolver = dns.MockResolver{
|
||||
A: map[string][]string{"localhost.": {"127.0.0.1"}},
|
||||
TLSA: map[string][]adns.TLSA{tlsaName: {recordDANEEESPKISHA512}},
|
||||
AllAuthentic: true,
|
||||
}
|
||||
test(resolver, recordDANEEESPKISHA512, nil)
|
||||
|
||||
// DANE-EE SPKI Full record.
|
||||
resolver = dns.MockResolver{
|
||||
A: map[string][]string{"localhost.": {"127.0.0.1"}},
|
||||
TLSA: map[string][]adns.TLSA{tlsaName: {recordDANEEESPKIFull}},
|
||||
AllAuthentic: true,
|
||||
}
|
||||
test(resolver, recordDANEEESPKIFull, nil)
|
||||
|
||||
// DANE-TA with full certificate.
|
||||
resolver = dns.MockResolver{
|
||||
A: map[string][]string{"localhost.": {"127.0.0.1"}},
|
||||
TLSA: map[string][]adns.TLSA{tlsaName: {recordDANETACertFull}},
|
||||
AllAuthentic: true,
|
||||
}
|
||||
test(resolver, recordDANETACertFull, nil)
|
||||
|
||||
// DANE-TA for cert not in TLS handshake.
|
||||
resolver = dns.MockResolver{
|
||||
A: map[string][]string{"localhost.": {"127.0.0.1"}},
|
||||
TLSA: map[string][]adns.TLSA{tlsaName: {mismatchRecordDANETACertSHA256}},
|
||||
AllAuthentic: true,
|
||||
}
|
||||
test(resolver, zeroRecord, ErrNoMatch)
|
||||
|
||||
// DANE-TA with leaf cert for other name.
|
||||
resolver = dns.MockResolver{
|
||||
A: map[string][]string{"example.": {"127.0.0.1"}},
|
||||
TLSA: map[string][]adns.TLSA{fmt.Sprintf("_%d._tcp.example.", port): {recordDANETACertSHA256}},
|
||||
AllAuthentic: true,
|
||||
}
|
||||
origDialHost := dialHost
|
||||
dialHost = "example."
|
||||
test(resolver, zeroRecord, ErrNoMatch)
|
||||
dialHost = origDialHost
|
||||
|
||||
// DANE-TA with expired cert.
|
||||
resolver = dns.MockResolver{
|
||||
A: map[string][]string{"localhost.": {"127.0.0.1"}},
|
||||
TLSA: map[string][]adns.TLSA{tlsaName: {recordDANETACertSHA256}},
|
||||
AllAuthentic: true,
|
||||
}
|
||||
tlsConfig.Store(&tls.Config{
|
||||
Certificates: []tls.Certificate{tlsLeafCertExpired},
|
||||
})
|
||||
test(resolver, zeroRecord, ErrNoMatch)
|
||||
test(resolver, zeroRecord, &VerifyError{})
|
||||
test(resolver, zeroRecord, &x509.CertificateInvalidError{})
|
||||
// Restore.
|
||||
tlsConfig.Store(&tls.Config{
|
||||
Certificates: []tls.Certificate{tlsLeafCert},
|
||||
})
|
||||
|
||||
// Malformed TLSA record is unusable, resulting in failure if none left.
|
||||
resolver = dns.MockResolver{
|
||||
A: map[string][]string{"localhost.": {"127.0.0.1"}},
|
||||
TLSA: map[string][]adns.TLSA{tlsaName: {malformedRecordDANEEESPKISHA256}},
|
||||
AllAuthentic: true,
|
||||
}
|
||||
test(resolver, zeroRecord, ErrNoMatch)
|
||||
|
||||
// Malformed TLSA record is unusable and skipped, other verified record causes Dial to succeed.
|
||||
resolver = dns.MockResolver{
|
||||
A: map[string][]string{"localhost.": {"127.0.0.1"}},
|
||||
TLSA: map[string][]adns.TLSA{tlsaName: {malformedRecordDANEEESPKISHA256, recordDANEEESPKISHA256}},
|
||||
AllAuthentic: true,
|
||||
}
|
||||
test(resolver, recordDANEEESPKISHA256, nil)
|
||||
|
||||
// Record with unknown parameters (usage in this case) is unusable, resulting in failure if none left.
|
||||
resolver = dns.MockResolver{
|
||||
A: map[string][]string{"localhost.": {"127.0.0.1"}},
|
||||
TLSA: map[string][]adns.TLSA{tlsaName: {unknownparamRecordDANEEESPKISHA256}},
|
||||
AllAuthentic: true,
|
||||
}
|
||||
test(resolver, zeroRecord, ErrNoMatch)
|
||||
|
||||
// Unknown parameter does not prevent other valid record to verify.
|
||||
resolver = dns.MockResolver{
|
||||
A: map[string][]string{"localhost.": {"127.0.0.1"}},
|
||||
TLSA: map[string][]adns.TLSA{tlsaName: {unknownparamRecordDANEEESPKISHA256, recordDANEEESPKISHA256}},
|
||||
AllAuthentic: true,
|
||||
}
|
||||
test(resolver, recordDANEEESPKISHA256, nil)
|
||||
|
||||
// Malformed full TA certificate.
|
||||
resolver = dns.MockResolver{
|
||||
A: map[string][]string{"localhost.": {"127.0.0.1"}},
|
||||
TLSA: map[string][]adns.TLSA{tlsaName: {malformedRecordDANETACertFull}},
|
||||
AllAuthentic: true,
|
||||
}
|
||||
test(resolver, zeroRecord, ErrNoMatch)
|
||||
|
||||
// Full TA certificate without getting it from TLS server.
|
||||
resolver = dns.MockResolver{
|
||||
A: map[string][]string{"localhost.": {"127.0.0.1"}},
|
||||
TLSA: map[string][]adns.TLSA{tlsaName: {recordDANETACertFull}},
|
||||
AllAuthentic: true,
|
||||
}
|
||||
tlsLeafOnlyCert := tlsLeafCert
|
||||
tlsLeafOnlyCert.Certificate = tlsLeafOnlyCert.Certificate[:1]
|
||||
tlsConfig.Store(&tls.Config{
|
||||
Certificates: []tls.Certificate{tlsLeafOnlyCert},
|
||||
})
|
||||
test(resolver, recordDANETACertFull, nil)
|
||||
// Restore.
|
||||
tlsConfig.Store(&tls.Config{
|
||||
Certificates: []tls.Certificate{tlsLeafCert},
|
||||
})
|
||||
|
||||
// PKIXEE, will fail due to not being CA-signed.
|
||||
resolver = dns.MockResolver{
|
||||
A: map[string][]string{"localhost.": {"127.0.0.1"}},
|
||||
TLSA: map[string][]adns.TLSA{tlsaName: {recordPKIXEESPKISHA256}},
|
||||
AllAuthentic: true,
|
||||
}
|
||||
test(resolver, zeroRecord, &x509.UnknownAuthorityError{})
|
||||
|
||||
// PKIXTA, will fail due to not being CA-signed.
|
||||
resolver = dns.MockResolver{
|
||||
A: map[string][]string{"localhost.": {"127.0.0.1"}},
|
||||
TLSA: map[string][]adns.TLSA{tlsaName: {recordPKIXTACertSHA256}},
|
||||
AllAuthentic: true,
|
||||
}
|
||||
test(resolver, zeroRecord, &x509.UnknownAuthorityError{})
|
||||
|
||||
// Now we add the TA to the "pkix" trusted roots and try again.
|
||||
pkixRoots.AddCert(taCert)
|
||||
|
||||
// PKIXEE, will now succeed.
|
||||
resolver = dns.MockResolver{
|
||||
A: map[string][]string{"localhost.": {"127.0.0.1"}},
|
||||
TLSA: map[string][]adns.TLSA{tlsaName: {recordPKIXEESPKISHA256}},
|
||||
AllAuthentic: true,
|
||||
}
|
||||
test(resolver, recordPKIXEESPKISHA256, nil)
|
||||
|
||||
// PKIXTA, will now succeed.
|
||||
resolver = dns.MockResolver{
|
||||
A: map[string][]string{"localhost.": {"127.0.0.1"}},
|
||||
TLSA: map[string][]adns.TLSA{tlsaName: {recordPKIXTACertSHA256}},
|
||||
AllAuthentic: true,
|
||||
}
|
||||
test(resolver, recordPKIXTACertSHA256, nil)
|
||||
}
|
32 dane/examples_test.go Normal file
@@ -0,0 +1,32 @@
|
||||
package dane_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/x509"
|
||||
"log"
|
||||
"log/slog"
|
||||
|
||||
"github.com/mjl-/adns"
|
||||
|
||||
"github.com/mjl-/mox/dane"
|
||||
"github.com/mjl-/mox/dns"
|
||||
)
|
||||
|
||||
func ExampleDial() {
|
||||
ctx := context.Background()
|
||||
resolver := dns.StrictResolver{}
|
||||
usages := []adns.TLSAUsage{adns.TLSAUsageDANETA, adns.TLSAUsageDANEEE}
|
||||
pkixRoots, err := x509.SystemCertPool()
|
||||
if err != nil {
|
||||
log.Fatalf("system pkix roots: %v", err)
|
||||
}
|
||||
|
||||
// Connect to SMTP server, use STARTTLS, and verify TLS certificate with DANE.
|
||||
conn, verifiedRecord, err := dane.Dial(ctx, slog.Default(), resolver, "tcp", "mx.example.com", usages, pkixRoots)
|
||||
if err != nil {
|
||||
log.Fatalf("dial: %v", err)
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
log.Printf("connected, conn %v, verified record %s", conn, verifiedRecord)
|
||||
}
|
345 develop.txt Normal file
@@ -0,0 +1,345 @@
|
||||
This file has notes useful for mox developers.
|
||||
|
||||
# Building & testing
|
||||
|
||||
For a full build, you'll need a recent Go compiler/toolchain and nodejs/npm for
|
||||
the frontend. Run "make build" to do a full build. Run "make test" to run the
|
||||
test suite. With docker installed, you can run "make test-integration" to start
|
||||
up a few mox instances, a dns server, a postfix instance, and send email
|
||||
between them.
|
||||
|
||||
The mox localserve command is a convenient way to test locally. Most of the
|
||||
code paths are reachable/testable with mox localserve, but some use cases will
|
||||
require a full setup.
|
||||
|
||||
Before committing, run at least "make fmt" and "make check" (which requires
|
||||
staticcheck and ineffassign, run "make install-staticcheck install-ineffassign"
|
||||
once). Also run "make check-shadow" and fix any shadowed variables other than
|
||||
"err" (which are filtered out, but causes the command to always exit with an
|
||||
error code; run "make install-shadow" once to install the shadow command). If
|
||||
you've updated RFC references, run "make" in rfc/; it verifies the referenced
|
||||
files exist.
|
||||
|
||||
When making changes to the public API of a package listed in
|
||||
apidiff/packages.txt, run "make genapidiff" to update the list of changes in
|
||||
the upcoming release (run "make install-apidiff" once to install the apidiff
|
||||
command).
|
||||
|
||||
New features may be worth mentioning on the website, see website/ and
|
||||
instructions below.
|
||||
|
||||
|
||||
# Code style, guidelines, notes
|
||||
|
||||
- Keep the same style as existing code.
|
||||
- For Windows: use package "path/filepath" when dealing with files/directories.
|
||||
Test code can pass forward-slashed paths directly to standard library functions,
|
||||
but use proper filepath functions when parameters are passed and in non-test
|
||||
code. Mailbox names always use forward slash, so use package "path" for mailbox
|
||||
name/path manipulation. Do not remove/rename files that are still open.
|
||||
- Not all code uses adns, the DNSSEC-aware resolver. Such as code that makes
|
||||
http requests, like mtasts and autotls/autocert.
|
||||
- We don't have an internal/ directory, really just to prevent long paths in
|
||||
the repo, and to keep all Go code matching *.go */*.go (without matching
|
||||
vendor/). Part of the packages are reusable by other software. Those reusable
|
||||
packages must not cause mox implementation details (such as bstore) to get out,
|
||||
which would cause unexpected dependencies. Those packages also only expose the
|
||||
standard slog package for logging, not our mlog package. Packages not intended
|
||||
for reuse do use mlog as it is more convenient. Internally, we always use
|
||||
mlog.Log to do the logging, wrapping an slog.Logger.
|
||||
- The code uses panic for error handling in quite a few places, including
|
||||
smtpserver, imapserver and web API calls. Functions/methods, variables, struct
|
||||
fields and types that begin with an "x" indicate they can panic on errors. Both
|
||||
for i/o errors that are fatal for a connection, and also often for user-induced
|
||||
errors, for example bad IMAP commands or invalid web API requests. These panics
|
||||
are caught again at the top of a command or top of the connection. Write code
|
||||
that is panic-safe, using defer to clean up and release resources (a sketch follows after this list).
|
||||
- Try to check all errors, at the minimum using mlog.Log.Check() to log an error
|
||||
at the appropriate level. Also when just closing a file. Log messages sometimes
|
||||
unexpectedly point out latent issues. Only when there is no point in logging,
|
||||
for example when previous writes to stderr failed, can error logging be skipped.
|
||||
Test code is less strict about checking errors.
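
A minimal, self-contained sketch of the panic convention mentioned above. The
names (xcheckf, the file path) are hypothetical, not actual mox identifiers:

```
package main

import (
	"fmt"
	"os"
)

// xcheckf panics on error; in mox such panics are caught again at the top of a
// command or connection and turned into an error response.
func xcheckf(err error, format string, args ...any) {
	if err != nil {
		panic(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err))
	}
}

func main() {
	defer func() {
		if x := recover(); x != nil {
			fmt.Println("handled:", x)
		}
	}()

	f, err := os.Open("/nonexistent/path")
	xcheckf(err, "open data file")
	defer f.Close()
}
```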
|
||||
|
||||
|
||||
# Reusable packages
|
||||
|
||||
Most non-server Go packages are meant to be reusable. This means internal
|
||||
details are not exposed in the API, and we don't make unneeded changes. We can
|
||||
still make breaking changes when it improves mox: We don't want to be stuck
|
||||
with bad API. Third party users aren't affected too seriously due to Go's
|
||||
minimal version selection. The reusable packages are in apidiff/packages.txt.
|
||||
We generate the incompatible changes with each release.
|
||||
|
||||
|
||||
# Web interfaces/frontend
|
||||
|
||||
The web interface frontends (for webmail/, webadmin/ and webaccount/) are
|
||||
written in strict TypeScript. The web API is a simple self-documenting
|
||||
HTTP/JSON RPC API mechanism called sherpa,
|
||||
https://www.ueber.net/who/mjl/sherpa/. The web API exposes types and functions
|
||||
as implemented in Go, using https://github.com/mjl-/sherpa. API definitions in
|
||||
JSON form are generated with https://github.com/mjl-/sherpadoc. Those API
|
||||
definitions are used to generate TypeScript clients with
|
||||
https://github.com/mjl-/sherpats/.
|
||||
|
||||
The JavaScript that is generated from the TypeScript is included in the
|
||||
repository. This makes it available for inclusion in the binary, which is
|
||||
practical for users, and desirable given Go's reproducible builds. When
|
||||
developing, run "make" to also build the frontend code. Run "make
|
||||
install-frontend" once to install the TypeScript compiler into ./node_modules/.
|
||||
|
||||
There are no other external (runtime or devtime) frontend dependencies. A
|
||||
light-weight abstraction over the DOM is provided by ./lib.ts. A bit more
|
||||
manual UI state management must be done compared to "frameworks", but it is
|
||||
little code, and this allows JavaScript/TypeScript developers to quickly get
|
||||
started. UI state is often encapsulated in a JavaScript object with a
|
||||
TypeScript interface exposing a "root" HTMLElement that is added to the DOM,
|
||||
and functions for accessing/changing the internal state, keeping the UI
|
||||
manageable.
|
||||
|
||||
|
||||
# Website
|
||||
|
||||
The content of the public website at https://www.xmox.nl is in website/, as
|
||||
markdown files. The website HTML is generated with "make genwebsite", which
|
||||
writes to website/html/ (files not committed). The FAQ is taken from
|
||||
README.md, the protocol support table is generated from rfc/index.txt. The
|
||||
website is kept in this repository so a commit can change both the
|
||||
implementation and the documentation on the website. Some of the info in
|
||||
README.md is duplicated on the website, often more elaborate and possibly with
|
||||
a slightly less technical audience. The website should also mostly be readable
|
||||
through the markdown in the git repo.
|
||||
|
||||
Large files (images/videos) are in https://github.com/mjl-/mox-website-files to
|
||||
keep the repository reasonably sized.
|
||||
|
||||
The public website may serve the content from the "website" branch. After a
|
||||
release, the main branch (with latest development code and corresponding
|
||||
changes to the website about new features) is merged into the website branch.
|
||||
Commits to the website branch (e.g. for a news item, or any other change
|
||||
unrelated to a new release) are merged back into the main branch.
|
||||
|
||||
|
||||
# TLS certificates
|
||||
|
||||
https://github.com/cloudflare/cfssl is useful for testing with TLS
|
||||
certificates. Create a CA and configure it in mox.conf TLS.CA.CertFiles, and
|
||||
sign host certificates and configure them in the listeners TLS.KeyCerts.
|
||||
|
||||
Setup a local CA with cfssl, run once:
|
||||
|
||||
```sh
|
||||
go install github.com/cloudflare/cfssl/cmd/cfssl@latest
|
||||
go install github.com/cloudflare/cfssl/cmd/cfssljson@latest
|
||||
|
||||
mkdir -p local/cfssl
|
||||
cd local/cfssl
|
||||
|
||||
cfssl print-defaults config > ca-config.json # defaults are fine
|
||||
|
||||
# Based on: cfssl print-defaults csr > ca-csr.json
|
||||
cat <<EOF >ca-csr.json
|
||||
{
|
||||
"CN": "mox ca",
|
||||
"key": {
|
||||
"algo": "ecdsa",
|
||||
"size": 256
|
||||
},
|
||||
"names": [
|
||||
{
|
||||
"C": "NL"
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
|
||||
cfssl gencert -initca ca-csr.json | cfssljson -bare ca - # Generate ca key and cert.
|
||||
|
||||
# Generate wildcard certificates for one or more domains, add localhost for use with pebble, see below.
|
||||
domains="moxtest.example localhost"
|
||||
for domain in $domains; do
|
||||
cat <<EOF >wildcard.$domain.csr.json
|
||||
{
|
||||
"key": {
|
||||
"algo": "ecdsa",
|
||||
"size": 256
|
||||
},
|
||||
"names": [
|
||||
{
|
||||
"O": "mox"
|
||||
}
|
||||
],
|
||||
"hosts": [
|
||||
"$domain",
|
||||
"*.$domain"
|
||||
]
|
||||
}
|
||||
EOF
|
||||
cfssl gencert -ca ca.pem -ca-key ca-key.pem -profile=www wildcard.$domain.csr.json | cfssljson -bare wildcard.$domain
|
||||
done
|
||||
```
|
||||
|
||||
Now configure mox.conf to add the cfssl CA root certificate:
|
||||
|
||||
```
|
||||
TLS:
|
||||
CA:
|
||||
AdditionalToSystem: true
|
||||
CertFiles:
|
||||
# Assuming local/<env>/config/mox.conf and local/cfssl/.
|
||||
- ../../cfssl/ca.pem
|
||||
|
||||
[...]
|
||||
|
||||
Listeners:
|
||||
public:
|
||||
TLS:
|
||||
KeyCerts:
|
||||
# Assuming local/<env>/config/mox.conf and local/cfssl/.
|
||||
CertFile: ../../cfssl/wildcard.$domain.pem
|
||||
KeyFile: ../../cfssl/wildcard.$domain-key.pem
|
||||
```
|
||||
|
||||
|
||||
# ACME
|
||||
|
||||
https://github.com/letsencrypt/pebble is useful for testing with ACME. Start a
|
||||
pebble instance that uses the localhost TLS cert/key created by cfssl for its
|
||||
TLS serving. Pebble generates a new CA certificate for its own use each time it
|
||||
is started. Fetch it from https://localhost:15000/roots/0, write it to a file, and
|
||||
add it to mox.conf TLS.CA.CertFiles. See below.
|
||||
|
||||
Setup pebble, run once:
|
||||
|
||||
```sh
|
||||
go install github.com/letsencrypt/pebble/cmd/pebble@latest
|
||||
|
||||
mkdir -p local/pebble
|
||||
cat <<EOF >local/pebble/config.json
|
||||
{
|
||||
"pebble": {
|
||||
"listenAddress": "localhost:14000",
|
||||
"managementListenAddress": "localhost:15000",
|
||||
"certificate": "local/cfssl/localhost.pem",
|
||||
"privateKey": "local/cfssl/localhost-key.pem",
|
||||
"httpPort": 80,
|
||||
"tlsPort": 443,
|
||||
"ocspResponderURL": "",
|
||||
"externalAccountBindingRequired": false
|
||||
}
|
||||
}
|
||||
EOF
|
||||
```
|
||||
|
||||
Start pebble, this generates a new temporary pebble CA certificate:
|
||||
|
||||
```sh
|
||||
pebble -config local/pebble/config.json
|
||||
```
|
||||
|
||||
Write new CA bundle that includes pebble's temporary CA cert:
|
||||
|
||||
```sh
|
||||
export CURL_CA_BUNDLE=local/ca-bundle.pem # for curl
|
||||
export SSL_CERT_FILE=local/ca-bundle.pem # for go apps
|
||||
cat /etc/ssl/certs/ca-certificates.crt local/cfssl/ca.pem >local/ca-bundle.pem
|
||||
curl https://localhost:15000/roots/0 >local/pebble/ca.pem # fetch temp pebble ca, DO THIS EVERY TIME PEBBLE IS RESTARTED!
|
||||
cat /etc/ssl/certs/ca-certificates.crt local/cfssl/ca.pem local/pebble/ca.pem >local/ca-bundle.pem # create new list that includes cfssl ca and temp pebble ca.
|
||||
rm -r local/*/data/acme/keycerts/pebble # remove existing pebble-signed certs in acme cert/key cache, they are invalid due to newly generated temp pebble ca.
|
||||
```
|
||||
|
||||
Edit mox.conf, adding pebble ACME and its ca.pem:
|
||||
|
||||
```
|
||||
ACME:
|
||||
pebble:
|
||||
DirectoryURL: https://localhost:14000/dir
|
||||
ContactEmail: root@mox.example
|
||||
TLS:
|
||||
CA:
|
||||
AdditionalToSystem: true
|
||||
CertFiles:
|
||||
# Assuming local/<env>/config/mox.conf and local/pebble/ca.pem and local/cfssl/ca.pem.
|
||||
- ../../pebble/ca.pem
|
||||
- ../../cfssl/ca.pem
|
||||
|
||||
[...]
|
||||
|
||||
Listeners:
|
||||
public:
|
||||
TLS:
|
||||
ACME: pebble
|
||||
```
|
||||
|
||||
For mail clients and browsers to accept pebble-signed certificates, you must add
|
||||
the temporary pebble CA cert to their trusted root CA store each time pebble is
|
||||
started (e.g. to your thunderbird/firefox testing profile). Pebble has no option
|
||||
to not regenerate its CA certificate, presumably for fear of people using it for
|
||||
non-testing purposes. Unfortunately, this also makes it inconvenient to use for
|
||||
testing purposes.
|
||||
|
||||
|
||||
# Messages for testing
|
||||
|
||||
For compatibility and performance testing, it helps to have many messages,
|
||||
created a long time ago and recently, by different mail user agents. A helpful
|
||||
source is the Linux kernel mailing list. Archives are available as multiple git
|
||||
repositories (split due to size) at
|
||||
https://lore.kernel.org/lkml/_/text/mirror/. The git repos can be converted
|
||||
to compressed mbox files (about 800MB each) with:
|
||||
|
||||
```
|
||||
# 0 is the first epoch (with over half a million messages), 12 is the last
|
||||
# already-complete epoch at the time of writing (with a quarter million
|
||||
# messages). The archives are large, converting will take some time.
|
||||
for i in 0 12; do
|
||||
git clone --mirror http://lore.kernel.org/lkml/$i lkml-$i.git
|
||||
(cd lkml-$i.git && time ./tombox.sh | gzip >../lkml-$i.mbox.gz)
|
||||
done
|
||||
```
|
||||
|
||||
With the following "tombox.sh" script:
|
||||
|
||||
```
|
||||
#!/bin/sh
|
||||
pre=''
|
||||
for rev in $(git rev-list --reverse master); do
|
||||
printf "$pre"
|
||||
echo "From sender@host $(date '+%a %b %e %H:%M:%S %Y' -d @$(git show -s --format=%ct $rev))"
|
||||
git show ${rev}:m | sed 's/^>*From />&/'
|
||||
pre='\n'
|
||||
done
|
||||
```
|
||||
|
||||
|
||||
# Release process
|
||||
|
||||
- Gather feedback on recent changes.
|
||||
- Check if dependencies need updates.
|
||||
- Update to latest publicsuffix/ list.
|
||||
- Check code if there are deprecated features that can be removed.
|
||||
- Generate apidiff and check if breaking changes can be prevented. Update moxtools.
|
||||
- Update features & roadmap in README.md and website.
|
||||
- Write release notes, copy from previous.
|
||||
- Build and run tests with previous major Go release, run "make docker-release" to test building images.
|
||||
- Run tests, including with race detector, also with TZ= for UTC-behaviour, and with -count 2.
|
||||
- Run integration and upgrade tests.
|
||||
- Run fuzzing tests for a while.
|
||||
- Deploy to test environment. Test the update instructions.
|
||||
- Test mox localserve on various OSes (linux, bsd, macos, windows).
|
||||
- Send and receive email through the major webmail providers, check headers.
|
||||
- Send and receive email with imap4/smtp clients.
|
||||
- Check DNS check admin page.
|
||||
- Check with https://internet.nl.
|
||||
- Move apidiff/next.txt to apidiff/<version>.txt, and create empty next.txt.
|
||||
- Add release to the Latest release & News sections of website/index.md.
|
||||
- Create git tag (note: "#" is comment, not title/header), push code.
|
||||
- Build and publish new docker image.
|
||||
- Deploy update to website.
|
||||
- Create new release on the github page, so watchers get a notification.
|
||||
Copy/paste it manually from the tag text, and add link to download/compile
|
||||
instructions to prevent confusion about the "assets" that github links to.
|
||||
- Publish new cross-referenced code/rfc to www.xmox.nl/xr/.
|
||||
- Update moxtools with latest version.
|
||||
- Update implementations support matrix.
|
||||
- Publish signed release notes for updates.xmox.nl and update DNS record.
|
178 dkim/dkim.go
@@ -21,43 +21,25 @@ import (
|
||||
"fmt"
|
||||
"hash"
|
||||
"io"
|
||||
"log/slog"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
|
||||
"github.com/mjl-/mox/config"
|
||||
"github.com/mjl-/mox/dns"
|
||||
"github.com/mjl-/mox/mlog"
|
||||
"github.com/mjl-/mox/moxio"
|
||||
"github.com/mjl-/mox/publicsuffix"
|
||||
"github.com/mjl-/mox/smtp"
|
||||
"github.com/mjl-/mox/stub"
|
||||
"slices"
|
||||
)
|
||||
|
||||
var xlog = mlog.New("dkim")
|
||||
// If set, signatures for top-level domain "localhost" are accepted.
|
||||
var Localserve bool
|
||||
|
||||
var (
|
||||
metricDKIMSign = promauto.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "mox_dkim_sign_total",
|
||||
Help: "DKIM messages signings.",
|
||||
},
|
||||
[]string{
|
||||
"key",
|
||||
},
|
||||
)
|
||||
metricDKIMVerify = promauto.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "mox_dkim_verify_duration_seconds",
|
||||
Help: "DKIM verify, including lookup, duration and result.",
|
||||
Buckets: []float64{0.001, 0.005, 0.01, 0.05, 0.100, 0.5, 1, 5, 10, 20},
|
||||
},
|
||||
[]string{
|
||||
"algorithm",
|
||||
"status",
|
||||
},
|
||||
)
|
||||
MetricSign stub.CounterVec = stub.CounterVecIgnore{}
|
||||
MetricVerify stub.HistogramVec = stub.HistogramVecIgnore{}
|
||||
)
|
||||
|
||||
var timeNow = time.Now // Replaced during tests.
|
||||
@@ -113,20 +95,45 @@ var (
|
||||
// To decide what to do with a message, both the signature parameters and the DNS
|
||||
// TXT record have to be consulted.
|
||||
type Result struct {
|
||||
Status Status
|
||||
Sig *Sig // Parsed form of DKIM-Signature header. Can be nil for invalid DKIM-Signature header.
|
||||
Record *Record // Parsed form of DKIM DNS record for selector and domain in Sig. Optional.
|
||||
Err error // If Status is not StatusPass, this error holds the details and can be checked using errors.Is.
|
||||
Status Status
|
||||
Sig *Sig // Parsed form of DKIM-Signature header. Can be nil for invalid DKIM-Signature header.
|
||||
Record *Record // Parsed form of DKIM DNS record for selector and domain in Sig. Optional.
|
||||
RecordAuthentic bool // Whether DKIM DNS record was DNSSEC-protected. Only valid if Sig is non-nil.
|
||||
Err error // If Status is not StatusPass, this error holds the details and can be checked using errors.Is.
|
||||
}
|
||||
|
||||
// todo: use some io.Writer to hash the body and the header.
|
||||
|
||||
// Selector holds selectors and key material to generate DKIM signatures.
|
||||
type Selector struct {
|
||||
Hash string // "sha256" or the older "sha1".
|
||||
HeaderRelaxed bool // If the header is canonicalized in relaxed instead of simple mode.
|
||||
BodyRelaxed bool // If the body is canonicalized in relaxed instead of simple mode.
|
||||
Headers []string // Headers to include in signature.
|
||||
|
||||
// Whether to "oversign" headers, ensuring additional/new values of existing
|
||||
// headers cannot be added.
|
||||
SealHeaders bool
|
||||
|
||||
// If > 0, period a signature is valid after signing, as duration, e.g. 72h. The
|
||||
// period should be enough for delivery at the final destination, potentially with
|
||||
// several hops/relays. In the order of days at least.
|
||||
Expiration time.Duration
|
||||
|
||||
PrivateKey crypto.Signer // Either an *rsa.PrivateKey or ed25519.PrivateKey.
|
||||
Domain dns.Domain // Of selector only, not FQDN.
|
||||
}
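
// Example Selector value (a sketch; the key, selector name and header list are
// illustrative assumptions, not mox defaults):
//
//	sel := Selector{
//		Hash:        "sha256",
//		Headers:     []string{"From", "To", "Subject", "Date", "Message-ID"},
//		SealHeaders: true,
//		Expiration:  72 * time.Hour,
//		PrivateKey:  privKey, // *rsa.PrivateKey or ed25519.PrivateKey.
//		Domain:      dns.Domain{ASCII: "sel2024"},
//	}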
|
||||
|
||||
// Sign returns line(s) with DKIM-Signature headers, generated according to the configuration.
|
||||
func Sign(ctx context.Context, localpart smtp.Localpart, domain dns.Domain, c config.DKIM, smtputf8 bool, msg io.ReaderAt) (headers string, rerr error) {
|
||||
log := xlog.WithContext(ctx)
|
||||
func Sign(ctx context.Context, elog *slog.Logger, localpart smtp.Localpart, domain dns.Domain, selectors []Selector, smtputf8 bool, msg io.ReaderAt) (headers string, rerr error) {
|
||||
log := mlog.New("dkim", elog)
|
||||
start := timeNow()
|
||||
defer func() {
|
||||
log.Debugx("dkim sign result", rerr, mlog.Field("localpart", localpart), mlog.Field("domain", domain), mlog.Field("smtputf8", smtputf8), mlog.Field("duration", time.Since(start)))
|
||||
log.Debugx("dkim sign result", rerr,
|
||||
slog.Any("localpart", localpart),
|
||||
slog.Any("domain", domain),
|
||||
slog.Bool("smtputf8", smtputf8),
|
||||
slog.Duration("duration", time.Since(start)))
|
||||
}()
|
||||
|
||||
hdrs, bodyOffset, err := parseHeaders(bufio.NewReader(&moxio.AtReader{R: msg}))
|
||||
@@ -150,26 +157,25 @@ func Sign(ctx context.Context, localpart smtp.Localpart, domain dns.Domain, c co
|
||||
|
||||
var bodyHashes = map[hashKey][]byte{}
|
||||
|
||||
for _, sign := range c.Sign {
|
||||
sel := c.Selectors[sign]
|
||||
for _, sel := range selectors {
|
||||
sig := newSigWithDefaults()
|
||||
sig.Version = 1
|
||||
switch sel.Key.(type) {
|
||||
switch sel.PrivateKey.(type) {
|
||||
case *rsa.PrivateKey:
|
||||
sig.AlgorithmSign = "rsa"
|
||||
metricDKIMSign.WithLabelValues("rsa").Inc()
|
||||
MetricSign.IncLabels("rsa")
|
||||
case ed25519.PrivateKey:
|
||||
sig.AlgorithmSign = "ed25519"
|
||||
metricDKIMSign.WithLabelValues("ed25519").Inc()
|
||||
MetricSign.IncLabels("ed25519")
|
||||
default:
|
||||
return "", fmt.Errorf("internal error, unknown pivate key %T", sel.Key)
|
||||
return "", fmt.Errorf("internal error, unknown pivate key %T", sel.PrivateKey)
|
||||
}
|
||||
sig.AlgorithmHash = sel.HashEffective
|
||||
sig.AlgorithmHash = sel.Hash
|
||||
sig.Domain = domain
|
||||
sig.Selector = sel.Domain
|
||||
sig.Identity = &Identity{&localpart, domain}
|
||||
sig.SignedHeaders = append([]string{}, sel.HeadersEffective...)
|
||||
if !sel.DontSealHeaders {
|
||||
sig.SignedHeaders = slices.Clone(sel.Headers)
|
||||
if sel.SealHeaders {
|
||||
// ../rfc/6376:2156
|
||||
// Each time a header name is added to the signature, the next unused value is
|
||||
// signed (in reverse order as they occur in the message). So we can add each
|
||||
@@ -179,23 +185,23 @@ func Sign(ctx context.Context, localpart smtp.Localpart, domain dns.Domain, c co
|
||||
for _, h := range hdrs {
|
||||
counts[h.lkey]++
|
||||
}
|
||||
for _, h := range sel.HeadersEffective {
|
||||
for _, h := range sel.Headers {
|
||||
for j := counts[strings.ToLower(h)]; j > 0; j-- {
|
||||
sig.SignedHeaders = append(sig.SignedHeaders, h)
|
||||
}
|
||||
}
|
||||
}
|
||||
sig.SignTime = timeNow().Unix()
|
||||
if sel.ExpirationSeconds > 0 {
|
||||
sig.ExpireTime = sig.SignTime + int64(sel.ExpirationSeconds)
|
||||
if sel.Expiration > 0 {
|
||||
sig.ExpireTime = sig.SignTime + int64(sel.Expiration/time.Second)
|
||||
}
|
||||
|
||||
sig.Canonicalization = "simple"
|
||||
if sel.Canonicalization.HeaderRelaxed {
|
||||
if sel.HeaderRelaxed {
|
||||
sig.Canonicalization = "relaxed"
|
||||
}
|
||||
sig.Canonicalization += "/"
|
||||
if sel.Canonicalization.BodyRelaxed {
|
||||
if sel.BodyRelaxed {
|
||||
sig.Canonicalization += "relaxed"
|
||||
} else {
|
||||
sig.Canonicalization += "simple"
|
||||
@@ -212,12 +218,12 @@ func Sign(ctx context.Context, localpart smtp.Localpart, domain dns.Domain, c co
|
||||
// DKIM-Signature header.
|
||||
// ../rfc/6376:1700
|
||||
|
||||
hk := hashKey{!sel.Canonicalization.BodyRelaxed, strings.ToLower(sig.AlgorithmHash)}
|
||||
hk := hashKey{!sel.BodyRelaxed, strings.ToLower(sig.AlgorithmHash)}
|
||||
if bh, ok := bodyHashes[hk]; ok {
|
||||
sig.BodyHash = bh
|
||||
} else {
|
||||
br := bufio.NewReader(&moxio.AtReader{R: msg, Offset: int64(bodyOffset)})
|
||||
bh, err = bodyHash(h.New(), !sel.Canonicalization.BodyRelaxed, br)
|
||||
bh, err = bodyHash(h.New(), !sel.BodyRelaxed, br)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
@@ -231,12 +237,12 @@ func Sign(ctx context.Context, localpart smtp.Localpart, domain dns.Domain, c co
|
||||
}
|
||||
verifySig := []byte(strings.TrimSuffix(sigh, "\r\n"))
|
||||
|
||||
dh, err := dataHash(h.New(), !sel.Canonicalization.HeaderRelaxed, sig, hdrs, verifySig)
|
||||
dh, err := dataHash(h.New(), !sel.HeaderRelaxed, sig, hdrs, verifySig)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
switch key := sel.Key.(type) {
|
||||
switch key := sel.PrivateKey.(type) {
|
||||
case *rsa.PrivateKey:
|
||||
sig.Signature, err = key.Sign(cryptorand.Reader, dh, h)
|
||||
if err != nil {
|
||||
@@ -267,22 +273,29 @@ func Sign(ctx context.Context, localpart smtp.Localpart, domain dns.Domain, c co
|
||||
//
|
||||
// A requested record is <selector>._domainkey.<domain>. Exactly one valid DKIM
|
||||
// record should be present.
|
||||
func Lookup(ctx context.Context, resolver dns.Resolver, selector, domain dns.Domain) (rstatus Status, rrecord *Record, rtxt string, rerr error) {
|
||||
log := xlog.WithContext(ctx)
|
||||
//
|
||||
// authentic indicates if DNS results were DNSSEC-verified.
|
||||
func Lookup(ctx context.Context, elog *slog.Logger, resolver dns.Resolver, selector, domain dns.Domain) (rstatus Status, rrecord *Record, rtxt string, authentic bool, rerr error) {
|
||||
log := mlog.New("dkim", elog)
|
||||
start := timeNow()
|
||||
defer func() {
|
||||
log.Debugx("dkim lookup result", rerr, mlog.Field("selector", selector), mlog.Field("domain", domain), mlog.Field("status", rstatus), mlog.Field("record", rrecord), mlog.Field("duration", time.Since(start)))
|
||||
log.Debugx("dkim lookup result", rerr,
|
||||
slog.Any("selector", selector),
|
||||
slog.Any("domain", domain),
|
||||
slog.Any("status", rstatus),
|
||||
slog.Any("record", rrecord),
|
||||
slog.Duration("duration", time.Since(start)))
|
||||
}()
|
||||
|
||||
name := selector.ASCII + "._domainkey." + domain.ASCII + "."
|
||||
records, err := dns.WithPackage(resolver, "dkim").LookupTXT(ctx, name)
|
||||
records, lookupResult, err := dns.WithPackage(resolver, "dkim").LookupTXT(ctx, name)
|
||||
if dns.IsNotFound(err) {
|
||||
// ../rfc/6376:2608
|
||||
// We must return StatusPermerror. We may want to return StatusTemperror because in
|
||||
// practice someone will start using a new key before DNS changes have propagated.
|
||||
return StatusPermerror, nil, "", fmt.Errorf("%w: dns name %q", ErrNoRecord, name)
|
||||
return StatusPermerror, nil, "", lookupResult.Authentic, fmt.Errorf("%w: dns name %q", ErrNoRecord, name)
|
||||
} else if err != nil {
|
||||
return StatusTemperror, nil, "", fmt.Errorf("%w: dns name %q: %s", ErrDNS, name, err)
|
||||
return StatusTemperror, nil, "", lookupResult.Authentic, fmt.Errorf("%w: dns name %q: %s", ErrDNS, name, err)
|
||||
}
|
||||
|
||||
// ../rfc/6376:2612
|
||||
@ -298,7 +311,7 @@ func Lookup(ctx context.Context, resolver dns.Resolver, selector, domain dns.Dom
|
||||
var isdkim bool
|
||||
r, isdkim, err = ParseRecord(s)
|
||||
if err != nil && isdkim {
|
||||
return StatusPermerror, nil, txt, fmt.Errorf("%w: %s", ErrSyntax, err)
|
||||
return StatusPermerror, nil, txt, lookupResult.Authentic, fmt.Errorf("%w: %s", ErrSyntax, err)
|
||||
} else if err != nil {
|
||||
// Hopefully the remote MTA admin discovers the configuration error and fix it for
|
||||
// an upcoming delivery attempt, in case we rejected with temporary status.
|
||||
@ -310,7 +323,7 @@ func Lookup(ctx context.Context, resolver dns.Resolver, selector, domain dns.Dom
|
||||
// ../rfc/6376:1609
|
||||
// ../rfc/6376:2584
|
||||
if record != nil {
|
||||
return StatusTemperror, nil, "", fmt.Errorf("%w: dns name %q", ErrMultipleRecords, name)
|
||||
return StatusTemperror, nil, "", lookupResult.Authentic, fmt.Errorf("%w: dns name %q", ErrMultipleRecords, name)
|
||||
}
|
||||
record = r
|
||||
txt = s
|
||||
@ -318,9 +331,9 @@ func Lookup(ctx context.Context, resolver dns.Resolver, selector, domain dns.Dom
|
||||
}
|
||||
|
||||
if record == nil {
|
||||
return status, nil, "", err
|
||||
return status, nil, "", lookupResult.Authentic, err
|
||||
}
|
||||
return StatusNeutral, record, txt, nil
|
||||
return StatusNeutral, record, txt, lookupResult.Authentic, nil
|
||||
}
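The Lookup signature above now takes an explicit *slog.Logger and returns an extra boolean reporting whether the DNS answer was DNSSEC-verified. A minimal caller sketch, not part of the patch, assuming the dkim and dns packages shown in this diff (selector and domain names are placeholders):

```go
package main

import (
	"context"
	"log"
	"log/slog"

	"github.com/mjl-/mox/dkim"
	"github.com/mjl-/mox/dns"
)

func main() {
	ctx := context.Background()
	resolver := dns.StrictResolver{}

	// Lookup queries <selector>._domainkey.<domain> as a TXT record.
	selector := dns.Domain{ASCII: "test"}
	domain := dns.Domain{ASCII: "mox.example"}

	status, record, txt, authentic, err := dkim.Lookup(ctx, slog.Default(), resolver, selector, domain)
	if err != nil {
		log.Printf("dkim lookup: %v", err)
	}
	log.Printf("status %s, record %v, txt %q, dnssec %v", status, record, txt, authentic)
}
```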
|
||||
// Verify parses the DKIM-Signature headers in a message and verifies each of them.
|
||||
@ -335,8 +348,8 @@ func Lookup(ctx context.Context, resolver dns.Resolver, selector, domain dns.Dom
|
||||
// verification failure is treated as actual failure. With ignoreTestMode
|
||||
// false, such verification failures are treated as if there is no signature by
|
||||
// returning StatusNone.
|
||||
func Verify(ctx context.Context, resolver dns.Resolver, smtputf8 bool, policy func(*Sig) error, r io.ReaderAt, ignoreTestMode bool) (results []Result, rerr error) {
|
||||
log := xlog.WithContext(ctx)
|
||||
func Verify(ctx context.Context, elog *slog.Logger, resolver dns.Resolver, smtputf8 bool, policy func(*Sig) error, r io.ReaderAt, ignoreTestMode bool) (results []Result, rerr error) {
|
||||
log := mlog.New("dkim", elog)
|
||||
start := timeNow()
|
||||
defer func() {
|
||||
duration := float64(time.Since(start)) / float64(time.Second)
|
||||
@ -346,14 +359,19 @@ func Verify(ctx context.Context, resolver dns.Resolver, smtputf8 bool, policy fu
|
||||
alg = r.Sig.Algorithm()
|
||||
}
|
||||
status := string(r.Status)
|
||||
metricDKIMVerify.WithLabelValues(alg, status).Observe(duration)
|
||||
MetricVerify.ObserveLabels(duration, alg, status)
|
||||
}
|
||||
|
||||
if len(results) == 0 {
|
||||
log.Debugx("dkim verify result", rerr, mlog.Field("smtputf8", smtputf8), mlog.Field("duration", time.Since(start)))
|
||||
log.Debugx("dkim verify result", rerr, slog.Bool("smtputf8", smtputf8), slog.Duration("duration", time.Since(start)))
|
||||
}
|
||||
for _, result := range results {
|
||||
log.Debugx("dkim verify result", result.Err, mlog.Field("smtputf8", smtputf8), mlog.Field("status", result.Status), mlog.Field("sig", result.Sig), mlog.Field("record", result.Record), mlog.Field("duration", time.Since(start)))
|
||||
log.Debugx("dkim verify result", result.Err,
|
||||
slog.Bool("smtputf8", smtputf8),
|
||||
slog.Any("status", result.Status),
|
||||
slog.Any("sig", result.Sig),
|
||||
slog.Any("record", result.Record),
|
||||
slog.Duration("duration", time.Since(start)))
|
||||
}
|
||||
}()
|
||||
|
||||
@ -373,33 +391,33 @@ func Verify(ctx context.Context, resolver dns.Resolver, smtputf8 bool, policy fu
|
||||
if err != nil {
|
||||
// ../rfc/6376:2503
|
||||
err := fmt.Errorf("parsing DKIM-Signature header: %w", err)
|
||||
results = append(results, Result{StatusPermerror, nil, nil, err})
|
||||
results = append(results, Result{StatusPermerror, nil, nil, false, err})
|
||||
continue
|
||||
}
|
||||
|
||||
h, canonHeaderSimple, canonDataSimple, err := checkSignatureParams(ctx, sig)
|
||||
h, canonHeaderSimple, canonDataSimple, err := checkSignatureParams(ctx, log, sig)
|
||||
if err != nil {
|
||||
results = append(results, Result{StatusPermerror, nil, nil, err})
|
||||
results = append(results, Result{StatusPermerror, sig, nil, false, err})
|
||||
continue
|
||||
}
|
||||
|
||||
// ../rfc/6376:2560
|
||||
if err := policy(sig); err != nil {
|
||||
err := fmt.Errorf("%w: %s", ErrPolicy, err)
|
||||
results = append(results, Result{StatusPolicy, nil, nil, err})
|
||||
results = append(results, Result{StatusPolicy, sig, nil, false, err})
|
||||
continue
|
||||
}
|
||||
|
||||
br := bufio.NewReader(&moxio.AtReader{R: r, Offset: int64(bodyOffset)})
|
||||
status, txt, err := verifySignature(ctx, resolver, sig, h, canonHeaderSimple, canonDataSimple, hdrs, verifySig, br, ignoreTestMode)
|
||||
results = append(results, Result{status, sig, txt, err})
|
||||
status, txt, authentic, err := verifySignature(ctx, log.Logger, resolver, sig, h, canonHeaderSimple, canonDataSimple, hdrs, verifySig, br, ignoreTestMode)
|
||||
results = append(results, Result{status, sig, txt, authentic, err})
|
||||
}
|
||||
return results, nil
|
||||
}
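A sketch, not part of the patch, of calling the updated Verify from application code: the *slog.Logger is now an explicit parameter, and dkim.DefaultPolicy (used by the examples elsewhere in this change) serves as the signature-acceptance policy. The message content is made up:

```go
package main

import (
	"context"
	"log"
	"log/slog"
	"strings"

	"github.com/mjl-/mox/dkim"
	"github.com/mjl-/mox/dns"
)

func main() {
	ctx := context.Background()
	resolver := dns.StrictResolver{}
	msg := strings.NewReader("From: <mjl@mox.example>\r\nSubject: test\r\n\r\nHello.\r\n")

	smtputf8 := false
	ignoreTestMode := false
	results, err := dkim.Verify(ctx, slog.Default(), resolver, smtputf8, dkim.DefaultPolicy, msg, ignoreTestMode)
	if err != nil {
		log.Fatalf("dkim verify: %v", err)
	}
	for _, r := range results {
		log.Printf("dkim signature: status %s, err %v", r.Status, r.Err)
	}
}
```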
|
||||
// check if signature is acceptable.
|
||||
// Only looks at the signature parameters, not at the DNS record.
|
||||
func checkSignatureParams(ctx context.Context, sig *Sig) (hash crypto.Hash, canonHeaderSimple, canonBodySimple bool, rerr error) {
|
||||
func checkSignatureParams(ctx context.Context, log mlog.Log, sig *Sig) (hash crypto.Hash, canonHeaderSimple, canonBodySimple bool, rerr error) {
|
||||
// "From" header is required, ../rfc/6376:2122 ../rfc/6376:2546
|
||||
var from bool
|
||||
for _, h := range sig.SignedHeaders {
|
||||
@ -428,7 +446,7 @@ func checkSignatureParams(ctx context.Context, sig *Sig) (hash crypto.Hash, cano
|
||||
if subdom.Unicode != "" {
|
||||
subdom.Unicode = "x." + subdom.Unicode
|
||||
}
|
||||
if orgDom := publicsuffix.Lookup(ctx, subdom); subdom.ASCII == orgDom.ASCII {
|
||||
if orgDom := publicsuffix.Lookup(ctx, log.Logger, subdom); subdom.ASCII == orgDom.ASCII && !(Localserve && sig.Domain.ASCII == "localhost") {
|
||||
return 0, false, false, fmt.Errorf("%w: %s", ErrTLD, sig.Domain)
|
||||
}
|
||||
|
||||
@ -477,15 +495,15 @@ func checkSignatureParams(ctx context.Context, sig *Sig) (hash crypto.Hash, cano
|
||||
}
|
||||
|
||||
// lookup the public key in the DNS and verify the signature.
|
||||
func verifySignature(ctx context.Context, resolver dns.Resolver, sig *Sig, hash crypto.Hash, canonHeaderSimple, canonDataSimple bool, hdrs []header, verifySig []byte, body *bufio.Reader, ignoreTestMode bool) (Status, *Record, error) {
|
||||
func verifySignature(ctx context.Context, elog *slog.Logger, resolver dns.Resolver, sig *Sig, hash crypto.Hash, canonHeaderSimple, canonDataSimple bool, hdrs []header, verifySig []byte, body *bufio.Reader, ignoreTestMode bool) (Status, *Record, bool, error) {
|
||||
// ../rfc/6376:2604
|
||||
status, record, _, err := Lookup(ctx, resolver, sig.Selector, sig.Domain)
|
||||
status, record, _, authentic, err := Lookup(ctx, elog, resolver, sig.Selector, sig.Domain)
|
||||
if err != nil {
|
||||
// todo: for temporary errors, we could pass on information so caller returns a 4.7.5 ecode, ../rfc/6376:2777
|
||||
return status, nil, err
|
||||
return status, nil, authentic, err
|
||||
}
|
||||
status, err = verifySignatureRecord(record, sig, hash, canonHeaderSimple, canonDataSimple, hdrs, verifySig, body, ignoreTestMode)
|
||||
return status, record, err
|
||||
return status, record, authentic, err
|
||||
}
|
||||
|
||||
// verify a DKIM signature given the record from dns and signature from the email message.
|
||||
@ -531,7 +549,7 @@ func verifySignatureRecord(r *Record, sig *Sig, hash crypto.Hash, canonHeaderSim
|
||||
if r.PublicKey == nil {
|
||||
return StatusPermerror, ErrKeyRevoked
|
||||
} else if rsaKey, ok := r.PublicKey.(*rsa.PublicKey); ok && rsaKey.N.BitLen() < 1024 {
|
||||
// todo: find a reference that supports this.
|
||||
// ../rfc/8301:157
|
||||
return StatusPermerror, ErrWeakKey
|
||||
}
|
||||
|
||||
@ -822,8 +840,8 @@ func parseHeaders(br *bufio.Reader) ([]header, int, error) {
|
||||
return nil, 0, fmt.Errorf("empty header key")
|
||||
}
|
||||
lkey = strings.ToLower(key)
|
||||
value = append([]byte{}, t[1]...)
|
||||
raw = append([]byte{}, line...)
|
||||
value = slices.Clone(t[1])
|
||||
raw = slices.Clone(line)
|
||||
}
|
||||
if key != "" {
|
||||
l = append(l, header{key, lkey, value, raw})
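The switch to slices.Clone above is behavior-preserving. A standalone illustration using only the Go 1.21+ standard library, unrelated to mox itself:

```go
package main

import (
	"bytes"
	"fmt"
	"slices"
)

func main() {
	line := []byte("Subject: test")

	copied := append([]byte{}, line...) // previous idiom: copy into a fresh slice
	cloned := slices.Clone(line)        // equivalent, clearer intent

	fmt.Println(bytes.Equal(copied, cloned)) // true
	fmt.Println(&cloned[0] != &line[0])      // true: an independent copy, not an alias
}
```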
@ -15,10 +15,12 @@ import (
	"strings"
	"testing"

	"github.com/mjl-/mox/config"
	"github.com/mjl-/mox/dns"
	"github.com/mjl-/mox/mlog"
)

var pkglog = mlog.New("dkim", nil)

func policyOK(sig *Sig) error {
	return nil
}
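The pkglog variable above shows how the migrated packages bridge to the standard library logger: mlog.New wraps an optional parent *slog.Logger, and its Logger field is what gets passed to the dkim and dmarc functions. A hedged sketch of that wiring, assuming mlog.New and the Logger field exactly as used in this diff:

```go
package main

import (
	"log/slog"
	"os"

	"github.com/mjl-/mox/mlog"
)

func main() {
	// Package-level logger as in the tests: no parent handler.
	pkglog := mlog.New("dkim", nil)

	// Or attach an existing *slog.Logger so mox packages log through it.
	parent := slog.New(slog.NewTextHandler(os.Stderr, nil))
	applog := mlog.New("dkim", parent)

	// The Logger field is a *slog.Logger, the type the new APIs accept.
	var elog *slog.Logger = pkglog.Logger
	elog.Info("example log line")
	applog.Logger.Info("another example")
}
```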
|
||||
@ -143,7 +145,7 @@ test
|
||||
},
|
||||
}
|
||||
|
||||
results, err := Verify(context.Background(), resolver, false, policyOK, strings.NewReader(message), false)
|
||||
results, err := Verify(context.Background(), pkglog.Logger, resolver, false, policyOK, strings.NewReader(message), false)
|
||||
if err != nil {
|
||||
t.Fatalf("dkim verify: %v", err)
|
||||
}
|
||||
@ -190,7 +192,7 @@ Joe.
|
||||
},
|
||||
}
|
||||
|
||||
results, err := Verify(context.Background(), resolver, false, policyOK, strings.NewReader(message), false)
|
||||
results, err := Verify(context.Background(), pkglog.Logger, resolver, false, policyOK, strings.NewReader(message), false)
|
||||
if err != nil {
|
||||
t.Fatalf("dkim verify: %v", err)
|
||||
}
|
||||
@ -219,50 +221,42 @@ test
|
||||
rsaKey := getRSAKey(t)
|
||||
ed25519Key := ed25519.NewKeyFromSeed(make([]byte, 32))
|
||||
|
||||
selrsa := config.Selector{
|
||||
HashEffective: "sha256",
|
||||
Key: rsaKey,
|
||||
HeadersEffective: strings.Split("From,To,Cc,Bcc,Reply-To,References,In-Reply-To,Subject,Date,Message-ID,Content-Type", ","),
|
||||
Domain: dns.Domain{ASCII: "testrsa"},
|
||||
selrsa := Selector{
|
||||
Hash: "sha256",
|
||||
PrivateKey: rsaKey,
|
||||
Headers: strings.Split("From,To,Cc,Bcc,Reply-To,References,In-Reply-To,Subject,Date,Message-ID,Content-Type", ","),
|
||||
Domain: dns.Domain{ASCII: "testrsa"},
|
||||
}
|
||||
|
||||
// Now with sha1 and relaxed canonicalization.
|
||||
selrsa2 := config.Selector{
|
||||
HashEffective: "sha1",
|
||||
Key: rsaKey,
|
||||
HeadersEffective: strings.Split("From,To,Cc,Bcc,Reply-To,References,In-Reply-To,Subject,Date,Message-ID,Content-Type", ","),
|
||||
Domain: dns.Domain{ASCII: "testrsa2"},
|
||||
selrsa2 := Selector{
|
||||
Hash: "sha1",
|
||||
PrivateKey: rsaKey,
|
||||
Headers: strings.Split("From,To,Cc,Bcc,Reply-To,References,In-Reply-To,Subject,Date,Message-ID,Content-Type", ","),
|
||||
Domain: dns.Domain{ASCII: "testrsa2"},
|
||||
}
|
||||
selrsa2.Canonicalization.HeaderRelaxed = true
|
||||
selrsa2.Canonicalization.BodyRelaxed = true
|
||||
selrsa2.HeaderRelaxed = true
|
||||
selrsa2.BodyRelaxed = true
|
||||
|
||||
// Ed25519 key.
|
||||
seled25519 := config.Selector{
|
||||
HashEffective: "sha256",
|
||||
Key: ed25519Key,
|
||||
HeadersEffective: strings.Split("From,To,Cc,Bcc,Reply-To,References,In-Reply-To,Subject,Date,Message-ID,Content-Type", ","),
|
||||
Domain: dns.Domain{ASCII: "tested25519"},
|
||||
seled25519 := Selector{
|
||||
Hash: "sha256",
|
||||
PrivateKey: ed25519Key,
|
||||
Headers: strings.Split("From,To,Cc,Bcc,Reply-To,References,In-Reply-To,Subject,Date,Message-ID,Content-Type", ","),
|
||||
Domain: dns.Domain{ASCII: "tested25519"},
|
||||
}
|
||||
// Again ed25519, but without sealing headers. Use sha256 again, for reusing the body hash from the previous dkim-signature.
|
||||
seled25519b := config.Selector{
|
||||
HashEffective: "sha256",
|
||||
Key: ed25519Key,
|
||||
HeadersEffective: strings.Split("From,To,Cc,Bcc,Reply-To,Subject,Date", ","),
|
||||
DontSealHeaders: true,
|
||||
Domain: dns.Domain{ASCII: "tested25519b"},
|
||||
}
|
||||
dkimConf := config.DKIM{
|
||||
Selectors: map[string]config.Selector{
|
||||
"testrsa": selrsa,
|
||||
"testrsa2": selrsa2,
|
||||
"tested25519": seled25519,
|
||||
"tested25519b": seled25519b,
|
||||
},
|
||||
Sign: []string{"testrsa", "testrsa2", "tested25519", "tested25519b"},
|
||||
seled25519b := Selector{
|
||||
Hash: "sha256",
|
||||
PrivateKey: ed25519Key,
|
||||
Headers: strings.Split("From,To,Cc,Bcc,Reply-To,Subject,Date", ","),
|
||||
SealHeaders: true,
|
||||
Domain: dns.Domain{ASCII: "tested25519b"},
|
||||
}
|
||||
selectors := []Selector{selrsa, selrsa2, seled25519, seled25519b}
|
||||
|
||||
ctx := context.Background()
|
||||
headers, err := Sign(ctx, "mjl", dns.Domain{ASCII: "mox.example"}, dkimConf, false, strings.NewReader(message))
|
||||
headers, err := Sign(ctx, pkglog.Logger, "mjl", dns.Domain{ASCII: "mox.example"}, selectors, false, strings.NewReader(message))
|
||||
if err != nil {
|
||||
t.Fatalf("sign: %v", err)
|
||||
}
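The test above now builds dkim.Selector values directly instead of going through config.Selector and config.DKIM. A minimal signing sketch along the same lines, not part of the patch; the key is a throwaway ed25519 test key and the domain and selector names are placeholders:

```go
package main

import (
	"context"
	"crypto/ed25519"
	"log"
	"log/slog"
	"strings"

	"github.com/mjl-/mox/dkim"
	"github.com/mjl-/mox/dns"
)

func main() {
	key := ed25519.NewKeyFromSeed(make([]byte, 32)) // deterministic key, for illustration only

	sel := dkim.Selector{
		Hash:       "sha256",
		PrivateKey: key,
		Headers:    strings.Split("From,To,Subject,Date,Message-ID", ","),
		Domain:     dns.Domain{ASCII: "sel2024"},
	}
	// Relaxed canonicalization for header and body, as in the second test selector above.
	sel.HeaderRelaxed = true
	sel.BodyRelaxed = true

	msg := "From: <mjl@mox.example>\r\nTo: <to@example.org>\r\nSubject: hi\r\n\r\nbody\r\n"
	headers, err := dkim.Sign(context.Background(), slog.Default(), "mjl", dns.Domain{ASCII: "mox.example"}, []dkim.Selector{sel}, false, strings.NewReader(msg))
	if err != nil {
		log.Fatalf("dkim sign: %v", err)
	}
	log.Printf("DKIM-Signature header(s):\n%s", headers)
}
```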
|
||||
@ -293,7 +287,7 @@ test
|
||||
|
||||
nmsg := headers + message
|
||||
|
||||
results, err := Verify(ctx, resolver, false, policyOK, strings.NewReader(nmsg), false)
|
||||
results, err := Verify(ctx, pkglog.Logger, resolver, false, policyOK, strings.NewReader(nmsg), false)
|
||||
if err != nil {
|
||||
t.Fatalf("verify: %s", err)
|
||||
}
|
||||
@ -304,31 +298,31 @@ test
|
||||
//log.Infof("nmsg\n%s", nmsg)
|
||||
|
||||
// Multiple From headers.
|
||||
_, err = Sign(ctx, "mjl", dns.Domain{ASCII: "mox.example"}, dkimConf, false, strings.NewReader("From: <mjl@mox.example>\r\nFrom: <mjl@mox.example>\r\n\r\ntest"))
|
||||
_, err = Sign(ctx, pkglog.Logger, "mjl", dns.Domain{ASCII: "mox.example"}, selectors, false, strings.NewReader("From: <mjl@mox.example>\r\nFrom: <mjl@mox.example>\r\n\r\ntest"))
|
||||
if !errors.Is(err, ErrFrom) {
|
||||
t.Fatalf("sign, got err %v, expected ErrFrom", err)
|
||||
}
|
||||
|
||||
// No From header.
|
||||
_, err = Sign(ctx, "mjl", dns.Domain{ASCII: "mox.example"}, dkimConf, false, strings.NewReader("Brom: <mjl@mox.example>\r\n\r\ntest"))
|
||||
_, err = Sign(ctx, pkglog.Logger, "mjl", dns.Domain{ASCII: "mox.example"}, selectors, false, strings.NewReader("Brom: <mjl@mox.example>\r\n\r\ntest"))
|
||||
if !errors.Is(err, ErrFrom) {
|
||||
t.Fatalf("sign, got err %v, expected ErrFrom", err)
|
||||
}
|
||||
|
||||
// Malformed headers.
|
||||
_, err = Sign(ctx, "mjl", dns.Domain{ASCII: "mox.example"}, dkimConf, false, strings.NewReader(":\r\n\r\ntest"))
|
||||
_, err = Sign(ctx, pkglog.Logger, "mjl", dns.Domain{ASCII: "mox.example"}, selectors, false, strings.NewReader(":\r\n\r\ntest"))
|
||||
if !errors.Is(err, ErrHeaderMalformed) {
|
||||
t.Fatalf("sign, got err %v, expected ErrHeaderMalformed", err)
|
||||
}
|
||||
_, err = Sign(ctx, "mjl", dns.Domain{ASCII: "mox.example"}, dkimConf, false, strings.NewReader(" From:<mjl@mox.example>\r\n\r\ntest"))
|
||||
_, err = Sign(ctx, pkglog.Logger, "mjl", dns.Domain{ASCII: "mox.example"}, selectors, false, strings.NewReader(" From:<mjl@mox.example>\r\n\r\ntest"))
|
||||
if !errors.Is(err, ErrHeaderMalformed) {
|
||||
t.Fatalf("sign, got err %v, expected ErrHeaderMalformed", err)
|
||||
}
|
||||
_, err = Sign(ctx, "mjl", dns.Domain{ASCII: "mox.example"}, dkimConf, false, strings.NewReader("Frøm:<mjl@mox.example>\r\n\r\ntest"))
|
||||
_, err = Sign(ctx, pkglog.Logger, "mjl", dns.Domain{ASCII: "mox.example"}, selectors, false, strings.NewReader("Frøm:<mjl@mox.example>\r\n\r\ntest"))
|
||||
if !errors.Is(err, ErrHeaderMalformed) {
|
||||
t.Fatalf("sign, got err %v, expected ErrHeaderMalformed", err)
|
||||
}
|
||||
_, err = Sign(ctx, "mjl", dns.Domain{ASCII: "mox.example"}, dkimConf, false, strings.NewReader("From:<mjl@mox.example>"))
|
||||
_, err = Sign(ctx, pkglog.Logger, "mjl", dns.Domain{ASCII: "mox.example"}, selectors, false, strings.NewReader("From:<mjl@mox.example>"))
|
||||
if !errors.Is(err, ErrHeaderMalformed) {
|
||||
t.Fatalf("sign, got err %v, expected ErrHeaderMalformed", err)
|
||||
}
|
||||
@ -355,9 +349,9 @@ test
|
||||
var record *Record
|
||||
var recordTxt string
|
||||
var msg string
|
||||
var sel config.Selector
|
||||
var dkimConf config.DKIM
|
||||
var policy func(*Sig) error
|
||||
var sel Selector
|
||||
var selectors []Selector
|
||||
var signed bool
|
||||
var signDomain dns.Domain
|
||||
|
||||
@ -386,18 +380,13 @@ test
|
||||
},
|
||||
}
|
||||
|
||||
sel = config.Selector{
|
||||
HashEffective: "sha256",
|
||||
Key: key,
|
||||
HeadersEffective: strings.Split("From,To,Cc,Bcc,Reply-To,References,In-Reply-To,Subject,Date,Message-ID,Content-Type", ","),
|
||||
Domain: dns.Domain{ASCII: "test"},
|
||||
}
|
||||
dkimConf = config.DKIM{
|
||||
Selectors: map[string]config.Selector{
|
||||
"test": sel,
|
||||
},
|
||||
Sign: []string{"test"},
|
||||
sel = Selector{
|
||||
Hash: "sha256",
|
||||
PrivateKey: key,
|
||||
Headers: strings.Split("From,To,Cc,Bcc,Reply-To,References,In-Reply-To,Subject,Date,Message-ID,Content-Type", ","),
|
||||
Domain: dns.Domain{ASCII: "test"},
|
||||
}
|
||||
selectors = []Selector{sel}
|
||||
|
||||
msg = message
|
||||
signed = false
|
||||
@ -408,7 +397,7 @@ test
|
||||
|
||||
msg = strings.ReplaceAll(msg, "\n", "\r\n")
|
||||
|
||||
headers, err := Sign(context.Background(), "mjl", signDomain, dkimConf, false, strings.NewReader(msg))
|
||||
headers, err := Sign(context.Background(), pkglog.Logger, "mjl", signDomain, selectors, false, strings.NewReader(msg))
|
||||
if err != nil {
|
||||
t.Fatalf("sign: %v", err)
|
||||
}
|
||||
@ -425,7 +414,7 @@ test
|
||||
sign()
|
||||
}
|
||||
|
||||
results, err := Verify(context.Background(), resolver, true, policy, strings.NewReader(msg), false)
|
||||
results, err := Verify(context.Background(), pkglog.Logger, resolver, true, policy, strings.NewReader(msg), false)
|
||||
if (err == nil) != (expErr == nil) || err != nil && !errors.Is(err, expErr) {
|
||||
t.Fatalf("got verify error %v, expected %v", err, expErr)
|
||||
}
|
||||
@ -460,8 +449,8 @@ test
|
||||
})
|
||||
// DNS request is failing temporarily.
|
||||
test(nil, StatusTemperror, ErrDNS, func() {
|
||||
resolver.Fail = map[dns.Mockreq]struct{}{
|
||||
{Type: "txt", Name: "test._domainkey.mox.example."}: {},
|
||||
resolver.Fail = []string{
|
||||
"txt test._domainkey.mox.example.",
|
||||
}
|
||||
})
|
||||
// Claims to be DKIM through v=, but cannot be parsed. ../rfc/6376:2621
|
||||
@ -512,11 +501,9 @@ test
|
||||
})
|
||||
// Unknown canonicalization.
|
||||
test(nil, StatusPermerror, ErrCanonicalizationUnknown, func() {
|
||||
sel.Canonicalization.HeaderRelaxed = true
|
||||
sel.Canonicalization.BodyRelaxed = true
|
||||
dkimConf.Selectors = map[string]config.Selector{
|
||||
"test": sel,
|
||||
}
|
||||
sel.HeaderRelaxed = true
|
||||
sel.BodyRelaxed = true
|
||||
selectors = []Selector{sel}
|
||||
|
||||
sign()
|
||||
msg = strings.ReplaceAll(msg, "relaxed/relaxed", "bogus/bogus")
|
||||
@ -574,10 +561,8 @@ test
|
||||
resolver.TXT = map[string][]string{
|
||||
"test._domainkey.mox.example.": {txt},
|
||||
}
|
||||
sel.Key = key
|
||||
dkimConf.Selectors = map[string]config.Selector{
|
||||
"test": sel,
|
||||
}
|
||||
sel.PrivateKey = key
|
||||
selectors = []Selector{sel}
|
||||
})
|
||||
// Key not allowed for email by DNS record. ../rfc/6376:1541
|
||||
test(nil, StatusPermerror, ErrKeyNotForEmail, func() {
|
||||
@ -600,18 +585,14 @@ test
|
||||
|
||||
// Check that last-occurring header field is used.
|
||||
test(nil, StatusFail, ErrSigVerify, func() {
|
||||
sel.DontSealHeaders = true
|
||||
dkimConf.Selectors = map[string]config.Selector{
|
||||
"test": sel,
|
||||
}
|
||||
sel.SealHeaders = false
|
||||
selectors = []Selector{sel}
|
||||
sign()
|
||||
msg = strings.ReplaceAll(msg, "\r\n\r\n", "\r\nsubject: another\r\n\r\n")
|
||||
})
|
||||
test(nil, StatusPass, nil, func() {
|
||||
sel.DontSealHeaders = true
|
||||
dkimConf.Selectors = map[string]config.Selector{
|
||||
"test": sel,
|
||||
}
|
||||
sel.SealHeaders = false
|
||||
selectors = []Selector{sel}
|
||||
sign()
|
||||
msg = "subject: another\r\n" + msg
|
||||
})
|
||||
|
@ -6,10 +6,15 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/text/unicode/norm"
|
||||
|
||||
"github.com/mjl-/mox/dns"
|
||||
"github.com/mjl-/mox/smtp"
|
||||
)
|
||||
|
||||
// Pedantic enables stricter parsing.
|
||||
var Pedantic bool
|
||||
|
||||
type parseErr string
|
||||
|
||||
func (e parseErr) Error() string {
|
||||
@ -194,16 +199,21 @@ func (p *parser) xcanonical() string {
|
||||
return s
|
||||
}
|
||||
|
||||
func (p *parser) xdomain() dns.Domain {
|
||||
func (p *parser) xdomainselector(isselector bool) dns.Domain {
|
||||
subdomain := func(c rune, i int) bool {
|
||||
// domain names must always be a-labels, ../rfc/6376:1115 ../rfc/6376:1187 ../rfc/6376:1303
|
||||
// todo: add a "lax" mode where underscore is allowed if this is a selector? seen in the wild, but invalid: ../rfc/6376:581 ../rfc/5321:2303
|
||||
return isalphadigit(c) || (i > 0 && c == '-' && p.o+1 < len(p.s))
|
||||
// dkim selectors with underscores happen in the wild, accept them when not in
|
||||
// pedantic mode. ../rfc/6376:581 ../rfc/5321:2303
|
||||
return isalphadigit(c) || (i > 0 && (c == '-' || isselector && !Pedantic && c == '_') && p.o+1 < len(p.s))
|
||||
}
|
||||
s := p.xtakefn1(false, subdomain)
|
||||
for p.hasPrefix(".") {
|
||||
s += p.xtake(".") + p.xtakefn1(false, subdomain)
|
||||
}
|
||||
if isselector {
|
||||
// Not to be interpreted as IDNA.
|
||||
return dns.Domain{ASCII: strings.ToLower(s)}
|
||||
}
|
||||
d, err := dns.ParseDomain(s)
|
||||
if err != nil {
|
||||
p.xerrorf("parsing domain %q: %s", s, err)
|
||||
@ -211,6 +221,14 @@ func (p *parser) xdomain() dns.Domain {
|
||||
return d
|
||||
}
|
||||
|
||||
func (p *parser) xdomain() dns.Domain {
|
||||
return p.xdomainselector(false)
|
||||
}
|
||||
|
||||
func (p *parser) xselector() dns.Domain {
|
||||
return p.xdomainselector(true)
|
||||
}
|
||||
|
||||
func (p *parser) xhdrName(ignoreFWS bool) string {
|
||||
// ../rfc/6376:473
|
||||
// ../rfc/5322:1689
|
||||
@ -258,12 +276,12 @@ func (p *parser) xlocalpart() smtp.Localpart {
|
||||
s += "." + p.xatom()
|
||||
}
|
||||
}
|
||||
// todo: have a strict parser that only allows the actual max of 64 bytes. some services have large localparts because of generated (bounce) addresses.
|
||||
if len(s) > 128 {
|
||||
// In the wild, some services use large localparts for generated (bounce) addresses.
|
||||
if Pedantic && len(s) > 64 || len(s) > 128 {
|
||||
// ../rfc/5321:3486
|
||||
p.xerrorf("localpart longer than 64 octets")
|
||||
}
|
||||
return smtp.Localpart(s)
|
||||
return smtp.Localpart(norm.NFC.String(s))
|
||||
}
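The xlocalpart change above normalizes the parsed localpart to NFC and only enforces the 64-octet limit in Pedantic mode. A small standalone illustration of what norm.NFC.String does, using the same golang.org/x/text package imported above:

```go
package main

import (
	"fmt"

	"golang.org/x/text/unicode/norm"
)

func main() {
	// "café" with the accent written as 'e' plus a combining acute accent (NFD form).
	decomposed := "caf\u0065\u0301"
	composed := norm.NFC.String(decomposed) // single precomposed 'é'

	fmt.Println(decomposed == composed)                // false: different byte sequences
	fmt.Println(norm.NFC.String(composed) == composed) // true: NFC is idempotent
	fmt.Println(len(decomposed), len(composed))        // 6 5
}
```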
|
||||
|
||||
func (p *parser) xquotedString() string {
|
||||
@ -458,10 +476,6 @@ func (p *parser) xqp(pipeEncoded, colonEncoded, ignoreFWS bool) string {
|
||||
return s
|
||||
}
|
||||
|
||||
func (p *parser) xselector() dns.Domain {
|
||||
return p.xdomain()
|
||||
}
|
||||
|
||||
func (p *parser) xtimestamp() int64 {
|
||||
// ../rfc/6376:1325 ../rfc/6376:1358
|
||||
return p.xnumber(12)
|
||||
|
@ -117,7 +117,7 @@ func (s *Sig) Header() (string, error) {
|
||||
} else if i == len(s.SignedHeaders)-1 {
|
||||
v += ";"
|
||||
}
|
||||
w.Addf(sep, v)
|
||||
w.Addf(sep, "%s", v)
|
||||
}
|
||||
}
|
||||
if len(s.CopiedHeaders) > 0 {
|
||||
@ -139,7 +139,7 @@ func (s *Sig) Header() (string, error) {
|
||||
} else if i == len(s.CopiedHeaders)-1 {
|
||||
v += ";"
|
||||
}
|
||||
w.Addf(sep, v)
|
||||
w.Addf(sep, "%s", v)
|
||||
}
|
||||
}
|
||||
|
||||
@ -147,7 +147,7 @@ func (s *Sig) Header() (string, error) {
|
||||
|
||||
w.Addf(" ", "b=")
|
||||
if len(s.Signature) > 0 {
|
||||
w.AddWrap([]byte(base64.StdEncoding.EncodeToString(s.Signature)))
|
||||
w.AddWrap([]byte(base64.StdEncoding.EncodeToString(s.Signature)), false)
|
||||
}
|
||||
w.Add("\r\n")
|
||||
return w.String(), nil
|
||||
|
@ -91,7 +91,7 @@ func TestSig(t *testing.T) {
|
||||
BodyHash: xbase64("LjkN2rUhrS3zKXfH2vNgUzz5ERRJkgP9CURXBX0JP0Q="),
|
||||
Domain: xdomain("xn--mx-lka.example"), // møx.example
|
||||
SignedHeaders: []string{"from"},
|
||||
Selector: xdomain("xn--tst-bma"), // tést
|
||||
Selector: dns.Domain{ASCII: "xn--tst-bma"},
|
||||
Identity: &Identity{&ulp, xdomain("xn--tst-bma.xn--mx-lka.example")}, // tést.møx.example
|
||||
Canonicalization: "simple/simple",
|
||||
Length: -1,
|
||||
|
@ -32,7 +32,7 @@ func TestParseRecord(t *testing.T) {
|
||||
}
|
||||
if r != nil {
|
||||
pk := r.Pubkey
|
||||
for i := 0; i < 2; i++ {
|
||||
for range 2 {
|
||||
ntxt, err := r.Record()
|
||||
if err != nil {
|
||||
t.Fatalf("making record: %v", err)
|
||||
|
dmarc/dmarc.go (192 changes)
@ -14,34 +14,20 @@ import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
mathrand "math/rand"
|
||||
"log/slog"
|
||||
mathrand2 "math/rand/v2"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
|
||||
"github.com/mjl-/mox/dkim"
|
||||
"github.com/mjl-/mox/dns"
|
||||
"github.com/mjl-/mox/mlog"
|
||||
"github.com/mjl-/mox/publicsuffix"
|
||||
"github.com/mjl-/mox/spf"
|
||||
"github.com/mjl-/mox/stub"
|
||||
)
|
||||
|
||||
var xlog = mlog.New("dmarc")
|
||||
|
||||
var (
|
||||
metricDMARCVerify = promauto.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "mox_dmarc_verify_duration_seconds",
|
||||
Help: "DMARC verify, including lookup, duration and result.",
|
||||
Buckets: []float64{0.001, 0.005, 0.01, 0.05, 0.100, 0.5, 1, 5, 10, 20},
|
||||
},
|
||||
[]string{
|
||||
"status",
|
||||
"reject", // yes/no
|
||||
"use", // yes/no, if policy is used after random selection
|
||||
},
|
||||
)
|
||||
MetricVerify stub.HistogramVec = stub.HistogramVecIgnore{}
|
||||
)
|
||||
|
||||
// link errata:
|
||||
@ -71,16 +57,21 @@ const (
// Result is a DMARC policy evaluation.
type Result struct {
	// Whether to reject the message based on policies. If false, the message should
	// not necessarily be accepted, e.g. due to reputation or content-based analysis.
	// not necessarily be accepted: other checks such as reputation-based and
	// content-based analysis may lead to reject the message.
	Reject bool
	// Result of DMARC validation. A message can fail validation, but still
	// not be rejected, e.g. if the policy is "none".
	Status Status
	Status          Status
	AlignedSPFPass  bool
	AlignedDKIMPass bool
	// Domain with the DMARC DNS record. May be the organizational domain instead of
	// the domain in the From-header.
	Domain dns.Domain
	// Parsed DMARC record.
	Record *Record
	// Whether DMARC DNS response was DNSSEC-signed, regardless of whether SPF/DKIM records were DNSSEC-signed.
	RecordAuthentic bool
	// Details about possible error condition, e.g. when parsing the DMARC record failed.
	Err error
}
@ -93,36 +84,45 @@ type Result struct {
|
||||
// domain is determined using the public suffix list. E.g. for
|
||||
// "sub.example.com", the organizational domain is "example.com". The returned
|
||||
// domain is the domain with the DMARC record.
|
||||
func Lookup(ctx context.Context, resolver dns.Resolver, from dns.Domain) (status Status, domain dns.Domain, record *Record, txt string, rerr error) {
|
||||
log := xlog.WithContext(ctx)
|
||||
//
|
||||
// rauthentic indicates if the DNS results were DNSSEC-verified.
|
||||
func Lookup(ctx context.Context, elog *slog.Logger, resolver dns.Resolver, msgFrom dns.Domain) (status Status, domain dns.Domain, record *Record, txt string, rauthentic bool, rerr error) {
|
||||
log := mlog.New("dmarc", elog)
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
log.Debugx("dmarc lookup result", rerr, mlog.Field("fromdomain", from), mlog.Field("status", status), mlog.Field("domain", domain), mlog.Field("record", record), mlog.Field("duration", time.Since(start)))
|
||||
log.Debugx("dmarc lookup result", rerr,
|
||||
slog.Any("fromdomain", msgFrom),
|
||||
slog.Any("status", status),
|
||||
slog.Any("domain", domain),
|
||||
slog.Any("record", record),
|
||||
slog.Duration("duration", time.Since(start)))
|
||||
}()
|
||||
|
||||
// ../rfc/7489:859 ../rfc/7489:1370
|
||||
domain = from
|
||||
status, record, txt, err := lookupRecord(ctx, resolver, domain)
|
||||
domain = msgFrom
|
||||
status, record, txt, authentic, err := lookupRecord(ctx, resolver, domain)
|
||||
if status != StatusNone {
|
||||
return status, domain, record, txt, err
|
||||
return status, domain, record, txt, authentic, err
|
||||
}
|
||||
if record == nil {
|
||||
// ../rfc/7489:761 ../rfc/7489:1377
|
||||
domain = publicsuffix.Lookup(ctx, from)
|
||||
if domain == from {
|
||||
return StatusNone, domain, nil, txt, err
|
||||
domain = publicsuffix.Lookup(ctx, log.Logger, msgFrom)
|
||||
if domain == msgFrom {
|
||||
return StatusNone, domain, nil, txt, authentic, err
|
||||
}
|
||||
|
||||
status, record, txt, err = lookupRecord(ctx, resolver, domain)
|
||||
var xauth bool
|
||||
status, record, txt, xauth, err = lookupRecord(ctx, resolver, domain)
|
||||
authentic = authentic && xauth
|
||||
}
|
||||
return status, domain, record, txt, err
|
||||
return status, domain, record, txt, authentic, err
|
||||
}
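As the fallback above shows, when no record is found at the From-domain, the lookup is retried at the organizational domain computed by publicsuffix.Lookup, which now also takes a *slog.Logger. A sketch of that call in isolation, not part of the patch, with a made-up subdomain:

```go
package main

import (
	"context"
	"fmt"
	"log/slog"

	"github.com/mjl-/mox/dns"
	"github.com/mjl-/mox/publicsuffix"
)

func main() {
	sub := dns.Domain{ASCII: "a.b.sub.example.com"}
	orgDom := publicsuffix.Lookup(context.Background(), slog.Default(), sub)

	// With the default public suffix list the organizational domain is example.com,
	// so a missing _dmarc.a.b.sub.example.com. leads to a _dmarc.example.com. lookup.
	fmt.Println(orgDom.ASCII)
}
```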
|
||||
|
||||
func lookupRecord(ctx context.Context, resolver dns.Resolver, domain dns.Domain) (Status, *Record, string, error) {
|
||||
func lookupRecord(ctx context.Context, resolver dns.Resolver, domain dns.Domain) (Status, *Record, string, bool, error) {
|
||||
name := "_dmarc." + domain.ASCII + "."
|
||||
txts, err := dns.WithPackage(resolver, "dmarc").LookupTXT(ctx, name)
|
||||
txts, result, err := dns.WithPackage(resolver, "dmarc").LookupTXT(ctx, name)
|
||||
if err != nil && !dns.IsNotFound(err) {
|
||||
return StatusTemperror, nil, "", fmt.Errorf("%w: %s", ErrDNS, err)
|
||||
return StatusTemperror, nil, "", result.Authentic, fmt.Errorf("%w: %s", ErrDNS, err)
|
||||
}
|
||||
var record *Record
|
||||
var text string
|
||||
@ -133,17 +133,82 @@ func lookupRecord(ctx context.Context, resolver dns.Resolver, domain dns.Domain)
|
||||
// ../rfc/7489:1374
|
||||
continue
|
||||
} else if err != nil {
|
||||
return StatusPermerror, nil, text, fmt.Errorf("%w: %s", ErrSyntax, err)
|
||||
return StatusPermerror, nil, text, result.Authentic, fmt.Errorf("%w: %s", ErrSyntax, err)
|
||||
}
|
||||
if record != nil {
|
||||
// ../ ../rfc/7489:1388
|
||||
return StatusNone, nil, "", ErrMultipleRecords
|
||||
// ../rfc/7489:1388
|
||||
return StatusNone, nil, "", result.Authentic, ErrMultipleRecords
|
||||
}
|
||||
text = txt
|
||||
record = r
|
||||
rerr = nil
|
||||
}
|
||||
return StatusNone, record, text, rerr
|
||||
return StatusNone, record, text, result.Authentic, rerr
|
||||
}
|
||||
|
||||
func lookupReportsRecord(ctx context.Context, resolver dns.Resolver, dmarcDomain, extDestDomain dns.Domain) (Status, []*Record, []string, bool, error) {
|
||||
// ../rfc/7489:1566
|
||||
name := dmarcDomain.ASCII + "._report._dmarc." + extDestDomain.ASCII + "."
|
||||
txts, result, err := dns.WithPackage(resolver, "dmarc").LookupTXT(ctx, name)
|
||||
if err != nil && !dns.IsNotFound(err) {
|
||||
return StatusTemperror, nil, nil, result.Authentic, fmt.Errorf("%w: %s", ErrDNS, err)
|
||||
}
|
||||
var records []*Record
|
||||
var texts []string
|
||||
var rerr error = ErrNoRecord
|
||||
for _, txt := range txts {
|
||||
r, isdmarc, err := ParseRecordNoRequired(txt)
|
||||
// Examples in the RFC use "v=DMARC1", even though it isn't a valid DMARC record.
|
||||
// Accept the specific example.
|
||||
// ../rfc/7489-eid5440
|
||||
if !isdmarc && txt == "v=DMARC1" {
|
||||
xr := DefaultRecord
|
||||
r, isdmarc, err = &xr, true, nil
|
||||
}
|
||||
if !isdmarc {
|
||||
// ../rfc/7489:1586
|
||||
continue
|
||||
}
|
||||
texts = append(texts, txt)
|
||||
records = append(records, r)
|
||||
if err != nil {
|
||||
return StatusPermerror, records, texts, result.Authentic, fmt.Errorf("%w: %s", ErrSyntax, err)
|
||||
}
|
||||
// Multiple records are allowed for the _report record, unlike for policies. ../rfc/7489:1593
|
||||
rerr = nil
|
||||
}
|
||||
return StatusNone, records, texts, result.Authentic, rerr
|
||||
}
|
||||
|
||||
// LookupExternalReportsAccepted returns whether the extDestDomain has opted in
|
||||
// to receiving dmarc reports for dmarcDomain (where the dmarc record was found),
|
||||
// through a "._report._dmarc." DNS TXT DMARC record.
|
||||
//
|
||||
// accepts is true if the external domain has opted in.
|
||||
// If a temporary error occurred, the returned status is StatusTemperror, and a
|
||||
// later retry may give an authoritative result.
|
||||
// The returned error is ErrNoRecord if no opt-in DNS record exists, which is
|
||||
// not a failure condition.
|
||||
//
|
||||
// The normally invalid "v=DMARC1" record is accepted since it is used as
|
||||
// example in RFC 7489.
|
||||
//
|
||||
// authentic indicates if the DNS results were DNSSEC-verified.
|
||||
func LookupExternalReportsAccepted(ctx context.Context, elog *slog.Logger, resolver dns.Resolver, dmarcDomain dns.Domain, extDestDomain dns.Domain) (accepts bool, status Status, records []*Record, txts []string, authentic bool, rerr error) {
|
||||
log := mlog.New("dmarc", elog)
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
log.Debugx("dmarc externalreports result", rerr,
|
||||
slog.Bool("accepts", accepts),
|
||||
slog.Any("dmarcdomain", dmarcDomain),
|
||||
slog.Any("extdestdomain", extDestDomain),
|
||||
slog.Any("records", records),
|
||||
slog.Duration("duration", time.Since(start)))
|
||||
}()
|
||||
|
||||
status, records, txts, authentic, rerr = lookupReportsRecord(ctx, resolver, dmarcDomain, extDestDomain)
|
||||
accepts = rerr == nil
|
||||
return accepts, status, records, txts, authentic, rerr
|
||||
}
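A sketch, not part of the patch, of using the new LookupExternalReportsAccepted before sending an aggregate report to an address outside the policy domain; the domains are placeholders:

```go
package main

import (
	"context"
	"errors"
	"log"
	"log/slog"

	"github.com/mjl-/mox/dmarc"
	"github.com/mjl-/mox/dns"
)

func main() {
	ctx := context.Background()
	resolver := dns.StrictResolver{}

	// The DMARC record for dmarcDomain lists a rua address at extDestDomain; check
	// that extDestDomain published <dmarcDomain>._report._dmarc.<extDestDomain> to opt in.
	dmarcDomain := dns.Domain{ASCII: "sender.example"}
	extDestDomain := dns.Domain{ASCII: "reports.example"}

	accepts, status, _, _, authentic, err := dmarc.LookupExternalReportsAccepted(ctx, slog.Default(), resolver, dmarcDomain, extDestDomain)
	if err != nil && !errors.Is(err, dmarc.ErrNoRecord) {
		log.Printf("looking up report opt-in: status %s: %v", status, err)
	}
	log.Printf("accepts reports: %v (dnssec %v)", accepts, authentic)
}
```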
|
||||
|
||||
// Verify evaluates the DMARC policy for the domain in the From-header of a
|
||||
@ -157,9 +222,10 @@ func lookupRecord(ctx context.Context, resolver dns.Resolver, domain dns.Domain)
|
||||
// Verify always returns the result of verifying the DMARC policy
|
||||
// against the message (for inclusion in Authentication-Result headers).
|
||||
//
|
||||
// useResult indicates if the result should be applied in a policy decision.
|
||||
func Verify(ctx context.Context, resolver dns.Resolver, from dns.Domain, dkimResults []dkim.Result, spfResult spf.Status, spfIdentity *dns.Domain, applyRandomPercentage bool) (useResult bool, result Result) {
|
||||
log := xlog.WithContext(ctx)
|
||||
// useResult indicates if the result should be applied in a policy decision,
|
||||
// based on the "pct" field in the DMARC record.
|
||||
func Verify(ctx context.Context, elog *slog.Logger, resolver dns.Resolver, msgFrom dns.Domain, dkimResults []dkim.Result, spfResult spf.Status, spfIdentity *dns.Domain, applyRandomPercentage bool) (useResult bool, result Result) {
|
||||
log := mlog.New("dmarc", elog)
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
use := "no"
|
||||
@ -170,25 +236,33 @@ func Verify(ctx context.Context, resolver dns.Resolver, from dns.Domain, dkimRes
|
||||
if result.Reject {
|
||||
reject = "yes"
|
||||
}
|
||||
metricDMARCVerify.WithLabelValues(string(result.Status), reject, use).Observe(float64(time.Since(start)) / float64(time.Second))
|
||||
log.Debugx("dmarc verify result", result.Err, mlog.Field("fromdomain", from), mlog.Field("dkimresults", dkimResults), mlog.Field("spfresult", spfResult), mlog.Field("status", result.Status), mlog.Field("reject", result.Reject), mlog.Field("use", useResult), mlog.Field("duration", time.Since(start)))
|
||||
MetricVerify.ObserveLabels(float64(time.Since(start))/float64(time.Second), string(result.Status), reject, use)
|
||||
log.Debugx("dmarc verify result", result.Err,
|
||||
slog.Any("fromdomain", msgFrom),
|
||||
slog.Any("dkimresults", dkimResults),
|
||||
slog.Any("spfresult", spfResult),
|
||||
slog.Any("status", result.Status),
|
||||
slog.Bool("reject", result.Reject),
|
||||
slog.Bool("use", useResult),
|
||||
slog.Duration("duration", time.Since(start)))
|
||||
}()
|
||||
|
||||
status, recordDomain, record, _, err := Lookup(ctx, resolver, from)
|
||||
status, recordDomain, record, _, authentic, err := Lookup(ctx, log.Logger, resolver, msgFrom)
|
||||
if record == nil {
|
||||
return false, Result{false, status, recordDomain, record, err}
|
||||
return false, Result{false, status, false, false, recordDomain, record, authentic, err}
|
||||
}
|
||||
result.Domain = recordDomain
|
||||
result.Record = record
|
||||
result.RecordAuthentic = authentic
|
||||
|
||||
// Record can request sampling of messages to apply policy.
|
||||
// See ../rfc/7489:1432
|
||||
useResult = !applyRandomPercentage || record.Percentage == 100 || mathrand.Intn(100) < record.Percentage
|
||||
useResult = !applyRandomPercentage || record.Percentage == 100 || mathrand2.IntN(100) < record.Percentage
|
||||
|
||||
// We reject treat "quarantine" and "reject" the same. Thus, we also don't
|
||||
// "downgrade" from reject to quarantine if this message was sampled out.
|
||||
// We treat "quarantine" and "reject" the same. Thus, we also don't "downgrade"
|
||||
// from reject to quarantine if this message was sampled out.
|
||||
// ../rfc/7489:1446 ../rfc/7489:1024
|
||||
if recordDomain != from && record.SubdomainPolicy != PolicyEmpty {
|
||||
if recordDomain != msgFrom && record.SubdomainPolicy != PolicyEmpty {
|
||||
result.Reject = record.SubdomainPolicy != PolicyNone
|
||||
} else {
|
||||
result.Reject = record.Policy != PolicyNone
|
||||
@ -208,17 +282,15 @@ func Verify(ctx context.Context, resolver dns.Resolver, from dns.Domain, dkimRes
|
||||
if r, ok := pubsuffixes[name]; ok {
|
||||
return r
|
||||
}
|
||||
r := publicsuffix.Lookup(ctx, name)
|
||||
r := publicsuffix.Lookup(ctx, log.Logger, name)
|
||||
pubsuffixes[name] = r
|
||||
return r
|
||||
}
|
||||
|
||||
// ../rfc/7489:1319
|
||||
// ../rfc/7489:544
|
||||
if spfResult == spf.StatusPass && spfIdentity != nil && (*spfIdentity == from || result.Record.ASPF == "r" && pubsuffix(from) == pubsuffix(*spfIdentity)) {
|
||||
result.Reject = false
|
||||
result.Status = StatusPass
|
||||
return
|
||||
if spfResult == spf.StatusPass && spfIdentity != nil && (*spfIdentity == msgFrom || result.Record.ASPF == "r" && pubsuffix(msgFrom) == pubsuffix(*spfIdentity)) {
|
||||
result.AlignedSPFPass = true
|
||||
}
|
||||
|
||||
for _, dkimResult := range dkimResults {
|
||||
@ -228,12 +300,16 @@ func Verify(ctx context.Context, resolver dns.Resolver, from dns.Domain, dkimRes
|
||||
continue
|
||||
}
|
||||
// ../rfc/7489:511
|
||||
if dkimResult.Status == dkim.StatusPass && dkimResult.Sig != nil && (dkimResult.Sig.Domain == from || result.Record.ADKIM == "r" && pubsuffix(from) == pubsuffix(dkimResult.Sig.Domain)) {
|
||||
if dkimResult.Status == dkim.StatusPass && dkimResult.Sig != nil && (dkimResult.Sig.Domain == msgFrom || result.Record.ADKIM == "r" && pubsuffix(msgFrom) == pubsuffix(dkimResult.Sig.Domain)) {
|
||||
// ../rfc/7489:535
|
||||
result.Reject = false
|
||||
result.Status = StatusPass
|
||||
return
|
||||
result.AlignedDKIMPass = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if result.AlignedSPFPass || result.AlignedDKIMPass {
|
||||
result.Reject = false
|
||||
result.Status = StatusPass
|
||||
}
|
||||
return
|
||||
}
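Verify now records SPF and DKIM alignment separately before deciding pass or fail. A small sketch of inspecting the new Result fields; the values are fabricated, mirroring the test cases below:

```go
package main

import (
	"fmt"

	"github.com/mjl-/mox/dmarc"
	"github.com/mjl-/mox/dns"
)

func main() {
	res := dmarc.Result{
		Reject:          true,
		Status:          dmarc.StatusFail,
		AlignedSPFPass:  false,
		AlignedDKIMPass: false,
		Domain:          dns.Domain{ASCII: "reject.example"},
		RecordAuthentic: false,
	}

	if res.Status == dmarc.StatusPass {
		fmt.Println("aligned pass, spf:", res.AlignedSPFPass, "dkim:", res.AlignedDKIMPass)
	} else if res.Reject {
		fmt.Println("policy requests reject; record domain:", res.Domain.ASCII)
	}
}
```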
|
||||
|
@ -8,9 +8,12 @@ import (
|
||||
|
||||
"github.com/mjl-/mox/dkim"
|
||||
"github.com/mjl-/mox/dns"
|
||||
"github.com/mjl-/mox/mlog"
|
||||
"github.com/mjl-/mox/spf"
|
||||
)
|
||||
|
||||
var pkglog = mlog.New("dmarc", nil)
|
||||
|
||||
func TestLookup(t *testing.T) {
|
||||
resolver := dns.MockResolver{
|
||||
TXT: map[string][]string{
|
||||
@ -21,15 +24,15 @@ func TestLookup(t *testing.T) {
|
||||
"_dmarc.malformed.example.": {"v=DMARC1; p=none; bogus;"},
|
||||
"_dmarc.example.com.": {"v=DMARC1; p=none;"},
|
||||
},
|
||||
Fail: map[dns.Mockreq]struct{}{
|
||||
{Type: "txt", Name: "_dmarc.temperror.example."}: {},
|
||||
Fail: []string{
|
||||
"txt _dmarc.temperror.example.",
|
||||
},
|
||||
}
|
||||
|
||||
test := func(d string, expStatus Status, expDomain string, expRecord *Record, expErr error) {
|
||||
t.Helper()
|
||||
|
||||
status, dom, record, _, err := Lookup(context.Background(), resolver, dns.Domain{ASCII: d})
|
||||
status, dom, record, _, _, err := Lookup(context.Background(), pkglog.Logger, resolver, dns.Domain{ASCII: d})
|
||||
if (err == nil) != (expErr == nil) || err != nil && !errors.Is(err, expErr) {
|
||||
t.Fatalf("got err %#v, expected %#v", err, expErr)
|
||||
}
|
||||
@ -50,6 +53,45 @@ func TestLookup(t *testing.T) {
|
||||
test("sub.example.com", StatusNone, "example.com", &r, nil) // Policy published at organizational domain, public suffix.
|
||||
}
|
||||
|
||||
func TestLookupExternalReportsAccepted(t *testing.T) {
|
||||
resolver := dns.MockResolver{
|
||||
TXT: map[string][]string{
|
||||
"example.com._report._dmarc.simple.example.": {"v=DMARC1"},
|
||||
"example.com._report._dmarc.simple2.example.": {"v=DMARC1;"},
|
||||
"example.com._report._dmarc.one.example.": {"v=DMARC1; p=none;", "other"},
|
||||
"example.com._report._dmarc.temperror.example.": {"v=DMARC1; p=none;"},
|
||||
"example.com._report._dmarc.multiple.example.": {"v=DMARC1; p=none;", "v=DMARC1"},
|
||||
"example.com._report._dmarc.malformed.example.": {"v=DMARC1; p=none; bogus;"},
|
||||
},
|
||||
Fail: []string{
|
||||
"txt example.com._report._dmarc.temperror.example.",
|
||||
},
|
||||
}
|
||||
|
||||
test := func(dom, extdom string, expStatus Status, expAccepts bool, expErr error) {
|
||||
t.Helper()
|
||||
|
||||
accepts, status, _, _, _, err := LookupExternalReportsAccepted(context.Background(), pkglog.Logger, resolver, dns.Domain{ASCII: dom}, dns.Domain{ASCII: extdom})
|
||||
if (err == nil) != (expErr == nil) || err != nil && !errors.Is(err, expErr) {
|
||||
t.Fatalf("got err %#v, expected %#v", err, expErr)
|
||||
}
|
||||
if status != expStatus || accepts != expAccepts {
|
||||
t.Fatalf("got status %s, accepts %v, expected %v, %v", status, accepts, expStatus, expAccepts)
|
||||
}
|
||||
}
|
||||
|
||||
r := DefaultRecord
|
||||
r.Policy = PolicyNone
|
||||
test("example.com", "simple.example", StatusNone, true, nil)
|
||||
test("example.org", "simple.example", StatusNone, false, ErrNoRecord)
|
||||
test("example.com", "simple2.example", StatusNone, true, nil)
|
||||
test("example.com", "one.example", StatusNone, true, nil)
|
||||
test("example.com", "absent.example", StatusNone, false, ErrNoRecord)
|
||||
test("example.com", "multiple.example", StatusNone, true, nil)
|
||||
test("example.com", "malformed.example", StatusPermerror, false, ErrSyntax)
|
||||
test("example.com", "temperror.example", StatusTemperror, false, ErrDNS)
|
||||
}
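The tests now express forced DNS failures as a []string of "type name" entries instead of a map keyed by Mockreq. A sketch, not part of the patch, of building such a mock resolver and triggering a temporary error, mirroring the setup above:

```go
package main

import (
	"context"
	"log"
	"log/slog"

	"github.com/mjl-/mox/dmarc"
	"github.com/mjl-/mox/dns"
)

func main() {
	resolver := dns.MockResolver{
		TXT: map[string][]string{
			"_dmarc.example.com.": {"v=DMARC1; p=reject"},
		},
		Fail: []string{
			// "type name": this TXT query returns a temporary DNS error.
			"txt _dmarc.temperror.example.",
		},
	}

	_, _, _, _, _, err := dmarc.Lookup(context.Background(), slog.Default(), resolver, dns.Domain{ASCII: "temperror.example"})
	log.Printf("expected temporary error: %v", err) // should wrap dmarc.ErrDNS
}
```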
|
||||
|
||||
func TestVerify(t *testing.T) {
|
||||
resolver := dns.MockResolver{
|
||||
TXT: map[string][]string{
|
||||
@ -61,8 +103,8 @@ func TestVerify(t *testing.T) {
|
||||
"_dmarc.malformed.example.": {"v=DMARC1; p=none; bogus"},
|
||||
"_dmarc.example.com.": {"v=DMARC1; p=reject"},
|
||||
},
|
||||
Fail: map[dns.Mockreq]struct{}{
|
||||
{Type: "txt", Name: "_dmarc.temperror.example."}: {},
|
||||
Fail: []string{
|
||||
"txt _dmarc.temperror.example.",
|
||||
},
|
||||
}
|
||||
|
||||
@ -85,7 +127,7 @@ func TestVerify(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("parsing domain: %v", err)
|
||||
}
|
||||
useResult, result := Verify(context.Background(), resolver, from, dkimResults, spfResult, spfIdentity, true)
|
||||
useResult, result := Verify(context.Background(), pkglog.Logger, resolver, from, dkimResults, spfResult, spfIdentity, true)
|
||||
if useResult != expUseResult || !equalResult(result, expResult) {
|
||||
t.Fatalf("verify: got useResult %v, result %#v, expected %v %#v", useResult, result, expUseResult, expResult)
|
||||
}
|
||||
@ -98,7 +140,7 @@ func TestVerify(t *testing.T) {
|
||||
[]dkim.Result{},
|
||||
spf.StatusNone,
|
||||
nil,
|
||||
true, Result{true, StatusFail, dns.Domain{ASCII: "reject.example"}, &reject, nil},
|
||||
true, Result{true, StatusFail, false, false, dns.Domain{ASCII: "reject.example"}, &reject, false, nil},
|
||||
)
|
||||
|
||||
// Accept with spf pass.
|
||||
@ -106,7 +148,7 @@ func TestVerify(t *testing.T) {
|
||||
[]dkim.Result{},
|
||||
spf.StatusPass,
|
||||
&dns.Domain{ASCII: "sub.reject.example"},
|
||||
true, Result{false, StatusPass, dns.Domain{ASCII: "reject.example"}, &reject, nil},
|
||||
true, Result{false, StatusPass, true, false, dns.Domain{ASCII: "reject.example"}, &reject, false, nil},
|
||||
)
|
||||
|
||||
// Accept with dkim pass.
|
||||
@ -122,7 +164,7 @@ func TestVerify(t *testing.T) {
|
||||
},
|
||||
spf.StatusFail,
|
||||
&dns.Domain{ASCII: "reject.example"},
|
||||
true, Result{false, StatusPass, dns.Domain{ASCII: "reject.example"}, &reject, nil},
|
||||
true, Result{false, StatusPass, false, true, dns.Domain{ASCII: "reject.example"}, &reject, false, nil},
|
||||
)
|
||||
|
||||
// Reject due to spf and dkim "strict".
|
||||
@ -142,7 +184,7 @@ func TestVerify(t *testing.T) {
|
||||
},
|
||||
spf.StatusPass,
|
||||
&dns.Domain{ASCII: "sub.strict.example"},
|
||||
true, Result{true, StatusFail, dns.Domain{ASCII: "strict.example"}, &strict, nil},
|
||||
true, Result{true, StatusFail, false, false, dns.Domain{ASCII: "strict.example"}, &strict, false, nil},
|
||||
)
|
||||
|
||||
// No dmarc policy, nothing to say.
|
||||
@ -150,7 +192,7 @@ func TestVerify(t *testing.T) {
|
||||
[]dkim.Result{},
|
||||
spf.StatusNone,
|
||||
nil,
|
||||
false, Result{false, StatusNone, dns.Domain{ASCII: "absent.example"}, nil, ErrNoRecord},
|
||||
false, Result{false, StatusNone, false, false, dns.Domain{ASCII: "absent.example"}, nil, false, ErrNoRecord},
|
||||
)
|
||||
|
||||
// No dmarc policy, spf pass does nothing.
|
||||
@ -158,7 +200,7 @@ func TestVerify(t *testing.T) {
|
||||
[]dkim.Result{},
|
||||
spf.StatusPass,
|
||||
&dns.Domain{ASCII: "absent.example"},
|
||||
false, Result{false, StatusNone, dns.Domain{ASCII: "absent.example"}, nil, ErrNoRecord},
|
||||
false, Result{false, StatusNone, false, false, dns.Domain{ASCII: "absent.example"}, nil, false, ErrNoRecord},
|
||||
)
|
||||
|
||||
none := DefaultRecord
|
||||
@ -168,7 +210,7 @@ func TestVerify(t *testing.T) {
|
||||
[]dkim.Result{},
|
||||
spf.StatusPass,
|
||||
&dns.Domain{ASCII: "none.example"},
|
||||
true, Result{false, StatusPass, dns.Domain{ASCII: "none.example"}, &none, nil},
|
||||
true, Result{false, StatusPass, true, false, dns.Domain{ASCII: "none.example"}, &none, false, nil},
|
||||
)
|
||||
|
||||
// No actual reject due to pct=0.
|
||||
@ -179,7 +221,7 @@ func TestVerify(t *testing.T) {
|
||||
[]dkim.Result{},
|
||||
spf.StatusNone,
|
||||
nil,
|
||||
false, Result{true, StatusFail, dns.Domain{ASCII: "test.example"}, &testr, nil},
|
||||
false, Result{true, StatusFail, false, false, dns.Domain{ASCII: "test.example"}, &testr, false, nil},
|
||||
)
|
||||
|
||||
// No reject if subdomain has "none" policy.
|
||||
@ -190,7 +232,7 @@ func TestVerify(t *testing.T) {
|
||||
[]dkim.Result{},
|
||||
spf.StatusFail,
|
||||
&dns.Domain{ASCII: "sub.subnone.example"},
|
||||
true, Result{false, StatusFail, dns.Domain{ASCII: "subnone.example"}, &sub, nil},
|
||||
true, Result{false, StatusFail, false, false, dns.Domain{ASCII: "subnone.example"}, &sub, false, nil},
|
||||
)
|
||||
|
||||
// No reject if spf temperror and no other pass.
|
||||
@ -198,7 +240,7 @@ func TestVerify(t *testing.T) {
|
||||
[]dkim.Result{},
|
||||
spf.StatusTemperror,
|
||||
&dns.Domain{ASCII: "mail.reject.example"},
|
||||
true, Result{false, StatusTemperror, dns.Domain{ASCII: "reject.example"}, &reject, nil},
|
||||
true, Result{false, StatusTemperror, false, false, dns.Domain{ASCII: "reject.example"}, &reject, false, nil},
|
||||
)
|
||||
|
||||
// No reject if dkim temperror and no other pass.
|
||||
@ -214,7 +256,7 @@ func TestVerify(t *testing.T) {
|
||||
},
|
||||
spf.StatusNone,
|
||||
nil,
|
||||
true, Result{false, StatusTemperror, dns.Domain{ASCII: "reject.example"}, &reject, nil},
|
||||
true, Result{false, StatusTemperror, false, false, dns.Domain{ASCII: "reject.example"}, &reject, false, nil},
|
||||
)
|
||||
|
||||
// No reject if spf temperror but still dkim pass.
|
||||
@ -230,7 +272,7 @@ func TestVerify(t *testing.T) {
|
||||
},
|
||||
spf.StatusTemperror,
|
||||
&dns.Domain{ASCII: "mail.reject.example"},
|
||||
true, Result{false, StatusPass, dns.Domain{ASCII: "reject.example"}, &reject, nil},
|
||||
true, Result{false, StatusPass, false, true, dns.Domain{ASCII: "reject.example"}, &reject, false, nil},
|
||||
)
|
||||
|
||||
// No reject if dkim temperror but still spf pass.
|
||||
@ -246,7 +288,7 @@ func TestVerify(t *testing.T) {
|
||||
},
|
||||
spf.StatusPass,
|
||||
&dns.Domain{ASCII: "mail.reject.example"},
|
||||
true, Result{false, StatusPass, dns.Domain{ASCII: "reject.example"}, &reject, nil},
|
||||
true, Result{false, StatusPass, true, false, dns.Domain{ASCII: "reject.example"}, &reject, false, nil},
|
||||
)
|
||||
|
||||
// Bad DMARC record results in permerror without reject.
|
||||
@ -254,7 +296,7 @@ func TestVerify(t *testing.T) {
|
||||
[]dkim.Result{},
|
||||
spf.StatusNone,
|
||||
nil,
|
||||
false, Result{false, StatusPermerror, dns.Domain{ASCII: "malformed.example"}, nil, ErrSyntax},
|
||||
false, Result{false, StatusPermerror, false, false, dns.Domain{ASCII: "malformed.example"}, nil, false, ErrSyntax},
|
||||
)
|
||||
|
||||
// DKIM domain that is higher-level than organizational can not result in a pass. ../rfc/7489:525
|
||||
@ -270,6 +312,6 @@ func TestVerify(t *testing.T) {
|
||||
},
|
||||
spf.StatusNone,
|
||||
nil,
|
||||
true, Result{true, StatusFail, dns.Domain{ASCII: "example.com"}, &reject, nil},
|
||||
true, Result{true, StatusFail, false, false, dns.Domain{ASCII: "example.com"}, &reject, false, nil},
|
||||
)
|
||||
}
|
||||
|
dmarc/examples_test.go (new file, 85 lines)
@ -0,0 +1,85 @@
|
||||
package dmarc_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
"log/slog"
|
||||
"net"
|
||||
"strings"
|
||||
|
||||
"github.com/mjl-/mox/dkim"
|
||||
"github.com/mjl-/mox/dmarc"
|
||||
"github.com/mjl-/mox/dns"
|
||||
"github.com/mjl-/mox/message"
|
||||
"github.com/mjl-/mox/spf"
|
||||
)
|
||||
|
||||
func ExampleLookup() {
|
||||
ctx := context.Background()
|
||||
resolver := dns.StrictResolver{}
|
||||
msgFrom, err := dns.ParseDomain("sub.example.com")
|
||||
if err != nil {
|
||||
log.Fatalf("parsing from domain: %v", err)
|
||||
}
|
||||
|
||||
// Lookup DMARC DNS record for domain.
|
||||
status, domain, record, txt, authentic, err := dmarc.Lookup(ctx, slog.Default(), resolver, msgFrom)
|
||||
if err != nil {
|
||||
log.Fatalf("dmarc lookup: %v", err)
|
||||
}
|
||||
|
||||
log.Printf("status %s, domain %s, record %v, txt %q, dnssec %v", status, domain, record, txt, authentic)
|
||||
}
|
||||
|
||||
func ExampleVerify() {
|
||||
ctx := context.Background()
|
||||
resolver := dns.StrictResolver{}
|
||||
|
||||
// Message to verify.
|
||||
msg := strings.NewReader("From: <sender@example.com>\r\nMore: headers\r\n\r\nBody\r\n")
|
||||
msgFrom, _, _, err := message.From(slog.Default(), true, msg, nil)
|
||||
if err != nil {
|
||||
log.Fatalf("parsing message for from header: %v", err)
|
||||
}
|
||||
|
||||
// Verify SPF, for use with DMARC.
|
||||
args := spf.Args{
|
||||
RemoteIP: net.ParseIP("10.11.12.13"),
|
||||
MailFromDomain: dns.Domain{ASCII: "sub.example.com"},
|
||||
}
|
||||
spfReceived, spfDomain, _, _, err := spf.Verify(ctx, slog.Default(), resolver, args)
|
||||
if err != nil {
|
||||
log.Printf("verifying spf: %v", err)
|
||||
}
|
||||
|
||||
// Verify DKIM-Signature headers, for use with DMARC.
|
||||
smtputf8 := false
|
||||
ignoreTestMode := false
|
||||
dkimResults, err := dkim.Verify(ctx, slog.Default(), resolver, smtputf8, dkim.DefaultPolicy, msg, ignoreTestMode)
|
||||
if err != nil {
|
||||
log.Printf("verifying dkim: %v", err)
|
||||
}
|
||||
|
||||
// Verify DMARC, based on DKIM and SPF results.
|
||||
applyRandomPercentage := true
|
||||
useResult, result := dmarc.Verify(ctx, slog.Default(), resolver, msgFrom.Domain, dkimResults, spfReceived.Result, &spfDomain, applyRandomPercentage)
|
||||
|
||||
// Print results.
|
||||
log.Printf("dmarc status: %s", result.Status)
|
||||
log.Printf("use result: %v", useResult)
|
||||
if useResult && result.Reject {
|
||||
log.Printf("should reject message")
|
||||
}
|
||||
log.Printf("result: %#v", result)
|
||||
}
|
||||
|
||||
func ExampleParseRecord() {
|
||||
txt := "v=DMARC1; p=reject; rua=mailto:postmaster@mox.example"
|
||||
|
||||
record, isdmarc, err := dmarc.ParseRecord(txt)
|
||||
if err != nil {
|
||||
log.Fatalf("parsing dmarc record: %v (isdmarc: %v)", err, isdmarc)
|
||||
}
|
||||
|
||||
log.Printf("parsed record: %v", record)
|
||||
}
|
@ -19,7 +19,22 @@ func (e parseErr) Error() string {
|
||||
// for easy comparison.
|
||||
//
|
||||
// DefaultRecord provides default values for tags not present in s.
|
||||
//
|
||||
// isdmarc indicates if the record starts tag "v" with value "DMARC1", and should
|
||||
// be treated as a valid DMARC record. Used to detect possibly multiple DMARC
|
||||
// records (invalid) for a domain with multiple TXT record (quite common).
|
||||
func ParseRecord(s string) (record *Record, isdmarc bool, rerr error) {
|
||||
return parseRecord(s, true)
|
||||
}
|
||||
|
||||
// ParseRecordNoRequired is like ParseRecord, but don't check for required fields
|
||||
// for regular DMARC records. Useful for checking the _report._dmarc record,
|
||||
// used for opting into receiving reports for other domains.
|
||||
func ParseRecordNoRequired(s string) (record *Record, isdmarc bool, rerr error) {
|
||||
return parseRecord(s, false)
|
||||
}
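A sketch, not part of the patch, contrasting the two parsers: ParseRecord for a regular policy record where "p=" is required, ParseRecordNoRequired for the _report._dmarc opt-in record that may legitimately omit it:

```go
package main

import (
	"fmt"
	"log"

	"github.com/mjl-/mox/dmarc"
)

func main() {
	// Regular policy record published at _dmarc.<domain>.
	rec, isdmarc, err := dmarc.ParseRecord("v=DMARC1; p=reject; rua=mailto:dmarc@mox.example")
	if err != nil {
		log.Fatalf("parsing policy record (isdmarc %v): %v", isdmarc, err)
	}
	fmt.Println("policy:", rec.Policy)

	// Opt-in record published at <domain>._report._dmarc.<ext-domain>: no "p=" needed.
	optin, isdmarc, err := dmarc.ParseRecordNoRequired("v=DMARC1;")
	if err != nil {
		log.Fatalf("parsing report opt-in record (isdmarc %v): %v", isdmarc, err)
	}
	fmt.Println("opt-in record parsed:", optin != nil)
}
```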
|
||||
|
||||
func parseRecord(s string, checkRequired bool) (record *Record, isdmarc bool, rerr error) {
|
||||
defer func() {
|
||||
x := recover()
|
||||
if x == nil {
|
||||
@ -77,9 +92,9 @@ func ParseRecord(s string) (record *Record, isdmarc bool, rerr error) {
|
||||
// ../rfc/7489:1105
|
||||
p.xerrorf("p= (policy) must be first tag")
|
||||
}
|
||||
r.Policy = DMARCPolicy(p.xtakelist("none", "quarantine", "reject"))
|
||||
r.Policy = Policy(p.xtakelist("none", "quarantine", "reject"))
|
||||
case "sp":
|
||||
r.SubdomainPolicy = DMARCPolicy(p.xkeyword())
|
||||
r.SubdomainPolicy = Policy(p.xkeyword())
|
||||
// note: we check if the value is valid before returning.
|
||||
case "rua":
|
||||
r.AggregateReportAddresses = append(r.AggregateReportAddresses, p.xuri())
|
||||
@ -134,7 +149,7 @@ func ParseRecord(s string) (record *Record, isdmarc bool, rerr error) {
|
||||
// ../rfc/7489:1106 says "p" is required, but ../rfc/7489:1407 implies we must be
|
||||
// able to parse a record without a "p" or with invalid "sp" tag.
|
||||
sp := r.SubdomainPolicy
|
||||
if !seen["p"] || sp != PolicyEmpty && sp != PolicyNone && sp != PolicyQuarantine && sp != PolicyReject {
|
||||
if checkRequired && (!seen["p"] || sp != PolicyEmpty && sp != PolicyNone && sp != PolicyQuarantine && sp != PolicyReject) {
|
||||
if len(r.AggregateReportAddresses) > 0 {
|
||||
r.Policy = PolicyNone
|
||||
r.SubdomainPolicy = PolicyEmpty
|
||||
|
dmarc/txt.go (46 changes)
@ -5,25 +5,23 @@ import (
	"strings"
)

// todo: DMARCPolicy should be named just Policy, but this is causing conflicting types in sherpadoc output. should somehow get the dmarc-prefix only in the sherpadoc.

// Policy as used in DMARC DNS record for "p=" or "sp=".
type DMARCPolicy string
type Policy string

// ../rfc/7489:1157

const (
	PolicyEmpty      DMARCPolicy = "" // Only for the optional Record.SubdomainPolicy.
	PolicyNone       DMARCPolicy = "none"
	PolicyQuarantine DMARCPolicy = "quarantine"
	PolicyReject     DMARCPolicy = "reject"
	PolicyEmpty      Policy = "" // Only for the optional Record.SubdomainPolicy.
	PolicyNone       Policy = "none"
	PolicyQuarantine Policy = "quarantine"
	PolicyReject     Policy = "reject"
)

// URI is a destination address for reporting.
type URI struct {
	Address string // Should start with "mailto:".
	MaxSize uint64 // Optional maximum message size, subject to Unit.
	Unit    string // "" (b), "k", "g", "t" (case insensitive), unit size, where k is 2^10 etc.
	Unit    string // "" (b), "k", "m", "g", "t" (case insensitive), unit size, where k is 2^10 etc.
}

// String returns a string representation of the URI for inclusion in a DMARC
@ -33,7 +31,7 @@ func (u URI) String() string {
	s = strings.ReplaceAll(s, ",", "%2C")
	s = strings.ReplaceAll(s, "!", "%21")
	if u.MaxSize > 0 {
		s += fmt.Sprintf("%d", u.MaxSize)
		s += fmt.Sprintf("!%d", u.MaxSize)
	}
	s += u.Unit
	return s
|
||||
@ -55,17 +53,17 @@ const (
|
||||
//
|
||||
// v=DMARC1; p=reject; rua=mailto:postmaster@mox.example
|
||||
type Record struct {
|
||||
Version string // "v=DMARC1"
|
||||
Policy DMARCPolicy // Required, for "p=".
|
||||
SubdomainPolicy DMARCPolicy // Like policy but for subdomains. Optional, for "sp=".
|
||||
AggregateReportAddresses []URI // Optional, for "rua=".
|
||||
FailureReportAddresses []URI // Optional, for "ruf="
|
||||
ADKIM Align // "r" (default) for relaxed or "s" for simple. For "adkim=".
|
||||
ASPF Align // "r" (default) for relaxed or "s" for simple. For "aspf=".
|
||||
AggregateReportingInterval int // Default 86400. For "ri="
|
||||
FailureReportingOptions []string // "0" (default), "1", "d", "s". For "fo=".
|
||||
ReportingFormat []string // "afrf" (default). Ffor "rf=".
|
||||
Percentage int // Between 0 and 100, default 100. For "pct=".
|
||||
Version string // "v=DMARC1", fixed.
|
||||
Policy Policy // Required, for "p=".
|
||||
SubdomainPolicy Policy // Like policy but for subdomains. Optional, for "sp=".
|
||||
AggregateReportAddresses []URI // Optional, for "rua=". Destination addresses for aggregate reports.
|
||||
FailureReportAddresses []URI // Optional, for "ruf=". Destination addresses for failure reports.
|
||||
ADKIM Align // Alignment: "r" (default) for relaxed or "s" for simple. For "adkim=".
|
||||
ASPF Align // Alignment: "r" (default) for relaxed or "s" for simple. For "aspf=".
|
||||
AggregateReportingInterval int // In seconds, default 86400. For "ri="
|
||||
FailureReportingOptions []string // "0" (default), "1", "d", "s". For "fo=".
|
||||
ReportingFormat []string // "afrf" (default). For "rf=".
|
||||
Percentage int // Between 0 and 100, default 100. For "pct=". Policy applies randomly to this percentage of messages.
|
||||
}
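A hedged sketch of building and rendering a record, assuming DefaultRecord is a plain Record value as the diff suggests; fields left at their defaults (adkim, aspf, ri, pct, ...) are omitted by String:

r := dmarc.DefaultRecord
r.Policy = dmarc.PolicyReject
r.AggregateReportAddresses = []dmarc.URI{{Address: "mailto:postmaster@mox.example"}}
// Prints something like: v=DMARC1;p=reject;rua=mailto:postmaster@mox.example
fmt.Println(r.String())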
|
||||
|
||||
// DefaultRecord holds the defaults for a DMARC record.
|
||||
@ -109,13 +107,13 @@ func (r Record) String() string {
|
||||
s := strings.Join(l, ",")
|
||||
write(true, "ruf", s)
|
||||
}
|
||||
write(r.ADKIM != "", "adkim", string(r.ADKIM))
|
||||
write(r.ASPF != "", "aspf", string(r.ASPF))
|
||||
write(r.ADKIM != "" && r.ADKIM != "r", "adkim", string(r.ADKIM))
|
||||
write(r.ASPF != "" && r.ASPF != "r", "aspf", string(r.ASPF))
|
||||
write(r.AggregateReportingInterval != DefaultRecord.AggregateReportingInterval, "ri", fmt.Sprintf("%d", r.AggregateReportingInterval))
|
||||
if len(r.FailureReportingOptions) > 1 || (len(r.FailureReportingOptions) == 1 && r.FailureReportingOptions[0] != "0") {
|
||||
if len(r.FailureReportingOptions) > 1 || len(r.FailureReportingOptions) == 1 && r.FailureReportingOptions[0] != "0" {
|
||||
write(true, "fo", strings.Join(r.FailureReportingOptions, ":"))
|
||||
}
|
||||
if len(r.ReportingFormat) > 1 || (len(r.ReportingFormat) == 1 && strings.EqualFold(r.ReportingFormat[0], "afrf")) {
|
||||
if len(r.ReportingFormat) > 1 || len(r.ReportingFormat) == 1 && !strings.EqualFold(r.ReportingFormat[0], "afrf") {
|
||||
write(true, "rf", strings.Join(r.FailureReportingOptions, ":"))
|
||||
}
|
||||
write(r.Percentage != 100, "pct", fmt.Sprintf("%d", r.Percentage))
|
||||
|
77 dmarcdb/dmarcdb.go Normal file
@ -0,0 +1,77 @@
|
||||
// Package dmarcdb stores incoming DMARC aggregate reports and evaluations for outgoing aggregate reports.
|
||||
//
|
||||
// With DMARC, a domain can request reports with DMARC evaluation results to be
|
||||
// sent to a specified address. Mox parses such reports, stores them in its
|
||||
// database and makes them available through its admin web interface. Mox also
|
||||
// keeps track of the evaluations it does for incoming messages and sends reports
|
||||
// to mail servers that request reports.
|
||||
//
|
||||
// Only aggregate reports are stored and sent. Failure reports about individual
|
||||
// messages are not implemented.
|
||||
package dmarcdb
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/mjl-/bstore"
|
||||
|
||||
"github.com/mjl-/mox/mlog"
|
||||
"github.com/mjl-/mox/mox-"
|
||||
"github.com/mjl-/mox/moxvar"
|
||||
)
|
||||
|
||||
// Init opens the databases.
|
||||
//
|
||||
// The incoming reports and evaluations for outgoing reports are in separate
|
||||
// databases for simpler file-based handling of the databases.
|
||||
func Init() error {
|
||||
if ReportsDB != nil || EvalDB != nil {
|
||||
return fmt.Errorf("already initialized")
|
||||
}
|
||||
|
||||
log := mlog.New("dmarcdb", nil)
|
||||
var err error
|
||||
|
||||
ReportsDB, err = openReportsDB(mox.Shutdown, log)
|
||||
if err != nil {
|
||||
return fmt.Errorf("open reports db: %v", err)
|
||||
}
|
||||
|
||||
EvalDB, err = openEvalDB(mox.Shutdown, log)
|
||||
if err != nil {
|
||||
return fmt.Errorf("open eval db: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
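A rough lifecycle sketch modelled on the tests further down; the config path is hypothetical and the mox-, dmarcdb, log and path/filepath imports are assumed:

mox.ConfigStaticPath = filepath.FromSlash("testdata/mox.conf") // hypothetical config
mox.MustLoadConfig(true, false)
if err := dmarcdb.Init(); err != nil {
	log.Fatalf("init dmarcdb: %v", err)
}
defer func() {
	if err := dmarcdb.Close(); err != nil {
		log.Printf("close dmarcdb: %v", err)
	}
}()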
|
||||
|
||||
func Close() error {
|
||||
if err := ReportsDB.Close(); err != nil {
|
||||
return fmt.Errorf("closing reports db: %w", err)
|
||||
}
|
||||
ReportsDB = nil
|
||||
|
||||
if err := EvalDB.Close(); err != nil {
|
||||
return fmt.Errorf("closing eval db: %w", err)
|
||||
}
|
||||
EvalDB = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func openReportsDB(ctx context.Context, log mlog.Log) (*bstore.DB, error) {
|
||||
p := mox.DataDirPath("dmarcrpt.db")
|
||||
os.MkdirAll(filepath.Dir(p), 0770)
|
||||
opts := bstore.Options{Timeout: 5 * time.Second, Perm: 0660, RegisterLogger: moxvar.RegisterLogger(p, log.Logger)}
|
||||
return bstore.Open(ctx, p, &opts, ReportsDBTypes...)
|
||||
}
|
||||
|
||||
func openEvalDB(ctx context.Context, log mlog.Log) (*bstore.DB, error) {
|
||||
p := mox.DataDirPath("dmarceval.db")
|
||||
os.MkdirAll(filepath.Dir(p), 0770)
|
||||
opts := bstore.Options{Timeout: 5 * time.Second, Perm: 0660, RegisterLogger: moxvar.RegisterLogger(p, log.Logger)}
|
||||
return bstore.Open(ctx, p, &opts, EvalDBTypes...)
|
||||
}
|
1064 dmarcdb/eval.go Normal file
File diff suppressed because it is too large
403 dmarcdb/eval_test.go Normal file
@ -0,0 +1,403 @@
|
||||
package dmarcdb
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/mjl-/mox/dmarcrpt"
|
||||
"github.com/mjl-/mox/dns"
|
||||
"github.com/mjl-/mox/mlog"
|
||||
"github.com/mjl-/mox/mox-"
|
||||
"github.com/mjl-/mox/moxio"
|
||||
"github.com/mjl-/mox/queue"
|
||||
"slices"
|
||||
)
|
||||
|
||||
func tcheckf(t *testing.T, err error, format string, args ...any) {
|
||||
t.Helper()
|
||||
if err != nil {
|
||||
t.Fatalf("%s: %s", fmt.Sprintf(format, args...), err)
|
||||
}
|
||||
}
|
||||
|
||||
func tcompare(t *testing.T, got, expect any) {
|
||||
t.Helper()
|
||||
if !reflect.DeepEqual(got, expect) {
|
||||
t.Fatalf("got:\n%v\nexpected:\n%v", got, expect)
|
||||
}
|
||||
}
|
||||
|
||||
func TestEvaluations(t *testing.T) {
|
||||
os.RemoveAll("../testdata/dmarcdb/data")
|
||||
mox.Context = ctxbg
|
||||
mox.ConfigStaticPath = filepath.FromSlash("../testdata/dmarcdb/mox.conf")
|
||||
mox.MustLoadConfig(true, false)
|
||||
|
||||
os.Remove(mox.DataDirPath("dmarceval.db"))
|
||||
err := Init()
|
||||
tcheckf(t, err, "init")
|
||||
defer func() {
|
||||
err := Close()
|
||||
tcheckf(t, err, "close")
|
||||
}()
|
||||
|
||||
parseJSON := func(s string) (e Evaluation) {
|
||||
t.Helper()
|
||||
err := json.Unmarshal([]byte(s), &e)
|
||||
tcheckf(t, err, "unmarshal")
|
||||
return
|
||||
}
|
||||
packJSON := func(e Evaluation) string {
|
||||
t.Helper()
|
||||
buf, err := json.Marshal(e)
|
||||
tcheckf(t, err, "marshal")
|
||||
return string(buf)
|
||||
}
|
||||
|
||||
e0 := Evaluation{
|
||||
PolicyDomain: "sender1.example",
|
||||
Evaluated: time.Now().Round(0),
|
||||
IntervalHours: 1,
|
||||
PolicyPublished: dmarcrpt.PolicyPublished{
|
||||
Domain: "sender1.example",
|
||||
ADKIM: dmarcrpt.AlignmentRelaxed,
|
||||
ASPF: dmarcrpt.AlignmentRelaxed,
|
||||
Policy: dmarcrpt.DispositionReject,
|
||||
SubdomainPolicy: dmarcrpt.DispositionReject,
|
||||
Percentage: 100,
|
||||
},
|
||||
SourceIP: "10.1.2.3",
|
||||
Disposition: dmarcrpt.DispositionNone,
|
||||
AlignedDKIMPass: true,
|
||||
AlignedSPFPass: true,
|
||||
EnvelopeTo: "mox.example",
|
||||
EnvelopeFrom: "sender1.example",
|
||||
HeaderFrom: "sender1.example",
|
||||
DKIMResults: []dmarcrpt.DKIMAuthResult{
|
||||
{
|
||||
Domain: "sender1.example",
|
||||
Selector: "test",
|
||||
Result: dmarcrpt.DKIMPass,
|
||||
},
|
||||
},
|
||||
SPFResults: []dmarcrpt.SPFAuthResult{
|
||||
{
|
||||
Domain: "sender1.example",
|
||||
Scope: dmarcrpt.SPFDomainScopeMailFrom,
|
||||
Result: dmarcrpt.SPFPass,
|
||||
},
|
||||
},
|
||||
}
|
||||
e1 := e0
|
||||
e2 := parseJSON(strings.ReplaceAll(packJSON(e0), "sender1.example", "sender2.example"))
|
||||
e3 := parseJSON(strings.ReplaceAll(packJSON(e0), "10.1.2.3", "10.3.2.1"))
|
||||
e3.Optional = true
|
||||
|
||||
for i, e := range []*Evaluation{&e0, &e1, &e2, &e3} {
|
||||
e.Evaluated = e.Evaluated.Add(time.Duration(i) * time.Second)
|
||||
err = AddEvaluation(ctxbg, 3600, e)
|
||||
tcheckf(t, err, "add evaluation")
|
||||
}
|
||||
|
||||
expStats := map[string]EvaluationStat{
|
||||
"sender1.example": {
|
||||
Domain: dns.Domain{ASCII: "sender1.example"},
|
||||
Dispositions: []string{"none"},
|
||||
Count: 3,
|
||||
SendReport: true,
|
||||
},
|
||||
"sender2.example": {
|
||||
Domain: dns.Domain{ASCII: "sender2.example"},
|
||||
Dispositions: []string{"none"},
|
||||
Count: 1,
|
||||
SendReport: true,
|
||||
},
|
||||
}
|
||||
stats, err := EvaluationStats(ctxbg)
|
||||
tcheckf(t, err, "evaluation stats")
|
||||
tcompare(t, stats, expStats)
|
||||
|
||||
// EvaluationsDomain
|
||||
evals, err := EvaluationsDomain(ctxbg, dns.Domain{ASCII: "sender1.example"})
|
||||
tcheckf(t, err, "get evaluations for domain")
|
||||
tcompare(t, evals, []Evaluation{e0, e1, e3})
|
||||
|
||||
evals, err = EvaluationsDomain(ctxbg, dns.Domain{ASCII: "sender2.example"})
|
||||
tcheckf(t, err, "get evaluations for domain")
|
||||
tcompare(t, evals, []Evaluation{e2})
|
||||
|
||||
evals, err = EvaluationsDomain(ctxbg, dns.Domain{ASCII: "bogus.example"})
|
||||
tcheckf(t, err, "get evaluations for domain")
|
||||
tcompare(t, evals, []Evaluation{})
|
||||
|
||||
// RemoveEvaluationsDomain
|
||||
err = RemoveEvaluationsDomain(ctxbg, dns.Domain{ASCII: "sender1.example"})
|
||||
tcheckf(t, err, "remove evaluations")
|
||||
|
||||
expStats = map[string]EvaluationStat{
|
||||
"sender2.example": {
|
||||
Domain: dns.Domain{ASCII: "sender2.example"},
|
||||
Dispositions: []string{"none"},
|
||||
Count: 1,
|
||||
SendReport: true,
|
||||
},
|
||||
}
|
||||
stats, err = EvaluationStats(ctxbg)
|
||||
tcheckf(t, err, "evaluation stats")
|
||||
tcompare(t, stats, expStats)
|
||||
}
|
||||
|
||||
func TestSendReports(t *testing.T) {
|
||||
os.RemoveAll("../testdata/dmarcdb/data")
|
||||
mox.Context = ctxbg
|
||||
mox.ConfigStaticPath = filepath.FromSlash("../testdata/dmarcdb/mox.conf")
|
||||
mox.MustLoadConfig(true, false)
|
||||
|
||||
os.Remove(mox.DataDirPath("dmarceval.db"))
|
||||
err := Init()
|
||||
tcheckf(t, err, "init")
|
||||
defer func() {
|
||||
err := Close()
|
||||
tcheckf(t, err, "close")
|
||||
}()
|
||||
|
||||
resolver := dns.MockResolver{
|
||||
TXT: map[string][]string{
|
||||
"_dmarc.sender.example.": {
|
||||
"v=DMARC1; rua=mailto:dmarcrpt@sender.example; ri=3600",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
end := nextWholeHour(time.Now())
|
||||
|
||||
eval := Evaluation{
|
||||
PolicyDomain: "sender.example",
|
||||
Evaluated: end.Add(-time.Hour / 2),
|
||||
IntervalHours: 1,
|
||||
PolicyPublished: dmarcrpt.PolicyPublished{
|
||||
Domain: "sender.example",
|
||||
ADKIM: dmarcrpt.AlignmentRelaxed,
|
||||
ASPF: dmarcrpt.AlignmentRelaxed,
|
||||
Policy: dmarcrpt.DispositionReject,
|
||||
SubdomainPolicy: dmarcrpt.DispositionReject,
|
||||
Percentage: 100,
|
||||
},
|
||||
SourceIP: "10.1.2.3",
|
||||
Disposition: dmarcrpt.DispositionNone,
|
||||
AlignedDKIMPass: true,
|
||||
AlignedSPFPass: true,
|
||||
EnvelopeTo: "mox.example",
|
||||
EnvelopeFrom: "sender.example",
|
||||
HeaderFrom: "sender.example",
|
||||
DKIMResults: []dmarcrpt.DKIMAuthResult{
|
||||
{
|
||||
Domain: "sender.example",
|
||||
Selector: "test",
|
||||
Result: dmarcrpt.DKIMPass,
|
||||
},
|
||||
},
|
||||
SPFResults: []dmarcrpt.SPFAuthResult{
|
||||
{
|
||||
Domain: "sender.example",
|
||||
Scope: dmarcrpt.SPFDomainScopeMailFrom,
|
||||
Result: dmarcrpt.SPFPass,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
expFeedback := &dmarcrpt.Feedback{
|
||||
XMLName: xml.Name{Local: "feedback"},
|
||||
Version: "1.0",
|
||||
ReportMetadata: dmarcrpt.ReportMetadata{
|
||||
OrgName: "mail.mox.example",
|
||||
Email: "postmaster@mail.mox.example",
|
||||
DateRange: dmarcrpt.DateRange{
|
||||
Begin: end.Add(-1 * time.Hour).Unix(),
|
||||
End: end.Add(-time.Second).Unix(),
|
||||
},
|
||||
},
|
||||
PolicyPublished: dmarcrpt.PolicyPublished{
|
||||
Domain: "sender.example",
|
||||
ADKIM: dmarcrpt.AlignmentRelaxed,
|
||||
ASPF: dmarcrpt.AlignmentRelaxed,
|
||||
Policy: dmarcrpt.DispositionReject,
|
||||
SubdomainPolicy: dmarcrpt.DispositionReject,
|
||||
Percentage: 100,
|
||||
},
|
||||
Records: []dmarcrpt.ReportRecord{
|
||||
{
|
||||
Row: dmarcrpt.Row{
|
||||
SourceIP: "10.1.2.3",
|
||||
Count: 1,
|
||||
PolicyEvaluated: dmarcrpt.PolicyEvaluated{
|
||||
Disposition: dmarcrpt.DispositionNone,
|
||||
DKIM: dmarcrpt.DMARCPass,
|
||||
SPF: dmarcrpt.DMARCPass,
|
||||
},
|
||||
},
|
||||
Identifiers: dmarcrpt.Identifiers{
|
||||
EnvelopeTo: "mox.example",
|
||||
EnvelopeFrom: "sender.example",
|
||||
HeaderFrom: "sender.example",
|
||||
},
|
||||
AuthResults: dmarcrpt.AuthResults{
|
||||
DKIM: []dmarcrpt.DKIMAuthResult{
|
||||
{
|
||||
Domain: "sender.example",
|
||||
Selector: "test",
|
||||
Result: dmarcrpt.DKIMPass,
|
||||
},
|
||||
},
|
||||
SPF: []dmarcrpt.SPFAuthResult{
|
||||
{
|
||||
Domain: "sender.example",
|
||||
Scope: dmarcrpt.SPFDomainScopeMailFrom,
|
||||
Result: dmarcrpt.SPFPass,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Set a timeUntil that we step-lock and that causes the actual sleep to return immediately when we want it to.
|
||||
wait := make(chan struct{})
|
||||
step := make(chan time.Duration)
|
||||
jitteredTimeUntil = func(_ time.Time) time.Duration {
|
||||
wait <- struct{}{}
|
||||
return <-step
|
||||
}
|
||||
|
||||
sleepBetween = func(ctx context.Context, between time.Duration) (ok bool) { return true }
|
||||
|
||||
test := func(evals []Evaluation, expAggrAddrs map[string]struct{}, expErrorAddrs map[string]struct{}, optExpReport *dmarcrpt.Feedback) {
|
||||
t.Helper()
|
||||
|
||||
mox.Shutdown, mox.ShutdownCancel = context.WithCancel(ctxbg)
|
||||
|
||||
for _, e := range evals {
|
||||
err := EvalDB.Insert(ctxbg, &e)
|
||||
tcheckf(t, err, "inserting evaluation")
|
||||
}
|
||||
|
||||
aggrAddrs := map[string]struct{}{}
|
||||
errorAddrs := map[string]struct{}{}
|
||||
|
||||
queueAdd = func(ctx context.Context, log mlog.Log, senderAccount string, msgFile *os.File, qml ...queue.Msg) error {
|
||||
if len(qml) != 1 {
|
||||
return fmt.Errorf("queued %d messages, expected 1", len(qml))
|
||||
}
|
||||
qm := qml[0]
|
||||
|
||||
// Read message file. Also write copy to disk for inspection.
|
||||
buf, err := io.ReadAll(&moxio.AtReader{R: msgFile})
|
||||
tcheckf(t, err, "read report message")
|
||||
err = os.WriteFile("../testdata/dmarcdb/data/report.eml", slices.Concat(qm.MsgPrefix, buf), 0600)
|
||||
tcheckf(t, err, "write report message")
|
||||
|
||||
var feedback *dmarcrpt.Feedback
|
||||
addr := qm.Recipient().String()
|
||||
isErrorReport := strings.Contains(string(buf), "DMARC aggregate reporting error report")
|
||||
if isErrorReport {
|
||||
errorAddrs[addr] = struct{}{}
|
||||
} else {
|
||||
aggrAddrs[addr] = struct{}{}
|
||||
|
||||
feedback, err = dmarcrpt.ParseMessageReport(log.Logger, msgFile)
|
||||
tcheckf(t, err, "parsing generated report message")
|
||||
}
|
||||
|
||||
if optExpReport != nil {
|
||||
// Parse report in message and compare with expected.
|
||||
optExpReport.ReportMetadata.ReportID = feedback.ReportMetadata.ReportID
|
||||
tcompare(t, feedback, expFeedback)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
Start(resolver)
|
||||
// Run first loop.
|
||||
<-wait
|
||||
step <- 0
|
||||
<-wait
|
||||
tcompare(t, aggrAddrs, expAggrAddrs)
|
||||
tcompare(t, errorAddrs, expErrorAddrs)
|
||||
|
||||
// Second loop. Evaluations cleaned, should not result in report messages.
|
||||
aggrAddrs = map[string]struct{}{}
|
||||
errorAddrs = map[string]struct{}{}
|
||||
step <- 0
|
||||
<-wait
|
||||
tcompare(t, aggrAddrs, map[string]struct{}{})
|
||||
tcompare(t, errorAddrs, map[string]struct{}{})
|
||||
|
||||
// Cause Start to stop.
|
||||
mox.ShutdownCancel()
|
||||
step <- time.Minute
|
||||
}
|
||||
|
||||
// Typical case, with a single address that receives an aggregate report.
|
||||
test([]Evaluation{eval}, map[string]struct{}{"dmarcrpt@sender.example": {}}, map[string]struct{}{}, expFeedback)
|
||||
|
||||
// Only optional evaluations, no report at all.
|
||||
evalOpt := eval
|
||||
evalOpt.Optional = true
|
||||
test([]Evaluation{evalOpt}, map[string]struct{}{}, map[string]struct{}{}, nil)
|
||||
|
||||
// Address is suppressed.
|
||||
sa := SuppressAddress{ReportingAddress: "dmarcrpt@sender.example", Until: time.Now().Add(time.Minute)}
|
||||
err = EvalDB.Insert(ctxbg, &sa)
|
||||
tcheckf(t, err, "insert suppress address")
|
||||
test([]Evaluation{eval}, map[string]struct{}{}, map[string]struct{}{}, nil)
|
||||
|
||||
// Suppression has expired.
|
||||
sa.Until = time.Now().Add(-time.Minute)
|
||||
err = EvalDB.Update(ctxbg, &sa)
|
||||
tcheckf(t, err, "update suppress address")
|
||||
test([]Evaluation{eval}, map[string]struct{}{"dmarcrpt@sender.example": {}}, map[string]struct{}{}, expFeedback)
|
||||
|
||||
// Two RUAs, one with a size limit that doesn't pass, and one that does.
|
||||
resolver.TXT["_dmarc.sender.example."] = []string{"v=DMARC1; rua=mailto:dmarcrpt1@sender.example!1,mailto:dmarcrpt2@sender.example!10t; ri=3600"}
|
||||
test([]Evaluation{eval}, map[string]struct{}{"dmarcrpt2@sender.example": {}}, map[string]struct{}{}, nil)
|
||||
|
||||
// Redirect to external domain, without permission, no report sent.
|
||||
resolver.TXT["_dmarc.sender.example."] = []string{"v=DMARC1; rua=mailto:unauthorized@other.example"}
|
||||
test([]Evaluation{eval}, map[string]struct{}{}, map[string]struct{}{}, nil)
|
||||
|
||||
// Redirect to external domain, with basic permission.
|
||||
resolver.TXT = map[string][]string{
|
||||
"_dmarc.sender.example.": {"v=DMARC1; rua=mailto:authorized@other.example"},
|
||||
"sender.example._report._dmarc.other.example.": {"v=DMARC1"},
|
||||
}
|
||||
test([]Evaluation{eval}, map[string]struct{}{"authorized@other.example": {}}, map[string]struct{}{}, nil)
|
||||
|
||||
// Redirect to authorized external domain, with 2 allowed replacements and 1 invalid and 1 refusing due to size.
|
||||
resolver.TXT = map[string][]string{
|
||||
"_dmarc.sender.example.": {"v=DMARC1; rua=mailto:authorized@other.example"},
|
||||
"sender.example._report._dmarc.other.example.": {"v=DMARC1; rua=mailto:good1@other.example,mailto:bad1@yetanother.example,mailto:good2@other.example,mailto:badsize@other.example!1"},
|
||||
}
|
||||
test([]Evaluation{eval}, map[string]struct{}{"good1@other.example": {}, "good2@other.example": {}}, map[string]struct{}{}, nil)
|
||||
|
||||
// Without RUA, we send no message.
|
||||
resolver.TXT = map[string][]string{
|
||||
"_dmarc.sender.example.": {"v=DMARC1;"},
|
||||
}
|
||||
test([]Evaluation{eval}, map[string]struct{}{}, map[string]struct{}{}, nil)
|
||||
|
||||
// If the message size limit is reached, an error report is sent.
|
||||
resolver.TXT = map[string][]string{
|
||||
"_dmarc.sender.example.": {"v=DMARC1; rua=mailto:dmarcrpt@sender.example!1"},
|
||||
}
|
||||
test([]Evaluation{eval}, map[string]struct{}{}, map[string]struct{}{"dmarcrpt@sender.example": {}}, nil)
|
||||
}
|
17 dmarcdb/main_test.go Normal file
@ -0,0 +1,17 @@
|
||||
package dmarcdb
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/mjl-/mox/metrics"
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
m.Run()
|
||||
if metrics.Panics.Load() > 0 {
|
||||
fmt.Println("unhandled panics encountered")
|
||||
os.Exit(2)
|
||||
}
|
||||
}
|
@ -1,17 +1,8 @@
|
||||
// Package dmarcdb stores incoming DMARC reports.
|
||||
//
|
||||
// With DMARC, a domain can request emails with DMARC verification results by
|
||||
// remote mail servers to be sent to a specified address. Mox parses such
|
||||
// reports, stores them in its database and makes them available through its
|
||||
// admin web interface.
|
||||
package dmarcdb
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
@ -21,15 +12,11 @@ import (
|
||||
|
||||
"github.com/mjl-/mox/dmarcrpt"
|
||||
"github.com/mjl-/mox/dns"
|
||||
"github.com/mjl-/mox/mlog"
|
||||
"github.com/mjl-/mox/mox-"
|
||||
)
|
||||
|
||||
var xlog = mlog.New("dmarcdb")
|
||||
|
||||
var (
|
||||
dmarcDB *bstore.DB
|
||||
mutex sync.Mutex
|
||||
ReportsDBTypes = []any{DomainFeedback{}} // Types stored in DB.
|
||||
ReportsDB *bstore.DB // Exported for backups.
|
||||
)
|
||||
|
||||
var (
|
||||
@ -67,44 +54,18 @@ type DomainFeedback struct {
|
||||
dmarcrpt.Feedback
|
||||
}
|
||||
|
||||
func database() (rdb *bstore.DB, rerr error) {
|
||||
mutex.Lock()
|
||||
defer mutex.Unlock()
|
||||
if dmarcDB == nil {
|
||||
p := mox.DataDirPath("dmarcrpt.db")
|
||||
os.MkdirAll(filepath.Dir(p), 0770)
|
||||
db, err := bstore.Open(p, &bstore.Options{Timeout: 5 * time.Second, Perm: 0660}, DomainFeedback{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dmarcDB = db
|
||||
}
|
||||
return dmarcDB, nil
|
||||
}
|
||||
|
||||
// Init opens the database.
|
||||
func Init() error {
|
||||
_, err := database()
|
||||
return err
|
||||
}
|
||||
|
||||
// AddReport adds a DMARC aggregate feedback report from an email to the database,
|
||||
// and updates prometheus metrics.
|
||||
//
|
||||
// fromDomain is the domain in the report message From header.
|
||||
func AddReport(ctx context.Context, f *dmarcrpt.Feedback, fromDomain dns.Domain) error {
|
||||
db, err := database()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d, err := dns.ParseDomain(f.PolicyPublished.Domain)
|
||||
if err != nil {
|
||||
return fmt.Errorf("parsing domain in report: %v", err)
|
||||
}
|
||||
|
||||
df := DomainFeedback{0, d.Name(), fromDomain.Name(), *f}
|
||||
if err := db.Insert(&df); err != nil {
|
||||
if err := ReportsDB.Insert(ctx, &df); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@ -143,38 +104,23 @@ func AddReport(ctx context.Context, f *dmarcrpt.Feedback, fromDomain dns.Domain)
|
||||
|
||||
// Records returns all reports in the database.
|
||||
func Records(ctx context.Context) ([]DomainFeedback, error) {
|
||||
db, err := database()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return bstore.QueryDB[DomainFeedback](db).List()
|
||||
return bstore.QueryDB[DomainFeedback](ctx, ReportsDB).List()
|
||||
}
|
||||
|
||||
// RecordID returns the report for the ID.
|
||||
func RecordID(ctx context.Context, id int64) (DomainFeedback, error) {
|
||||
db, err := database()
|
||||
if err != nil {
|
||||
return DomainFeedback{}, err
|
||||
}
|
||||
|
||||
e := DomainFeedback{ID: id}
|
||||
err = db.Get(&e)
|
||||
err := ReportsDB.Get(ctx, &e)
|
||||
return e, err
|
||||
}
|
||||
|
||||
// RecordsPeriodDomain returns the reports overlapping start and end, for the given
|
||||
// domain. If domain is empty, all records match for domain.
|
||||
func RecordsPeriodDomain(ctx context.Context, start, end time.Time, domain string) ([]DomainFeedback, error) {
|
||||
db, err := database()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
s := start.Unix()
|
||||
e := end.Unix()
|
||||
|
||||
q := bstore.QueryDB[DomainFeedback](db)
|
||||
q := bstore.QueryDB[DomainFeedback](ctx, ReportsDB)
|
||||
if domain != "" {
|
||||
q.FilterNonzero(DomainFeedback{Domain: domain})
|
||||
}
|
@ -13,17 +13,20 @@ import (
|
||||
"github.com/mjl-/mox/mox-"
|
||||
)
|
||||
|
||||
var ctxbg = context.Background()
|
||||
|
||||
func TestDMARCDB(t *testing.T) {
|
||||
mox.ConfigStaticPath = "../testdata/dmarcdb/fake.conf"
|
||||
mox.Conf.Static.DataDir = "."
|
||||
mox.Shutdown = ctxbg
|
||||
mox.ConfigStaticPath = filepath.FromSlash("../testdata/dmarcdb/mox.conf")
|
||||
mox.MustLoadConfig(true, false)
|
||||
|
||||
dbpath := mox.DataDirPath("dmarcrpt.db")
|
||||
os.MkdirAll(filepath.Dir(dbpath), 0770)
|
||||
defer os.Remove(dbpath)
|
||||
|
||||
if err := Init(); err != nil {
|
||||
t.Fatalf("init database: %s", err)
|
||||
}
|
||||
os.Remove(mox.DataDirPath("dmarcrpt.db"))
|
||||
err := Init()
|
||||
tcheckf(t, err, "init")
|
||||
defer func() {
|
||||
err := Close()
|
||||
tcheckf(t, err, "close")
|
||||
}()
|
||||
|
||||
feedback := &dmarcrpt.Feedback{
|
||||
ReportMetadata: dmarcrpt.ReportMetadata{
|
||||
@ -76,32 +79,32 @@ func TestDMARCDB(t *testing.T) {
|
||||
},
|
||||
},
|
||||
}
|
||||
if err := AddReport(context.Background(), feedback, dns.Domain{ASCII: "google.com"}); err != nil {
|
||||
if err := AddReport(ctxbg, feedback, dns.Domain{ASCII: "google.com"}); err != nil {
|
||||
t.Fatalf("adding report: %s", err)
|
||||
}
|
||||
|
||||
records, err := Records(context.Background())
|
||||
records, err := Records(ctxbg)
|
||||
if err != nil || len(records) != 1 || !reflect.DeepEqual(&records[0].Feedback, feedback) {
|
||||
t.Fatalf("records: got err %v, records %#v, expected no error, single record with feedback %#v", err, records, feedback)
|
||||
}
|
||||
|
||||
record, err := RecordID(context.Background(), records[0].ID)
|
||||
record, err := RecordID(ctxbg, records[0].ID)
|
||||
if err != nil || !reflect.DeepEqual(&record.Feedback, feedback) {
|
||||
t.Fatalf("record id: got err %v, record %#v, expected feedback %#v", err, record, feedback)
|
||||
}
|
||||
|
||||
start := time.Unix(1596412800, 0)
|
||||
end := time.Unix(1596499199, 0)
|
||||
records, err = RecordsPeriodDomain(context.Background(), start, end, "example.org")
|
||||
records, err = RecordsPeriodDomain(ctxbg, start, end, "example.org")
|
||||
if err != nil || len(records) != 1 || !reflect.DeepEqual(&records[0].Feedback, feedback) {
|
||||
t.Fatalf("records: got err %v, records %#v, expected no error, single record with feedback %#v", err, records, feedback)
|
||||
}
|
||||
|
||||
records, err = RecordsPeriodDomain(context.Background(), end, end, "example.org")
|
||||
records, err = RecordsPeriodDomain(ctxbg, end, end, "example.org")
|
||||
if err != nil || len(records) != 0 {
|
||||
t.Fatalf("records: got err %v, records %#v, expected no error and no records", err, records)
|
||||
}
|
||||
records, err = RecordsPeriodDomain(context.Background(), start, end, "other.example")
|
||||
records, err = RecordsPeriodDomain(ctxbg, start, end, "other.example")
|
||||
if err != nil || len(records) != 0 {
|
||||
t.Fatalf("records: got err %v, records %#v, expected no error and no records", err, records)
|
||||
}
|
@ -1,9 +1,14 @@
|
||||
package dmarcrpt
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
)
|
||||
|
||||
// Initially generated by xsdgen, then modified.
|
||||
|
||||
// Feedback is the top-level XML field returned.
|
||||
type Feedback struct {
|
||||
XMLName xml.Name `xml:"feedback" json:"-"` // todo: removing the json tag triggers bug in sherpadoc, should fix.
|
||||
Version string `xml:"version"`
|
||||
ReportMetadata ReportMetadata `xml:"report_metadata"`
|
||||
PolicyPublished PolicyPublished `xml:"policy_published"`
|
||||
@ -26,6 +31,9 @@ type DateRange struct {
|
||||
|
||||
// PolicyPublished is the policy as found in DNS for the domain.
|
||||
type PolicyPublished struct {
|
||||
// Domain is where DMARC record was found, not necessarily message From. Reports we
|
||||
// generate use unicode names, incoming reports may have either ASCII-only or
|
||||
// Unicode domains.
|
||||
Domain string `xml:"domain"`
|
||||
ADKIM Alignment `xml:"adkim,omitempty"`
|
||||
ASPF Alignment `xml:"aspf,omitempty"`
|
||||
@ -39,6 +47,8 @@ type PolicyPublished struct {
|
||||
type Alignment string
|
||||
|
||||
const (
|
||||
AlignmentAbsent Alignment = ""
|
||||
|
||||
AlignmentRelaxed Alignment = "r" // Subdomains match the DMARC from-domain.
|
||||
AlignmentStrict Alignment = "s" // Only exact from-domain match.
|
||||
)
|
||||
@ -48,6 +58,8 @@ const (
|
||||
type Disposition string
|
||||
|
||||
const (
|
||||
DispositionAbsent Disposition = ""
|
||||
|
||||
DispositionNone Disposition = "none"
|
||||
DispositionQuarantine Disposition = "quarantine"
|
||||
DispositionReject Disposition = "reject"
|
||||
@ -79,6 +91,8 @@ type PolicyEvaluated struct {
|
||||
type DMARCResult string
|
||||
|
||||
const (
|
||||
DMARCAbsent DMARCResult = ""
|
||||
|
||||
DMARCPass DMARCResult = "pass"
|
||||
DMARCFail DMARCResult = "fail"
|
||||
)
|
||||
@ -93,6 +107,8 @@ type PolicyOverrideReason struct {
|
||||
type PolicyOverride string
|
||||
|
||||
const (
|
||||
PolicyOverrideAbsent PolicyOverride = ""
|
||||
|
||||
PolicyOverrideForwarded PolicyOverride = "forwarded"
|
||||
PolicyOverrideSampledOut PolicyOverride = "sampled_out"
|
||||
PolicyOverrideTrustedForwarder PolicyOverride = "trusted_forwarder"
|
||||
@ -122,6 +138,8 @@ type DKIMAuthResult struct {
|
||||
type DKIMResult string
|
||||
|
||||
const (
|
||||
DKIMAbsent DKIMResult = ""
|
||||
|
||||
DKIMNone DKIMResult = "none"
|
||||
DKIMPass DKIMResult = "pass"
|
||||
DKIMFail DKIMResult = "fail"
|
||||
@ -140,6 +158,8 @@ type SPFAuthResult struct {
|
||||
type SPFDomainScope string
|
||||
|
||||
const (
|
||||
SPFDomainScopeAbsent SPFDomainScope = ""
|
||||
|
||||
SPFDomainScopeHelo SPFDomainScope = "helo" // SMTP EHLO
|
||||
SPFDomainScopeMailFrom SPFDomainScope = "mfrom" // SMTP "MAIL FROM".
|
||||
)
|
||||
@ -147,6 +167,8 @@ const (
|
||||
type SPFResult string
|
||||
|
||||
const (
|
||||
SPFAbsent SPFResult = ""
|
||||
|
||||
SPFNone SPFResult = "none"
|
||||
SPFNeutral SPFResult = "neutral"
|
||||
SPFPass SPFResult = "pass"
|
||||
|
@ -9,14 +9,16 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log/slog"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/mjl-/mox/message"
|
||||
"github.com/mjl-/mox/mlog"
|
||||
"github.com/mjl-/mox/moxio"
|
||||
)
|
||||
|
||||
var ErrNoReport = errors.New("no dmarc report found in message")
|
||||
var ErrNoReport = errors.New("no dmarc aggregate report found in message")
|
||||
|
||||
// ParseReport parses an XML aggregate feedback report.
|
||||
// The maximum report size is 20MB.
|
||||
@ -33,34 +35,35 @@ func ParseReport(r io.Reader) (*Feedback, error) {
|
||||
// ParseMessageReport parses an aggregate feedback report from a mail message. The
|
||||
// maximum message size is 15MB, the maximum report size after decompression is
|
||||
// 20MB.
|
||||
func ParseMessageReport(r io.ReaderAt) (*Feedback, error) {
|
||||
func ParseMessageReport(elog *slog.Logger, r io.ReaderAt) (*Feedback, error) {
|
||||
log := mlog.New("dmarcrpt", elog)
|
||||
// ../rfc/7489:1801
|
||||
p, err := message.Parse(&moxio.LimitAtReader{R: r, Limit: 15 * 1024 * 1024})
|
||||
p, err := message.Parse(log.Logger, true, &moxio.LimitAtReader{R: r, Limit: 15 * 1024 * 1024})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("parsing mail message: %s", err)
|
||||
}
|
||||
|
||||
return parseMessageReport(p)
|
||||
return parseMessageReport(log, p)
|
||||
}
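A hedged usage sketch of the new logger-taking signature (file name is hypothetical; assumes os, log, log/slog, fmt and the dmarcrpt import):

f, err := os.Open("report.eml") // hypothetical stored report message
if err != nil {
	log.Fatalf("open: %v", err)
}
defer f.Close()
feedback, err := dmarcrpt.ParseMessageReport(slog.Default(), f)
if err != nil {
	log.Fatalf("parse aggregate report: %v", err)
}
fmt.Println(feedback.ReportMetadata.OrgName, feedback.PolicyPublished.Domain)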
|
||||
|
||||
func parseMessageReport(p message.Part) (*Feedback, error) {
|
||||
func parseMessageReport(log mlog.Log, p message.Part) (*Feedback, error) {
|
||||
// Pretty much any mime structure is allowed. ../rfc/7489:1861
|
||||
// In practice, some parties will send the report as the only (non-multipart)
|
||||
// content of the message.
|
||||
|
||||
if p.MediaType != "MULTIPART" {
|
||||
return parseReport(p)
|
||||
return parseReport(log, p)
|
||||
}
|
||||
|
||||
for {
|
||||
sp, err := p.ParseNextPart()
|
||||
sp, err := p.ParseNextPart(log.Logger)
|
||||
if err == io.EOF {
|
||||
return nil, ErrNoReport
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
report, err := parseMessageReport(*sp)
|
||||
report, err := parseMessageReport(log, *sp)
|
||||
if err == ErrNoReport {
|
||||
continue
|
||||
} else if err != nil || report != nil {
|
||||
@ -69,12 +72,12 @@ func parseMessageReport(p message.Part) (*Feedback, error) {
|
||||
}
|
||||
}
|
||||
|
||||
func parseReport(p message.Part) (*Feedback, error) {
|
||||
func parseReport(log mlog.Log, p message.Part) (*Feedback, error) {
|
||||
ct := strings.ToLower(p.MediaType + "/" + p.MediaSubType)
|
||||
r := p.Reader()
|
||||
|
||||
// If no (useful) content-type is set, try to detect it.
|
||||
if ct == "" || ct == "application/octect-stream" {
|
||||
if ct == "" || ct == "application/octet-stream" {
|
||||
data := make([]byte, 512)
|
||||
n, err := io.ReadFull(r, data)
|
||||
if err == io.EOF {
|
||||
@ -90,8 +93,8 @@ func parseReport(p message.Part) (*Feedback, error) {
|
||||
switch ct {
|
||||
case "application/zip":
|
||||
// Google sends messages with direct application/zip content-type.
|
||||
return parseZip(r)
|
||||
case "application/gzip":
|
||||
return parseZip(log, r)
|
||||
case "application/gzip", "application/x-gzip":
|
||||
gzr, err := gzip.NewReader(r)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("decoding gzip xml report: %s", err)
|
||||
@ -103,7 +106,7 @@ func parseReport(p message.Part) (*Feedback, error) {
|
||||
return nil, ErrNoReport
|
||||
}
|
||||
|
||||
func parseZip(r io.Reader) (*Feedback, error) {
|
||||
func parseZip(log mlog.Log, r io.Reader) (*Feedback, error) {
|
||||
buf, err := io.ReadAll(r)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("reading feedback: %s", err)
|
||||
@ -119,6 +122,9 @@ func parseZip(r io.Reader) (*Feedback, error) {
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("opening file in zip: %s", err)
|
||||
}
|
||||
defer f.Close()
|
||||
defer func() {
|
||||
err := f.Close()
|
||||
log.Check(err, "closing report file in zip file")
|
||||
}()
|
||||
return ParseReport(f)
|
||||
}
|
||||
|
@ -1,12 +1,18 @@
|
||||
package dmarcrpt
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/mjl-/mox/mlog"
|
||||
)
|
||||
|
||||
var pkglog = mlog.New("dmarcrpt", nil)
|
||||
|
||||
const reportExample = `<?xml version="1.0" encoding="UTF-8" ?>
|
||||
<feedback>
|
||||
<report_metadata>
|
||||
@ -57,6 +63,7 @@ const reportExample = `<?xml version="1.0" encoding="UTF-8" ?>
|
||||
|
||||
func TestParseReport(t *testing.T) {
|
||||
var expect = &Feedback{
|
||||
XMLName: xml.Name{Local: "feedback"},
|
||||
ReportMetadata: ReportMetadata{
|
||||
OrgName: "google.com",
|
||||
Email: "noreply-dmarc-support@google.com",
|
||||
@ -118,19 +125,19 @@ func TestParseReport(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestParseMessageReport(t *testing.T) {
|
||||
const dir = "../testdata/dmarc-reports"
|
||||
dir := filepath.FromSlash("../testdata/dmarc-reports")
|
||||
files, err := os.ReadDir(dir)
|
||||
if err != nil {
|
||||
t.Fatalf("listing dmarc report emails: %s", err)
|
||||
t.Fatalf("listing dmarc aggregate report emails: %s", err)
|
||||
}
|
||||
|
||||
for _, file := range files {
|
||||
p := dir + "/" + file.Name()
|
||||
p := filepath.Join(dir, file.Name())
|
||||
f, err := os.Open(p)
|
||||
if err != nil {
|
||||
t.Fatalf("open %q: %s", p, err)
|
||||
}
|
||||
_, err = ParseMessageReport(f)
|
||||
_, err = ParseMessageReport(pkglog.Logger, f)
|
||||
if err != nil {
|
||||
t.Fatalf("ParseMessageReport: %q: %s", p, err)
|
||||
}
|
||||
@ -138,7 +145,7 @@ func TestParseMessageReport(t *testing.T) {
|
||||
}
|
||||
|
||||
// No report in a non-multipart message.
|
||||
_, err = ParseMessageReport(strings.NewReader("From: <mjl@mox.example>\r\n\r\nNo report.\r\n"))
|
||||
_, err = ParseMessageReport(pkglog.Logger, strings.NewReader("From: <mjl@mox.example>\r\n\r\nNo report.\r\n"))
|
||||
if err != ErrNoReport {
|
||||
t.Fatalf("message without report, got err %#v, expected ErrNoreport", err)
|
||||
}
|
||||
@ -164,7 +171,7 @@ MIME-Version: 1.0
|
||||
|
||||
--===============5735553800636657282==--
|
||||
`, "\n", "\r\n")
|
||||
_, err = ParseMessageReport(strings.NewReader(multipartNoreport))
|
||||
_, err = ParseMessageReport(pkglog.Logger, strings.NewReader(multipartNoreport))
|
||||
if err != ErrNoReport {
|
||||
t.Fatalf("message without report, got err %#v, expected ErrNoreport", err)
|
||||
}
|
||||
|
91 dns/dns.go
@ -9,19 +9,31 @@ import (
|
||||
"strings"
|
||||
|
||||
"golang.org/x/net/idna"
|
||||
|
||||
"github.com/mjl-/adns"
|
||||
)
|
||||
|
||||
var errTrailingDot = errors.New("dns name has trailing dot")
|
||||
// Pedantic enables stricter parsing.
|
||||
var Pedantic bool
|
||||
|
||||
var (
|
||||
errTrailingDot = errors.New("dns name has trailing dot")
|
||||
errUnderscore = errors.New("domain name with underscore")
|
||||
errIDNA = errors.New("idna")
|
||||
errIPNotName = errors.New("ip address while name required")
|
||||
)
|
||||
|
||||
// Domain is a domain name, with one or more labels, with at least an ASCII
|
||||
// representation, and for IDNA non-ASCII domains a unicode representation.
|
||||
// The ASCII string must be used for DNS lookups.
|
||||
// The ASCII string must be used for DNS lookups. The strings do not have a
|
||||
// trailing dot. When using with StrictResolver, add the trailing dot.
|
||||
type Domain struct {
|
||||
// A non-unicode domain, e.g. with A-labels (xn--...) or NR-LDH (non-reserved
|
||||
// letters/digits/hyphens) labels. Always in lower case.
|
||||
// letters/digits/hyphens) labels. Always in lower case. No trailing dot.
|
||||
ASCII string
|
||||
|
||||
// Name as U-labels. Empty if this is an ASCII-only domain.
|
||||
// Name as U-labels, in Unicode NFC. Empty if this is an ASCII-only domain. No
|
||||
// trailing dot.
|
||||
Unicode string
|
||||
}
|
||||
|
||||
@ -56,6 +68,13 @@ func (d Domain) ASCIIExtra(smtputf8 bool) string {
|
||||
// String returns a human-readable string.
|
||||
// For IDNA names, the string contains both the unicode and ASCII name.
|
||||
func (d Domain) String() string {
|
||||
return d.LogString()
|
||||
}
|
||||
|
||||
// LogString returns a domain for logging.
|
||||
// For IDNA names, the string is the slash-separated Unicode and ASCII name.
|
||||
// For ASCII-only domain names, just the ASCII string is returned.
|
||||
func (d Domain) LogString() string {
|
||||
if d.Unicode == "" {
|
||||
return d.ASCII
|
||||
}
|
||||
@ -71,18 +90,26 @@ func (d Domain) IsZero() bool {
|
||||
// labels (unicode).
|
||||
// Names are IDN-canonicalized and lower-cased.
|
||||
// Characters in unicode can be replaced by equivalents. E.g. "Ⓡ" to "r". This
|
||||
// means you should only compare parsed domain names, never strings directly.
|
||||
// means you should only compare parsed domain names, never unparsed strings
|
||||
// directly.
|
||||
func ParseDomain(s string) (Domain, error) {
|
||||
if strings.HasSuffix(s, ".") {
|
||||
return Domain{}, errTrailingDot
|
||||
}
|
||||
|
||||
// IPv4 addresses would be accepted by idna lookups. TLDs cannot be all numerical,
|
||||
// so IP addresses are not valid DNS names.
|
||||
if net.ParseIP(s) != nil {
|
||||
return Domain{}, errIPNotName
|
||||
}
|
||||
|
||||
ascii, err := idna.Lookup.ToASCII(s)
|
||||
if err != nil {
|
||||
return Domain{}, fmt.Errorf("to ascii: %w", err)
|
||||
return Domain{}, fmt.Errorf("%w: to ascii: %v", errIDNA, err)
|
||||
}
|
||||
unicode, err := idna.Lookup.ToUnicode(s)
|
||||
if err != nil {
|
||||
return Domain{}, fmt.Errorf("to unicode: %w", err)
|
||||
return Domain{}, fmt.Errorf("%w: to unicode: %w", errIDNA, err)
|
||||
}
|
||||
// todo: should we cause errors for unicode domains that were not in
|
||||
// canonical form? we are now accepting all kinds of obscure spellings
|
||||
@ -94,16 +121,54 @@ func ParseDomain(s string) (Domain, error) {
|
||||
return Domain{ascii, unicode}, nil
|
||||
}
|
||||
|
||||
// IsNotFound returns whether an error is a net.DNSError with IsNotFound set.
|
||||
// ParseDomainLax parses a domain like ParseDomain, but allows labels with
|
||||
// underscores if the entire domain name is ASCII-only non-IDNA and Pedantic mode
|
||||
// is not enabled. Used for interoperability, e.g. domains may specify MX
|
||||
// targets with underscores.
|
||||
func ParseDomainLax(s string) (Domain, error) {
|
||||
if Pedantic || !strings.Contains(s, "_") {
|
||||
return ParseDomain(s)
|
||||
}
|
||||
|
||||
// If there is any non-ASCII, this is certainly not an A-label-only domain.
|
||||
s = strings.ToLower(s)
|
||||
for _, c := range s {
|
||||
if c >= 0x80 {
|
||||
return Domain{}, fmt.Errorf("%w: underscore and non-ascii not allowed", errUnderscore)
|
||||
}
|
||||
}
|
||||
|
||||
// Try parsing with underscores replaced with allowed ASCII character.
|
||||
// If that's not valid, the version with underscore isn't either.
|
||||
repl := strings.ReplaceAll(s, "_", "a")
|
||||
d, err := ParseDomain(repl)
|
||||
if err != nil {
|
||||
return Domain{}, fmt.Errorf("%w: %v", errUnderscore, err)
|
||||
}
|
||||
// If we found an IDNA domain, we're not going to allow it.
|
||||
if d.Unicode != "" {
|
||||
return Domain{}, fmt.Errorf("%w: idna domain with underscores not allowed", errUnderscore)
|
||||
}
|
||||
// Just to be safe, ensure no unexpected conversions happened.
|
||||
if d.ASCII != repl {
|
||||
return Domain{}, fmt.Errorf("%w: underscores and non-canonical names not allowed", errUnderscore)
|
||||
}
|
||||
return Domain{ASCII: s}, nil
|
||||
}
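A brief sketch matching the test below: strict parsing rejects underscore labels while the lax variant accepts them for ASCII-only names (assumes dns, fmt and log imports):

if _, err := dns.ParseDomain("_underscore.xmox.nl"); err != nil {
	fmt.Println("strict parse rejects underscores:", err)
}
d, err := dns.ParseDomainLax("_underscore.xmox.nl")
if err != nil {
	log.Fatalf("lax parse: %v", err)
}
fmt.Println(d.ASCII) // _underscore.xmox.nl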
|
||||
|
||||
// IsNotFound returns whether an error is an adns.DNSError or net.DNSError with
|
||||
// IsNotFound set.
|
||||
//
|
||||
// IsNotFound means the requested type does not exist for the given domain (a
|
||||
// nodata or nxdomain response). It doesn't not necessarily mean no other types
|
||||
// for that name exist.
|
||||
// nodata or nxdomain response). It does not necessarily mean that no other types for
|
||||
// that name exist.
|
||||
//
|
||||
// A DNS server can respond to a lookup with an error "nxdomain" to indicate a
|
||||
// name does not exist (at all), or with a success status with an empty list.
|
||||
// The Go resolver returns an IsNotFound error for both cases, there is no need
|
||||
// to explicitly check for zero entries.
|
||||
// The adns resolver (just like the Go resolver) returns an IsNotFound error for
|
||||
// both cases, there is no need to explicitly check for zero entries.
|
||||
func IsNotFound(err error) bool {
|
||||
var adnsErr *adns.DNSError
|
||||
var dnsErr *net.DNSError
|
||||
return err != nil && errors.As(err, &dnsErr) && dnsErr.IsNotFound
|
||||
return err != nil && (errors.As(err, &adnsErr) && adnsErr.IsNotFound || errors.As(err, &dnsErr) && dnsErr.IsNotFound)
|
||||
}
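A sketch of how a caller might use IsNotFound; lookupOptionalTXT is a hypothetical helper in a consuming package, not part of this change:

// lookupOptionalTXT treats nxdomain/nodata as "record absent" rather than as
// an operational error.
func lookupOptionalTXT(ctx context.Context, resolver dns.Resolver, name string) ([]string, bool, error) {
	txts, _, err := resolver.LookupTXT(ctx, name)
	if dns.IsNotFound(err) {
		return nil, false, nil
	}
	if err != nil {
		return nil, false, err
	}
	return txts, true, nil
}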
|
||||
|
@ -6,9 +6,15 @@ import (
|
||||
)
|
||||
|
||||
func TestParseDomain(t *testing.T) {
|
||||
test := func(s string, exp Domain, expErr error) {
|
||||
test := func(lax bool, s string, exp Domain, expErr error) {
|
||||
t.Helper()
|
||||
dom, err := ParseDomain(s)
|
||||
var dom Domain
|
||||
var err error
|
||||
if lax {
|
||||
dom, err = ParseDomainLax(s)
|
||||
} else {
|
||||
dom, err = ParseDomain(s)
|
||||
}
|
||||
if (err == nil) != (expErr == nil) || expErr != nil && !errors.Is(err, expErr) {
|
||||
t.Fatalf("parse domain %q: err %v, expected %v", s, err, expErr)
|
||||
}
|
||||
@ -18,10 +24,15 @@ func TestParseDomain(t *testing.T) {
|
||||
}
|
||||
|
||||
// We rely on normalization of names throughout the code base.
|
||||
test("xmox.nl", Domain{"xmox.nl", ""}, nil)
|
||||
test("XMOX.NL", Domain{"xmox.nl", ""}, nil)
|
||||
test("TEST☺.XMOX.NL", Domain{"xn--test-3o3b.xmox.nl", "test☺.xmox.nl"}, nil)
|
||||
test("TEST☺.XMOX.NL", Domain{"xn--test-3o3b.xmox.nl", "test☺.xmox.nl"}, nil)
|
||||
test("ℂᵤⓇℒ。𝐒🄴", Domain{"curl.se", ""}, nil) // https://daniel.haxx.se/blog/2022/12/14/idn-is-crazy/
|
||||
test("xmox.nl.", Domain{}, errTrailingDot)
|
||||
test(false, "xmox.nl", Domain{"xmox.nl", ""}, nil)
|
||||
test(false, "XMOX.NL", Domain{"xmox.nl", ""}, nil)
|
||||
test(false, "TEST☺.XMOX.NL", Domain{"xn--test-3o3b.xmox.nl", "test☺.xmox.nl"}, nil)
|
||||
test(false, "TEST☺.XMOX.NL", Domain{"xn--test-3o3b.xmox.nl", "test☺.xmox.nl"}, nil)
|
||||
test(false, "ℂᵤⓇℒ。𝐒🄴", Domain{"curl.se", ""}, nil) // https://daniel.haxx.se/blog/2022/12/14/idn-is-crazy/
|
||||
test(false, "xmox.nl.", Domain{}, errTrailingDot)
|
||||
|
||||
test(false, "_underscore.xmox.nl", Domain{}, errIDNA)
|
||||
test(true, "_underscore.xmox.NL", Domain{ASCII: "_underscore.xmox.nl"}, nil)
|
||||
test(true, "_underscore.☺.xmox.nl", Domain{}, errUnderscore)
|
||||
test(true, "_underscore.xn--test-3o3b.xmox.nl", Domain{}, errUnderscore)
|
||||
}
|
||||
|
36 dns/examples_test.go Normal file
@ -0,0 +1,36 @@
|
||||
package dns_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/mjl-/mox/dns"
|
||||
)
|
||||
|
||||
func ExampleParseDomain() {
|
||||
// ASCII-only domain.
|
||||
basic, err := dns.ParseDomain("example.com")
|
||||
if err != nil {
|
||||
log.Fatalf("parse domain: %v", err)
|
||||
}
|
||||
fmt.Printf("%s\n", basic)
|
||||
|
||||
// IDNA domain xn--74h.example.
|
||||
smile, err := dns.ParseDomain("☺.example")
|
||||
if err != nil {
|
||||
log.Fatalf("parse domain: %v", err)
|
||||
}
|
||||
fmt.Printf("%s\n", smile)
|
||||
|
||||
// ASCII-only domain curl.se in a surprising but allowed spelling.
|
||||
surprising, err := dns.ParseDomain("ℂᵤⓇℒ。𝐒🄴")
|
||||
if err != nil {
|
||||
log.Fatalf("parse domain: %v", err)
|
||||
}
|
||||
fmt.Printf("%s\n", surprising)
|
||||
|
||||
// Output:
|
||||
// example.com
|
||||
// ☺.example/xn--74h.example
|
||||
// curl.se
|
||||
}
|
@ -24,6 +24,15 @@ func (d IPDomain) String() string {
|
||||
return d.Domain.Name()
|
||||
}
|
||||
|
||||
// LogString returns a string with both ASCII-only and optional UTF-8
|
||||
// representation.
|
||||
func (d IPDomain) LogString() string {
|
||||
if len(d.IP) > 0 {
|
||||
return d.IP.String()
|
||||
}
|
||||
return d.Domain.LogString()
|
||||
}
|
||||
|
||||
// XString is like String, but only returns UTF-8 domains if utf8 is true.
|
||||
func (d IPDomain) XString(utf8 bool) string {
|
||||
if d.IsIP() {
|
||||
|
242 dns/mock.go
@ -4,153 +4,249 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"slices"
|
||||
|
||||
"github.com/mjl-/adns"
|
||||
)
|
||||
|
||||
// MockResolver is a Resolver used for testing.
|
||||
// Set DNS records in the fields, which map FQDNs (with trailing dot) to values.
|
||||
type MockResolver struct {
|
||||
PTR map[string][]string
|
||||
A map[string][]string
|
||||
AAAA map[string][]string
|
||||
TXT map[string][]string
|
||||
MX map[string][]*net.MX
|
||||
CNAME map[string]string
|
||||
Fail map[Mockreq]struct{}
|
||||
PTR map[string][]string
|
||||
A map[string][]string
|
||||
AAAA map[string][]string
|
||||
TXT map[string][]string
|
||||
MX map[string][]*net.MX
|
||||
TLSA map[string][]adns.TLSA // Keys are e.g. _25._tcp.<host>.
|
||||
CNAME map[string]string
|
||||
Fail []string // Records of the form "type name", e.g. "cname localhost." that will return a servfail.
|
||||
AllAuthentic bool // Default value for authentic in responses. Overridden with Authentic and Inauthentic
|
||||
Authentic []string // Like Fail, but records that cause the response to be authentic.
|
||||
Inauthentic []string // Like Authentic, but making response inauthentic.
|
||||
}
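A hedged test-setup sketch: TXT keys are fully qualified with a trailing dot, Fail entries are "type name" strings, and AllAuthentic marks responses as DNSSEC-authentic (names and record values are made up):

resolver := dns.MockResolver{
	TXT: map[string][]string{
		"_dmarc.sender.example.": {"v=DMARC1; p=reject; rua=mailto:dmarcrpt@sender.example"},
	},
	Fail:         []string{"txt _dmarc.broken.example."},
	AllAuthentic: true,
}
txts, result, err := resolver.LookupTXT(context.Background(), "_dmarc.sender.example.")
// err is nil, result.Authentic is true (from AllAuthentic), txts holds the record.
fmt.Println(txts, result.Authentic, err)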
|
||||
|
||||
type Mockreq struct {
|
||||
type mockReq struct {
|
||||
Type string // E.g. "cname", "txt", "mx", "ptr", etc.
|
||||
Name string
|
||||
Name string // Name of request. For TLSA, the full requested DNS name, e.g. _25._tcp.<host>.
|
||||
}
|
||||
|
||||
func (mr mockReq) String() string {
|
||||
return mr.Type + " " + mr.Name
|
||||
}
|
||||
|
||||
var _ Resolver = MockResolver{}
|
||||
|
||||
func (r MockResolver) nxdomain(s string) *net.DNSError {
|
||||
return &net.DNSError{
|
||||
func (r MockResolver) result(ctx context.Context, mr mockReq) (string, adns.Result, error) {
|
||||
result := adns.Result{Authentic: r.AllAuthentic}
|
||||
|
||||
if err := ctx.Err(); err != nil {
|
||||
return "", result, err
|
||||
}
|
||||
|
||||
updateAuthentic := func(mock string) {
|
||||
if slices.Contains(r.Authentic, mock) {
|
||||
result.Authentic = true
|
||||
}
|
||||
if slices.Contains(r.Inauthentic, mock) {
|
||||
result.Authentic = false
|
||||
}
|
||||
}
|
||||
|
||||
for {
|
||||
if slices.Contains(r.Fail, mr.String()) {
|
||||
updateAuthentic(mr.String())
|
||||
return mr.Name, adns.Result{}, r.servfail(mr.Name)
|
||||
}
|
||||
|
||||
cname, ok := r.CNAME[mr.Name]
|
||||
if !ok {
|
||||
updateAuthentic(mr.String())
|
||||
break
|
||||
}
|
||||
updateAuthentic("cname " + mr.Name)
|
||||
if mr.Type == "cname" {
|
||||
return mr.Name, result, nil
|
||||
}
|
||||
mr.Name = cname
|
||||
}
|
||||
return mr.Name, result, nil
|
||||
}
|
||||
|
||||
func (r MockResolver) nxdomain(s string) error {
|
||||
return &adns.DNSError{
|
||||
Err: "no record",
|
||||
Name: s,
|
||||
Server: "localhost",
|
||||
Server: "mock",
|
||||
IsNotFound: true,
|
||||
}
|
||||
}
|
||||
|
||||
func (r MockResolver) servfail(s string) *net.DNSError {
|
||||
return &net.DNSError{
|
||||
func (r MockResolver) servfail(s string) error {
|
||||
return &adns.DNSError{
|
||||
Err: "temp error",
|
||||
Name: s,
|
||||
Server: "localhost",
|
||||
Server: "mock",
|
||||
IsTemporary: true,
|
||||
}
|
||||
}
|
||||
|
||||
func (r MockResolver) LookupCNAME(ctx context.Context, name string) (string, error) {
|
||||
if _, ok := r.Fail[Mockreq{"cname", name}]; ok {
|
||||
return "", r.servfail(name)
|
||||
func (r MockResolver) LookupPort(ctx context.Context, network, service string) (port int, err error) {
|
||||
if err := ctx.Err(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if cname, ok := r.CNAME[name]; ok {
|
||||
return cname, nil
|
||||
}
|
||||
return "", r.nxdomain(name)
|
||||
return net.LookupPort(network, service)
|
||||
}
|
||||
|
||||
func (r MockResolver) LookupAddr(ctx context.Context, ip string) ([]string, error) {
|
||||
if _, ok := r.Fail[Mockreq{"ptr", ip}]; ok {
|
||||
return nil, r.servfail(ip)
|
||||
func (r MockResolver) LookupCNAME(ctx context.Context, name string) (string, adns.Result, error) {
|
||||
mr := mockReq{"cname", name}
|
||||
name, result, err := r.result(ctx, mr)
|
||||
if err != nil {
|
||||
return name, result, err
|
||||
}
|
||||
cname, ok := r.CNAME[name]
|
||||
if !ok {
|
||||
return cname, result, r.nxdomain(name)
|
||||
}
|
||||
return cname, result, nil
|
||||
}
|
||||
|
||||
func (r MockResolver) LookupAddr(ctx context.Context, ip string) ([]string, adns.Result, error) {
|
||||
mr := mockReq{"ptr", ip}
|
||||
_, result, err := r.result(ctx, mr)
|
||||
if err != nil {
|
||||
return nil, result, err
|
||||
}
|
||||
l, ok := r.PTR[ip]
|
||||
if !ok {
|
||||
return nil, r.nxdomain(ip)
|
||||
return nil, result, r.nxdomain(ip)
|
||||
}
|
||||
return l, nil
|
||||
return l, result, nil
|
||||
}
|
||||
|
||||
func (r MockResolver) LookupNS(ctx context.Context, name string) ([]*net.NS, error) {
|
||||
return nil, r.servfail("ns not implemented")
|
||||
}
|
||||
|
||||
func (r MockResolver) LookupPort(ctx context.Context, network, service string) (port int, err error) {
|
||||
return 0, r.servfail("port not implemented")
|
||||
}
|
||||
|
||||
func (r MockResolver) LookupSRV(ctx context.Context, service, proto, name string) (string, []*net.SRV, error) {
|
||||
return "", nil, r.servfail("srv not implemented")
|
||||
}
|
||||
|
||||
func (r MockResolver) LookupIPAddr(ctx context.Context, host string) ([]net.IPAddr, error) {
|
||||
if _, ok := r.Fail[Mockreq{"ipaddr", host}]; ok {
|
||||
return nil, r.servfail(host)
|
||||
}
|
||||
addrs, err := r.LookupHost(ctx, host)
|
||||
func (r MockResolver) LookupNS(ctx context.Context, name string) ([]*net.NS, adns.Result, error) {
|
||||
mr := mockReq{"ns", name}
|
||||
_, result, err := r.result(ctx, mr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, result, err
|
||||
}
|
||||
return nil, result, r.servfail("ns not implemented")
|
||||
}
|
||||
|
||||
func (r MockResolver) LookupSRV(ctx context.Context, service, proto, name string) (string, []*net.SRV, adns.Result, error) {
|
||||
xname := fmt.Sprintf("_%s._%s.%s", service, proto, name)
|
||||
mr := mockReq{"srv", xname}
|
||||
name, result, err := r.result(ctx, mr)
|
||||
if err != nil {
|
||||
return name, nil, result, err
|
||||
}
|
||||
return name, nil, result, r.servfail("srv not implemented")
|
||||
}
|
||||
|
||||
func (r MockResolver) LookupIPAddr(ctx context.Context, host string) ([]net.IPAddr, adns.Result, error) {
|
||||
// todo: make closer to resolver, doing a & aaaa lookups, including their error/(in)secure status.
|
||||
mr := mockReq{"ipaddr", host}
|
||||
_, result, err := r.result(ctx, mr)
|
||||
if err != nil {
|
||||
return nil, result, err
|
||||
}
|
||||
addrs, result1, err := r.LookupHost(ctx, host)
|
||||
result.Authentic = result.Authentic && result1.Authentic
|
||||
if err != nil {
|
||||
return nil, result, err
|
||||
}
|
||||
ips := make([]net.IPAddr, len(addrs))
|
||||
for i, a := range addrs {
|
||||
ip := net.ParseIP(a)
|
||||
if ip == nil {
|
||||
return nil, fmt.Errorf("malformed ip %q", a)
|
||||
return nil, result, fmt.Errorf("malformed ip %q", a)
|
||||
}
|
||||
ips[i] = net.IPAddr{IP: ip}
|
||||
}
|
||||
return ips, nil
|
||||
return ips, result, nil
|
||||
}
|
||||
|
||||
func (r MockResolver) LookupHost(ctx context.Context, host string) (addrs []string, err error) {
|
||||
if _, ok := r.Fail[Mockreq{"host", host}]; ok {
|
||||
return nil, r.servfail(host)
|
||||
func (r MockResolver) LookupHost(ctx context.Context, host string) ([]string, adns.Result, error) {
|
||||
// todo: make closer to resolver, doing a & aaaa lookups, including their error/(in)secure status.
|
||||
mr := mockReq{"host", host}
|
||||
_, result, err := r.result(ctx, mr)
|
||||
if err != nil {
|
||||
return nil, result, err
|
||||
}
|
||||
var addrs []string
|
||||
addrs = append(addrs, r.A[host]...)
|
||||
addrs = append(addrs, r.AAAA[host]...)
|
||||
if len(addrs) > 0 {
|
||||
return addrs, nil
|
||||
if len(addrs) == 0 {
|
||||
return nil, result, r.nxdomain(host)
|
||||
}
|
||||
if cname, ok := r.CNAME[host]; ok {
|
||||
return []string{cname}, nil
|
||||
}
|
||||
return nil, r.nxdomain(host)
|
||||
return addrs, result, nil
|
||||
}
|
||||
|
||||
func (r MockResolver) LookupIP(ctx context.Context, network, host string) ([]net.IP, error) {
if _, ok := r.Fail[Mockreq{"ip", host}]; ok {
return nil, r.servfail(host)
func (r MockResolver) LookupIP(ctx context.Context, network, host string) ([]net.IP, adns.Result, error) {
mr := mockReq{"ip", host}
name, result, err := r.result(ctx, mr)
if err != nil {
return nil, result, err
}
var ips []net.IP
switch network {
case "ip", "ip4":
for _, ip := range r.A[host] {
for _, ip := range r.A[name] {
ips = append(ips, net.ParseIP(ip))
}
}
switch network {
case "ip", "ip6":
for _, ip := range r.AAAA[host] {
for _, ip := range r.AAAA[name] {
ips = append(ips, net.ParseIP(ip))
}
}
if len(ips) == 0 {
return nil, r.nxdomain(host)
return nil, result, r.nxdomain(host)
}
return ips, nil
return ips, result, nil
}

func (r MockResolver) LookupMX(ctx context.Context, name string) ([]*net.MX, error) {
if _, ok := r.Fail[Mockreq{"mx", name}]; ok {
return nil, r.servfail(name)
func (r MockResolver) LookupMX(ctx context.Context, name string) ([]*net.MX, adns.Result, error) {
mr := mockReq{"mx", name}
name, result, err := r.result(ctx, mr)
if err != nil {
return nil, result, err
}
l, ok := r.MX[name]
if !ok {
return nil, r.nxdomain(name)
return nil, result, r.nxdomain(name)
}
return l, nil
return l, result, nil
}

func (r MockResolver) LookupTXT(ctx context.Context, name string) ([]string, error) {
if _, ok := r.Fail[Mockreq{"txt", name}]; ok {
return nil, r.servfail(name)
func (r MockResolver) LookupTXT(ctx context.Context, name string) ([]string, adns.Result, error) {
mr := mockReq{"txt", name}
name, result, err := r.result(ctx, mr)
if err != nil {
return nil, result, err
}
l, ok := r.TXT[name]
if !ok {
return nil, r.nxdomain(name)
return nil, result, r.nxdomain(name)
}
return l, nil
return l, result, nil
}

func (r MockResolver) LookupTLSA(ctx context.Context, port int, protocol string, host string) ([]adns.TLSA, adns.Result, error) {
var name string
if port == 0 && protocol == "" {
name = host
} else {
name = fmt.Sprintf("_%d._%s.%s", port, protocol, host)
}
mr := mockReq{"tlsa", name}
name, result, err := r.result(ctx, mr)
if err != nil {
return nil, result, err
}
l, ok := r.TLSA[name]
if !ok {
return nil, result, r.nxdomain(name)
}
return l, result, nil
}
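As an aside (not part of the diff): the LookupTLSA mock above composes the TLSA owner name from port, protocol and host, so a DANE lookup for SMTP on port 25 to a host mx1.example (hostname illustrative) would query a name like this:

    name := fmt.Sprintf("_%d._%s.%s", 25, "tcp", "mx1.example.")
    // name is "_25._tcp.mx1.example."
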
276 dns/resolver.go
@ -3,50 +3,45 @@ package dns
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"net"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"github.com/mjl-/adns"
|
||||
|
||||
"github.com/mjl-/mox/mlog"
|
||||
"github.com/mjl-/mox/stub"
|
||||
)
|
||||
|
||||
// todo future: replace with a dnssec capable resolver
|
||||
// todo future: change to interface that is closer to DNS. 1. expose nxdomain vs success with zero entries: nxdomain means the name does not exist for any dns resource record type, success with zero records means the name exists for other types than the requested type; 2. add ability to not follow cname records when resolving. the net resolver automatically follows cnames for LookupHost, LookupIP, LookupIPAddr. when resolving names found in mx records, we explicitly must not follow cnames. that seems impossible at the moment. 3. when looking up a cname, actually lookup the record? "net" LookupCNAME will return the requested name with no error if there is no CNAME record. because it returns the canonical name.
|
||||
// todo future: add option to not use anything in the cache, for the admin pages where you check the latest DNS settings, ignoring old cached info.
|
||||
|
||||
var xlog = mlog.New("dns")
|
||||
func init() {
|
||||
net.DefaultResolver.StrictErrors = true
|
||||
}
|
||||
|
||||
var (
|
||||
metricLookup = promauto.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "mox_dns_lookup_duration_seconds",
|
||||
Help: "DNS lookups.",
|
||||
Buckets: []float64{0.001, 0.005, 0.01, 0.05, 0.100, 0.5, 1, 5, 10, 20, 30},
|
||||
},
|
||||
[]string{
|
||||
"pkg",
|
||||
"type", // Lower-case Resolver method name without leading Lookup.
|
||||
"result", // ok, nxdomain, temporary, timeout, canceled, error
|
||||
},
|
||||
)
|
||||
MetricLookup stub.HistogramVec = stub.HistogramVecIgnore{}
|
||||
)
|
||||
|
||||
// Resolver is the interface strict resolver implements.
|
||||
type Resolver interface {
|
||||
LookupAddr(ctx context.Context, addr string) ([]string, error)
|
||||
LookupCNAME(ctx context.Context, host string) (string, error) // NOTE: returns an error if no CNAME record is present.
|
||||
LookupHost(ctx context.Context, host string) (addrs []string, err error)
|
||||
LookupIP(ctx context.Context, network, host string) ([]net.IP, error)
|
||||
LookupIPAddr(ctx context.Context, host string) ([]net.IPAddr, error)
|
||||
LookupMX(ctx context.Context, name string) ([]*net.MX, error)
|
||||
LookupNS(ctx context.Context, name string) ([]*net.NS, error)
|
||||
LookupPort(ctx context.Context, network, service string) (port int, err error)
|
||||
LookupSRV(ctx context.Context, service, proto, name string) (string, []*net.SRV, error)
|
||||
LookupTXT(ctx context.Context, name string) ([]string, error)
|
||||
LookupAddr(ctx context.Context, addr string) ([]string, adns.Result, error) // Always returns absolute names, with trailing dot.
|
||||
LookupCNAME(ctx context.Context, host string) (string, adns.Result, error) // NOTE: returns an error if no CNAME record is present.
|
||||
LookupHost(ctx context.Context, host string) ([]string, adns.Result, error)
|
||||
LookupIP(ctx context.Context, network, host string) ([]net.IP, adns.Result, error)
|
||||
LookupIPAddr(ctx context.Context, host string) ([]net.IPAddr, adns.Result, error)
|
||||
LookupMX(ctx context.Context, name string) ([]*net.MX, adns.Result, error)
|
||||
LookupNS(ctx context.Context, name string) ([]*net.NS, adns.Result, error)
|
||||
LookupSRV(ctx context.Context, service, proto, name string) (string, []*net.SRV, adns.Result, error)
|
||||
LookupTXT(ctx context.Context, name string) ([]string, adns.Result, error)
|
||||
LookupTLSA(ctx context.Context, port int, protocol, host string) ([]adns.TLSA, adns.Result, error)
|
||||
}
|
||||
|
||||
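The interface above now returns an adns.Result alongside every answer; its Authentic field indicates whether the response was DNSSEC-authenticated. A minimal sketch of how a caller might require an authenticated TXT answer (function name and error text are illustrative, assuming the usual context/fmt imports and this dns package):

    // lookupAuthenticTXT returns TXT records only when the adns.Result marks the
    // answer as authentic (DNSSEC-validated); otherwise it returns an error.
    func lookupAuthenticTXT(ctx context.Context, resolver dns.Resolver, name string) ([]string, error) {
        txts, result, err := resolver.LookupTXT(ctx, name)
        if err != nil {
            return nil, err
        }
        if !result.Authentic {
            return nil, fmt.Errorf("txt records for %s were not dnssec-authenticated", name)
        }
        return txts, nil
    }
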
// WithPackage sets Pkg on resolver if it is a StrictResolve and does not have a package set yet.
|
||||
@ -63,8 +58,17 @@ func WithPackage(resolver Resolver, name string) Resolver {
|
||||
// StrictResolver is a net.Resolver that enforces that DNS names end with a dot,
|
||||
// preventing "search"-relative lookups.
|
||||
type StrictResolver struct {
|
||||
Pkg string // Name of subsystem that is making DNS requests, for metrics.
|
||||
Resolver *net.Resolver // Where the actual lookups are done. If nil, net.DefaultResolver is used for lookups.
|
||||
Pkg string // Name of subsystem that is making DNS requests, for metrics.
|
||||
Resolver *adns.Resolver // Where the actual lookups are done. If nil, adns.DefaultResolver is used for lookups.
|
||||
Log *slog.Logger
|
||||
}
|
||||
|
||||
func (r StrictResolver) log() mlog.Log {
|
||||
pkg := r.Pkg
|
||||
if pkg == "" {
|
||||
pkg = "dns"
|
||||
}
|
||||
return mlog.New(pkg, r.Log)
|
||||
}
|
||||
|
||||
var _ Resolver = StrictResolver{}
|
||||
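A short sketch of the strictness this enforces (host name and package label are illustrative; assumes context, errors and fmt imports): a relative name is rejected with ErrRelativeDNSName before any lookup, while an absolute name with a trailing dot is resolved.

    func exampleStrictLookup(ctx context.Context) {
        resolver := dns.StrictResolver{Pkg: "example"}

        // Relative name: rejected up front, no network lookup happens.
        _, _, err := resolver.LookupHost(ctx, "mail.example.com")
        fmt.Println(errors.Is(err, dns.ErrRelativeDNSName)) // true

        // Absolute name: the lookup proceeds; the adns.Result reports DNSSEC status.
        addrs, result, err := resolver.LookupHost(ctx, "mail.example.com.")
        fmt.Println(addrs, result.Authentic, err)
    }
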
@ -73,7 +77,7 @@ var ErrRelativeDNSName = errors.New("dns: host to lookup must be absolute, endin
|
||||
|
||||
func metricLookupObserve(pkg, typ string, err error, start time.Time) {
|
||||
var result string
|
||||
var dnsErr *net.DNSError
|
||||
var dnsErr *adns.DNSError
|
||||
switch {
|
||||
case err == nil:
|
||||
result = "ok"
|
||||
@ -88,7 +92,7 @@ func metricLookupObserve(pkg, typ string, err error, start time.Time) {
|
||||
default:
|
||||
result = "error"
|
||||
}
|
||||
metricLookup.WithLabelValues(pkg, typ, result).Observe(float64(time.Since(start)) / float64(time.Second))
|
||||
MetricLookup.ObserveLabels(float64(time.Since(start))/float64(time.Second), pkg, typ, result)
|
||||
}
|
||||
|
||||
func (r StrictResolver) WithPackage(name string) Resolver {
|
||||
@ -99,37 +103,91 @@ func (r StrictResolver) WithPackage(name string) Resolver {
|
||||
|
||||
func (r StrictResolver) resolver() Resolver {
|
||||
if r.Resolver == nil {
|
||||
return net.DefaultResolver
|
||||
return adns.DefaultResolver
|
||||
}
|
||||
return r.Resolver
|
||||
}
|
||||
|
||||
func (r StrictResolver) LookupAddr(ctx context.Context, addr string) (resp []string, err error) {
|
||||
func resolveErrorHint(err *error) {
|
||||
e := *err
|
||||
if e == nil {
|
||||
return
|
||||
}
|
||||
dnserr, ok := e.(*adns.DNSError)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
// If the dns server is not running, and it is one of the default/fallback IPs,
|
||||
// hint at where to look.
|
||||
if dnserr.IsTemporary && runtime.GOOS == "linux" && (dnserr.Server == "127.0.0.1:53" || dnserr.Server == "[::1]:53") && strings.HasSuffix(dnserr.Err, "connection refused") {
|
||||
*err = fmt.Errorf("%w (hint: does /etc/resolv.conf point to a running nameserver? in case of systemd-resolved, see systemd-resolved.service(8); better yet, install a proper dnssec-verifying recursive resolver like unbound)", *err)
|
||||
}
|
||||
}
|
||||
|
||||
func (r StrictResolver) LookupPort(ctx context.Context, network, service string) (resp int, err error) {
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
metricLookupObserve(r.Pkg, "port", err, start)
|
||||
r.log().WithContext(ctx).Debugx("dns lookup result", err,
|
||||
slog.String("type", "port"),
|
||||
slog.String("network", network),
|
||||
slog.String("service", service),
|
||||
slog.Int("resp", resp),
|
||||
slog.Duration("duration", time.Since(start)),
|
||||
)
|
||||
}()
|
||||
defer resolveErrorHint(&err)
|
||||
|
||||
resp, err = r.resolver().LookupPort(ctx, network, service)
|
||||
return
|
||||
}
|
||||
|
||||
func (r StrictResolver) LookupAddr(ctx context.Context, addr string) (resp []string, result adns.Result, err error) {
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
metricLookupObserve(r.Pkg, "addr", err, start)
|
||||
xlog.WithContext(ctx).Debugx("dns lookup result", err, mlog.Field("pkg", r.Pkg), mlog.Field("type", "addr"), mlog.Field("addr", addr), mlog.Field("resp", resp), mlog.Field("duration", time.Since(start)))
|
||||
r.log().WithContext(ctx).Debugx("dns lookup result", err,
|
||||
slog.String("type", "addr"),
|
||||
slog.String("addr", addr),
|
||||
slog.Any("resp", resp),
|
||||
slog.Bool("authentic", result.Authentic),
|
||||
slog.Duration("duration", time.Since(start)),
|
||||
)
|
||||
}()
|
||||
defer resolveErrorHint(&err)
|
||||
|
||||
resp, err = r.resolver().LookupAddr(ctx, addr)
|
||||
resp, result, err = r.resolver().LookupAddr(ctx, addr)
|
||||
// For addresses from /etc/hosts without dot, we add the missing trailing dot.
|
||||
for i, s := range resp {
|
||||
if !strings.HasSuffix(s, ".") {
|
||||
resp[i] = s + "."
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// LookupCNAME looks up a CNAME. Unlike "net" LookupCNAME, it returns a "not found"
|
||||
// error if there is no CNAME record.
|
||||
func (r StrictResolver) LookupCNAME(ctx context.Context, host string) (resp string, err error) {
|
||||
func (r StrictResolver) LookupCNAME(ctx context.Context, host string) (resp string, result adns.Result, err error) {
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
metricLookupObserve(r.Pkg, "cname", err, start)
|
||||
xlog.WithContext(ctx).Debugx("dns lookup result", err, mlog.Field("pkg", r.Pkg), mlog.Field("type", "cname"), mlog.Field("host", host), mlog.Field("resp", resp), mlog.Field("duration", time.Since(start)))
|
||||
r.log().WithContext(ctx).Debugx("dns lookup result", err,
|
||||
slog.String("type", "cname"),
|
||||
slog.String("host", host),
|
||||
slog.String("resp", resp),
|
||||
slog.Bool("authentic", result.Authentic),
|
||||
slog.Duration("duration", time.Since(start)),
|
||||
)
|
||||
}()
|
||||
defer resolveErrorHint(&err)
|
||||
|
||||
if !strings.HasSuffix(host, ".") {
|
||||
return "", ErrRelativeDNSName
|
||||
return "", result, ErrRelativeDNSName
|
||||
}
|
||||
resp, err = r.resolver().LookupCNAME(ctx, host)
|
||||
resp, result, err = r.resolver().LookupCNAME(ctx, host)
|
||||
if err == nil && resp == host {
|
||||
return "", &net.DNSError{
|
||||
return "", result, &adns.DNSError{
|
||||
Err: "no cname record",
|
||||
Name: host,
|
||||
Server: "",
|
||||
@ -138,111 +196,177 @@ func (r StrictResolver) LookupCNAME(ctx context.Context, host string) (resp stri
|
||||
}
|
||||
return
|
||||
}
|
||||
func (r StrictResolver) LookupHost(ctx context.Context, host string) (resp []string, err error) {
|
||||
|
||||
func (r StrictResolver) LookupHost(ctx context.Context, host string) (resp []string, result adns.Result, err error) {
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
metricLookupObserve(r.Pkg, "host", err, start)
|
||||
xlog.WithContext(ctx).Debugx("dns lookup result", err, mlog.Field("pkg", r.Pkg), mlog.Field("type", "host"), mlog.Field("host", host), mlog.Field("resp", resp), mlog.Field("duration", time.Since(start)))
|
||||
r.log().WithContext(ctx).Debugx("dns lookup result", err,
|
||||
slog.String("type", "host"),
|
||||
slog.String("host", host),
|
||||
slog.Any("resp", resp),
|
||||
slog.Bool("authentic", result.Authentic),
|
||||
slog.Duration("duration", time.Since(start)),
|
||||
)
|
||||
}()
|
||||
defer resolveErrorHint(&err)
|
||||
|
||||
if !strings.HasSuffix(host, ".") {
|
||||
return nil, ErrRelativeDNSName
|
||||
return nil, result, ErrRelativeDNSName
|
||||
}
|
||||
resp, err = r.resolver().LookupHost(ctx, host)
|
||||
resp, result, err = r.resolver().LookupHost(ctx, host)
|
||||
return
|
||||
}
|
||||
|
||||
func (r StrictResolver) LookupIP(ctx context.Context, network, host string) (resp []net.IP, err error) {
|
||||
func (r StrictResolver) LookupIP(ctx context.Context, network, host string) (resp []net.IP, result adns.Result, err error) {
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
metricLookupObserve(r.Pkg, "ip", err, start)
|
||||
xlog.WithContext(ctx).Debugx("dns lookup result", err, mlog.Field("pkg", r.Pkg), mlog.Field("type", "ip"), mlog.Field("network", network), mlog.Field("host", host), mlog.Field("resp", resp), mlog.Field("duration", time.Since(start)))
|
||||
r.log().WithContext(ctx).Debugx("dns lookup result", err,
|
||||
slog.String("type", "ip"),
|
||||
slog.String("network", network),
|
||||
slog.String("host", host),
|
||||
slog.Any("resp", resp),
|
||||
slog.Bool("authentic", result.Authentic),
|
||||
slog.Duration("duration", time.Since(start)),
|
||||
)
|
||||
}()
|
||||
defer resolveErrorHint(&err)
|
||||
|
||||
if !strings.HasSuffix(host, ".") {
|
||||
return nil, ErrRelativeDNSName
|
||||
return nil, result, ErrRelativeDNSName
|
||||
}
|
||||
resp, err = r.resolver().LookupIP(ctx, network, host)
|
||||
resp, result, err = r.resolver().LookupIP(ctx, network, host)
|
||||
return
|
||||
}
|
||||
|
||||
func (r StrictResolver) LookupIPAddr(ctx context.Context, host string) (resp []net.IPAddr, err error) {
|
||||
func (r StrictResolver) LookupIPAddr(ctx context.Context, host string) (resp []net.IPAddr, result adns.Result, err error) {
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
metricLookupObserve(r.Pkg, "ipaddr", err, start)
|
||||
xlog.WithContext(ctx).Debugx("dns lookup result", err, mlog.Field("pkg", r.Pkg), mlog.Field("type", "ipaddr"), mlog.Field("host", host), mlog.Field("resp", resp), mlog.Field("duration", time.Since(start)))
|
||||
r.log().WithContext(ctx).Debugx("dns lookup result", err,
|
||||
slog.String("type", "ipaddr"),
|
||||
slog.String("host", host),
|
||||
slog.Any("resp", resp),
|
||||
slog.Bool("authentic", result.Authentic),
|
||||
slog.Duration("duration", time.Since(start)),
|
||||
)
|
||||
}()
|
||||
defer resolveErrorHint(&err)
|
||||
|
||||
if !strings.HasSuffix(host, ".") {
|
||||
return nil, ErrRelativeDNSName
|
||||
return nil, result, ErrRelativeDNSName
|
||||
}
|
||||
resp, err = r.resolver().LookupIPAddr(ctx, host)
|
||||
resp, result, err = r.resolver().LookupIPAddr(ctx, host)
|
||||
return
|
||||
}
|
||||
|
||||
func (r StrictResolver) LookupMX(ctx context.Context, name string) (resp []*net.MX, err error) {
|
||||
func (r StrictResolver) LookupMX(ctx context.Context, name string) (resp []*net.MX, result adns.Result, err error) {
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
metricLookupObserve(r.Pkg, "mx", err, start)
|
||||
xlog.WithContext(ctx).Debugx("dns lookup result", err, mlog.Field("pkg", r.Pkg), mlog.Field("type", "mx"), mlog.Field("name", name), mlog.Field("resp", resp), mlog.Field("duration", time.Since(start)))
|
||||
r.log().WithContext(ctx).Debugx("dns lookup result", err,
|
||||
slog.String("type", "mx"),
|
||||
slog.String("name", name),
|
||||
slog.Any("resp", resp),
|
||||
slog.Bool("authentic", result.Authentic),
|
||||
slog.Duration("duration", time.Since(start)),
|
||||
)
|
||||
}()
|
||||
defer resolveErrorHint(&err)
|
||||
|
||||
if !strings.HasSuffix(name, ".") {
|
||||
return nil, ErrRelativeDNSName
|
||||
return nil, result, ErrRelativeDNSName
|
||||
}
|
||||
resp, err = r.resolver().LookupMX(ctx, name)
|
||||
resp, result, err = r.resolver().LookupMX(ctx, name)
|
||||
return
|
||||
}
|
||||
|
||||
func (r StrictResolver) LookupNS(ctx context.Context, name string) (resp []*net.NS, err error) {
|
||||
func (r StrictResolver) LookupNS(ctx context.Context, name string) (resp []*net.NS, result adns.Result, err error) {
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
metricLookupObserve(r.Pkg, "ns", err, start)
|
||||
xlog.WithContext(ctx).Debugx("dns lookup result", err, mlog.Field("pkg", r.Pkg), mlog.Field("type", "ns"), mlog.Field("name", name), mlog.Field("resp", resp), mlog.Field("duration", time.Since(start)))
|
||||
r.log().WithContext(ctx).Debugx("dns lookup result", err,
|
||||
slog.String("type", "ns"),
|
||||
slog.String("name", name),
|
||||
slog.Any("resp", resp),
|
||||
slog.Bool("authentic", result.Authentic),
|
||||
slog.Duration("duration", time.Since(start)),
|
||||
)
|
||||
}()
|
||||
defer resolveErrorHint(&err)
|
||||
|
||||
if !strings.HasSuffix(name, ".") {
|
||||
return nil, ErrRelativeDNSName
|
||||
return nil, result, ErrRelativeDNSName
|
||||
}
|
||||
resp, err = r.resolver().LookupNS(ctx, name)
|
||||
resp, result, err = r.resolver().LookupNS(ctx, name)
|
||||
return
|
||||
}
|
||||
|
||||
func (r StrictResolver) LookupPort(ctx context.Context, network, service string) (resp int, err error) {
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
metricLookupObserve(r.Pkg, "port", err, start)
|
||||
xlog.WithContext(ctx).Debugx("dns lookup result", err, mlog.Field("pkg", r.Pkg), mlog.Field("type", "port"), mlog.Field("network", network), mlog.Field("service", service), mlog.Field("resp", resp), mlog.Field("duration", time.Since(start)))
|
||||
}()
|
||||
|
||||
resp, err = r.resolver().LookupPort(ctx, network, service)
|
||||
return
|
||||
}
|
||||
|
||||
func (r StrictResolver) LookupSRV(ctx context.Context, service, proto, name string) (resp0 string, resp1 []*net.SRV, err error) {
|
||||
func (r StrictResolver) LookupSRV(ctx context.Context, service, proto, name string) (resp0 string, resp1 []*net.SRV, result adns.Result, err error) {
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
metricLookupObserve(r.Pkg, "srv", err, start)
|
||||
xlog.WithContext(ctx).Debugx("dns lookup result", err, mlog.Field("pkg", r.Pkg), mlog.Field("type", "srv"), mlog.Field("service", service), mlog.Field("proto", proto), mlog.Field("name", name), mlog.Field("resp0", resp0), mlog.Field("resp1", resp1), mlog.Field("duration", time.Since(start)))
|
||||
r.log().WithContext(ctx).Debugx("dns lookup result", err,
|
||||
slog.String("type", "srv"),
|
||||
slog.String("service", service),
|
||||
slog.String("proto", proto),
|
||||
slog.String("name", name),
|
||||
slog.String("resp0", resp0),
|
||||
slog.Any("resp1", resp1),
|
||||
slog.Bool("authentic", result.Authentic),
|
||||
slog.Duration("duration", time.Since(start)),
|
||||
)
|
||||
}()
|
||||
defer resolveErrorHint(&err)
|
||||
|
||||
if !strings.HasSuffix(name, ".") {
|
||||
return "", nil, ErrRelativeDNSName
|
||||
return "", nil, result, ErrRelativeDNSName
|
||||
}
|
||||
resp0, resp1, err = r.resolver().LookupSRV(ctx, service, proto, name)
|
||||
resp0, resp1, result, err = r.resolver().LookupSRV(ctx, service, proto, name)
|
||||
return
|
||||
}
|
||||
|
||||
func (r StrictResolver) LookupTXT(ctx context.Context, name string) (resp []string, err error) {
|
||||
func (r StrictResolver) LookupTXT(ctx context.Context, name string) (resp []string, result adns.Result, err error) {
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
metricLookupObserve(r.Pkg, "txt", err, start)
|
||||
xlog.WithContext(ctx).Debugx("dns lookup result", err, mlog.Field("pkg", r.Pkg), mlog.Field("type", "txt"), mlog.Field("name", name), mlog.Field("resp", resp), mlog.Field("duration", time.Since(start)))
|
||||
r.log().WithContext(ctx).Debugx("dns lookup result", err,
|
||||
slog.String("type", "txt"),
|
||||
slog.String("name", name),
|
||||
slog.Any("resp", resp),
|
||||
slog.Bool("authentic", result.Authentic),
|
||||
slog.Duration("duration", time.Since(start)),
|
||||
)
|
||||
}()
|
||||
defer resolveErrorHint(&err)
|
||||
|
||||
if !strings.HasSuffix(name, ".") {
|
||||
return nil, ErrRelativeDNSName
|
||||
return nil, result, ErrRelativeDNSName
|
||||
}
|
||||
resp, err = r.resolver().LookupTXT(ctx, name)
|
||||
resp, result, err = r.resolver().LookupTXT(ctx, name)
|
||||
return
|
||||
}
|
||||
|
||||
func (r StrictResolver) LookupTLSA(ctx context.Context, port int, protocol, host string) (resp []adns.TLSA, result adns.Result, err error) {
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
metricLookupObserve(r.Pkg, "tlsa", err, start)
|
||||
r.log().WithContext(ctx).Debugx("dns lookup result", err,
|
||||
slog.String("type", "tlsa"),
|
||||
slog.Int("port", port),
|
||||
slog.String("protocol", protocol),
|
||||
slog.String("host", host),
|
||||
slog.Any("resp", resp),
|
||||
slog.Bool("authentic", result.Authentic),
|
||||
slog.Duration("duration", time.Since(start)),
|
||||
)
|
||||
}()
|
||||
defer resolveErrorHint(&err)
|
||||
|
||||
if !strings.HasSuffix(host, ".") {
|
||||
return nil, result, ErrRelativeDNSName
|
||||
}
|
||||
resp, result, err = r.resolver().LookupTLSA(ctx, port, protocol, host)
|
||||
return
|
||||
}
|
||||
|
@ -1,39 +1,39 @@
|
||||
// Package dnsbl implements DNS block lists (RFC 5782), for checking incoming messages from sources without reputation.
//
// A DNS block list contains IP addresses that should be blocked. The DNSBL is
// queried using DNS "A" lookups. The DNSBL starts at a "zone", e.g.
// "dnsbl.example". To look up whether an IP address is listed, a DNS name is
// composed: For 10.11.12.13, that name would be "13.12.11.10.dnsbl.example". If
// the lookup returns "record does not exist", the IP is not listed. If an IP
// address is returned, the IP is listed. If an IP is listed, an additional TXT
// lookup is done for more information about the block. IPv6 addresses are also
// looked up with a DNS "A" lookup of a name similar to an IPv4 address, but with
// 4-bit hexadecimal dot-separated characters, in reverse.
//
// The health of a DNSBL "zone" can be checked through a lookup of 127.0.0.1
// (must not be present) and 127.0.0.2 (must be present).
package dnsbl
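A brief sketch of the name composition described above, for an IPv4 address (the helper is illustrative and not part of the package; assumes fmt and net imports):

    // dnsblName builds the DNSBL query name for an IPv4 address in a zone,
    // e.g. 10.11.12.13 in zone "dnsbl.example" becomes "13.12.11.10.dnsbl.example".
    func dnsblName(ip net.IP, zone string) string {
        ip4 := ip.To4()
        if ip4 == nil {
            return "" // IPv6 uses reversed 4-bit hexadecimal nibbles instead; omitted here.
        }
        return fmt.Sprintf("%d.%d.%d.%d.%s", ip4[3], ip4[2], ip4[1], ip4[0], zone)
    }
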
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"net"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
|
||||
"github.com/mjl-/mox/dns"
|
||||
"github.com/mjl-/mox/mlog"
|
||||
"github.com/mjl-/mox/stub"
|
||||
)
|
||||
|
||||
var xlog = mlog.New("dnsbl")
|
||||
|
||||
var (
|
||||
metricLookup = promauto.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "mox_dnsbl_lookup_duration_seconds",
|
||||
Help: "DNSBL lookup",
|
||||
Buckets: []float64{0.001, 0.005, 0.01, 0.05, 0.100, 0.5, 1, 5, 10, 20},
|
||||
},
|
||||
[]string{
|
||||
"zone",
|
||||
"status",
|
||||
},
|
||||
)
|
||||
MetricLookup stub.HistogramVec = stub.HistogramVecIgnore{}
|
||||
)
|
||||
|
||||
var ErrDNS = errors.New("dnsbl: dns error")
|
||||
var ErrDNS = errors.New("dnsbl: dns error") // Temporary error.
|
||||
|
||||
// Status is the result of a DNSBL lookup.
|
||||
type Status string
|
||||
@ -45,12 +45,17 @@ var (
|
||||
)
|
||||
|
||||
// Lookup checks if "ip" occurs in the DNS block list "zone" (e.g. dnsbl.example.org).
|
||||
func Lookup(ctx context.Context, resolver dns.Resolver, zone dns.Domain, ip net.IP) (rstatus Status, rexplanation string, rerr error) {
|
||||
log := xlog.WithContext(ctx)
|
||||
func Lookup(ctx context.Context, elog *slog.Logger, resolver dns.Resolver, zone dns.Domain, ip net.IP) (rstatus Status, rexplanation string, rerr error) {
|
||||
log := mlog.New("dnsbl", elog)
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
metricLookup.WithLabelValues(zone.Name(), string(rstatus)).Observe(float64(time.Since(start)) / float64(time.Second))
|
||||
log.Debugx("dnsbl lookup result", rerr, mlog.Field("zone", zone), mlog.Field("ip", ip), mlog.Field("status", rstatus), mlog.Field("explanation", rexplanation), mlog.Field("duration", time.Since(start)))
|
||||
MetricLookup.ObserveLabels(float64(time.Since(start))/float64(time.Second), zone.Name(), string(rstatus))
|
||||
log.Debugx("dnsbl lookup result", rerr,
|
||||
slog.Any("zone", zone),
|
||||
slog.Any("ip", ip),
|
||||
slog.Any("status", rstatus),
|
||||
slog.String("explanation", rexplanation),
|
||||
slog.Duration("duration", time.Since(start)))
|
||||
}()
|
||||
|
||||
b := &strings.Builder{}
|
||||
@ -82,18 +87,18 @@ func Lookup(ctx context.Context, resolver dns.Resolver, zone dns.Domain, ip net.
|
||||
addr := b.String()
|
||||
|
||||
// ../rfc/5782:175
|
||||
_, err := dns.WithPackage(resolver, "dnsbl").LookupIP(ctx, "ip4", addr)
|
||||
_, _, err := dns.WithPackage(resolver, "dnsbl").LookupIP(ctx, "ip4", addr)
|
||||
if dns.IsNotFound(err) {
|
||||
return StatusPass, "", nil
|
||||
} else if err != nil {
|
||||
return StatusTemperr, "", fmt.Errorf("%w: %s", ErrDNS, err)
|
||||
}
|
||||
|
||||
txts, err := dns.WithPackage(resolver, "dnsbl").LookupTXT(ctx, addr)
|
||||
txts, _, err := dns.WithPackage(resolver, "dnsbl").LookupTXT(ctx, addr)
|
||||
if dns.IsNotFound(err) {
|
||||
return StatusFail, "", nil
|
||||
} else if err != nil {
|
||||
log.Debugx("looking up txt record from dnsbl", err, mlog.Field("addr", addr))
|
||||
log.Debugx("looking up txt record from dnsbl", err, slog.String("addr", addr))
|
||||
return StatusFail, "", nil
|
||||
}
|
||||
return StatusFail, strings.Join(txts, "; "), nil
|
||||
@ -104,16 +109,16 @@ func Lookup(ctx context.Context, resolver dns.Resolver, zone dns.Domain, ip net.
|
||||
// Users of a DNSBL should periodically check if the DNSBL is still operating
|
||||
// properly.
|
||||
// For temporary errors, ErrDNS is returned.
|
||||
func CheckHealth(ctx context.Context, resolver dns.Resolver, zone dns.Domain) (rerr error) {
|
||||
log := xlog.WithContext(ctx)
|
||||
func CheckHealth(ctx context.Context, elog *slog.Logger, resolver dns.Resolver, zone dns.Domain) (rerr error) {
|
||||
log := mlog.New("dnsbl", elog)
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
log.Debugx("dnsbl healthcheck result", rerr, mlog.Field("zone", zone), mlog.Field("duration", time.Since(start)))
|
||||
log.Debugx("dnsbl healthcheck result", rerr, slog.Any("zone", zone), slog.Duration("duration", time.Since(start)))
|
||||
}()
|
||||
|
||||
// ../rfc/5782:355
|
||||
status1, _, err1 := Lookup(ctx, resolver, zone, net.IPv4(127, 0, 0, 1))
|
||||
status2, _, err2 := Lookup(ctx, resolver, zone, net.IPv4(127, 0, 0, 2))
|
||||
status1, _, err1 := Lookup(ctx, log.Logger, resolver, zone, net.IPv4(127, 0, 0, 1))
|
||||
status2, _, err2 := Lookup(ctx, log.Logger, resolver, zone, net.IPv4(127, 0, 0, 2))
|
||||
if status1 == StatusPass && status2 == StatusFail {
|
||||
return nil
|
||||
} else if status1 == StatusFail {
|
||||
|
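A hedged usage sketch of the updated CheckHealth signature shown above (zone name and logger choice are illustrative; assumes the context, log/slog, dns and dnsbl imports):

    func checkBlocklistHealth(ctx context.Context) error {
        resolver := dns.StrictResolver{Pkg: "dnsbl"}
        // Healthy per RFC 5782: 127.0.0.1 must not be listed, 127.0.0.2 must be.
        return dnsbl.CheckHealth(ctx, slog.Default(), resolver, dns.Domain{ASCII: "dnsbl.example"})
    }
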
@ -6,10 +6,12 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/mjl-/mox/dns"
|
||||
"github.com/mjl-/mox/mlog"
|
||||
)
|
||||
|
||||
func TestDNSBL(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
log := mlog.New("dnsbl", nil)
|
||||
|
||||
resolver := dns.MockResolver{
|
||||
A: map[string][]string{
|
||||
@ -23,7 +25,7 @@ func TestDNSBL(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
if status, expl, err := Lookup(ctx, resolver, dns.Domain{ASCII: "example.com"}, net.ParseIP("10.0.0.1")); err != nil {
|
||||
if status, expl, err := Lookup(ctx, log.Logger, resolver, dns.Domain{ASCII: "example.com"}, net.ParseIP("10.0.0.1")); err != nil {
|
||||
t.Fatalf("lookup: %v", err)
|
||||
} else if status != StatusFail {
|
||||
t.Fatalf("lookup, got status %v, expected fail", status)
|
||||
@ -31,7 +33,7 @@ func TestDNSBL(t *testing.T) {
|
||||
t.Fatalf("lookup, got explanation %q", expl)
|
||||
}
|
||||
|
||||
if status, expl, err := Lookup(ctx, resolver, dns.Domain{ASCII: "example.com"}, net.ParseIP("2001:db8:1:2:3:4:567:89ab")); err != nil {
|
||||
if status, expl, err := Lookup(ctx, log.Logger, resolver, dns.Domain{ASCII: "example.com"}, net.ParseIP("2001:db8:1:2:3:4:567:89ab")); err != nil {
|
||||
t.Fatalf("lookup: %v", err)
|
||||
} else if status != StatusFail {
|
||||
t.Fatalf("lookup, got status %v, expected fail", status)
|
||||
@ -39,17 +41,17 @@ func TestDNSBL(t *testing.T) {
|
||||
t.Fatalf("lookup, got explanation %q", expl)
|
||||
}
|
||||
|
||||
if status, _, err := Lookup(ctx, resolver, dns.Domain{ASCII: "example.com"}, net.ParseIP("10.0.0.2")); err != nil {
|
||||
if status, _, err := Lookup(ctx, log.Logger, resolver, dns.Domain{ASCII: "example.com"}, net.ParseIP("10.0.0.2")); err != nil {
|
||||
t.Fatalf("lookup: %v", err)
|
||||
} else if status != StatusPass {
|
||||
t.Fatalf("lookup, got status %v, expected pass", status)
|
||||
}
|
||||
|
||||
// ../rfc/5782:357
|
||||
if err := CheckHealth(ctx, resolver, dns.Domain{ASCII: "example.com"}); err != nil {
|
||||
if err := CheckHealth(ctx, log.Logger, resolver, dns.Domain{ASCII: "example.com"}); err != nil {
|
||||
t.Fatalf("dnsbl not healthy: %v", err)
|
||||
}
|
||||
if err := CheckHealth(ctx, resolver, dns.Domain{ASCII: "example.org"}); err == nil {
|
||||
if err := CheckHealth(ctx, log.Logger, resolver, dns.Domain{ASCII: "example.org"}); err == nil {
|
||||
t.Fatalf("bad dnsbl is healthy")
|
||||
}
|
||||
|
||||
@ -58,7 +60,7 @@ func TestDNSBL(t *testing.T) {
|
||||
"1.0.0.127.example.com.": {"127.0.0.2"}, // Should not be present in healthy dnsbl.
|
||||
},
|
||||
}
|
||||
if err := CheckHealth(ctx, unhealthyResolver, dns.Domain{ASCII: "example.com"}); err == nil {
|
||||
if err := CheckHealth(ctx, log.Logger, unhealthyResolver, dns.Domain{ASCII: "example.com"}); err == nil {
|
||||
t.Fatalf("bad dnsbl is healthy")
|
||||
}
|
||||
}
|
||||
|
30 dnsbl/examples_test.go (new file)
@ -0,0 +1,30 @@
|
||||
package dnsbl_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
"log/slog"
|
||||
"net"
|
||||
|
||||
"github.com/mjl-/mox/dns"
|
||||
"github.com/mjl-/mox/dnsbl"
|
||||
)
|
||||
|
||||
func ExampleLookup() {
|
||||
ctx := context.Background()
|
||||
resolver := dns.StrictResolver{}
|
||||
|
||||
// Lookup if ip 127.0.0.2 is in spamhaus blocklist at zone sbl.spamhaus.org.
|
||||
status, explanation, err := dnsbl.Lookup(ctx, slog.Default(), resolver, dns.Domain{ASCII: "sbl.spamhaus.org"}, net.ParseIP("127.0.0.2"))
|
||||
if err != nil {
|
||||
log.Fatalf("dnsbl lookup: %v", err)
|
||||
}
|
||||
switch status {
|
||||
case dnsbl.StatusTemperr:
|
||||
log.Printf("dnsbl lookup, temporary dns error: %v", err)
|
||||
case dnsbl.StatusPass:
|
||||
log.Printf("dnsbl lookup, ip not listed")
|
||||
case dnsbl.StatusFail:
|
||||
log.Printf("dnsbl lookup, ip listed: %s", explanation)
|
||||
}
|
||||
}
|
@ -1,16 +1,15 @@
|
||||
version: '3.7'
|
||||
services:
|
||||
mox:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: Dockerfile.moximaptest
|
||||
volumes:
|
||||
- ./testdata/imaptest/config:/mox/config
|
||||
- ./testdata/imaptest/data:/mox/data
|
||||
- ./testdata/imaptest/imaptest.mbox:/mox/imaptest.mbox
|
||||
- ./testdata/imaptest/config:/mox/config:z
|
||||
- ./testdata/imaptest/data:/mox/data:z
|
||||
- ./testdata/imaptest/imaptest.mbox:/mox/imaptest.mbox:z
|
||||
working_dir: /mox
|
||||
tty: true # For job control with set -m.
|
||||
command: sh -c 'set -m; mox serve & sleep 1; echo testtest | mox setaccountpassword mjl@mox.example; fg'
|
||||
command: sh -c 'set -m; mox serve & sleep 1; echo testtest | mox setaccountpassword mjl; fg'
|
||||
healthcheck:
|
||||
test: netstat -nlt | grep ':1143 '
|
||||
interval: 1s
|
||||
@ -24,7 +23,7 @@ services:
|
||||
command: host=mox port=1143 'user=mjl@mox.example' pass=testtest mbox=/imaptest/imaptest.mbox
|
||||
working_dir: /imaptest
|
||||
volumes:
|
||||
- ./testdata/imaptest:/imaptest
|
||||
- ./testdata/imaptest:/imaptest:z
|
||||
depends_on:
|
||||
mox:
|
||||
condition: service_healthy
|
||||
|
@ -1,18 +1,47 @@
|
||||
version: '3.7'
|
||||
services:
|
||||
moxmail:
|
||||
# todo: understand why hostname and/or domainname don't have any influence on the reverse dns set up by docker, requiring us to use our own /etc/resolv.conf...
|
||||
hostname: moxmail1.mox1.example
|
||||
domainname: mox1.example
|
||||
build:
|
||||
dockerfile: Dockerfile.moxmail
|
||||
context: testdata/integration
|
||||
# We run integration_test.go from this container, it connects to the other mox instances.
|
||||
test:
|
||||
hostname: test.mox1.example
|
||||
image: mox_integration_test
|
||||
# We add our cfssl-generated CA (which is in the repo) and acme pebble CA
|
||||
# (generated each time pebble starts) to the list of trusted CA's, so the TLS
|
||||
# dials in integration_test.go succeed.
|
||||
command: ["sh", "-c", "set -ex; cat /integration/tmp-pebble-ca.pem /integration/tls/ca.pem >>/etc/ssl/certs/ca-certificates.crt; go test -tags integration"]
|
||||
volumes:
|
||||
- ./.go:/.go
|
||||
- ./testdata/integration/resolv.conf:/etc/resolv.conf
|
||||
- .:/mox
|
||||
- ./.go:/.go:z
|
||||
- ./testdata/integration/resolv.conf:/etc/resolv.conf:z
|
||||
- ./testdata/integration:/integration:z
|
||||
- ./testdata/integration/moxsubmit.conf:/etc/moxsubmit.conf:z
|
||||
- .:/mox:z
|
||||
environment:
|
||||
GOCACHE: /.go/.cache/go-build
|
||||
depends_on:
|
||||
dns:
|
||||
condition: service_healthy
|
||||
# moxmail2 depends on moxacmepebble, we connect to both.
|
||||
moxmail2:
|
||||
condition: service_healthy
|
||||
postfixmail:
|
||||
condition: service_healthy
|
||||
localserve:
|
||||
condition: service_healthy
|
||||
moxacmepebblealpn:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
mailnet1:
|
||||
ipv4_address: 172.28.1.50
|
||||
|
||||
# First mox instance that uses ACME with pebble.
|
||||
moxacmepebble:
|
||||
hostname: moxacmepebble.mox1.example
|
||||
domainname: mox1.example
|
||||
image: mox_integration_moxmail
|
||||
environment:
|
||||
MOX_UID: "${MOX_UID}"
|
||||
command: ["sh", "-c", "/integration/moxacmepebble.sh"]
|
||||
volumes:
|
||||
- ./testdata/integration/resolv.conf:/etc/resolv.conf:z
|
||||
- ./testdata/integration:/integration:z
|
||||
healthcheck:
|
||||
test: netstat -nlt | grep ':25 '
|
||||
interval: 1s
|
||||
@ -21,15 +50,87 @@ services:
|
||||
depends_on:
|
||||
dns:
|
||||
condition: service_healthy
|
||||
postfixmail:
|
||||
acmepebble:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
mailnet1:
|
||||
ipv4_address: 172.28.1.10
|
||||
mailnet2:
|
||||
ipv4_address: 172.28.2.10
|
||||
mailnet3:
|
||||
ipv4_address: 172.28.3.10
|
||||
|
||||
# Second mox instance, with TLS cert/keys from files.
|
||||
moxmail2:
|
||||
hostname: moxmail2.mox2.example
|
||||
domainname: mox2.example
|
||||
image: mox_integration_moxmail
|
||||
environment:
|
||||
MOX_UID: "${MOX_UID}"
|
||||
command: ["sh", "-c", "/integration/moxmail2.sh"]
|
||||
volumes:
|
||||
- ./testdata/integration/resolv.conf:/etc/resolv.conf:z
|
||||
- ./testdata/integration:/integration:z
|
||||
healthcheck:
|
||||
test: netstat -nlt | grep ':25 '
|
||||
interval: 1s
|
||||
timeout: 1s
|
||||
retries: 10
|
||||
depends_on:
|
||||
dns:
|
||||
condition: service_healthy
|
||||
acmepebble:
|
||||
condition: service_healthy
|
||||
# moxacmepebble creates tmp-pebble-ca.pem, needed by moxmail2 to trust the certificates offered by moxacmepebble.
|
||||
moxacmepebble:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
mailnet1:
|
||||
ipv4_address: 172.28.1.20
|
||||
|
||||
# Third mox instance that uses ACME with pebble and has ALPN enabled.
|
||||
moxacmepebblealpn:
|
||||
hostname: moxacmepebblealpn.mox1.example
|
||||
domainname: mox1.example
|
||||
image: mox_integration_moxmail
|
||||
environment:
|
||||
MOX_UID: "${MOX_UID}"
|
||||
command: ["sh", "-c", "/integration/moxacmepebblealpn.sh"]
|
||||
volumes:
|
||||
- ./testdata/integration/resolv.conf:/etc/resolv.conf:z
|
||||
- ./testdata/integration:/integration:z
|
||||
healthcheck:
|
||||
test: netstat -nlt | grep ':25 '
|
||||
interval: 1s
|
||||
timeout: 1s
|
||||
retries: 10
|
||||
depends_on:
|
||||
dns:
|
||||
condition: service_healthy
|
||||
acmepebble:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
mailnet1:
|
||||
ipv4_address: 172.28.1.80
|
||||
|
||||
localserve:
|
||||
hostname: localserve.mox1.example
|
||||
domainname: mox1.example
|
||||
image: mox_integration_moxmail
|
||||
command: ["sh", "-c", "set -e; chmod o+r /etc/resolv.conf; mox -checkconsistency localserve -ip 172.28.1.60"]
|
||||
volumes:
|
||||
- ./.go:/.go:z
|
||||
- ./testdata/integration/resolv.conf:/etc/resolv.conf:z
|
||||
- .:/mox:z
|
||||
environment:
|
||||
GOCACHE: /.go/.cache/go-build
|
||||
healthcheck:
|
||||
test: netstat -nlt | grep ':1025 '
|
||||
interval: 1s
|
||||
timeout: 1s
|
||||
retries: 10
|
||||
depends_on:
|
||||
dns:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
mailnet1:
|
||||
ipv4_address: 172.28.1.60
|
||||
|
||||
postfixmail:
|
||||
hostname: postfixmail.postfix.example
|
||||
@ -39,8 +140,8 @@ services:
|
||||
context: testdata/integration
|
||||
volumes:
|
||||
# todo: figure out how to mount files with a uid that the process in the container can read...
|
||||
- ./testdata/integration/resolv.conf:/etc/resolv.conf
|
||||
command: ["sh", "-c", "set -e; chmod o+r /etc/resolv.conf; (echo 'maillog_file = /dev/stdout'; echo 'mydestination = $$myhostname, localhost.$$mydomain, localhost, $$mydomain') >>/etc/postfix/main.cf; echo 'root: moxtest1@mox1.example' >>/etc/postfix/aliases; newaliases; postfix start-fg"]
|
||||
- ./testdata/integration/resolv.conf:/etc/resolv.conf:z
|
||||
command: ["sh", "-c", "set -e; chmod o+r /etc/resolv.conf; (echo 'maillog_file = /dev/stdout'; echo 'mydestination = $$myhostname, localhost.$$mydomain, localhost, $$mydomain'; echo 'smtp_tls_security_level = may') >>/etc/postfix/main.cf; echo 'root: postfix@mox1.example' >>/etc/postfix/aliases; newaliases; postfix start-fg"]
|
||||
healthcheck:
|
||||
test: netstat -nlt | grep ':25 '
|
||||
interval: 1s
|
||||
@ -51,7 +152,7 @@ services:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
mailnet1:
|
||||
ipv4_address: 172.28.1.20
|
||||
ipv4_address: 172.28.1.70
|
||||
|
||||
dns:
|
||||
hostname: dns.example
|
||||
@ -60,9 +161,11 @@ services:
|
||||
# todo: figure out how to build from dockerfile with empty context without creating empty dirs in file system.
|
||||
context: testdata/integration
|
||||
volumes:
|
||||
- ./testdata/integration/resolv.conf:/etc/resolv.conf
|
||||
- ./testdata/integration:/integration
|
||||
command: ["sh", "-c", "set -e; chmod o+r /etc/resolv.conf; install -m 640 -o unbound /integration/unbound.conf /integration/*.zone /etc/unbound/; unbound -d -p -v"]
|
||||
- ./testdata/integration/resolv.conf:/etc/resolv.conf:z
|
||||
- ./testdata/integration:/integration:z
|
||||
# We start with a base example.zone, but moxacmepebble appends its records,
|
||||
# followed by moxmail2. They restart unbound after appending records.
|
||||
command: ["sh", "-c", "set -ex; ls -l /etc/resolv.conf; chmod o+r /etc/resolv.conf; install -m 640 -o unbound /integration/unbound.conf /etc/unbound/; chmod 755 /integration; chmod 644 /integration/*.zone; cp /integration/example.zone /integration/example-integration.zone; ls -ld /integration /integration/reverse.zone; unbound -d -p -v"]
|
||||
healthcheck:
|
||||
test: netstat -nlu | grep '172.28.1.30:53 '
|
||||
interval: 1s
|
||||
@ -72,6 +175,31 @@ services:
|
||||
mailnet1:
|
||||
ipv4_address: 172.28.1.30
|
||||
|
||||
# pebble is a small acme server useful for testing. It creates a new CA
|
||||
# certificate each time it starts, so we go through some trouble to configure the
|
||||
# certificate in moxacmepebble and moxmail2.
|
||||
acmepebble:
|
||||
hostname: acmepebble.example
|
||||
image: docker.io/letsencrypt/pebble:v2.3.1@sha256:fc5a537bf8fbc7cc63aa24ec3142283aa9b6ba54529f86eb8ff31fbde7c5b258
|
||||
volumes:
|
||||
- ./testdata/integration/resolv.conf:/etc/resolv.conf:z
|
||||
- ./testdata/integration:/integration:z
|
||||
command: ["sh", "-c", "set -ex; mount; ls -l /etc/resolv.conf; chmod o+r /etc/resolv.conf; pebble -config /integration/pebble-config.json"]
|
||||
ports:
|
||||
- 14000:14000 # ACME port
|
||||
- 15000:15000 # Management port
|
||||
healthcheck:
|
||||
test: netstat -nlt | grep ':14000 '
|
||||
interval: 1s
|
||||
timeout: 1s
|
||||
retries: 10
|
||||
depends_on:
|
||||
dns:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
mailnet1:
|
||||
ipv4_address: 172.28.1.40
|
||||
|
||||
networks:
|
||||
mailnet1:
|
||||
driver: bridge
|
||||
@ -79,15 +207,3 @@ networks:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: "172.28.1.0/24"
|
||||
mailnet2:
|
||||
driver: bridge
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: "172.28.2.0/24"
|
||||
mailnet3:
|
||||
driver: bridge
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: "172.28.3.0/24"
|
||||
|
@ -10,23 +10,39 @@
|
||||
# After following the quickstart instructions you can start mox:
|
||||
#
|
||||
# docker-compose up
|
||||
#
|
||||
#
|
||||
# If you want to run "mox localserve", you could start it like this:
|
||||
#
|
||||
# docker run \
|
||||
# -p 127.0.0.1:25:1025 \
|
||||
# -p 127.0.0.1:465:1465 \
|
||||
# -p 127.0.0.1:587:1587 \
|
||||
# -p 127.0.0.1:993:1993 \
|
||||
# -p 127.0.0.1:143:1143 \
|
||||
# -p 127.0.0.1:443:1443 \
|
||||
# -p 127.0.0.1:80:1080 \
|
||||
# r.xmox.nl/mox:latest mox localserve -ip 0.0.0.0
|
||||
#
|
||||
# The -ip flag ensures connections to the published ports make it to mox, and it
|
||||
# prevents listening on ::1 (IPv6 is not enabled in docker by default).
|
||||
|
||||
version: '3.7'
|
||||
services:
|
||||
mox:
|
||||
# Replace latest with the version you want to run.
|
||||
image: docker.io/moxmail/mox:latest
|
||||
# Replace "latest" with the version you want to run, see https://r.xmox.nl/r/mox/.
|
||||
# Include the @sha256:... digest to ensure you get the listed image.
|
||||
image: r.xmox.nl/mox:latest
|
||||
environment:
|
||||
- MOX_DOCKER=yes # Quickstart won't try to write systemd service file.
|
||||
# Mox needs host networking because it needs access to the IPs of the
|
||||
# machine, and the IPs of incoming connections for spam filtering.
|
||||
network_mode: 'host'
|
||||
volumes:
|
||||
- ./config:/mox/config
|
||||
- ./data:/mox/data
|
||||
- ./config:/mox/config:z
|
||||
- ./data:/mox/data:z
|
||||
# web is optional but recommended to bind in, useful for serving static files with
|
||||
# the webserver.
|
||||
- ./web:/mox/web
|
||||
- ./web:/mox/web:z
|
||||
working_dir: /mox
|
||||
restart: on-failure
|
||||
healthcheck:
|
||||
|
@ -36,19 +36,20 @@ echo Building with $goversion and $alpineversion
|
||||
# needed because the platform in "FROM --platform <image>" in the first stage
|
||||
# seems to override the TARGET* variables.
|
||||
test -d empty || mkdir empty
|
||||
(podman manifest rm moxmail/mox:$moxversion-$goversion-$alpineversion || exit 0)
|
||||
((rm -r tmp/gomod || exit 0); mkdir -p tmp/gomod) # fetch modules through goproxy just once
|
||||
(podman manifest rm mox:$moxversion-$goversion-$alpineversion || exit 0)
|
||||
for platform in $(echo $platforms | sed 's/,/ /g'); do
|
||||
goos=$(echo $platform | sed 's,/.*$,,')
|
||||
goarch=$(echo $platform | sed 's,^.*/,,')
|
||||
podman build --platform $platform -f Dockerfile.release -v $HOME/go/pkg/sumdb:/go/pkg/sumbd:ro --build-arg goos=$goos --build-arg goarch=$goarch --build-arg moxversion=$moxversion --manifest moxmail/mox:$moxversion-$goversion-$alpineversion empty
|
||||
podman build --platform $platform -f Dockerfile.release -v $HOME/go/pkg/sumdb:/go/pkg/sumbd:ro -v $PWD/tmp/gomod:/go/pkg/mod --build-arg goos=$goos --build-arg goarch=$goarch --build-arg moxversion=$moxversion --manifest mox:$moxversion-$goversion-$alpineversion empty
|
||||
done
|
||||
|
||||
cat <<EOF
|
||||
|
||||
# Suggested commands to push images:
|
||||
|
||||
podman manifest push --all moxmail/mox:$moxversion-$goversion-$alpineversion docker.io/moxmail/mox:$moxversion-$goversion-$alpineversion
|
||||
podman manifest push --all mox:$moxversion-$goversion-$alpineversion \$host/mox:$moxversion-$goversion-$alpineversion
|
||||
|
||||
podman manifest push --all moxmail/mox:$moxversion-$goversion-$alpineversion docker.io/moxmail/mox:$moxversion
|
||||
podman manifest push --all moxmail/mox:$moxversion-$goversion-$alpineversion docker.io/moxmail/mox:latest
|
||||
podman manifest push --all mox:$moxversion-$goversion-$alpineversion \$host/mox:$moxversion
|
||||
podman manifest push --all mox:$moxversion-$goversion-$alpineversion \$host/mox:latest
|
||||
EOF
|
||||
|
119 dsn/dsn.go
@ -5,21 +5,17 @@ package dsn
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"mime/multipart"
|
||||
"net/textproto"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/mjl-/mox/dkim"
|
||||
"github.com/mjl-/mox/message"
|
||||
"github.com/mjl-/mox/mlog"
|
||||
"github.com/mjl-/mox/mox-"
|
||||
"github.com/mjl-/mox/smtp"
|
||||
)
|
||||
|
||||
@ -45,6 +41,18 @@ type Message struct {
|
||||
// Message subject header, e.g. describing mail delivery failure.
|
||||
Subject string
|
||||
|
||||
MessageID string
|
||||
|
||||
// References header, with Message-ID of original message this DSN is about. So
|
||||
// mail user-agents will thread the DSN with the original message.
|
||||
References string
|
||||
|
||||
// For message submitted with FUTURERELEASE SMTP extension. Value is either "for;"
|
||||
// plus original interval in seconds or "until;" plus original UTC RFC3339
|
||||
// date-time.
|
||||
FutureReleaseRequest string
|
||||
// ../rfc/4865:315
|
||||
|
||||
// Human-readable text explaining the failure. Line endings should be
|
||||
// bare newlines, not \r\n. They are converted to \r\n when composing.
|
||||
TextBody string
|
||||
@ -91,9 +99,10 @@ type Recipient struct {
|
||||
Action Action
|
||||
|
||||
// Enhanced status code. First digit indicates permanent or temporary
|
||||
// error. If the string contains more than just a status, that
|
||||
// additional text is added as comment when composing a DSN.
|
||||
// error.
|
||||
Status string
|
||||
// For additional details, included in comment.
|
||||
StatusComment string
|
||||
|
||||
// Optional fields.
|
||||
// Original intended recipient of message. Used with the DSN extensions ORCPT
|
||||
@ -105,10 +114,10 @@ type Recipient struct {
|
||||
// deliveries.
|
||||
RemoteMTA NameIP
|
||||
|
||||
// If RemoteMTA is present, DiagnosticCode is from remote. When
|
||||
// creating a DSN, additional text in the string will be added to the
|
||||
// DSN as comment.
|
||||
DiagnosticCode string
|
||||
// DiagnosticCodeSMTP are the full SMTP response lines, space separated. The marshaled
|
||||
// form starts with "smtp; ", this value does not.
|
||||
DiagnosticCodeSMTP string
|
||||
|
||||
LastAttemptDate time.Time
|
||||
FinalLogID string
|
||||
|
||||
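To make the per-recipient fields above concrete, a sketch of a Recipient for a permanently failed delivery (all values illustrative; Failed is assumed to be one of the package's Action constants, the smtp/dns types are as used elsewhere in this diff, and the time import is assumed):

    rcpt := dsn.Recipient{
        FinalRecipient:     smtp.Path{Localpart: "mjl", IPDomain: dns.IPDomain{Domain: dns.Domain{ASCII: "remote.example"}}},
        Action:             dsn.Failed,
        Status:             "5.1.1",
        StatusComment:      "no such user",
        RemoteMTA:          dsn.NameIP{Name: "mx1.remote.example"},
        DiagnosticCodeSMTP: "550 5.1.1 user unknown",
        LastAttemptDate:    time.Now(),
    }
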
@ -126,8 +135,8 @@ type Recipient struct {
|
||||
// supports smtputf8. This influences the message media (sub)types used for the
|
||||
// DSN.
|
||||
//
|
||||
// DKIM signatures are added if DKIM signing is configured for the "from" domain.
|
||||
func (m *Message) Compose(log *mlog.Log, smtputf8 bool) ([]byte, error) {
|
||||
// Caller may want to add DKIM-Signature headers.
|
||||
func (m *Message) Compose(log mlog.Log, smtputf8 bool) ([]byte, error) {
|
||||
// ../rfc/3462:119
|
||||
// ../rfc/3464:377
|
||||
// We'll make a multipart/report with 2 or 3 parts:
|
||||
@ -158,7 +167,13 @@ func (m *Message) Compose(log *mlog.Log, smtputf8 bool) ([]byte, error) {
|
||||
header("From", fmt.Sprintf("<%s>", m.From.XString(smtputf8))) // todo: would be good to have a local ascii-only name for this address.
|
||||
header("To", fmt.Sprintf("<%s>", m.To.XString(smtputf8))) // todo: we could just leave this out if it has utf-8 and remote does not support utf-8.
|
||||
header("Subject", m.Subject)
|
||||
header("Message-Id", fmt.Sprintf("<%s>", mox.MessageIDGen(smtputf8)))
|
||||
if m.MessageID == "" {
|
||||
return nil, fmt.Errorf("missing message-id")
|
||||
}
|
||||
header("Message-Id", fmt.Sprintf("<%s>", m.MessageID))
|
||||
if m.References != "" {
|
||||
header("References", m.References)
|
||||
}
|
||||
header("Date", time.Now().Format(message.RFC5322Z))
|
||||
header("MIME-Version", "1.0")
|
||||
mp := multipart.NewWriter(msgw)
|
||||
@ -221,6 +236,10 @@ func (m *Message) Compose(log *mlog.Log, smtputf8 bool) ([]byte, error) {
|
||||
status("Received-From-MTA", fmt.Sprintf("dns;%s (%s)", m.ReceivedFromMTA.Name, smtp.AddressLiteral(m.ReceivedFromMTA.ConnIP)))
|
||||
}
|
||||
status("Arrival-Date", m.ArrivalDate.Format(message.RFC5322Z)) // ../rfc/3464:758
|
||||
if m.FutureReleaseRequest != "" {
|
||||
// ../rfc/4865:320
|
||||
status("Future-Release-Request", m.FutureReleaseRequest)
|
||||
}
|
||||
|
||||
// Then per-recipient fields. ../rfc/3464:769
|
||||
// todo: should also handle other address types. at least recognize "unknown". Probably just store this field. ../rfc/3464:819
|
||||
@ -253,26 +272,23 @@ func (m *Message) Compose(log *mlog.Log, smtputf8 bool) ([]byte, error) {
|
||||
st = "2.0.0"
|
||||
}
|
||||
}
|
||||
var rest string
|
||||
st, rest = codeLine(st)
|
||||
statusLine := st
|
||||
if rest != "" {
|
||||
statusLine += " (" + rest + ")"
|
||||
if r.StatusComment != "" {
|
||||
statusLine += " (" + r.StatusComment + ")"
|
||||
}
|
||||
status("Status", statusLine) // ../rfc/3464:975
|
||||
if !r.RemoteMTA.IsZero() {
|
||||
// ../rfc/3464:1015
|
||||
status("Remote-MTA", fmt.Sprintf("dns;%s (%s)", r.RemoteMTA.Name, smtp.AddressLiteral(r.RemoteMTA.IP)))
|
||||
s := "dns;" + r.RemoteMTA.Name
|
||||
if len(r.RemoteMTA.IP) > 0 {
|
||||
s += " (" + smtp.AddressLiteral(r.RemoteMTA.IP) + ")"
|
||||
}
|
||||
status("Remote-MTA", s)
|
||||
}
|
||||
// Presence of Diagnostic-Code indicates the code is from Remote-MTA. ../rfc/3464:1053
|
||||
if r.DiagnosticCode != "" {
|
||||
diagCode, rest := codeLine(r.DiagnosticCode)
|
||||
diagLine := diagCode
|
||||
if rest != "" {
|
||||
diagLine += " (" + rest + ")"
|
||||
}
|
||||
// ../rfc/6533:589
|
||||
status("Diagnostic-Code", "smtp; "+diagLine)
|
||||
if r.DiagnosticCodeSMTP != "" {
|
||||
// ../rfc/3461:1342 ../rfc/6533:589
|
||||
status("Diagnostic-Code", "smtp; "+r.DiagnosticCodeSMTP)
|
||||
}
|
||||
if !r.LastAttemptDate.IsZero() {
|
||||
status("Last-Attempt-Date", r.LastAttemptDate.Format(message.RFC5322Z)) // ../rfc/3464:1076
|
||||
@ -295,10 +311,8 @@ func (m *Message) Compose(log *mlog.Log, smtputf8 bool) ([]byte, error) {
|
||||
headers = m.Original
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
// This is a whole message. We still only include the headers.
|
||||
// todo: include the whole body.
|
||||
}
|
||||
// Else, this is a whole message. We still only include the headers. todo: include the whole body.
|
||||
|
||||
origHdr := textproto.MIMEHeader{}
|
||||
if smtputf8 {
|
||||
@ -326,10 +340,7 @@ func (m *Message) Compose(log *mlog.Log, smtputf8 bool) ([]byte, error) {
|
||||
data := base64.StdEncoding.EncodeToString(headers)
|
||||
for len(data) > 0 {
|
||||
line := data
|
||||
n := len(line)
|
||||
if n > 78 {
|
||||
n = 78
|
||||
}
|
||||
n := min(len(line), 76) // ../rfc/2045:1372
|
||||
line, data = data[:n], data[n:]
|
||||
if _, err := origp.Write([]byte(line + "\r\n")); err != nil {
|
||||
return nil, err
|
||||
@ -351,17 +362,6 @@ func (m *Message) Compose(log *mlog.Log, smtputf8 bool) ([]byte, error) {
|
||||
}
|
||||
|
||||
data := msgw.w.Bytes()
|
||||
|
||||
fd := m.From.IPDomain.Domain
|
||||
confDom, _ := mox.Conf.Domain(fd)
|
||||
if len(confDom.DKIM.Sign) > 0 {
|
||||
if dkimHeaders, err := dkim.Sign(context.Background(), m.From.Localpart, fd, confDom.DKIM, smtputf8, bytes.NewReader(data)); err != nil {
|
||||
log.Errorx("dsn: dkim sign for domain, returning unsigned dsn", err, mlog.Field("domain", fd))
|
||||
} else {
|
||||
data = append([]byte(dkimHeaders), data...)
|
||||
}
|
||||
}
|
||||
|
||||
return data, nil
|
||||
}
|
||||
|
||||
@ -378,34 +378,3 @@ func (w *errWriter) Write(buf []byte) (int, error) {
|
||||
w.err = err
|
||||
return n, err
|
||||
}
|
||||
|
||||
// split a line into enhanced status code and rest.
|
||||
func codeLine(s string) (string, string) {
|
||||
t := strings.SplitN(s, " ", 2)
|
||||
l := strings.Split(t[0], ".")
|
||||
if len(l) != 3 {
|
||||
return "", s
|
||||
}
|
||||
for i, e := range l {
|
||||
_, err := strconv.ParseInt(e, 10, 32)
|
||||
if err != nil {
|
||||
return "", s
|
||||
}
|
||||
if i == 0 && len(e) != 1 {
|
||||
return "", s
|
||||
}
|
||||
}
|
||||
|
||||
var rest string
|
||||
if len(t) == 2 {
|
||||
rest = t[1]
|
||||
}
|
||||
return t[0], rest
|
||||
}
|
||||
|
||||
// HasCode returns whether line starts with an enhanced SMTP status code.
|
||||
func HasCode(line string) bool {
|
||||
// ../rfc/3464:986
|
||||
ecode, _ := codeLine(line)
|
||||
return ecode != ""
|
||||
}
|
||||
|
@ -2,7 +2,6 @@ package dsn

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"net"
@ -11,14 +10,14 @@ import (
	"testing"
	"time"

	"github.com/mjl-/mox/dkim"
	"github.com/mjl-/mox/dns"
	"github.com/mjl-/mox/message"
	"github.com/mjl-/mox/mlog"
	"github.com/mjl-/mox/mox-"
	"github.com/mjl-/mox/smtp"
)

var pkglog = mlog.New("dsn", nil)

func xparseDomain(s string) dns.Domain {
	d, err := dns.ParseDomain(s)
	if err != nil {
@ -33,7 +32,7 @@ func xparseIPDomain(s string) dns.IPDomain {

func tparseMessage(t *testing.T, data []byte, nparts int) (*Message, *message.Part) {
	t.Helper()
	m, p, err := Parse(bytes.NewReader(data))
	m, p, err := Parse(pkglog.Logger, bytes.NewReader(data))
	if err != nil {
		t.Fatalf("parsing dsn: %v", err)
	}
@ -51,8 +50,8 @@ func tcheckType(t *testing.T, p *message.Part, mt, mst, cte string) {
	if !strings.EqualFold(p.MediaSubType, mst) {
		t.Fatalf("got mediasubtype %q, expected %q", p.MediaSubType, mst)
	}
	if !strings.EqualFold(p.ContentTransferEncoding, cte) {
		t.Fatalf("got content-transfer-encoding %q, expected %q", p.ContentTransferEncoding, cte)
	if !(cte == "" && p.ContentTransferEncoding == nil || cte != "" && p.ContentTransferEncoding != nil && strings.EqualFold(cte, *p.ContentTransferEncoding)) {
		t.Fatalf("got content-transfer-encoding %v, expected %v", p.ContentTransferEncoding, cte)
	}
}

@ -72,7 +71,7 @@ func tcompareReader(t *testing.T, r io.Reader, exp []byte) {
}

func TestDSN(t *testing.T) {
	log := mlog.New("dsn")
	log := mlog.New("dsn", nil)

	now := time.Now()

@ -80,14 +79,16 @@ func TestDSN(t *testing.T) {
	m := Message{
		SMTPUTF8: false,

		From: smtp.Path{Localpart: "postmaster", IPDomain: xparseIPDomain("mox.example")},
		To: smtp.Path{Localpart: "mjl", IPDomain: xparseIPDomain("remote.example")},
		Subject: "dsn",
		TextBody: "delivery failure\n",
		From: smtp.Path{Localpart: "postmaster", IPDomain: xparseIPDomain("mox.example")},
		To: smtp.Path{Localpart: "mjl", IPDomain: xparseIPDomain("remote.example")},
		Subject: "dsn",
		MessageID: "test@localhost",
		TextBody: "delivery failure\n",

		ReportingMTA: "mox.example",
		ReceivedFromMTA: smtp.Ehlo{Name: xparseIPDomain("relay.example"), ConnIP: net.ParseIP("10.10.10.10")},
		ArrivalDate: now,
		ReportingMTA: "mox.example",
		ReceivedFromMTA: smtp.Ehlo{Name: xparseIPDomain("relay.example"), ConnIP: net.ParseIP("10.10.10.10")},
		ArrivalDate: now,
		FutureReleaseRequest: "for;123",

		Recipients: []Recipient{
			{
@ -104,6 +105,7 @@ func TestDSN(t *testing.T) {
	if err != nil {
		t.Fatalf("composing dsn: %v", err)
	}

	pmsg, part := tparseMessage(t, msgbuf, 3)
	tcheckType(t, part, "multipart", "report", "")
	tcheckType(t, &part.Parts[0], "text", "plain", "7bit")
@ -127,35 +129,15 @@ func TestDSN(t *testing.T) {
	tcompareReader(t, part.Parts[2].Reader(), m.Original)
	tcompare(t, pmsg.Recipients[0].FinalRecipient, m.Recipients[0].FinalRecipient)

	// Test for valid DKIM signature.
	mox.Context = context.Background()
	mox.ConfigStaticPath = "../testdata/dsn/mox.conf"
	mox.MustLoadConfig(false)
	msgbuf, err = m.Compose(log, false)
	if err != nil {
		t.Fatalf("composing utf-8 dsn with utf-8 support: %v", err)
	}
	resolver := &dns.MockResolver{
		TXT: map[string][]string{
			"testsel._domainkey.mox.example.": {"v=DKIM1;h=sha256;t=s;p=MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3ZId3ys70VFspp/VMFaxMOrNjHNPg04NOE1iShih16b3Ex7hHBOgC1UvTGSmrMlbCB1OxTXkvf6jW6S4oYRnZYVNygH6zKUwYYhaSaGIg1xA/fDn+IgcTRyLoXizMUgUgpTGyxhNrwIIWv+i7jjbs3TKpP3NU4owQ/rxowmSNqg+fHIF1likSvXvljYS" + "jaFXXnWfYibW7TdDCFFpN4sB5o13+as0u4vLw6MvOi59B1tLype1LcHpi1b9PfxNtznTTdet3kL0paxIcWtKHT0LDPUos8YYmiPa5nGbUqlC7d+4YT2jQPvwGxCws1oo2Tw6nj1UaihneYGAyvEky49FBwIDAQAB"},
		},
	}
	results, err := dkim.Verify(context.Background(), resolver, false, func(*dkim.Sig) error { return nil }, bytes.NewReader(msgbuf), false)
	if err != nil {
		t.Fatalf("dkim verify: %v", err)
	}
	if len(results) != 1 || results[0].Status != dkim.StatusPass {
		t.Fatalf("dkim result not pass, %#v", results)
	}

	// An utf-8 message.
	m = Message{
		SMTPUTF8: true,

		From: smtp.Path{Localpart: "postmæster", IPDomain: xparseIPDomain("møx.example")},
		To: smtp.Path{Localpart: "møx", IPDomain: xparseIPDomain("remøte.example")},
		Subject: "dsn¡",
		TextBody: "delivery failure¿\n",
		From: smtp.Path{Localpart: "postmæster", IPDomain: xparseIPDomain("møx.example")},
		To: smtp.Path{Localpart: "møx", IPDomain: xparseIPDomain("remøte.example")},
		Subject: "dsn¡",
		MessageID: "test@localhost",
		TextBody: "delivery failure¿\n",

		ReportingMTA: "mox.example",
		ReceivedFromMTA: smtp.Ehlo{Name: xparseIPDomain("reläy.example"), ConnIP: net.ParseIP("10.10.10.10")},
@ -210,34 +192,3 @@ func TestDSN(t *testing.T) {
	tcheckType(t, &part.Parts[1], "message", "global-delivery-status", "8bit")
	tcompare(t, pmsg.Recipients[0].FinalRecipient, m.Recipients[0].FinalRecipient)
}

func TestCode(t *testing.T) {
	testCodeLine := func(line, ecode, rest string) {
		t.Helper()
		e, r := codeLine(line)
		if e != ecode || r != rest {
			t.Fatalf("codeLine %q: got %q %q, expected %q %q", line, e, r, ecode, rest)
		}
	}
	testCodeLine("4.0.0", "4.0.0", "")
	testCodeLine("4.0.0 more", "4.0.0", "more")
	testCodeLine("other", "", "other")
	testCodeLine("other more", "", "other more")

	testHasCode := func(line string, exp bool) {
		t.Helper()
		got := HasCode(line)
		if got != exp {
			t.Fatalf("HasCode %q: got %v, expected %v", line, got, exp)
		}
	}
	testHasCode("4.0.0", true)
	testHasCode("5.7.28", true)
	testHasCode("10.0.0", false) // first number must be single digit.
	testHasCode("4.1.1 more", true)
	testHasCode("other ", false)
	testHasCode("4.2.", false)
	testHasCode("4.2. ", false)
	testHasCode(" 4.2.4", false)
	testHasCode(" 4.2.4 ", false)
}
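The test above calls the updated Parse, which now takes a logger as its first argument. For reference, a minimal sketch of the same call outside the test, using the standard library *slog.Logger (the file name is hypothetical):

```go
package main

import (
	"fmt"
	"log/slog"
	"os"

	"github.com/mjl-/mox/dsn"
)

func main() {
	// Hypothetical DSN message stored on disk; *os.File implements io.ReaderAt.
	f, err := os.Open("bounce.eml")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	m, part, err := dsn.Parse(slog.Default(), f)
	if err != nil {
		panic(err)
	}
	fmt.Println("recipients:", len(m.Recipients), "mime parts:", len(part.Parts))
}
```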
54 dsn/parse.go
@ -4,6 +4,7 @@ import (
	"bufio"
	"fmt"
	"io"
	"log/slog"
	"net/textproto"
	"strconv"
	"strings"
@ -11,7 +12,9 @@ import (

	"github.com/mjl-/mox/dns"
	"github.com/mjl-/mox/message"
	"github.com/mjl-/mox/mlog"
	"github.com/mjl-/mox/smtp"
	"slices"
)

// Parse reads a DSN message.
@ -22,17 +25,19 @@ import (
// The first return value is the machine-parsed DSN message. The second value is
// the entire MIME multipart message. Use its Parts field to access the
// human-readable text and optional original message/headers.
func Parse(r io.ReaderAt) (*Message, *message.Part, error) {
func Parse(elog *slog.Logger, r io.ReaderAt) (*Message, *message.Part, error) {
	log := mlog.New("dsn", elog)

	// DSNs can mix and match subtypes with and without utf-8. ../rfc/6533:441

	part, err := message.Parse(r)
	part, err := message.Parse(log.Logger, false, r)
	if err != nil {
		return nil, nil, fmt.Errorf("parsing message: %v", err)
	}
	if part.MediaType != "MULTIPART" || part.MediaSubType != "REPORT" {
		return nil, nil, fmt.Errorf(`message has content-type %q, must have "message/report"`, strings.ToLower(part.MediaType+"/"+part.MediaSubType))
	}
	err = part.Walk()
	err = part.Walk(log.Logger, nil)
	if err != nil {
		return nil, nil, fmt.Errorf("parsing message parts: %v", err)
	}
@ -61,7 +66,11 @@ func Parse(r io.ReaderAt) (*Message, *message.Part, error) {
		if err != nil {
			return smtp.Path{}, fmt.Errorf("parsing domain: %v", err)
		}
		return smtp.Path{Localpart: smtp.Localpart(a.User), IPDomain: dns.IPDomain{Domain: d}}, nil
		lp, err := smtp.ParseLocalpart(a.User)
		if err != nil {
			return smtp.Path{}, fmt.Errorf("parsing localpart: %v", err)
		}
		return smtp.Path{Localpart: lp, IPDomain: dns.IPDomain{Domain: d}}, nil
	}
	if len(part.Envelope.From) == 1 {
		m.From, err = addressPath(part.Envelope.From[0])
@ -76,7 +85,7 @@ func Parse(r io.ReaderAt) (*Message, *message.Part, error) {
		}
	}
	m.Subject = part.Envelope.Subject
	buf, err := io.ReadAll(p0.Reader())
	buf, err := io.ReadAll(p0.ReaderUTF8OrBinary())
	if err != nil {
		return nil, nil, fmt.Errorf("reading human-readable text part: %v", err)
	}
@ -209,19 +218,21 @@ func parseRecipientHeader(mr *textproto.Reader, utf8 bool) (Recipient, error) {
	case "Action":
		a := Action(strings.ToLower(v))
		actions := []Action{Failed, Delayed, Delivered, Relayed, Expanded}
		var ok bool
		for _, x := range actions {
			if a == x {
				ok = true
				break
			}
		}
		if !ok {
		if slices.Contains(actions, a) {
			r.Action = a
		} else {
			err = fmt.Errorf("unrecognized action %q", v)
		}
	case "Status":
		// todo: parse the enhanced status code?
		r.Status = v
		t := strings.SplitN(v, "(", 2)
		v = strings.TrimSpace(v)
		if len(t) == 2 && strings.HasSuffix(v, ")") {
			r.Status = strings.TrimSpace(t[0])
			r.StatusComment = strings.TrimSpace(strings.TrimSuffix(t[1], ")"))
		}

	case "Remote-Mta":
		r.RemoteMTA = NameIP{Name: v}
	case "Diagnostic-Code":
@ -233,7 +244,7 @@ func parseRecipientHeader(mr *textproto.Reader, utf8 bool) (Recipient, error) {
		} else if len(t) != 2 {
			err = fmt.Errorf("missing semicolon to separate diagnostic-type from code")
		} else {
			r.DiagnosticCode = strings.TrimSpace(t[1])
			r.DiagnosticCodeSMTP = strings.TrimSpace(t[1])
		}
	case "Last-Attempt-Date":
		r.LastAttemptDate, err = parseDateTime(v)
@ -306,17 +317,18 @@ func parseAddress(s string, utf8 bool) (smtp.Path, error) {
		}
	}
	// todo: more proper parser
	t = strings.SplitN(s, "@", 2)
	if len(t) != 2 || t[0] == "" || t[1] == "" {
	t = strings.Split(s, "@")
	if len(t) == 1 {
		return smtp.Path{}, fmt.Errorf("invalid email address")
	}
	d, err := dns.ParseDomain(t[1])
	d, err := dns.ParseDomain(t[len(t)-1])
	if err != nil {
		return smtp.Path{}, fmt.Errorf("parsing domain: %v", err)
	}
	var lp string
	var esc string
	for _, c := range t[0] {
	lead := strings.Join(t[:len(t)-1], "@")
	for _, c := range lead {
		if esc == "" && c == '\\' || esc == `\` && (c == 'x' || c == 'X') || esc == `\x` && c == '{' {
			if c == 'X' {
				c = 'x'
@ -340,7 +352,11 @@ func parseAddress(s string, utf8 bool) (smtp.Path, error) {
	if esc != "" {
		return smtp.Path{}, fmt.Errorf("parsing localpart: unfinished embedded unicode char")
	}
	p := smtp.Path{Localpart: smtp.Localpart(lp), IPDomain: dns.IPDomain{Domain: d}}
	localpart, err := smtp.ParseLocalpart(lp)
	if err != nil {
		return smtp.Path{}, fmt.Errorf("parsing localpart: %v", err)
	}
	p := smtp.Path{Localpart: localpart, IPDomain: dns.IPDomain{Domain: d}}
	return p, nil
}
325 examples.go (new file)
@ -0,0 +1,325 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/mjl-/sconf"
|
||||
|
||||
"github.com/mjl-/mox/config"
|
||||
"github.com/mjl-/mox/mox-"
|
||||
"github.com/mjl-/mox/smtp"
|
||||
"github.com/mjl-/mox/webhook"
|
||||
)
|
||||
|
||||
func cmdExample(c *cmd) {
|
||||
c.params = "[name]"
|
||||
c.help = `List available examples, or print a specific example.`
|
||||
|
||||
args := c.Parse()
|
||||
if len(args) > 1 {
|
||||
c.Usage()
|
||||
}
|
||||
|
||||
var match func() string
|
||||
for _, ex := range examples {
|
||||
if len(args) == 0 {
|
||||
fmt.Println(ex.Name)
|
||||
} else if args[0] == ex.Name {
|
||||
match = ex.Get
|
||||
}
|
||||
}
|
||||
if len(args) == 0 {
|
||||
return
|
||||
}
|
||||
if match == nil {
|
||||
log.Fatalln("not found")
|
||||
}
|
||||
fmt.Print(match())
|
||||
}
|
||||
|
||||
func cmdConfigExample(c *cmd) {
|
||||
c.params = "[name]"
|
||||
c.help = `List available config examples, or print a specific example.`
|
||||
|
||||
args := c.Parse()
|
||||
if len(args) > 1 {
|
||||
c.Usage()
|
||||
}
|
||||
|
||||
var match func() string
|
||||
for _, ex := range configExamples {
|
||||
if len(args) == 0 {
|
||||
fmt.Println(ex.Name)
|
||||
} else if args[0] == ex.Name {
|
||||
match = ex.Get
|
||||
}
|
||||
}
|
||||
if len(args) == 0 {
|
||||
return
|
||||
}
|
||||
if match == nil {
|
||||
log.Fatalln("not found")
|
||||
}
|
||||
fmt.Print(match())
|
||||
}
|
||||
|
||||
var configExamples = []struct {
|
||||
Name string
|
||||
Get func() string
|
||||
}{
|
||||
{
|
||||
"webhandlers",
|
||||
func() string {
|
||||
const webhandlers = `# Snippet of domains.conf to configure WebDomainRedirects and WebHandlers.
|
||||
|
||||
# Redirect all requests for mox.example to https://www.mox.example.
|
||||
WebDomainRedirects:
|
||||
mox.example: www.mox.example
|
||||
|
||||
# Each request is matched against these handlers until one matches and serves it.
|
||||
WebHandlers:
|
||||
-
|
||||
# Redirect all plain http requests to https, leaving path, query strings, etc
|
||||
# intact. When the request is already to https, the destination URL would have the
|
||||
# same scheme, host and path, causing this redirect handler to not match the
|
||||
# request (and not cause a redirect loop) and the webserver to serve the request
|
||||
# with a later handler.
|
||||
LogName: redirhttps
|
||||
Domain: www.mox.example
|
||||
PathRegexp: ^/
|
||||
# Could leave DontRedirectPlainHTTP at false if it wasn't for this being an
|
||||
# example for doing this redirect.
|
||||
DontRedirectPlainHTTP: true
|
||||
WebRedirect:
|
||||
BaseURL: https://www.mox.example
|
||||
-
|
||||
# The name of the handler, used in logging and metrics.
|
||||
LogName: staticmjl
|
||||
# With ACME configured, each configured domain will automatically get a TLS
|
||||
# certificate on first request.
|
||||
Domain: www.mox.example
|
||||
PathRegexp: ^/who/mjl/
|
||||
WebStatic:
|
||||
StripPrefix: /who/mjl
|
||||
# Requested path /who/mjl/inferno/ resolves to local web/mjl/inferno.
|
||||
# If a directory contains an index.html, it is served when a directory is requested.
|
||||
Root: web/mjl
|
||||
# With ListFiles true, if a directory does not contain an index.html, the contents are listed.
|
||||
ListFiles: true
|
||||
ResponseHeaders:
|
||||
X-Mox: hi
|
||||
-
|
||||
LogName: redir
|
||||
Domain: www.mox.example
|
||||
PathRegexp: ^/redir/a/b/c
|
||||
# Don't redirect from plain HTTP to HTTPS.
|
||||
DontRedirectPlainHTTP: true
|
||||
WebRedirect:
|
||||
# Just change the domain and add query string set fragment. No change to scheme.
|
||||
# Path will start with /redir/a/b/c (and whatever came after) because no
|
||||
# OrigPathRegexp+ReplacePath is set.
|
||||
BaseURL: //moxest.example?q=1#frag
|
||||
# Default redirection is 308 - Permanent Redirect.
|
||||
StatusCode: 307
|
||||
-
|
||||
LogName: oldnew
|
||||
Domain: www.mox.example
|
||||
PathRegexp: ^/old/
|
||||
WebRedirect:
|
||||
# Replace path, leaving rest of URL intact.
|
||||
OrigPathRegexp: ^/old/(.*)
|
||||
ReplacePath: /new/$1
|
||||
-
|
||||
LogName: app
|
||||
Domain: www.mox.example
|
||||
PathRegexp: ^/app/
|
||||
WebForward:
|
||||
# Strip the path matched by PathRegexp before forwarding the request. So original
|
||||
# request /app/api becomes just /api.
|
||||
StripPath: true
|
||||
# URL of backend, where requests are forwarded to. The path in the URL is kept,
|
||||
# so for incoming request URL /app/api, the outgoing request URL has path /app-v2/api.
|
||||
# Requests are made with Go's net/http DefaultTransporter, including using
|
||||
# HTTP_PROXY and HTTPS_PROXY environment variables.
|
||||
URL: http://127.0.0.1:8900/app-v2/
|
||||
# Add headers to response.
|
||||
ResponseHeaders:
|
||||
X-Frame-Options: deny
|
||||
X-Content-Type-Options: nosniff
|
||||
`
|
||||
// Parse just so we know we have the syntax right.
|
||||
// todo: ideally we would have a complete config file and parse it fully.
|
||||
var conf struct {
|
||||
WebDomainRedirects map[string]string
|
||||
WebHandlers []config.WebHandler
|
||||
}
|
||||
err := sconf.Parse(strings.NewReader(webhandlers), &conf)
|
||||
xcheckf(err, "parsing webhandlers example")
|
||||
return webhandlers
|
||||
},
|
||||
},
|
||||
{
|
||||
"transport",
|
||||
func() string {
|
||||
const moxconf = `# Snippet for mox.conf, defining a transport called Example that connects on the
|
||||
# SMTP submission with TLS port 465 ("submissions"), authenticating with
|
||||
# SCRAM-SHA-256-PLUS (other providers may not support SCRAM-SHA-256-PLUS, but they
|
||||
# typically do support the older CRAM-MD5).:
|
||||
|
||||
# Transports are mechanisms for delivering messages. Transports can be referenced
|
||||
# from Routes in accounts, domains and the global configuration. There is always
|
||||
# an implicit/fallback delivery transport doing direct delivery with SMTP from the
|
||||
# outgoing message queue. Transports are typically only configured when using
|
||||
# smarthosts, i.e. when delivering through another SMTP server. Zero or one
|
||||
# transport methods must be set in a transport, never multiple. When using an
|
||||
# external party to send email for a domain, keep in mind you may have to add
|
||||
# their IP address to your domain's SPF record, and possibly additional DKIM
|
||||
# records. (optional)
|
||||
Transports:
|
||||
Example:
|
||||
# Submission SMTP over a TLS connection to submit email to a remote queue.
|
||||
# (optional)
|
||||
Submissions:
|
||||
# Host name to connect to and for verifying its TLS certificate.
|
||||
Host: smtp.example.com
|
||||
|
||||
# If set, authentication credentials for the remote server. (optional)
|
||||
Auth:
|
||||
Username: user@example.com
|
||||
Password: test1234
|
||||
Mechanisms:
|
||||
# Allowed authentication mechanisms. Defaults to SCRAM-SHA-256-PLUS,
|
||||
# SCRAM-SHA-256, SCRAM-SHA-1-PLUS, SCRAM-SHA-1, CRAM-MD5. Not included by default:
|
||||
# PLAIN. Specify the strongest mechanism known to be implemented by the server to
|
||||
# prevent mechanism downgrade attacks. (optional)
|
||||
|
||||
- SCRAM-SHA-256-PLUS
|
||||
`
|
||||
|
||||
const domainsconf = `# Snippet for domains.conf, specifying a route that sends through the transport:
|
||||
|
||||
# Routes for delivering outgoing messages through the queue. Each delivery attempt
|
||||
# evaluates account routes, domain routes and finally these global routes. The
|
||||
# transport of the first matching route is used in the delivery attempt. If no
|
||||
# routes match, which is the default with no configured routes, messages are
|
||||
# delivered directly from the queue. (optional)
|
||||
Routes:
|
||||
-
|
||||
Transport: Example
|
||||
`
|
||||
|
||||
var static struct {
|
||||
Transports map[string]config.Transport
|
||||
}
|
||||
var dynamic struct {
|
||||
Routes []config.Route
|
||||
}
|
||||
err := sconf.Parse(strings.NewReader(moxconf), &static)
|
||||
xcheckf(err, "parsing moxconf example")
|
||||
err = sconf.Parse(strings.NewReader(domainsconf), &dynamic)
|
||||
xcheckf(err, "parsing domainsconf example")
|
||||
return moxconf + "\n\n" + domainsconf
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
var exampleTime = time.Date(2024, time.March, 27, 0, 0, 0, 0, time.UTC)
|
||||
|
||||
var examples = []struct {
|
||||
Name string
|
||||
Get func() string
|
||||
}{
|
||||
{
|
||||
"webhook-outgoing-delivered",
|
||||
func() string {
|
||||
v := webhook.Outgoing{
|
||||
Version: 0,
|
||||
Event: webhook.EventDelivered,
|
||||
QueueMsgID: 101,
|
||||
FromID: base64.RawURLEncoding.EncodeToString([]byte("0123456789abcdef")),
|
||||
MessageID: "<QnxzgulZK51utga6agH_rg@mox.example>",
|
||||
Subject: "subject of original message",
|
||||
WebhookQueued: exampleTime,
|
||||
Extra: map[string]string{},
|
||||
SMTPCode: smtp.C250Completed,
|
||||
}
|
||||
return "Example webhook HTTP POST JSON body for successful outgoing delivery:\n\n\t" + formatJSON(v)
|
||||
},
|
||||
},
|
||||
{
|
||||
"webhook-outgoing-dsn-failed",
|
||||
func() string {
|
||||
v := webhook.Outgoing{
|
||||
Version: 0,
|
||||
Event: webhook.EventFailed,
|
||||
DSN: true,
|
||||
Suppressing: true,
|
||||
QueueMsgID: 102,
|
||||
FromID: base64.RawURLEncoding.EncodeToString([]byte("0123456789abcdef")),
|
||||
MessageID: "<QnxzgulZK51utga6agH_rg@mox.example>",
|
||||
Subject: "subject of original message",
|
||||
WebhookQueued: exampleTime,
|
||||
Extra: map[string]string{"userid": "456"},
|
||||
Error: "timeout connecting to host",
|
||||
SMTPCode: smtp.C554TransactionFailed,
|
||||
SMTPEnhancedCode: "5." + smtp.SeNet4Other0,
|
||||
}
|
||||
return `Example webhook HTTP POST JSON body for failed delivery based on incoming DSN
|
||||
message, with custom extra data fields (from original submission), and adding address to the suppression list:
|
||||
|
||||
` + formatJSON(v)
|
||||
},
|
||||
},
|
||||
{
|
||||
"webhook-incoming-basic",
|
||||
func() string {
|
||||
v := webhook.Incoming{
|
||||
Version: 0,
|
||||
From: []webhook.NameAddress{{Address: "mox@localhost"}},
|
||||
To: []webhook.NameAddress{{Address: "mjl@localhost"}},
|
||||
Subject: "hi",
|
||||
MessageID: "<QnxzgulZK51utga6agH_rg@mox.example>",
|
||||
Date: &exampleTime,
|
||||
Text: "hello world ☺\n",
|
||||
Structure: webhook.Structure{
|
||||
ContentType: "text/plain",
|
||||
ContentTypeParams: map[string]string{"charset": "utf-8"},
|
||||
DecodedSize: int64(len("hello world ☺\r\n")),
|
||||
Parts: []webhook.Structure{},
|
||||
},
|
||||
Meta: webhook.IncomingMeta{
|
||||
MsgID: 201,
|
||||
MailFrom: "mox@localhost",
|
||||
MailFromValidated: false,
|
||||
MsgFromValidated: true,
|
||||
RcptTo: "mjl@localhost",
|
||||
DKIMVerifiedDomains: []string{"localhost"},
|
||||
RemoteIP: "127.0.0.1",
|
||||
Received: exampleTime.Add(3 * time.Second),
|
||||
MailboxName: "Inbox",
|
||||
Automated: false,
|
||||
},
|
||||
}
|
||||
return "Example JSON body for webhooks for incoming delivery of basic message:\n\n\t" + formatJSON(v)
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
func formatJSON(v any) string {
|
||||
nv, _ := mox.FillNil(reflect.ValueOf(v))
|
||||
v = nv.Interface()
|
||||
var b bytes.Buffer
|
||||
enc := json.NewEncoder(&b)
|
||||
enc.SetIndent("\t", "\t")
|
||||
enc.SetEscapeHTML(false)
|
||||
err := enc.Encode(v)
|
||||
xcheckf(err, "encoding to json")
|
||||
return b.String()
|
||||
}
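The webhook structures printed by these examples also describe what a receiver gets. A minimal sketch of an HTTP handler decoding the incoming-delivery JSON body shown above (the endpoint path and listen address are hypothetical):

```go
package main

import (
	"encoding/json"
	"log"
	"net/http"

	"github.com/mjl-/mox/webhook"
)

func main() {
	http.HandleFunc("/webhook/incoming", func(w http.ResponseWriter, r *http.Request) {
		var in webhook.Incoming
		if err := json.NewDecoder(r.Body).Decode(&in); err != nil {
			http.Error(w, "bad json", http.StatusBadRequest)
			return
		}
		// Fields as shown in the webhook-incoming-basic example above.
		log.Printf("incoming message %d in %q, subject %q", in.Meta.MsgID, in.Meta.MailboxName, in.Subject)
		w.WriteHeader(http.StatusNoContent)
	})
	log.Fatal(http.ListenAndServe("127.0.0.1:8888", nil))
}
```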
|
27 export.go
@ -1,31 +1,33 @@
package main

import (
	"context"
	"log"
	"path/filepath"
	"time"

	"github.com/mjl-/bstore"

	"github.com/mjl-/mox/mlog"
	"github.com/mjl-/mox/store"
)

func cmdExportMaildir(c *cmd) {
	c.params = "dst-dir account-path [mailbox]"
	c.params = "[-single] dst-dir account-path [mailbox]"
	c.help = `Export one or all mailboxes from an account in maildir format.

Export bypasses a running mox instance. It opens the account mailbox/message
database file directly. This may block if a running mox instance also has the
database open, e.g. for IMAP connections. To export from a running instance, use
the accounts web page.
the accounts web page or webmail.
`
	var single bool
	c.flag.BoolVar(&single, "single", false, "export single mailbox, without any children. disabled if mailbox isn't specified.")
	args := c.Parse()
	xcmdExport(false, args, c)
	xcmdExport(false, single, args, c)
}

func cmdExportMbox(c *cmd) {
	c.params = "dst-dir account-path [mailbox]"
	c.params = "[-single] dst-dir account-path [mailbox]"
	c.help = `Export messages from one or all mailboxes in an account in mbox format.

Using mbox is not recommended. Maildir is a better format.
@ -33,17 +35,19 @@ Using mbox is not recommended. Maildir is a better format.
Export bypasses a running mox instance. It opens the account mailbox/message
database file directly. This may block if a running mox instance also has the
database open, e.g. for IMAP connections. To export from a running instance, use
the accounts web page.
the accounts web page or webmail.

For mbox export, "mboxrd" is used where message lines starting with the magic
"From " string are escaped by prepending a >. All ">*From " are escaped,
otherwise reconstructing the original could lose a ">".
`
	var single bool
	c.flag.BoolVar(&single, "single", false, "export single mailbox, without any children. disabled if mailbox isn't specified.")
	args := c.Parse()
	xcmdExport(true, args, c)
	xcmdExport(true, single, args, c)
}

func xcmdExport(mbox bool, args []string, c *cmd) {
func xcmdExport(mbox, single bool, args []string, c *cmd) {
	if len(args) != 2 && len(args) != 3 {
		c.Usage()
	}
@ -53,10 +57,13 @@ func xcmdExport(mbox bool, args []string, c *cmd) {
	var mailbox string
	if len(args) == 3 {
		mailbox = args[2]
	} else {
		single = false
	}

	dbpath := filepath.Join(accountDir, "index.db")
	db, err := bstore.Open(dbpath, &bstore.Options{Timeout: 5 * time.Second, Perm: 0660}, store.Message{}, store.Recipient{}, store.Mailbox{})
	opts := bstore.Options{Timeout: 5 * time.Second, Perm: 0660, RegisterLogger: c.log.Logger}
	db, err := bstore.Open(context.Background(), dbpath, &opts, store.DBTypes...)
	xcheckf(err, "open database %q", dbpath)
	defer func() {
		if err := db.Close(); err != nil {
@ -65,7 +72,7 @@ func xcmdExport(mbox bool, args []string, c *cmd) {
	}()

	a := store.DirArchiver{Dir: dst}
	err = store.ExportMessages(mlog.New("export"), db, accountDir, a, !mbox, mailbox)
	err = store.ExportMessages(context.Background(), c.log, db, accountDir, a, !mbox, mailbox, nil, !single)
	xcheckf(err, "exporting messages")
	err = a.Close()
	xcheckf(err, "closing archiver")
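For context, a rough standalone sketch of the export path as the command above now performs it, using the same call signatures; the account directory, destination directory, and logger name are hypothetical:

```go
package main

import (
	"context"
	"path/filepath"
	"time"

	"github.com/mjl-/bstore"

	"github.com/mjl-/mox/mlog"
	"github.com/mjl-/mox/store"
)

func main() {
	clog := mlog.New("export-sketch", nil)
	accountDir := "data/accounts/test0" // hypothetical account directory
	dbpath := filepath.Join(accountDir, "index.db")

	opts := bstore.Options{Timeout: 5 * time.Second, Perm: 0660, RegisterLogger: clog.Logger}
	db, err := bstore.Open(context.Background(), dbpath, &opts, store.DBTypes...)
	if err != nil {
		panic(err)
	}
	defer db.Close()

	a := store.DirArchiver{Dir: "export-dst"}
	// Maildir export of all mailboxes, recursively, mirroring the command above.
	if err := store.ExportMessages(context.Background(), clog, db, accountDir, a, true, "", nil, true); err != nil {
		panic(err)
	}
	if err := a.Close(); err != nil {
		panic(err)
	}
}
```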
10 genapidoc.sh (new executable file)
@ -0,0 +1,10 @@
#!/bin/sh
set -eu

# we rewrite some dmarcrpt and tlsrpt enums into untyped strings: real-world
# reports have invalid values, and our loose Go typed strings accept all values,
# but we don't want the typescript runtime checker to fail on those unrecognized
# values.
(cd webadmin && CGO_ENABLED=0 go run ../vendor/github.com/mjl-/sherpadoc/cmd/sherpadoc/*.go -adjust-function-names none -rename 'config Domain ConfigDomain,dmarc Policy DMARCPolicy,mtasts MX STSMX,tlsrptdb Record TLSReportRecord,tlsrptdb SuppressAddress TLSRPTSuppressAddress,dmarcrpt DKIMResult string,dmarcrpt SPFResult string,dmarcrpt SPFDomainScope string,dmarcrpt DMARCResult string,dmarcrpt PolicyOverride string,dmarcrpt Alignment string,dmarcrpt Disposition string,tlsrpt PolicyType string,tlsrpt ResultType string' Admin) >webadmin/api.json
(cd webaccount && CGO_ENABLED=0 go run ../vendor/github.com/mjl-/sherpadoc/cmd/sherpadoc/*.go -adjust-function-names none Account) >webaccount/api.json
(cd webmail && CGO_ENABLED=0 go run ../vendor/github.com/mjl-/sherpadoc/cmd/sherpadoc/*.go -adjust-function-names none Webmail) >webmail/api.json
94 gendoc.sh
@ -1,33 +1,33 @@
#!/bin/sh
#!/usr/bin/env sh

# ./doc.go
(
cat <<EOF
/*
Command mox is a modern full-featured open source secure mail server for
Command mox is a modern, secure, full-featured, open source mail server for
low-maintenance self-hosted email.

- Quick and easy to set up with quickstart and automatic TLS with ACME and
  Let's Encrypt.
- IMAP4 with extensions for accessing email.
- SMTP with SPF, DKIM, DMARC, DNSBL, MTA-STS, TLSRPT for exchanging email.
- Reputation-based and content-based spam filtering.
- Internationalized email.
- Admin web interface.

Mox is started with the "serve" subcommand, but mox also has many other
subcommands.

# Commands

EOF

./mox 2>&1 | sed 's/^\( *\|usage: \)/\t/'

cat <<EOF

Many commands talk to a running mox instance, through the ctl file in the data
directory. Specify the configuration file (that holds the path to the data
directory) through the -config flag or MOXCONF environment variable.
Many of those commands talk to a running mox instance, through the ctl file in
the data directory. Specify the configuration file (that holds the path to the
data directory) through the -config flag or MOXCONF environment variable.

Commands that don't talk to a running mox instance are often for
testing/debugging email functionality. For example for parsing an email message,
or looking up SPF/DKIM/DMARC records.

Below is the usage information as printed by the command when started without
any parameters. Followed by the help and usage information for each command.


# Usage

EOF

./mox 2>&1 | sed -e 's/^usage: */ /' -e 's/^ */ /'
echo
./mox helpall 2>&1

cat <<EOF
@ -39,42 +39,70 @@ EOF
)>doc.go
gofmt -w doc.go

# ./config/doc.go
(
cat <<EOF
/*
Package config holds the configuration file definitions for mox.conf (Static)
and domains.conf (Dynamic).
Package config holds the configuration file definitions.

Mox uses two config files:

1. mox.conf, also called the static configuration file.
2. domains.conf, also called the dynamic configuration file.

The static configuration file is never reloaded during the lifetime of a
running mox instance. After changes to mox.conf, mox must be restarted for the
changes to take effect.

The dynamic configuration file is reloaded automatically when it changes.
If the file contains an error after the change, the reload is aborted and the
previous version remains active.

Below are "empty" config files, generated from the config file definitions in
the source code, along with comments explaining the fields. Fields named "x" are
placeholders for user-chosen map keys.

# sconf

The config files are in "sconf" format. Properties of sconf files:

- Indentation with tabs only.
- "#" as first non-whitespace character makes the line a comment. Lines with a
  value cannot also have a comment.
- Values don't have syntax indicating their type. For example, strings are
  not quoted/escaped and can never span multiple lines.
- Fields that are optional can be left out completely. But the value of an
  optional field may itself have required fields.

See https://pkg.go.dev/github.com/mjl-/sconf for details.

Annotated empty/default configuration files you could use as a starting point
for your mox.conf and domains.conf, as generated by "mox config
describe-static" and "mox config describe-domains":

# mox.conf

EOF
./mox config describe-static | sed 's/^/\t/'
./mox config describe-static | sed 's/^/ /'

cat <<EOF

# domains.conf

EOF
./mox config describe-domains | sed 's/^/\t/'
./mox config describe-domains | sed 's/^/ /'

cat <<EOF

# Examples

Mox includes configuration files to illustrate common setups. You can see these
examples with "mox example", and print a specific example with "mox example
<name>". Below are all examples included in mox.
examples with "mox config example", and print a specific example with "mox
config example <name>". Below are all examples included in mox.

EOF

for ex in $(./mox example); do
for ex in $(./mox config example); do
echo '# Example '$ex
echo
./mox example $ex | sed 's/^/\t/'
./mox config example $ex | sed 's/^/ /'
echo
done

@ -86,3 +114,7 @@ package config
EOF
)>config/doc.go
gofmt -w config/doc.go

# ./webapi/doc.go
./webapi/gendoc.sh >webapi/doc.go
gofmt -w webapi/doc.go
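The config/doc.go text above describes the sconf format ("Key: value" lines, no quoting, tabs for nesting). A minimal sketch of parsing such a snippet with the github.com/mjl-/sconf package, as the examples in this diff do; the snippet and struct fields here are purely illustrative:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/mjl-/sconf"
)

func main() {
	// Hypothetical flat sconf snippet: "Key: value" lines without quoting;
	// nested sections (not shown here) are indented with tabs only.
	const snippet = `Hostname: mail.example.com
Port: 465
`
	var conf struct {
		Hostname string
		Port     int
	}
	if err := sconf.Parse(strings.NewReader(snippet), &conf); err != nil {
		panic(err)
	}
	fmt.Printf("%s:%d\n", conf.Hostname, conf.Port)
}
```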
7 genlicenses.sh (new executable file)
@ -0,0 +1,7 @@
#!/bin/sh
rm -r licenses
set -e
for p in $(cd vendor && find . -iname '*license*' -or -iname '*licence*' -or -iname '*notice*' -or -iname '*patent*'); do
	(set +e; mkdir -p $(dirname licenses/$p))
	cp vendor/$p licenses/$p
done
376 gentestdata.go (new file)
@ -0,0 +1,376 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/mjl-/bstore"
|
||||
"github.com/mjl-/sconf"
|
||||
|
||||
"github.com/mjl-/mox/config"
|
||||
"github.com/mjl-/mox/dmarcdb"
|
||||
"github.com/mjl-/mox/dmarcrpt"
|
||||
"github.com/mjl-/mox/dns"
|
||||
"github.com/mjl-/mox/mlog"
|
||||
"github.com/mjl-/mox/mox-"
|
||||
"github.com/mjl-/mox/moxvar"
|
||||
"github.com/mjl-/mox/mtasts"
|
||||
"github.com/mjl-/mox/mtastsdb"
|
||||
"github.com/mjl-/mox/queue"
|
||||
"github.com/mjl-/mox/smtp"
|
||||
"github.com/mjl-/mox/store"
|
||||
"github.com/mjl-/mox/tlsrpt"
|
||||
"github.com/mjl-/mox/tlsrptdb"
|
||||
)
|
||||
|
||||
func cmdGentestdata(c *cmd) {
|
||||
c.unlisted = true
|
||||
c.params = "destdir"
|
||||
c.help = `Generate a populated data directory, for testing upgrades.`
|
||||
args := c.Parse()
|
||||
if len(args) != 1 {
|
||||
c.Usage()
|
||||
}
|
||||
|
||||
destDataDir, err := filepath.Abs(args[0])
|
||||
xcheckf(err, "making destination directory an absolute path")
|
||||
|
||||
if _, err := os.Stat(destDataDir); err == nil {
|
||||
log.Fatalf("destination directory already exists, refusing to generate test data")
|
||||
}
|
||||
err = os.MkdirAll(destDataDir, 0770)
|
||||
xcheckf(err, "creating destination data directory")
|
||||
err = os.MkdirAll(filepath.Join(destDataDir, "tmp"), 0770)
|
||||
xcheckf(err, "creating tmp directory")
|
||||
|
||||
tempfile := func() *os.File {
|
||||
f, err := os.CreateTemp(filepath.Join(destDataDir, "tmp"), "temp")
|
||||
xcheckf(err, "creating temp file")
|
||||
return f
|
||||
}
|
||||
|
||||
ctxbg := context.Background()
|
||||
mox.Conf.Log[""] = mlog.LevelInfo
|
||||
mlog.SetConfig(mox.Conf.Log)
|
||||
|
||||
const domainsConf = `
|
||||
Domains:
|
||||
mox.example: nil
|
||||
☺.example: nil
|
||||
Accounts:
|
||||
test0:
|
||||
Domain: mox.example
|
||||
Destinations:
|
||||
test0@mox.example: nil
|
||||
test1:
|
||||
Domain: mox.example
|
||||
Destinations:
|
||||
test1@mox.example: nil
|
||||
test2:
|
||||
Domain: ☺.example
|
||||
Destinations:
|
||||
☹@☺.example: nil
|
||||
JunkFilter:
|
||||
Threshold: 0.95
|
||||
Params:
|
||||
Twograms: true
|
||||
MaxPower: 0.1
|
||||
TopWords: 10
|
||||
IgnoreWords: 0.1
|
||||
`
|
||||
|
||||
mox.ConfigStaticPath = filepath.FromSlash("/tmp/mox-bogus/mox.conf")
|
||||
mox.ConfigDynamicPath = filepath.FromSlash("/tmp/mox-bogus/domains.conf")
|
||||
mox.Conf.DynamicLastCheck = time.Now() // Should prevent warning.
|
||||
mox.Conf.Static = config.Static{
|
||||
DataDir: destDataDir,
|
||||
}
|
||||
err = sconf.Parse(strings.NewReader(domainsConf), &mox.Conf.Dynamic)
|
||||
xcheckf(err, "parsing domains config")
|
||||
|
||||
const dmarcReport = `<?xml version="1.0" encoding="UTF-8" ?>
|
||||
<feedback>
|
||||
<report_metadata>
|
||||
<org_name>google.com</org_name>
|
||||
<email>noreply-dmarc-support@google.com</email>
|
||||
<extra_contact_info>https://support.google.com/a/answer/2466580</extra_contact_info>
|
||||
<report_id>10051505501689795560</report_id>
|
||||
<date_range>
|
||||
<begin>1596412800</begin>
|
||||
<end>1596499199</end>
|
||||
</date_range>
|
||||
</report_metadata>
|
||||
<policy_published>
|
||||
<domain>mox.example</domain>
|
||||
<adkim>r</adkim>
|
||||
<aspf>r</aspf>
|
||||
<p>reject</p>
|
||||
<sp>reject</sp>
|
||||
<pct>100</pct>
|
||||
</policy_published>
|
||||
<record>
|
||||
<row>
|
||||
<source_ip>127.0.0.1</source_ip>
|
||||
<count>1</count>
|
||||
<policy_evaluated>
|
||||
<disposition>none</disposition>
|
||||
<dkim>pass</dkim>
|
||||
<spf>pass</spf>
|
||||
</policy_evaluated>
|
||||
</row>
|
||||
<identifiers>
|
||||
<header_from>example.org</header_from>
|
||||
</identifiers>
|
||||
<auth_results>
|
||||
<dkim>
|
||||
<domain>example.org</domain>
|
||||
<result>pass</result>
|
||||
<selector>example</selector>
|
||||
</dkim>
|
||||
<spf>
|
||||
<domain>example.org</domain>
|
||||
<result>pass</result>
|
||||
</spf>
|
||||
</auth_results>
|
||||
</record>
|
||||
</feedback>
|
||||
`
|
||||
|
||||
const tlsReport = `{
|
||||
"organization-name": "Company-X",
|
||||
"date-range": {
|
||||
"start-datetime": "2016-04-01T00:00:00Z",
|
||||
"end-datetime": "2016-04-01T23:59:59Z"
|
||||
},
|
||||
"contact-info": "sts-reporting@company-x.example",
|
||||
"report-id": "5065427c-23d3-47ca-b6e0-946ea0e8c4be",
|
||||
"policies": [{
|
||||
"policy": {
|
||||
"policy-type": "sts",
|
||||
"policy-string": ["version: STSv1","mode: testing",
|
||||
"mx: *.mail.company-y.example","max_age: 86400"],
|
||||
"policy-domain": "mox.example",
|
||||
"mx-host": ["*.mail.company-y.example"]
|
||||
},
|
||||
"summary": {
|
||||
"total-successful-session-count": 5326,
|
||||
"total-failure-session-count": 303
|
||||
},
|
||||
"failure-details": [{
|
||||
"result-type": "certificate-expired",
|
||||
"sending-mta-ip": "2001:db8:abcd:0012::1",
|
||||
"receiving-mx-hostname": "mx1.mail.company-y.example",
|
||||
"failed-session-count": 100
|
||||
}, {
|
||||
"result-type": "starttls-not-supported",
|
||||
"sending-mta-ip": "2001:db8:abcd:0013::1",
|
||||
"receiving-mx-hostname": "mx2.mail.company-y.example",
|
||||
"receiving-ip": "203.0.113.56",
|
||||
"failed-session-count": 200,
|
||||
"additional-information": "https://reports.company-x.example/report_info ? id = 5065427 c - 23 d3# StarttlsNotSupported "
|
||||
}, {
|
||||
"result-type": "validation-failure",
|
||||
"sending-mta-ip": "198.51.100.62",
|
||||
"receiving-ip": "203.0.113.58",
|
||||
"receiving-mx-hostname": "mx-backup.mail.company-y.example",
|
||||
"failed-session-count": 3,
|
||||
"failure-reason-code": "X509_V_ERR_PROXY_PATH_LENGTH_EXCEEDED"
|
||||
}]
|
||||
}]
|
||||
}`
|
||||
|
||||
err = os.WriteFile(filepath.Join(destDataDir, "moxversion"), []byte(moxvar.Version), 0660)
|
||||
xcheckf(err, "writing moxversion")
|
||||
|
||||
// Populate auth.db
|
||||
err = store.Init(ctxbg)
|
||||
xcheckf(err, "store init")
|
||||
err = store.TLSPublicKeyAdd(ctxbg, &store.TLSPublicKey{Name: "testkey", Fingerprint: "...", Type: "ecdsa-p256", CertDER: []byte("..."), Account: "test0", LoginAddress: "test0@mox.example"})
|
||||
xcheckf(err, "adding tlspubkey")
|
||||
|
||||
// Populate dmarc.db.
|
||||
err = dmarcdb.Init()
|
||||
xcheckf(err, "dmarcdb init")
|
||||
report, err := dmarcrpt.ParseReport(strings.NewReader(dmarcReport))
|
||||
xcheckf(err, "parsing dmarc aggregate report")
|
||||
err = dmarcdb.AddReport(ctxbg, report, dns.Domain{ASCII: "mox.example"})
|
||||
xcheckf(err, "adding dmarc aggregate report")
|
||||
|
||||
// Populate mtasts.db.
|
||||
err = mtastsdb.Init(false)
|
||||
xcheckf(err, "mtastsdb init")
|
||||
mtastsPolicy := mtasts.Policy{
|
||||
Version: "STSv1",
|
||||
Mode: mtasts.ModeTesting,
|
||||
MX: []mtasts.MX{
|
||||
{Domain: dns.Domain{ASCII: "mx1.example.com"}},
|
||||
{Domain: dns.Domain{ASCII: "mx2.example.com"}},
|
||||
{Domain: dns.Domain{ASCII: "backup-example.com"}, Wildcard: true},
|
||||
},
|
||||
MaxAgeSeconds: 1296000,
|
||||
}
|
||||
err = mtastsdb.Upsert(ctxbg, dns.Domain{ASCII: "mox.example"}, "123", &mtastsPolicy, mtastsPolicy.String())
|
||||
xcheckf(err, "adding mtastsdb report")
|
||||
|
||||
// Populate tlsrpt.db.
|
||||
err = tlsrptdb.Init()
|
||||
xcheckf(err, "tlsrptdb init")
|
||||
tlsreportJSON, err := tlsrpt.Parse(strings.NewReader(tlsReport))
|
||||
xcheckf(err, "parsing tls report")
|
||||
tlsr := tlsreportJSON.Convert()
|
||||
err = tlsrptdb.AddReport(ctxbg, c.log, dns.Domain{ASCII: "mox.example"}, "tlsrpt@mox.example", false, &tlsr)
|
||||
xcheckf(err, "adding tls report")
|
||||
|
||||
// Populate queue, with a message.
|
||||
err = queue.Init()
|
||||
xcheckf(err, "queue init")
|
||||
mailfrom := smtp.Path{Localpart: "other", IPDomain: dns.IPDomain{Domain: dns.Domain{ASCII: "other.example"}}}
|
||||
rcptto := smtp.Path{Localpart: "test0", IPDomain: dns.IPDomain{Domain: dns.Domain{ASCII: "mox.example"}}}
|
||||
prefix := []byte{}
|
||||
mf := tempfile()
|
||||
xcheckf(err, "temp file for queue message")
|
||||
defer store.CloseRemoveTempFile(c.log, mf, "test message")
|
||||
const qmsg = "From: <test0@mox.example>\r\nTo: <other@remote.example>\r\nSubject: test\r\n\r\nthe message...\r\n"
|
||||
_, err = fmt.Fprint(mf, qmsg)
|
||||
xcheckf(err, "writing message")
|
||||
qm := queue.MakeMsg(mailfrom, rcptto, false, false, int64(len(qmsg)), "<test@localhost>", prefix, nil, time.Now(), "test")
|
||||
err = queue.Add(ctxbg, c.log, "test0", mf, qm)
|
||||
xcheckf(err, "enqueue message")
|
||||
|
||||
// Create three accounts.
|
||||
// First account without messages.
|
||||
accTest0, err := store.OpenAccount(c.log, "test0", false)
|
||||
xcheckf(err, "open account test0")
|
||||
err = accTest0.ThreadingWait(c.log)
|
||||
xcheckf(err, "wait for threading to finish")
|
||||
err = accTest0.Close()
|
||||
xcheckf(err, "close account")
|
||||
|
||||
// Second account with one message.
|
||||
accTest1, err := store.OpenAccount(c.log, "test1", false)
|
||||
xcheckf(err, "open account test1")
|
||||
err = accTest1.ThreadingWait(c.log)
|
||||
xcheckf(err, "wait for threading to finish")
|
||||
err = accTest1.DB.Write(ctxbg, func(tx *bstore.Tx) error {
|
||||
inbox, err := bstore.QueryTx[store.Mailbox](tx).FilterNonzero(store.Mailbox{Name: "Inbox"}).Get()
|
||||
xcheckf(err, "looking up inbox")
|
||||
const msg = "From: <other@remote.example>\r\nTo: <test1@mox.example>\r\nSubject: test\r\n\r\nthe message...\r\n"
|
||||
m := store.Message{
|
||||
MailboxID: inbox.ID,
|
||||
MailboxOrigID: inbox.ID,
|
||||
RemoteIP: "1.2.3.4",
|
||||
RemoteIPMasked1: "1.2.3.4",
|
||||
RemoteIPMasked2: "1.2.3.0",
|
||||
RemoteIPMasked3: "1.2.0.0",
|
||||
EHLODomain: "other.example",
|
||||
MailFrom: "other@remote.example",
|
||||
MailFromLocalpart: smtp.Localpart("other"),
|
||||
MailFromDomain: "remote.example",
|
||||
RcptToLocalpart: "test1",
|
||||
RcptToDomain: "mox.example",
|
||||
MsgFromLocalpart: "other",
|
||||
MsgFromDomain: "remote.example",
|
||||
MsgFromOrgDomain: "remote.example",
|
||||
EHLOValidated: true,
|
||||
MailFromValidated: true,
|
||||
MsgFromValidated: true,
|
||||
EHLOValidation: store.ValidationStrict,
|
||||
MailFromValidation: store.ValidationPass,
|
||||
MsgFromValidation: store.ValidationStrict,
|
||||
DKIMDomains: []string{"other.example"},
|
||||
Size: int64(len(msg)),
|
||||
}
|
||||
mf := tempfile()
|
||||
xcheckf(err, "creating temp file for delivery")
|
||||
defer store.CloseRemoveTempFile(c.log, mf, "test message")
|
||||
_, err = fmt.Fprint(mf, msg)
|
||||
xcheckf(err, "writing deliver message to file")
|
||||
|
||||
err = accTest1.MessageAdd(c.log, tx, &inbox, &m, mf, store.AddOpts{})
|
||||
xcheckf(err, "deliver message")
|
||||
|
||||
err = tx.Update(&inbox)
|
||||
xcheckf(err, "update inbox")
|
||||
|
||||
return nil
|
||||
})
|
||||
xcheckf(err, "write transaction with new message")
|
||||
err = accTest1.Close()
|
||||
xcheckf(err, "close account")
|
||||
|
||||
// Third account with two messages and junkfilter.
|
||||
accTest2, err := store.OpenAccount(c.log, "test2", false)
|
||||
xcheckf(err, "open account test2")
|
||||
err = accTest2.ThreadingWait(c.log)
|
||||
xcheckf(err, "wait for threading to finish")
|
||||
err = accTest2.DB.Write(ctxbg, func(tx *bstore.Tx) error {
|
||||
inbox, err := bstore.QueryTx[store.Mailbox](tx).FilterNonzero(store.Mailbox{Name: "Inbox"}).Get()
|
||||
xcheckf(err, "looking up inbox")
|
||||
const msg0 = "From: <other@remote.example>\r\nTo: <☹@xn--74h.example>\r\nSubject: test\r\n\r\nthe message...\r\n"
|
||||
m0 := store.Message{
|
||||
MailboxID: inbox.ID,
|
||||
MailboxOrigID: inbox.ID,
|
||||
RemoteIP: "::1",
|
||||
RemoteIPMasked1: "::",
|
||||
RemoteIPMasked2: "::",
|
||||
RemoteIPMasked3: "::",
|
||||
EHLODomain: "other.example",
|
||||
MailFrom: "other@remote.example",
|
||||
MailFromLocalpart: smtp.Localpart("other"),
|
||||
MailFromDomain: "remote.example",
|
||||
RcptToLocalpart: "☹",
|
||||
RcptToDomain: "☺.example",
|
||||
MsgFromLocalpart: "other",
|
||||
MsgFromDomain: "remote.example",
|
||||
MsgFromOrgDomain: "remote.example",
|
||||
EHLOValidated: true,
|
||||
MailFromValidated: true,
|
||||
MsgFromValidated: true,
|
||||
EHLOValidation: store.ValidationStrict,
|
||||
MailFromValidation: store.ValidationPass,
|
||||
MsgFromValidation: store.ValidationStrict,
|
||||
DKIMDomains: []string{"other.example"},
|
||||
Size: int64(len(msg0)),
|
||||
}
|
||||
mf0 := tempfile()
|
||||
xcheckf(err, "creating temp file for delivery")
|
||||
defer store.CloseRemoveTempFile(c.log, mf0, "test message")
|
||||
_, err = fmt.Fprint(mf0, msg0)
|
||||
xcheckf(err, "writing deliver message to file")
|
||||
err = accTest2.MessageAdd(c.log, tx, &inbox, &m0, mf0, store.AddOpts{})
|
||||
xcheckf(err, "add message to account test2")
|
||||
err = tx.Update(&inbox)
|
||||
xcheckf(err, "update inbox")
|
||||
|
||||
sent, err := bstore.QueryTx[store.Mailbox](tx).FilterNonzero(store.Mailbox{Name: "Sent"}).Get()
|
||||
xcheckf(err, "looking up inbox")
|
||||
const prefix1 = "Extra: test\r\n"
|
||||
const msg1 = "From: <other@remote.example>\r\nTo: <☹@xn--74h.example>\r\nSubject: test\r\n\r\nthe message...\r\n"
|
||||
m1 := store.Message{
|
||||
MailboxID: sent.ID,
|
||||
MailboxOrigID: sent.ID,
|
||||
Flags: store.Flags{Seen: true, Junk: true},
|
||||
Size: int64(len(prefix1) + len(msg1)),
|
||||
MsgPrefix: []byte(prefix1),
|
||||
}
|
||||
mf1 := tempfile()
|
||||
xcheckf(err, "creating temp file for delivery")
|
||||
defer store.CloseRemoveTempFile(c.log, mf1, "test message")
|
||||
_, err = fmt.Fprint(mf1, msg1)
|
||||
xcheckf(err, "writing deliver message to file")
|
||||
err = accTest2.MessageAdd(c.log, tx, &sent, &m1, mf1, store.AddOpts{})
|
||||
xcheckf(err, "add message to account test2")
|
||||
err = tx.Update(&sent)
|
||||
xcheckf(err, "update sent")
|
||||
|
||||
return nil
|
||||
})
|
||||
xcheckf(err, "write transaction with new message")
|
||||
err = accTest2.Close()
|
||||
xcheckf(err, "close account")
|
||||
}
|
11 gents.sh (new executable file)
@ -0,0 +1,11 @@
#!/bin/sh
set -eu

# generate new typescript client, only install it when it is different, so we
# don't trigger frontend builds needlessly.
go run vendor/github.com/mjl-/sherpats/cmd/sherpats/main.go -bytes-to-string -slices-nullable -maps-nullable -nullable-optional -namespace api api <$1 >$2.tmp
if cmp -s $2 $2.tmp; then
	rm $2.tmp
else
	mv $2.tmp $2
fi
117 genwebsite.sh (new executable file)
@ -0,0 +1,117 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
mkdir website/html 2>/dev/null
|
||||
rm -r website/html/* 2>/dev/null
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
commithash=$(git rev-parse --short HEAD)
|
||||
commitdate=$(git log -1 --date=format:"%Y-%m-%d" --format="%ad")
|
||||
export commithash
|
||||
export commitdate
|
||||
|
||||
# Link to static files and cross-references.
|
||||
ln -sf ../../../mox-website-files/files website/html/files
|
||||
ln -sf ../../rfc/xr website/html/xr
|
||||
|
||||
|
||||
# All commands below are executed relative to ./website/
|
||||
cd website
|
||||
|
||||
go run website.go -root -title 'Mox: modern, secure, all-in-one mail server' 'Mox' < index.md >html/index.html
|
||||
|
||||
mkdir html/features
|
||||
(
|
||||
cat features/index.md
|
||||
echo
|
||||
sed -n -e 's/^# Roadmap/## Roadmap/' -e '/# FAQ/q' -e '/# Roadmap/,/# FAQ/p' < ../README.md
|
||||
echo
|
||||
echo 'Also see the [Protocols](../protocols/) page for implementation status, and (non)-plans.'
|
||||
) | go run website.go 'Features' >html/features/index.html
|
||||
|
||||
mkdir html/screenshots
|
||||
go run website.go 'Screenshots' < screenshots/index.md >html/screenshots/index.html
|
||||
|
||||
mkdir html/install
|
||||
go run website.go 'Install' < install/index.md >html/install/index.html
|
||||
|
||||
mkdir html/faq
|
||||
sed -n '/# FAQ/,//p' < ../README.md | go run website.go 'FAQ' >html/faq/index.html
|
||||
|
||||
mkdir html/config
|
||||
(
|
||||
echo '# Config reference'
|
||||
echo
|
||||
sed -n '/^Package config holds /,/\*\//p' < ../config/doc.go | grep -v -E '^(Package config holds |\*/)' | sed 's/^# /## /'
|
||||
) | go run website.go 'Config reference' >html/config/index.html
|
||||
|
||||
mkdir html/commands
|
||||
(
|
||||
echo '# Command reference'
|
||||
echo
|
||||
sed -n '/^Mox is started /,/\*\//p' < ../doc.go | grep -v '\*/' | sed 's/^# /## /'
|
||||
) | go run website.go 'Command reference' >html/commands/index.html
|
||||
|
||||
mkdir html/protocols
|
||||
go run website.go -protocols 'Protocols' <../rfc/index.txt >html/protocols/index.html
|
||||
|
||||
mkdir html/b
|
||||
cat <<'EOF' >html/b/index.html
|
||||
<!doctype html>
|
||||
<html>
|
||||
<head>
|
||||
<meta charset="utf-8" />
|
||||
<title>mox build</title>
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1" />
|
||||
<link rel="icon" href="noNeedlessFaviconRequestsPlease:" />
|
||||
<style>
|
||||
body { padding: 1em; }
|
||||
* { font-size: 18px; font-family: ubuntu, lato, sans-serif; margin: 0; padding: 0; box-sizing: border-box; }
|
||||
p { max-width: 50em; margin-bottom: 2ex; }
|
||||
pre { font-family: 'ubuntu mono', monospace; }
|
||||
pre, blockquote { padding: 1em; background-color: #eee; border-radius: .25em; display: inline-block; margin-bottom: 1em; }
|
||||
h1 { margin: 1em 0 .5em 0; }
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<script>
|
||||
const elem = (name, ...s) => {
|
||||
const e = document.createElement(name)
|
||||
e.append(...s)
|
||||
return e
|
||||
}
|
||||
const link = (url, anchor) => {
|
||||
const e = document.createElement('a')
|
||||
e.setAttribute('href', url)
|
||||
e.setAttribute('rel', 'noopener')
|
||||
e.append(anchor || url)
|
||||
return e
|
||||
}
|
||||
let h = location.hash.substring(1)
|
||||
const ok = /^[a-zA-Z0-9_\.]+$/.test(h)
|
||||
if (!ok) {
|
||||
h = '<tag-or-branch-or-commithash>'
|
||||
}
|
||||
const init = () => {
|
||||
document.body.append(
|
||||
elem('p', 'Compile or download any version of mox, by tag (release), branch or commit hash.'),
|
||||
elem('h1', 'Compile'),
|
||||
elem('p', 'Run:'),
|
||||
elem('pre', 'CGO_ENABLED=0 GOBIN=$PWD go install github.com/mjl-/mox@'+h),
|
||||
elem('p', 'Mox is tested with the Go toolchain versions that still have support: the most recent version, and the version before.'),
|
||||
elem('h1', 'Download'),
|
||||
elem('p', 'Download a binary for your platform:'),
|
||||
elem('blockquote', ok ?
|
||||
link('https://beta.gobuilds.org/github.com/mjl-/mox@'+h) :
|
||||
'https://beta.gobuilds.org/github.com/mjl-/mox@'+h
|
||||
),
|
||||
elem('p', 'Because mox is written in Go, builds are reproducible, also when cross-compiling. Gobuilds.org is a service that builds Go applications on-demand with the latest Go toolchain/runtime.'),
|
||||
elem('h1', 'Localserve'),
|
||||
elem('p', 'Changes to mox can often be most easily tested locally with ', link('../features/#hdr-localserve', '"mox localserve"'), ', without having to update your running mail server.'),
|
||||
)
|
||||
}
|
||||
window.addEventListener('load', init)
|
||||
</script>
|
||||
</body>
|
||||
</html>
|
||||
EOF
|
48 go.mod
@ -1,31 +1,37 @@
module github.com/mjl-/mox

go 1.18
go 1.23.0

require (
	github.com/mjl-/bstore v0.0.0-20230211204415-a9899ef6e782
	github.com/mjl-/sconf v0.0.4
	github.com/mjl-/sherpa v0.6.5
	github.com/mjl-/sherpadoc v0.0.10
	github.com/mjl-/adns v0.0.0-20250321173553-ab04b05bdfea
	github.com/mjl-/autocert v0.0.0-20250321204043-abab2b936e31
	github.com/mjl-/bstore v0.0.9
	github.com/mjl-/flate v0.0.0-20250221133712-6372d09eb978
	github.com/mjl-/sconf v0.0.7
	github.com/mjl-/sherpa v0.6.7
	github.com/mjl-/sherpadoc v0.0.16
	github.com/mjl-/sherpaprom v0.0.2
	github.com/prometheus/client_golang v1.14.0
	go.etcd.io/bbolt v1.3.7
	golang.org/x/crypto v0.7.0
	golang.org/x/net v0.8.0
	golang.org/x/text v0.8.0
	github.com/mjl-/sherpats v0.0.6
	github.com/prometheus/client_golang v1.18.0
	github.com/russross/blackfriday/v2 v2.1.0
	go.etcd.io/bbolt v1.3.11
	golang.org/x/crypto v0.37.0
	golang.org/x/net v0.39.0
	golang.org/x/sys v0.32.0
	golang.org/x/text v0.24.0
	rsc.io/qr v0.2.0
)

require (
	github.com/beorn7/perks v1.0.1 // indirect
	github.com/cespare/xxhash/v2 v2.1.2 // indirect
	github.com/golang/protobuf v1.5.2 // indirect
	github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
	github.com/mjl-/xfmt v0.0.0-20190521151243-39d9c00752ce // indirect
	github.com/prometheus/client_model v0.3.0 // indirect
	github.com/prometheus/common v0.37.0 // indirect
	github.com/prometheus/procfs v0.8.0 // indirect
	golang.org/x/mod v0.8.0 // indirect
	golang.org/x/sys v0.6.0 // indirect
	golang.org/x/tools v0.6.0 // indirect
	google.golang.org/protobuf v1.28.1 // indirect
	github.com/cespare/xxhash/v2 v2.2.0 // indirect
	github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect
	github.com/mjl-/xfmt v0.0.2 // indirect
	github.com/prometheus/client_model v0.5.0 // indirect
	github.com/prometheus/common v0.45.0 // indirect
	github.com/prometheus/procfs v0.12.0 // indirect
	golang.org/x/mod v0.24.0 // indirect
	golang.org/x/sync v0.13.0 // indirect
	golang.org/x/tools v0.32.0 // indirect
	google.golang.org/protobuf v1.31.0 // indirect
)
505 go.sum
@ -1,510 +1,117 @@
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
|
||||
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
|
||||
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
|
||||
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
|
||||
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
|
||||
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
|
||||
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
|
||||
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
|
||||
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
|
||||
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
|
||||
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
|
||||
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
|
||||
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
|
||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
|
||||
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
|
||||
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
|
||||
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
|
||||
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
|
||||
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
||||
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
|
||||
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
|
||||
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
|
||||
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
|
||||
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
|
||||
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
|
||||
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
|
||||
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
|
||||
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
|
||||
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
|
||||
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
|
||||
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
|
||||
github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
|
||||
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
|
||||
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
|
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
|
||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/mjl-/bstore v0.0.0-20230211204415-a9899ef6e782 h1:dVwJA/wXzXXUROM9oM3Stg3cmqixiFh4Zi1Xumvtj74=
|
||||
github.com/mjl-/bstore v0.0.0-20230211204415-a9899ef6e782/go.mod h1:/cD25FNBaDfvL/plFRxI3Ba3E+wcB0XVOS8nJDqndg0=
|
||||
github.com/mjl-/sconf v0.0.4 h1:uyfn4vv5qOULSgiwQsPbbgkiONKnMFMsSOhsHfAiYwI=
|
||||
github.com/mjl-/sconf v0.0.4/go.mod h1:ezf7YOn7gtClo8y71SqgZKaEkyMQ5Te7vkv4PmTTfwM=
|
||||
github.com/mjl-/sherpa v0.6.5 h1:d90uG/j8fw+2M+ohCTAcVwTSUURGm8ktYDScJO1nKog=
|
||||
github.com/mjl-/sherpa v0.6.5/go.mod h1:dSpAOdgpwdqQZ72O4n3EHo/tR68eKyan8tYYraUMPNc=
|
||||
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg=
|
||||
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k=
|
||||
github.com/mjl-/adns v0.0.0-20250321173553-ab04b05bdfea h1:8dftsVL1tHhRksXzFZRhSJ7gSlcy/t87Nvucs3JnTGE=
|
||||
github.com/mjl-/adns v0.0.0-20250321173553-ab04b05bdfea/go.mod h1:rWZMqGA2HoBm5b5q/A5J8u1sSVuEYh6zBz9tMoVs+RU=
|
||||
github.com/mjl-/autocert v0.0.0-20250321204043-abab2b936e31 h1:6MFGOLPGf6VzHWkKv8waSzJMMS98EFY2LVKPRHffCyo=
|
||||
github.com/mjl-/autocert v0.0.0-20250321204043-abab2b936e31/go.mod h1:taMFU86abMxKLPV4Bynhv8enbYmS67b8LG80qZv2Qus=
|
||||
github.com/mjl-/bstore v0.0.9 h1:j8HVXL10Arbk4ujeRGwns8gipH1N1TZn853inQ42FgY=
|
||||
github.com/mjl-/bstore v0.0.9/go.mod h1:xzIpSfcFosgPJ6h+vsdIt0pzCq4i8hhMuHPQJ0aHQhM=
|
||||
github.com/mjl-/flate v0.0.0-20250221133712-6372d09eb978 h1:Eg5DfI3/00URzGErujKus6a3O0kyXzF8vjoDZzH/gig=
|
||||
github.com/mjl-/flate v0.0.0-20250221133712-6372d09eb978/go.mod h1:QBkFtjai3AiQQuUu7pVh6PA06Vd3oa68E+vddf/UBOs=
|
||||
github.com/mjl-/sconf v0.0.7 h1:bdBcSFZCDFMm/UdBsgNCsjkYmKrSgYwp7rAOoufwHe4=
|
||||
github.com/mjl-/sconf v0.0.7/go.mod h1:uF8OdWtLT8La3i4ln176i1pB0ps9pXGCaABEU55ZkE0=
|
||||
github.com/mjl-/sherpa v0.6.7 h1:C5F8XQdV5nCuS4fvB+ye/ziUQrajEhOoj/t2w5T14BY=
|
||||
github.com/mjl-/sherpa v0.6.7/go.mod h1:dSpAOdgpwdqQZ72O4n3EHo/tR68eKyan8tYYraUMPNc=
|
||||
github.com/mjl-/sherpadoc v0.0.0-20190505200843-c0a7f43f5f1d/go.mod h1:5khTKxoKKNXcB8bkVUO6GlzC7PFtMmkHq578lPbmnok=
|
||||
github.com/mjl-/sherpadoc v0.0.10 h1:tvRVd37IIGg70ZmNkNKNnjDSPtKI5/DdEIukMkWtZYE=
|
||||
github.com/mjl-/sherpadoc v0.0.10/go.mod h1:vh5zcsk3j/Tvm725EY+unTZb3EZcZcpiEQzrODSa6+I=
|
||||
github.com/mjl-/sherpadoc v0.0.16 h1:BdlFNXfnTaA7qO54kof4xpNFJxYBTY0cIObRk7QAP6M=
|
||||
github.com/mjl-/sherpadoc v0.0.16/go.mod h1:vh5zcsk3j/Tvm725EY+unTZb3EZcZcpiEQzrODSa6+I=
|
||||
github.com/mjl-/sherpaprom v0.0.2 h1:1dlbkScsNafM5jURI44uiWrZMSwfZtcOFEEq7vx2C1Y=
|
||||
github.com/mjl-/sherpaprom v0.0.2/go.mod h1:cl5nMNOvqhzMiQJ2FzccQ9ReivjHXe53JhOVkPfSvw4=
|
||||
github.com/mjl-/xfmt v0.0.0-20190521151243-39d9c00752ce h1:oyFmIHo3GLWZzb0odAzN9QUy0MTW6P8JaNRnNVGCBCk=
|
||||
github.com/mjl-/xfmt v0.0.0-20190521151243-39d9c00752ce/go.mod h1:DIEOLmETMQHHr4OgwPG7iC37rDiN9MaZIZxNm5hBtL8=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/mjl-/sherpats v0.0.6 h1:2lSoJbb+jkjLOdlvoMxItq0QQrrnkH+rnm3PMRfpbmA=
|
||||
github.com/mjl-/sherpats v0.0.6/go.mod h1:MoNZJtLmu8oCZ4Ocv5vZksENN4pp6/SJMlg9uTII4KA=
|
||||
github.com/mjl-/xfmt v0.0.2 h1:6dLgd6U3bmDJKtTxsaSYYyMaORoO4hKBAJo4XKkPRko=
|
||||
github.com/mjl-/xfmt v0.0.2/go.mod h1:DIEOLmETMQHHr4OgwPG7iC37rDiN9MaZIZxNm5hBtL8=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
|
||||
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
|
||||
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
|
||||
github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
|
||||
github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
|
||||
github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk=
|
||||
github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
|
||||
github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
|
||||
github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
|
||||
github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
|
||||
github.com/prometheus/common v0.3.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
|
||||
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
|
||||
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
|
||||
github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
|
||||
github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
|
||||
github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM=
|
||||
github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20190503130316-740c07785007/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
|
||||
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
|
||||
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
|
||||
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ=
|
||||
go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0=
|
||||
go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A=
|
||||
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
|
||||
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
|
||||
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
|
||||
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
|
||||
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
|
||||
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
|
||||
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
||||
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE=
|
||||
golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc=
|
||||
golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
|
||||
golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8=
|
||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU=
|
||||
golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ=
|
||||
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY=
|
||||
golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610=
|
||||
golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20=
|
||||
golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68=
|
||||
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
|
||||
golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
|
||||
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
|
||||
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
|
||||
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM=
|
||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
golang.org/x/tools v0.32.0 h1:Q7N1vhpkQv7ybVzLFtTjvQya2ewbwNDZzUgfXGqtMWU=
|
||||
golang.org/x/tools v0.32.0/go.mod h1:ZxrU41P/wAbZD8EDa6dDCa6XfpkhJ7HFMjHJXfBDu8s=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
|
||||
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
|
||||
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
|
||||
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
|
||||
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
|
||||
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
|
||||
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
|
||||
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
|
||||
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
|
||||
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
|
||||
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
||||
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
rsc.io/qr v0.2.0 h1:6vBLea5/NRMVTz8V66gipeLycZMl/+UlFmk8DvqQ6WY=
|
||||
rsc.io/qr v0.2.0/go.mod h1:IF+uZjkb9fqyeF/4tlBoynqmQxUoPfWEKh921coOuXs=
http/account.go (356 lines deleted)
@@ -1,356 +0,0 @@
|
||||
package http
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"archive/zip"
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
_ "embed"
|
||||
|
||||
"github.com/mjl-/sherpa"
|
||||
"github.com/mjl-/sherpaprom"
|
||||
|
||||
"github.com/mjl-/mox/config"
|
||||
"github.com/mjl-/mox/dns"
|
||||
"github.com/mjl-/mox/metrics"
|
||||
"github.com/mjl-/mox/mlog"
|
||||
"github.com/mjl-/mox/mox-"
|
||||
"github.com/mjl-/mox/moxvar"
|
||||
"github.com/mjl-/mox/store"
|
||||
)
|
||||
|
||||
//go:embed accountapi.json
|
||||
var accountapiJSON []byte
|
||||
|
||||
//go:embed account.html
|
||||
var accountHTML []byte
|
||||
|
||||
var accountDoc = mustParseAPI(accountapiJSON)
|
||||
|
||||
var accountSherpaHandler http.Handler
|
||||
|
||||
func init() {
|
||||
collector, err := sherpaprom.NewCollector("moxaccount", nil)
|
||||
if err != nil {
|
||||
xlog.Fatalx("creating sherpa prometheus collector", err)
|
||||
}
|
||||
|
||||
accountSherpaHandler, err = sherpa.NewHandler("/api/", moxvar.Version, Account{}, &accountDoc, &sherpa.HandlerOpts{Collector: collector, AdjustFunctionNames: "none"})
|
||||
if err != nil {
|
||||
xlog.Fatalx("sherpa handler", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Account exports web API functions for the account web interface. All its
|
||||
// methods are exported under /api/. Function calls require valid HTTP
|
||||
// Authentication credentials of a user.
|
||||
type Account struct{}
|
||||
|
||||
// checkAccountAuth checks HTTP basic auth credentials. It returns the account
// name if they are valid; otherwise it writes an HTTP error response and returns the empty string.
|
||||
func checkAccountAuth(ctx context.Context, log *mlog.Log, w http.ResponseWriter, r *http.Request) string {
|
||||
authResult := "error"
|
||||
start := time.Now()
|
||||
var addr *net.TCPAddr
|
||||
defer func() {
|
||||
metrics.AuthenticationInc("httpaccount", "httpbasic", authResult)
|
||||
if authResult == "ok" && addr != nil {
|
||||
mox.LimiterFailedAuth.Reset(addr.IP, start)
|
||||
}
|
||||
}()
|
||||
|
||||
var err error
|
||||
addr, err = net.ResolveTCPAddr("tcp", r.RemoteAddr)
|
||||
if err != nil {
|
||||
log.Errorx("parsing remote address", err, mlog.Field("addr", r.RemoteAddr))
|
||||
}
|
||||
if addr != nil && !mox.LimiterFailedAuth.Add(addr.IP, start, 1) {
|
||||
metrics.AuthenticationRatelimitedInc("httpaccount")
|
||||
http.Error(w, "429 - too many auth attempts", http.StatusTooManyRequests)
|
||||
return ""
|
||||
}
|
||||
|
||||
// store.OpenEmailAuth has an auth cache, so we don't bcrypt for every auth attempt.
|
||||
if auth := r.Header.Get("Authorization"); auth == "" || !strings.HasPrefix(auth, "Basic ") {
|
||||
} else if authBuf, err := base64.StdEncoding.DecodeString(strings.TrimPrefix(auth, "Basic ")); err != nil {
|
||||
log.Debugx("parsing base64", err)
|
||||
} else if t := strings.SplitN(string(authBuf), ":", 2); len(t) != 2 {
|
||||
log.Debug("bad user:pass form")
|
||||
} else if acc, err := store.OpenEmailAuth(t[0], t[1]); err != nil {
|
||||
if errors.Is(err, store.ErrUnknownCredentials) {
|
||||
authResult = "badcreds"
|
||||
}
|
||||
log.Errorx("open account", err)
|
||||
} else {
|
||||
authResult = "ok"
|
||||
accName := acc.Name
|
||||
err := acc.Close()
|
||||
log.Check(err, "closing account")
|
||||
return accName
|
||||
}
|
||||
// note: browsers don't display the realm to prevent users getting confused by malicious realm messages.
|
||||
w.Header().Set("WWW-Authenticate", `Basic realm="mox account - login with email address and password"`)
|
||||
http.Error(w, "http 401 - unauthorized - mox account - login with email address and password", http.StatusUnauthorized)
|
||||
return ""
|
||||
}
|
||||
|
||||
func accountHandle(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := context.WithValue(r.Context(), mlog.CidKey, mox.Cid())
|
||||
log := xlog.WithContext(ctx).Fields(mlog.Field("userauth", ""))
|
||||
|
||||
// Without authentication. The token is unguessable.
|
||||
if r.URL.Path == "/importprogress" {
|
||||
if r.Method != "GET" {
|
||||
http.Error(w, "405 - method not allowed - get required", http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
|
||||
q := r.URL.Query()
|
||||
token := q.Get("token")
|
||||
if token == "" {
|
||||
http.Error(w, "400 - bad request - missing token", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
flusher, ok := w.(http.Flusher)
|
||||
if !ok {
|
||||
log.Error("internal error: ResponseWriter not a http.Flusher")
|
||||
http.Error(w, "500 - internal error - cannot sync to http connection", 500)
|
||||
return
|
||||
}
|
||||
|
||||
l := importListener{token, make(chan importEvent, 100), make(chan bool, 1)}
|
||||
importers.Register <- &l
|
||||
ok = <-l.Register
|
||||
if !ok {
|
||||
http.Error(w, "400 - bad request - unknown token, import may have finished more than a minute ago", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
defer func() {
|
||||
importers.Unregister <- &l
|
||||
}()
|
||||
|
||||
h := w.Header()
|
||||
h.Set("Content-Type", "text/event-stream")
|
||||
h.Set("Cache-Control", "no-cache")
|
||||
_, err := w.Write([]byte(": keepalive\n\n"))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
flusher.Flush()
|
||||
|
||||
cctx := r.Context()
|
||||
for {
|
||||
select {
|
||||
case e := <-l.Events:
|
||||
_, err := w.Write(e.SSEMsg)
|
||||
flusher.Flush()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
case <-cctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
accName := checkAccountAuth(ctx, log, w, r)
|
||||
if accName == "" {
|
||||
// Response already sent.
|
||||
return
|
||||
}
|
||||
|
||||
switch r.URL.Path {
|
||||
case "/":
|
||||
if r.Method != "GET" {
|
||||
http.Error(w, "405 - method not allowed - post required", http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Content-Type", "text/html; charset=utf-8")
|
||||
w.Header().Set("Cache-Control", "no-cache; max-age=0")
|
||||
// We typically return the embedded account.html, but during development it's handy
|
||||
// to load from disk.
|
||||
f, err := os.Open("http/account.html")
|
||||
if err == nil {
|
||||
defer f.Close()
|
||||
_, _ = io.Copy(w, f)
|
||||
} else {
|
||||
_, _ = w.Write(accountHTML)
|
||||
}
|
||||
|
||||
case "/mail-export-maildir.tgz", "/mail-export-maildir.zip", "/mail-export-mbox.tgz", "/mail-export-mbox.zip":
|
||||
maildir := strings.Contains(r.URL.Path, "maildir")
|
||||
tgz := strings.Contains(r.URL.Path, ".tgz")
|
||||
|
||||
acc, err := store.OpenAccount(accName)
|
||||
if err != nil {
|
||||
log.Errorx("open account for export", err)
|
||||
http.Error(w, "500 - internal server error", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
defer func() {
|
||||
err := acc.Close()
|
||||
log.Check(err, "closing account")
|
||||
}()
|
||||
|
||||
var archiver store.Archiver
|
||||
if tgz {
|
||||
// Don't tempt browsers to "helpfully" decompress.
|
||||
w.Header().Set("Content-Type", "application/octet-stream")
|
||||
|
||||
gzw := gzip.NewWriter(w)
|
||||
defer func() {
|
||||
_ = gzw.Close()
|
||||
}()
|
||||
archiver = store.TarArchiver{Writer: tar.NewWriter(gzw)}
|
||||
} else {
|
||||
w.Header().Set("Content-Type", "application/zip")
|
||||
archiver = store.ZipArchiver{Writer: zip.NewWriter(w)}
|
||||
}
|
||||
defer func() {
|
||||
err := archiver.Close()
|
||||
log.Check(err, "exporting mail close")
|
||||
}()
|
||||
if err := store.ExportMessages(log, acc.DB, acc.Dir, archiver, maildir, ""); err != nil {
|
||||
log.Errorx("exporting mail", err)
|
||||
}
|
||||
|
||||
case "/import":
|
||||
if r.Method != "POST" {
|
||||
http.Error(w, "405 - method not allowed - post required", http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
|
||||
f, _, err := r.FormFile("file")
|
||||
if err != nil {
|
||||
if errors.Is(err, http.ErrMissingFile) {
|
||||
http.Error(w, "400 - bad request - missing file", http.StatusBadRequest)
|
||||
} else {
|
||||
http.Error(w, "500 - internal server error - "+err.Error(), http.StatusInternalServerError)
|
||||
}
|
||||
return
|
||||
}
|
||||
defer func() {
|
||||
err := f.Close()
|
||||
log.Check(err, "closing form file")
|
||||
}()
|
||||
skipMailboxPrefix := r.FormValue("skipMailboxPrefix")
|
||||
tmpf, err := os.CreateTemp("", "mox-import")
|
||||
if err != nil {
|
||||
http.Error(w, "500 - internal server error - "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
defer func() {
|
||||
if tmpf != nil {
|
||||
err := tmpf.Close()
|
||||
log.Check(err, "closing uploaded file")
|
||||
}
|
||||
}()
|
||||
if err := os.Remove(tmpf.Name()); err != nil {
|
||||
log.Errorx("removing temporary file", err)
|
||||
http.Error(w, "500 - internal server error - "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
if _, err := io.Copy(tmpf, f); err != nil {
|
||||
log.Errorx("copying import to temporary file", err)
|
||||
http.Error(w, "500 - internal server error - "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
token, err := importStart(log, accName, tmpf, skipMailboxPrefix)
|
||||
if err != nil {
|
||||
log.Errorx("starting import", err)
|
||||
http.Error(w, "500 - internal server error - "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
tmpf = nil // importStart is now responsible for closing.
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
_ = json.NewEncoder(w).Encode(map[string]string{"ImportToken": token})
|
||||
|
||||
default:
|
||||
if strings.HasPrefix(r.URL.Path, "/api/") {
|
||||
accountSherpaHandler.ServeHTTP(w, r.WithContext(context.WithValue(ctx, authCtxKey, accName)))
|
||||
return
|
||||
}
|
||||
http.NotFound(w, r)
|
||||
}
|
||||
}
|
||||
|
||||
type ctxKey string
|
||||
|
||||
var authCtxKey ctxKey = "account"
|
||||
|
||||
// SetPassword saves a new password for the account, invalidating the previous password.
|
||||
// Sessions are not interrupted, and will keep working. New login attempts must use the new password.
|
||||
// Password must be at least 8 characters.
|
||||
func (Account) SetPassword(ctx context.Context, password string) {
|
||||
if len(password) < 8 {
|
||||
panic(&sherpa.Error{Code: "user:error", Message: "password must be at least 8 characters"})
|
||||
}
|
||||
accountName := ctx.Value(authCtxKey).(string)
|
||||
acc, err := store.OpenAccount(accountName)
|
||||
xcheckf(ctx, err, "open account")
|
||||
defer func() {
|
||||
err := acc.Close()
|
||||
xlog.Check(err, "closing account")
|
||||
}()
|
||||
err = acc.SetPassword(password)
|
||||
xcheckf(ctx, err, "setting password")
|
||||
}
|
||||
|
||||
// Destinations returns the default domain, and the destinations (keys are email
|
||||
// addresses, or localparts to the default domain).
|
||||
// todo: replace with a function that returns the whole account, when sherpadoc understands unnamed struct fields.
|
||||
func (Account) Destinations(ctx context.Context) (dns.Domain, map[string]config.Destination) {
|
||||
accountName := ctx.Value(authCtxKey).(string)
|
||||
accConf, ok := mox.Conf.Account(accountName)
|
||||
if !ok {
|
||||
xcheckf(ctx, errors.New("not found"), "looking up account")
|
||||
}
|
||||
return accConf.DNSDomain, accConf.Destinations
|
||||
}
|
||||
|
||||
// DestinationSave updates a destination.
|
||||
// OldDest is compared against the current destination. If it does not match, an
|
||||
// error is returned. Otherwise newDest is saved and the configuration reloaded.
|
||||
func (Account) DestinationSave(ctx context.Context, destName string, oldDest, newDest config.Destination) {
|
||||
accountName := ctx.Value(authCtxKey).(string)
|
||||
accConf, ok := mox.Conf.Account(accountName)
|
||||
if !ok {
|
||||
xcheckf(ctx, errors.New("not found"), "looking up account")
|
||||
}
|
||||
curDest, ok := accConf.Destinations[destName]
|
||||
if !ok {
|
||||
xcheckf(ctx, errors.New("not found"), "looking up destination")
|
||||
}
|
||||
|
||||
if !curDest.Equal(oldDest) {
|
||||
xcheckf(ctx, errors.New("modified"), "checking stored destination")
|
||||
}
|
||||
|
||||
// Keep fields we manage.
|
||||
newDest.DMARCReports = curDest.DMARCReports
|
||||
newDest.TLSReports = curDest.TLSReports
|
||||
|
||||
err := mox.DestinationSave(ctx, accountName, destName, newDest)
|
||||
xcheckf(ctx, err, "saving destination")
|
||||
}
|
||||
|
||||
// ImportAbort aborts an import that is in progress. If the import exists and isn't
|
||||
// finished, no changes will have been made by the import.
|
||||
func (Account) ImportAbort(ctx context.Context, importToken string) error {
|
||||
req := importAbortRequest{importToken, make(chan error)}
|
||||
importers.Abort <- req
|
||||
return <-req.Response
|
||||
}
|
http/account.html (672 lines deleted)
@@ -1,672 +0,0 @@
|
||||
<!doctype html>
|
||||
<html>
|
||||
<head>
|
||||
<title>Mox Account</title>
|
||||
<meta charset="utf-8" />
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1" />
|
||||
<style>
|
||||
body, html { padding: 1em; font-size: 16px; }
|
||||
* { font-size: inherit; font-family: ubuntu, lato, sans-serif; margin: 0; padding: 0; box-sizing: border-box; }
|
||||
h1, h2, h3, h4 { margin-bottom: 1ex; }
|
||||
h1 { font-size: 1.2rem; }
|
||||
h2 { font-size: 1.1rem; }
|
||||
h3, h4 { font-size: 1rem; }
|
||||
ul { padding-left: 1rem; }
|
||||
.literal { background-color: #fdfdfd; padding: .5em 1em; border: 1px solid #eee; border-radius: 4px; white-space: pre-wrap; font-family: monospace; font-size: 15px; tab-size: 4; }
|
||||
table td, table th { padding: .2em .5em; }
|
||||
table > tbody > tr:nth-child(odd) { background-color: #f8f8f8; }
|
||||
.text { max-width: 50em; }
|
||||
p { margin-bottom: 1em; max-width: 50em; }
|
||||
[title] { text-decoration: underline; text-decoration-style: dotted; }
|
||||
fieldset { border: 0; }
|
||||
#page { opacity: 1; animation: fadein 0.15s ease-in; }
|
||||
#page.loading { opacity: 0.1; animation: fadeout 1s ease-out; }
|
||||
@keyframes fadein { 0% { opacity: 0 } 100% { opacity: 1 } }
|
||||
@keyframes fadeout { 0% { opacity: 1 } 100% { opacity: 0.1 } }
|
||||
</style>
|
||||
<script src="api/sherpa.js"></script>
|
||||
<script>api._sherpa.baseurl = 'api/'</script>
|
||||
</head>
|
||||
<body>
|
||||
<div id="page">Loading...</div>
|
||||
|
||||
<script>
|
||||
const [dom, style, attr, prop] = (function() {
|
||||
function _domKids(e, ...kl) {
|
||||
kl.forEach(k => {
|
||||
if (typeof k === 'string' || k instanceof String) {
|
||||
e.appendChild(document.createTextNode(k))
|
||||
} else if (k instanceof Node) {
|
||||
e.appendChild(k)
|
||||
} else if (Array.isArray(k)) {
|
||||
_domKids(e, ...k)
|
||||
} else if (typeof k === 'function') {
|
||||
if (!k.name) {
|
||||
throw new Error('function without name', k)
|
||||
}
|
||||
e.addEventListener(k.name, k)
|
||||
} else if (typeof k === 'object' && k !== null) {
|
||||
if (k.root) {
|
||||
e.appendChild(k.root)
|
||||
return
|
||||
}
|
||||
for (const key in k) {
|
||||
const value = k[key]
|
||||
if (key === '_prop') {
|
||||
for (const prop in value) {
|
||||
e[prop] = value[prop]
|
||||
}
|
||||
} else if (key === '_attr') {
|
||||
for (const prop in value) {
|
||||
e.setAttribute(prop, value[prop])
|
||||
}
|
||||
} else if (key === '_listen') {
|
||||
e.addEventListener(...value)
|
||||
} else {
|
||||
e.style[key] = value
|
||||
}
|
||||
}
|
||||
} else {
|
||||
console.log('bad kid', k)
|
||||
throw new Error('bad kid')
|
||||
}
|
||||
})
|
||||
}
|
||||
const _dom = (kind, ...kl) => {
|
||||
const t = kind.split('.')
|
||||
const e = document.createElement(t[0])
|
||||
for (let i = 1; i < t.length; i++) {
|
||||
e.classList.add(t[i])
|
||||
}
|
||||
_domKids(e, kl)
|
||||
return e
|
||||
}
|
||||
_dom._kids = function(e, ...kl) {
|
||||
while(e.firstChild) {
|
||||
e.removeChild(e.firstChild)
|
||||
}
|
||||
_domKids(e, kl)
|
||||
}
|
||||
const dom = new Proxy(_dom, {
|
||||
get: function(dom, prop) {
|
||||
if (prop in dom) {
|
||||
return dom[prop]
|
||||
}
|
||||
const fn = (...kl) => _dom(prop, kl)
|
||||
dom[prop] = fn
|
||||
return fn
|
||||
},
|
||||
apply: function(target, that, args) {
|
||||
if (args.length === 1 && typeof args[0] === 'object' && !Array.isArray(args[0])) {
|
||||
return {_attr: args[0]}
|
||||
}
|
||||
return _dom(...args)
|
||||
},
|
||||
})
|
||||
const style = x => x
|
||||
const attr = x => { return {_attr: x} }
|
||||
const prop = x => { return {_prop: x} }
|
||||
return [dom, style, attr, prop]
|
||||
})()
|
||||
|
||||
const link = (href, anchorOpt) => dom.a(attr({href: href, rel: 'noopener noreferrer'}), anchorOpt || href)
|
||||
|
||||
const crumblink = (text, link) => dom.a(text, attr({href: link}))
|
||||
const crumbs = (...l) => [dom.h1(l.map((e, index) => index === 0 ? e : [' / ', e])), dom.br()]
|
||||
|
||||
const footer = dom.div(
|
||||
style({marginTop: '6ex', opacity: 0.75}),
|
||||
link('https://github.com/mjl-/mox', 'mox'),
|
||||
' ',
|
||||
api._sherpa.version,
|
||||
)
|
||||
|
||||
const domainName = d => {
|
||||
return d.Unicode || d.ASCII
|
||||
}
|
||||
|
||||
const domainString = d => {
|
||||
if (d.Unicode) {
|
||||
return d.Unicode+" ("+d.ASCII+")"
|
||||
}
|
||||
return d.ASCII
|
||||
}
|
||||
|
||||
const box = (color, ...l) => [
|
||||
dom.div(
|
||||
style({
|
||||
display: 'inline-block',
|
||||
padding: '.25em .5em',
|
||||
backgroundColor: color,
|
||||
borderRadius: '3px',
|
||||
margin: '.5ex 0',
|
||||
}),
|
||||
l,
|
||||
),
|
||||
dom.br(),
|
||||
]
|
||||
|
||||
const green = '#1dea20'
|
||||
const yellow = '#ffe400'
|
||||
const red = '#ff7443'
|
||||
const blue = '#8bc8ff'
|
||||
|
||||
const index = async () => {
|
||||
const [domain, destinations] = await api.Destinations()
|
||||
|
||||
let passwordForm, passwordFieldset, password1, password2, passwordHint
|
||||
|
||||
let importForm, importFieldset, mailboxFile, mailboxFileHint, mailboxPrefix, mailboxPrefixHint, importProgress, importAbortBox, importAbort
|
||||
|
||||
const importTrack = async (token) => {
|
||||
const importConnection = dom.div('Waiting for updates...')
|
||||
importProgress.appendChild(importConnection)
|
||||
|
||||
let countsTbody
|
||||
let counts = {} // mailbox -> elem
|
||||
|
||||
let problems // element
|
||||
|
||||
await new Promise((resolve, reject) => {
|
||||
const eventSource = new window.EventSource('importprogress?token=' + encodeURIComponent(token))
|
||||
eventSource.addEventListener('open', function(e) {
|
||||
console.log('eventsource open', {e})
|
||||
dom._kids(importConnection, dom.div('Waiting for updates, connected...'))
|
||||
|
||||
dom._kids(importAbortBox,
|
||||
importAbort=dom.button('Abort import', attr({title: 'If the import is not yet finished, it can be aborted and no messages will have been imported.'}), async function click(e) {
|
||||
try {
|
||||
await api.ImportAbort(token)
|
||||
} catch (err) {
|
||||
console.log({err})
|
||||
window.alert('Error: ' + err.message)
|
||||
}
|
||||
// On success, the event source will get an aborted notification and shut down the connection.
|
||||
})
|
||||
)
|
||||
})
|
||||
eventSource.addEventListener('error', function(e) {
|
||||
console.log('eventsource error', {e})
|
||||
dom._kids(importConnection, box(red, 'Connection error'))
|
||||
reject({message: 'Connection error'})
|
||||
})
|
||||
eventSource.addEventListener('count', (e) => {
|
||||
const data = JSON.parse(e.data) // {Mailbox: ..., Count: ...}
|
||||
console.log('import count event', {e, data})
|
||||
if (!countsTbody) {
|
||||
importProgress.appendChild(
|
||||
dom.div(
|
||||
dom.br(),
|
||||
dom.h3('Importing mailboxes and messages...'),
|
||||
dom.table(
|
||||
dom.thead(
|
||||
dom.tr(dom.th('Mailbox'), dom.th('Messages')),
|
||||
),
|
||||
countsTbody=dom.tbody(),
|
||||
),
|
||||
)
|
||||
)
|
||||
}
|
||||
let elem = counts[data.Mailbox]
|
||||
if (!elem) {
|
||||
countsTbody.appendChild(
|
||||
dom.tr(
|
||||
dom.td(data.Mailbox),
|
||||
elem=dom.td(style({textAlign: 'right'}), ''+data.Count),
|
||||
),
|
||||
)
|
||||
counts[data.Mailbox] = elem
|
||||
}
|
||||
dom._kids(elem, ''+data.Count)
|
||||
})
|
||||
eventSource.addEventListener('problem', (e) => {
|
||||
const data = JSON.parse(e.data) // {Message: ...}
|
||||
console.log('import problem event', {e, data})
|
||||
if (!problems) {
|
||||
importProgress.appendChild(
|
||||
dom.div(
|
||||
dom.br(),
|
||||
dom.h3('Problems during import'),
|
||||
problems=dom.div(),
|
||||
),
|
||||
)
|
||||
}
|
||||
problems.appendChild(dom.div(box(yellow, data.Message)))
|
||||
})
|
||||
eventSource.addEventListener('done', (e) => {
|
||||
console.log('import done event', {e})
|
||||
importProgress.appendChild(dom.div(dom.br(), box(blue, 'Import finished')))
|
||||
|
||||
eventSource.close()
|
||||
dom._kids(importConnection)
|
||||
dom._kids(importAbortBox)
|
||||
window.sessionStorage.removeItem('ImportToken')
|
||||
|
||||
resolve()
|
||||
})
|
||||
eventSource.addEventListener('aborted', function(e) {
|
||||
console.log('import aborted event', {e})
|
||||
|
||||
importProgress.appendChild(dom.div(dom.br(), box(red, 'Import aborted, no messages imported')))
|
||||
|
||||
eventSource.close()
|
||||
dom._kids(importConnection)
|
||||
dom._kids(importAbortBox)
|
||||
window.sessionStorage.removeItem('ImportToken')
|
||||
|
||||
reject({message: 'Import aborted'})
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
const page = document.getElementById('page')
|
||||
dom._kids(page,
|
||||
crumbs('Mox Account'),
|
||||
dom.p('NOTE: Not all account settings can be configured through these pages yet. See the configuration file for more options.'),
|
||||
dom.div(
|
||||
'Default domain: ',
|
||||
domain.ASCII ? domainString(domain) : '(none)',
|
||||
),
|
||||
dom.br(),
|
||||
dom.h2('Addresses'),
|
||||
dom.ul(
|
||||
Object.entries(destinations).sort().map(t =>
|
||||
dom.li(
|
||||
dom.a(t[0], attr({href: '#destinations/'+t[0]})),
|
||||
),
|
||||
),
|
||||
),
|
||||
dom.br(),
|
||||
dom.h2('Change password'),
|
||||
passwordForm=dom.form(
|
||||
passwordFieldset=dom.fieldset(
|
||||
dom.label(
|
||||
style({display: 'inline-block'}),
|
||||
'New password',
|
||||
dom.br(),
|
||||
password1=dom.input(attr({type: 'password', required: ''}), function focus() {
|
||||
passwordHint.style.display = ''
|
||||
}),
|
||||
),
|
||||
' ',
|
||||
dom.label(
|
||||
style({display: 'inline-block'}),
|
||||
'New password repeat',
|
||||
dom.br(),
|
||||
password2=dom.input(attr({type: 'password', required: ''})),
|
||||
),
|
||||
' ',
|
||||
dom.button('Change password'),
|
||||
),
|
||||
passwordHint=dom.div(
|
||||
style({display: 'none', marginTop: '.5ex'}),
|
||||
dom.button('Generate random password', attr({type: 'button'}), function click(e) {
|
||||
e.preventDefault()
|
||||
let b = new Uint8Array(1)
|
||||
let s = ''
|
||||
const chars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@#$%^&*-_;:,<.>/'
|
||||
while (s.length < 12) {
|
||||
self.crypto.getRandomValues(b)
|
||||
if (Math.ceil(b[0]/chars.length)*chars.length > 255) {
|
||||
continue // Prevent bias.
|
||||
}
|
||||
s += chars[b[0]%chars.length]
|
||||
}
|
||||
password1.type = 'text'
|
||||
password2.type = 'text'
|
||||
password1.value = s
|
||||
password2.value = s
|
||||
}),
|
||||
dom('div.text',
|
||||
box(yellow, 'Important: Bots will try to bruteforce your password. Connections with failed authentication attempts will be rate limited but attackers WILL find weak passwords. If your account is compromised, spammers are likely to abuse your system, spamming your address and the wider internet in your name. So please pick a random, unguessable password, preferably at least 12 characters.'),
|
||||
),
|
||||
),
|
||||
async function submit(e) {
|
||||
e.stopPropagation()
|
||||
e.preventDefault()
|
||||
if (!password1.value || password1.value !== password2.value) {
|
||||
window.alert('Passwords do not match.')
|
||||
return
|
||||
}
|
||||
passwordFieldset.disabled = true
|
||||
try {
|
||||
await api.SetPassword(password1.value)
|
||||
window.alert('Password has been changed.')
|
||||
passwordForm.reset()
|
||||
} catch (err) {
|
||||
console.log({err})
|
||||
window.alert('Error: ' + err.message)
|
||||
} finally {
|
||||
passwordFieldset.disabled = false
|
||||
}
|
||||
},
|
||||
),
|
||||
dom.br(),
|
||||
dom.h2('Export'),
|
||||
dom.p('Export all messages in all mailboxes. Either in maildir format (with flags like Replied, Forwarded, Junk, etc) or in mbox format (without flags). And either as .zip file or .tgz file.'),
|
||||
dom.ul(
|
||||
dom.li(dom.a('mail-export-maildir.tgz', attr({href: 'mail-export-maildir.tgz'}))),
|
||||
dom.li(dom.a('mail-export-maildir.zip', attr({href: 'mail-export-maildir.zip'}))),
|
||||
dom.li(dom.a('mail-export-mbox.tgz', attr({href: 'mail-export-mbox.tgz'}))),
|
||||
dom.li(dom.a('mail-export-mbox.zip', attr({href: 'mail-export-mbox.zip'}))),
|
||||
),
|
||||
dom.br(),
|
||||
dom.h2('Import'),
|
||||
dom.p('Import messages from a .zip or .tgz file with maildirs and/or mbox files.'),
|
||||
importForm=dom.form(
|
||||
async function submit(e) {
|
||||
e.preventDefault()
|
||||
e.stopPropagation()
|
||||
|
||||
const request = () => {
|
||||
return new Promise((resolve, reject) => {
|
||||
// Browsers can do everything. Except show a progress bar while uploading...
|
||||
let progressBox, progressPercentage, progressBar
|
||||
dom._kids(importProgress,
|
||||
progressBox=dom.div(
|
||||
dom.div('Uploading... ', progressPercentage=dom.span()),
|
||||
),
|
||||
)
|
||||
importProgress.style.display = ''
|
||||
|
||||
const xhr = new window.XMLHttpRequest()
|
||||
xhr.open('POST', 'import', true)
|
||||
xhr.upload.addEventListener('progress', (e) => {
|
||||
if (!e.lengthComputable) {
|
||||
return
|
||||
}
|
||||
const pct = Math.floor(100*e.loaded/e.total)
|
||||
dom._kids(progressPercentage, pct+'%')
|
||||
})
|
||||
xhr.addEventListener('load', () => {
|
||||
console.log('upload done', {xhr: xhr, status: xhr.status})
|
||||
if (xhr.status !== 200) {
|
||||
reject({message: 'status '+xhr.status})
|
||||
return
|
||||
}
|
||||
let resp
|
||||
try {
|
||||
resp = JSON.parse(xhr.responseText)
|
||||
} catch (err) {
|
||||
reject({message: 'parsing response json: '+err.message})
|
||||
return
|
||||
}
|
||||
resolve(resp)
|
||||
})
|
||||
xhr.addEventListener('error', (e) => reject({message: 'upload error', event: e}))
|
||||
xhr.addEventListener('abort', (e) => reject({message: 'upload aborted', event: e}))
|
||||
xhr.send(new window.FormData(importForm))
|
||||
})
|
||||
}
|
||||
try {
|
||||
const p = request()
|
||||
importFieldset.disabled = true
|
||||
const result = await p
|
||||
|
||||
try {
|
||||
window.sessionStorage.setItem('ImportToken', result.ImportToken)
|
||||
} catch (err) {
|
||||
console.log('storing import token in session storage', {err})
|
||||
// Ignore error, could be some browser security thing like private browsing.
|
||||
}
|
||||
|
||||
await importTrack(result.ImportToken)
|
||||
} catch (err) {
|
||||
console.log({err})
|
||||
window.alert('Error: '+err.message)
|
||||
} finally {
|
||||
importFieldset.disabled = false
|
||||
}
|
||||
},
|
||||
importFieldset=dom.fieldset(
|
||||
dom.div(
|
||||
style({marginBottom: '1ex'}),
|
||||
dom.label(
|
||||
dom.div(style({marginBottom: '.5ex'}), 'File'),
|
||||
mailboxFile=dom.input(attr({type: 'file', required: '', name: 'file'}), function focus() {
|
||||
mailboxFileHint.style.display = ''
|
||||
}),
|
||||
),
|
||||
mailboxFileHint=dom.p(style({display: 'none', fontStyle: 'italic', marginTop: '.5ex'}), 'This file must either be a zip file or a gzipped tar file with mbox and/or maildir mailboxes. For maildirs, an optional file "dovecot-keywords" is read for additional keywords, like Forwarded/Junk/NotJunk. If an imported mailbox already exists by name, messages are added to the existing mailbox. If a mailbox does not yet exist it will be created.'),
|
||||
),
|
||||
dom.div(
|
||||
style({marginBottom: '1ex'}),
|
||||
dom.label(
|
||||
dom.div(style({marginBottom: '.5ex'}), 'Skip mailbox prefix (optional)'),
|
||||
mailboxPrefix=dom.input(attr({name: 'skipMailboxPrefix'}), function focus() {
|
||||
mailboxPrefixHint.style.display = ''
|
||||
}),
|
||||
),
|
||||
mailboxPrefixHint=dom.p(style({display: 'none', fontStyle: 'italic', marginTop: '.5ex'}), 'If set, any mbox/maildir path with this prefix will have it stripped before importing. For example, if all mailboxes are in a directory "Takeout", specify that path in the field above so mailboxes like "Takeout/Inbox.mbox" are imported into a mailbox called "Inbox" instead of "Takeout/Inbox".'),
|
||||
),
|
||||
dom.div(
|
||||
dom.button('Upload and import'),
|
||||
dom.p(style({fontStyle: 'italic', marginTop: '.5ex'}), 'The file is uploaded first, then its messages are imported. Importing is done in a transaction, you can abort the entire import before it is finished.'),
|
||||
),
|
||||
),
|
||||
),
|
||||
importAbortBox=dom.div(), // Outside fieldset because it gets disabled, above progress because may be scrolling it down quickly with problems.
|
||||
importProgress=dom.div(
|
||||
style({display: 'none'}),
|
||||
),
|
||||
footer,
|
||||
)
|
||||
|
||||
// Try to show the progress of an earlier import session. The user may have just
|
||||
// refreshed the browser.
|
||||
let importToken
|
||||
try {
|
||||
importToken = window.sessionStorage.getItem('ImportToken')
|
||||
} catch (err) {
|
||||
console.log('looking up ImportToken in session storage', {err})
|
||||
return
|
||||
}
|
||||
if (!importToken) {
|
||||
return
|
||||
}
|
||||
importFieldset.disabled = true
|
||||
dom._kids(importProgress,
|
||||
dom.div(
|
||||
dom.div('Reconnecting to import...'),
|
||||
),
|
||||
)
|
||||
importProgress.style.display = ''
|
||||
importTrack(importToken)
|
||||
.catch((err) => {
|
||||
if (window.confirm('Error reconnecting to import. Remove this import session?')) {
|
||||
window.sessionStorage.removeItem('ImportToken')
|
||||
dom._kids(importProgress)
|
||||
importProgress.style.display = 'none'
|
||||
}
|
||||
})
|
||||
.finally(() => {
|
||||
importFieldset.disabled = false
|
||||
})
|
||||
}
|
||||
|
||||
const destination = async (name) => {
|
||||
const [domain, destinations] = await api.Destinations()
|
||||
let dest = destinations[name]
|
||||
if (!dest) {
|
||||
throw new Error('destination not found')
|
||||
}
|
||||
|
||||
let rulesetsTbody = dom.tbody()
|
||||
let rulesetsRows = []
|
||||
|
||||
const addRulesetsRow = (rs) => {
|
||||
let headersCell = dom.td()
|
||||
let headers = [] // Holds objects: {key, value, root}
|
||||
const addHeader = (k, v) => {
|
||||
let h = {}
|
||||
h.root = dom.div(
|
||||
h.key=dom.input(attr({value: k})),
|
||||
' ',
|
||||
h.value=dom.input(attr({value: v})),
|
||||
' ',
|
||||
dom.button('-', style({width: '1.5em'}), function click(e) {
|
||||
h.root.remove()
|
||||
headers = headers.filter(x => x !== h)
|
||||
if (headers.length === 0) {
|
||||
const b = dom.button('+', style({width: '1.5em'}), function click(e) {
|
||||
e.target.remove()
|
||||
addHeader('', '')
|
||||
})
|
||||
headersCell.appendChild(dom.div(style({textAlign: 'right'}), b))
|
||||
}
|
||||
}),
|
||||
' ',
|
||||
dom.button('+', style({width: '1.5em'}), function click(e) {
|
||||
addHeader('', '')
|
||||
}),
|
||||
)
|
||||
headers.push(h)
|
||||
headersCell.appendChild(h.root)
|
||||
}
|
||||
Object.entries(rs.HeadersRegexp || {}).sort().forEach(t =>
|
||||
addHeader(t[0], t[1])
|
||||
)
|
||||
if (Object.entries(rs.HeadersRegexp || {}).length === 0) {
|
||||
const b = dom.button('+', style({width: '1.5em'}), function click(e) {
|
||||
e.target.remove()
|
||||
addHeader('', '')
|
||||
})
|
||||
headersCell.appendChild(dom.div(style({textAlign: 'right'}), b))
|
||||
}
|
||||
|
||||
let row = {headers}
|
||||
row.root=dom.tr(
|
||||
dom.td(row.SMTPMailFromRegexp=dom.input(attr({value: rs.SMTPMailFromRegexp || ''}))),
|
||||
dom.td(row.VerifiedDomain=dom.input(attr({value: rs.VerifiedDomain || ''}))),
|
||||
headersCell,
|
||||
dom.td(row.ListAllowDomain=dom.input(attr({value: rs.ListAllowDomain || ''}))),
|
||||
dom.td(row.Mailbox=dom.input(attr({value: rs.Mailbox || ''}))),
|
||||
dom.td(
|
||||
dom.button('Remove ruleset', function click(e) {
|
||||
row.root.remove()
|
||||
rulesetsRows = rulesetsRows.filter(e => e !== row)
|
||||
}),
|
||||
),
|
||||
)
|
||||
rulesetsRows.push(row)
|
||||
rulesetsTbody.appendChild(row.root)
|
||||
}
|
||||
|
||||
(dest.Rulesets || []).forEach(rs => {
|
||||
addRulesetsRow(rs)
|
||||
})
|
||||
|
||||
let defaultMailbox
|
||||
let saveButton
|
||||
|
||||
const page = document.getElementById('page')
|
||||
dom._kids(page,
|
||||
crumbs(
|
||||
crumblink('Mox Account', '#'),
|
||||
'Destination ' + name,
|
||||
),
|
||||
dom.div(
|
||||
dom.span('Default mailbox', attr({title: 'Default mailbox where email for this recipient is delivered to if it does not match any ruleset. Default is Inbox.'})),
|
||||
dom.br(),
|
||||
defaultMailbox=dom.input(attr({value: dest.Mailbox, placeholder: 'Inbox'})),
|
||||
|
||||
),
|
||||
dom.br(),
|
||||
dom.h2('Rulesets'),
|
||||
dom.p('Incoming messages are checked against the rulesets. If a ruleset matches, the message is delivered to the mailbox configured for the ruleset instead of to the default mailbox.'),
|
||||
dom.p('The "List allow domain" does not affect the matching, but skips the regular spam checks if one of the verified domains is a (sub)domain of the domain mentioned here.'),
|
||||
dom.table(
|
||||
dom.thead(
|
||||
dom.tr(
|
||||
dom.th('SMTP "MAIL FROM" regexp', attr({title: 'Matches if this regular expression matches (a substring of) the SMTP MAIL FROM address (not the message From-header). E.g. user@example.org.'})),
|
||||
dom.th('Verified domain', attr({title: 'Matches if this domain matches an SPF- and/or DKIM-verified (sub)domain.'})),
|
||||
dom.th('Headers regexp', attr({title: 'Matches if these header field/value regular expressions all match (substrings of) the message headers. Header fields and values are converted to lower case before matching. Whitespace is trimmed from the value before matching. A header field can occur multiple times in a message, only one instance has to match. For mailing lists, you could match on ^list-id$ with the value typically the mailing list address in angled brackets with @ replaced with a dot, e.g. <name\\.lists\\.example\\.org>.'})),
|
||||
dom.th('List allow domain', attr({title: "Influence the spam filtering, this does not change whether this ruleset applies to a message. If this domain matches an SPF- and/or DKIM-verified (sub)domain, the message is accepted without further spam checks, such as a junk filter or DMARC reject evaluation. DMARC rejects should not apply for mailing lists that are not configured to rewrite the From-header of messages that don't have a passing DKIM signature of the From-domain. Otherwise, by rejecting messages, you may be automatically unsubscribed from the mailing list. The assumption is that mailing lists do their own spam filtering/moderation."})),
|
||||
dom.th('Mailbox', attr({title: 'Mailbox to deliver to if this ruleset matches.'})),
|
||||
dom.th('Action'),
|
||||
)
|
||||
),
|
||||
rulesetsTbody,
|
||||
dom.tfoot(
|
||||
dom.tr(
|
||||
dom.td(attr({colspan: '5'})),
|
||||
dom.td(
|
||||
dom.button('Add ruleset', function click(e) {
|
||||
addRulesetsRow({})
|
||||
}),
|
||||
),
|
||||
),
|
||||
),
|
||||
),
|
||||
dom.br(),
|
||||
saveButton=dom.button('Save', async function click(e) {
|
||||
saveButton.disabled = true
|
||||
try {
|
||||
const newDest = {
|
||||
Mailbox: defaultMailbox.value,
|
||||
Rulesets: rulesetsRows.map(row => {
|
||||
return {
|
||||
SMTPMailFromRegexp: row.SMTPMailFromRegexp.value,
|
||||
VerifiedDomain: row.VerifiedDomain.value,
|
||||
HeadersRegexp: Object.fromEntries(row.headers.map(h => [h.key.value, h.value.value])),
|
||||
ListAllowDomain: row.ListAllowDomain.value,
|
||||
Mailbox: row.Mailbox.value,
|
||||
}
|
||||
}),
|
||||
}
|
||||
page.classList.add('loading')
|
||||
await api.DestinationSave(name, dest, newDest)
|
||||
dest = newDest // Set new dest, for if user edits again. Without this, they would get an error that the config has been modified.
|
||||
} catch (err) {
|
||||
console.log({err})
|
||||
window.alert('Error: '+err.message)
|
||||
return
|
||||
} finally {
|
||||
saveButton.disabled = false
|
||||
page.classList.remove('loading')
|
||||
}
|
||||
}),
|
||||
)
|
||||
}
|
||||
|
||||
const init = async () => {
|
||||
let curhash
|
||||
|
||||
const page = document.getElementById('page')
|
||||
|
||||
const hashChange = async () => {
|
||||
if (curhash === window.location.hash) {
|
||||
return
|
||||
}
|
||||
let h = decodeURIComponent(window.location.hash)
|
||||
if (h !== '' && h.substring(0, 1) == '#') {
|
||||
h = h.substring(1)
|
||||
}
|
||||
const t = h.split('/')
|
||||
page.classList.add('loading')
|
||||
try {
|
||||
if (h === '') {
|
||||
await index()
|
||||
} else if (t[0] === 'destinations' && t.length === 2) {
|
||||
await destination(t[1])
|
||||
} else {
|
||||
dom._kids(page, 'page not found')
|
||||
}
|
||||
} catch (err) {
|
||||
console.log({err})
|
||||
window.alert('Error: ' + err.message)
|
||||
window.location.hash = curhash
|
||||
curhash = window.location.hash
|
||||
return
|
||||
}
|
||||
curhash = window.location.hash
|
||||
page.classList.remove('loading')
|
||||
}
|
||||
window.addEventListener('hashchange', hashChange)
|
||||
hashChange()
|
||||
}
|
||||
|
||||
window.addEventListener('load', init)
|
||||
</script>
|
||||
</body>
|
||||
</html>
|
http/account_test.go (181 lines deleted)
@@ -1,181 +0,0 @@
|
||||
package http
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"archive/zip"
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"mime/multipart"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/mjl-/mox/mlog"
|
||||
"github.com/mjl-/mox/mox-"
|
||||
"github.com/mjl-/mox/store"
|
||||
)
|
||||
|
||||
func tcheck(t *testing.T, err error, msg string) {
|
||||
t.Helper()
|
||||
if err != nil {
|
||||
t.Fatalf("%s: %s", msg, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAccount(t *testing.T) {
|
||||
os.RemoveAll("../testdata/httpaccount/data")
|
||||
mox.ConfigStaticPath = "../testdata/httpaccount/mox.conf"
|
||||
mox.ConfigDynamicPath = filepath.Join(filepath.Dir(mox.ConfigStaticPath), "domains.conf")
|
||||
mox.MustLoadConfig(false)
|
||||
acc, err := store.OpenAccount("mjl")
|
||||
tcheck(t, err, "open account")
|
||||
defer acc.Close()
|
||||
switchDone := store.Switchboard()
|
||||
defer close(switchDone)
|
||||
|
||||
log := mlog.New("store")
|
||||
|
||||
test := func(authHdr string, expect string) {
|
||||
t.Helper()
|
||||
|
||||
w := httptest.NewRecorder()
|
||||
r := httptest.NewRequest("GET", "/ignored", nil)
|
||||
if authHdr != "" {
|
||||
r.Header.Add("Authorization", authHdr)
|
||||
}
|
||||
ok := checkAccountAuth(context.Background(), log, w, r)
|
||||
if ok != expect {
|
||||
t.Fatalf("got %v, expected %v", ok, expect)
|
||||
}
|
||||
}
|
||||
|
||||
const authOK = "Basic bWpsQG1veC5leGFtcGxlOnRlc3QxMjM0" // mjl@mox.example:test1234
|
||||
const authBad = "Basic bWpsQG1veC5leGFtcGxlOmJhZHBhc3N3b3Jk" // mjl@mox.example:badpassword
|
||||
|
||||
authCtx := context.WithValue(context.Background(), authCtxKey, "mjl")
|
||||
|
||||
test(authOK, "") // No password set yet.
|
||||
Account{}.SetPassword(authCtx, "test1234")
|
||||
test(authOK, "mjl")
|
||||
test(authBad, "")
|
||||
|
||||
_, dests := Account{}.Destinations(authCtx)
|
||||
Account{}.DestinationSave(authCtx, "mjl", dests["mjl"], dests["mjl"]) // todo: save modified value and compare it afterwards
|
||||
|
||||
go importManage()
|
||||
|
||||
// Import mbox/maildir tgz/zip.
|
||||
testImport := func(filename string, expect int) {
|
||||
t.Helper()
|
||||
|
||||
var reqBody bytes.Buffer
|
||||
mpw := multipart.NewWriter(&reqBody)
|
||||
part, err := mpw.CreateFormFile("file", path.Base(filename))
|
||||
tcheck(t, err, "creating form file")
|
||||
buf, err := os.ReadFile(filename)
|
||||
tcheck(t, err, "reading file")
|
||||
_, err = part.Write(buf)
|
||||
tcheck(t, err, "write part")
|
||||
err = mpw.Close()
|
||||
tcheck(t, err, "close multipart writer")
|
||||
|
||||
r := httptest.NewRequest("POST", "/import", &reqBody)
|
||||
r.Header.Add("Content-Type", mpw.FormDataContentType())
|
||||
r.Header.Add("Authorization", authOK)
|
||||
w := httptest.NewRecorder()
|
||||
accountHandle(w, r)
|
||||
if w.Code != http.StatusOK {
|
||||
t.Fatalf("import, got status code %d, expected 200: %s", w.Code, w.Body.Bytes())
|
||||
}
|
||||
m := map[string]string{}
|
||||
if err := json.Unmarshal(w.Body.Bytes(), &m); err != nil {
|
||||
t.Fatalf("parsing import response: %v", err)
|
||||
}
|
||||
token := m["ImportToken"]
|
||||
|
||||
l := importListener{token, make(chan importEvent, 100), make(chan bool)}
|
||||
importers.Register <- &l
|
||||
if !<-l.Register {
|
||||
t.Fatalf("register failed")
|
||||
}
|
||||
defer func() {
|
||||
importers.Unregister <- &l
|
||||
}()
|
||||
count := 0
|
||||
loop:
|
||||
for {
|
||||
e := <-l.Events
|
||||
switch x := e.Event.(type) {
|
||||
case importCount:
|
||||
count += x.Count
|
||||
case importProblem:
|
||||
t.Fatalf("unexpected problem: %q", x.Message)
|
||||
case importDone:
|
||||
break loop
|
||||
case importAborted:
|
||||
t.Fatalf("unexpected aborted import")
|
||||
default:
|
||||
panic("missing case")
|
||||
}
|
||||
}
|
||||
if count != expect {
|
||||
t.Fatalf("imported %d messages, expected %d", count, expect)
|
||||
}
|
||||
}
|
||||
testImport("../testdata/importtest.mbox.zip", 2)
|
||||
testImport("../testdata/importtest.maildir.tgz", 2)
|
||||
|
||||
testExport := func(httppath string, iszip bool, expectFiles int) {
|
||||
t.Helper()
|
||||
|
||||
r := httptest.NewRequest("GET", httppath, nil)
|
||||
r.Header.Add("Authorization", authOK)
|
||||
w := httptest.NewRecorder()
|
||||
accountHandle(w, r)
|
||||
if w.Code != http.StatusOK {
|
||||
t.Fatalf("export, got status code %d, expected 200: %s", w.Code, w.Body.Bytes())
|
||||
}
|
||||
var count int
|
||||
if iszip {
|
||||
buf := w.Body.Bytes()
|
||||
zr, err := zip.NewReader(bytes.NewReader(buf), int64(len(buf)))
|
||||
tcheck(t, err, "reading zip")
|
||||
for _, f := range zr.File {
|
||||
if !strings.HasSuffix(f.Name, "/") {
|
||||
count++
|
||||
}
|
||||
}
|
||||
} else {
|
||||
gzr, err := gzip.NewReader(w.Body)
|
||||
tcheck(t, err, "gzip reader")
|
||||
tr := tar.NewReader(gzr)
|
||||
for {
|
||||
h, err := tr.Next()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
tcheck(t, err, "next file in tar")
|
||||
if !strings.HasSuffix(h.Name, "/") {
|
||||
count++
|
||||
}
|
||||
_, err = io.Copy(io.Discard, tr)
|
||||
tcheck(t, err, "reading from tar")
|
||||
}
|
||||
}
|
||||
if count != expectFiles {
|
||||
t.Fatalf("export, has %d files, expected %d", count, expectFiles)
|
||||
}
|
||||
}
|
||||
|
||||
testExport("/mail-export-maildir.tgz", false, 6) // 2 mailboxes, each with 2 messages and a dovecot-keyword file
|
||||
testExport("/mail-export-maildir.zip", true, 6)
|
||||
testExport("/mail-export-mbox.tgz", false, 2)
|
||||
testExport("/mail-export-mbox.zip", true, 2)
|
||||
}
|
http/accountapi.json (181 lines deleted)
@@ -1,181 +0,0 @@
|
||||
{
|
||||
"Name": "Account",
|
||||
"Docs": "Account exports web API functions for the account web interface. All its\nmethods are exported under /api/. Function calls require valid HTTP\nAuthentication credentials of a user.",
|
||||
"Functions": [
|
||||
{
|
||||
"Name": "SetPassword",
|
||||
"Docs": "SetPassword saves a new password for the account, invalidating the previous password.\nSessions are not interrupted, and will keep working. New login attempts must use the new password.\nPassword must be at least 8 characters.",
|
||||
"Params": [
|
||||
{
|
||||
"Name": "password",
|
||||
"Typewords": [
|
||||
"string"
|
||||
]
|
||||
}
|
||||
],
|
||||
"Returns": []
|
||||
},
|
||||
{
|
||||
"Name": "Destinations",
|
||||
"Docs": "Destinations returns the default domain, and the destinations (keys are email\naddresses, or localparts to the default domain).\ntodo: replace with a function that returns the whole account, when sherpadoc understands unnamed struct fields.",
|
||||
"Params": [],
|
||||
"Returns": [
|
||||
{
|
||||
"Name": "r0",
|
||||
"Typewords": [
|
||||
"Domain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Name": "r1",
|
||||
"Typewords": [
|
||||
"{}",
|
||||
"Destination"
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"Name": "DestinationSave",
|
||||
"Docs": "DestinationSave updates a destination.\nOldDest is compared against the current destination. If it does not match, an\nerror is returned. Otherwise newDest is saved and the configuration reloaded.",
|
||||
"Params": [
|
||||
{
|
||||
"Name": "destName",
|
||||
"Typewords": [
|
||||
"string"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Name": "oldDest",
|
||||
"Typewords": [
|
||||
"Destination"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Name": "newDest",
|
||||
"Typewords": [
|
||||
"Destination"
|
||||
]
|
||||
}
|
||||
],
|
||||
"Returns": []
|
||||
},
|
||||
{
|
||||
"Name": "ImportAbort",
|
||||
"Docs": "ImportAbort aborts an import that is in progress. If the import exists and isn't\nfinished, no changes will have been made by the import.",
|
||||
"Params": [
|
||||
{
|
||||
"Name": "importToken",
|
||||
"Typewords": [
|
||||
"string"
|
||||
]
|
||||
}
|
||||
],
|
||||
"Returns": []
|
||||
}
|
||||
],
|
||||
"Sections": [],
|
||||
"Structs": [
|
||||
{
|
||||
"Name": "Domain",
|
||||
"Docs": "Domain is a domain name, with one or more labels, with at least an ASCII\nrepresentation, and for IDNA non-ASCII domains a unicode representation.\nThe ASCII string must be used for DNS lookups.",
|
||||
"Fields": [
|
||||
{
|
||||
"Name": "ASCII",
|
||||
"Docs": "A non-unicode domain, e.g. with A-labels (xn--...) or NR-LDH (non-reserved letters/digits/hyphens) labels. Always in lower case.",
|
||||
"Typewords": [
|
||||
"string"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Name": "Unicode",
|
||||
"Docs": "Name as U-labels. Empty if this is an ASCII-only domain.",
|
||||
"Typewords": [
|
||||
"string"
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"Name": "Destination",
|
||||
"Docs": "",
|
||||
"Fields": [
|
||||
{
|
||||
"Name": "Mailbox",
|
||||
"Docs": "",
|
||||
"Typewords": [
|
||||
"string"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Name": "Rulesets",
|
||||
"Docs": "",
|
||||
"Typewords": [
|
||||
"[]",
|
||||
"Ruleset"
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"Name": "Ruleset",
|
||||
"Docs": "",
|
||||
"Fields": [
|
||||
{
|
||||
"Name": "SMTPMailFromRegexp",
|
||||
"Docs": "",
|
||||
"Typewords": [
|
||||
"string"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Name": "VerifiedDomain",
|
||||
"Docs": "",
|
||||
"Typewords": [
|
||||
"string"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Name": "HeadersRegexp",
|
||||
"Docs": "",
|
||||
"Typewords": [
|
||||
"{}",
|
||||
"string"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Name": "ListAllowDomain",
|
||||
"Docs": "",
|
||||
"Typewords": [
|
||||
"string"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Name": "Mailbox",
|
||||
"Docs": "",
|
||||
"Typewords": [
|
||||
"string"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Name": "VerifiedDNSDomain",
|
||||
"Docs": "",
|
||||
"Typewords": [
|
||||
"Domain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Name": "ListAllowDNSDomain",
|
||||
"Docs": "",
|
||||
"Typewords": [
|
||||
"Domain"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"Ints": [],
|
||||
"Strings": [],
|
||||
"SherpaVersion": 0,
|
||||
"SherpadocVersion": 1
|
||||
}
|
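Illustrative only, not part of this change: the Sherpa doc above describes the Destination and Ruleset structures that the account page edits. A minimal Go sketch of one such value, using the field names documented above; the regexp, domain and mailbox values are made-up examples, and the types are assumed to live in github.com/mjl-/mox/config as referenced by the handlers earlier in this diff.

var exampleDest = config.Destination{
	Mailbox: "Inbox",
	Rulesets: []config.Ruleset{
		{
			// Deliver matching mailing-list messages to their own mailbox;
			// header field names and values are matched as regular expressions.
			HeadersRegexp:   map[string]string{"^list-id$": `<name\.lists\.example\.org>`},
			ListAllowDomain: "lists.example.org",
			Mailbox:         "Lists/name",
		},
	},
}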
http/admin.go (1599 lines): diff suppressed because it is too large
http/admin.html (2174 lines): diff suppressed because it is too large
http/admin_test.go (133 lines deleted)
@@ -1,133 +0,0 @@
|
||||
package http
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/ed25519"
|
||||
"net"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"golang.org/x/crypto/bcrypt"
|
||||
|
||||
"github.com/mjl-/mox/config"
|
||||
"github.com/mjl-/mox/dns"
|
||||
"github.com/mjl-/mox/mox-"
|
||||
)
|
||||
|
||||
func init() {
|
||||
mox.LimitersInit()
|
||||
}
|
||||
|
||||
func TestAdminAuth(t *testing.T) {
|
||||
test := func(passwordfile, authHdr string, expect bool) {
|
||||
t.Helper()
|
||||
|
||||
w := httptest.NewRecorder()
|
||||
r := httptest.NewRequest("GET", "/ignored", nil)
|
||||
if authHdr != "" {
|
||||
r.Header.Add("Authorization", authHdr)
|
||||
}
|
||||
ok := checkAdminAuth(context.Background(), passwordfile, w, r)
|
||||
if ok != expect {
|
||||
t.Fatalf("got %v, expected %v", ok, expect)
|
||||
}
|
||||
}
|
||||
|
||||
const authOK = "Basic YWRtaW46bW94dGVzdDEyMw==" // admin:moxtest123
|
||||
const authBad = "Basic YWRtaW46YmFkcGFzc3dvcmQ=" // admin:badpassword
|
||||
|
||||
const path = "../testdata/http-passwordfile"
|
||||
os.Remove(path)
|
||||
defer os.Remove(path)
|
||||
|
||||
test(path, authOK, false) // Password file does not exist.
|
||||
|
||||
adminpwhash, err := bcrypt.GenerateFromPassword([]byte("moxtest123"), bcrypt.DefaultCost)
|
||||
if err != nil {
|
||||
t.Fatalf("generate bcrypt hash: %v", err)
|
||||
}
|
||||
if err := os.WriteFile(path, adminpwhash, 0660); err != nil {
|
||||
t.Fatalf("write password file: %v", err)
|
||||
}
|
||||
// We loop to also exercise the auth cache.
|
||||
for i := 0; i < 2; i++ {
|
||||
test(path, "", false) // Empty/missing header.
|
||||
test(path, "Malformed ", false) // Not "Basic"
|
||||
test(path, "Basic malformed ", false) // Bad base64.
|
||||
test(path, "Basic dGVzdA== ", false) // base64 is ok, but wrong tokens inside.
|
||||
test(path, authBad, false) // Wrong password.
|
||||
test(path, authOK, true)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckDomain(t *testing.T) {
|
||||
// NOTE: we aren't currently looking at the results, having the code paths executed is better than nothing.
|
||||
|
||||
resolver := dns.MockResolver{
|
||||
MX: map[string][]*net.MX{
|
||||
"mox.example.": {{Host: "mail.mox.example.", Pref: 10}},
|
||||
},
|
||||
A: map[string][]string{
|
||||
"mail.mox.example.": {"127.0.0.2"},
|
||||
},
|
||||
AAAA: map[string][]string{
|
||||
"mail.mox.example.": {"127.0.0.2"},
|
||||
},
|
||||
TXT: map[string][]string{
|
||||
"mox.example.": {"v=spf1 mx -all"},
|
||||
"test._domainkey.mox.example.": {"v=DKIM1;h=sha256;k=ed25519;p=ln5zd/JEX4Jy60WAhUOv33IYm2YZMyTQAdr9stML504="},
|
||||
"_dmarc.mox.example.": {"v=DMARC1; p=reject; rua=mailto:mjl@mox.example"},
|
||||
"_smtp._tls.mox.example": {"v=TLSRPTv1; rua=mailto:tlsrpt@mox.example;"},
|
||||
"_mta-sts.mox.example": {"v=STSv1; id=20160831085700Z"},
|
||||
},
|
||||
CNAME: map[string]string{},
|
||||
}
|
||||
|
||||
listener := config.Listener{
|
||||
IPs: []string{"127.0.0.2"},
|
||||
Hostname: "mox.example",
|
||||
HostnameDomain: dns.Domain{ASCII: "mox.example"},
|
||||
}
|
||||
listener.SMTP.Enabled = true
|
||||
listener.AutoconfigHTTPS.Enabled = true
|
||||
listener.MTASTSHTTPS.Enabled = true
|
||||
|
||||
mox.Conf.Static.Listeners = map[string]config.Listener{
|
||||
"public": listener,
|
||||
}
|
||||
domain := config.Domain{
|
||||
DKIM: config.DKIM{
|
||||
Selectors: map[string]config.Selector{
|
||||
"test": {
|
||||
HashEffective: "sha256",
|
||||
HeadersEffective: []string{"From", "Date", "Subject"},
|
||||
Key: ed25519.NewKeyFromSeed(make([]byte, 32)), // warning: fake zero key, do not copy this code.
|
||||
Domain: dns.Domain{ASCII: "test"},
|
||||
},
|
||||
"missing": {
|
||||
HashEffective: "sha256",
|
||||
HeadersEffective: []string{"From", "Date", "Subject"},
|
||||
Key: ed25519.NewKeyFromSeed(make([]byte, 32)), // warning: fake zero key, do not copy this code.
|
||||
Domain: dns.Domain{ASCII: "missing"},
|
||||
},
|
||||
},
|
||||
Sign: []string{"test", "test2"},
|
||||
},
|
||||
}
|
||||
mox.Conf.Dynamic.Domains = map[string]config.Domain{
|
||||
"mox.example": domain,
|
||||
}
|
||||
|
||||
// Make a dialer that fails immediately before actually connecting.
|
||||
done := make(chan struct{})
|
||||
close(done)
|
||||
dialer := &net.Dialer{Deadline: time.Now().Add(-time.Second), Cancel: done}
|
||||
|
||||
checkDomain(context.Background(), resolver, dialer, "mox.example")
|
||||
// todo: check returned data
|
||||
|
||||
Admin{}.Domains(context.Background()) // todo: check results
|
||||
dnsblsStatus(context.Background(), resolver) // todo: check results
|
||||
}
|
http/adminapi.json (3476 lines): diff suppressed because it is too large
http/atime.go (new file, 16 lines)
@@ -0,0 +1,16 @@
//go:build !netbsd && !freebsd && !darwin && !windows

package http

import (
	"fmt"
	"syscall"
)

func statAtime(sys any) (int64, error) {
	x, ok := sys.(*syscall.Stat_t)
	if !ok {
		return 0, fmt.Errorf("sys is a %T, expected *syscall.Stat_t", sys)
	}
	return int64(x.Atim.Sec)*1000*1000*1000 + int64(x.Atim.Nsec), nil
}
http/atime_bsd.go (new file, 16 lines)
@@ -0,0 +1,16 @@
//go:build netbsd || freebsd || darwin

package http

import (
	"fmt"
	"syscall"
)

func statAtime(sys any) (int64, error) {
	x, ok := sys.(*syscall.Stat_t)
	if !ok {
		return 0, fmt.Errorf("stat sys is a %T, expected *syscall.Stat_t", sys)
	}
	return int64(x.Atimespec.Sec)*1000*1000*1000 + int64(x.Atimespec.Nsec), nil
}
http/atime_windows.go (new file, 16 lines)
@@ -0,0 +1,16 @@
//go:build windows

package http

import (
	"fmt"
	"syscall"
)

func statAtime(sys any) (int64, error) {
	x, ok := sys.(*syscall.Win32FileAttributeData)
	if !ok {
		return 0, fmt.Errorf("sys is a %T, expected *syscall.Win32FileAttributeData", sys)
	}
	return x.LastAccessTime.Nanoseconds(), nil
}
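The three build-tagged files above all expose the same statAtime helper. A minimal usage sketch, not part of this change: the argument is whatever os.FileInfo.Sys() returns, and the result is the access time in nanoseconds since the Unix epoch. The fileAtime wrapper name and its path argument are hypothetical; os and time are assumed to be imported wherever this is placed.

func fileAtime(path string) (time.Time, error) {
	fi, err := os.Stat(path)
	if err != nil {
		return time.Time{}, err
	}
	// FileInfo.Sys() yields the platform-specific stat value that the
	// build-tagged statAtime variants know how to interpret.
	atimeNs, err := statAtime(fi.Sys())
	if err != nil {
		return time.Time{}, err
	}
	return time.Unix(0, atimeNs), nil
}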
http/autoconf.go (374 lines changed)
@@ -3,14 +3,16 @@ package http
|
||||
import (
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"rsc.io/qr"
|
||||
|
||||
"github.com/mjl-/mox/config"
|
||||
"github.com/mjl-/mox/mlog"
|
||||
"github.com/mjl-/mox/mox-"
|
||||
"github.com/mjl-/mox/admin"
|
||||
"github.com/mjl-/mox/dns"
|
||||
"github.com/mjl-/mox/smtp"
|
||||
)
|
||||
|
||||
@@ -36,7 +38,9 @@ var (
|
||||
// - Thunderbird will request an "autoconfig" xml file.
|
||||
// - Microsoft tools will request an "autodiscovery" xml file.
|
||||
// - In my tests on an internal domain, iOS mail only talks to Apple servers, then
|
||||
// does not attempt autoconfiguration. Possibly due to them being private DNS names.
|
||||
// does not attempt autoconfiguration. Possibly due to them being private DNS
|
||||
// names. Apple software can be provisioned with "mobileconfig" profile files,
|
||||
// which users can download after logging in.
|
||||
//
|
||||
// DNS records seem optional, but autoconfig.<domain> and autodiscover.<domain>
|
||||
// (both CNAME or A) are useful, and so is SRV _autodiscovery._tcp.<domain> 0 0 443
|
||||
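A rough sketch, not part of this change, of exercising the handler below with httptest in the style of the tests earlier in this diff. The request path is the conventional Thunderbird autoconfig location and is an assumption here; the handler itself only inspects the emailaddress parameter and the Host header.

	r := httptest.NewRequest("GET", "/mail/config-v1.1.xml?emailaddress=mjl@mox.example", nil)
	r.Host = "autoconfig.mox.example"
	w := httptest.NewRecorder()
	autoconfHandle(w, r)
	// w.Body now holds the clientConfig XML that Thunderbird parses.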
@@ -52,7 +56,7 @@
|
||||
// User should create a DNS record: autoconfig.<domain> (CNAME or A).
|
||||
// See https://wiki.mozilla.org/Thunderbird:Autoconfiguration:ConfigFileFormat
|
||||
func autoconfHandle(w http.ResponseWriter, r *http.Request) {
|
||||
log := xlog.WithContext(r.Context())
|
||||
log := pkglog.WithContext(r.Context())
|
||||
|
||||
var addrDom string
|
||||
defer func() {
|
||||
@@ -60,99 +64,123 @@ func autoconfHandle(w http.ResponseWriter, r *http.Request) {
|
||||
}()
|
||||
|
||||
email := r.FormValue("emailaddress")
|
||||
log.Debug("autoconfig request", mlog.Field("email", email))
|
||||
addr, err := smtp.ParseAddress(email)
|
||||
log.Debug("autoconfig request", slog.String("email", email))
|
||||
var domain dns.Domain
|
||||
if email == "" {
|
||||
email = "%EMAILADDRESS%"
|
||||
// Declare this here rather than using := to avoid shadowing domain from
|
||||
// the outer scope.
|
||||
var err error
|
||||
domain, err = dns.ParseDomain(r.Host)
|
||||
if err != nil {
|
||||
http.Error(w, fmt.Sprintf("400 - bad request - invalid domain: %s", r.Host), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
domain.ASCII = strings.TrimPrefix(domain.ASCII, "autoconfig.")
|
||||
domain.Unicode = strings.TrimPrefix(domain.Unicode, "autoconfig.")
|
||||
} else {
|
||||
addr, err := smtp.ParseAddress(email)
|
||||
if err != nil {
|
||||
http.Error(w, "400 - bad request - invalid parameter emailaddress", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
domain = addr.Domain
|
||||
}
|
||||
|
||||
socketType := func(tlsMode admin.TLSMode) (string, error) {
|
||||
switch tlsMode {
|
||||
case admin.TLSModeImmediate:
|
||||
return "SSL", nil
|
||||
case admin.TLSModeSTARTTLS:
|
||||
return "STARTTLS", nil
|
||||
case admin.TLSModeNone:
|
||||
return "plain", nil
|
||||
default:
|
||||
return "", fmt.Errorf("unknown tls mode %v", tlsMode)
|
||||
}
|
||||
}
|
||||
|
||||
var imapTLS, submissionTLS string
|
||||
config, err := admin.ClientConfigDomain(domain)
|
||||
if err == nil {
|
||||
imapTLS, err = socketType(config.IMAP.TLSMode)
|
||||
}
|
||||
if err == nil {
|
||||
submissionTLS, err = socketType(config.Submission.TLSMode)
|
||||
}
|
||||
if err != nil {
|
||||
http.Error(w, "400 - bad request - invalid parameter emailaddress", http.StatusBadRequest)
|
||||
http.Error(w, "400 - bad request - "+err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
if _, ok := mox.Conf.Domain(addr.Domain); !ok {
|
||||
http.Error(w, "400 - bad request - unknown domain", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
addrDom = addr.Domain.Name()
|
||||
|
||||
hostname := mox.Conf.Static.HostnameDomain
|
||||
|
||||
// Thunderbird doesn't seem to allow U-labels, always return ASCII names.
|
||||
var resp autoconfigResponse
|
||||
resp.Version = "1.1"
|
||||
resp.EmailProvider.ID = addr.Domain.ASCII
|
||||
resp.EmailProvider.Domain = addr.Domain.ASCII
|
||||
resp.EmailProvider.ID = domain.ASCII
|
||||
resp.EmailProvider.Domain = domain.ASCII
|
||||
resp.EmailProvider.DisplayName = email
|
||||
resp.EmailProvider.DisplayShortName = addr.Domain.ASCII
|
||||
|
||||
var imapPort int
|
||||
var imapSocket string
|
||||
for _, l := range mox.Conf.Static.Listeners {
|
||||
if l.IMAPS.Enabled {
|
||||
imapSocket = "SSL"
|
||||
imapPort = config.Port(l.IMAPS.Port, 993)
|
||||
} else if l.IMAP.Enabled {
|
||||
if l.TLS != nil && imapSocket != "SSL" {
|
||||
imapSocket = "STARTTLS"
|
||||
imapPort = config.Port(l.IMAP.Port, 143)
|
||||
} else if imapSocket == "" {
|
||||
imapSocket = "plain"
|
||||
imapPort = config.Port(l.IMAP.Port, 143)
|
||||
}
|
||||
}
|
||||
}
|
||||
if imapPort == 0 {
|
||||
log.Error("autoconfig: no imap configured?")
|
||||
}
|
||||
resp.EmailProvider.DisplayShortName = domain.ASCII
|
||||
|
||||
// todo: specify SCRAM-SHA-256 once Thunderbird and autoconfig support it. Or perhaps that will fall under "password-encrypted" by then.
|
||||
// todo: let user configure whether they prefer or require TLS client auth and specify "TLS-client-cert"
|
||||
|
||||
resp.EmailProvider.IncomingServer.Type = "imap"
|
||||
resp.EmailProvider.IncomingServer.Hostname = hostname.ASCII
|
||||
resp.EmailProvider.IncomingServer.Port = imapPort
|
||||
resp.EmailProvider.IncomingServer.SocketType = imapSocket
|
||||
resp.EmailProvider.IncomingServer.Username = email
|
||||
resp.EmailProvider.IncomingServer.Authentication = "password-encrypted"
|
||||
|
||||
var smtpPort int
|
||||
var smtpSocket string
|
||||
for _, l := range mox.Conf.Static.Listeners {
|
||||
if l.Submissions.Enabled {
|
||||
smtpSocket = "SSL"
|
||||
smtpPort = config.Port(l.Submissions.Port, 465)
|
||||
} else if l.Submission.Enabled {
|
||||
if l.TLS != nil && smtpSocket != "SSL" {
|
||||
smtpSocket = "STARTTLS"
|
||||
smtpPort = config.Port(l.Submission.Port, 587)
|
||||
} else if smtpSocket == "" {
|
||||
smtpSocket = "plain"
|
||||
smtpPort = config.Port(l.Submission.Port, 587)
|
||||
}
|
||||
incoming := incomingServer{
|
||||
"imap",
|
||||
config.IMAP.Host.ASCII,
|
||||
config.IMAP.Port,
|
||||
imapTLS,
|
||||
email,
|
||||
"password-encrypted",
|
||||
}
|
||||
resp.EmailProvider.IncomingServers = append(resp.EmailProvider.IncomingServers, incoming)
|
||||
if config.IMAP.EnabledOnHTTPS {
|
||||
tlsMode, _ := socketType(admin.TLSModeImmediate)
|
||||
incomingALPN := incomingServer{
|
||||
"imap",
|
||||
config.IMAP.Host.ASCII,
|
||||
443,
|
||||
tlsMode,
|
||||
email,
|
||||
"password-encrypted",
|
||||
}
|
||||
}
|
||||
if smtpPort == 0 {
|
||||
log.Error("autoconfig: no smtp submission configured?")
|
||||
resp.EmailProvider.IncomingServers = append(resp.EmailProvider.IncomingServers, incomingALPN)
|
||||
}
|
||||
|
||||
resp.EmailProvider.OutgoingServer.Type = "smtp"
|
||||
resp.EmailProvider.OutgoingServer.Hostname = hostname.ASCII
|
||||
resp.EmailProvider.OutgoingServer.Port = smtpPort
|
||||
resp.EmailProvider.OutgoingServer.SocketType = smtpSocket
|
||||
resp.EmailProvider.OutgoingServer.Username = email
|
||||
resp.EmailProvider.OutgoingServer.Authentication = "password-encrypted"
|
||||
outgoing := outgoingServer{
|
||||
"smtp",
|
||||
config.Submission.Host.ASCII,
|
||||
config.Submission.Port,
|
||||
submissionTLS,
|
||||
email,
|
||||
"password-encrypted",
|
||||
}
|
||||
resp.EmailProvider.OutgoingServers = append(resp.EmailProvider.OutgoingServers, outgoing)
|
||||
if config.Submission.EnabledOnHTTPS {
|
||||
tlsMode, _ := socketType(admin.TLSModeImmediate)
|
||||
outgoingALPN := outgoingServer{
|
||||
"smtp",
|
||||
config.Submission.Host.ASCII,
|
||||
443,
|
||||
tlsMode,
|
||||
email,
|
||||
"password-encrypted",
|
||||
}
|
||||
resp.EmailProvider.OutgoingServers = append(resp.EmailProvider.OutgoingServers, outgoingALPN)
|
||||
}
|
||||
|
||||
// todo: should we put the email address in the URL?
|
||||
resp.ClientConfigUpdate.URL = fmt.Sprintf("https://%s/mail/config-v1.1.xml", hostname.ASCII)
|
||||
resp.ClientConfigUpdate.URL = fmt.Sprintf("https://autoconfig.%s/mail/config-v1.1.xml", domain.ASCII)
|
||||
|
||||
w.Header().Set("Content-Type", "application/xml; charset=utf-8")
|
||||
enc := xml.NewEncoder(w)
|
||||
enc.Indent("", "\t")
|
||||
fmt.Fprint(w, xml.Header)
|
||||
if err := enc.Encode(resp); err != nil {
|
||||
log.Errorx("marshal autoconfig response", err)
|
||||
}
|
||||
err = enc.Encode(resp)
|
||||
log.Check(err, "write autoconfig xml response")
|
||||
}
|
||||
|
||||
// Autodiscover from Microsoft, also used by Thunderbird.
|
||||
// User should create a DNS record: _autodiscover._tcp.<domain> IN SRV 0 0 443 <hostname or autodiscover.<domain>>
|
||||
// User should create a DNS record: _autodiscover._tcp.<domain> SRV 0 0 443 <hostname>
|
||||
//
|
||||
// In practice, autodiscover does not seem to work with Microsoft clients. A
|
||||
// connectivity test tool for outlook is available on
|
||||
@ -162,7 +190,7 @@ func autoconfHandle(w http.ResponseWriter, r *http.Request) {
|
||||
//
|
||||
// Thunderbird does understand autodiscover.
|
||||
func autodiscoverHandle(w http.ResponseWriter, r *http.Request) {
|
||||
log := xlog.WithContext(r.Context())
|
||||
log := pkglog.WithContext(r.Context())
|
||||
|
||||
var addrDom string
|
||||
defer func() {
|
||||
@ -180,7 +208,7 @@ func autodiscoverHandle(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
log.Debug("autodiscover request", mlog.Field("email", req.Request.EmailAddress))
|
||||
log.Debug("autodiscover request", slog.String("email", req.Request.EmailAddress))
|
||||
|
||||
addr, err := smtp.ParseAddress(req.Request.EmailAddress)
|
||||
if err != nil {
|
||||
@ -188,13 +216,33 @@ func autodiscoverHandle(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
if _, ok := mox.Conf.Domain(addr.Domain); !ok {
|
||||
http.Error(w, "400 - bad request - unknown domain", http.StatusBadRequest)
|
||||
// tlsmode returns the "ssl" and "encryption" fields.
|
||||
tlsmode := func(tlsMode admin.TLSMode) (string, string, error) {
|
||||
switch tlsMode {
|
||||
case admin.TLSModeImmediate:
|
||||
return "on", "TLS", nil
|
||||
case admin.TLSModeSTARTTLS:
|
||||
return "on", "", nil
|
||||
case admin.TLSModeNone:
|
||||
return "off", "", nil
|
||||
default:
|
||||
return "", "", fmt.Errorf("unknown tls mode %v", tlsMode)
|
||||
}
|
||||
}
|
||||
|
||||
var imapSSL, imapEncryption string
|
||||
var submissionSSL, submissionEncryption string
|
||||
config, err := admin.ClientConfigDomain(addr.Domain)
|
||||
if err == nil {
|
||||
imapSSL, imapEncryption, err = tlsmode(config.IMAP.TLSMode)
|
||||
}
|
||||
if err == nil {
|
||||
submissionSSL, submissionEncryption, err = tlsmode(config.Submission.TLSMode)
|
||||
}
|
||||
if err != nil {
|
||||
http.Error(w, "400 - bad request - "+err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
addrDom = addr.Domain.Name()
|
||||
|
||||
hostname := mox.Conf.Static.HostnameDomain
|
||||
|
||||
// The docs are generated and fragmented in many tiny pages, hard to follow.
|
||||
// High-level starting point, https://learn.microsoft.com/en-us/openspecs/exchange_server_protocols/ms-oxdscli/78530279-d042-4eb0-a1f4-03b18143cd19
|
||||
@ -205,49 +253,10 @@ func autodiscoverHandle(w http.ResponseWriter, r *http.Request) {
|
||||
// use. See
|
||||
// https://learn.microsoft.com/en-us/openspecs/exchange_server_protocols/ms-oxdscli/21fd2dd5-c4ee-485b-94fb-e7db5da93726
|
||||
|
||||
var imapPort int
|
||||
imapSSL := "off"
|
||||
var imapEncryption string
|
||||
|
||||
var smtpPort int
|
||||
smtpSSL := "off"
|
||||
var smtpEncryption string
|
||||
for _, l := range mox.Conf.Static.Listeners {
|
||||
if l.IMAPS.Enabled {
|
||||
imapPort = config.Port(l.IMAPS.Port, 993)
|
||||
imapSSL = "on"
|
||||
imapEncryption = "TLS" // Assuming this means direct TLS.
|
||||
} else if l.IMAP.Enabled {
|
||||
if l.TLS != nil && imapEncryption != "TLS" {
|
||||
imapSSL = "on"
|
||||
imapPort = config.Port(l.IMAP.Port, 143)
|
||||
} else if imapSSL == "" {
|
||||
imapPort = config.Port(l.IMAP.Port, 143)
|
||||
}
|
||||
}
|
||||
|
||||
if l.Submissions.Enabled {
|
||||
smtpPort = config.Port(l.Submissions.Port, 465)
|
||||
smtpSSL = "on"
|
||||
smtpEncryption = "TLS" // Assuming this means direct TLS.
|
||||
} else if l.Submission.Enabled {
|
||||
if l.TLS != nil && smtpEncryption != "TLS" {
|
||||
smtpSSL = "on"
|
||||
smtpPort = config.Port(l.Submission.Port, 587)
|
||||
} else if smtpSSL == "" {
|
||||
smtpPort = config.Port(l.Submission.Port, 587)
|
||||
}
|
||||
}
|
||||
}
|
||||
if imapPort == 0 {
|
||||
log.Error("autoconfig: no smtp submission configured?")
|
||||
}
|
||||
if smtpPort == 0 {
|
||||
log.Error("autoconfig: no imap configured?")
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/xml; charset=utf-8")
|
||||
|
||||
// todo: let user configure whether they prefer or require TLS client auth and add "AuthPackage" with value "certificate" to Protocol? See https://learn.microsoft.com/en-us/openspecs/exchange_server_protocols/ms-oxdscli/21fd2dd5-c4ee-485b-94fb-e7db5da93726
|
||||
|
||||
resp := autodiscoverResponse{}
|
||||
resp.XMLName.Local = "Autodiscover"
|
||||
resp.XMLName.Space = "http://schemas.microsoft.com/exchange/autodiscover/responseschema/2006"
|
||||
@ -259,8 +268,8 @@ func autodiscoverHandle(w http.ResponseWriter, r *http.Request) {
|
||||
Protocol: []autodiscoverProtocol{
|
||||
{
|
||||
Type: "IMAP",
|
||||
Server: hostname.ASCII,
|
||||
Port: imapPort,
|
||||
Server: config.IMAP.Host.ASCII,
|
||||
Port: config.IMAP.Port,
|
||||
LoginName: req.Request.EmailAddress,
|
||||
SSL: imapSSL,
|
||||
Encryption: imapEncryption,
|
||||
@ -269,11 +278,11 @@ func autodiscoverHandle(w http.ResponseWriter, r *http.Request) {
|
||||
},
|
||||
{
|
||||
Type: "SMTP",
|
||||
Server: hostname.ASCII,
|
||||
Port: smtpPort,
|
||||
Server: config.Submission.Host.ASCII,
|
||||
Port: config.Submission.Port,
|
||||
LoginName: req.Request.EmailAddress,
|
||||
SSL: smtpSSL,
|
||||
Encryption: smtpEncryption,
|
||||
SSL: submissionSSL,
|
||||
Encryption: submissionEncryption,
|
||||
SPA: "off", // Override default "on", this is Microsoft's proprietary authentication protocol.
|
||||
AuthRequired: "on",
|
||||
},
|
||||
@ -282,9 +291,8 @@ func autodiscoverHandle(w http.ResponseWriter, r *http.Request) {
|
||||
enc := xml.NewEncoder(w)
|
||||
enc.Indent("", "\t")
|
||||
fmt.Fprint(w, xml.Header)
|
||||
if err := enc.Encode(resp); err != nil {
|
||||
log.Errorx("marshal autodiscover response", err)
|
||||
}
|
||||
err = enc.Encode(resp)
|
||||
log.Check(err, "marshal autodiscover xml response")
|
||||
}
|
||||
|
||||
// Thunderbird requests these URLs for autoconfig/autodiscover:
|
||||
@ -292,6 +300,22 @@ func autodiscoverHandle(w http.ResponseWriter, r *http.Request) {
|
||||
// https://autodiscover.example.org/autodiscover/autodiscover.xml
|
||||
// https://example.org/.well-known/autoconfig/mail/config-v1.1.xml?emailaddress=user%40example.org
|
||||
// https://example.org/autodiscover/autodiscover.xml
|
||||
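For illustration (not part of the diff): fetching the .well-known autoconfig URL listed above from a Go program. The domain and email address are placeholders.

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	u := "https://example.org/.well-known/autoconfig/mail/config-v1.1.xml?emailaddress=" +
		url.QueryEscape("user@example.org")
	resp, err := http.Get(u)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status)
	fmt.Println(string(body)) // The clientConfig XML produced by autoconfHandle.
}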
type incomingServer struct {
|
||||
Type string `xml:"type,attr"`
|
||||
Hostname string `xml:"hostname"`
|
||||
Port int `xml:"port"`
|
||||
SocketType string `xml:"socketType"`
|
||||
Username string `xml:"username"`
|
||||
Authentication string `xml:"authentication"`
|
||||
}
|
||||
type outgoingServer struct {
|
||||
Type string `xml:"type,attr"`
|
||||
Hostname string `xml:"hostname"`
|
||||
Port int `xml:"port"`
|
||||
SocketType string `xml:"socketType"`
|
||||
Username string `xml:"username"`
|
||||
Authentication string `xml:"authentication"`
|
||||
}
|
||||
type autoconfigResponse struct {
|
||||
XMLName xml.Name `xml:"clientConfig"`
|
||||
Version string `xml:"version,attr"`
|
||||
@ -302,23 +326,8 @@ type autoconfigResponse struct {
|
||||
DisplayName string `xml:"displayName"`
|
||||
DisplayShortName string `xml:"displayShortName"`
|
||||
|
||||
IncomingServer struct {
|
||||
Type string `xml:"type,attr"`
|
||||
Hostname string `xml:"hostname"`
|
||||
Port int `xml:"port"`
|
||||
SocketType string `xml:"socketType"`
|
||||
Username string `xml:"username"`
|
||||
Authentication string `xml:"authentication"`
|
||||
} `xml:"incomingServer"`
|
||||
|
||||
OutgoingServer struct {
|
||||
Type string `xml:"type,attr"`
|
||||
Hostname string `xml:"hostname"`
|
||||
Port int `xml:"port"`
|
||||
SocketType string `xml:"socketType"`
|
||||
Username string `xml:"username"`
|
||||
Authentication string `xml:"authentication"`
|
||||
} `xml:"outgoingServer"`
|
||||
IncomingServers []incomingServer `xml:"incomingServer"`
|
||||
OutgoingServers []outgoingServer `xml:"outgoingServer"`
|
||||
} `xml:"emailProvider"`
|
||||
|
||||
ClientConfigUpdate struct {
|
||||
@ -360,3 +369,72 @@ type autodiscoverProtocol struct {
|
||||
SPA string
|
||||
AuthRequired string
|
||||
}
|
||||
|
||||
// Serve a .mobileconfig file. This endpoint is not a standard place where Apple
|
||||
// devices look. We point to it from the account page.
|
||||
func mobileconfigHandle(w http.ResponseWriter, r *http.Request) {
|
||||
log := pkglog.WithContext(r.Context())
|
||||
|
||||
if r.Method != "GET" {
|
||||
http.Error(w, "405 - method not allowed - get required", http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
addresses := r.FormValue("addresses")
|
||||
fullName := r.FormValue("name")
|
||||
var buf []byte
|
||||
var err error
|
||||
if addresses == "" {
|
||||
err = fmt.Errorf("missing/empty field addresses")
|
||||
}
|
||||
l := strings.Split(addresses, ",")
|
||||
if err == nil {
|
||||
buf, err = MobileConfig(l, fullName)
|
||||
}
|
||||
if err != nil {
|
||||
http.Error(w, "400 - bad request - "+err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
h := w.Header()
|
||||
filename := l[0]
|
||||
filename = strings.ReplaceAll(filename, ".", "-")
|
||||
filename = strings.ReplaceAll(filename, "@", "-at-")
|
||||
filename = "email-account-" + filename + ".mobileconfig"
|
||||
h.Set("Content-Disposition", fmt.Sprintf(`attachment; filename="%s"`, filename))
|
||||
_, err = w.Write(buf)
|
||||
log.Check(err, "writing mobileconfig response")
|
||||
}
|
||||
|
||||
// Serve a PNG image with a QR code linking to the .mobileconfig file; should be
|
||||
// helpful for mobile devices.
|
||||
func mobileconfigQRCodeHandle(w http.ResponseWriter, r *http.Request) {
|
||||
log := pkglog.WithContext(r.Context())
|
||||
|
||||
if r.Method != "GET" {
|
||||
http.Error(w, "405 - method not allowed - get required", http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
if !strings.HasSuffix(r.URL.Path, ".qrcode.png") {
|
||||
http.NotFound(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
// Compose URL, scheme and host are not set.
|
||||
u := *r.URL
|
||||
if r.TLS == nil {
|
||||
u.Scheme = "http"
|
||||
} else {
|
||||
u.Scheme = "https"
|
||||
}
|
||||
u.Host = r.Host
|
||||
u.Path = strings.TrimSuffix(u.Path, ".qrcode.png")
|
||||
|
||||
code, err := qr.Encode(u.String(), qr.L)
|
||||
if err != nil {
|
||||
http.Error(w, "500 - internal server error - generating qr-code: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
h := w.Header()
|
||||
h.Set("Content-Type", "image/png")
|
||||
_, err = w.Write(code.PNG())
|
||||
log.Check(err, "writing mobileconfig qr code")
|
||||
}
|
||||
|
BIN  http/favicon.ico  (new file, 823 B; binary file not shown)
429  http/gzcache.go  (new file)
@ -0,0 +1,429 @@
|
||||
package http
|
||||
|
||||
import (
|
||||
"compress/gzip"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"log/slog"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/mjl-/mox/mlog"
|
||||
)
|
||||
|
||||
// todo: consider caching gzipped responses from forward handlers too. we would need to read the responses (handle up to perhaps 2mb), hash the data (blake2b seems fast), check if we have the gzip content for that hash, cache it on second request. keep around entries for non-yet-cached hashes, with some limit and lru eviction policy. we have to recognize some content-types as not applicable and do direct streaming compression, e.g. for text/event-stream. and we need to detect when backend server could be slowly sending out data and abort the caching attempt. downside is always that we need to read the whole response before and hash it before we can send our response. it is best if the backend just responds with gzip itself though. compression needs more cpu than hashing (at least 10x), but it's only worth it with enough hits.
|
||||
|
||||
// Cache for gzipped static files.
|
||||
var staticgzcache gzcache
|
||||
|
||||
type gzcache struct {
|
||||
dir string // Where all files are stored.
|
||||
|
||||
// Max total size of combined files in cache. When adding a new entry, the least
|
||||
// recently used entries are evicted to stay below this size.
|
||||
maxSize int64
|
||||
|
||||
sync.Mutex
|
||||
|
||||
// Total on-disk size of compressed data. Not larger than maxSize. We can
|
||||
// temporarily have more bytes in use because while/after evicting, a writer may
|
||||
// still have the old removed file open.
|
||||
size int64
|
||||
|
||||
// Indexed by effective path, based on handler.
|
||||
paths map[string]gzfile
|
||||
|
||||
// Only with files we completed compressing, kept ordered by atime. We evict from
|
||||
// oldest. On use, we take entries out and put them at newest.
|
||||
oldest, newest *pathUse
|
||||
}
|
||||
|
||||
type gzfile struct {
|
||||
// Whether compressing is in progress. If a new request comes in while we are already
|
||||
// compressing, for simplicity of code we just compress again for that client.
|
||||
compressing bool
|
||||
|
||||
mtime int64 // If mtime changes, we remove entry from cache.
|
||||
atime int64 // For LRU.
|
||||
gzsize int64 // Compressed size, used in Content-Length header.
|
||||
use *pathUse // Only set after compressing finished.
|
||||
}
|
||||
|
||||
type pathUse struct {
|
||||
prev, next *pathUse // Double-linked list.
|
||||
path string
|
||||
}
|
||||
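A rough sketch (not part of the diff) of the intended call sequence, based on the method documentation further below. It assumes it lives in this package (so it can use staticgzcache, net/http, io and fmt), and gzipToCacheFile is a hypothetical helper standing in for the compression done in staticgzcacheReplacer.WriteHeader.

// Sketch only; error handling and response headers trimmed for brevity.
func serveCachedGzip(w http.ResponseWriter, path string, mtime int64) {
	if f, gzsize := staticgzcache.openPath(path, mtime); f != nil {
		// A finished compressed copy exists: stream it out.
		defer f.Close()
		w.Header().Set("Content-Encoding", "gzip")
		w.Header().Set("Content-Length", fmt.Sprintf("%d", gzsize))
		io.Copy(w, f)
		return
	}
	if !staticgzcache.startPath(path, mtime) {
		// Another request is already compressing this file; compress for this
		// client only, without touching the cache.
		return
	}
	gzsize, err := gzipToCacheFile(path, mtime) // hypothetical helper
	if err != nil {
		staticgzcache.abortPath(path)
		return
	}
	staticgzcache.finishPath(path, gzsize)
}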
|
||||
// Initialize staticgzcache from on-disk directory.
|
||||
// The path and mtime are in the filename, the atime is in the file itself.
|
||||
func loadStaticGzipCache(dir string, maxSize int64) {
|
||||
staticgzcache = gzcache{
|
||||
dir: dir,
|
||||
maxSize: maxSize,
|
||||
paths: map[string]gzfile{},
|
||||
}
|
||||
|
||||
// todo future: should we split cached files in sub directories, so we don't end up with one huge directory?
|
||||
os.MkdirAll(dir, 0700)
|
||||
entries, err := os.ReadDir(dir)
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
pkglog.Errorx("listing static gzip cache files", err, slog.String("dir", dir))
|
||||
}
|
||||
for _, e := range entries {
|
||||
name := e.Name()
|
||||
var err error
|
||||
if !strings.HasSuffix(name, ".gz") {
|
||||
err = errors.New("missing .gz suffix")
|
||||
}
|
||||
var path, xpath, mtimestr string
|
||||
if err == nil {
|
||||
var ok bool
|
||||
xpath, mtimestr, ok = strings.Cut(strings.TrimRight(name, ".gz"), "+")
|
||||
if !ok {
|
||||
err = fmt.Errorf("missing + in filename")
|
||||
}
|
||||
}
|
||||
if err == nil {
|
||||
var pathbuf []byte
|
||||
pathbuf, err = base64.RawURLEncoding.DecodeString(xpath)
|
||||
if err == nil {
|
||||
path = string(pathbuf)
|
||||
}
|
||||
}
|
||||
var mtime int64
|
||||
if err == nil {
|
||||
mtime, err = strconv.ParseInt(mtimestr, 16, 64)
|
||||
}
|
||||
var fi fs.FileInfo
|
||||
if err == nil {
|
||||
fi, err = e.Info()
|
||||
}
|
||||
var atime int64
|
||||
if err == nil {
|
||||
atime, err = statAtime(fi.Sys())
|
||||
}
|
||||
if err != nil {
|
||||
pkglog.Infox("removing unusable/unrecognized file in static gzip cache dir", err)
|
||||
xerr := os.Remove(filepath.Join(dir, name))
|
||||
pkglog.Check(xerr, "removing unusable file in static gzip cache dir",
|
||||
slog.Any("error", err),
|
||||
slog.String("dir", dir),
|
||||
slog.String("filename", name))
|
||||
continue
|
||||
}
|
||||
staticgzcache.paths[path] = gzfile{
|
||||
mtime: mtime,
|
||||
atime: atime,
|
||||
gzsize: fi.Size(),
|
||||
use: &pathUse{path: path},
|
||||
}
|
||||
staticgzcache.size += fi.Size()
|
||||
}
|
||||
|
||||
pathatimes := make([]struct {
|
||||
path string
|
||||
atime int64
|
||||
}, len(staticgzcache.paths))
|
||||
i := 0
|
||||
for k, gf := range staticgzcache.paths {
|
||||
pathatimes[i].path = k
|
||||
pathatimes[i].atime = gf.atime
|
||||
i++
|
||||
}
|
||||
sort.Slice(pathatimes, func(i, j int) bool {
|
||||
return pathatimes[i].atime < pathatimes[j].atime
|
||||
})
|
||||
for _, pa := range pathatimes {
|
||||
staticgzcache.push(staticgzcache.paths[pa.path].use)
|
||||
}
|
||||
|
||||
// Ensure cache size is OK for current config.
|
||||
staticgzcache.evictFor(0)
|
||||
}
|
||||
|
||||
// Evict entries so size bytes are available.
|
||||
// Must be called with lock held.
|
||||
func (c *gzcache) evictFor(size int64) {
|
||||
for c.size+size > c.maxSize && c.oldest != nil {
|
||||
c.evictPath(c.oldest.path)
|
||||
}
|
||||
}
|
||||
|
||||
// remove path from cache.
|
||||
// Must be called with lock held.
|
||||
func (c *gzcache) evictPath(path string) {
|
||||
gf := c.paths[path]
|
||||
|
||||
delete(c.paths, path)
|
||||
c.unlink(gf.use)
|
||||
c.size -= gf.gzsize
|
||||
err := os.Remove(staticCachePath(c.dir, path, gf.mtime))
|
||||
pkglog.Check(err, "removing cached gzipped static file", slog.String("path", path))
|
||||
}
|
||||
|
||||
// Open cached file for path, requiring it has mtime. If there is no usable cached
|
||||
// file, a nil file is returned and the caller should compress and add to the cache
|
||||
// with startPath and finishPath. No usable cached file means the path isn't in the
|
||||
// cache, or its mtime is different, or there is an entry but it is new and being
|
||||
// compressed at the moment. If a usable cached file was found, it is opened and
|
||||
// returned, along with its compressed/on-disk size.
|
||||
func (c *gzcache) openPath(path string, mtime int64) (*os.File, int64) {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
|
||||
gf, ok := c.paths[path]
|
||||
if !ok || gf.compressing {
|
||||
return nil, 0
|
||||
}
|
||||
if gf.mtime != mtime {
|
||||
// File has changed, remove old entry. Caller will add to cache again.
|
||||
c.evictPath(path)
|
||||
return nil, 0
|
||||
}
|
||||
|
||||
p := staticCachePath(c.dir, path, gf.mtime)
|
||||
f, err := os.Open(p)
|
||||
if err != nil {
|
||||
pkglog.Errorx("open static cached gzip file, removing from cache", err, slog.String("path", path))
|
||||
// Perhaps someone removed the file? Remove from cache, it will be recreated.
|
||||
c.evictPath(path)
|
||||
return nil, 0
|
||||
}
|
||||
|
||||
gf.atime = time.Now().UnixNano()
|
||||
c.unlink(gf.use)
|
||||
c.push(gf.use)
|
||||
c.paths[path] = gf
|
||||
|
||||
return f, gf.gzsize
|
||||
}
|
||||
|
||||
// startPath attempts to add an entry to the cache for a new cached compressed
|
||||
// file. If there is already an entry but it isn't done compressing yet, false is
|
||||
// returned and the caller can still compress and respond but the entry cannot be
|
||||
// added to the cache. If the entry is being added, the caller must call finishPath
|
||||
// or abortPath.
|
||||
func (c *gzcache) startPath(path string, mtime int64) bool {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
|
||||
if _, ok := c.paths[path]; ok {
|
||||
return false
|
||||
}
|
||||
// note: no "use" yet, we only set that when we finish, so we don't have to clean up on abort.
|
||||
c.paths[path] = gzfile{compressing: true, mtime: mtime}
|
||||
return true
|
||||
}
|
||||
|
||||
// finishPath completes adding an entry to the cache, marking the entry as
|
||||
// compressed, accounting for its size, and marking its atime.
|
||||
func (c *gzcache) finishPath(path string, gzsize int64) {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
|
||||
c.evictFor(gzsize)
|
||||
|
||||
gf := c.paths[path]
|
||||
gf.compressing = false
|
||||
gf.gzsize = gzsize
|
||||
gf.atime = time.Now().UnixNano()
|
||||
gf.use = &pathUse{path: path}
|
||||
c.paths[path] = gf
|
||||
c.size += gzsize
|
||||
c.push(gf.use)
|
||||
}
|
||||
|
||||
// abortPath marks an entry as no longer being added to the cache.
|
||||
func (c *gzcache) abortPath(path string) {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
|
||||
delete(c.paths, path)
|
||||
// note: gzfile.use isn't set yet.
|
||||
}
|
||||
|
||||
// push inserts the "pathUse" to the head of the LRU doubly-linked list, unlinking
|
||||
// it first if needed.
|
||||
func (c *gzcache) push(u *pathUse) {
|
||||
c.unlink(u)
|
||||
u.prev = c.newest
|
||||
if c.newest != nil {
|
||||
c.newest.next = u
|
||||
}
|
||||
if c.oldest == nil {
|
||||
c.oldest = u
|
||||
}
|
||||
c.newest = u
|
||||
}
|
||||
|
||||
// unlink removes the "pathUse" from the LRU doubly-linked list.
|
||||
func (c *gzcache) unlink(u *pathUse) {
|
||||
if c.oldest == u {
|
||||
c.oldest = u.next
|
||||
}
|
||||
if c.newest == u {
|
||||
c.newest = u.prev
|
||||
}
|
||||
if u.prev != nil {
|
||||
u.prev.next = u.next
|
||||
}
|
||||
if u.next != nil {
|
||||
u.next.prev = u.prev
|
||||
}
|
||||
u.prev = nil
|
||||
u.next = nil
|
||||
}
|
||||
|
||||
// Return path to the on-disk gzipped cached file.
|
||||
func staticCachePath(dir, path string, mtime int64) string {
|
||||
p := base64.RawURLEncoding.EncodeToString([]byte(path))
|
||||
return filepath.Join(dir, fmt.Sprintf("%s+%x.gz", p, mtime))
|
||||
}
|
||||
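For illustration (not part of the diff), assuming the same package: what a cache filename looks like for a made-up directory, path and mtime.

p := staticCachePath("/var/cache/gz", "/static/app.js", fi.ModTime().UnixNano())
// p has the form <dir>/<base64url of request path>+<mtime in hex>.gz,
// roughly "/var/cache/gz/L3N0YXRpYy9hcHAuanM+<hex mtime>.gz".
fmt.Println(p)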
|
||||
// staticgzcacheReplacer intercepts responses for cacheable static files,
|
||||
// responding with the cached content if appropriate and failing further writes so
|
||||
// the regular response writer stops.
|
||||
type staticgzcacheReplacer struct {
|
||||
w http.ResponseWriter
|
||||
r *http.Request // For its context, or logging.
|
||||
uncomprPath string
|
||||
uncomprFile *os.File
|
||||
uncomprMtime time.Time
|
||||
uncomprSize int64
|
||||
|
||||
statusCode int
|
||||
|
||||
// Set during WriteHeader to indicate a compressed file has been written; further
|
||||
// Writes result in an error to stop the writer of the uncompressed content.
|
||||
handled bool
|
||||
}
|
||||
|
||||
func (w *staticgzcacheReplacer) logger() mlog.Log {
|
||||
return pkglog.WithContext(w.r.Context())
|
||||
}
|
||||
|
||||
// Header returns the header of the underlying ResponseWriter.
|
||||
func (w *staticgzcacheReplacer) Header() http.Header {
|
||||
return w.w.Header()
|
||||
}
|
||||
|
||||
// WriteHeader checks whether the response is eligible for compressing. If not,
|
||||
// WriteHeader on the underlying ResponseWriter is called. If so, headers for gzip
|
||||
// content are set and the gzip content is written, either from disk or compressed
|
||||
// and stored in the cache.
|
||||
func (w *staticgzcacheReplacer) WriteHeader(statusCode int) {
|
||||
if w.statusCode != 0 {
|
||||
return
|
||||
}
|
||||
w.statusCode = statusCode
|
||||
if statusCode != http.StatusOK {
|
||||
w.w.WriteHeader(statusCode)
|
||||
return
|
||||
}
|
||||
|
||||
gzf, gzsize := staticgzcache.openPath(w.uncomprPath, w.uncomprMtime.UnixNano())
|
||||
if gzf == nil {
|
||||
// Not in cache, or work in progress.
|
||||
started := staticgzcache.startPath(w.uncomprPath, w.uncomprMtime.UnixNano())
|
||||
if !started {
|
||||
// Another request is already compressing and storing this file.
|
||||
// todo: we should just wait for the other compression to finish, then use its result.
|
||||
w.w.(*loggingWriter).UncompressedSize = w.uncomprSize
|
||||
h := w.w.Header()
|
||||
h.Set("Content-Encoding", "gzip")
|
||||
h.Del("Content-Length") // We don't know this, we compress streamingly.
|
||||
gzw, _ := gzip.NewWriterLevel(w.w, gzip.BestSpeed)
|
||||
_, err := io.Copy(gzw, w.uncomprFile)
|
||||
if err == nil {
|
||||
err = gzw.Close()
|
||||
}
|
||||
w.handled = true
|
||||
if err != nil {
|
||||
w.w.(*loggingWriter).error(err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Compress and write to cache.
|
||||
p := staticCachePath(staticgzcache.dir, w.uncomprPath, w.uncomprMtime.UnixNano())
|
||||
ngzf, err := os.OpenFile(p, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0600)
|
||||
if err != nil {
|
||||
w.logger().Errorx("create new static gzip cache file", err, slog.String("requestpath", w.uncomprPath), slog.String("fspath", p))
|
||||
staticgzcache.abortPath(w.uncomprPath)
|
||||
return
|
||||
}
|
||||
defer func() {
|
||||
if ngzf != nil {
|
||||
staticgzcache.abortPath(w.uncomprPath)
|
||||
err := ngzf.Close()
|
||||
w.logger().Check(err, "closing failed static gzip cache file", slog.String("requestpath", w.uncomprPath), slog.String("fspath", p))
|
||||
err = os.Remove(p)
|
||||
w.logger().Check(err, "removing failed static gzip cache file", slog.String("requestpath", w.uncomprPath), slog.String("fspath", p))
|
||||
}
|
||||
}()
|
||||
|
||||
gzw := gzip.NewWriter(ngzf)
|
||||
_, err = io.Copy(gzw, w.uncomprFile)
|
||||
if err == nil {
|
||||
err = gzw.Close()
|
||||
}
|
||||
if err == nil {
|
||||
err = ngzf.Sync()
|
||||
}
|
||||
if err == nil {
|
||||
gzsize, err = ngzf.Seek(0, 1)
|
||||
}
|
||||
if err == nil {
|
||||
_, err = ngzf.Seek(0, 0)
|
||||
}
|
||||
if err != nil {
|
||||
w.w.(*loggingWriter).error(err)
|
||||
return
|
||||
}
|
||||
staticgzcache.finishPath(w.uncomprPath, gzsize)
|
||||
gzf = ngzf
|
||||
ngzf = nil
|
||||
}
|
||||
defer func() {
|
||||
if gzf != nil {
|
||||
err := gzf.Close()
|
||||
if err != nil {
|
||||
w.logger().Errorx("closing static gzip cache file", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// Signal to Write that we already wrote (or attempted to write) the response.
|
||||
w.handled = true
|
||||
|
||||
w.w.(*loggingWriter).UncompressedSize = w.uncomprSize
|
||||
h := w.w.Header()
|
||||
h.Set("Content-Encoding", "gzip")
|
||||
h.Set("Content-Length", fmt.Sprintf("%d", gzsize))
|
||||
w.w.WriteHeader(statusCode)
|
||||
if _, err := io.Copy(w.w, gzf); err != nil {
|
||||
w.w.(*loggingWriter).error(err)
|
||||
}
|
||||
}
|
||||
|
||||
var errHandledCompressed = errors.New("response written with compression")
|
||||
|
||||
func (w *staticgzcacheReplacer) Write(buf []byte) (int, error) {
|
||||
if w.statusCode == 0 {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
if w.handled {
|
||||
// For 200 OK, we already wrote the response and just want the caller to stop processing.
|
||||
return 0, errHandledCompressed
|
||||
}
|
||||
return w.w.Write(buf)
|
||||
}
|
17  http/main_test.go  (new file)
@ -0,0 +1,17 @@
|
||||
package http
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/mjl-/mox/metrics"
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
m.Run()
|
||||
if metrics.Panics.Load() > 0 {
|
||||
fmt.Println("unhandled panics encountered")
|
||||
os.Exit(2)
|
||||
}
|
||||
}
|
205  http/mobileconfig.go  (new file)
@ -0,0 +1,205 @@
|
||||
package http
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/hmac"
|
||||
"crypto/sha256"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"maps"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"github.com/mjl-/mox/admin"
|
||||
"github.com/mjl-/mox/smtp"
|
||||
)
|
||||
|
||||
// Apple software isn't good at autoconfig/autodiscovery, but it can import a
|
||||
// device management profile containing account settings.
|
||||
//
|
||||
// See https://developer.apple.com/documentation/devicemanagement/mail.
|
||||
type deviceManagementProfile struct {
|
||||
XMLName xml.Name `xml:"plist"`
|
||||
Version string `xml:"version,attr"`
|
||||
Dict dict `xml:"dict"`
|
||||
}
|
||||
|
||||
type array []dict
|
||||
|
||||
type dict map[string]any
|
||||
|
||||
// MarshalXML marshals as <dict> with multiple pairs of <key> and a value of various types.
|
||||
func (m dict) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
|
||||
// The plist format isn't that easy to generate with Go's xml package: it leaves
// out reasonable structure and instead just concatenates key/value pairs. Perhaps
|
||||
// there is a better way?
|
||||
|
||||
if err := e.EncodeToken(xml.StartElement{Name: xml.Name{Local: "dict"}}); err != nil {
|
||||
return err
|
||||
}
|
||||
l := slices.Sorted(maps.Keys(m))
|
||||
for _, k := range l {
|
||||
tokens := []xml.Token{
|
||||
xml.StartElement{Name: xml.Name{Local: "key"}},
|
||||
xml.CharData([]byte(k)),
|
||||
xml.EndElement{Name: xml.Name{Local: "key"}},
|
||||
}
|
||||
for _, t := range tokens {
|
||||
if err := e.EncodeToken(t); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
tokens = nil
|
||||
|
||||
switch v := m[k].(type) {
|
||||
case string:
|
||||
tokens = []xml.Token{
|
||||
xml.StartElement{Name: xml.Name{Local: "string"}},
|
||||
xml.CharData([]byte(v)),
|
||||
xml.EndElement{Name: xml.Name{Local: "string"}},
|
||||
}
|
||||
case int:
|
||||
tokens = []xml.Token{
|
||||
xml.StartElement{Name: xml.Name{Local: "integer"}},
|
||||
xml.CharData(fmt.Appendf(nil, "%d", v)),
|
||||
xml.EndElement{Name: xml.Name{Local: "integer"}},
|
||||
}
|
||||
case bool:
|
||||
tag := "false"
|
||||
if v {
|
||||
tag = "true"
|
||||
}
|
||||
tokens = []xml.Token{
|
||||
xml.StartElement{Name: xml.Name{Local: tag}},
|
||||
xml.EndElement{Name: xml.Name{Local: tag}},
|
||||
}
|
||||
case array:
|
||||
if err := e.EncodeToken(xml.StartElement{Name: xml.Name{Local: "array"}}); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, d := range v {
|
||||
if err := d.MarshalXML(e, xml.StartElement{Name: xml.Name{Local: "array"}}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := e.EncodeToken(xml.EndElement{Name: xml.Name{Local: "array"}}); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("unexpected dict value of type %T", v)
|
||||
}
|
||||
for _, t := range tokens {
|
||||
if err := e.EncodeToken(t); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
if err := e.EncodeToken(xml.EndElement{Name: xml.Name{Local: "dict"}}); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
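For illustration (not part of the diff), assuming the same package: what a small dict marshals to. Keys come out sorted, per slices.Sorted above.

func dictMarshalDemo() {
	d := dict{"PayloadType": "Configuration", "PayloadVersion": 1}
	buf, err := xml.Marshal(d)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(buf))
	// Prints (unindented):
	// <dict><key>PayloadType</key><string>Configuration</string><key>PayloadVersion</key><integer>1</integer></dict>
}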
|
||||
// MobileConfig returns a device profile for a macOS Mail email account. The file
|
||||
// should have a .mobileconfig extension. Opening the file adds it to Profiles in
|
||||
// System Preferences, where it can be installed. This profile does not contain a
|
||||
// password because sending opaque files containing passwords around to users seems
|
||||
// like bad security practice.
|
||||
//
|
||||
// Multiple addresses can be passed, the first is used for IMAP/submission login,
|
||||
// and likely seen as primary account by Apple software.
|
||||
//
|
||||
// The config is not signed, so users must ignore warnings about unsigned profiles.
|
||||
func MobileConfig(addresses []string, fullName string) ([]byte, error) {
|
||||
if len(addresses) == 0 {
|
||||
return nil, fmt.Errorf("need at least 1 address")
|
||||
}
|
||||
addr, err := smtp.ParseAddress(addresses[0])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("parsing address: %v", err)
|
||||
}
|
||||
|
||||
config, err := admin.ClientConfigDomain(addr.Domain)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("getting config for domain: %v", err)
|
||||
}
|
||||
|
||||
// Apple software wants identifiers...
|
||||
t := strings.Split(addr.Domain.Name(), ".")
|
||||
slices.Reverse(t)
|
||||
reverseAddr := strings.Join(t, ".") + "." + addr.Localpart.String()
|
||||
|
||||
// Apple software wants UUIDs... We generate them deterministically based on address
|
||||
// and our code (through key, which we must change if code changes).
|
||||
const key = "mox0"
|
||||
uuid := func(prefix string) string {
|
||||
mac := hmac.New(sha256.New, []byte(key))
|
||||
mac.Write([]byte(prefix + "\n" + "\n" + strings.Join(addresses, ",")))
|
||||
sum := mac.Sum(nil)
|
||||
uuid := fmt.Sprintf("%x-%x-%x-%x-%x", sum[0:4], sum[4:6], sum[6:8], sum[8:10], sum[10:16])
|
||||
return uuid
|
||||
}
|
||||
|
||||
uuidConfig := uuid("config")
|
||||
uuidAccount := uuid("account")
|
||||
|
||||
// The "UseSSL" fields are underspecified in Apple's format. They say "If true,
|
||||
// enables SSL for authentication on the incoming mail server.". I'm assuming they
|
||||
// want to know if they should start immediately with a handshake, instead of
|
||||
// starting out plain. There is no way to require STARTTLS though. You could even
|
||||
// interpret their wording as this field enabling authentication through client-side
|
||||
// TLS certificates, given their "on the incoming mail server", instead of "of the
|
||||
// incoming mail server".
|
||||
|
||||
var w bytes.Buffer
|
||||
p := deviceManagementProfile{
|
||||
Version: "1.0",
|
||||
Dict: dict(map[string]any{
|
||||
"PayloadDisplayName": fmt.Sprintf("%s email account", addresses[0]),
|
||||
"PayloadIdentifier": reverseAddr + ".email",
|
||||
"PayloadType": "Configuration",
|
||||
"PayloadUUID": uuidConfig,
|
||||
"PayloadVersion": 1,
|
||||
"PayloadContent": array{
|
||||
dict(map[string]any{
|
||||
"EmailAccountDescription": addresses[0],
|
||||
"EmailAccountName": fullName,
|
||||
"EmailAccountType": "EmailTypeIMAP",
|
||||
// Comma-separated multiple addresses are not documented at Apple, but seem to
|
||||
// work.
|
||||
"EmailAddress": strings.Join(addresses, ","),
|
||||
"IncomingMailServerAuthentication": "EmailAuthCRAMMD5", // SCRAM not an option at time of writing..
|
||||
"IncomingMailServerUsername": addresses[0],
|
||||
"IncomingMailServerHostName": config.IMAP.Host.ASCII,
|
||||
"IncomingMailServerPortNumber": config.IMAP.Port,
|
||||
"IncomingMailServerUseSSL": config.IMAP.TLSMode == admin.TLSModeImmediate,
|
||||
"OutgoingMailServerAuthentication": "EmailAuthCRAMMD5", // SCRAM not an option at time of writing...
|
||||
"OutgoingMailServerHostName": config.Submission.Host.ASCII,
|
||||
"OutgoingMailServerPortNumber": config.Submission.Port,
|
||||
"OutgoingMailServerUsername": addresses[0],
|
||||
"OutgoingMailServerUseSSL": config.Submission.TLSMode == admin.TLSModeImmediate,
|
||||
"OutgoingPasswordSameAsIncomingPassword": true,
|
||||
"PayloadIdentifier": reverseAddr + ".email.account",
|
||||
"PayloadType": "com.apple.mail.managed",
|
||||
"PayloadUUID": uuidAccount,
|
||||
"PayloadVersion": 1,
|
||||
}),
|
||||
},
|
||||
}),
|
||||
}
|
||||
if _, err := fmt.Fprint(&w, xml.Header); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if _, err := fmt.Fprint(&w, "<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n"); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
enc := xml.NewEncoder(&w)
|
||||
enc.Indent("", "\t")
|
||||
if err := enc.Encode(p); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if _, err := fmt.Fprintln(&w); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return w.Bytes(), nil
|
||||
}
|
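For illustration (not part of the diff), assuming the same package and a domain configured for the address: generating a profile and writing it to disk. The address and filename are placeholders.

func writeProfileDemo() error {
	buf, err := MobileConfig([]string{"user@example.org"}, "Example User")
	if err != nil {
		return fmt.Errorf("generating profile: %v", err)
	}
	// Apple software recognizes the .mobileconfig extension for profiles.
	return os.WriteFile("email-account-user-at-example-org.mobileconfig", buf, 0o600)
}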
@ -1,6 +1,7 @@
|
||||
package http
|
||||
|
||||
import (
|
||||
"log/slog"
|
||||
"net"
|
||||
"net/http"
|
||||
"strings"
|
||||
@ -13,8 +14,8 @@ import (
|
||||
)
|
||||
|
||||
func mtastsPolicyHandle(w http.ResponseWriter, r *http.Request) {
|
||||
log := func() *mlog.Log {
|
||||
return xlog.WithContext(r.Context())
|
||||
log := func() mlog.Log {
|
||||
return pkglog.WithContext(r.Context())
|
||||
}
|
||||
|
||||
host := strings.ToLower(r.Host)
|
||||
@ -30,7 +31,7 @@ func mtastsPolicyHandle(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
domain, err := dns.ParseDomain(host)
|
||||
if err != nil {
|
||||
log().Errorx("mtasts policy request: bad domain", err, mlog.Field("host", host))
|
||||
log().Errorx("mtasts policy request: bad domain", err, slog.String("host", host))
|
||||
http.NotFound(w, r)
|
||||
return
|
||||
}
|
||||
@ -42,16 +43,16 @@ func mtastsPolicyHandle(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
var mxs []mtasts.STSMX
|
||||
var mxs []mtasts.MX
|
||||
for _, s := range sts.MX {
|
||||
var mx mtasts.STSMX
|
||||
var mx mtasts.MX
|
||||
if strings.HasPrefix(s, "*.") {
|
||||
mx.Wildcard = true
|
||||
s = s[2:]
|
||||
}
|
||||
d, err := dns.ParseDomain(s)
|
||||
if err != nil {
|
||||
log().Errorx("bad domain in mtasts config", err, mlog.Field("domain", s))
|
||||
log().Errorx("bad domain in mtasts config", err, slog.String("domain", s))
|
||||
http.Error(w, "500 - internal server error - invalid domain in configuration", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
@ -59,7 +60,7 @@ func mtastsPolicyHandle(w http.ResponseWriter, r *http.Request) {
|
||||
mxs = append(mxs, mx)
|
||||
}
|
||||
if len(mxs) == 0 {
|
||||
mxs = []mtasts.STSMX{{Domain: mox.Conf.Static.HostnameDomain}}
|
||||
mxs = []mtasts.MX{{Domain: mox.Conf.Static.HostnameDomain}}
|
||||
}
|
||||
|
||||
policy := mtasts.Policy{
|
||||
|
1050  http/web.go
File diff suppressed because it is too large
@ -6,33 +6,19 @@ import (
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/mjl-/mox/dns"
|
||||
"github.com/mjl-/mox/mox-"
|
||||
)
|
||||
|
||||
func TestServeHTTP(t *testing.T) {
|
||||
os.RemoveAll("../testdata/web/data")
|
||||
mox.ConfigStaticPath = "../testdata/web/mox.conf"
|
||||
mox.ConfigStaticPath = filepath.FromSlash("../testdata/web/mox.conf")
|
||||
mox.ConfigDynamicPath = filepath.Join(filepath.Dir(mox.ConfigStaticPath), "domains.conf")
|
||||
mox.MustLoadConfig(false)
|
||||
mox.MustLoadConfig(true, false)
|
||||
|
||||
srv := &serve{
|
||||
PathHandlers: []pathHandler{
|
||||
{
|
||||
HostMatch: func(dom dns.Domain) bool {
|
||||
return strings.HasPrefix(dom.ASCII, "mta-sts.")
|
||||
},
|
||||
Path: "/.well-known/mta-sts.txt",
|
||||
Handle: func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Write([]byte("mta-sts!"))
|
||||
},
|
||||
},
|
||||
},
|
||||
Webserver: true,
|
||||
}
|
||||
portSrvs := portServes("local", mox.Conf.Static.Listeners["local"])
|
||||
srv := portSrvs[80]
|
||||
|
||||
test := func(method, target string, expCode int, expContent string, expHeaders map[string]string) {
|
||||
t.Helper()
|
||||
@ -43,22 +29,22 @@ func TestServeHTTP(t *testing.T) {
|
||||
srv.ServeHTTP(rw, req)
|
||||
resp := rw.Result()
|
||||
if resp.StatusCode != expCode {
|
||||
t.Fatalf("got statuscode %d, expected %d", resp.StatusCode, expCode)
|
||||
t.Errorf("got statuscode %d, expected %d", resp.StatusCode, expCode)
|
||||
}
|
||||
if expContent != "" {
|
||||
s := rw.Body.String()
|
||||
if s != expContent {
|
||||
t.Fatalf("got response data %q, expected %q", s, expContent)
|
||||
t.Errorf("got response data %q, expected %q", s, expContent)
|
||||
}
|
||||
}
|
||||
for k, v := range expHeaders {
|
||||
if xv := resp.Header.Get(k); xv != v {
|
||||
t.Fatalf("got %q for header %q, expected %q", xv, k, v)
|
||||
t.Errorf("got %q for header %q, expected %q", xv, k, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
test("GET", "http://mta-sts.mox.example/.well-known/mta-sts.txt", http.StatusOK, "mta-sts!", nil)
|
||||
test("GET", "http://mta-sts.mox.example/.well-known/mta-sts.txt", http.StatusOK, "version: STSv1\nmode: enforce\nmax_age: 86400\nmx: mox.example\n", nil)
|
||||
test("GET", "http://mox.example/.well-known/mta-sts.txt", http.StatusNotFound, "", nil) // mta-sts endpoint not in this domain.
|
||||
test("GET", "http://mta-sts.mox.example/static/", http.StatusNotFound, "", nil) // static not served on this domain.
|
||||
test("GET", "http://mta-sts.mox.example/other", http.StatusNotFound, "", nil)
|
||||
@ -66,4 +52,24 @@ func TestServeHTTP(t *testing.T) {
|
||||
test("GET", "http://mox.example/static/index.html", http.StatusOK, "html\n", map[string]string{"X-Test": "mox"})
|
||||
test("GET", "http://mox.example/static/dir/", http.StatusOK, "", map[string]string{"X-Test": "mox"}) // Dir listing.
|
||||
test("GET", "http://mox.example/other", http.StatusNotFound, "", nil)
|
||||
|
||||
// Webmail on IP, localhost, mail host, clientsettingsdomain, not others.
|
||||
test("GET", "http://127.0.0.1/webmail/", http.StatusOK, "", nil)
|
||||
test("GET", "http://localhost/webmail/", http.StatusOK, "", nil)
|
||||
test("GET", "http://mox.example/webmail/", http.StatusOK, "", nil)
|
||||
test("GET", "http://mail.mox.example/webmail/", http.StatusOK, "", nil)
|
||||
test("GET", "http://mail.other.example/webmail/", http.StatusNotFound, "", nil)
|
||||
test("GET", "http://remotehost/webmail/", http.StatusNotFound, "", nil)
|
||||
|
||||
// admin on IP, localhost, mail host, not clientsettingsdomain.
|
||||
test("GET", "http://127.0.0.1/admin/", http.StatusOK, "", nil)
|
||||
test("GET", "http://localhost/admin/", http.StatusOK, "", nil)
|
||||
test("GET", "http://mox.example/admin/", http.StatusPermanentRedirect, "", nil) // Override by WebHandler.
|
||||
test("GET", "http://mail.mox.example/admin/", http.StatusNotFound, "", nil)
|
||||
|
||||
// account is off.
|
||||
test("GET", "http://127.0.0.1/", http.StatusNotFound, "", nil)
|
||||
test("GET", "http://localhost/", http.StatusNotFound, "", nil)
|
||||
test("GET", "http://mox.example/", http.StatusNotFound, "", nil)
|
||||
test("GET", "http://mail.mox.example/", http.StatusNotFound, "", nil)
|
||||
}
|
||||
|
@ -1,35 +1,57 @@
|
||||
package http
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/sha1"
|
||||
"crypto/tls"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
htmltemplate "html/template"
|
||||
"io"
|
||||
"io/fs"
|
||||
golog "log"
|
||||
"log/slog"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httputil"
|
||||
"net/textproto"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/mjl-/mox/config"
|
||||
"github.com/mjl-/mox/dns"
|
||||
"github.com/mjl-/mox/mlog"
|
||||
"github.com/mjl-/mox/mox-"
|
||||
"github.com/mjl-/mox/moxio"
|
||||
)
|
||||
|
||||
func recvid(r *http.Request) string {
|
||||
cid := mox.CidFromCtx(r.Context())
|
||||
if cid <= 0 {
|
||||
return ""
|
||||
}
|
||||
return " (id " + mox.ReceivedID(cid) + ")"
|
||||
}
|
||||
|
||||
// WebHandle serves an HTTP request by going through the list of WebHandlers,
|
||||
// checking if there is a domain+path match, and running the handler if so.
|
||||
// WebHandle runs after the built-in handlers for mta-sts, autoconfig, etc.
|
||||
// If no handler matched, false is returned.
|
||||
// WebHandle sets w.Name to that of the matching handler.
|
||||
func WebHandle(w *loggingWriter, r *http.Request, host dns.Domain) (handled bool) {
|
||||
redirects, handlers := mox.Conf.WebServer()
|
||||
func WebHandle(w *loggingWriter, r *http.Request, host dns.IPDomain) (handled bool) {
|
||||
conf := mox.Conf.DynamicConfig()
|
||||
redirects := conf.WebDNSDomainRedirects
|
||||
handlers := conf.WebHandlers
|
||||
|
||||
for from, to := range redirects {
|
||||
if host != from {
|
||||
if host.Domain != from {
|
||||
continue
|
||||
}
|
||||
u := r.URL
|
||||
@ -41,7 +63,7 @@ func WebHandle(w *loggingWriter, r *http.Request, host dns.Domain) (handled bool
|
||||
}
|
||||
|
||||
for _, h := range handlers {
|
||||
if host != h.DNSDomain {
|
||||
if host.Domain != h.DNSDomain {
|
||||
continue
|
||||
}
|
||||
loc := h.Path.FindStringIndex(r.URL.Path)
|
||||
@ -57,11 +79,14 @@ func WebHandle(w *loggingWriter, r *http.Request, host dns.Domain) (handled bool
|
||||
u.Scheme = "https"
|
||||
u.Host = h.DNSDomain.Name()
|
||||
w.Handler = h.Name
|
||||
w.Compress = h.Compress
|
||||
http.Redirect(w, r, u.String(), http.StatusPermanentRedirect)
|
||||
return true
|
||||
}
|
||||
|
||||
if h.WebStatic != nil && HandleStatic(h.WebStatic, w, r) {
|
||||
// We don't want the loggingWriter to override the static handler's decisions to compress.
|
||||
w.Compress = h.Compress
|
||||
if h.WebStatic != nil && HandleStatic(h.WebStatic, h.Compress, w, r) {
|
||||
w.Handler = h.Name
|
||||
return true
|
||||
}
|
||||
@ -73,7 +98,12 @@ func WebHandle(w *loggingWriter, r *http.Request, host dns.Domain) (handled bool
|
||||
w.Handler = h.Name
|
||||
return true
|
||||
}
|
||||
if h.WebInternal != nil && HandleInternal(h.WebInternal, w, r) {
|
||||
w.Handler = h.Name
|
||||
return true
|
||||
}
|
||||
}
|
||||
w.Compress = false
|
||||
return false
|
||||
}
|
||||
|
||||
@ -124,18 +154,10 @@ table > tbody > tr:nth-child(odd) { background-color: #f8f8f8; }
|
||||
// slash is written. If a directory is requested and an index.html exists, that
|
||||
// file is returned. Otherwise, for directories with ListFiles configured, a
|
||||
// directory listing is returned.
|
||||
func HandleStatic(h *config.WebStatic, w http.ResponseWriter, r *http.Request) (handled bool) {
|
||||
log := func() *mlog.Log {
|
||||
return xlog.WithContext(r.Context())
|
||||
func HandleStatic(h *config.WebStatic, compress bool, w http.ResponseWriter, r *http.Request) (handled bool) {
|
||||
log := func() mlog.Log {
|
||||
return pkglog.WithContext(r.Context())
|
||||
}
|
||||
recvid := func() string {
|
||||
cid := mox.CidFromCtx(r.Context())
|
||||
if cid <= 0 {
|
||||
return ""
|
||||
}
|
||||
return " (id " + mox.ReceivedID(cid) + ")"
|
||||
}
|
||||
|
||||
if r.Method != "GET" && r.Method != "HEAD" {
|
||||
if h.ContinueNotFound {
|
||||
// Give another handler that is presumably configured for the same path a chance.
|
||||
@ -160,61 +182,90 @@ func HandleStatic(h *config.WebStatic, w http.ResponseWriter, r *http.Request) (
|
||||
} else {
|
||||
fspath = filepath.Join(h.Root, r.URL.Path)
|
||||
}
|
||||
// fspath will not have a trailing slash anymore, we'll correct for it
|
||||
// later when the path turns out to be file instead of a directory.
|
||||
|
||||
serveFile := func(name string, mtime time.Time, content *os.File) {
|
||||
serveFile := func(name string, fi fs.FileInfo, content *os.File) {
|
||||
// ServeContent only sets a content-type if not already present in the response headers.
|
||||
hdr := w.Header()
|
||||
for k, v := range h.ResponseHeaders {
|
||||
hdr.Add(k, v)
|
||||
}
|
||||
http.ServeContent(w, r, name, mtime, content)
|
||||
// We transparently compress here, but still use ServeContent, because it handles
|
||||
// conditional requests, range requests. It's a bit of a hack, but on first write
|
||||
// to staticgzcacheReplacer where we are compressing, we write the full compressed
|
||||
// file instead, and return an error to ServeContent so it stops. We still have all
|
||||
// the useful behaviour (status code and headers) from ServeContent.
|
||||
xw := w
|
||||
if compress && acceptsGzip(r) && compressibleContent(content) {
|
||||
xw = &staticgzcacheReplacer{w, r, content.Name(), content, fi.ModTime(), fi.Size(), 0, false}
|
||||
} else {
|
||||
w.(*loggingWriter).Compress = false
|
||||
}
|
||||
http.ServeContent(xw, r, name, fi.ModTime(), content)
|
||||
}
|
||||
|
||||
f, err := os.Open(fspath)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
if os.IsNotExist(err) || errors.Is(err, syscall.ENOTDIR) {
|
||||
if h.ContinueNotFound {
|
||||
// We haven't handled this request, try a next WebHandler in the list.
|
||||
return false
|
||||
}
|
||||
http.NotFound(w, r)
|
||||
return true
|
||||
} else if errors.Is(err, syscall.ENAMETOOLONG) {
|
||||
http.NotFound(w, r)
|
||||
return true
|
||||
} else if os.IsPermission(err) {
|
||||
// If we tried opening a directory, we may not have permission to read it, but
|
||||
// still access files inside it (execute bit), such as index.html. So try to serve it.
|
||||
index, err := os.Open(filepath.Join(fspath, "index.html"))
|
||||
if err == nil {
|
||||
defer index.Close()
|
||||
var ifi os.FileInfo
|
||||
ifi, err = index.Stat()
|
||||
if err != nil {
|
||||
log().Errorx("stat index.html in directory we cannot list", err, mlog.Field("url", r.URL), mlog.Field("fspath", fspath))
|
||||
http.Error(w, "500 - internal server error"+recvid(), http.StatusInternalServerError)
|
||||
return true
|
||||
}
|
||||
w.Header().Set("Content-Type", "text/html; charset=utf-8")
|
||||
serveFile("index.html", ifi.ModTime(), index)
|
||||
if err != nil {
|
||||
http.Error(w, "403 - permission denied", http.StatusForbidden)
|
||||
return true
|
||||
}
|
||||
http.Error(w, "403 - permission denied", http.StatusForbidden)
|
||||
defer func() {
|
||||
err := index.Close()
|
||||
log().Check(err, "closing index file for serving")
|
||||
}()
|
||||
var ifi os.FileInfo
|
||||
ifi, err = index.Stat()
|
||||
if err != nil {
|
||||
log().Errorx("stat index.html in directory we cannot list", err, slog.Any("url", r.URL), slog.String("fspath", fspath))
|
||||
http.Error(w, "500 - internal server error"+recvid(r), http.StatusInternalServerError)
|
||||
return true
|
||||
}
|
||||
w.Header().Set("Content-Type", "text/html; charset=utf-8")
|
||||
serveFile("index.html", ifi, index)
|
||||
return true
|
||||
}
|
||||
log().Errorx("open file for static file serving", err, mlog.Field("url", r.URL), mlog.Field("fspath", fspath))
|
||||
http.Error(w, "500 - internal server error"+recvid(), http.StatusInternalServerError)
|
||||
log().Errorx("open file for static file serving", err, slog.Any("url", r.URL), slog.String("fspath", fspath))
|
||||
http.Error(w, "500 - internal server error"+recvid(r), http.StatusInternalServerError)
|
||||
return true
|
||||
}
|
||||
defer f.Close()
|
||||
defer func() {
|
||||
if err := f.Close(); err != nil {
|
||||
log().Check(err, "closing file for static file serving")
|
||||
}
|
||||
}()
|
||||
|
||||
fi, err := f.Stat()
|
||||
if err != nil {
|
||||
log().Errorx("stat file for static file serving", err, mlog.Field("url", r.URL), mlog.Field("fspath", fspath))
|
||||
http.Error(w, "500 - internal server error"+recvid(), http.StatusInternalServerError)
|
||||
log().Errorx("stat file for static file serving", err, slog.Any("url", r.URL), slog.String("fspath", fspath))
|
||||
http.Error(w, "500 - internal server error"+recvid(r), http.StatusInternalServerError)
|
||||
return true
|
||||
}
|
||||
// Redirect if the local path is a directory.
|
||||
if fi.IsDir() && !strings.HasSuffix(r.URL.Path, "/") {
|
||||
http.Redirect(w, r, r.URL.Path+"/", http.StatusTemporaryRedirect)
|
||||
return true
|
||||
} else if !fi.IsDir() && strings.HasSuffix(r.URL.Path, "/") {
|
||||
if h.ContinueNotFound {
|
||||
return false
|
||||
}
|
||||
http.NotFound(w, r)
|
||||
return true
|
||||
}
|
||||
|
||||
if fi.IsDir() {
|
||||
@ -229,18 +280,23 @@ func HandleStatic(h *config.WebStatic, w http.ResponseWriter, r *http.Request) (
|
||||
http.Error(w, "403 - permission denied", http.StatusForbidden)
|
||||
return true
|
||||
} else if err == nil {
|
||||
defer index.Close()
|
||||
defer func() {
|
||||
if err := index.Close(); err != nil {
|
||||
log().Check(err, "closing index file for serving")
|
||||
}
|
||||
}()
|
||||
|
||||
var ifi os.FileInfo
|
||||
ifi, err = index.Stat()
|
||||
if err == nil {
|
||||
w.Header().Set("Content-Type", "text/html; charset=utf-8")
|
||||
serveFile("index.html", ifi.ModTime(), index)
|
||||
serveFile("index.html", ifi, index)
|
||||
return true
|
||||
}
|
||||
}
|
||||
if !os.IsNotExist(err) {
|
||||
log().Errorx("stat for static file serving", err, mlog.Field("url", r.URL), mlog.Field("fspath", fspath))
|
||||
http.Error(w, "500 - internal server error"+recvid(), http.StatusInternalServerError)
|
||||
log().Errorx("stat for static file serving", err, slog.Any("url", r.URL), slog.String("fspath", fspath))
|
||||
http.Error(w, "500 - internal server error"+recvid(r), http.StatusInternalServerError)
|
||||
return true
|
||||
}
|
||||
|
||||
@ -280,8 +336,8 @@ func HandleStatic(h *config.WebStatic, w http.ResponseWriter, r *http.Request) (
|
||||
if err == io.EOF {
|
||||
break
|
||||
} else if err != nil {
|
||||
log().Errorx("reading directory for file listing", err, mlog.Field("url", r.URL), mlog.Field("fspath", fspath))
|
||||
http.Error(w, "500 - internal server error"+recvid(), http.StatusInternalServerError)
|
||||
log().Errorx("reading directory for file listing", err, slog.Any("url", r.URL), slog.String("fspath", fspath))
|
||||
http.Error(w, "500 - internal server error"+recvid(r), http.StatusInternalServerError)
|
||||
return true
|
||||
}
|
||||
}
|
||||
@ -296,13 +352,13 @@ func HandleStatic(h *config.WebStatic, w http.ResponseWriter, r *http.Request) (
|
||||
}
|
||||
}
|
||||
err = lsTemplate.Execute(w, map[string]any{"Files": files})
|
||||
if err != nil && !moxio.IsClosed(err) {
|
||||
log().Errorx("executing directory listing template", err)
|
||||
if err != nil {
|
||||
log().Check(err, "executing directory listing template")
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
serveFile(fspath, fi.ModTime(), f)
|
||||
serveFile(fspath, fi, f)
|
||||
return true
|
||||
}
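
The calls above show serveFile taking the full os.FileInfo instead of only its ModTime. As a rough, self-contained sketch (not code from this diff; serveFileSketch and hello.txt are hypothetical), the standard library offers http.ServeContent, which uses the modification time for If-Modified-Since handling and also answers Range requests:

package main

import (
	"log"
	"net/http"
	"os"
)

// serveFileSketch is a hypothetical stand-in for the serveFile helper used in
// this file: it serves one open file with conditional-request support.
func serveFileSketch(w http.ResponseWriter, r *http.Request, name string, f *os.File) {
	fi, err := f.Stat()
	if err != nil {
		http.Error(w, "500 - internal server error", http.StatusInternalServerError)
		return
	}
	// *os.File satisfies the io.ReadSeeker that ServeContent needs.
	http.ServeContent(w, r, name, fi.ModTime(), f)
}

func main() {
	http.HandleFunc("/hello.txt", func(w http.ResponseWriter, r *http.Request) {
		f, err := os.Open("hello.txt") // hypothetical file on disk
		if err != nil {
			http.NotFound(w, r)
			return
		}
		defer f.Close()
		serveFileSketch(w, r, "hello.txt", f)
	})
	log.Fatal(http.ListenAndServe("127.0.0.1:8000", nil))
}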

@@ -341,22 +397,36 @@ func HandleRedirect(h *config.WebRedirect, w http.ResponseWriter, r *http.Reques
if h.StatusCode != 0 {
code = h.StatusCode
}

// If we would be redirecting to the same scheme,host,path, we would get here again
// causing a redirect loop. Instead, this causes this redirect to not match,
// allowing to try the next WebHandler. This can be used to redirect all plain http
// requests to https.
reqscheme := "http"
if r.TLS != nil {
reqscheme = "https"
}
if reqscheme == u.Scheme && r.Host == u.Host && r.URL.Path == u.Path {
return false
}

http.Redirect(w, r, u.String(), code)
return true
}
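
The scheme/host/path comparison above is what lets a redirect-all-to-https rule coexist with the handler that finally serves the https request. A standalone sketch of just that check (wouldRedirectLoop is an illustrative helper, not code from this repository):

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"net/url"
)

// wouldRedirectLoop reports whether redirecting r to target would send the
// client right back to the URL it already requested.
func wouldRedirectLoop(r *http.Request, target *url.URL) bool {
	scheme := "http"
	if r.TLS != nil {
		scheme = "https"
	}
	return scheme == target.Scheme && r.Host == target.Host && r.URL.Path == target.Path
}

func main() {
	r := httptest.NewRequest("GET", "http://example.org/x", nil)
	https, _ := url.Parse("https://example.org/x")
	same, _ := url.Parse("http://example.org/x")
	fmt.Println(wouldRedirectLoop(r, https)) // false: plain http -> https is fine
	fmt.Println(wouldRedirectLoop(r, same))  // true: would loop, decline instead
}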

// HandleInternal passes the request to an internal service.
func HandleInternal(h *config.WebInternal, w http.ResponseWriter, r *http.Request) (handled bool) {
h.Handler.ServeHTTP(w, r)
return true
}

// HandleForward handles a request by forwarding it to another webserver and
// passing the response on. I.e. a reverse proxy.
// passing the response on. I.e. a reverse proxy. It handles websocket
// connections by monitoring the websocket handshake and then just passing along the
// websocket frames.
func HandleForward(h *config.WebForward, w http.ResponseWriter, r *http.Request, path string) (handled bool) {
log := func() *mlog.Log {
return xlog.WithContext(r.Context())
}
recvid := func() string {
cid := mox.CidFromCtx(r.Context())
if cid <= 0 {
return ""
}
return " (id " + mox.ReceivedID(cid) + ")"
log := func() mlog.Log {
return pkglog.WithContext(r.Context())
}

xr := *r
@@ -364,6 +434,9 @@ func HandleForward(h *config.WebForward, w http.ResponseWriter, r *http.Request,
if h.StripPath {
u := *r.URL
u.Path = r.URL.Path[len(path):]
if !strings.HasPrefix(u.Path, "/") {
u.Path = "/" + u.Path
}
u.RawPath = ""
r.URL = &u
}
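
A small standalone sketch of the StripPath behaviour (stripMountPath is an illustrative helper, not code from this repository): the matched mount prefix is cut off and a leading slash is restored so the path forwarded to the backend stays absolute.

package main

import (
	"fmt"
	"strings"
)

// stripMountPath removes the matched mount prefix and keeps the result
// absolute, mirroring the StripPath logic above.
func stripMountPath(urlPath, mount string) string {
	p := strings.TrimPrefix(urlPath, mount)
	if !strings.HasPrefix(p, "/") {
		p = "/" + p
	}
	return p
}

func main() {
	// A request for /app/api/v1 forwarded under mount path /app becomes /api/v1.
	fmt.Println(stripMountPath("/app/api/v1", "/app"))
}
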
@@ -385,18 +458,45 @@ func HandleForward(h *config.WebForward, w http.ResponseWriter, r *http.Request,
proto = "https"
}
r.Header["X-Forwarded-Proto"] = []string{proto}
// note: We are not using "ws" or "wss" for websocket. The request we are
// forwarding is http(s), and we don't yet know if the backend even supports
// websockets.

// todo: add Forwarded header? is anyone using it?

// If we see an Upgrade: websocket, we're going to assume the client needs
// websocket and only attempt to talk websocket with the backend. If the backend
// doesn't do websocket, we'll send back a "bad request" response. For other values
// of Upgrade, we don't do anything special.
// https://www.iana.org/assignments/http-upgrade-tokens/http-upgrade-tokens.xhtml
// Upgrade: ../rfc/9110:2798
// Upgrade headers are not for http/1.0, ../rfc/9110:2880
// Websocket client "handshake" is described at ../rfc/6455:1134
upgrade := r.Header.Get("Upgrade")
if upgrade != "" && !(r.ProtoMajor == 1 && r.ProtoMinor == 0) {
// Websockets have case-insensitive string "websocket".
for _, s := range strings.Split(upgrade, ",") {
if strings.EqualFold(textproto.TrimString(s), "websocket") {
forwardWebsocket(h, w, r, path)
return true
}
}
}

// ReverseProxy will append any remaining path to the configured target URL.
proxy := httputil.NewSingleHostReverseProxy(h.TargetURL)
proxy.FlushInterval = time.Duration(-1) // Flush after each write.
proxy.ErrorLog = golog.New(mlog.ErrWriter(mlog.New("net/http/httputil").WithContext(r.Context()), mlog.LevelDebug, "reverseproxy error"), "", 0)
proxy.ErrorLog = golog.New(mlog.LogWriter(mlog.New("net/http/httputil", nil).WithContext(r.Context()), mlog.LevelDebug, "reverseproxy error"), "", 0)
proxy.ErrorHandler = func(w http.ResponseWriter, r *http.Request, err error) {
log().Errorx("forwarding request to backend webserver", err, mlog.Field("url", r.URL))
if errors.Is(err, context.Canceled) {
log().Debugx("forwarding request to backend webserver", err, slog.Any("url", r.URL))
return
}
log().Errorx("forwarding request to backend webserver", err, slog.Any("url", r.URL))
if os.IsTimeout(err) {
http.Error(w, "504 - gateway timeout"+recvid(), http.StatusGatewayTimeout)
http.Error(w, "504 - gateway timeout"+recvid(r), http.StatusGatewayTimeout)
} else {
http.Error(w, "502 - bad gateway"+recvid(), http.StatusBadGateway)
http.Error(w, "502 - bad gateway"+recvid(r), http.StatusBadGateway)
}
}
whdr := w.Header()
@@ -406,3 +506,365 @@ func HandleForward(h *config.WebForward, w http.ResponseWriter, r *http.Request,
proxy.ServeHTTP(w, r)
return true
}
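
For reference, a minimal self-contained sketch of the standard-library pieces HandleForward relies on (the backend address and listen address are placeholders, not mox configuration): httputil.NewSingleHostReverseProxy appends the remaining request path to the target URL, a negative FlushInterval flushes after every write, and ErrorHandler overrides the default bad-gateway response.

package main

import (
	"log"
	"net/http"
	"net/http/httputil"
	"net/url"
)

func main() {
	target, err := url.Parse("http://127.0.0.1:8080") // hypothetical backend
	if err != nil {
		log.Fatal(err)
	}
	proxy := httputil.NewSingleHostReverseProxy(target)
	proxy.FlushInterval = -1 // Flush after each write, useful for streaming responses.
	proxy.ErrorHandler = func(w http.ResponseWriter, r *http.Request, err error) {
		log.Printf("proxy error for %s: %v", r.URL, err)
		http.Error(w, "502 - bad gateway", http.StatusBadGateway)
	}
	log.Fatal(http.ListenAndServe("127.0.0.1:8000", proxy))
}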

var errResponseNotWebsocket = errors.New("not a valid websocket response to request")
var errNotImplemented = errors.New("functionality not yet implemented")

// Request has an Upgrade: websocket header. Check more websocketiness about the
// request. If it looks good, we forward it to the backend. If the backend responds
// with a valid websocket response, indicating it is indeed a websocket server, we
// pass the response along and start copying data between the client and the
// backend. We don't look at the frames and payloads. The backend already needs to
// know enough websocket to handle the frames. It wouldn't necessarily hurt to
// monitor the frames too, and check if they are valid, but it's quite a bit of
// work for little benefit. Besides, the whole point of websockets is to exchange
// bytes without HTTP being in the way, so let's do that.
func forwardWebsocket(h *config.WebForward, w http.ResponseWriter, r *http.Request, path string) (handled bool) {
log := func() mlog.Log {
return pkglog.WithContext(r.Context())
}

lw := w.(*loggingWriter)
lw.WebsocketRequest = true // For correct protocol in metrics.

// We check the requested websocket version first. A future websocket version may
// have different request requirements.
// ../rfc/6455:1160
wsversion := r.Header.Get("Sec-WebSocket-Version")
if wsversion != "13" {
// Indicate we only support version 13. Should get a client from the future to fall back to version 13.
// ../rfc/6455:1435
w.Header().Set("Sec-WebSocket-Version", "13")
http.Error(w, "400 - bad request - websockets only supported with version 13"+recvid(r), http.StatusBadRequest)
lw.error(fmt.Errorf("Sec-WebSocket-Version %q not supported", wsversion))
return true
}

// ../rfc/6455:1143
if r.Method != "GET" {
http.Error(w, "400 - bad request - websockets only allowed with method GET"+recvid(r), http.StatusBadRequest)
lw.error(fmt.Errorf("websocket request only allowed with method GET"))
return true
}

// ../rfc/6455:1153
var connectionUpgrade bool
for _, s := range strings.Split(r.Header.Get("Connection"), ",") {
if strings.EqualFold(textproto.TrimString(s), "upgrade") {
connectionUpgrade = true
break
}
}
if !connectionUpgrade {
http.Error(w, "400 - bad request - connection header must be \"upgrade\""+recvid(r), http.StatusBadRequest)
lw.error(fmt.Errorf(`connection header is %q, must be "upgrade"`, r.Header.Get("Connection")))
return true
}

// ../rfc/6455:1156
wskey := r.Header.Get("Sec-WebSocket-Key")
key, err := base64.StdEncoding.DecodeString(wskey)
if err != nil || len(key) != 16 {
http.Error(w, "400 - bad request - websockets requires Sec-WebSocket-Key with 16 bytes base64-encoded value"+recvid(r), http.StatusBadRequest)
lw.error(fmt.Errorf("bad Sec-WebSocket-Key %q, must be 16 byte base64-encoded value", wskey))
return true
}
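
The four request checks above, gathered into one standalone helper as an illustrative sketch (checkWebsocketHandshake is not a function in this file): RFC 6455 wants Sec-WebSocket-Version 13, method GET, a Connection header containing the "upgrade" token, and a Sec-WebSocket-Key that decodes to exactly 16 bytes.

package main

import (
	"encoding/base64"
	"fmt"
	"net/http"
	"net/http/httptest"
	"net/textproto"
	"strings"
)

func checkWebsocketHandshake(r *http.Request) error {
	if v := r.Header.Get("Sec-WebSocket-Version"); v != "13" {
		return fmt.Errorf("unsupported Sec-WebSocket-Version %q", v)
	}
	if r.Method != "GET" {
		return fmt.Errorf("websocket requires method GET, got %s", r.Method)
	}
	var upgrade bool
	for _, s := range strings.Split(r.Header.Get("Connection"), ",") {
		if strings.EqualFold(textproto.TrimString(s), "upgrade") {
			upgrade = true
			break
		}
	}
	if !upgrade {
		return fmt.Errorf(`Connection header must contain "upgrade"`)
	}
	key, err := base64.StdEncoding.DecodeString(r.Header.Get("Sec-WebSocket-Key"))
	if err != nil || len(key) != 16 {
		return fmt.Errorf("Sec-WebSocket-Key must be a base64-encoded 16-byte value")
	}
	return nil
}

func main() {
	r := httptest.NewRequest("GET", "http://backend.example/ws", nil) // placeholder URL
	r.Header.Set("Sec-WebSocket-Version", "13")
	r.Header.Set("Connection", "keep-alive, Upgrade")
	r.Header.Set("Upgrade", "websocket")
	r.Header.Set("Sec-WebSocket-Key", base64.StdEncoding.EncodeToString(make([]byte, 16)))
	fmt.Println(checkWebsocketHandshake(r)) // <nil>
}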

// ../rfc/6455:1162
// We don't look at the origin header. The backend needs to handle it, if it thinks
// that helps...
// We also don't look at Sec-WebSocket-Protocol and Sec-WebSocket-Extensions. The
// backend can set them, but it doesn't influence our forwarding of the data.

// If this is not a hijacker, there is no point in connecting to the backend.
hj, ok := lw.W.(http.Hijacker)
var cbr *bufio.ReadWriter
if !ok {
log().Info("cannot turn http connection into tcp connection (http.Hijacker)")
http.Error(w, "501 - not implemented - cannot turn this connection into websocket"+recvid(r), http.StatusNotImplemented)
lw.error(fmt.Errorf("connection not a http.Hijacker (%T)", lw.W))
return
}

freq := *r
freq.Proto = "HTTP/1.1"
freq.ProtoMajor = 1
freq.ProtoMinor = 1
fresp, beconn, err := websocketTransact(r.Context(), h.TargetURL, &freq)
if err != nil {
if errors.Is(err, errResponseNotWebsocket) {
http.Error(w, "400 - bad request - websocket not supported"+recvid(r), http.StatusBadRequest)
} else if errors.Is(err, errNotImplemented) {
http.Error(w, "501 - not implemented - "+err.Error()+recvid(r), http.StatusNotImplemented)
} else if os.IsTimeout(err) {
http.Error(w, "504 - gateway timeout"+recvid(r), http.StatusGatewayTimeout)
} else {
http.Error(w, "502 - bad gateway"+recvid(r), http.StatusBadGateway)
}
lw.error(err)
return
}
defer func() {
if beconn != nil {
if err := beconn.Close(); err != nil {
log().Check(err, "closing backend websocket connection")
}
}
}()

// Hijack the client connection so we can write the response ourselves, and start
// copying the websocket frames.
var cconn net.Conn
cconn, cbr, err = hj.Hijack()
if err != nil {
log().Debugx("cannot turn http transaction into websocket connection", err)
http.Error(w, "501 - not implemented - cannot turn this connection into websocket"+recvid(r), http.StatusNotImplemented)
lw.error(err)
return
}
defer func() {
if cconn != nil {
if err := cconn.Close(); err != nil {
log().Check(err, "closing client websocket connection")
}
}
}()

// Below this point, we can no longer write to the ResponseWriter.

// Mark as websocket response, for logging.
lw.WebsocketResponse = true
lw.setStatusCode(fresp.StatusCode)

for k, v := range h.ResponseHeaders {
fresp.Header.Add(k, v)
}

// Write the response to the client, completing its websocket handshake.
if err := fresp.Write(cconn); err != nil {
lw.error(fmt.Errorf("writing websocket response to client: %w", err))
return
}

errc := make(chan error, 1)

// Copy from client to backend.
go func() {
buf, err := cbr.Peek(cbr.Reader.Buffered())
if err != nil {
errc <- err
return
}
if len(buf) > 0 {
n, err := beconn.Write(buf)
if err != nil {
errc <- err
return
}
lw.SizeFromClient += int64(n)
}
n, err := io.Copy(beconn, cconn)
lw.SizeFromClient += n
errc <- err
}()

// Copy from backend to client.
go func() {
n, err := io.Copy(cconn, beconn)
lw.SizeToClient = n
errc <- err
}()

// Stop and close connection on first error from either side, typically a closed
// connection whose closing was already announced with a websocket frame.
lw.error(<-errc)
// Close connections so other goroutine stops as well.
if err := cconn.Close(); err != nil {
log().Check(err, "closing client websocket connection")
}
if err := beconn.Close(); err != nil {
log().Check(err, "closing backend websocket connection")
}
// Wait for goroutine so it has updated the logWriter.Size*Client fields before we
// continue with logging.
<-errc
cconn = nil
return true
}
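
The copy loop above follows a common pattern for relaying between two connections. A reduced, self-contained sketch (pump is an illustrative helper, not code from this repository): the first error or EOF closes both ends, which unblocks the other copier, and the second channel receive makes sure both byte counters are final before they are read.

package main

import (
	"fmt"
	"io"
	"net"
)

// pump relays bytes in both directions until one side fails or closes, then
// tears down both connections and waits for the other copier to finish.
func pump(a, b net.Conn) (aToB, bToA int64) {
	errc := make(chan error, 1)
	go func() {
		n, err := io.Copy(b, a)
		aToB = n
		errc <- err
	}()
	go func() {
		n, err := io.Copy(a, b)
		bToA = n
		errc <- err
	}()
	<-errc // One direction finished or failed.
	a.Close()
	b.Close()
	<-errc // Wait for the other goroutine before reading the counters.
	return aToB, bToA
}

func main() {
	a1, a2 := net.Pipe()
	b1, b2 := net.Pipe()
	go pump(a2, b1) // relay between the two in-memory pipes
	go func() {
		a1.Write([]byte("hello"))
		a1.Close()
	}()
	buf := make([]byte, 5)
	io.ReadFull(b2, buf)
	fmt.Printf("%s\n", buf) // hello
}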

func websocketTransact(ctx context.Context, targetURL *url.URL, r *http.Request) (rresp *http.Response, rconn net.Conn, rerr error) {
log := func() mlog.Log {
return pkglog.WithContext(r.Context())
}

// Dial the backend, possibly doing TLS. We assume the net/http DefaultTransport is
// unmodified.
transport := http.DefaultTransport.(*http.Transport)

// We haven't implemented using a proxy for websocket requests yet. If we need one,
// return an error instead of trying to connect directly, which would be a
// potential security issue.
treq := *r
treq.URL = targetURL
if purl, err := transport.Proxy(&treq); err != nil {
return nil, nil, fmt.Errorf("determining proxy for websocket backend connection: %w", err)
} else if purl != nil {
return nil, nil, fmt.Errorf("%w: proxy required for websocket connection to backend", errNotImplemented) // todo: implement?
}

host, port, err := net.SplitHostPort(targetURL.Host)
if err != nil {
host = targetURL.Host
if targetURL.Scheme == "https" {
port = "443"
} else {
port = "80"
}
}
addr := net.JoinHostPort(host, port)
conn, err := transport.DialContext(r.Context(), "tcp", addr)
if err != nil {
return nil, nil, fmt.Errorf("dial: %w", err)
}
if targetURL.Scheme == "https" {
tlsconn := tls.Client(conn, transport.TLSClientConfig)
ctx, cancel := context.WithTimeout(r.Context(), transport.TLSHandshakeTimeout)
defer cancel()
if err := tlsconn.HandshakeContext(ctx); err != nil {
return nil, nil, fmt.Errorf("tls handshake: %w", err)
}
conn = tlsconn
}
defer func() {
if rerr != nil {
if xerr := conn.Close(); xerr != nil {
log().Check(xerr, "cleaning up websocket connection")
}
}
}()

// todo: make timeout configurable?
if err := conn.SetDeadline(time.Now().Add(30 * time.Second)); err != nil {
log().Check(err, "set deadline for websocket request to backend")
}

// Set clean connection headers.
removeHopByHopHeaders(r.Header)
r.Header.Set("Connection", "Upgrade")
r.Header.Set("Upgrade", "websocket")

// Write the websocket request to the backend.
if err := r.Write(conn); err != nil {
return nil, nil, fmt.Errorf("writing request to backend: %w", err)
}

// Read response from backend.
br := bufio.NewReader(conn)
resp, err := http.ReadResponse(br, r)
if err != nil {
return nil, nil, fmt.Errorf("reading response from backend: %w", err)
}
defer func() {
if rerr != nil {
if xerr := resp.Body.Close(); xerr != nil {
log().Check(xerr, "closing response body after error")
}
}
}()
if err := conn.SetDeadline(time.Time{}); err != nil {
log().Check(err, "clearing deadline on websocket connection to backend")
}

// Check that the response from the backend server indicates it is websocket. If
// not, don't pass the backend response, but an error that websocket is not
// appropriate.
if err := checkWebsocketResponse(resp, r); err != nil {
return resp, nil, err
}

// note: net/http.Response.Body documents that it implements io.Writer for a
// status: 101 response. But that's not the case when the response has been read
// with http.ReadResponse. We'll write to the connection directly.

buf, err := br.Peek(br.Buffered())
if err != nil {
return resp, nil, fmt.Errorf("peek at buffered data written by backend: %w", err)
}
return resp, websocketConn{io.MultiReader(bytes.NewReader(buf), conn), conn}, nil
}

// A net.Conn but with reads coming from an io multireader (due to buffered reader
// needed for http.ReadResponse).
type websocketConn struct {
r io.Reader
net.Conn
}

func (c websocketConn) Read(buf []byte) (int, error) {
return c.r.Read(buf)
}
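
The io.MultiReader above exists because http.ReadResponse parses through a bufio.Reader, which may already have consumed bytes beyond the response headers, e.g. the first websocket frames sent by the backend. Those bytes only live in the reader's buffer, so reads must drain the buffer before continuing from the underlying connection. A tiny self-contained demonstration (the sample data is made up):

package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"strings"
)

func main() {
	// Stand-in for the backend connection: a header line followed by bytes
	// that belong to the next protocol phase.
	src := strings.NewReader("HEADERS\nframe-bytes-already-buffered")
	br := bufio.NewReader(src)
	if _, err := br.ReadString('\n'); err != nil { // "Parsing" consumes into the buffer.
		panic(err)
	}
	// Whatever the parser over-read is still sitting in the bufio buffer.
	buffered, _ := br.Peek(br.Buffered())
	r := io.MultiReader(bytes.NewReader(buffered), src)
	rest, _ := io.ReadAll(r)
	fmt.Printf("%s\n", rest) // frame-bytes-already-buffered
}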

// Check that an HTTP response (from a backend) is a valid websocket response, i.e.
// that it accepts the WebSocket "upgrade".
// ../rfc/6455:1299
func checkWebsocketResponse(resp *http.Response, req *http.Request) error {
if resp.StatusCode != 101 {
return fmt.Errorf("%w: response http status not 101 but %s", errResponseNotWebsocket, resp.Status)
}
if upgrade := resp.Header.Get("Upgrade"); !strings.EqualFold(upgrade, "websocket") {
return fmt.Errorf(`%w: response http status is 101, but Upgrade header is %q, should be "websocket"`, errResponseNotWebsocket, upgrade)
}
if connection := resp.Header.Get("Connection"); !strings.EqualFold(connection, "upgrade") {
return fmt.Errorf(`%w: response http status is 101, Upgrade is websocket, but Connection header is %q, should be "Upgrade"`, errResponseNotWebsocket, connection)
}
accept, err := base64.StdEncoding.DecodeString(resp.Header.Get("Sec-WebSocket-Accept"))
if err != nil {
return fmt.Errorf(`%w: response http status, Upgrade and Connection header are websocket, but Sec-WebSocket-Accept header is not valid base64: %v`, errResponseNotWebsocket, err)
}
exp := sha1.Sum([]byte(req.Header.Get("Sec-WebSocket-Key") + "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"))
if !bytes.Equal(accept, exp[:]) {
return fmt.Errorf(`%w: response http status, Upgrade and Connection header are websocket, but backend Sec-WebSocket-Accept value does not match`, errResponseNotWebsocket)
}
// We don't have requirements for the other Sec-WebSocket headers. ../rfc/6455:1340
return nil
}
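
The sha1.Sum comparison above follows RFC 6455, section 4.2.2: the server appends a fixed GUID to the client's Sec-WebSocket-Key, hashes the result with SHA-1 and base64-encodes the digest. A standalone sketch that reproduces the RFC's own example value:

package main

import (
	"crypto/sha1"
	"encoding/base64"
	"fmt"
)

// websocketAccept derives the Sec-WebSocket-Accept value for a given
// Sec-WebSocket-Key, as required by RFC 6455.
func websocketAccept(secWebSocketKey string) string {
	sum := sha1.Sum([]byte(secWebSocketKey + "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"))
	return base64.StdEncoding.EncodeToString(sum[:])
}

func main() {
	// Example key from RFC 6455; prints s3pPLMBiTxaQ9kYGzzhZRbK+xOo=
	fmt.Println(websocketAccept("dGhlIHNhbXBsZSBub25jZQ=="))
}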

// From Go 1.20.4 src/net/http/httputil/reverseproxy.go:
// Hop-by-hop headers. These are removed when sent to the backend.
// As of RFC 7230, hop-by-hop headers are required to appear in the
// Connection header field. These are the headers defined by the
// obsoleted RFC 2616 (section 13.5.1) and are used for backward
// compatibility.
// ../rfc/2616:5128
var hopHeaders = []string{
"Connection",
"Proxy-Connection", // non-standard but still sent by libcurl and rejected by e.g. google
"Keep-Alive",
"Proxy-Authenticate",
"Proxy-Authorization",
"Te", // canonicalized version of "TE"
"Trailer", // not Trailers per URL above; https://www.rfc-editor.org/errata_search.php?eid=4522
"Transfer-Encoding",
"Upgrade",
}

// From Go 1.20.4 src/net/http/httputil/reverseproxy.go:
// removeHopByHopHeaders removes hop-by-hop headers.
func removeHopByHopHeaders(h http.Header) {
// RFC 7230, section 6.1: Remove headers listed in the "Connection" header.
// ../rfc/7230:2817
for _, f := range h["Connection"] {
for _, sf := range strings.Split(f, ",") {
if sf = textproto.TrimString(sf); sf != "" {
h.Del(sf)
}
}
}
// RFC 2616, section 13.5.1: Remove a set of known hop-by-hop headers.
// This behavior is superseded by the RFC 7230 Connection header, but
// preserve it for backwards compatibility.
// ../rfc/2616:5128
for _, f := range hopHeaders {
h.Del(f)
}
}
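
A short usage sketch of the hop-by-hop stripping (removeHopByHop below is a trimmed copy of removeHopByHopHeaders so the example runs standalone): headers named in the Connection header and the fixed hop-by-hop set are dropped before the request is written to the backend, while end-to-end headers such as Authorization survive.

package main

import (
	"fmt"
	"net/http"
	"net/textproto"
	"strings"
)

// removeHopByHop is a trimmed copy of the function above, kept here only so
// the example compiles and runs on its own.
func removeHopByHop(h http.Header) {
	for _, f := range h["Connection"] {
		for _, sf := range strings.Split(f, ",") {
			if sf = textproto.TrimString(sf); sf != "" {
				h.Del(sf)
			}
		}
	}
	for _, f := range []string{"Connection", "Keep-Alive", "Proxy-Authorization", "Te", "Trailer", "Transfer-Encoding", "Upgrade"} {
		h.Del(f)
	}
}

func main() {
	h := http.Header{}
	h.Set("Connection", "X-Stream, keep-alive") // X-Stream is a made-up header name
	h.Set("Keep-Alive", "timeout=5")
	h.Set("X-Stream", "1")
	h.Set("Authorization", "Bearer abc") // end-to-end, kept
	removeHopByHop(h)
	fmt.Println(h) // map[Authorization:[Bearer abc]]
}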