Mirror of https://github.com/mjl-/mox.git, synced 2025-06-28 03:08:14 +03:00.
Compare commits
344 Commits
.github/workflows/build-test.yml (vendored), 3 lines changed

@@ -27,8 +27,9 @@ jobs:
# Need to run tests with a temp dir on same file system for os.Rename to succeed.
- run: 'mkdir -p tmp && TMPDIR=$PWD/tmp make test'

- uses: actions/upload-artifact@v3
- uses: actions/upload-artifact@v4
  with:
    name: coverage-${{ matrix.go-version }}
    path: cover.html

# Format code, we check below if nothing changed.
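The comment in the workflow above points at a real constraint: os.Rename cannot move a file across filesystems, so the tests need TMPDIR on the same filesystem as the working tree. A small illustrative Go sketch (not part of mox; the EXDEV check is Linux/Unix-specific) of the failure mode:

```go
package main

import (
	"errors"
	"fmt"
	"os"
	"syscall"
)

func main() {
	// Create a file in the default temp dir, which may live on a different
	// filesystem (e.g. a tmpfs) than the current working directory.
	f, err := os.CreateTemp("", "example-*")
	if err != nil {
		panic(err)
	}
	f.Close()

	// os.Rename does not fall back to copy+delete; across filesystems it fails.
	err = os.Rename(f.Name(), "./renamed.tmp")
	switch {
	case errors.Is(err, syscall.EXDEV):
		fmt.Println("cross-device rename, set TMPDIR to the same filesystem:", err)
	case err != nil:
		fmt.Println("rename error:", err)
	default:
		fmt.Println("rename ok (temp dir is on the same filesystem)")
		os.Remove("./renamed.tmp")
	}
}
```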
.gitignore (vendored), 2 lines changed

@@ -5,7 +5,7 @@
/local/
/testdata/check/
/testdata/*/data/
/testdata/ctl/dkim/
/testdata/ctl/config/dkim/
/testdata/empty/
/testdata/exportmaildir/
/testdata/exportmbox/
Makefile, 127 lines changed

@@ -7,9 +7,7 @@ build0:
    CGO_ENABLED=0 go build
    CGO_ENABLED=0 go vet ./...
    ./gendoc.sh
    (cd webadmin && CGO_ENABLED=0 go run ../vendor/github.com/mjl-/sherpadoc/cmd/sherpadoc/*.go -adjust-function-names none -rename 'config Domain ConfigDomain,dmarc Policy DMARCPolicy,mtasts MX STSMX,tlsrptdb Record TLSReportRecord,tlsrptdb SuppressAddress TLSRPTSuppressAddress' Admin) >webadmin/api.json
    (cd webaccount && CGO_ENABLED=0 go run ../vendor/github.com/mjl-/sherpadoc/cmd/sherpadoc/*.go -adjust-function-names none Account) >webaccount/api.json
    (cd webmail && CGO_ENABLED=0 go run ../vendor/github.com/mjl-/sherpadoc/cmd/sherpadoc/*.go -adjust-function-names none Webmail) >webmail/api.json
    ./genapidoc.sh
    ./gents.sh webadmin/api.json webadmin/api.ts
    ./gents.sh webaccount/api.json webaccount/api.ts
    ./gents.sh webmail/api.json webmail/api.ts
@@ -25,13 +23,16 @@ race: build0
    go build -race

test:
    CGO_ENABLED=0 go test -shuffle=on -coverprofile cover.out ./...
    CGO_ENABLED=0 go test -fullpath -shuffle=on -coverprofile cover.out ./...
    go tool cover -html=cover.out -o cover.html

test-race:
    CGO_ENABLED=1 go test -race -shuffle=on -covermode atomic -coverprofile cover.out ./...
    CGO_ENABLED=1 go test -fullpath -race -shuffle=on -covermode atomic -coverprofile cover.out ./...
    go tool cover -html=cover.out -o cover.html

test-more:
    TZ= CGO_ENABLED=0 go test -fullpath -shuffle=on -count 2 ./...

# note: if testdata/upgradetest.mbox.gz exists, its messages will be imported
# during tests. helpful for performance/resource consumption tests.
test-upgrade: build
@@ -39,7 +40,10 @@ test-upgrade: build

# needed for "check" target
install-staticcheck:
    go install honnef.co/go/tools/cmd/staticcheck@v0.4.7
    CGO_ENABLED=0 go install honnef.co/go/tools/cmd/staticcheck@latest

install-ineffassign:
    CGO_ENABLED=0 go install github.com/gordonklaus/ineffassign@v0.1.0

check:
    CGO_ENABLED=0 go vet -tags integration
@@ -48,59 +52,68 @@ check:
    CGO_ENABLED=0 go vet -tags errata rfc/errata.go
    CGO_ENABLED=0 go vet -tags xr rfc/xr.go
    GOARCH=386 CGO_ENABLED=0 go vet ./...
    staticcheck ./...
    staticcheck -tags integration
    staticcheck -tags website website/website.go
    staticcheck -tags link rfc/link.go
    staticcheck -tags errata rfc/errata.go
    staticcheck -tags xr rfc/xr.go
    CGO_ENABLED=0 ineffassign ./...
    CGO_ENABLED=0 staticcheck ./...
    CGO_ENABLED=0 staticcheck -tags integration
    CGO_ENABLED=0 staticcheck -tags website website/website.go
    CGO_ENABLED=0 staticcheck -tags link rfc/link.go
    CGO_ENABLED=0 staticcheck -tags errata rfc/errata.go
    CGO_ENABLED=0 staticcheck -tags xr rfc/xr.go

# needed for check-shadow
install-shadow:
    go install golang.org/x/tools/go/analysis/passes/shadow/cmd/shadow@v0.19.0
    CGO_ENABLED=0 go install golang.org/x/tools/go/analysis/passes/shadow/cmd/shadow@latest

# having "err" shadowed is common, best to not have others
check-shadow:
    go vet -vettool=$$(which shadow) ./... 2>&1 | grep -v '"err"'
    go vet -tags integration -vettool=$$(which shadow) 2>&1 | grep -v '"err"'
    go vet -tags website -vettool=$$(which shadow) website/website.go 2>&1 | grep -v '"err"'
    go vet -tags link -vettool=$$(which shadow) rfc/link.go 2>&1 | grep -v '"err"'
    go vet -tags errata -vettool=$$(which shadow) rfc/errata.go 2>&1 | grep -v '"err"'
    go vet -tags xr -vettool=$$(which shadow) rfc/xr.go 2>&1 | grep -v '"err"'
    CGO_ENABLED=0 go vet -vettool=$$(which shadow) ./... 2>&1 | grep -v '"err"'
    CGO_ENABLED=0 go vet -tags integration -vettool=$$(which shadow) 2>&1 | grep -v '"err"'
    CGO_ENABLED=0 go vet -tags website -vettool=$$(which shadow) website/website.go 2>&1 | grep -v '"err"'
    CGO_ENABLED=0 go vet -tags link -vettool=$$(which shadow) rfc/link.go 2>&1 | grep -v '"err"'
    CGO_ENABLED=0 go vet -tags errata -vettool=$$(which shadow) rfc/errata.go 2>&1 | grep -v '"err"'
    CGO_ENABLED=0 go vet -tags xr -vettool=$$(which shadow) rfc/xr.go 2>&1 | grep -v '"err"'
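The check-shadow target above filters out reports about shadowed err variables, which are common and usually harmless, while keeping other shadowing reports visible. A small hypothetical Go example (not mox code) of the kind of bug the shadow analyzer typically reports:

```go
package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
)

// countFromFile returns the integer stored in path, or 0 if unreadable.
func countFromFile(path string) (int, error) {
	count := 0
	data, err := os.ReadFile(path)
	if err == nil {
		// BUG: ":=" declares new count and err variables that shadow the outer
		// ones, so the outer count is never updated. The shadow analyzer tends
		// to report both declarations; the Makefile's grep -v '"err"' hides the
		// very common err report, leaving the interesting count report visible.
		count, err := strconv.Atoi(strings.TrimSpace(string(data)))
		if err != nil {
			return count, err
		}
	}
	return count, err
}

func main() {
	n, err := countFromFile("count.txt")
	fmt.Println(n, err)
}
```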
fuzz:
    go test -fuzz FuzzParseSignature -fuzztime 5m ./dkim
    go test -fuzz FuzzParseRecord -fuzztime 5m ./dkim
    go test -fuzz . -fuzztime 5m ./dmarc
    go test -fuzz . -fuzztime 5m ./dmarcrpt
    go test -fuzz . -parallel 1 -fuzztime 5m ./imapserver
    go test -fuzz . -parallel 1 -fuzztime 5m ./junk
    go test -fuzz FuzzParseRecord -fuzztime 5m ./mtasts
    go test -fuzz FuzzParsePolicy -fuzztime 5m ./mtasts
    go test -fuzz . -parallel 1 -fuzztime 5m ./smtpserver
    go test -fuzz . -fuzztime 5m ./spf
    go test -fuzz FuzzParseRecord -fuzztime 5m ./tlsrpt
    go test -fuzz FuzzParseMessage -fuzztime 5m ./tlsrpt
    go test -fullpath -fuzz FuzzParseSignature -fuzztime 5m ./dkim
    go test -fullpath -fuzz FuzzParseRecord -fuzztime 5m ./dkim
    go test -fullpath -fuzz . -fuzztime 5m ./dmarc
    go test -fullpath -fuzz . -fuzztime 5m ./dmarcrpt
    go test -fullpath -fuzz . -parallel 1 -fuzztime 5m ./imapserver
    go test -fullpath -fuzz . -fuzztime 5m ./imapclient
    go test -fullpath -fuzz . -parallel 1 -fuzztime 5m ./junk
    go test -fullpath -fuzz FuzzParseRecord -fuzztime 5m ./mtasts
    go test -fullpath -fuzz FuzzParsePolicy -fuzztime 5m ./mtasts
    go test -fullpath -fuzz . -fuzztime 5m ./smtp
    go test -fullpath -fuzz . -parallel 1 -fuzztime 5m ./smtpserver
    go test -fullpath -fuzz . -fuzztime 5m ./spf
    go test -fullpath -fuzz FuzzParseRecord -fuzztime 5m ./tlsrpt
    go test -fullpath -fuzz FuzzParseMessage -fuzztime 5m ./tlsrpt
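The fuzz targets above drive Go's built-in fuzzing engine against the record and message parsers. A minimal, self-contained sketch of such a fuzz test; the package and parser here are hypothetical stand-ins, not mox's actual tests:

```go
// parse_fuzz_test.go: a minimal native Go fuzz test in the spirit of the
// Makefile targets above. Run with: go test -fuzz FuzzParseKV -fuzztime 1m .
package parse

import (
	"strings"
	"testing"
)

// parseKV parses a "k=v; k=v" style record, a stand-in for the DNS TXT record
// parsers that mox fuzzes (DKIM, MTA-STS, TLSRPT, ...).
func parseKV(s string) (map[string]string, bool) {
	m := map[string]string{}
	for _, part := range strings.Split(s, ";") {
		part = strings.TrimSpace(part)
		if part == "" {
			continue
		}
		k, v, ok := strings.Cut(part, "=")
		if !ok {
			return nil, false
		}
		m[strings.TrimSpace(k)] = strings.TrimSpace(v)
	}
	return m, true
}

func FuzzParseKV(f *testing.F) {
	// Seed corpus; the fuzzer mutates these inputs.
	f.Add("v=TLSRPTv1; rua=mailto:tlsrpt@example.org")
	f.Add("")
	f.Fuzz(func(t *testing.T, s string) {
		// Property checked: the parser must never panic, whatever the input.
		_, _ = parseKV(s)
	})
}
```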
govendor:
    go mod tidy
    go mod vendor
    ./genlicenses.sh

test-integration:
    -docker compose -f docker-compose-integration.yml kill
    -docker compose -f docker-compose-integration.yml down
    docker image build --pull --no-cache -f Dockerfile -t mox_integration_moxmail .
    docker image build --pull --no-cache -f testdata/integration/Dockerfile.test -t mox_integration_test testdata/integration
    -rm -rf testdata/integration/moxacmepebble/data
    -rm -rf testdata/integration/moxmail2/data
    -rm -f testdata/integration/tmp-pebble-ca.pem
    MOX_UID=$$(id -u) docker-compose -f docker-compose-integration.yml run test
    docker-compose -f docker-compose-integration.yml down --timeout 1
    MOX_UID=$$(id -u) docker compose -f docker-compose-integration.yml run test
    docker compose -f docker-compose-integration.yml kill


imaptest-build:
    -docker-compose -f docker-compose-imaptest.yml build --no-cache --pull mox
    -docker compose -f docker-compose-imaptest.yml build --no-cache --pull mox

imaptest-run:
    -rm -r testdata/imaptest/data
    mkdir testdata/imaptest/data
    docker-compose -f docker-compose-imaptest.yml run --entrypoint /usr/local/bin/imaptest imaptest host=mox port=1143 user=mjl@mox.example pass=testtest mbox=imaptest.mbox
    docker-compose -f docker-compose-imaptest.yml down
    docker compose -f docker-compose-imaptest.yml run --entrypoint /usr/local/bin/imaptest imaptest host=mox port=1143 user=mjl@mox.example pass=testtest mbox=imaptest.mbox
    docker compose -f docker-compose-imaptest.yml down


fmt:
@@ -112,33 +125,33 @@ tswatch:

node_modules/.bin/tsc:
    -mkdir -p node_modules/.bin
    npm ci
    npm ci --ignore-scripts

install-js: node_modules/.bin/tsc

install-js0:
    -mkdir -p node_modules/.bin
    npm install --save-dev --save-exact typescript@5.1.6
    npm install --ignore-scripts --save-dev --save-exact typescript@5.1.6

webmail/webmail.js: lib.ts webmail/api.ts webmail/lib.ts webmail/webmail.ts
    ./tsc.sh $@ $^
    ./tsc.sh $@ lib.ts webmail/api.ts webmail/lib.ts webmail/webmail.ts

webmail/msg.js: lib.ts webmail/api.ts webmail/lib.ts webmail/msg.ts
    ./tsc.sh $@ $^
    ./tsc.sh $@ lib.ts webmail/api.ts webmail/lib.ts webmail/msg.ts

webmail/text.js: lib.ts webmail/api.ts webmail/lib.ts webmail/text.ts
    ./tsc.sh $@ $^
    ./tsc.sh $@ lib.ts webmail/api.ts webmail/lib.ts webmail/text.ts

webadmin/admin.js: lib.ts webadmin/api.ts webadmin/admin.ts
    ./tsc.sh $@ $^
    ./tsc.sh $@ lib.ts webadmin/api.ts webadmin/admin.ts

webaccount/account.js: lib.ts webaccount/api.ts webaccount/account.ts
    ./tsc.sh $@ $^
    ./tsc.sh $@ lib.ts webaccount/api.ts webaccount/account.ts

frontend: node_modules/.bin/tsc webadmin/admin.js webaccount/account.js webmail/webmail.js webmail/msg.js webmail/text.js

install-apidiff:
    go install golang.org/x/exp/cmd/apidiff@v0.0.0-20231206192017-f3f8817b8deb
    CGO_ENABLED=0 go install golang.org/x/exp/cmd/apidiff@v0.0.0-20231206192017-f3f8817b8deb

genapidiff:
    ./apidiff.sh
@@ -153,17 +166,17 @@ genwebsite:
    ./genwebsite.sh

buildall:
    GOOS=linux GOARCH=arm go build
    GOOS=linux GOARCH=arm64 go build
    GOOS=linux GOARCH=amd64 go build
    GOOS=linux GOARCH=386 go build
    GOOS=openbsd GOARCH=amd64 go build
    GOOS=freebsd GOARCH=amd64 go build
    GOOS=netbsd GOARCH=amd64 go build
    GOOS=darwin GOARCH=amd64 go build
    GOOS=dragonfly GOARCH=amd64 go build
    GOOS=illumos GOARCH=amd64 go build
    GOOS=solaris GOARCH=amd64 go build
    GOOS=aix GOARCH=ppc64 go build
    GOOS=windows GOARCH=amd64 go build
    CGO_ENABLED=0 GOOS=linux GOARCH=arm go build
    CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build
    CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build
    CGO_ENABLED=0 GOOS=linux GOARCH=386 go build
    CGO_ENABLED=0 GOOS=openbsd GOARCH=amd64 go build
    CGO_ENABLED=0 GOOS=freebsd GOARCH=amd64 go build
    CGO_ENABLED=0 GOOS=netbsd GOARCH=amd64 go build
    CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 go build
    CGO_ENABLED=0 GOOS=dragonfly GOARCH=amd64 go build
    CGO_ENABLED=0 GOOS=illumos GOARCH=amd64 go build
    CGO_ENABLED=0 GOOS=solaris GOARCH=amd64 go build
    CGO_ENABLED=0 GOOS=aix GOARCH=ppc64 go build
    CGO_ENABLED=0 GOOS=windows GOARCH=amd64 go build
    # no plan9 for now
README.md, 83 lines changed

@@ -19,7 +19,7 @@ See Quickstart below to get started.
(similar to greylisting). Rejected emails are stored in a mailbox called Rejects
for a short period, helping with misclassified legitimate synchronous
signup/login/transactional emails.
- Internationalized email, with unicode in email address usernames
- Internationalized email (EAI), with unicode in email address usernames
  ("localparts"), and in domain names (IDNA).
- Automatic TLS with ACME, for use with Let's Encrypt and other CA's.
- DANE and MTA-STS for inbound and outbound delivery over SMTP with STARTTLS,
@@ -99,7 +99,7 @@ for other platforms.
# Compiling

You can easily (cross) compile mox yourself. You need a recent Go toolchain
installed. Run `go version`, it must be >= 1.21. Download the latest version
installed. Run `go version`, it must be >= 1.23. Download the latest version
from https://go.dev/dl/ or see https://go.dev/doc/manage-install.

To download the source code of the latest release, and compile it to binary "mox":
@@ -125,42 +125,53 @@ It is important to run with docker host networking, so mox can use the public
IPs and has correct remote IP information for incoming connections (important
for junk filtering and rate-limiting).

# Future/development
# Development

See develop.txt for instructions/tips for developing on mox.

Mox will receive funding for essentially full-time continued work from August
2023 to August 2024 through NLnet/EU's NGI0 Entrust, see
https://nlnet.nl/project/Mox/.
# Sponsors

## Roadmap
Thanks to NLnet foundation, the European Commission's NGI programme, and the
Netherlands Ministry of the Interior and Kingdom Relations for financial
support:

- 2024/2025, NLnet NGI0 Zero Core, https://nlnet.nl/project/Mox-Automation/
- 2024, NLnet e-Commons Fund, https://nlnet.nl/project/Mox-API/
- 2023/2024, NLnet NGI0 Entrust, https://nlnet.nl/project/Mox/

# Roadmap

- "mox setup" command, using admin web interface for interactive setup
- Automate DNS management, for setup and maintenance, such as DANE/DKIM key rotation
- Config options for "transactional email domains", for which mox will only
  send messages
- Encrypted storage of files (email messages, TLS keys), also with per account keys
- Recognize common deliverability issues and help postmasters solve them
- JMAP, IMAP OBJECTID extension, IMAP JMAPACCESS extension
- Calendaring with CalDAV/iCal
- More IMAP extensions (PREVIEW, WITHIN, IMPORTANT, COMPRESS=DEFLATE,
  CREATE-SPECIAL-USE, SAVEDATE, UNAUTHENTICATE, REPLACE, QUOTA, NOTIFY,
  MULTIAPPEND, OBJECTID, MULTISEARCH, THREAD, SORT)
- SMTP DSN extension
- ARC, with forwarded email from trusted source
- Forwarding (to an external address)
- Introbox, to which first-time senders are delivered
- Add special IMAP mailbox ("Queue?") that contains queued but
  undelivered messages, updated with IMAP flags/keywords/tags and message headers.
- External addresses in aliases/lists.
- Autoresponder (out of office/vacation)
- OAUTH2 support, for single sign on
- Mailing list manager
- IMAP extensions for "online"/non-syncing/webmail clients (SORT (including
  DISPLAYFROM, DISPLAYTO), THREAD, PARTIAL, CONTEXT=SEARCH CONTEXT=SORT ESORT,
  FILTERS)
- IMAP ACL support, for account sharing (interacts with many extensions and code)
- Improve support for mobile clients with extensions: IMAP URLAUTH, SMTP
  CHUNKING and BINARYMIME, IMAP CATENATE
- Mailing list manager
- Privilege separation, isolating parts of the application to more restricted
  sandbox (e.g. new unauthenticated connections)
- Using mox as backup MX
- JMAP
- Sieve for filtering (for now see Rulesets in the account config)
- ARC, with forwarded email from trusted source
- Milter support, for integration with external tools
- SMTP DSN extension
- IMAP Sieve extension, to run Sieve scripts after message changes (not only
  new deliveries)
- OAUTH2 support, for single sign on
- Forwarding (to an external address)

There are many smaller improvements to make as well, search for "todo" in the code.
@@ -169,12 +180,10 @@ There are many smaller improvements to make as well, search for "todo" in the co
There is currently no plan to implement the following. Though this may
change in the future.

- Functioning as SMTP relay
- Functioning as an SMTP relay without authentication
- POP3
- Delivery to (unix) OS system users
- Delivery to (unix) OS system users (mbox/Maildir)
- Support for pluggable delivery mechanisms
- iOS Mail push notifications (with XAPPLEPUSHSERVICE undocumented imap
  extension and hard to get APNS certificate)


# FAQ - Frequently Asked Questions
@@ -286,7 +295,8 @@ MIT license (like mox), and have the rights to do so.

## Where can I discuss mox?

Join #mox on irc.oftc.net, or #mox:matrix.org, or #mox on the "Gopher slack".
Join #mox on irc.oftc.net, or #mox:matrix.org (https://matrix.to/#/#mox:matrix.org),
or #mox on the "Gopher slack".

For bug reports, please file an issue at https://github.com/mjl-/mox/issues/new.
@@ -344,15 +354,18 @@ in place and restart. If manual actions are required, the release notes mention
them. Check the release notes of all versions between your current installation
and the release you're upgrading to.

Before upgrading, make a backup of the data directory with `mox backup
<destdir>`. This writes consistent snapshots of the database files, and
duplicates message files from the outgoing queue and accounts. Using the new
mox binary, run `mox verifydata <backupdir>` (do NOT use the "live" data
directory!) for a dry run. If this fails, an upgrade will probably fail too.
Important: verifydata with the new mox binary can modify the database files (due
to automatic schema upgrades). So make a fresh backup again before the actual
upgrade. See the help output of the "backup" and "verifydata" commands for more
details.
Before upgrading, make a backup of the config & data directory with `mox backup
<destdir>`. This copies all files from the config directory to
`<destdir>/config`, and creates `<destdir>/data` with consistent snapshots of
the database files, and message files from the outgoing queue and accounts.
Using the new mox binary, run `mox verifydata <destdir>/data` (do NOT use the
"live" data directory!) for a dry run. If this fails, an upgrade will probably
fail too.

Important: verifydata with the new mox binary can modify the database files
(due to automatic schema upgrades). So make a fresh backup again before the
actual upgrade. See the help output of the "backup" and "verifydata" commands
for more details.

During backup, message files are hardlinked if possible, and copied otherwise.
Using a destination directory like `data/tmp/backup` increases the odds
@@ -527,3 +540,13 @@ ensuring they don't become too large. The message index database file for an
account is at `data/accounts/<account>/index.db`, accessed with the bstore
database library, which uses bbolt (formerly BoltDB) for storage, a
transactional key/value library/file format inspired by LMDB.

## How do I block IPs with authentication failures with fail2ban?

Mox includes a rate limiter for IPs/networks that cause too many authentication
failures. It automatically unblocks such IPs/networks after a while. So you may
not need fail2ban. If you want to use fail2ban, you could use this snippet:

    [Definition]
    failregex = .*failed authentication attempt.*remote=<HOST>
    ignoreregex =
(One file's diff is suppressed because it is too large.)

admin/clientconfig.go (new file), 175 lines

@@ -0,0 +1,175 @@
package admin

import (
    "fmt"
    "maps"
    "slices"

    "github.com/mjl-/mox/config"
    "github.com/mjl-/mox/dns"
    "github.com/mjl-/mox/mox-"
)

type TLSMode uint8

const (
    TLSModeImmediate TLSMode = 0
    TLSModeSTARTTLS  TLSMode = 1
    TLSModeNone      TLSMode = 2
)

type ProtocolConfig struct {
    Host           dns.Domain
    Port           int
    TLSMode        TLSMode
    EnabledOnHTTPS bool
}

type ClientConfig struct {
    IMAP       ProtocolConfig
    Submission ProtocolConfig
}

// ClientConfigDomain returns a single IMAP and Submission client configuration for
// a domain.
func ClientConfigDomain(d dns.Domain) (rconfig ClientConfig, rerr error) {
    var haveIMAP, haveSubmission bool

    domConf, ok := mox.Conf.Domain(d)
    if !ok {
        return ClientConfig{}, fmt.Errorf("%w: unknown domain", ErrRequest)
    }

    gather := func(l config.Listener) (done bool) {
        host := mox.Conf.Static.HostnameDomain
        if l.Hostname != "" {
            host = l.HostnameDomain
        }
        if domConf.ClientSettingsDomain != "" {
            host = domConf.ClientSettingsDNSDomain
        }
        if !haveIMAP && l.IMAPS.Enabled {
            rconfig.IMAP.Host = host
            rconfig.IMAP.Port = config.Port(l.IMAPS.Port, 993)
            rconfig.IMAP.TLSMode = TLSModeImmediate
            rconfig.IMAP.EnabledOnHTTPS = l.IMAPS.EnabledOnHTTPS
            haveIMAP = true
        }
        if !haveIMAP && l.IMAP.Enabled {
            rconfig.IMAP.Host = host
            rconfig.IMAP.Port = config.Port(l.IMAP.Port, 143)
            rconfig.IMAP.TLSMode = TLSModeSTARTTLS
            if l.TLS == nil {
                rconfig.IMAP.TLSMode = TLSModeNone
            }
            haveIMAP = true
        }
        if !haveSubmission && l.Submissions.Enabled {
            rconfig.Submission.Host = host
            rconfig.Submission.Port = config.Port(l.Submissions.Port, 465)
            rconfig.Submission.TLSMode = TLSModeImmediate
            rconfig.Submission.EnabledOnHTTPS = l.Submissions.EnabledOnHTTPS
            haveSubmission = true
        }
        if !haveSubmission && l.Submission.Enabled {
            rconfig.Submission.Host = host
            rconfig.Submission.Port = config.Port(l.Submission.Port, 587)
            rconfig.Submission.TLSMode = TLSModeSTARTTLS
            if l.TLS == nil {
                rconfig.Submission.TLSMode = TLSModeNone
            }
            haveSubmission = true
        }
        return haveIMAP && haveSubmission
    }

    // Look at the public listener first. Most likely the intended configuration.
    if public, ok := mox.Conf.Static.Listeners["public"]; ok {
        if gather(public) {
            return
        }
    }
    // Go through the other listeners in consistent order.
    names := slices.Sorted(maps.Keys(mox.Conf.Static.Listeners))
    for _, name := range names {
        if gather(mox.Conf.Static.Listeners[name]) {
            return
        }
    }
    return ClientConfig{}, fmt.Errorf("%w: no listeners found for imap and/or submission", ErrRequest)
}
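A hedged usage sketch for ClientConfigDomain above. It assumes a mox instance whose configuration has already been loaded (as the mox binary does at startup); the import paths follow the package layout shown in this diff, and all field names come from the types defined above:

```go
// Hypothetical helper, not part of mox: print where a mail client for the
// domain should connect, based on the ClientConfig returned above.
package main

import (
	"fmt"

	"github.com/mjl-/mox/admin"
	"github.com/mjl-/mox/dns"
)

func main() {
	// Without a loaded mox configuration, ClientConfigDomain returns an
	// ErrRequest-wrapped error ("unknown domain").
	d := dns.Domain{ASCII: "example.com"} // the hosted domain to look up
	cc, err := admin.ClientConfigDomain(d)
	if err != nil {
		fmt.Println("lookup:", err)
		return
	}
	fmt.Printf("IMAP: host %s port %d tlsmode %d (on https: %v)\n",
		cc.IMAP.Host.ASCII, cc.IMAP.Port, cc.IMAP.TLSMode, cc.IMAP.EnabledOnHTTPS)
	fmt.Printf("Submission: host %s port %d tlsmode %d (on https: %v)\n",
		cc.Submission.Host.ASCII, cc.Submission.Port, cc.Submission.TLSMode, cc.Submission.EnabledOnHTTPS)
}
```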
// ClientConfigs holds the client configuration for IMAP/Submission for a
// domain.
type ClientConfigs struct {
    Entries []ClientConfigsEntry
}

type ClientConfigsEntry struct {
    Protocol string
    Host     dns.Domain
    Port     int
    Listener string
    Note     string
}

// ClientConfigsDomain returns the client configs for IMAP/Submission for a
// domain.
func ClientConfigsDomain(d dns.Domain) (ClientConfigs, error) {
    domConf, ok := mox.Conf.Domain(d)
    if !ok {
        return ClientConfigs{}, fmt.Errorf("%w: unknown domain", ErrRequest)
    }

    c := ClientConfigs{}
    c.Entries = []ClientConfigsEntry{}
    var listeners []string

    for name := range mox.Conf.Static.Listeners {
        listeners = append(listeners, name)
    }
    slices.Sort(listeners)

    note := func(tls bool, requiretls bool) string {
        if !tls {
            return "plain text, no STARTTLS configured"
        }
        if requiretls {
            return "STARTTLS required"
        }
        return "STARTTLS optional"
    }

    for _, name := range listeners {
        l := mox.Conf.Static.Listeners[name]
        host := mox.Conf.Static.HostnameDomain
        if l.Hostname != "" {
            host = l.HostnameDomain
        }
        if domConf.ClientSettingsDomain != "" {
            host = domConf.ClientSettingsDNSDomain
        }
        if l.Submissions.Enabled {
            note := "with TLS"
            if l.Submissions.EnabledOnHTTPS {
                note += "; also served on port 443 with TLS ALPN \"smtp\""
            }
            c.Entries = append(c.Entries, ClientConfigsEntry{"Submission (SMTP)", host, config.Port(l.Submissions.Port, 465), name, note})
        }
        if l.IMAPS.Enabled {
            note := "with TLS"
            if l.IMAPS.EnabledOnHTTPS {
                note += "; also served on port 443 with TLS ALPN \"imap\""
            }
            c.Entries = append(c.Entries, ClientConfigsEntry{"IMAP", host, config.Port(l.IMAPS.Port, 993), name, note})
        }
        if l.Submission.Enabled {
            c.Entries = append(c.Entries, ClientConfigsEntry{"Submission (SMTP)", host, config.Port(l.Submission.Port, 587), name, note(l.TLS != nil, !l.Submission.NoRequireSTARTTLS)})
        }
        if l.IMAP.Enabled {
            c.Entries = append(c.Entries, ClientConfigsEntry{"IMAP", host, config.Port(l.IMAPS.Port, 143), name, note(l.TLS != nil, !l.IMAP.NoRequireSTARTTLS)})
        }
    }

    return c, nil
}
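Similarly, a short hypothetical sketch that prints the per-listener entries returned by ClientConfigsDomain above (again assuming a loaded mox configuration; the output formatting is illustrative):

```go
package main

import (
	"fmt"

	"github.com/mjl-/mox/admin"
	"github.com/mjl-/mox/dns"
)

func main() {
	ccs, err := admin.ClientConfigsDomain(dns.Domain{ASCII: "example.com"})
	if err != nil {
		fmt.Println("lookup:", err)
		return
	}
	for _, e := range ccs.Entries {
		// Protocol, Host, Port, Listener and Note are the fields defined above.
		fmt.Printf("%-18s %-25s %5d listener=%s (%s)\n",
			e.Protocol, e.Host.ASCII, e.Port, e.Listener, e.Note)
	}
}
```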
admin/dnsrecords.go (new file), 318 lines

@@ -0,0 +1,318 @@
package admin

import (
    "crypto"
    "crypto/ed25519"
    "crypto/rsa"
    "crypto/sha256"
    "crypto/x509"
    "fmt"
    "net/url"
    "strings"

    "github.com/mjl-/adns"

    "github.com/mjl-/mox/config"
    "github.com/mjl-/mox/dkim"
    "github.com/mjl-/mox/dmarc"
    "github.com/mjl-/mox/dns"
    "github.com/mjl-/mox/mox-"
    "github.com/mjl-/mox/smtp"
    "github.com/mjl-/mox/spf"
    "github.com/mjl-/mox/tlsrpt"
    "slices"
)

// todo: find a way to automatically create the dns records as it would greatly simplify setting up email for a domain. we could also dynamically make changes, e.g. providing grace periods after disabling a dkim key, only automatically removing the dkim dns key after a few days. but this requires some kind of api and authentication to the dns server. there doesn't appear to be a single commonly used api for dns management. each of the numerous cloud providers have their own APIs and rather large SDKs to use them. we don't want to link all of them in.

// DomainRecords returns text lines describing DNS records required for configuring
// a domain.
//
// If certIssuerDomainName is set, CAA records to limit TLS certificate issuance to
// that caID will be suggested. If acmeAccountURI is also set, CAA records also
// restricting issuance to that account ID will be suggested.
func DomainRecords(domConf config.Domain, domain dns.Domain, hasDNSSEC bool, certIssuerDomainName, acmeAccountURI string) ([]string, error) {
    d := domain.ASCII
    h := mox.Conf.Static.HostnameDomain.ASCII

    // The first line with ";" is used by ../testdata/integration/moxacmepebble.sh and
    // ../testdata/integration/moxmail2.sh for selecting DNS records
    records := []string{
        "; Time To Live of 5 minutes, may be recognized if importing as a zone file.",
        "; Once your setup is working, you may want to increase the TTL.",
        "$TTL 300",
        "",
    }

    if public, ok := mox.Conf.Static.Listeners["public"]; ok && public.TLS != nil && (len(public.TLS.HostPrivateRSA2048Keys) > 0 || len(public.TLS.HostPrivateECDSAP256Keys) > 0) {
        records = append(records,
            `; DANE: These records indicate that a remote mail server trying to deliver email`,
            `; with SMTP (TCP port 25) must verify the TLS certificate with DANE-EE (3), based`,
            `; on the certificate public key ("SPKI", 1) that is SHA2-256-hashed (1) to the`,
            `; hexadecimal hash. DANE-EE verification means only the certificate or public`,
            `; key is verified, not whether the certificate is signed by a (centralized)`,
            `; certificate authority (CA), is expired, or matches the host name.`,
            `;`,
            `; NOTE: Create the records below only once: They are for the machine, and apply`,
            `; to all hosted domains.`,
        )
        if !hasDNSSEC {
            records = append(records,
                ";",
                "; WARNING: Domain does not appear to be DNSSEC-signed. To enable DANE, first",
                "; enable DNSSEC on your domain, then add the TLSA records. Records below have been",
                "; commented out.",
            )
        }
        addTLSA := func(privKey crypto.Signer) error {
            spkiBuf, err := x509.MarshalPKIXPublicKey(privKey.Public())
            if err != nil {
                return fmt.Errorf("marshal SubjectPublicKeyInfo for DANE record: %v", err)
            }
            sum := sha256.Sum256(spkiBuf)
            tlsaRecord := adns.TLSA{
                Usage:     adns.TLSAUsageDANEEE,
                Selector:  adns.TLSASelectorSPKI,
                MatchType: adns.TLSAMatchTypeSHA256,
                CertAssoc: sum[:],
            }
            var s string
            if hasDNSSEC {
                s = fmt.Sprintf("_25._tcp.%-*s TLSA %s", 20+len(d)-len("_25._tcp."), h+".", tlsaRecord.Record())
            } else {
                s = fmt.Sprintf(";; _25._tcp.%-*s TLSA %s", 20+len(d)-len(";; _25._tcp."), h+".", tlsaRecord.Record())
            }
            records = append(records, s)
            return nil
        }
        for _, privKey := range public.TLS.HostPrivateECDSAP256Keys {
            if err := addTLSA(privKey); err != nil {
                return nil, err
            }
        }
        for _, privKey := range public.TLS.HostPrivateRSA2048Keys {
            if err := addTLSA(privKey); err != nil {
                return nil, err
            }
        }
        records = append(records, "")
    }

    if d != h {
        records = append(records,
            "; For the machine, only needs to be created once, for the first domain added:",
            "; ",
            "; SPF-allow host for itself, resulting in relaxed DMARC pass for (postmaster)",
            "; messages (DSNs) sent from host:",
            fmt.Sprintf(`%-*s TXT "v=spf1 a -all"`, 20+len(d), h+"."), // ../rfc/7208:2263 ../rfc/7208:2287
            "",
        )
    }
    if d != h && mox.Conf.Static.HostTLSRPT.ParsedLocalpart != "" {
        uri := url.URL{
            Scheme: "mailto",
            Opaque: smtp.NewAddress(mox.Conf.Static.HostTLSRPT.ParsedLocalpart, mox.Conf.Static.HostnameDomain).Pack(false),
        }
        tlsrptr := tlsrpt.Record{Version: "TLSRPTv1", RUAs: [][]tlsrpt.RUA{{tlsrpt.RUA(uri.String())}}}
        records = append(records,
            "; For the machine, only needs to be created once, for the first domain added:",
            "; ",
            "; Request reporting about success/failures of TLS connections to (MX) host, for DANE.",
            fmt.Sprintf(`_smtp._tls.%-*s TXT "%s"`, 20+len(d)-len("_smtp._tls."), h+".", tlsrptr.String()),
            "",
        )
    }

    records = append(records,
        "; Deliver email for the domain to this host.",
        fmt.Sprintf("%s. MX 10 %s.", d, h),
        "",

        "; Outgoing messages will be signed with the first two DKIM keys. The other two",
        "; configured for backup, switching to them is just a config change.",
    )
    var selectors []string
    for name := range domConf.DKIM.Selectors {
        selectors = append(selectors, name)
    }
    slices.Sort(selectors)
    for _, name := range selectors {
        sel := domConf.DKIM.Selectors[name]
        dkimr := dkim.Record{
            Version:   "DKIM1",
            Hashes:    []string{"sha256"},
            PublicKey: sel.Key.Public(),
        }
        if _, ok := sel.Key.(ed25519.PrivateKey); ok {
            dkimr.Key = "ed25519"
        } else if _, ok := sel.Key.(*rsa.PrivateKey); !ok {
            return nil, fmt.Errorf("unrecognized private key for DKIM selector %q: %T", name, sel.Key)
        }
        txt, err := dkimr.Record()
        if err != nil {
            return nil, fmt.Errorf("making DKIM DNS TXT record: %v", err)
        }

        if len(txt) > 100 {
            records = append(records,
                "; NOTE: The following is a single long record split over several lines for use",
                "; in zone files. When adding through a DNS operator web interface, combine the",
                "; strings into a single string, without ().",
            )
        }
        s := fmt.Sprintf("%s._domainkey.%s. TXT %s", name, d, mox.TXTStrings(txt))
        records = append(records, s)

    }
    dmarcr := dmarc.DefaultRecord
    dmarcr.Policy = "reject"
    if domConf.DMARC != nil {
        uri := url.URL{
            Scheme: "mailto",
            Opaque: smtp.NewAddress(domConf.DMARC.ParsedLocalpart, domConf.DMARC.DNSDomain).Pack(false),
        }
        dmarcr.AggregateReportAddresses = []dmarc.URI{
            {Address: uri.String(), MaxSize: 10, Unit: "m"},
        }
    }
    dspfr := spf.Record{Version: "spf1"}
    for _, ip := range mox.DomainSPFIPs() {
        mech := "ip4"
        if ip.To4() == nil {
            mech = "ip6"
        }
        dspfr.Directives = append(dspfr.Directives, spf.Directive{Mechanism: mech, IP: ip})
    }
    dspfr.Directives = append(dspfr.Directives,
        spf.Directive{Mechanism: "mx"},
        spf.Directive{Qualifier: "~", Mechanism: "all"},
    )
    dspftxt, err := dspfr.Record()
    if err != nil {
        return nil, fmt.Errorf("making domain spf record: %v", err)
    }
    records = append(records,
        "",

        "; Specify the MX host is allowed to send for our domain and for itself (for DSNs).",
        "; ~all means softfail for anything else, which is done instead of -all to prevent older",
        "; mail servers from rejecting the message because they never get to looking for a dkim/dmarc pass.",
        fmt.Sprintf(`%s. TXT "%s"`, d, dspftxt),
        "",

        "; Emails that fail the DMARC check (without aligned DKIM and without aligned SPF)",
        "; should be rejected, and request reports. If you email through mailing lists that",
        "; strip DKIM-Signature headers and don't rewrite the From header, you may want to",
        "; set the policy to p=none.",
        fmt.Sprintf(`_dmarc.%s. TXT "%s"`, d, dmarcr.String()),
        "",
    )

    if sts := domConf.MTASTS; sts != nil {
        records = append(records,
            "; Remote servers can use MTA-STS to verify our TLS certificate with the",
            "; WebPKI pool of CA's (certificate authorities) when delivering over SMTP with",
            "; STARTTLS.",
            fmt.Sprintf(`mta-sts.%s. CNAME %s.`, d, h),
            fmt.Sprintf(`_mta-sts.%s. TXT "v=STSv1; id=%s"`, d, sts.PolicyID),
            "",
        )
    } else {
        records = append(records,
            "; Note: No MTA-STS to indicate TLS should be used. Either because disabled for the",
            "; domain or because mox.conf does not have a listener with MTA-STS configured.",
            "",
        )
    }

    if domConf.TLSRPT != nil {
        uri := url.URL{
            Scheme: "mailto",
            Opaque: smtp.NewAddress(domConf.TLSRPT.ParsedLocalpart, domConf.TLSRPT.DNSDomain).Pack(false),
        }
        tlsrptr := tlsrpt.Record{Version: "TLSRPTv1", RUAs: [][]tlsrpt.RUA{{tlsrpt.RUA(uri.String())}}}
        records = append(records,
            "; Request reporting about TLS failures.",
            fmt.Sprintf(`_smtp._tls.%s. TXT "%s"`, d, tlsrptr.String()),
            "",
        )
    }

    if domConf.ClientSettingsDomain != "" && domConf.ClientSettingsDNSDomain != mox.Conf.Static.HostnameDomain {
        records = append(records,
            "; Client settings will reference a subdomain of the hosted domain, making it",
            "; easier to migrate to a different server in the future by not requiring settings",
            "; in all clients to be updated.",
            fmt.Sprintf(`%-*s CNAME %s.`, 20+len(d), domConf.ClientSettingsDNSDomain.ASCII+".", h),
            "",
        )
    }

    records = append(records,
        "; Autoconfig is used by Thunderbird. Autodiscover is (in theory) used by Microsoft.",
        fmt.Sprintf(`autoconfig.%s. CNAME %s.`, d, h),
        fmt.Sprintf(`_autodiscover._tcp.%s. SRV 0 1 443 %s.`, d, h),
        "",

        // ../rfc/6186:133 ../rfc/8314:692
        "; For secure IMAP and submission autoconfig, point to mail host.",
        fmt.Sprintf(`_imaps._tcp.%s. SRV 0 1 993 %s.`, d, h),
        fmt.Sprintf(`_submissions._tcp.%s. SRV 0 1 465 %s.`, d, h),
        "",
        // ../rfc/6186:242
        "; Next records specify POP3 and non-TLS ports are not to be used.",
        "; These are optional and safe to leave out (e.g. if you have to click a lot in a",
        "; DNS admin web interface).",
        fmt.Sprintf(`_imap._tcp.%s. SRV 0 0 0 .`, d),
        fmt.Sprintf(`_submission._tcp.%s. SRV 0 0 0 .`, d),
        fmt.Sprintf(`_pop3._tcp.%s. SRV 0 0 0 .`, d),
        fmt.Sprintf(`_pop3s._tcp.%s. SRV 0 0 0 .`, d),
    )

    if certIssuerDomainName != "" {
        // ../rfc/8659:18 for CAA records.
        records = append(records,
            "",
            "; Optional:",
            "; You could mark Let's Encrypt as the only Certificate Authority allowed to",
            "; sign TLS certificates for your domain.",
            fmt.Sprintf(`%s. CAA 0 issue "%s"`, d, certIssuerDomainName),
        )
        if acmeAccountURI != "" {
            // ../rfc/8657:99 for accounturi.
            // ../rfc/8657:147 for validationmethods.
            records = append(records,
                ";",
                "; Optionally limit certificates for this domain to the account ID and methods used by mox.",
                fmt.Sprintf(`;; %s. CAA 0 issue "%s; accounturi=%s; validationmethods=tls-alpn-01,http-01"`, d, certIssuerDomainName, acmeAccountURI),
                ";",
                "; Or alternatively only limit for email-specific subdomains, so you can use",
                "; other accounts/methods for other subdomains.",
                fmt.Sprintf(`;; autoconfig.%s. CAA 0 issue "%s; accounturi=%s; validationmethods=tls-alpn-01,http-01"`, d, certIssuerDomainName, acmeAccountURI),
                fmt.Sprintf(`;; mta-sts.%s. CAA 0 issue "%s; accounturi=%s; validationmethods=tls-alpn-01,http-01"`, d, certIssuerDomainName, acmeAccountURI),
            )
            if domConf.ClientSettingsDomain != "" && domConf.ClientSettingsDNSDomain != mox.Conf.Static.HostnameDomain {
                records = append(records,
                    fmt.Sprintf(`;; %-*s CAA 0 issue "%s; accounturi=%s; validationmethods=tls-alpn-01,http-01"`, 20-3+len(d), domConf.ClientSettingsDNSDomain.ASCII, certIssuerDomainName, acmeAccountURI),
                )
            }
            if strings.HasSuffix(h, "."+d) {
                records = append(records,
                    ";",
                    "; And the mail hostname.",
                    fmt.Sprintf(`;; %-*s CAA 0 issue "%s; accounturi=%s; validationmethods=tls-alpn-01,http-01"`, 20-3+len(d), h+".", certIssuerDomainName, acmeAccountURI),
                )
            }
        } else {
            // The string "will be suggested" is used by
            // ../testdata/integration/moxacmepebble.sh and ../testdata/integration/moxmail2.sh
            // as end of DNS records.
            records = append(records,
                ";",
                "; Note: After starting up, once an ACME account has been created, CAA records",
                "; that restrict issuance to the account will be suggested.",
            )
        }
    }
    return records, nil
}
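The addTLSA helper above builds a DANE-EE(3) SPKI(1) SHA2-256(1) TLSA record from a host private key. A standalone sketch of the same hash computation using only the standard library; the generated key and the record name are illustrative, not mox output:

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/sha256"
	"crypto/x509"
	"encoding/hex"
	"fmt"
	"log"
)

func main() {
	// Stand-in for a host's TLS private key; mox would use the configured
	// host private keys instead of generating one.
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}

	// Hash the DER-encoded SubjectPublicKeyInfo, as for selector "SPKI" (1)
	// with matching type SHA2-256 (1).
	spki, err := x509.MarshalPKIXPublicKey(key.Public())
	if err != nil {
		log.Fatal(err)
	}
	sum := sha256.Sum256(spki)

	// Usage DANE-EE (3), selector SPKI (1), matching type SHA2-256 (1).
	fmt.Printf("_25._tcp.mail.example.org. TLSA 3 1 1 %s\n", hex.EncodeToString(sum[:]))
}
```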
apidiff.sh, 31 lines changed

@@ -8,20 +8,31 @@ if ! test -d tmp/mox-$prevversion; then
fi
(rm -r tmp/apidiff || exit 0)
mkdir -p tmp/apidiff/$prevversion tmp/apidiff/next
(rm apidiff/next.txt || exit 0)
(
echo "Below are the incompatible changes between $prevversion and next, per package."
echo
) >>apidiff/next.txt
(rm apidiff/next.txt.new 2>/dev/null || exit 0)
touch apidiff/next.txt.new
for p in $(cat apidiff/packages.txt); do
    if ! test -d tmp/mox-$prevversion/$p; then
        continue
    fi
    (cd tmp/mox-$prevversion && apidiff -w ../apidiff/$prevversion/$p.api ./$p)
    apidiff -w tmp/apidiff/next/$p.api ./$p
    (
    echo '#' $p
    apidiff -incompatible tmp/apidiff/$prevversion/$p.api tmp/apidiff/next/$p.api
    echo
    ) >>apidiff/next.txt
    apidiff -incompatible tmp/apidiff/$prevversion/$p.api tmp/apidiff/next/$p.api >$p.diff
    if test -s $p.diff; then
        (
        echo '#' $p
        cat $p.diff
        echo
        ) >>apidiff/next.txt.new
    fi
    rm $p.diff
done
if test -s apidiff/next.txt.new; then
    (
    echo "Below are the incompatible changes between $prevversion and next, per package."
    echo
    cat apidiff/next.txt.new
    ) >apidiff/next.txt
    rm apidiff/next.txt.new
else
    mv apidiff/next.txt.new apidiff/next.txt
fi
@@ -0,0 +1,5 @@
Below are the incompatible changes between v0.0.15 and next, per package.

# smtpclient
- GatherDestinations: changed from func(context.Context, *log/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.IPDomain) (bool, bool, bool, github.com/mjl-/mox/dns.Domain, []github.com/mjl-/mox/dns.IPDomain, bool, error) to func(context.Context, *log/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.IPDomain) (bool, bool, bool, github.com/mjl-/mox/dns.Domain, []HostPref, bool, error)
apidiff/v0.0.12.txt (new file), 43 lines

@@ -0,0 +1,43 @@
Below are the incompatible changes between v0.0.11 and next, per package.

# dane

# dmarc

# dmarcrpt

# dns

# dnsbl

# iprev

# message
- (*HeaderWriter).AddWrap: changed from func([]byte) to func([]byte, bool)

# mtasts

# publicsuffix

# ratelimit

# sasl

# scram

# smtp

# smtpclient

# spf

# subjectpass

# tlsrpt

# updates

# webapi

# webhook
apidiff/v0.0.13.txt (new file), 5 lines

@@ -0,0 +1,5 @@
Below are the incompatible changes between v0.0.13 and next, per package.

# webhook
- PartStructure: removed
apidiff/v0.0.15.txt (new file), 7 lines

@@ -0,0 +1,7 @@
Below are the incompatible changes between v0.0.14 and next, per package.

# message
- Part.ContentDescription: changed from string to *string
- Part.ContentID: changed from string to *string
- Part.ContentTransferEncoding: changed from string to *string
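The v0.0.15 entries above change several message.Part fields from string to *string, so a nil pointer can now distinguish an absent header from an empty one. A hedged migration sketch for calling code; the helper is hypothetical, only the field name and its new type come from the apidiff output above:

```go
package main

import (
	"fmt"

	"github.com/mjl-/mox/message"
)

// contentID returns the Content-ID of a part, or "" when the header is absent.
// Before v0.0.15 callers could read the field directly as a string; with
// *string a nil check is needed first.
func contentID(p message.Part) string {
	if p.ContentID == nil {
		return ""
	}
	return *p.ContentID
}

func main() {
	var p message.Part // zero value: header absent
	fmt.Printf("content-id: %q\n", contentID(p))
}
```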
@@ -42,6 +42,24 @@ import (
)

var (
    metricMissingServerName = promauto.NewCounter(
        prometheus.CounterOpts{
            Name: "mox_autotls_missing_servername_total",
            Help: "Number of failed TLS connection attempts with missing SNI where no fallback hostname was configured.",
        },
    )
    metricUnknownServerName = promauto.NewCounter(
        prometheus.CounterOpts{
            Name: "mox_autotls_unknown_servername_total",
            Help: "Number of failed TLS connection attempts with an unrecognized SNI name where no fallback hostname was configured.",
        },
    )
    metricCertRequestErrors = promauto.NewCounter(
        prometheus.CounterOpts{
            Name: "mox_autotls_cert_request_errors_total",
            Help: "Number of errors trying to retrieve a certificate for a hostname, possibly ACME verification errors.",
        },
    )
    metricCertput = promauto.NewCounter(
        prometheus.CounterOpts{
            Name: "mox_autotls_certput_total",
@@ -54,7 +72,6 @@ var (
// certificates for allowlisted hosts.
type Manager struct {
    ACMETLSConfig *tls.Config // For serving HTTPS on port 443, which is required for certificate requests to succeed.
    TLSConfig     *tls.Config // For all TLS servers not used for validating ACME requests. Like SMTP and IMAP (including with STARTTLS) and HTTPS on ports other than 443.
    Manager       *autocert.Manager

    shutdown <-chan struct{}
@@ -77,7 +94,7 @@ type Manager struct {
// host, or a newly generated key.
//
// When shutdown is closed, no new TLS connections can be created.
func Load(name, acmeDir, contactEmail, directoryURL string, eabKeyID string, eabKey []byte, getPrivateKey func(host string, keyType autocert.KeyType) (crypto.Signer, error), shutdown <-chan struct{}) (*Manager, error) {
func Load(log mlog.Log, name, acmeDir, contactEmail, directoryURL string, eabKeyID string, eabKey []byte, getPrivateKey func(host string, keyType autocert.KeyType) (crypto.Signer, error), shutdown <-chan struct{}) (*Manager, error) {
    if directoryURL == "" {
        return nil, fmt.Errorf("empty ACME directory URL")
    }
@@ -90,7 +107,10 @@ func Load(name, acmeDir, contactEmail, directoryURL string, eabKeyID string, eab
    var key crypto.Signer
    f, err := os.Open(p)
    if f != nil {
        defer f.Close()
        defer func() {
            err := f.Close()
            log.Check(err, "closing identify key file")
        }()
    }
    if err != nil && os.IsNotExist(err) {
        key, err = ecdsa.GenerateKey(elliptic.P256(), cryptorand.Reader)
@@ -158,52 +178,111 @@ func Load(name, acmeDir, contactEmail, directoryURL string, eabKeyID string, eab
        }
    }

    loggingGetCertificate := func(hello *tls.ClientHelloInfo) (*tls.Certificate, error) {
        log := mlog.New("autotls", nil).WithContext(hello.Context())

        // We handle missing invalid hostnames/ip's by returning a nil certificate and nil
        // error, which crypto/tls turns into a TLS alert "unrecognized name", which can be
        // interpreted by clients as a hint that they are using the wrong hostname, or a
        // certificate is missing.

        // Handle missing SNI to prevent logging an error below.
        // At startup, during config initialization, we already adjust the tls config to
        // inject the listener hostname if there isn't one in the TLS client hello. This is
        // common for SMTP STARTTLS connections, which often do not care about the
        // verification of the certificate.
        if hello.ServerName == "" {
            log.Debug("tls request without sni servername, rejecting", slog.Any("localaddr", hello.Conn.LocalAddr()), slog.Any("supportedprotos", hello.SupportedProtos))
            return nil, nil
        }

        cert, err := m.GetCertificate(hello)
        if err != nil && errors.Is(err, errHostNotAllowed) {
            log.Debugx("requesting certificate", err, slog.String("host", hello.ServerName))
            return nil, nil
        } else if err != nil {
            log.Errorx("requesting certificate", err, slog.String("host", hello.ServerName))
        }
        return cert, err
    }

    acmeTLSConfig := *m.TLSConfig()
    acmeTLSConfig.GetCertificate = loggingGetCertificate

    tlsConfig := tls.Config{
        GetCertificate: loggingGetCertificate,
    }

    a := &Manager{
        ACMETLSConfig: &acmeTLSConfig,
        TLSConfig:     &tlsConfig,
        Manager:       m,
        shutdown:      shutdown,
        hosts:         map[dns.Domain]struct{}{},
        Manager:  m,
        shutdown: shutdown,
        hosts:    map[dns.Domain]struct{}{},
    }
    m.HostPolicy = a.HostPolicy
    acmeTLSConfig := *m.TLSConfig()
    acmeTLSConfig.GetCertificate = func(hello *tls.ClientHelloInfo) (*tls.Certificate, error) {
        return a.loggingGetCertificate(hello, dns.Domain{}, false, false)
    }
    a.ACMETLSConfig = &acmeTLSConfig
    return a, nil
}

// loggingGetCertificate is a helper to implement crypto/tls.Config.GetCertificate,
// optionally falling back to a certificate for fallbackHostname in case SNI is
// absent or for an unknown hostname.
func (m *Manager) loggingGetCertificate(hello *tls.ClientHelloInfo, fallbackHostname dns.Domain, fallbackNoSNI, fallbackUnknownSNI bool) (*tls.Certificate, error) {
    log := mlog.New("autotls", nil).WithContext(hello.Context()).With(
        slog.Any("localaddr", hello.Conn.LocalAddr()),
        slog.Any("supportedprotos", hello.SupportedProtos),
        slog.String("servername", hello.ServerName),
    )

    // If we can't find a certificate (depending on fallback parameters), we return a
    // nil certificate and nil error, which crypto/tls turns into a TLS alert
    // "unrecognized name", which can be interpreted by clients as a hint that they are
    // using the wrong hostname, or a certificate is missing. ../rfc/9325:578

    // IP addresses for ServerName are not allowed, but happen in practice. If we
    // should be lenient (fallbackUnknownSNI), we switch to the fallback hostname,
    // otherwise we return an error. We don't want to pass IP addresses to
    // GetCertificate because it will return an error for IPv6 addresses.
    // ../rfc/6066:367 ../rfc/4366:535
    if net.ParseIP(hello.ServerName) != nil {
        if fallbackUnknownSNI {
            hello.ServerName = fallbackHostname.ASCII
            log = log.With(slog.String("servername", hello.ServerName))
        } else {
            log.Debug("tls request with ip for server name, rejecting")
            return nil, fmt.Errorf("invalid ip address for sni server name")
        }
    }

    if hello.ServerName == "" && fallbackNoSNI {
        hello.ServerName = fallbackHostname.ASCII
        log = log.With(slog.String("servername", hello.ServerName))
    }

    // Handle missing SNI to prevent logging an error below.
    if hello.ServerName == "" {
        metricMissingServerName.Inc()
        log.Debug("tls request without sni server name, rejecting")
        return nil, nil
    }

    cert, err := m.Manager.GetCertificate(hello)
    if err != nil && errors.Is(err, errHostNotAllowed) {
        if !fallbackUnknownSNI {
            metricUnknownServerName.Inc()
            log.Debugx("requesting certificate", err)
            return nil, nil
        }

        // Some legitimate email deliveries over SMTP use an unknown SNI, e.g. a bare
        // domain instead of the MX hostname. We "should" return an error, but that would
        // break email delivery, so we use the fallback name if it is configured.
        // ../rfc/9325:589

        log = log.With(slog.String("servername", hello.ServerName))
        log.Debug("certificate for unknown hostname, using fallback hostname")
        hello.ServerName = fallbackHostname.ASCII
        cert, err = m.Manager.GetCertificate(hello)
        if err != nil {
            metricCertRequestErrors.Inc()
            log.Errorx("requesting certificate for fallback hostname", err)
        } else {
            log.Debug("using certificate for fallback hostname")
        }
        return cert, err
    } else if err != nil {
        metricCertRequestErrors.Inc()
        log.Errorx("requesting certificate", err)
    }
    return cert, err
}
// TLSConfig returns a TLS server config that optionally returns a certificate for
|
||||
// fallbackHostname if no SNI was done, or for an unknown hostname.
|
||||
//
|
||||
// If fallbackNoSNI is set, TLS connections without SNI will use a certificate for
|
||||
// fallbackHostname. Otherwise, connections without SNI will fail with a message
|
||||
// that no TLS certificate is available.
|
||||
//
|
||||
// If fallbackUnknownSNI is set, TLS connections with an SNI hostname that is not
|
||||
// allowlisted will instead use a certificate for fallbackHostname. Otherwise, such
|
||||
// TLS connections will fail.
|
||||
func (m *Manager) TLSConfig(fallbackHostname dns.Domain, fallbackNoSNI, fallbackUnknownSNI bool) *tls.Config {
|
||||
return &tls.Config{
|
||||
GetCertificate: func(hello *tls.ClientHelloInfo) (*tls.Certificate, error) {
|
||||
return m.loggingGetCertificate(hello, fallbackHostname, fallbackNoSNI, fallbackUnknownSNI)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// CertAvailable checks whether a non-expired ECDSA certificate is available in the
|
||||
// cache for host. No other checks than expiration are done.
|
||||
func (m *Manager) CertAvailable(ctx context.Context, log mlog.Log, host dns.Domain) (bool, error) {
|
||||
@ -284,12 +363,12 @@ func (m *Manager) SetAllowedHostnames(log mlog.Log, resolver dns.Resolver, hostn
|
||||
for _, h := range added {
|
||||
ips, _, err := resolver.LookupIP(ctx, "ip", h.ASCII+".")
|
||||
if err != nil {
|
||||
log.Errorx("warning: acme tls cert validation for host may fail due to dns lookup error", err, slog.Any("host", h))
|
||||
log.Warnx("acme tls cert validation for host may fail due to dns lookup error", err, slog.Any("host", h))
|
||||
continue
|
||||
}
|
||||
for _, ip := range ips {
|
||||
if _, ok := publicIPstrs[ip.String()]; !ok {
|
||||
log.Error("warning: acme tls cert validation for host is likely to fail because not all its ips are being listened on",
|
||||
log.Warn("acme tls cert validation for host is likely to fail because not all its ips are being listened on",
|
||||
slog.Any("hostname", h),
|
||||
slog.Any("listenedips", publicIPs),
|
||||
slog.Any("hostips", ips),
|
||||
|
@ -25,7 +25,7 @@ func TestAutotls(t *testing.T) {
|
||||
getPrivateKey := func(host string, keyType autocert.KeyType) (crypto.Signer, error) {
|
||||
return nil, fmt.Errorf("not used")
|
||||
}
|
||||
m, err := Load("test", "../testdata/autotls", "mox@localhost", "https://localhost/", "", nil, getPrivateKey, shutdown)
|
||||
m, err := Load(log, "test", "../testdata/autotls", "mox@localhost", "https://localhost/", "", nil, getPrivateKey, shutdown)
|
||||
if err != nil {
|
||||
t.Fatalf("load manager: %v", err)
|
||||
}
|
||||
@ -82,7 +82,7 @@ func TestAutotls(t *testing.T) {
|
||||
|
||||
key0 := m.Manager.Client.Key
|
||||
|
||||
m, err = Load("test", "../testdata/autotls", "mox@localhost", "https://localhost/", "", nil, getPrivateKey, shutdown)
|
||||
m, err = Load(log, "test", "../testdata/autotls", "mox@localhost", "https://localhost/", "", nil, getPrivateKey, shutdown)
|
||||
if err != nil {
|
||||
t.Fatalf("load manager again: %v", err)
|
||||
}
|
||||
@ -95,7 +95,7 @@ func TestAutotls(t *testing.T) {
|
||||
t.Fatalf("hostpolicy, got err %v, expected no error", err)
|
||||
}
|
||||
|
||||
m2, err := Load("test2", "../testdata/autotls", "mox@localhost", "https://localhost/", "", nil, nil, shutdown)
|
||||
m2, err := Load(log, "test2", "../testdata/autotls", "mox@localhost", "https://localhost/", "", nil, nil, shutdown)
|
||||
if err != nil {
|
||||
t.Fatalf("load another manager: %v", err)
|
||||
}
|
||||
|
254
backup.go
254
backup.go
@ -10,7 +10,10 @@ import (
|
||||
"log/slog"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/mjl-/bstore"
|
||||
@ -24,7 +27,7 @@ import (
|
||||
"github.com/mjl-/mox/tlsrptdb"
|
||||
)
|
||||
|
||||
func backupctl(ctx context.Context, ctl *ctl) {
|
||||
func xbackupctl(ctx context.Context, xctl *ctl) {
|
||||
/* protocol:
|
||||
> "backup"
|
||||
> destdir
|
||||
@ -38,14 +41,14 @@ func backupctl(ctx context.Context, ctl *ctl) {
|
||||
// "src" or "dst" are incomplete paths relative to the source or destination data
|
||||
// directories.
|
||||
|
||||
dstDataDir := ctl.xread()
|
||||
verbose := ctl.xread() == "verbose"
|
||||
dstDir := xctl.xread()
|
||||
verbose := xctl.xread() == "verbose"
|
||||
|
||||
// Set when an error is encountered. At the end, we warn if set.
|
||||
var incomplete bool
|
||||
|
||||
// We'll be writing output, and logging both to mox and the ctl stream.
|
||||
writer := ctl.writer()
|
||||
xwriter := xctl.writer()
|
||||
|
||||
// Format easily readable output for the user.
|
||||
formatLog := func(prefix, text string, err error, attrs ...slog.Attr) []byte {
|
||||
@ -64,10 +67,8 @@ func backupctl(ctx context.Context, ctl *ctl) {
|
||||
|
||||
// Log an error to both the mox service as the user running "mox backup".
|
||||
pkglogx := func(prefix, text string, err error, attrs ...slog.Attr) {
|
||||
ctl.log.Errorx(text, err, attrs...)
|
||||
|
||||
_, werr := writer.Write(formatLog(prefix, text, err, attrs...))
|
||||
ctl.xcheck(werr, "write to ctl")
|
||||
xctl.log.Errorx(text, err, attrs...)
|
||||
xwriter.Write(formatLog(prefix, text, err, attrs...))
|
||||
}
|
||||
|
||||
// Log an error but don't mark backup as failed.
|
||||
@ -84,15 +85,100 @@ func backupctl(ctx context.Context, ctl *ctl) {
|
||||
|
||||
// If verbose is enabled, log to the cli command. Always log as info level.
|
||||
xvlog := func(text string, attrs ...slog.Attr) {
|
||||
ctl.log.Info(text, attrs...)
|
||||
xctl.log.Info(text, attrs...)
|
||||
if verbose {
|
||||
_, werr := writer.Write(formatLog("", text, nil, attrs...))
|
||||
ctl.xcheck(werr, "write to ctl")
|
||||
xwriter.Write(formatLog("", text, nil, attrs...))
|
||||
}
|
||||
}
|
||||
|
||||
dstConfigDir := filepath.Join(dstDir, "config")
|
||||
dstDataDir := filepath.Join(dstDir, "data")
|
||||
|
||||
// Warn if directories already exist, will likely cause failures when trying to
|
||||
// write files that already exist.
|
||||
if _, err := os.Stat(dstConfigDir); err == nil {
|
||||
xwarnx("destination config directory already exists", nil, slog.String("configdir", dstConfigDir))
|
||||
}
|
||||
if _, err := os.Stat(dstDataDir); err == nil {
|
||||
xwarnx("destination data directory already exists", nil, slog.String("dir", dstDataDir))
|
||||
xwarnx("destination data directory already exists", nil, slog.String("datadir", dstDataDir))
|
||||
}
|
||||
|
||||
os.MkdirAll(dstDir, 0770)
|
||||
os.MkdirAll(dstConfigDir, 0770)
|
||||
os.MkdirAll(dstDataDir, 0770)
|
||||
|
||||
// Copy all files in the config dir.
|
||||
srcConfigDir := filepath.Clean(mox.ConfigDirPath("."))
|
||||
err := filepath.WalkDir(srcConfigDir, func(srcPath string, d fs.DirEntry, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if srcConfigDir == srcPath {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Trim directory and separator.
|
||||
relPath := srcPath[len(srcConfigDir)+1:]
|
||||
|
||||
destPath := filepath.Join(dstConfigDir, relPath)
|
||||
|
||||
if d.IsDir() {
|
||||
if info, err := os.Stat(srcPath); err != nil {
|
||||
return fmt.Errorf("stat config dir %s: %v", srcPath, err)
|
||||
} else if err := os.Mkdir(destPath, info.Mode()&0777); err != nil {
|
||||
return fmt.Errorf("mkdir %s: %v", destPath, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if d.Type()&fs.ModeSymlink != 0 {
|
||||
linkDest, err := os.Readlink(srcPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("reading symlink %s: %v", srcPath, err)
|
||||
}
|
||||
if err := os.Symlink(linkDest, destPath); err != nil {
|
||||
return fmt.Errorf("creating symlink %s: %v", destPath, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if !d.Type().IsRegular() {
|
||||
xwarnx("skipping non-regular/dir/symlink file in config dir", nil, slog.String("path", srcPath))
|
||||
return nil
|
||||
}
|
||||
|
||||
sf, err := os.Open(srcPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("open config file %s: %v", srcPath, err)
|
||||
}
|
||||
info, err := sf.Stat()
|
||||
if err != nil {
|
||||
return fmt.Errorf("stat config file %s: %v", srcPath, err)
|
||||
}
|
||||
df, err := os.OpenFile(destPath, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0777&info.Mode())
|
||||
if err != nil {
|
||||
return fmt.Errorf("create destination config file %s: %v", destPath, err)
|
||||
}
|
||||
defer func() {
|
||||
if df != nil {
|
||||
err := df.Close()
|
||||
xctl.log.Check(err, "closing file")
|
||||
}
|
||||
}()
|
||||
defer func() {
|
||||
err := sf.Close()
|
||||
xctl.log.Check(err, "closing file")
|
||||
}()
|
||||
if _, err := io.Copy(df, sf); err != nil {
|
||||
return fmt.Errorf("copying config file %s to %s: %v", srcPath, destPath, err)
|
||||
}
|
||||
if err := df.Close(); err != nil {
|
||||
return fmt.Errorf("closing destination config file %s: %v", srcPath, err)
|
||||
}
|
||||
df = nil
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
xerrx("storing config directory", err)
|
||||
}
|
||||
|
||||
srcDataDir := filepath.Clean(mox.DataDirPath("."))
|
||||
@ -122,7 +208,10 @@ func backupctl(ctx context.Context, ctl *ctl) {
|
||||
xerrx("open source file (not backed up)", err, slog.String("srcpath", srcpath), slog.String("dstpath", dstpath))
|
||||
return
|
||||
}
|
||||
defer sf.Close()
|
||||
defer func() {
|
||||
err := sf.Close()
|
||||
xctl.log.Check(err, "closing source file")
|
||||
}()
|
||||
|
||||
ensureDestDir(dstpath)
|
||||
df, err := os.OpenFile(dstpath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0660)
|
||||
@ -132,7 +221,8 @@ func backupctl(ctx context.Context, ctl *ctl) {
|
||||
}
|
||||
defer func() {
|
||||
if df != nil {
|
||||
df.Close()
|
||||
err := df.Close()
|
||||
xctl.log.Check(err, "closing destination file")
|
||||
}
|
||||
}()
|
||||
if _, err := io.Copy(df, sf); err != nil {
|
||||
@ -174,18 +264,9 @@ func backupctl(ctx context.Context, ctl *ctl) {
|
||||
xvlog("backed up directory", slog.String("dir", dir), slog.Duration("duration", time.Since(tmDir)))
|
||||
}
|
||||
|
||||
// Backup a database by copying it in a readonly transaction.
|
||||
// Always logs on error, so caller doesn't have to, but also returns the error so
|
||||
// callers can see result.
|
||||
backupDB := func(db *bstore.DB, path string) (rerr error) {
|
||||
defer func() {
|
||||
if rerr != nil {
|
||||
xerrx("backing up database", rerr, slog.String("path", path))
|
||||
}
|
||||
}()
|
||||
|
||||
tmDB := time.Now()
|
||||
|
||||
// Backup a database by copying it in a readonly transaction. Wrapped by backupDB
|
||||
// which logs and returns just a bool.
|
||||
backupDB0 := func(db *bstore.DB, path string) error {
|
||||
dstpath := filepath.Join(dstDataDir, path)
|
||||
ensureDestDir(dstpath)
|
||||
df, err := os.OpenFile(dstpath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0660)
|
||||
@ -194,7 +275,8 @@ func backupctl(ctx context.Context, ctl *ctl) {
|
||||
}
|
||||
defer func() {
|
||||
if df != nil {
|
||||
df.Close()
|
||||
err := df.Close()
|
||||
xctl.log.Check(err, "closing destination database file")
|
||||
}
|
||||
}()
|
||||
err = db.Read(ctx, func(tx *bstore.Tx) error {
|
||||
@ -219,10 +301,20 @@ func backupctl(ctx context.Context, ctl *ctl) {
|
||||
if err != nil {
|
||||
return fmt.Errorf("closing destination database after copy: %v", err)
|
||||
}
|
||||
xvlog("backed up database file", slog.String("path", path), slog.Duration("duration", time.Since(tmDB)))
|
||||
return nil
|
||||
}
|
||||
|
||||
backupDB := func(db *bstore.DB, path string) bool {
|
||||
start := time.Now()
|
||||
err := backupDB0(db, path)
|
||||
if err != nil {
|
||||
xerrx("backing up database", err, slog.String("path", path), slog.Duration("duration", time.Since(start)))
|
||||
return false
|
||||
}
|
||||
xvlog("backed up database file", slog.String("path", path), slog.Duration("duration", time.Since(start)))
|
||||
return true
|
||||
}
|
||||
|
||||
// Try to create a hardlink. Fall back to copying the file (e.g. when on different file system).
|
||||
warnedHardlink := false // We warn once about failing to hardlink.
|
||||
linkOrCopy := func(srcpath, dstpath string) (bool, error) {
|
||||
@ -234,7 +326,11 @@ func backupctl(ctx context.Context, ctl *ctl) {
|
||||
// No point in trying with regular copy, we would warn twice.
|
||||
return false, err
|
||||
} else if !warnedHardlink {
|
||||
xwarnx("creating hardlink to message failed, will be doing regular file copies and not warn again", err, slog.String("srcpath", srcpath), slog.String("dstpath", dstpath))
|
||||
var hardlinkHint string
|
||||
if runtime.GOOS == "linux" && errors.Is(err, syscall.EXDEV) {
|
||||
hardlinkHint = " (hint: if running under systemd, ReadWritePaths in mox.service may cause multiple mountpoints; consider merging paths into a single parent directory to prevent cross-device/mountpoint hardlinks)"
|
||||
}
|
||||
xwarnx("creating hardlink to message failed, will be doing regular file copies and not warn again"+hardlinkHint, err, slog.String("srcpath", srcpath), slog.String("dstpath", dstpath))
|
||||
warnedHardlink = true
|
||||
}
|
||||
|
||||
@ -245,7 +341,7 @@ func backupctl(ctx context.Context, ctl *ctl) {
|
||||
}
|
||||
defer func() {
|
||||
err := sf.Close()
|
||||
ctl.log.Check(err, "closing copied source file")
|
||||
xctl.log.Check(err, "closing copied source file")
|
||||
}()
|
||||
|
||||
df, err := os.OpenFile(dstpath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0660)
|
||||
@ -255,7 +351,7 @@ func backupctl(ctx context.Context, ctl *ctl) {
|
||||
defer func() {
|
||||
if df != nil {
|
||||
err := df.Close()
|
||||
ctl.log.Check(err, "closing partial destination file")
|
||||
xctl.log.Check(err, "closing partial destination file")
|
||||
}
|
||||
}()
|
||||
if _, err := io.Copy(df, sf); err != nil {
|
||||
@ -272,16 +368,16 @@ func backupctl(ctx context.Context, ctl *ctl) {
|
||||
// Start making the backup.
|
||||
tmStart := time.Now()
|
||||
|
||||
ctl.log.Print("making backup", slog.String("destdir", dstDataDir))
|
||||
xctl.log.Print("making backup", slog.String("destdir", dstDataDir))
|
||||
|
||||
err := os.MkdirAll(dstDataDir, 0770)
|
||||
if err != nil {
|
||||
if err := os.MkdirAll(dstDataDir, 0770); err != nil {
|
||||
xerrx("creating destination data directory", err)
|
||||
}
|
||||
|
||||
if err := os.WriteFile(filepath.Join(dstDataDir, "moxversion"), []byte(moxvar.Version), 0660); err != nil {
|
||||
xerrx("writing moxversion", err)
|
||||
}
|
||||
backupDB(store.AuthDB, "auth.db")
|
||||
backupDB(dmarcdb.ReportsDB, "dmarcrpt.db")
|
||||
backupDB(dmarcdb.EvalDB, "dmarceval.db")
|
||||
backupDB(mtastsdb.DB, "mtasts.db")
|
||||
@ -293,7 +389,7 @@ func backupctl(ctx context.Context, ctl *ctl) {
|
||||
srcAcmeDir := filepath.Join(srcDataDir, "acme")
|
||||
if _, err := os.Stat(srcAcmeDir); err == nil {
|
||||
backupDir("acme")
|
||||
} else if err != nil && !os.IsNotExist(err) {
|
||||
} else if !os.IsNotExist(err) {
|
||||
xerrx("copying acme/", err)
|
||||
}
|
||||
|
||||
@ -301,13 +397,13 @@ func backupctl(ctx context.Context, ctl *ctl) {
|
||||
backupQueue := func(path string) {
|
||||
tmQueue := time.Now()
|
||||
|
||||
if err := backupDB(queue.DB, path); err != nil {
|
||||
xerrx("queue not backed up", err, slog.String("path", path), slog.Duration("duration", time.Since(tmQueue)))
|
||||
if !backupDB(queue.DB, path) {
|
||||
return
|
||||
}
|
||||
|
||||
dstdbpath := filepath.Join(dstDataDir, path)
|
||||
db, err := bstore.Open(ctx, dstdbpath, &bstore.Options{MustExist: true}, queue.DBTypes...)
|
||||
opts := bstore.Options{MustExist: true, RegisterLogger: xctl.log.Logger}
|
||||
db, err := bstore.Open(ctx, dstdbpath, &opts, queue.DBTypes...)
|
||||
if err != nil {
|
||||
xerrx("open copied queue database", err, slog.String("dstpath", dstdbpath), slog.Duration("duration", time.Since(tmQueue)))
|
||||
return
|
||||
@ -316,17 +412,20 @@ func backupctl(ctx context.Context, ctl *ctl) {
|
||||
defer func() {
|
||||
if db != nil {
|
||||
err := db.Close()
|
||||
ctl.log.Check(err, "closing new queue db")
|
||||
xctl.log.Check(err, "closing new queue db")
|
||||
}
|
||||
}()
|
||||
|
||||
// Link/copy known message files. Warn if files are missing or unexpected
|
||||
// (though a message file could have been removed just now due to delivery, or a
|
||||
// new message may have been queued).
|
||||
// Link/copy known message files. If a message has been removed while we read the
|
||||
// database, our backup is not consistent and the backup will be marked failed.
|
||||
tmMsgs := time.Now()
|
||||
seen := map[string]struct{}{}
|
||||
var nlinked, ncopied int
|
||||
var maxID int64
|
||||
err = bstore.QueryDB[queue.Msg](ctx, db).ForEach(func(m queue.Msg) error {
|
||||
if m.ID > maxID {
|
||||
maxID = m.ID
|
||||
}
|
||||
mp := store.MessagePath(m.ID)
|
||||
seen[mp] = struct{}{}
|
||||
srcpath := filepath.Join(srcDataDir, "queue", mp)
|
||||
@ -349,7 +448,9 @@ func backupctl(ctx context.Context, ctl *ctl) {
|
||||
slog.Duration("duration", time.Since(tmMsgs)))
|
||||
}
|
||||
|
||||
// Read through all files in queue directory and warn about anything we haven't handled yet.
|
||||
// Read through all files in queue directory and warn about anything we haven't
|
||||
// handled yet. Message files that are newer than we expect from our consistent
|
||||
// database snapshot are ignored.
|
||||
tmWalk := time.Now()
|
||||
srcqdir := filepath.Join(srcDataDir, "queue")
|
||||
err = filepath.WalkDir(srcqdir, func(srcqpath string, d fs.DirEntry, err error) error {
|
||||
@ -367,6 +468,12 @@ func backupctl(ctx context.Context, ctl *ctl) {
|
||||
if p == "index.db" {
|
||||
return nil
|
||||
}
|
||||
// Skip any messages that were added since we started on our consistent snapshot.
|
||||
// We don't want to cause spurious backup warnings.
|
||||
if id, err := strconv.ParseInt(filepath.Base(p), 10, 64); err == nil && maxID > 0 && id > maxID && p == store.MessagePath(id) {
|
||||
return nil
|
||||
}
|
||||
|
||||
qp := filepath.Join("queue", p)
|
||||
xwarnx("backing up unrecognized file in queue directory", nil, slog.String("path", qp))
|
||||
backupFile(qp)
|
||||
@ -383,21 +490,21 @@ func backupctl(ctx context.Context, ctl *ctl) {
|
||||
backupQueue(filepath.FromSlash("queue/index.db"))
|
||||
|
||||
backupAccount := func(acc *store.Account) {
|
||||
defer acc.Close()
|
||||
defer func() {
|
||||
err := acc.Close()
|
||||
xctl.log.Check(err, "closing account")
|
||||
}()
|
||||
|
||||
tmAccount := time.Now()
|
||||
|
||||
// Copy database file.
|
||||
dbpath := filepath.Join("accounts", acc.Name, "index.db")
|
||||
err := backupDB(acc.DB, dbpath)
|
||||
if err != nil {
|
||||
xerrx("copying account database", err, slog.String("path", dbpath), slog.Duration("duration", time.Since(tmAccount)))
|
||||
}
|
||||
backupDB(acc.DB, dbpath)
|
||||
|
||||
// todo: should document/check not taking a rlock on account.
|
||||
|
||||
// Copy junkfilter files, if configured.
|
||||
if jf, _, err := acc.OpenJunkFilter(ctx, ctl.log); err != nil {
|
||||
if jf, _, err := acc.OpenJunkFilter(ctx, xctl.log); err != nil {
|
||||
if !errors.Is(err, store.ErrNoJunkFilter) {
|
||||
xerrx("opening junk filter for account (not backed up)", err)
|
||||
}
|
||||
@ -407,13 +514,13 @@ func backupctl(ctx context.Context, ctl *ctl) {
|
||||
backupDB(db, jfpath)
|
||||
bloompath := filepath.Join("accounts", acc.Name, "junkfilter.bloom")
|
||||
backupFile(bloompath)
|
||||
db = nil
|
||||
err := jf.Close()
|
||||
ctl.log.Check(err, "closing junkfilter")
|
||||
xctl.log.Check(err, "closing junkfilter")
|
||||
}
|
||||
|
||||
dstdbpath := filepath.Join(dstDataDir, dbpath)
|
||||
db, err := bstore.Open(ctx, dstdbpath, &bstore.Options{MustExist: true}, store.DBTypes...)
|
||||
opts := bstore.Options{MustExist: true, RegisterLogger: xctl.log.Logger}
|
||||
db, err := bstore.Open(ctx, dstdbpath, &opts, store.DBTypes...)
|
||||
if err != nil {
|
||||
xerrx("open copied account database", err, slog.String("dstpath", dstdbpath), slog.Duration("duration", time.Since(tmAccount)))
|
||||
return
|
||||
@ -422,17 +529,19 @@ func backupctl(ctx context.Context, ctl *ctl) {
|
||||
defer func() {
|
||||
if db != nil {
|
||||
err := db.Close()
|
||||
ctl.log.Check(err, "close account database")
|
||||
xctl.log.Check(err, "close account database")
|
||||
}
|
||||
}()
|
||||
|
||||
// Link/copy known message files. Warn if files are missing or unexpected (though a
|
||||
// message file could have been added just now due to delivery, or a message have
|
||||
// been removed).
|
||||
// Link/copy known message files.
|
||||
tmMsgs := time.Now()
|
||||
seen := map[string]struct{}{}
|
||||
var maxID int64
|
||||
var nlinked, ncopied int
|
||||
err = bstore.QueryDB[store.Message](ctx, db).FilterEqual("Expunged", false).ForEach(func(m store.Message) error {
|
||||
if m.ID > maxID {
|
||||
maxID = m.ID
|
||||
}
|
||||
mp := store.MessagePath(m.ID)
|
||||
seen[mp] = struct{}{}
|
||||
amp := filepath.Join("accounts", acc.Name, "msg", mp)
|
||||
@ -456,7 +565,18 @@ func backupctl(ctx context.Context, ctl *ctl) {
|
||||
slog.Duration("duration", time.Since(tmMsgs)))
|
||||
}
|
||||
|
||||
// Read through all files in account directory and warn about anything we haven't handled yet.
|
||||
eraseIDs := map[int64]struct{}{}
|
||||
err = bstore.QueryDB[store.MessageErase](ctx, db).ForEach(func(me store.MessageErase) error {
|
||||
eraseIDs[me.ID] = struct{}{}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
xerrx("listing erased messages", err)
|
||||
}
|
||||
|
||||
// Read through all files in queue directory and warn about anything we haven't
|
||||
// handled yet. Message files that are newer than we expect from our consistent
|
||||
// database snapshot are ignored.
|
||||
tmWalk := time.Now()
|
||||
srcadir := filepath.Join(srcDataDir, "accounts", acc.Name)
|
||||
err = filepath.WalkDir(srcadir, func(srcapath string, d fs.DirEntry, err error) error {
|
||||
@ -474,6 +594,16 @@ func backupctl(ctx context.Context, ctl *ctl) {
|
||||
if _, ok := seen[mp]; ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Skip any messages that were added since we started on our consistent snapshot,
|
||||
// or messages that will be erased. We don't want to cause spurious backup
|
||||
// warnings.
|
||||
id, err := strconv.ParseInt(l[len(l)-1], 10, 64)
|
||||
if err == nil && id > maxID && mp == store.MessagePath(id) {
|
||||
return nil
|
||||
} else if _, ok := eraseIDs[id]; err == nil && ok {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
switch p {
|
||||
case "index.db", "junkfilter.db", "junkfilter.bloom":
|
||||
@ -502,7 +632,7 @@ func backupctl(ctx context.Context, ctl *ctl) {
|
||||
// account directories when handling "all other files" below.
|
||||
accounts := map[string]struct{}{}
|
||||
for _, accName := range mox.Conf.Accounts() {
|
||||
acc, err := store.OpenAccount(ctl.log, accName)
|
||||
acc, err := store.OpenAccount(xctl.log, accName, false)
|
||||
if err != nil {
|
||||
xerrx("opening account for copying (will try to copy as regular files later)", err, slog.String("account", accName))
|
||||
continue
|
||||
@ -540,7 +670,7 @@ func backupctl(ctx context.Context, ctl *ctl) {
|
||||
}
|
||||
|
||||
switch p {
|
||||
case "dmarcrpt.db", "dmarceval.db", "mtasts.db", "tlsrpt.db", "tlsrptresult.db", "receivedid.key", "ctl":
|
||||
case "auth.db", "dmarcrpt.db", "dmarceval.db", "mtasts.db", "tlsrpt.db", "tlsrptresult.db", "receivedid.key", "ctl":
|
||||
// Already handled.
|
||||
return nil
|
||||
case "lastknownversion": // Optional file, not yet handled.
|
||||
@ -558,11 +688,11 @@ func backupctl(ctx context.Context, ctl *ctl) {
|
||||
|
||||
xvlog("backup finished", slog.Duration("duration", time.Since(tmStart)))
|
||||
|
||||
writer.xclose()
|
||||
xwriter.xclose()
|
||||
|
||||
if incomplete {
|
||||
ctl.xwrite("errors were encountered during backup")
|
||||
xctl.xwrite("errors were encountered during backup")
|
||||
} else {
|
||||
ctl.xwriteok()
|
||||
xctl.xwriteok()
|
||||
}
|
||||
}
|
||||
|
124
config/config.go
124
config/config.go
@ -5,6 +5,7 @@ import (
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"regexp"
|
||||
@ -60,11 +61,11 @@ type Static struct {
|
||||
HostTLSRPT struct {
|
||||
Account string `sconf-doc:"Account to deliver TLS reports to. Typically same account as for postmaster."`
|
||||
Mailbox string `sconf-doc:"Mailbox to deliver TLS reports to. Recommended value: TLSRPT."`
|
||||
Localpart string `sconf-doc:"Localpart at hostname to accept TLS reports at. Recommended value: tls-reports."`
|
||||
Localpart string `sconf-doc:"Localpart at hostname to accept TLS reports at. Recommended value: tlsreports."`
|
||||
|
||||
ParsedLocalpart smtp.Localpart `sconf:"-"`
|
||||
} `sconf:"optional" sconf-doc:"Destination for per-host TLS reports (TLSRPT). TLS reports can be per recipient domain (for MTA-STS), or per MX host (for DANE). The per-domain TLS reporting configuration is in domains.conf. This is the TLS reporting configuration for this host. If absent, no host-based TLSRPT address is configured, and no host TLSRPT DNS record is suggested."`
|
||||
InitialMailboxes InitialMailboxes `sconf:"optional" sconf-doc:"Mailboxes to create for new accounts. Inbox is always created. Mailboxes can be given a 'special-use' role, which are understood by most mail clients. If absent/empty, the following mailboxes are created: Sent, Archive, Trash, Drafts and Junk."`
|
||||
InitialMailboxes InitialMailboxes `sconf:"optional" sconf-doc:"Mailboxes to create for new accounts. Inbox is always created. Mailboxes can be given a 'special-use' role, which are understood by most mail clients. If absent/empty, the following additional mailboxes are created: Sent, Archive, Trash, Drafts and Junk."`
|
||||
DefaultMailboxes []string `sconf:"optional" sconf-doc:"Deprecated in favor of InitialMailboxes. Mailboxes to create when adding an account. Inbox is always created. If no mailboxes are specified, the following are automatically created: Sent, Archive, Trash, Drafts and Junk."`
|
||||
Transports map[string]Transport `sconf:"optional" sconf-doc:"Transport are mechanisms for delivering messages. Transports can be referenced from Routes in accounts, domains and the global configuration. There is always an implicit/fallback delivery transport doing direct delivery with SMTP from the outgoing message queue. Transports are typically only configured when using smarthosts, i.e. when delivering through another SMTP server. Zero or one transport methods must be set in a transport, never multiple. When using an external party to send email for a domain, keep in mind you may have to add their IP address to your domain's SPF record, and possibly additional DKIM records."`
|
||||
// Awkward naming of fields to get intended default behaviour for zero values.
|
||||
@ -109,19 +110,20 @@ type Dynamic struct {
|
||||
Domains map[string]Domain `sconf-doc:"NOTE: This config file is in 'sconf' format. Indent with tabs. Comments must be on their own line, they don't end a line. Do not escape or quote strings. Details: https://pkg.go.dev/github.com/mjl-/sconf.\n\n\nDomains for which email is accepted. For internationalized domains, use their IDNA names in UTF-8."`
|
||||
Accounts map[string]Account `sconf-doc:"Accounts represent mox users, each with a password and email address(es) to which email can be delivered (possibly at different domains). Each account has its own on-disk directory holding its messages and index database. An account name is not an email address."`
|
||||
WebDomainRedirects map[string]string `sconf:"optional" sconf-doc:"Redirect all requests from domain (key) to domain (value). Always redirects to HTTPS. For plain HTTP redirects, use a WebHandler with a WebRedirect."`
|
||||
WebHandlers []WebHandler `sconf:"optional" sconf-doc:"Handle webserver requests by serving static files, redirecting or reverse-proxying HTTP(s). The first matching WebHandler will handle the request. Built-in handlers, e.g. for account, admin, autoconfig and mta-sts always run first. If no handler matches, the response status code is file not found (404). If functionality you need is missng, simply forward the requests to an application that can provide the needed functionality."`
|
||||
WebHandlers []WebHandler `sconf:"optional" sconf-doc:"Handle webserver requests by serving static files, redirecting, reverse-proxying HTTP(s) or passing the request to an internal service. The first matching WebHandler will handle the request. Built-in system handlers, e.g. for ACME validation, autoconfig and mta-sts always run first. Built-in handlers for admin, account, webmail and webapi are evaluated after all handlers, including webhandlers (allowing for overrides of internal services for some domains). If no handler matches, the response status code is file not found (404). If webserver features are missing, forward the requests to an application that provides the needed functionality itself."`
|
||||
Routes []Route `sconf:"optional" sconf-doc:"Routes for delivering outgoing messages through the queue. Each delivery attempt evaluates account routes, domain routes and finally these global routes. The transport of the first matching route is used in the delivery attempt. If no routes match, which is the default with no configured routes, messages are delivered directly from the queue."`
|
||||
MonitorDNSBLs []string `sconf:"optional" sconf-doc:"DNS blocklists to periodically check with if IPs we send from are present, without using them for checking incoming deliveries.. Also see DNSBLs in SMTP listeners in mox.conf, which specifies DNSBLs to use both for incoming deliveries and for checking our IPs against. Example DNSBLs: sbl.spamhaus.org, bl.spamcop.net."`
|
||||
|
||||
WebDNSDomainRedirects map[dns.Domain]dns.Domain `sconf:"-" json:"-"`
|
||||
MonitorDNSBLZones []dns.Domain `sconf:"-"`
|
||||
ClientSettingDomains map[dns.Domain]struct{} `sconf:"-" json:"-"`
|
||||
}
|
||||
|
||||
type ACME struct {
|
||||
DirectoryURL string `sconf-doc:"For letsencrypt, use https://acme-v02.api.letsencrypt.org/directory."`
|
||||
RenewBefore time.Duration `sconf:"optional" sconf-doc:"How long before expiration to renew the certificate. Default is 30 days."`
|
||||
ContactEmail string `sconf-doc:"Email address to register at ACME provider. The provider can email you when certificates are about to expire. If you configure an address for which email is delivered by this server, keep in mind that TLS misconfigurations could result in such notification emails not arriving."`
|
||||
Port int `sconf:"optional" sconf-doc:"TLS port for ACME validation, 443 by default. You should only override this if you cannot listen on port 443 directly. ACME will make requests to port 443, so you'll have to add an external mechanism to get the connection here, e.g. by configuring port forwarding."`
|
||||
Port int `sconf:"optional" sconf-doc:"TLS port for ACME validation, 443 by default. You should only override this if you cannot listen on port 443 directly. ACME will make requests to port 443, so you'll have to add an external mechanism to get the tls connection here, e.g. by configuring firewall-level port forwarding. Validation over the https port uses tls-alpn-01 with application-layer protocol negotiation, which essentially means the original tls connection must make it here unmodified, an https reverse proxy will not work."`
|
||||
IssuerDomainName string `sconf:"optional" sconf-doc:"If set, used for suggested CAA DNS records, for restricting TLS certificate issuance to a Certificate Authority. If empty and DirectyURL is for Let's Encrypt, this value is set automatically to letsencrypt.org."`
|
||||
ExternalAccountBinding *ExternalAccountBinding `sconf:"optional" sconf-doc:"ACME providers can require that a request for a new ACME account reference an existing non-ACME account known to the provider. External account binding references that account by a key id, and authorizes new ACME account requests by signing it with a key known both by the ACME client and ACME provider."`
|
||||
// ../rfc/8555:2111
|
||||
@ -138,7 +140,7 @@ type Listener struct {
|
||||
IPs []string `sconf-doc:"Use 0.0.0.0 to listen on all IPv4 and/or :: to listen on all IPv6 addresses, but it is better to explicitly specify the IPs you want to use for email, as mox will make sure outgoing connections will only be made from one of those IPs. If both outgoing IPv4 and IPv6 connectivity is possible, and only one family has explicitly configured addresses, both address families are still used for outgoing connections. Use the \"direct\" transport to limit address families for outgoing connections."`
|
||||
NATIPs []string `sconf:"optional" sconf-doc:"If set, the mail server is configured behind a NAT and field IPs are internal instead of the public IPs, while NATIPs lists the public IPs. Used during IP-related DNS self-checks, such as for iprev, mx, spf, autoconfig, autodiscover, and for autotls."`
|
||||
IPsNATed bool `sconf:"optional" sconf-doc:"Deprecated, use NATIPs instead. If set, IPs are not the public IPs, but are NATed. Skips IP-related DNS self-checks."`
|
||||
Hostname string `sconf:"optional" sconf-doc:"If empty, the config global Hostname is used."`
|
||||
Hostname string `sconf:"optional" sconf-doc:"If empty, the config global Hostname is used. The internal services webadmin, webaccount, webmail and webapi only match requests to IPs, this hostname, \"localhost\". All except webadmin also match for any client settings domain."`
|
||||
HostnameDomain dns.Domain `sconf:"-" json:"-"` // Set when parsing config.
|
||||
|
||||
TLS *TLS `sconf:"optional" sconf-doc:"For SMTP/IMAP STARTTLS, direct TLS and HTTPS connections."`
|
||||
@ -156,6 +158,8 @@ type Listener struct {
|
||||
|
||||
FirstTimeSenderDelay *time.Duration `sconf:"optional" sconf-doc:"Delay before accepting a message from a first-time sender for the destination account. Default: 15s."`
|
||||
|
||||
TLSSessionTicketsDisabled *bool `sconf:"optional" sconf-doc:"Override default setting for enabling TLS session tickets. Disabling session tickets may work around TLS interoperability issues."`
|
||||
|
||||
DNSBLZones []dns.Domain `sconf:"-"`
|
||||
} `sconf:"optional"`
|
||||
Submission struct {
|
||||
@ -164,8 +168,9 @@ type Listener struct {
|
||||
NoRequireSTARTTLS bool `sconf:"optional" sconf-doc:"Do not require STARTTLS. Since users must login, this means password may be sent without encryption. Not recommended."`
|
||||
} `sconf:"optional" sconf-doc:"SMTP for submitting email, e.g. by email applications. Starts out in plain text, can be upgraded to TLS with the STARTTLS command. Prefer using Submissions which is always a TLS connection."`
|
||||
Submissions struct {
|
||||
Enabled bool
|
||||
Port int `sconf:"optional" sconf-doc:"Default 465."`
|
||||
Enabled bool
|
||||
Port int `sconf:"optional" sconf-doc:"Default 465."`
|
||||
EnabledOnHTTPS bool `sconf:"optional" sconf-doc:"Additionally enable submission on HTTPS port 443 via TLS ALPN. TLS Application Layer Protocol Negotiation allows clients to request a specific protocol from the server as part of the TLS connection setup. When this setting is enabled and a client requests the 'smtp' protocol after TLS, it will be able to talk SMTP to Mox on port 443. This is meant to be useful as a censorship circumvention technique for Delta Chat."`
|
||||
} `sconf:"optional" sconf-doc:"SMTP over TLS for submitting email, by email applications. Requires a TLS config."`
|
||||
IMAP struct {
|
||||
Enabled bool
|
||||
@ -173,8 +178,9 @@ type Listener struct {
|
||||
NoRequireSTARTTLS bool `sconf:"optional" sconf-doc:"Enable this only when the connection is otherwise encrypted (e.g. through a VPN)."`
|
||||
} `sconf:"optional" sconf-doc:"IMAP for reading email, by email applications. Starts out in plain text, can be upgraded to TLS with the STARTTLS command. Prefer using IMAPS instead which is always a TLS connection."`
|
||||
IMAPS struct {
|
||||
Enabled bool
|
||||
Port int `sconf:"optional" sconf-doc:"Default 993."`
|
||||
Enabled bool
|
||||
Port int `sconf:"optional" sconf-doc:"Default 993."`
|
||||
EnabledOnHTTPS bool `sconf:"optional" sconf-doc:"Additionally enable IMAP on HTTPS port 443 via TLS ALPN. TLS Application Layer Protocol Negotiation allows clients to request a specific protocol from the server as part of the TLS connection setup. When this setting is enabled and a client requests the 'imap' protocol after TLS, it will be able to talk IMAP to Mox on port 443. This is meant to be useful as a censorship circumvention technique for Delta Chat."`
|
||||
} `sconf:"optional" sconf-doc:"IMAP over TLS for reading email, by email applications. Requires a TLS config."`
|
||||
AccountHTTP WebService `sconf:"optional" sconf-doc:"Account web interface, for email users wanting to change their accounts, e.g. set new password, set new delivery rulesets. Default path is /."`
|
||||
AccountHTTPS WebService `sconf:"optional" sconf-doc:"Account web interface listener like AccountHTTP, but for HTTPS. Requires a TLS config."`
|
||||
@ -203,20 +209,22 @@ type Listener struct {
|
||||
NonTLS bool `sconf:"optional" sconf-doc:"If set, plain HTTP instead of HTTPS is spoken on the configured port. Can be useful when the mta-sts domain is reverse proxied."`
|
||||
} `sconf:"optional" sconf-doc:"Serve MTA-STS policies describing SMTP TLS requirements. Requires a TLS config."`
|
||||
WebserverHTTP struct {
|
||||
Enabled bool
|
||||
Port int `sconf:"optional" sconf-doc:"Port for plain HTTP (non-TLS) webserver."`
|
||||
Enabled bool
|
||||
Port int `sconf:"optional" sconf-doc:"Port for plain HTTP (non-TLS) webserver."`
|
||||
RateLimitDisabled bool `sconf:"optional" sconf-doc:"Disable rate limiting for all requests to this port."`
|
||||
} `sconf:"optional" sconf-doc:"All configured WebHandlers will serve on an enabled listener."`
|
||||
WebserverHTTPS struct {
|
||||
Enabled bool
|
||||
Port int `sconf:"optional" sconf-doc:"Port for HTTPS webserver."`
|
||||
Enabled bool
|
||||
Port int `sconf:"optional" sconf-doc:"Port for HTTPS webserver."`
|
||||
RateLimitDisabled bool `sconf:"optional" sconf-doc:"Disable rate limiting for all requests to this port."`
|
||||
} `sconf:"optional" sconf-doc:"All configured WebHandlers will serve on an enabled listener. Either ACME must be configured, or for each WebHandler domain a TLS certificate must be configured."`
|
||||
}
|
||||
|
||||
// WebService is an internal web interface: webmail, webaccount, webadmin, webapi.
|
||||
type WebService struct {
|
||||
Enabled bool
|
||||
Port int `sconf:"optional" sconf-doc:"Default 80 for HTTP and 443 for HTTPS."`
|
||||
Path string `sconf:"optional" sconf-doc:"Path to serve requests on."`
|
||||
Port int `sconf:"optional" sconf-doc:"Default 80 for HTTP and 443 for HTTPS. See Hostname at Listener for hostname matching behaviour."`
|
||||
Path string `sconf:"optional" sconf-doc:"Path to serve requests on. Should end with a slash, related to cookie paths."`
|
||||
Forwarded bool `sconf:"optional" sconf-doc:"If set, X-Forwarded-* headers are used for the remote IP address for rate limiting and for the \"secure\" status of cookies."`
|
||||
}
|
||||
|
||||
@ -229,6 +237,7 @@ type Transport struct {
|
||||
SMTP *TransportSMTP `sconf:"optional" sconf-doc:"SMTP over a plain connection (possibly with STARTTLS), typically for old-fashioned unauthenticated relaying to a remote queue."`
|
||||
Socks *TransportSocks `sconf:"optional" sconf-doc:"Like regular direct delivery, but makes outgoing connections through a SOCKS proxy."`
|
||||
Direct *TransportDirect `sconf:"optional" sconf-doc:"Like regular direct delivery, but allows to tweak outgoing connections."`
|
||||
Fail *TransportFail `sconf:"optional" sconf-doc:"Immediately fails the delivery attempt."`
|
||||
}
|
||||
|
||||
// TransportSMTP delivers messages by "submission" (SMTP, typically
|
||||
@ -272,17 +281,29 @@ type TransportDirect struct {
|
||||
IPFamily string `sconf:"-" json:"-"`
|
||||
}
|
||||
|
||||
// TransportFail is a transport that fails all delivery attempts.
|
||||
type TransportFail struct {
|
||||
SMTPCode int `sconf:"optional" sconf-doc:"SMTP error code and optional enhanced error code to use for the failure. If empty, 554 is used (transaction failed)."`
|
||||
SMTPMessage string `sconf:"optional" sconf-doc:"Message to include for the rejection. It will be shown in the DSN."`
|
||||
|
||||
// Effective values to use, set when parsing.
|
||||
Code int `sconf:"-"`
|
||||
Message string `sconf:"-"`
|
||||
}
|
||||
|
||||
type Domain struct {
|
||||
Description string `sconf:"optional" sconf-doc:"Free-form description of domain."`
|
||||
ClientSettingsDomain string `sconf:"optional" sconf-doc:"Hostname for client settings instead of the mail server hostname. E.g. mail.<domain>. For future migration to another mail operator without requiring all clients to update their settings, it is convenient to have client settings that reference a subdomain of the hosted domain instead of the hostname of the server where the mail is currently hosted. If empty, the hostname of the mail server is used for client configurations. Unicode name."`
|
||||
LocalpartCatchallSeparator string `sconf:"optional" sconf-doc:"If not empty, only the string before the separator is used to for email delivery decisions. For example, if set to \"+\", you+anything@example.com will be delivered to you@example.com."`
|
||||
LocalpartCaseSensitive bool `sconf:"optional" sconf-doc:"If set, upper/lower case is relevant for email delivery."`
|
||||
DKIM DKIM `sconf:"optional" sconf-doc:"With DKIM signing, a domain is taking responsibility for (content of) emails it sends, letting receiving mail servers build up a (hopefully positive) reputation of the domain, which can help with mail delivery."`
|
||||
DMARC *DMARC `sconf:"optional" sconf-doc:"With DMARC, a domain publishes, in DNS, a policy on how other mail servers should handle incoming messages with the From-header matching this domain and/or subdomain (depending on the configured alignment). Receiving mail servers use this to build up a reputation of this domain, which can help with mail delivery. A domain can also publish an email address to which reports about DMARC verification results can be sent by verifying mail servers, useful for monitoring. Incoming DMARC reports are automatically parsed, validated, added to metrics and stored in the reporting database for later display in the admin web pages."`
|
||||
MTASTS *MTASTS `sconf:"optional" sconf-doc:"MTA-STS is a mechanism that allows publishing a policy with requirements for WebPKI-verified SMTP STARTTLS connections for email delivered to a domain. Existence of a policy is announced in a DNS TXT record (often unprotected/unverified, MTA-STS's weak spot). If a policy exists, it is fetched with a WebPKI-verified HTTPS request. The policy can indicate that WebPKI-verified SMTP STARTTLS is required, and which MX hosts (optionally with a wildcard pattern) are allowd. MX hosts to deliver to are still taken from DNS (again, not necessarily protected/verified), but messages will only be delivered to domains matching the MX hosts from the published policy. Mail servers look up the MTA-STS policy when first delivering to a domain, then keep a cached copy, periodically checking the DNS record if a new policy is available, and fetching and caching it if so. To update a policy, first serve a new policy with an updated policy ID, then update the DNS record (not the other way around). To remove an enforced policy, publish an updated policy with mode \"none\" for a long enough period so all cached policies have been refreshed (taking DNS TTL and policy max age into account), then remove the policy from DNS, wait for TTL to expire, and stop serving the policy."`
|
||||
TLSRPT *TLSRPT `sconf:"optional" sconf-doc:"With TLSRPT a domain specifies in DNS where reports about encountered SMTP TLS behaviour should be sent. Useful for monitoring. Incoming TLS reports are automatically parsed, validated, added to metrics and stored in the reporting database for later display in the admin web pages."`
|
||||
Routes []Route `sconf:"optional" sconf-doc:"Routes for delivering outgoing messages through the queue. Each delivery attempt evaluates account routes, these domain routes and finally global routes. The transport of the first matching route is used in the delivery attempt. If no routes match, which is the default with no configured routes, messages are delivered directly from the queue."`
|
||||
Aliases map[string]Alias `sconf:"optional" sconf-doc:"Aliases that cause messages to be delivered to one or more locally configured addresses. Keys are localparts (encoded, as they appear in email addresses)."`
|
||||
Disabled bool `sconf:"optional" sconf-doc:"Disabled domains can be useful during/before migrations. Domains that are disabled can still be configured like normal, including adding addresses using the domain to accounts. However, disabled domains: 1. Do not try to fetch ACME certificates. TLS connections to host names involving the email domain will fail. A TLS certificate for the hostname (that wil be used as MX) itself will be requested. 2. Incoming deliveries over SMTP are rejected with a temporary error '450 4.2.1 recipient domain temporarily disabled'. 3. Submissions over SMTP using an (envelope) SMTP MAIL FROM address or message 'From' address of a disabled domain will be rejected with a temporary error '451 4.3.0 sender domain temporarily disabled'. Note that accounts with addresses at disabled domains can still log in and read email (unless the account itself is disabled)."`
|
||||
Description string `sconf:"optional" sconf-doc:"Free-form description of domain."`
|
||||
ClientSettingsDomain string `sconf:"optional" sconf-doc:"Hostname for client settings instead of the mail server hostname. E.g. mail.<domain>. For future migration to another mail operator without requiring all clients to update their settings, it is convenient to have client settings that reference a subdomain of the hosted domain instead of the hostname of the server where the mail is currently hosted. If empty, the hostname of the mail server is used for client configurations. Unicode name."`
|
||||
LocalpartCatchallSeparator string `sconf:"optional" sconf-doc:"If not empty, only the string before the separator is used to for email delivery decisions. For example, if set to \"+\", you+anything@example.com will be delivered to you@example.com."`
|
||||
LocalpartCatchallSeparators []string `sconf:"optional" sconf-doc:"Similar to LocalpartCatchallSeparator, but in case multiple are needed. For example both \"+\" and \"-\". Only of one LocalpartCatchallSeparator or LocalpartCatchallSeparators can be set. If set, the first separator is used to make unique addresses for outgoing SMTP connections with FromIDLoginAddresses."`
|
||||
LocalpartCaseSensitive bool `sconf:"optional" sconf-doc:"If set, upper/lower case is relevant for email delivery."`
|
||||
DKIM DKIM `sconf:"optional" sconf-doc:"With DKIM signing, a domain is taking responsibility for (content of) emails it sends, letting receiving mail servers build up a (hopefully positive) reputation of the domain, which can help with mail delivery."`
|
||||
DMARC *DMARC `sconf:"optional" sconf-doc:"With DMARC, a domain publishes, in DNS, a policy on how other mail servers should handle incoming messages with the From-header matching this domain and/or subdomain (depending on the configured alignment). Receiving mail servers use this to build up a reputation of this domain, which can help with mail delivery. A domain can also publish an email address to which reports about DMARC verification results can be sent by verifying mail servers, useful for monitoring. Incoming DMARC reports are automatically parsed, validated, added to metrics and stored in the reporting database for later display in the admin web pages."`
|
||||
MTASTS *MTASTS `sconf:"optional" sconf-doc:"MTA-STS is a mechanism that allows publishing a policy with requirements for WebPKI-verified SMTP STARTTLS connections for email delivered to a domain. Existence of a policy is announced in a DNS TXT record (often unprotected/unverified, MTA-STS's weak spot). If a policy exists, it is fetched with a WebPKI-verified HTTPS request. The policy can indicate that WebPKI-verified SMTP STARTTLS is required, and which MX hosts (optionally with a wildcard pattern) are allowd. MX hosts to deliver to are still taken from DNS (again, not necessarily protected/verified), but messages will only be delivered to domains matching the MX hosts from the published policy. Mail servers look up the MTA-STS policy when first delivering to a domain, then keep a cached copy, periodically checking the DNS record if a new policy is available, and fetching and caching it if so. To update a policy, first serve a new policy with an updated policy ID, then update the DNS record (not the other way around). To remove an enforced policy, publish an updated policy with mode \"none\" for a long enough period so all cached policies have been refreshed (taking DNS TTL and policy max age into account), then remove the policy from DNS, wait for TTL to expire, and stop serving the policy."`
|
||||
TLSRPT *TLSRPT `sconf:"optional" sconf-doc:"With TLSRPT a domain specifies in DNS where reports about encountered SMTP TLS behaviour should be sent. Useful for monitoring. Incoming TLS reports are automatically parsed, validated, added to metrics and stored in the reporting database for later display in the admin web pages."`
|
||||
Routes []Route `sconf:"optional" sconf-doc:"Routes for delivering outgoing messages through the queue. Each delivery attempt evaluates account routes, these domain routes and finally global routes. The transport of the first matching route is used in the delivery attempt. If no routes match, which is the default with no configured routes, messages are delivered directly from the queue."`
|
||||
Aliases map[string]Alias `sconf:"optional" sconf-doc:"Aliases that cause messages to be delivered to one or more locally configured addresses. Keys are localparts (encoded, as they appear in email addresses)."`
|
||||
|
||||
Domain dns.Domain `sconf:"-"`
|
||||
ClientSettingsDNSDomain dns.Domain `sconf:"-" json:"-"`
|
||||
@ -290,7 +311,8 @@ type Domain struct {
|
||||
// Set when DMARC and TLSRPT (when set) has an address with different domain (we're
|
||||
// hosting the reporting), and there are no destination addresses configured for
|
||||
// the domain. Disables some functionality related to hosting a domain.
|
||||
ReportsOnly bool `sconf:"-" json:"-"`
|
||||
ReportsOnly bool `sconf:"-" json:"-"`
|
||||
LocalpartCatchallSeparatorsEffective []string `sconf:"-"` // Either LocalpartCatchallSeparators, the value of LocalpartCatchallSeparator, or empty.
|
||||
}
|
||||
|
||||
// todo: allow external addresses as members of aliases. we would add messages for them to the queue for outgoing delivery. we should require an admin addresses to which delivery failures will be delivered (locally, and to use in smtp mail from, so dsns go there). also take care to evaluate smtputf8 (if external address requires utf8 and incoming transaction didn't).
|
||||
@ -315,12 +337,12 @@ type AliasAddress struct {
|
||||
}
|
||||
|
||||
type DMARC struct {
|
||||
Localpart string `sconf-doc:"Address-part before the @ that accepts DMARC reports. Must be non-internationalized. Recommended value: dmarc-reports."`
|
||||
Localpart string `sconf-doc:"Address-part before the @ that accepts DMARC reports. Must be non-internationalized. Recommended value: dmarcreports."`
|
||||
Domain string `sconf:"optional" sconf-doc:"Alternative domain for reporting address, for incoming reports. Typically empty, causing the domain wherein this config exists to be used. Can be used to receive reports for domains that aren't fully hosted on this server. Configure such a domain as a hosted domain without making all the DNS changes, and configure this field with a domain that is fully hosted on this server, so the localpart and the domain of this field form a reporting address. Then only update the DMARC DNS record for the not fully hosted domain, ensuring the reporting address is specified in its \"rua\" field as shown in the suggested DNS settings. Unicode name."`
|
||||
Account string `sconf-doc:"Account to deliver to."`
|
||||
Mailbox string `sconf-doc:"Mailbox to deliver to, e.g. DMARC."`
|
||||
|
||||
ParsedLocalpart smtp.Localpart `sconf:"-"`
|
||||
ParsedLocalpart smtp.Localpart `sconf:"-"` // Lower-case if case-sensitivity is not configured for domain. Not "canonical" for catchall separators for backwards compatibility.
|
||||
DNSDomain dns.Domain `sconf:"-"` // Effective domain, always set based on Domain field or Domain where this is configured.
|
||||
}
|
||||
|
||||
@ -333,12 +355,12 @@ type MTASTS struct {
|
||||
}
|
||||
|
||||
type TLSRPT struct {
|
||||
Localpart string `sconf-doc:"Address-part before the @ that accepts TLSRPT reports. Recommended value: tls-reports."`
|
||||
Localpart string `sconf-doc:"Address-part before the @ that accepts TLSRPT reports. Recommended value: tlsreports."`
|
||||
Domain string `sconf:"optional" sconf-doc:"Alternative domain for reporting address, for incoming reports. Typically empty, causing the domain wherein this config exists to be used. Can be used to receive reports for domains that aren't fully hosted on this server. Configure such a domain as a hosted domain without making all the DNS changes, and configure this field with a domain that is fully hosted on this server, so the localpart and the domain of this field form a reporting address. Then only update the TLSRPT DNS record for the not fully hosted domain, ensuring the reporting address is specified in its \"rua\" field as shown in the suggested DNS settings. Unicode name."`
|
||||
Account string `sconf-doc:"Account to deliver to."`
|
||||
Mailbox string `sconf-doc:"Mailbox to deliver to, e.g. TLSRPT."`
|
||||
|
||||
ParsedLocalpart smtp.Localpart `sconf:"-"`
|
||||
ParsedLocalpart smtp.Localpart `sconf:"-"` // Lower-case if case-sensitivity is not configured for domain. Not "canonical" for catchall separators for backwards compatibility.
|
||||
DNSDomain dns.Domain `sconf:"-"` // Effective domain, always set based on Domain field or Domain where this is configured.
|
||||
}
|
||||
|
||||
@ -414,6 +436,7 @@ type Account struct {
|
||||
KeepRetiredMessagePeriod time.Duration `sconf:"optional" sconf-doc:"Period to keep messages retired from the queue (delivered or failed) around. Keeping retired messages is useful for maintaining the suppression list for transactional email, for matching incoming DSNs to sent messages, and for debugging. The time at which to clean up (remove) is calculated at retire time. E.g. 168h (1 week)."`
|
||||
KeepRetiredWebhookPeriod time.Duration `sconf:"optional" sconf-doc:"Period to keep webhooks retired from the queue (delivered or failed) around. Useful for debugging. The time at which to clean up (remove) is calculated at retire time. E.g. 168h (1 week)."`
|
||||
|
||||
LoginDisabled string `sconf:"optional" sconf-doc:"If non-empty, login attempts on all protocols (e.g. SMTP/IMAP, web interfaces) is rejected with this error message. Useful during migrations. Incoming deliveries for addresses of this account are still accepted as normal."`
|
||||
Domain string `sconf-doc:"Default domain for account. Deprecated behaviour: If a destination is not a full address but only a localpart, this domain is added to form a full address."`
|
||||
Description string `sconf:"optional" sconf-doc:"Free form description, e.g. full name or alternative contact info."`
|
||||
FullName string `sconf:"optional" sconf-doc:"Full name, to use in message From header when composing messages in webmail. Can be overridden per destination."`
|
||||
@ -427,6 +450,7 @@ type Account struct {
|
||||
MaxOutgoingMessagesPerDay int `sconf:"optional" sconf-doc:"Maximum number of outgoing messages for this account in a 24 hour window. This limits the damage to recipients and the reputation of this mail server in case of account compromise. Default 1000."`
|
||||
MaxFirstTimeRecipientsPerDay int `sconf:"optional" sconf-doc:"Maximum number of first-time recipients in outgoing messages for this account in a 24 hour window. This limits the damage to recipients and the reputation of this mail server in case of account compromise. Default 200."`
|
||||
NoFirstTimeSenderDelay bool `sconf:"optional" sconf-doc:"Do not apply a delay to SMTP connections before accepting an incoming message from a first-time sender. Can be useful for accounts that send automated responses and want instant replies."`
|
||||
NoCustomPassword bool `sconf:"optional" sconf-doc:"If set, this account cannot set a password of their own choice, but can only set a new randomly generated password, preventing password reuse across services and use of weak passwords. Custom account passwords can be set by the admin."`
|
||||
Routes []Route `sconf:"optional" sconf-doc:"Routes for delivering outgoing messages through the queue. Each delivery attempt evaluates these account routes, domain routes and finally global routes. The transport of the first matching route is used in the delivery attempt. If no routes match, which is the default with no configured routes, messages are delivered directly from the queue."`
|
||||
|
||||
DNSDomain dns.Domain `sconf:"-"` // Parsed form of Domain.
|
||||
@ -449,13 +473,19 @@ type JunkFilter struct {
|
||||
}
|
||||
|
||||
type Destination struct {
|
||||
Mailbox string `sconf:"optional" sconf-doc:"Mailbox to deliver to if none of Rulesets match. Default: Inbox."`
|
||||
Rulesets []Ruleset `sconf:"optional" sconf-doc:"Delivery rules based on message and SMTP transaction. You may want to match each mailing list by SMTP MailFrom address, VerifiedDomain and/or List-ID header (typically <listname.example.org> if the list address is listname@example.org), delivering them to their own mailbox."`
|
||||
FullName string `sconf:"optional" sconf-doc:"Full name to use in message From header when composing messages coming from this address with webmail."`
|
||||
Mailbox string `sconf:"optional" sconf-doc:"Mailbox to deliver to if none of Rulesets match. Default: Inbox."`
|
||||
Rulesets []Ruleset `sconf:"optional" sconf-doc:"Delivery rules based on message and SMTP transaction. You may want to match each mailing list by SMTP MailFrom address, VerifiedDomain and/or List-ID header (typically <listname.example.org> if the list address is listname@example.org), delivering them to their own mailbox."`
|
||||
SMTPError string `sconf:"optional" sconf-doc:"If non-empty, incoming delivery attempts to this destination will be rejected during SMTP RCPT TO with this error response line. Useful when a catchall address is configured for the domain and messages to some addresses should be rejected. The response line must start with an error code. Currently the following error response codes are allowed: 421 (temporary local error), 550 (user not found). If the line consists of only an error code, an appropriate error message is added. Rejecting messages with a 4xx code invites later retries by the remote, while 5xx codes should prevent further delivery attempts."`
|
||||
MessageAuthRequiredSMTPError string `sconf:"optional" sconf-doc:"If non-empty, an additional DMARC-like message authentication check is done for incoming messages, validating the domain in the From-header of the message. Messages without either an aligned SPF or aligned DKIM pass are rejected during the SMTP DATA command with a permanent error code followed by the message in this field. The domain in the message 'From' header is matched in relaxed or strict mode according to the domain's DMARC policy if present, or relaxed mode (organizational instead of exact domain match) otherwise. Useful for autoresponders that don't want to accept messages they don't want to send an automated reply to."`
|
||||
FullName string `sconf:"optional" sconf-doc:"Full name to use in message From header when composing messages coming from this address with webmail."`
|
||||
|
||||
DMARCReports bool `sconf:"-" json:"-"`
|
||||
HostTLSReports bool `sconf:"-" json:"-"`
|
||||
DomainTLSReports bool `sconf:"-" json:"-"`
|
||||
// Ready to use in SMTP responses.
|
||||
SMTPErrorCode int `sconf:"-" json:"-"`
|
||||
SMTPErrorSecode string `sconf:"-" json:"-"`
|
||||
SMTPErrorMsg string `sconf:"-" json:"-"`
|
||||
}
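The SMTPError documentation above says the line must start with 421 or 550 and that a bare code gets a default message; the three sconf:"-" fields at the end hold the parsed result. A minimal sketch of how such a line could be split, assuming illustrative enhanced codes and default messages rather than mox's actual ones.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseSMTPError splits a configured response line like "550 no such user"
// into a code, an enhanced status code and a message. Only 421 and 550 are
// accepted, matching the documentation above.
func parseSMTPError(line string) (code int, secode, msg string, err error) {
	codeStr, rest, _ := strings.Cut(strings.TrimSpace(line), " ")
	code, err = strconv.Atoi(codeStr)
	if err != nil {
		return 0, "", "", fmt.Errorf("response line must start with an error code: %v", err)
	}
	switch code {
	case 421:
		secode, msg = "4.3.2", "try again later" // illustrative defaults
	case 550:
		secode, msg = "5.1.1", "no such user"
	default:
		return 0, "", "", fmt.Errorf("unsupported code %d, only 421 and 550 allowed", code)
	}
	if rest != "" {
		msg = rest
	}
	return code, secode, msg, nil
}

func main() {
	fmt.Println(parseSMTPError("550"))
	fmt.Println(parseSMTPError("421 mailbox busy, try later"))
}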
|
||||
|
||||
// Equal returns whether d and o are equal, only looking at their user-changeable fields.
|
||||
@ -514,22 +544,27 @@ type TLS struct {
|
||||
KeyCerts []KeyCert `sconf:"optional" sconf-doc:"Keys and certificates to use for this listener. The files are opened by the privileged root process and passed to the unprivileged mox process, so no special permissions are required on the files. If the private key will not be replaced when refreshing certificates, also consider adding the private key to HostPrivateKeyFiles and configuring DANE TLSA DNS records."`
|
||||
MinVersion string `sconf:"optional" sconf-doc:"Minimum TLS version. Default: TLSv1.2."`
|
||||
HostPrivateKeyFiles []string `sconf:"optional" sconf-doc:"Private keys used for ACME certificates. Specified explicitly so DANE TLSA DNS records can be generated, even before the certificates are requested. DANE is a mechanism to authenticate remote TLS certificates based on a public key or certificate specified in DNS, protected with DNSSEC. DANE is opportunistic and attempted when delivering SMTP with STARTTLS. The private key files must be in PEM format. PKCS8 is recommended, but PKCS1 and EC private keys are recognized as well. Only RSA 2048 bit and ECDSA P-256 keys are currently used. The first of each is used when requesting new certificates through ACME."`
|
||||
ClientAuthDisabled bool `sconf:"optional" sconf-doc:"Disable TLS client authentication with certificates/keys, preventing the TLS server from requesting a TLS certificate from clients. Useful for working around clients that don't handle TLS client authentication well."`
|
||||
|
||||
Config *tls.Config `sconf:"-" json:"-"` // TLS config for non-ACME-verification connections, i.e. SMTP and IMAP, and not port 443.
|
||||
Config *tls.Config `sconf:"-" json:"-"` // TLS config for non-ACME-verification connections, i.e. SMTP and IMAP, and not port 443. Connections without SNI will use a certificate for the hostname of the listener, connections with an SNI hostname that isn't allowed will be rejected.
|
||||
ConfigFallback *tls.Config `sconf:"-" json:"-"` // Like Config, but uses the certificate for the listener hostname when the requested SNI hostname is not allowed, instead of causing the connection to fail.
|
||||
ACMEConfig *tls.Config `sconf:"-" json:"-"` // TLS config that handles ACME verification, for serving on port 443.
|
||||
HostPrivateRSA2048Keys []crypto.Signer `sconf:"-" json:"-"` // Private keys for new TLS certificates for listener host name, for new certificates with ACME, and for DANE records.
|
||||
HostPrivateECDSAP256Keys []crypto.Signer `sconf:"-" json:"-"`
|
||||
}
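The comments on Config and ConfigFallback above describe two behaviours for an unexpected SNI name: reject the handshake, or fall back to the certificate for the listener hostname. A minimal sketch of how a GetCertificate callback could implement either mode; this is an illustration of the idea under assumed names, not mox's code.

package main

import (
	"crypto/tls"
	"fmt"
	"strings"
)

// sniGetCertificate returns a GetCertificate callback. With fallback enabled,
// unknown SNI names get the listener hostname's certificate; otherwise the
// handshake is rejected.
func sniGetCertificate(certs map[string]*tls.Certificate, listenerHost string, fallback bool) func(*tls.ClientHelloInfo) (*tls.Certificate, error) {
	return func(hello *tls.ClientHelloInfo) (*tls.Certificate, error) {
		name := strings.ToLower(hello.ServerName)
		if name == "" {
			name = listenerHost // no SNI: use the listener hostname
		}
		if cert, ok := certs[name]; ok {
			return cert, nil
		}
		if fallback {
			return certs[listenerHost], nil
		}
		return nil, fmt.Errorf("unknown SNI host %q", name)
	}
}

func main() {
	certs := map[string]*tls.Certificate{"mail.example.org": {}}
	cfg := &tls.Config{GetCertificate: sniGetCertificate(certs, "mail.example.org", true)}
	_ = cfg
}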
|
||||
|
||||
// todo: we could implement matching WebHandler.Domain as IPs too
|
||||
|
||||
type WebHandler struct {
|
||||
LogName string `sconf:"optional" sconf-doc:"Name to use in logging and metrics."`
|
||||
Domain string `sconf-doc:"Both Domain and PathRegexp must match for this WebHandler to match a request. Exactly one of WebStatic, WebRedirect, WebForward must be set."`
|
||||
Domain string `sconf-doc:"Both Domain and PathRegexp must match for this WebHandler to match a request. Exactly one of WebStatic, WebRedirect, WebForward, WebInternal must be set."`
|
||||
PathRegexp string `sconf-doc:"Regular expression matched against request path, must always start with ^ to ensure matching from the start of the path. The matching prefix can optionally be stripped by WebForward. The regular expression does not have to end with $."`
|
||||
DontRedirectPlainHTTP bool `sconf:"optional" sconf-doc:"If set, plain HTTP requests are not automatically permanently redirected (308) to HTTPS. If you don't have a HTTPS webserver configured, set this to true."`
|
||||
Compress bool `sconf:"optional" sconf-doc:"Transparently compress responses (currently with gzip) if the client supports it, the status is 200 OK, no Content-Encoding is set on the response yet and the Content-Type of the response hints that the data is compressible (text/..., specific application/... and .../...+json and .../...+xml). For static files only, a cache with compressed files is kept."`
|
||||
WebStatic *WebStatic `sconf:"optional" sconf-doc:"Serve static files."`
|
||||
WebRedirect *WebRedirect `sconf:"optional" sconf-doc:"Redirect requests to configured URL."`
|
||||
WebForward *WebForward `sconf:"optional" sconf-doc:"Forward requests to another webserver, i.e. reverse proxy."`
|
||||
WebInternal *WebInternal `sconf:"optional" sconf-doc:"Pass request to internal service, like webmail, webapi, etc."`
|
||||
|
||||
Name string `sconf:"-"` // Either LogName, or numeric index if LogName was empty. Used instead of LogName in logging/metrics.
|
||||
DNSDomain dns.Domain `sconf:"-"`
|
||||
@ -545,6 +580,7 @@ func (wh WebHandler) Equal(o WebHandler) bool {
|
||||
x.WebStatic = nil
|
||||
x.WebRedirect = nil
|
||||
x.WebForward = nil
|
||||
x.WebInternal = nil
|
||||
return x
|
||||
}
|
||||
cwh := clean(wh)
|
||||
@ -552,7 +588,7 @@ func (wh WebHandler) Equal(o WebHandler) bool {
|
||||
if cwh != co {
|
||||
return false
|
||||
}
|
||||
if (wh.WebStatic == nil) != (o.WebStatic == nil) || (wh.WebRedirect == nil) != (o.WebRedirect == nil) || (wh.WebForward == nil) != (o.WebForward == nil) {
|
||||
if (wh.WebStatic == nil) != (o.WebStatic == nil) || (wh.WebRedirect == nil) != (o.WebRedirect == nil) || (wh.WebForward == nil) != (o.WebForward == nil) || (wh.WebInternal == nil) != (o.WebInternal == nil) {
|
||||
return false
|
||||
}
|
||||
if wh.WebStatic != nil {
|
||||
@ -564,6 +600,9 @@ func (wh WebHandler) Equal(o WebHandler) bool {
|
||||
if wh.WebForward != nil {
|
||||
return wh.WebForward.equal(*o.WebForward)
|
||||
}
|
||||
if wh.WebInternal != nil {
|
||||
return wh.WebInternal.equal(*o.WebInternal)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
@ -606,3 +645,16 @@ func (wf WebForward) equal(o WebForward) bool {
|
||||
o.TargetURL = nil
|
||||
return reflect.DeepEqual(wf, o)
|
||||
}

type WebInternal struct {
	BasePath string `sconf-doc:"Path to use as root of internal service, e.g. /webmail/."`
	Service  string `sconf-doc:"Name of the service, values: admin, account, webmail, webapi."`

	Handler http.Handler `sconf:"-" json:"-"`
}

func (wi WebInternal) equal(o WebInternal) bool {
	wi.Handler = nil
	o.Handler = nil
	return reflect.DeepEqual(wi, o)
}

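To see how the new WebInternal variant slots into a WebHandler, a short sketch constructing one in Go; the types here are local stand-ins modelled on the structs shown above, and the http handler is a placeholder, not the real webmail handler.

package main

import (
	"fmt"
	"net/http"
)

// Minimal stand-ins for the config types above, enough to show the wiring.
type WebInternal struct {
	BasePath string
	Service  string
	Handler  http.Handler
}

type WebHandler struct {
	LogName     string
	Domain      string
	PathRegexp  string
	WebInternal *WebInternal
}

func main() {
	wh := WebHandler{
		LogName:    "webmail",
		Domain:     "mail.example.org",
		PathRegexp: "^/webmail/",
		WebInternal: &WebInternal{
			BasePath: "/webmail/",
			Service:  "webmail",
			Handler:  http.NotFoundHandler(), // placeholder for the internal service handler
		},
	}
	fmt.Printf("%+v\n", wh)
}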
192 config/doc.go
@ -113,8 +113,11 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
|
||||
# TLS port for ACME validation, 443 by default. You should only override this if
|
||||
# you cannot listen on port 443 directly. ACME will make requests to port 443, so
|
||||
# you'll have to add an external mechanism to get the connection here, e.g. by
|
||||
# configuring port forwarding. (optional)
|
||||
# you'll have to add an external mechanism to get the tls connection here, e.g. by
|
||||
# configuring firewall-level port forwarding. Validation over the https port uses
|
||||
# tls-alpn-01 with application-layer protocol negotiation, which essentially means
|
||||
# the original tls connection must make it here unmodified, an https reverse proxy
|
||||
# will not work. (optional)
|
||||
Port: 0
|
||||
|
||||
# If set, used for suggested CAA DNS records, for restricting TLS certificate
|
||||
@ -172,7 +175,10 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
# NATed. Skips IP-related DNS self-checks. (optional)
|
||||
IPsNATed: false
|
||||
|
||||
# If empty, the config global Hostname is used. (optional)
|
||||
# If empty, the config global Hostname is used. The internal services webadmin,
|
||||
# webaccount, webmail and webapi only match requests to IPs, this hostname,
|
||||
# "localhost". All except webadmin also match for any client settings domain.
|
||||
# (optional)
|
||||
Hostname:
|
||||
|
||||
# For SMTP/IMAP STARTTLS, direct TLS and HTTPS connections. (optional)
|
||||
@ -211,6 +217,11 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
HostPrivateKeyFiles:
|
||||
-
|
||||
|
||||
# Disable TLS client authentication with certificates/keys, preventing the TLS
|
||||
# server from requesting a TLS certificate from clients. Useful for working around
|
||||
# clients that don't handle TLS client authentication well. (optional)
|
||||
ClientAuthDisabled: false
|
||||
|
||||
# Maximum size in bytes for incoming and outgoing messages. Default is 100MB.
|
||||
# (optional)
|
||||
SMTPMaxMessageSize: 0
|
||||
@ -256,6 +267,10 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
# account. Default: 15s. (optional)
|
||||
FirstTimeSenderDelay: 0s
|
||||
|
||||
# Override default setting for enabling TLS session tickets. Disabling session
|
||||
# tickets may work around TLS interoperability issues. (optional)
|
||||
TLSSessionTicketsDisabled: false
|
||||
|
||||
# SMTP for submitting email, e.g. by email applications. Starts out in plain text,
|
||||
# can be upgraded to TLS with the STARTTLS command. Prefer using Submissions which
|
||||
# is always a TLS connection. (optional)
|
||||
@ -277,6 +292,14 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
# Default 465. (optional)
|
||||
Port: 0
|
||||
|
||||
# Additionally enable submission on HTTPS port 443 via TLS ALPN. TLS Application
|
||||
# Layer Protocol Negotiation allows clients to request a specific protocol from
|
||||
# the server as part of the TLS connection setup. When this setting is enabled and
|
||||
# a client requests the 'smtp' protocol after TLS, it will be able to talk SMTP to
|
||||
# Mox on port 443. This is meant to be useful as a censorship circumvention
|
||||
# technique for Delta Chat. (optional)
|
||||
EnabledOnHTTPS: false
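The ALPN description above (and the matching IMAP setting further down) boils down to advertising extra protocol names next to the usual HTTPS ones and dispatching on whatever the client negotiated. A hedged sketch of that dispatch; the protocol names 'smtp' and 'imap' come from the text, the rest is illustrative.

package main

import (
	"crypto/tls"
	"fmt"
)

// dispatchALPN decides which server should handle a finished TLS connection
// on port 443, based on the protocol the client negotiated.
func dispatchALPN(conn *tls.Conn) {
	switch conn.ConnectionState().NegotiatedProtocol {
	case "smtp":
		fmt.Println("hand off to the submission (SMTP) server")
	case "imap":
		fmt.Println("hand off to the IMAP server")
	default:
		fmt.Println("serve HTTPS as usual")
	}
}

func main() {
	// Advertise the extra protocols next to the regular HTTPS ones.
	cfg := &tls.Config{NextProtos: []string{"h2", "http/1.1", "smtp", "imap"}}
	_ = cfg
	_ = dispatchALPN
}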
|
||||
|
||||
# IMAP for reading email, by email applications. Starts out in plain text, can be
|
||||
# upgraded to TLS with the STARTTLS command. Prefer using IMAPS instead which is
|
||||
# always a TLS connection. (optional)
|
||||
@ -298,15 +321,25 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
# Default 993. (optional)
|
||||
Port: 0
|
||||
|
||||
# Additionally enable IMAP on HTTPS port 443 via TLS ALPN. TLS Application Layer
|
||||
# Protocol Negotiation allows clients to request a specific protocol from the
|
||||
# server as part of the TLS connection setup. When this setting is enabled and a
|
||||
# client requests the 'imap' protocol after TLS, it will be able to talk IMAP to
|
||||
# Mox on port 443. This is meant to be useful as a censorship circumvention
|
||||
# technique for Delta Chat. (optional)
|
||||
EnabledOnHTTPS: false
|
||||
|
||||
# Account web interface, for email users wanting to change their accounts, e.g.
|
||||
# set new password, set new delivery rulesets. Default path is /. (optional)
|
||||
AccountHTTP:
|
||||
Enabled: false
|
||||
|
||||
# Default 80 for HTTP and 443 for HTTPS. (optional)
|
||||
# Default 80 for HTTP and 443 for HTTPS. See Hostname at Listener for hostname
|
||||
# matching behaviour. (optional)
|
||||
Port: 0
|
||||
|
||||
# Path to serve requests on. (optional)
|
||||
# Path to serve requests on. Should end with a slash, related to cookie paths.
|
||||
# (optional)
|
||||
Path:
|
||||
|
||||
# If set, X-Forwarded-* headers are used for the remote IP address for rate
|
||||
@ -318,10 +351,12 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
AccountHTTPS:
|
||||
Enabled: false
|
||||
|
||||
# Default 80 for HTTP and 443 for HTTPS. (optional)
|
||||
# Default 80 for HTTP and 443 for HTTPS. See Hostname at Listener for hostname
|
||||
# matching behaviour. (optional)
|
||||
Port: 0
|
||||
|
||||
# Path to serve requests on. (optional)
|
||||
# Path to serve requests on. Should end with a slash, related to cookie paths.
|
||||
# (optional)
|
||||
Path:
|
||||
|
||||
# If set, X-Forwarded-* headers are used for the remote IP address for rate
|
||||
@ -336,10 +371,12 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
AdminHTTP:
|
||||
Enabled: false
|
||||
|
||||
# Default 80 for HTTP and 443 for HTTPS. (optional)
|
||||
# Default 80 for HTTP and 443 for HTTPS. See Hostname at Listener for hostname
|
||||
# matching behaviour. (optional)
|
||||
Port: 0
|
||||
|
||||
# Path to serve requests on. (optional)
|
||||
# Path to serve requests on. Should end with a slash, related to cookie paths.
|
||||
# (optional)
|
||||
Path:
|
||||
|
||||
# If set, X-Forwarded-* headers are used for the remote IP address for rate
|
||||
@ -351,10 +388,12 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
AdminHTTPS:
|
||||
Enabled: false
|
||||
|
||||
# Default 80 for HTTP and 443 for HTTPS. (optional)
|
||||
# Default 80 for HTTP and 443 for HTTPS. See Hostname at Listener for hostname
|
||||
# matching behaviour. (optional)
|
||||
Port: 0
|
||||
|
||||
# Path to serve requests on. (optional)
|
||||
# Path to serve requests on. Should end with a slash, related to cookie paths.
|
||||
# (optional)
|
||||
Path:
|
||||
|
||||
# If set, X-Forwarded-* headers are used for the remote IP address for rate
|
||||
@ -365,10 +404,12 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
WebmailHTTP:
|
||||
Enabled: false
|
||||
|
||||
# Default 80 for HTTP and 443 for HTTPS. (optional)
|
||||
# Default 80 for HTTP and 443 for HTTPS. See Hostname at Listener for hostname
|
||||
# matching behaviour. (optional)
|
||||
Port: 0
|
||||
|
||||
# Path to serve requests on. (optional)
|
||||
# Path to serve requests on. Should end with a slash, related to cookie paths.
|
||||
# (optional)
|
||||
Path:
|
||||
|
||||
# If set, X-Forwarded-* headers are used for the remote IP address for rate
|
||||
@ -380,10 +421,12 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
WebmailHTTPS:
|
||||
Enabled: false
|
||||
|
||||
# Default 80 for HTTP and 443 for HTTPS. (optional)
|
||||
# Default 80 for HTTP and 443 for HTTPS. See Hostname at Listener for hostname
|
||||
# matching behaviour. (optional)
|
||||
Port: 0
|
||||
|
||||
# Path to serve requests on. (optional)
|
||||
# Path to serve requests on. Should end with a slash, related to cookie paths.
|
||||
# (optional)
|
||||
Path:
|
||||
|
||||
# If set, X-Forwarded-* headers are used for the remote IP address for rate
|
||||
@ -394,10 +437,12 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
WebAPIHTTP:
|
||||
Enabled: false
|
||||
|
||||
# Default 80 for HTTP and 443 for HTTPS. (optional)
|
||||
# Default 80 for HTTP and 443 for HTTPS. See Hostname at Listener for hostname
|
||||
# matching behaviour. (optional)
|
||||
Port: 0
|
||||
|
||||
# Path to serve requests on. (optional)
|
||||
# Path to serve requests on. Should end with a slash, related to cookie paths.
|
||||
# (optional)
|
||||
Path:
|
||||
|
||||
# If set, X-Forwarded-* headers are used for the remote IP address for rate
|
||||
@ -409,10 +454,12 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
WebAPIHTTPS:
|
||||
Enabled: false
|
||||
|
||||
# Default 80 for HTTP and 443 for HTTPS. (optional)
|
||||
# Default 80 for HTTP and 443 for HTTPS. See Hostname at Listener for hostname
|
||||
# matching behaviour. (optional)
|
||||
Port: 0
|
||||
|
||||
# Path to serve requests on. (optional)
|
||||
# Path to serve requests on. Should end with a slash, related to cookie paths.
|
||||
# (optional)
|
||||
Path:
|
||||
|
||||
# If set, X-Forwarded-* headers are used for the remote IP address for rate
|
||||
@ -472,6 +519,9 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
# Port for plain HTTP (non-TLS) webserver. (optional)
|
||||
Port: 0
|
||||
|
||||
# Disable rate limiting for all requests to this port. (optional)
|
||||
RateLimitDisabled: false
|
||||
|
||||
# All configured WebHandlers will serve on an enabled listener. Either ACME must
|
||||
# be configured, or for each WebHandler domain a TLS certificate must be
|
||||
# configured. (optional)
|
||||
@ -481,6 +531,9 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
# Port for HTTPS webserver. (optional)
|
||||
Port: 0
|
||||
|
||||
# Disable rate limiting for all requests to this port. (optional)
|
||||
RateLimitDisabled: false
|
||||
|
||||
# Destination for emails delivered to postmaster addresses: a plain 'postmaster'
|
||||
# without domain, 'postmaster@<hostname>' (also for each listener with SMTP
|
||||
# enabled), and as fallback for each domain without explicitly configured
|
||||
@ -504,13 +557,13 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
# Mailbox to deliver TLS reports to. Recommended value: TLSRPT.
|
||||
Mailbox:
|
||||
|
||||
# Localpart at hostname to accept TLS reports at. Recommended value: tls-reports.
|
||||
# Localpart at hostname to accept TLS reports at. Recommended value: tlsreports.
|
||||
Localpart:
|
||||
|
||||
# Mailboxes to create for new accounts. Inbox is always created. Mailboxes can be
|
||||
# given a 'special-use' role, which are understood by most mail clients. If
|
||||
# absent/empty, the following mailboxes are created: Sent, Archive, Trash, Drafts
|
||||
# and Junk. (optional)
|
||||
# absent/empty, the following additional mailboxes are created: Sent, Archive,
|
||||
# Trash, Drafts and Junk. (optional)
|
||||
InitialMailboxes:
|
||||
|
||||
# Special-use roles to mailbox to create. (optional)
|
||||
@ -683,6 +736,16 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
# remote SMTP servers. (optional)
|
||||
DisableIPv6: false
|
||||
|
||||
# Immediately fails the delivery attempt. (optional)
|
||||
Fail:
|
||||
|
||||
# SMTP error code and optional enhanced error code to use for the failure. If
|
||||
# empty, 554 is used (transaction failed). (optional)
|
||||
SMTPCode: 0
|
||||
|
||||
# Message to include for the rejection. It will be shown in the DSN. (optional)
|
||||
SMTPMessage:
|
||||
|
||||
# Do not send DMARC reports (aggregate only). By default, aggregate reports on
|
||||
# DMARC evaluations are sent to domains if their DMARC policy requests them.
|
||||
# Reports are sent at whole hours, with a minimum of 1 hour and maximum of 24
|
||||
@ -726,6 +789,19 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
Domains:
|
||||
x:
|
||||
|
||||
# Disabled domains can be useful during/before migrations. Domains that are
|
||||
# disabled can still be configured like normal, including adding addresses using
|
||||
# the domain to accounts. However, disabled domains: 1. Do not try to fetch ACME
|
||||
# certificates. TLS connections to host names involving the email domain will
|
||||
# fail. A TLS certificate for the hostname (that will be used as MX) itself will be
|
||||
# requested. 2. Incoming deliveries over SMTP are rejected with a temporary error
|
||||
# '450 4.2.1 recipient domain temporarily disabled'. 3. Submissions over SMTP
|
||||
# using an (envelope) SMTP MAIL FROM address or message 'From' address of a
|
||||
# disabled domain will be rejected with a temporary error '451 4.3.0 sender domain
|
||||
# temporarily disabled'. Note that accounts with addresses at disabled domains can
|
||||
# still log in and read email (unless the account itself is disabled). (optional)
|
||||
Disabled: false
|
||||
|
||||
# Free-form description of domain. (optional)
|
||||
Description:
|
||||
|
||||
@ -742,6 +818,14 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
# delivered to you@example.com. (optional)
|
||||
LocalpartCatchallSeparator:
|
||||
|
||||
# Similar to LocalpartCatchallSeparator, but in case multiple are needed. For
|
||||
# example both "+" and "-". Only of one LocalpartCatchallSeparator or
|
||||
# LocalpartCatchallSeparators can be set. If set, the first separator is used to
|
||||
# make unique addresses for outgoing SMTP connections with FromIDLoginAddresses.
|
||||
# (optional)
|
||||
LocalpartCatchallSeparators:
|
||||
-
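As described above, addresses like you+anything@example.com are delivered to you@example.com; with multiple separators configured, the earliest separator found in the localpart cuts off the catchall suffix. A minimal sketch of that idea, assuming the separators are plain strings; not mox's actual canonicalization code.

package main

import (
	"fmt"
	"strings"
)

// stripCatchall removes everything from the first configured separator onward,
// so delivery can match the base address.
func stripCatchall(localpart string, separators []string) string {
	cut := len(localpart)
	for _, sep := range separators {
		if i := strings.Index(localpart, sep); i >= 0 && i < cut {
			cut = i
		}
	}
	return localpart[:cut]
}

func main() {
	seps := []string{"+", "-"}
	fmt.Println(stripCatchall("you+news", seps))    // you
	fmt.Println(stripCatchall("you-lists-a", seps)) // you
	fmt.Println(stripCatchall("you", seps))         // you
}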
|
||||
|
||||
# If set, upper/lower case is relevant for email delivery. (optional)
|
||||
LocalpartCaseSensitive: false
|
||||
|
||||
@ -801,7 +885,7 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
DMARC:
|
||||
|
||||
# Address-part before the @ that accepts DMARC reports. Must be
|
||||
# non-internationalized. Recommended value: dmarc-reports.
|
||||
# non-internationalized. Recommended value: dmarcreports.
|
||||
Localpart:
|
||||
|
||||
# Alternative domain for reporting address, for incoming reports. Typically empty,
|
||||
@ -869,7 +953,7 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
TLSRPT:
|
||||
|
||||
# Address-part before the @ that accepts TLSRPT reports. Recommended value:
|
||||
# tls-reports.
|
||||
# tlsreports.
|
||||
Localpart:
|
||||
|
||||
# Alternative domain for reporting address, for incoming reports. Typically empty,
|
||||
@ -994,6 +1078,12 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
# retire time. E.g. 168h (1 week). (optional)
|
||||
KeepRetiredWebhookPeriod: 0s
|
||||
|
||||
# If non-empty, login attempts on all protocols (e.g. SMTP/IMAP, web interfaces)
|
||||
# is rejected with this error message. Useful during migrations. Incoming
|
||||
# deliveries for addresses of this account are still accepted as normal.
|
||||
# (optional)
|
||||
LoginDisabled:
|
||||
|
||||
# Default domain for account. Deprecated behaviour: If a destination is not a full
|
||||
# address but only a localpart, this domain is added to form a full address.
|
||||
Domain:
|
||||
@ -1086,6 +1176,28 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
# Free-form comments. (optional)
|
||||
Comment:
|
||||
|
||||
# If non-empty, incoming delivery attempts to this destination will be rejected
|
||||
# during SMTP RCPT TO with this error response line. Useful when a catchall
|
||||
# address is configured for the domain and messages to some addresses should be
|
||||
# rejected. The response line must start with an error code. Currently the
|
||||
# following error response codes are allowed: 421 (temporary local error), 550
|
||||
# (user not found). If the line consists of only an error code, an appropriate
|
||||
# error message is added. Rejecting messages with a 4xx code invites later retries
|
||||
# by the remote, while 5xx codes should prevent further delivery attempts.
|
||||
# (optional)
|
||||
SMTPError:
|
||||
|
||||
# If non-empty, an additional DMARC-like message authentication check is done for
|
||||
# incoming messages, validating the domain in the From-header of the message.
|
||||
# Messages without either an aligned SPF or aligned DKIM pass are rejected during
|
||||
# the SMTP DATA command with a permanent error code followed by the message in
|
||||
# this field. The domain in the message 'From' header is matched in relaxed or
|
||||
# strict mode according to the domain's DMARC policy if present, or relaxed mode
|
||||
# (organizational instead of exact domain match) otherwise. Useful for
|
||||
# autoresponders that don't want to accept messages they don't want to send an
|
||||
# automated reply to. (optional)
|
||||
MessageAuthRequiredSMTPError:
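The relaxed-versus-strict distinction above is the usual DMARC alignment rule: strict requires an exact domain match, relaxed only the same organizational domain. A sketch of that check using a deliberately naive "last two labels" notion of organizational domain; real code consults the public suffix list, so treat this as an illustration only.

package main

import (
	"fmt"
	"strings"
)

// orgDomain is a naive organizational domain: the last two DNS labels.
func orgDomain(domain string) string {
	labels := strings.Split(domain, ".")
	if len(labels) <= 2 {
		return domain
	}
	return strings.Join(labels[len(labels)-2:], ".")
}

// aligned reports whether an authenticated domain (SPF or DKIM) is aligned
// with the From-header domain, in strict or relaxed mode.
func aligned(fromDomain, authDomain string, strict bool) bool {
	if strict {
		return fromDomain == authDomain
	}
	return orgDomain(fromDomain) == orgDomain(authDomain)
}

func main() {
	fmt.Println(aligned("example.org", "mail.example.org", false)) // true (relaxed)
	fmt.Println(aligned("example.org", "mail.example.org", true))  // false (strict)
}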
|
||||
|
||||
# Full name to use in message From header when composing messages coming from this
|
||||
# address with webmail. (optional)
|
||||
FullName:
|
||||
@ -1196,6 +1308,12 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
# responses and want instant replies. (optional)
|
||||
NoFirstTimeSenderDelay: false
|
||||
|
||||
# If set, this account cannot set a password of their own choice, but can only set
|
||||
# a new randomly generated password, preventing password reuse across services and
|
||||
# use of weak passwords. Custom account passwords can be set by the admin.
|
||||
# (optional)
|
||||
NoCustomPassword: false
|
||||
|
||||
# Routes for delivering outgoing messages through the queue. Each delivery attempt
|
||||
# evaluates these account routes, domain routes and finally global routes. The
|
||||
# transport of the first matching route is used in the delivery attempt. If no
|
||||
@ -1225,12 +1343,15 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
WebDomainRedirects:
|
||||
x:
|
||||
|
||||
# Handle webserver requests by serving static files, redirecting or
|
||||
# reverse-proxying HTTP(s). The first matching WebHandler will handle the request.
|
||||
# Built-in handlers, e.g. for account, admin, autoconfig and mta-sts always run
|
||||
# first. If no handler matches, the response status code is file not found (404).
|
||||
# If functionality you need is missng, simply forward the requests to an
|
||||
# application that can provide the needed functionality. (optional)
|
||||
# Handle webserver requests by serving static files, redirecting, reverse-proxying
|
||||
# HTTP(s) or passing the request to an internal service. The first matching
|
||||
# WebHandler will handle the request. Built-in system handlers, e.g. for ACME
|
||||
# validation, autoconfig and mta-sts always run first. Built-in handlers for
|
||||
# admin, account, webmail and webapi are evaluated after all handlers, including
|
||||
# webhandlers (allowing for overrides of internal services for some domains). If
|
||||
# no handler matches, the response status code is file not found (404). If
|
||||
# webserver features are missing, forward the requests to an application that
|
||||
# provides the needed functionality itself. (optional)
|
||||
WebHandlers:
|
||||
-
|
||||
|
||||
@ -1238,7 +1359,7 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
LogName:
|
||||
|
||||
# Both Domain and PathRegexp must match for this WebHandler to match a request.
|
||||
# Exactly one of WebStatic, WebRedirect, WebForward must be set.
|
||||
# Exactly one of WebStatic, WebRedirect, WebForward, WebInternal must be set.
|
||||
Domain:
|
||||
|
||||
# Regular expression matched against request path, must always start with ^ to
|
||||
@ -1345,6 +1466,15 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
ResponseHeaders:
|
||||
x:
|
||||
|
||||
# Pass request to internal service, like webmail, webapi, etc. (optional)
|
||||
WebInternal:
|
||||
|
||||
# Path to use as root of internal service, e.g. /webmail/.
|
||||
BasePath:
|
||||
|
||||
# Name of the service, values: admin, account, webmail, webapi.
|
||||
Service:
|
||||
|
||||
# Routes for delivering outgoing messages through the queue. Each delivery attempt
|
||||
# evaluates account routes, domain routes and finally these global routes. The
|
||||
# transport of the first matching route is used in the delivery attempt. If no
|
||||
|
410 ctl_test.go
@ -4,8 +4,13 @@ package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/ed25519"
|
||||
cryptorand "crypto/rand"
|
||||
"crypto/x509"
|
||||
"flag"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"math/big"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
@ -15,6 +20,7 @@ import (
|
||||
"github.com/mjl-/mox/config"
|
||||
"github.com/mjl-/mox/dmarcdb"
|
||||
"github.com/mjl-/mox/dns"
|
||||
"github.com/mjl-/mox/imapclient"
|
||||
"github.com/mjl-/mox/mlog"
|
||||
"github.com/mjl-/mox/mox-"
|
||||
"github.com/mjl-/mox/mtastsdb"
|
||||
@ -39,58 +45,70 @@ func tcheck(t *testing.T, err error, errmsg string) {
|
||||
// unhandled errors would cause a panic.
|
||||
func TestCtl(t *testing.T) {
|
||||
os.RemoveAll("testdata/ctl/data")
|
||||
mox.ConfigStaticPath = filepath.FromSlash("testdata/ctl/mox.conf")
|
||||
mox.ConfigDynamicPath = filepath.FromSlash("testdata/ctl/domains.conf")
|
||||
mox.ConfigStaticPath = filepath.FromSlash("testdata/ctl/config/mox.conf")
|
||||
mox.ConfigDynamicPath = filepath.FromSlash("testdata/ctl/config/domains.conf")
|
||||
if errs := mox.LoadConfig(ctxbg, pkglog, true, false); len(errs) > 0 {
|
||||
t.Fatalf("loading mox config: %v", errs)
|
||||
}
|
||||
err := store.Init(ctxbg)
|
||||
tcheck(t, err, "store init")
|
||||
defer store.Close()
|
||||
defer store.Switchboard()()
|
||||
|
||||
err := queue.Init()
|
||||
err = queue.Init()
|
||||
tcheck(t, err, "queue init")
|
||||
defer queue.Shutdown()
|
||||
|
||||
testctl := func(fn func(clientctl *ctl)) {
|
||||
var cid int64
|
||||
|
||||
testctl := func(fn func(clientxctl *ctl)) {
|
||||
t.Helper()
|
||||
|
||||
cconn, sconn := net.Pipe()
|
||||
clientctl := ctl{conn: cconn, log: pkglog}
|
||||
serverctl := ctl{conn: sconn, log: pkglog}
|
||||
go servectlcmd(ctxbg, &serverctl, func() {})
|
||||
fn(&clientctl)
|
||||
clientxctl := ctl{conn: cconn, log: pkglog}
|
||||
serverxctl := ctl{conn: sconn, log: pkglog}
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
cid++
|
||||
servectlcmd(ctxbg, &serverxctl, cid, func() {})
|
||||
close(done)
|
||||
}()
|
||||
fn(&clientxctl)
|
||||
cconn.Close()
|
||||
<-done
|
||||
sconn.Close()
|
||||
}
|
||||
|
||||
// "deliver"
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdDeliver(ctl, "mjl@mox.example")
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdDeliver(xctl, "mjl@mox.example")
|
||||
})
|
||||
|
||||
// "setaccountpassword"
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdSetaccountpassword(ctl, "mjl", "test4321")
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdSetaccountpassword(xctl, "mjl", "test4321")
|
||||
})
|
||||
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdQueueHoldrulesList(ctl)
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHoldrulesList(xctl)
|
||||
})
|
||||
|
||||
// All messages.
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdQueueHoldrulesAdd(ctl, "", "", "")
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHoldrulesAdd(xctl, "", "", "")
|
||||
})
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdQueueHoldrulesAdd(ctl, "mjl", "", "")
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHoldrulesAdd(xctl, "mjl", "", "")
|
||||
})
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdQueueHoldrulesAdd(ctl, "", "☺.mox.example", "")
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHoldrulesAdd(xctl, "", "☺.mox.example", "")
|
||||
})
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdQueueHoldrulesAdd(ctl, "mox", "☺.mox.example", "example.com")
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHoldrulesAdd(xctl, "mox", "☺.mox.example", "example.com")
|
||||
})
|
||||
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdQueueHoldrulesRemove(ctl, 1)
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHoldrulesRemove(xctl, 1)
|
||||
})
|
||||
|
||||
// Queue a message to list/change/dump.
|
||||
@ -110,262 +128,321 @@ func TestCtl(t *testing.T) {
|
||||
qmid := qml[0].ID
|
||||
|
||||
// Has entries now.
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdQueueHoldrulesList(ctl)
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHoldrulesList(xctl)
|
||||
})
|
||||
|
||||
// "queuelist"
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdQueueList(ctl, queue.Filter{}, queue.Sort{})
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueList(xctl, queue.Filter{}, queue.Sort{})
|
||||
})
|
||||
|
||||
// "queueholdset"
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdQueueHoldSet(ctl, queue.Filter{}, true)
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHoldSet(xctl, queue.Filter{}, true)
|
||||
})
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdQueueHoldSet(ctl, queue.Filter{}, false)
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHoldSet(xctl, queue.Filter{}, false)
|
||||
})
|
||||
|
||||
// "queueschedule"
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdQueueSchedule(ctl, queue.Filter{}, true, time.Minute)
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueSchedule(xctl, queue.Filter{}, true, time.Minute)
|
||||
})
|
||||
|
||||
// "queuetransport"
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdQueueTransport(ctl, queue.Filter{}, "socks")
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueTransport(xctl, queue.Filter{}, "socks")
|
||||
})
|
||||
|
||||
// "queuerequiretls"
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdQueueRequireTLS(ctl, queue.Filter{}, nil)
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueRequireTLS(xctl, queue.Filter{}, nil)
|
||||
})
|
||||
|
||||
// "queuedump"
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdQueueDump(ctl, fmt.Sprintf("%d", qmid))
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueDump(xctl, fmt.Sprintf("%d", qmid))
|
||||
})
|
||||
|
||||
// "queuefail"
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdQueueFail(ctl, queue.Filter{})
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueFail(xctl, queue.Filter{})
|
||||
})
|
||||
|
||||
// "queuedrop"
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdQueueDrop(ctl, queue.Filter{})
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueDrop(xctl, queue.Filter{})
|
||||
})
|
||||
|
||||
// "queueholdruleslist"
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdQueueHoldrulesList(ctl)
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHoldrulesList(xctl)
|
||||
})
|
||||
|
||||
// "queueholdrulesadd"
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdQueueHoldrulesAdd(ctl, "mjl", "", "")
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHoldrulesAdd(xctl, "mjl", "", "")
|
||||
})
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdQueueHoldrulesAdd(ctl, "mjl", "localhost", "")
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHoldrulesAdd(xctl, "mjl", "localhost", "")
|
||||
})
|
||||
|
||||
// "queueholdrulesremove"
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdQueueHoldrulesRemove(ctl, 2)
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHoldrulesRemove(xctl, 2)
|
||||
})
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdQueueHoldrulesList(ctl)
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHoldrulesList(xctl)
|
||||
})
|
||||
|
||||
// "queuesuppresslist"
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdQueueSuppressList(ctl, "mjl")
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueSuppressList(xctl, "mjl")
|
||||
})
|
||||
|
||||
// "queuesuppressadd"
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdQueueSuppressAdd(ctl, "mjl", "base@localhost")
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueSuppressAdd(xctl, "mjl", "base@localhost")
|
||||
})
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdQueueSuppressAdd(ctl, "mjl", "other@localhost")
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueSuppressAdd(xctl, "mjl", "other@localhost")
|
||||
})
|
||||
|
||||
// "queuesuppresslookup"
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdQueueSuppressLookup(ctl, "mjl", "base@localhost")
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueSuppressLookup(xctl, "mjl", "base@localhost")
|
||||
})
|
||||
|
||||
// "queuesuppressremove"
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdQueueSuppressRemove(ctl, "mjl", "base@localhost")
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueSuppressRemove(xctl, "mjl", "base@localhost")
|
||||
})
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdQueueSuppressList(ctl, "mjl")
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueSuppressList(xctl, "mjl")
|
||||
})
|
||||
|
||||
// "queueretiredlist"
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdQueueRetiredList(ctl, queue.RetiredFilter{}, queue.RetiredSort{})
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueRetiredList(xctl, queue.RetiredFilter{}, queue.RetiredSort{})
|
||||
})
|
||||
|
||||
// "queueretiredprint"
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdQueueRetiredPrint(ctl, "1")
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueRetiredPrint(xctl, "1")
|
||||
})
|
||||
|
||||
// "queuehooklist"
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdQueueHookList(ctl, queue.HookFilter{}, queue.HookSort{})
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHookList(xctl, queue.HookFilter{}, queue.HookSort{})
|
||||
})
|
||||
|
||||
// "queuehookschedule"
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdQueueHookSchedule(ctl, queue.HookFilter{}, true, time.Minute)
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHookSchedule(xctl, queue.HookFilter{}, true, time.Minute)
|
||||
})
|
||||
|
||||
// "queuehookprint"
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdQueueHookPrint(ctl, "1")
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHookPrint(xctl, "1")
|
||||
})
|
||||
|
||||
// "queuehookcancel"
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdQueueHookCancel(ctl, queue.HookFilter{})
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHookCancel(xctl, queue.HookFilter{})
|
||||
})
|
||||
|
||||
// "queuehookretiredlist"
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdQueueHookRetiredList(ctl, queue.HookRetiredFilter{}, queue.HookRetiredSort{})
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHookRetiredList(xctl, queue.HookRetiredFilter{}, queue.HookRetiredSort{})
|
||||
})
|
||||
|
||||
// "queuehookretiredprint"
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdQueueHookRetiredPrint(ctl, "1")
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHookRetiredPrint(xctl, "1")
|
||||
})
|
||||
|
||||
// "importmbox"
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdImport(ctl, true, "mjl", "inbox", "testdata/importtest.mbox")
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdImport(xctl, true, "mjl", "inbox", "testdata/importtest.mbox")
|
||||
})
|
||||
|
||||
// "importmaildir"
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdImport(ctl, false, "mjl", "inbox", "testdata/importtest.maildir")
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdImport(xctl, false, "mjl", "inbox", "testdata/importtest.maildir")
|
||||
})
|
||||
|
||||
// "domainadd"
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdConfigDomainAdd(ctl, dns.Domain{ASCII: "mox2.example"}, "mjl", "")
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigDomainAdd(xctl, false, dns.Domain{ASCII: "mox2.example"}, "mjl", "")
|
||||
})
|
||||
|
||||
// "accountadd"
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdConfigAccountAdd(ctl, "mjl2", "mjl2@mox2.example")
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigAccountAdd(xctl, "mjl2", "mjl2@mox2.example")
|
||||
})
|
||||
|
||||
// "addressadd"
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdConfigAddressAdd(ctl, "mjl3@mox2.example", "mjl2")
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigAddressAdd(xctl, "mjl3@mox2.example", "mjl2")
|
||||
})
|
||||
|
||||
// Add a message.
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdDeliver(ctl, "mjl3@mox2.example")
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdDeliver(xctl, "mjl3@mox2.example")
|
||||
})
|
||||
// "retrain", retrain junk filter.
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdRetrain(ctl, "mjl2")
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdRetrain(xctl, "mjl2")
|
||||
})
|
||||
|
||||
// "addressrm"
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdConfigAddressRemove(ctl, "mjl3@mox2.example")
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigAddressRemove(xctl, "mjl3@mox2.example")
|
||||
})
|
||||
|
||||
// "accountdisabled"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigAccountDisabled(xctl, "mjl2", "testing")
|
||||
})
|
||||
|
||||
// "accountlist"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigAccountList(xctl)
|
||||
})
|
||||
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigAccountDisabled(xctl, "mjl2", "")
|
||||
})
|
||||
|
||||
// "accountrm"
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdConfigAccountRemove(ctl, "mjl2")
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigAccountRemove(xctl, "mjl2")
|
||||
})
|
||||
|
||||
// "domaindisabled"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigDomainDisabled(xctl, dns.Domain{ASCII: "mox2.example"}, true)
|
||||
})
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigDomainDisabled(xctl, dns.Domain{ASCII: "mox2.example"}, false)
|
||||
})
|
||||
|
||||
// "domainrm"
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdConfigDomainRemove(ctl, dns.Domain{ASCII: "mox2.example"})
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigDomainRemove(xctl, dns.Domain{ASCII: "mox2.example"})
|
||||
})
|
||||
|
||||
// "aliasadd"
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdConfigAliasAdd(ctl, "support@mox.example", config.Alias{Addresses: []string{"mjl@mox.example"}})
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigAliasAdd(xctl, "support@mox.example", config.Alias{Addresses: []string{"mjl@mox.example"}})
|
||||
})
|
||||
|
||||
// "aliaslist"
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdConfigAliasList(ctl, "mox.example")
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigAliasList(xctl, "mox.example")
|
||||
})
|
||||
|
||||
// "aliasprint"
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdConfigAliasPrint(ctl, "support@mox.example")
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigAliasPrint(xctl, "support@mox.example")
|
||||
})
|
||||
|
||||
// "aliasupdate"
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdConfigAliasUpdate(ctl, "support@mox.example", "true", "true", "true")
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigAliasUpdate(xctl, "support@mox.example", "true", "true", "true")
|
||||
})
|
||||
|
||||
// "aliasaddaddr"
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdConfigAliasAddaddr(ctl, "support@mox.example", []string{"mjl2@mox.example"})
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigAliasAddaddr(xctl, "support@mox.example", []string{"mjl2@mox.example"})
|
||||
})
|
||||
|
||||
// "aliasrmaddr"
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdConfigAliasRmaddr(ctl, "support@mox.example", []string{"mjl2@mox.example"})
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigAliasRmaddr(xctl, "support@mox.example", []string{"mjl2@mox.example"})
|
||||
})
|
||||
|
||||
// "aliasrm"
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdConfigAliasRemove(ctl, "support@mox.example")
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigAliasRemove(xctl, "support@mox.example")
|
||||
})
|
||||
|
||||
// accounttlspubkeyadd
|
||||
certDER := fakeCert(t)
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigTlspubkeyAdd(xctl, "mjl@mox.example", "testkey", false, certDER)
|
||||
})
|
||||
|
||||
// "accounttlspubkeylist"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigTlspubkeyList(xctl, "")
|
||||
})
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigTlspubkeyList(xctl, "mjl")
|
||||
})
|
||||
|
||||
tpkl, err := store.TLSPublicKeyList(ctxbg, "")
|
||||
tcheck(t, err, "list tls public keys")
|
||||
if len(tpkl) != 1 {
|
||||
t.Fatalf("got %d tls public keys, expected 1", len(tpkl))
|
||||
}
|
||||
fingerprint := tpkl[0].Fingerprint
|
||||
|
||||
// "accounttlspubkeyget"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigTlspubkeyGet(xctl, fingerprint)
|
||||
})
|
||||
|
||||
// "accounttlspubkeyrm"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigTlspubkeyRemove(xctl, fingerprint)
|
||||
})
|
||||
|
||||
tpkl, err = store.TLSPublicKeyList(ctxbg, "")
|
||||
tcheck(t, err, "list tls public keys")
|
||||
if len(tpkl) != 0 {
|
||||
t.Fatalf("got %d tls public keys, expected 0", len(tpkl))
|
||||
}
|
||||
|
||||
// "loglevels"
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdLoglevels(ctl)
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdLoglevels(xctl)
|
||||
})
|
||||
|
||||
// "setloglevels"
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdSetLoglevels(ctl, "", "debug")
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdSetLoglevels(xctl, "", "debug")
|
||||
})
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdSetLoglevels(ctl, "smtpserver", "debug")
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdSetLoglevels(xctl, "smtpserver", "debug")
|
||||
})
|
||||
|
||||
// Export data, import it again
|
||||
xcmdExport(true, false, []string{filepath.FromSlash("testdata/ctl/data/tmp/export/mbox/"), filepath.FromSlash("testdata/ctl/data/accounts/mjl")}, &cmd{log: pkglog})
|
||||
xcmdExport(false, false, []string{filepath.FromSlash("testdata/ctl/data/tmp/export/maildir/"), filepath.FromSlash("testdata/ctl/data/accounts/mjl")}, &cmd{log: pkglog})
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdImport(ctl, true, "mjl", "inbox", filepath.FromSlash("testdata/ctl/data/tmp/export/mbox/Inbox.mbox"))
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdImport(xctl, true, "mjl", "inbox", filepath.FromSlash("testdata/ctl/data/tmp/export/mbox/Inbox.mbox"))
|
||||
})
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdImport(ctl, false, "mjl", "inbox", filepath.FromSlash("testdata/ctl/data/tmp/export/maildir/Inbox"))
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdImport(xctl, false, "mjl", "inbox", filepath.FromSlash("testdata/ctl/data/tmp/export/maildir/Inbox"))
|
||||
})
|
||||
|
||||
// "recalculatemailboxcounts"
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdRecalculateMailboxCounts(ctl, "mjl")
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdRecalculateMailboxCounts(xctl, "mjl")
|
||||
})
|
||||
|
||||
// "fixmsgsize"
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdFixmsgsize(ctl, "mjl")
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdFixmsgsize(xctl, "mjl")
|
||||
})
|
||||
testctl(func(ctl *ctl) {
|
||||
acc, err := store.OpenAccount(ctl.log, "mjl")
|
||||
testctl(func(xctl *ctl) {
|
||||
acc, err := store.OpenAccount(xctl.log, "mjl", false)
|
||||
tcheck(t, err, "open account")
|
||||
defer func() {
|
||||
acc.Close()
|
||||
acc.CheckClosed()
|
||||
acc.WaitClosed()
|
||||
}()
|
||||
|
||||
content := []byte("Subject: hi\r\n\r\nbody\r\n")
|
||||
@ -373,14 +450,17 @@ func TestCtl(t *testing.T) {
|
||||
deliver := func(m *store.Message) {
|
||||
t.Helper()
|
||||
m.Size = int64(len(content))
|
||||
msgf, err := store.CreateMessageTemp(ctl.log, "ctltest")
|
||||
msgf, err := store.CreateMessageTemp(xctl.log, "ctltest")
|
||||
tcheck(t, err, "create temp file")
|
||||
defer os.Remove(msgf.Name())
|
||||
defer msgf.Close()
|
||||
_, err = msgf.Write(content)
|
||||
tcheck(t, err, "write message file")
|
||||
err = acc.DeliverMailbox(ctl.log, "Inbox", m, msgf)
|
||||
tcheck(t, err, "deliver message")
|
||||
|
||||
acc.WithWLock(func() {
|
||||
err = acc.DeliverMailbox(xctl.log, "Inbox", m, msgf)
|
||||
tcheck(t, err, "deliver message")
|
||||
})
|
||||
}
|
||||
|
||||
var msgBadSize store.Message
|
||||
@ -398,7 +478,7 @@ func TestCtl(t *testing.T) {
|
||||
tcheck(t, err, "update mailbox size")
|
||||
|
||||
// Fix up the size.
|
||||
ctlcmdFixmsgsize(ctl, "")
|
||||
ctlcmdFixmsgsize(xctl, "")
|
||||
|
||||
err = acc.DB.Get(ctxbg, &msgBadSize)
|
||||
tcheck(t, err, "get message")
|
||||
@ -408,39 +488,71 @@ func TestCtl(t *testing.T) {
|
||||
})
|
||||
|
||||
// "reparse"
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdReparse(ctl, "mjl")
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdReparse(xctl, "mjl")
|
||||
})
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdReparse(ctl, "")
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdReparse(xctl, "")
|
||||
})
|
||||
|
||||
// "reassignthreads"
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdReassignthreads(ctl, "mjl")
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdReassignthreads(xctl, "mjl")
|
||||
})
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdReassignthreads(ctl, "")
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdReassignthreads(xctl, "")
|
||||
})
|
||||
|
||||
// "backup", backup account.
|
||||
err = dmarcdb.Init()
|
||||
tcheck(t, err, "dmarcdb init")
|
||||
defer dmarcdb.Close()
|
||||
err = mtastsdb.Init(false)
|
||||
tcheck(t, err, "mtastsdb init")
|
||||
defer mtastsdb.Close()
|
||||
err = tlsrptdb.Init()
|
||||
tcheck(t, err, "tlsrptdb init")
|
||||
testctl(func(ctl *ctl) {
|
||||
os.RemoveAll("testdata/ctl/data/tmp/backup-data")
|
||||
defer tlsrptdb.Close()
|
||||
testctl(func(xctl *ctl) {
|
||||
os.RemoveAll("testdata/ctl/data/tmp/backup")
|
||||
err := os.WriteFile("testdata/ctl/data/receivedid.key", make([]byte, 16), 0600)
|
||||
tcheck(t, err, "writing receivedid.key")
|
||||
ctlcmdBackup(ctl, filepath.FromSlash("testdata/ctl/data/tmp/backup-data"), false)
|
||||
ctlcmdBackup(xctl, filepath.FromSlash("testdata/ctl/data/tmp/backup"), false)
|
||||
})
|
||||
|
||||
// Verify the backup.
|
||||
xcmd := cmd{
|
||||
flag: flag.NewFlagSet("", flag.ExitOnError),
|
||||
flagArgs: []string{filepath.FromSlash("testdata/ctl/data/tmp/backup-data")},
|
||||
flagArgs: []string{filepath.FromSlash("testdata/ctl/data/tmp/backup/data")},
|
||||
}
|
||||
cmdVerifydata(&xcmd)
|
||||
|
||||
// IMAP connection.
|
||||
testctl(func(xctl *ctl) {
|
||||
a, b := net.Pipe()
|
||||
go func() {
|
||||
opts := imapclient.Opts{
|
||||
Logger: slog.Default().With("cid", mox.Cid()),
|
||||
Error: func(err error) { panic(err) },
|
||||
}
|
||||
client, err := imapclient.New(a, &opts)
|
||||
tcheck(t, err, "new imapclient")
|
||||
client.Select("inbox")
|
||||
client.Logout()
|
||||
defer a.Close()
|
||||
}()
|
||||
ctlcmdIMAPServe(xctl, "mjl@mox.example", b, b)
|
||||
})
|
||||
}
|
||||
|
||||
func fakeCert(t *testing.T) []byte {
|
||||
t.Helper()
|
||||
seed := make([]byte, ed25519.SeedSize)
|
||||
privKey := ed25519.NewKeyFromSeed(seed) // Fake key, don't use this for real!
|
||||
template := &x509.Certificate{
|
||||
SerialNumber: big.NewInt(1), // Required field...
|
||||
}
|
||||
localCertBuf, err := x509.CreateCertificate(cryptorand.Reader, template, template, privKey.Public(), privKey)
|
||||
tcheck(t, err, "making certificate")
|
||||
return localCertBuf
|
||||
}
|
||||
|
14 curves.go (Normal file)
@ -0,0 +1,14 @@
//go:build !go1.24

package main

import (
	"crypto/tls"
)

var curvesList = []tls.CurveID{
	tls.CurveP256,
	tls.CurveP384,
	tls.CurveP521,
	tls.X25519,
}
15 curves_go124.go (Normal file)
@ -0,0 +1,15 @@
//go:build go1.24

package main

import (
	"crypto/tls"
)

var curvesList = []tls.CurveID{
	tls.CurveP256,
	tls.CurveP384,
	tls.CurveP521,
	tls.X25519,
	tls.X25519MLKEM768,
}
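The two files differ only in the build tag and the post-quantum hybrid curve available from Go 1.24 onward. A short sketch of how such a list is typically plugged into a tls.Config; the local variable here stands in for whichever curves.go variant gets compiled in.

package main

import "crypto/tls"

func main() {
	// Stand-in for the build-tag-selected curvesList above (pre-1.24 variant).
	curvesList := []tls.CurveID{tls.CurveP256, tls.CurveP384, tls.CurveP521, tls.X25519}

	cfg := &tls.Config{
		MinVersion:       tls.VersionTLS12,
		CurvePreferences: curvesList,
	}
	_ = cfg
}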
20 dane/dane.go
@ -65,6 +65,7 @@ import (
|
||||
"github.com/mjl-/mox/dns"
|
||||
"github.com/mjl-/mox/mlog"
|
||||
"github.com/mjl-/mox/stub"
|
||||
"slices"
|
||||
)
|
||||
|
||||
var (
|
||||
@ -214,12 +215,9 @@ func Dial(ctx context.Context, elog *slog.Logger, resolver dns.Resolver, network
|
||||
if allowedUsages != nil {
|
||||
o := 0
|
||||
for _, r := range records {
|
||||
for _, usage := range allowedUsages {
|
||||
if r.Usage == usage {
|
||||
records[o] = r
|
||||
o++
|
||||
break
|
||||
}
|
||||
if slices.Contains(allowedUsages, r.Usage) {
|
||||
records[o] = r
|
||||
o++
|
||||
}
|
||||
}
|
||||
records = records[:o]
|
||||
@ -263,7 +261,8 @@ func Dial(ctx context.Context, elog *slog.Logger, resolver dns.Resolver, network
|
||||
config := TLSClientConfig(log.Logger, records, baseDom, moreAllowedHosts, &verifiedRecord, pkixRoots)
|
||||
tlsConn := tls.Client(conn, &config)
|
||||
if err := tlsConn.HandshakeContext(ctx); err != nil {
|
||||
conn.Close()
|
||||
xerr := conn.Close()
|
||||
log.Check(xerr, "closing connection")
|
||||
return nil, adns.TLSA{}, err
|
||||
}
|
||||
return tlsConn, verifiedRecord, nil
|
||||
@ -448,7 +447,8 @@ func verifySingle(log mlog.Log, tlsa adns.TLSA, cs tls.ConnectionState, allowedH
|
||||
// We set roots, so the system defaults don't get used. Verify checks the host name
|
||||
// (set below) and checks for expiration.
|
||||
opts := x509.VerifyOptions{
|
||||
Roots: x509.NewCertPool(),
|
||||
Intermediates: x509.NewCertPool(),
|
||||
Roots: x509.NewCertPool(),
|
||||
}
|
||||
|
||||
// If the full certificate was included, we must add it to the valid roots, the TLS
|
||||
@ -465,11 +465,13 @@ func verifySingle(log mlog.Log, tlsa adns.TLSA, cs tls.ConnectionState, allowedH
|
||||
}
|
||||
}
|
||||
|
||||
for _, cert := range cs.PeerCertificates {
|
||||
for i, cert := range cs.PeerCertificates {
|
||||
if match(cert) {
|
||||
opts.Roots.AddCert(cert)
|
||||
found = true
|
||||
break
|
||||
} else if i > 0 {
|
||||
opts.Intermediates.AddCert(cert)
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
|
@ -12,7 +12,6 @@ import (
|
||||
"crypto/x509/pkix"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"math/big"
|
||||
"net"
|
||||
"reflect"
|
||||
@ -36,7 +35,6 @@ func tcheckf(t *testing.T, err error, format string, args ...any) {
|
||||
|
||||
// Test dialing and DANE TLS verification.
|
||||
func TestDial(t *testing.T) {
|
||||
mlog.SetConfig(map[string]slog.Level{"": mlog.LevelDebug})
|
||||
log := mlog.New("dane", nil)
|
||||
|
||||
// Create fake CA/trusted-anchor certificate.
|
||||
|
40	develop.txt
@ -13,11 +13,12 @@ code paths are reachable/testable with mox localserve, but some use cases will
require a full setup.

Before committing, run at least "make fmt" and "make check" (which requires
staticcheck, run "make install-staticcheck" once). Also run "make check-shadow"
and fix any shadowed variables other than "err" (which are filtered out, but
causes the command to always exit with an error code; run "make install-shadow"
once to install the shadow command). If you've updated RFC references, run
"make" in rfc/, it verifies the referenced files exist.
staticcheck and ineffassign, run "make install-staticcheck install-ineffassign"
once). Also run "make check-shadow" and fix any shadowed variables other than
"err" (which are filtered out, but causes the command to always exit with an
error code; run "make install-shadow" once to install the shadow command). If
you've updated RFC references, run "make" in rfc/, it verifies the referenced
files exist.

When making changes to the public API of a package listed in
apidiff/packages.txt, run "make genapidiff" to update the list of changes in
@ -46,6 +47,18 @@ instructions below.
standard slog package for logging, not our mlog package. Packages not intended
for reuse do use mlog as it is more convenient. Internally, we always use
mlog.Log to do the logging, wrapping an slog.Logger.
- The code uses panic for error handling in quite a few places, including
smtpserver, imapserver and web API calls. Functions/methods, variables, struct
fields and types that begin with an "x" indicate they can panic on errors. Both
for i/o errors that are fatal for a connection, and also often for user-induced
errors, for example bad IMAP commands or invalid web API requests. These panics
are caught again at the top of a command or top of the connection. Write code
that is panic-safe, using defer to clean up and release resources.
- Try to check all errors, at the minimum using mlog.Log.Check() to log an error
at the appropriate level. Also when just closing a file. Log messages sometimes
unexpectedly point out latent issues. Only when there is no point in logging,
for example when previous writes to stderr failed, can error logging be skipped.
Test code is less strict about checking errors.
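The two conventions above (x-prefixed panicking helpers, and mlog.Log.Check for errors that are only worth logging) look roughly like the sketch below. The helper name, the recover location and the surrounding function are illustrative, not copied from mox:

package example

import (
	"fmt"
	"log/slog"
	"os"

	"github.com/mjl-/mox/mlog"
)

// xcheckf panics on error. The panic is recovered at the top of the
// command/connection handler. Illustrative only, not mox's actual helper.
func xcheckf(err error, format string, args ...any) {
	if err != nil {
		panic(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err))
	}
}

func handleCommand(log mlog.Log, f *os.File) {
	defer func() {
		// Release resources and log close errors even when an x-function panicked.
		err := f.Close()
		log.Check(err, "closing file")

		if x := recover(); x != nil {
			log.Error("command failed", slog.Any("panic", x))
		}
	}()

	buf := make([]byte, 128)
	_, err := f.Read(buf)
	xcheckf(err, "reading command")
}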
# Reusable packages
@ -101,10 +114,10 @@ Large files (images/videos) are in https://github.com/mjl-/mox-website-files to
keep the repository reasonably sized.

The public website may serve the content from the "website" branch. After a
release release, the main branch (with latest development code and
corresponding changes to the website about new features) is merged into the
website branch. Commits to the website branch (e.g. for a news item, or any
other change unrelated to a new release) is merged back into the main branch.
release, the main branch (with latest development code and corresponding
changes to the website about new features) is merged into the website branch.
Commits to the website branch (e.g. for a news item, or any other change
unrelated to a new release) is merged back into the main branch.


# TLS certificates
@ -303,12 +316,13 @@ done

- Gather feedback on recent changes.
- Check if dependencies need updates.
- Update to latest publicsuffix/ list.
- Check code if there are deprecated features that can be removed.
- Generate apidiff and check if breaking changes can be prevented. Update moxtools.
- Update features & roadmap in README.md and website.
- Write release notes, copy from previous.
- Build and run tests with previous major Go release.
- Run tests, including with race detector, also with TZ= for UTC-behaviour.
- Build and run tests with previous major Go release, run "make docker-release" to test building images.
- Run tests, including with race detector, also with TZ= for UTC-behaviour, and with -count 2.
- Run integration and upgrade tests.
- Run fuzzing tests for a while.
- Deploy to test environment. Test the update instructions.
@ -320,8 +334,7 @@ done
- Move apidiff/next.txt to apidiff/<version>.txt, and create empty next.txt.
- Add release to the Latest release & News sections of website/index.md.
- Create git tag (note: "#" is comment, not title/header), push code.
- Publish new docker image.
- Publish signed release notes for updates.xmox.nl and update DNS record.
- Build and publish new docker image.
- Deploy update to website.
- Create new release on the github page, so watchers get a notification.
Copy/paste it manually from the tag text, and add link to download/compile
@ -329,3 +342,4 @@ done
- Publish new cross-referenced code/rfc to www.xmox.nl/xr/.
- Update moxtools with latest version.
- Update implementations support matrix.
- Publish signed release notes for updates.xmox.nl and update DNS record.
|
@ -31,6 +31,7 @@ import (
|
||||
"github.com/mjl-/mox/publicsuffix"
|
||||
"github.com/mjl-/mox/smtp"
|
||||
"github.com/mjl-/mox/stub"
|
||||
"slices"
|
||||
)
|
||||
|
||||
// If set, signatures for top-level domain "localhost" are accepted.
|
||||
@ -173,7 +174,7 @@ func Sign(ctx context.Context, elog *slog.Logger, localpart smtp.Localpart, doma
|
||||
sig.Domain = domain
|
||||
sig.Selector = sel.Domain
|
||||
sig.Identity = &Identity{&localpart, domain}
|
||||
sig.SignedHeaders = append([]string{}, sel.Headers...)
|
||||
sig.SignedHeaders = slices.Clone(sel.Headers)
|
||||
if sel.SealHeaders {
|
||||
// ../rfc/6376:2156
|
||||
// Each time a header name is added to the signature, the next unused value is
|
||||
@ -548,7 +549,7 @@ func verifySignatureRecord(r *Record, sig *Sig, hash crypto.Hash, canonHeaderSim
|
||||
if r.PublicKey == nil {
|
||||
return StatusPermerror, ErrKeyRevoked
|
||||
} else if rsaKey, ok := r.PublicKey.(*rsa.PublicKey); ok && rsaKey.N.BitLen() < 1024 {
|
||||
// todo: find a reference that supports this.
|
||||
// ../rfc/8301:157
|
||||
return StatusPermerror, ErrWeakKey
|
||||
}
|
||||
|
||||
@ -839,8 +840,8 @@ func parseHeaders(br *bufio.Reader) ([]header, int, error) {
|
||||
return nil, 0, fmt.Errorf("empty header key")
|
||||
}
|
||||
lkey = strings.ToLower(key)
|
||||
value = append([]byte{}, t[1]...)
|
||||
raw = append([]byte{}, line...)
|
||||
value = slices.Clone(t[1])
|
||||
raw = slices.Clone(line)
|
||||
}
|
||||
if key != "" {
|
||||
l = append(l, header{key, lkey, value, raw})
|
||||
|
@ -117,7 +117,7 @@ func (s *Sig) Header() (string, error) {
|
||||
} else if i == len(s.SignedHeaders)-1 {
|
||||
v += ";"
|
||||
}
|
||||
w.Addf(sep, v)
|
||||
w.Addf(sep, "%s", v)
|
||||
}
|
||||
}
|
||||
if len(s.CopiedHeaders) > 0 {
|
||||
@ -139,7 +139,7 @@ func (s *Sig) Header() (string, error) {
|
||||
} else if i == len(s.CopiedHeaders)-1 {
|
||||
v += ";"
|
||||
}
|
||||
w.Addf(sep, v)
|
||||
w.Addf(sep, "%s", v)
|
||||
}
|
||||
}
|
||||
|
||||
@ -147,7 +147,7 @@ func (s *Sig) Header() (string, error) {
|
||||
|
||||
w.Addf(" ", "b=")
|
||||
if len(s.Signature) > 0 {
|
||||
w.AddWrap([]byte(base64.StdEncoding.EncodeToString(s.Signature)))
|
||||
w.AddWrap([]byte(base64.StdEncoding.EncodeToString(s.Signature)), false)
|
||||
}
|
||||
w.Add("\r\n")
|
||||
return w.String(), nil
|
||||
|
@ -32,7 +32,7 @@ func TestParseRecord(t *testing.T) {
|
||||
}
|
||||
if r != nil {
|
||||
pk := r.Pubkey
|
||||
for i := 0; i < 2; i++ {
|
||||
for range 2 {
|
||||
ntxt, err := r.Record()
|
||||
if err != nil {
|
||||
t.Fatalf("making record: %v", err)
|
||||
|
@ -15,7 +15,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
mathrand "math/rand"
|
||||
mathrand2 "math/rand/v2"
|
||||
"time"
|
||||
|
||||
"github.com/mjl-/mox/dkim"
|
||||
@ -257,7 +257,7 @@ func Verify(ctx context.Context, elog *slog.Logger, resolver dns.Resolver, msgFr
|
||||
|
||||
// Record can request sampling of messages to apply policy.
|
||||
// See ../rfc/7489:1432
|
||||
useResult = !applyRandomPercentage || record.Percentage == 100 || mathrand.Intn(100) < record.Percentage
|
||||
useResult = !applyRandomPercentage || record.Percentage == 100 || mathrand2.IntN(100) < record.Percentage
|
||||
|
||||
// We treat "quarantine" and "reject" the same. Thus, we also don't "downgrade"
|
||||
// from reject to quarantine if this message was sampled out.
|
||||
|
@ -11,7 +11,17 @@
package dmarcdb

import (
"context"
"fmt"
"os"
"path/filepath"
"time"

"github.com/mjl-/bstore"

"github.com/mjl-/mox/mlog"
"github.com/mjl-/mox/mox-"
"github.com/mjl-/mox/moxvar"
)

// Init opens the databases.
@ -19,11 +29,49 @@ import (
// The incoming reports and evaluations for outgoing reports are in separate
// databases for simpler file-based handling of the databases.
func Init() error {
if _, err := reportsDB(mox.Shutdown); err != nil {
return err
if ReportsDB != nil || EvalDB != nil {
return fmt.Errorf("already initialized")
}
if _, err := evalDB(mox.Shutdown); err != nil {
return err

log := mlog.New("dmarcdb", nil)
var err error

ReportsDB, err = openReportsDB(mox.Shutdown, log)
if err != nil {
return fmt.Errorf("open reports db: %v", err)
}

EvalDB, err = openEvalDB(mox.Shutdown, log)
if err != nil {
return fmt.Errorf("open eval db: %v", err)
}

return nil
}

func Close() error {
if err := ReportsDB.Close(); err != nil {
return fmt.Errorf("closing reports db: %w", err)
}
ReportsDB = nil

if err := EvalDB.Close(); err != nil {
return fmt.Errorf("closing eval db: %w", err)
}
EvalDB = nil
return nil
}

func openReportsDB(ctx context.Context, log mlog.Log) (*bstore.DB, error) {
p := mox.DataDirPath("dmarcrpt.db")
os.MkdirAll(filepath.Dir(p), 0770)
opts := bstore.Options{Timeout: 5 * time.Second, Perm: 0660, RegisterLogger: moxvar.RegisterLogger(p, log.Logger)}
return bstore.Open(ctx, p, &opts, ReportsDBTypes...)
}

func openEvalDB(ctx context.Context, log mlog.Log) (*bstore.DB, error) {
p := mox.DataDirPath("dmarceval.db")
os.MkdirAll(filepath.Dir(p), 0770)
opts := bstore.Options{Timeout: 5 * time.Second, Perm: 0660, RegisterLogger: moxvar.RegisterLogger(p, log.Logger)}
return bstore.Open(ctx, p, &opts, EvalDBTypes...)
}
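With the lazy reportsDB/evalDB openers gone, callers now open and close both databases explicitly, as the ctl test near the top of this diff already does. A sketch of the expected lifecycle; the surrounding function and logging are invented for illustration and assume mox configuration was loaded so mox.DataDirPath resolves to a writable data directory:

package example

import (
	"fmt"
	"log"

	"github.com/mjl-/mox/dmarcdb"
)

func runWithDmarcDB() error {
	if err := dmarcdb.Init(); err != nil {
		return fmt.Errorf("dmarcdb init: %w", err)
	}
	defer func() {
		if err := dmarcdb.Close(); err != nil {
			log.Printf("closing dmarcdb: %v", err)
		}
	}()

	// ... use dmarcdb.AddReport, dmarcdb.Evaluations, etc. ...
	return nil
}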
110
dmarcdb/eval.go
110
dmarcdb/eval.go
@ -10,21 +10,18 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"log/slog"
|
||||
"maps"
|
||||
"mime"
|
||||
"mime/multipart"
|
||||
"net/textproto"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime/debug"
|
||||
"slices"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/exp/maps"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
|
||||
@ -66,8 +63,7 @@ var (
|
||||
// Exported for backups. For incoming deliveries the SMTP server adds evaluations
|
||||
// to the database. Every hour, a goroutine wakes up that gathers evaluations from
|
||||
// the last hour(s), sends a report, and removes the evaluations from the database.
|
||||
EvalDB *bstore.DB
|
||||
evalMutex sync.Mutex
|
||||
EvalDB *bstore.DB
|
||||
)
|
||||
|
||||
// Evaluation is the result of an evaluation of a DMARC policy, to be included
|
||||
@ -162,21 +158,6 @@ func (e Evaluation) ReportRecord(count int) dmarcrpt.ReportRecord {
|
||||
}
|
||||
}
|
||||
|
||||
func evalDB(ctx context.Context) (rdb *bstore.DB, rerr error) {
|
||||
evalMutex.Lock()
|
||||
defer evalMutex.Unlock()
|
||||
if EvalDB == nil {
|
||||
p := mox.DataDirPath("dmarceval.db")
|
||||
os.MkdirAll(filepath.Dir(p), 0770)
|
||||
db, err := bstore.Open(ctx, p, &bstore.Options{Timeout: 5 * time.Second, Perm: 0660}, EvalDBTypes...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
EvalDB = db
|
||||
}
|
||||
return EvalDB, nil
|
||||
}
|
||||
|
||||
var intervalOpts = []int{24, 12, 8, 6, 4, 3, 2}
|
||||
|
||||
func intervalHours(seconds int) int {
|
||||
@ -197,23 +178,13 @@ func intervalHours(seconds int) int {
|
||||
func AddEvaluation(ctx context.Context, aggregateReportingIntervalSeconds int, e *Evaluation) error {
|
||||
e.IntervalHours = intervalHours(aggregateReportingIntervalSeconds)
|
||||
|
||||
db, err := evalDB(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
e.ID = 0
|
||||
return db.Insert(ctx, e)
|
||||
return EvalDB.Insert(ctx, e)
|
||||
}
|
||||
|
||||
// Evaluations returns all evaluations in the database.
|
||||
func Evaluations(ctx context.Context) ([]Evaluation, error) {
|
||||
db, err := evalDB(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
q := bstore.QueryDB[Evaluation](ctx, db)
|
||||
q := bstore.QueryDB[Evaluation](ctx, EvalDB)
|
||||
q.SortAsc("Evaluated")
|
||||
return q.List()
|
||||
}
|
||||
@ -229,14 +200,9 @@ type EvaluationStat struct {
|
||||
|
||||
// EvaluationStats returns evaluation counts and report-sending status per domain.
|
||||
func EvaluationStats(ctx context.Context) (map[string]EvaluationStat, error) {
|
||||
db, err := evalDB(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
r := map[string]EvaluationStat{}
|
||||
|
||||
err = bstore.QueryDB[Evaluation](ctx, db).ForEach(func(e Evaluation) error {
|
||||
err := bstore.QueryDB[Evaluation](ctx, EvalDB).ForEach(func(e Evaluation) error {
|
||||
if stat, ok := r[e.PolicyDomain]; ok {
|
||||
if !slices.Contains(stat.Dispositions, string(e.Disposition)) {
|
||||
stat.Dispositions = append(stat.Dispositions, string(e.Disposition))
|
||||
@ -263,12 +229,7 @@ func EvaluationStats(ctx context.Context) (map[string]EvaluationStat, error) {
|
||||
|
||||
// EvaluationsDomain returns all evaluations for a domain.
|
||||
func EvaluationsDomain(ctx context.Context, domain dns.Domain) ([]Evaluation, error) {
|
||||
db, err := evalDB(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
q := bstore.QueryDB[Evaluation](ctx, db)
|
||||
q := bstore.QueryDB[Evaluation](ctx, EvalDB)
|
||||
q.FilterNonzero(Evaluation{PolicyDomain: domain.Name()})
|
||||
q.SortAsc("Evaluated")
|
||||
return q.List()
|
||||
@ -277,14 +238,9 @@ func EvaluationsDomain(ctx context.Context, domain dns.Domain) ([]Evaluation, er
|
||||
// RemoveEvaluationsDomain removes evaluations for domain so they won't be sent in
|
||||
// an aggregate report.
|
||||
func RemoveEvaluationsDomain(ctx context.Context, domain dns.Domain) error {
|
||||
db, err := evalDB(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
q := bstore.QueryDB[Evaluation](ctx, db)
|
||||
q := bstore.QueryDB[Evaluation](ctx, EvalDB)
|
||||
q.FilterNonzero(Evaluation{PolicyDomain: domain.Name()})
|
||||
_, err = q.Delete()
|
||||
_, err := q.Delete()
|
||||
return err
|
||||
}
|
||||
|
||||
@ -294,7 +250,7 @@ var jitterRand = mox.NewPseudoRand()
|
||||
// Jitter so we don't cause load at exactly whole hours, other processes may
|
||||
// already be doing that.
|
||||
var jitteredTimeUntil = func(t time.Time) time.Duration {
|
||||
return time.Until(t.Add(time.Duration(30+jitterRand.Intn(60)) * time.Second))
|
||||
return time.Until(t.Add(time.Duration(30+jitterRand.IntN(60)) * time.Second))
|
||||
}
|
||||
|
||||
// Start launches a goroutine that wakes up at each whole hour (plus jitter) and
|
||||
@ -318,12 +274,6 @@ func Start(resolver dns.Resolver) {
|
||||
|
||||
ctx := mox.Shutdown
|
||||
|
||||
db, err := evalDB(ctx)
|
||||
if err != nil {
|
||||
log.Errorx("opening dmarc evaluations database for sending dmarc aggregate reports, not sending reports", err)
|
||||
return
|
||||
}
|
||||
|
||||
for {
|
||||
now := time.Now()
|
||||
nextEnd := nextWholeHour(now)
|
||||
@ -355,12 +305,12 @@ func Start(resolver dns.Resolver) {
|
||||
// 24 hour interval). They should have been processed by now. We may have kept them
|
||||
// during temporary errors, but persistent temporary errors shouldn't fill up our
|
||||
// database. This also cleans up evaluations that were all optional for a domain.
|
||||
_, err := bstore.QueryDB[Evaluation](ctx, db).FilterLess("Evaluated", nextEnd.Add(-48*time.Hour)).Delete()
|
||||
_, err := bstore.QueryDB[Evaluation](ctx, EvalDB).FilterLess("Evaluated", nextEnd.Add(-48*time.Hour)).Delete()
|
||||
log.Check(err, "removing stale dmarc evaluations from database")
|
||||
|
||||
clog := log.WithCid(mox.Cid())
|
||||
clog.Info("sending dmarc aggregate reports", slog.Time("end", nextEnd.UTC()), slog.Any("intervals", intervals))
|
||||
if err := sendReports(ctx, clog, resolver, db, nextEnd, intervals); err != nil {
|
||||
if err := sendReports(ctx, clog, resolver, EvalDB, nextEnd, intervals); err != nil {
|
||||
clog.Errorx("sending dmarc aggregate reports", err)
|
||||
metricReportError.Inc()
|
||||
} else {
|
||||
@ -737,9 +687,7 @@ func sendReportDomain(ctx context.Context, log mlog.Log, resolver dns.Resolver,
|
||||
report.PolicyPublished = last.PolicyPublished
|
||||
|
||||
// Process records in-order for testable results.
|
||||
recstrs := maps.Keys(counts)
|
||||
sort.Strings(recstrs)
|
||||
for _, recstr := range recstrs {
|
||||
for _, recstr := range slices.Sorted(maps.Keys(counts)) {
|
||||
rc := counts[recstr]
|
||||
rc.ReportRecord.Row.Count = rc.count
|
||||
report.Records = append(report.Records, rc.ReportRecord)
|
||||
@ -779,7 +727,7 @@ func sendReportDomain(ctx context.Context, log mlog.Log, resolver dns.Resolver,
|
||||
// DKIM keys, so we can DKIM-sign our reports. SPF should pass anyway.
|
||||
// A single report can contain deliveries from a single policy domain
|
||||
// to multiple of our configured domains.
|
||||
from := smtp.Address{Localpart: "postmaster", Domain: mox.Conf.Static.HostnameDomain}
|
||||
from := smtp.NewAddress("postmaster", mox.Conf.Static.HostnameDomain)
|
||||
|
||||
// Subject follows the form in RFC. ../rfc/7489:1871
|
||||
subject := fmt.Sprintf("Report Domain: %s Submitter: %s Report-ID: <%s>", dom.ASCII, mox.Conf.Static.HostnameDomain.ASCII, report.ReportMetadata.ReportID)
|
||||
@ -1069,7 +1017,7 @@ func dkimSign(ctx context.Context, log mlog.Log, fromAddr smtp.Address, smtputf8
|
||||
for fd != zerodom {
|
||||
confDom, ok := mox.Conf.Domain(fd)
|
||||
selectors := mox.DKIMSelectors(confDom.DKIM)
|
||||
if len(selectors) > 0 {
|
||||
if len(selectors) > 0 && !confDom.Disabled {
|
||||
dkimHeaders, err := dkim.Sign(ctx, log.Logger, fromAddr.Localpart, fd, selectors, smtputf8, mf)
|
||||
if err != nil {
|
||||
log.Errorx("dkim-signing dmarc report, continuing without signature", err)
|
||||
@ -1091,46 +1039,26 @@ func dkimSign(ctx context.Context, log mlog.Log, fromAddr smtp.Address, smtputf8
|
||||
|
||||
// SuppressAdd adds an address to the suppress list.
|
||||
func SuppressAdd(ctx context.Context, ba *SuppressAddress) error {
|
||||
db, err := evalDB(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return db.Insert(ctx, ba)
|
||||
return EvalDB.Insert(ctx, ba)
|
||||
}
|
||||
|
||||
// SuppressList returns all reporting addresses on the suppress list.
|
||||
func SuppressList(ctx context.Context) ([]SuppressAddress, error) {
|
||||
db, err := evalDB(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return bstore.QueryDB[SuppressAddress](ctx, db).SortDesc("ID").List()
|
||||
return bstore.QueryDB[SuppressAddress](ctx, EvalDB).SortDesc("ID").List()
|
||||
}
|
||||
|
||||
// SuppressRemove removes a reporting address record from the suppress list.
|
||||
func SuppressRemove(ctx context.Context, id int64) error {
|
||||
db, err := evalDB(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return db.Delete(ctx, &SuppressAddress{ID: id})
|
||||
return EvalDB.Delete(ctx, &SuppressAddress{ID: id})
|
||||
}
|
||||
|
||||
// SuppressUpdate updates the until field of a reporting address record.
|
||||
func SuppressUpdate(ctx context.Context, id int64, until time.Time) error {
|
||||
db, err := evalDB(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ba := SuppressAddress{ID: id}
|
||||
err = db.Get(ctx, &ba)
|
||||
err := EvalDB.Get(ctx, &ba)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ba.Until = until
|
||||
return db.Update(ctx, &ba)
|
||||
return EvalDB.Update(ctx, &ba)
|
||||
}
|
||||
|
@ -6,7 +6,6 @@ import (
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"io"
|
||||
"log/slog"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
@ -20,6 +19,7 @@ import (
|
||||
"github.com/mjl-/mox/mox-"
|
||||
"github.com/mjl-/mox/moxio"
|
||||
"github.com/mjl-/mox/queue"
|
||||
"slices"
|
||||
)
|
||||
|
||||
func tcheckf(t *testing.T, err error, format string, args ...any) {
|
||||
@ -41,13 +41,13 @@ func TestEvaluations(t *testing.T) {
|
||||
mox.Context = ctxbg
|
||||
mox.ConfigStaticPath = filepath.FromSlash("../testdata/dmarcdb/mox.conf")
|
||||
mox.MustLoadConfig(true, false)
|
||||
EvalDB = nil
|
||||
|
||||
_, err := evalDB(ctxbg)
|
||||
tcheckf(t, err, "database")
|
||||
os.Remove(mox.DataDirPath("dmarceval.db"))
|
||||
err := Init()
|
||||
tcheckf(t, err, "init")
|
||||
defer func() {
|
||||
EvalDB.Close()
|
||||
EvalDB = nil
|
||||
err := Close()
|
||||
tcheckf(t, err, "close")
|
||||
}()
|
||||
|
||||
parseJSON := func(s string) (e Evaluation) {
|
||||
@ -157,19 +157,17 @@ func TestEvaluations(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSendReports(t *testing.T) {
|
||||
mlog.SetConfig(map[string]slog.Level{"": slog.LevelDebug})
|
||||
|
||||
os.RemoveAll("../testdata/dmarcdb/data")
|
||||
mox.Context = ctxbg
|
||||
mox.ConfigStaticPath = filepath.FromSlash("../testdata/dmarcdb/mox.conf")
|
||||
mox.MustLoadConfig(true, false)
|
||||
EvalDB = nil
|
||||
|
||||
db, err := evalDB(ctxbg)
|
||||
tcheckf(t, err, "database")
|
||||
os.Remove(mox.DataDirPath("dmarceval.db"))
|
||||
err := Init()
|
||||
tcheckf(t, err, "init")
|
||||
defer func() {
|
||||
EvalDB.Close()
|
||||
EvalDB = nil
|
||||
err := Close()
|
||||
tcheckf(t, err, "close")
|
||||
}()
|
||||
|
||||
resolver := dns.MockResolver{
|
||||
@ -288,7 +286,7 @@ func TestSendReports(t *testing.T) {
|
||||
mox.Shutdown, mox.ShutdownCancel = context.WithCancel(ctxbg)
|
||||
|
||||
for _, e := range evals {
|
||||
err := db.Insert(ctxbg, &e)
|
||||
err := EvalDB.Insert(ctxbg, &e)
|
||||
tcheckf(t, err, "inserting evaluation")
|
||||
}
|
||||
|
||||
@ -304,7 +302,7 @@ func TestSendReports(t *testing.T) {
|
||||
// Read message file. Also write copy to disk for inspection.
|
||||
buf, err := io.ReadAll(&moxio.AtReader{R: msgFile})
|
||||
tcheckf(t, err, "read report message")
|
||||
err = os.WriteFile("../testdata/dmarcdb/data/report.eml", append(append([]byte{}, qm.MsgPrefix...), buf...), 0600)
|
||||
err = os.WriteFile("../testdata/dmarcdb/data/report.eml", slices.Concat(qm.MsgPrefix, buf), 0600)
|
||||
tcheckf(t, err, "write report message")
|
||||
|
||||
var feedback *dmarcrpt.Feedback
|
||||
@ -359,13 +357,13 @@ func TestSendReports(t *testing.T) {
|
||||
|
||||
// Address is suppressed.
|
||||
sa := SuppressAddress{ReportingAddress: "dmarcrpt@sender.example", Until: time.Now().Add(time.Minute)}
|
||||
err = db.Insert(ctxbg, &sa)
|
||||
err = EvalDB.Insert(ctxbg, &sa)
|
||||
tcheckf(t, err, "insert suppress address")
|
||||
test([]Evaluation{eval}, map[string]struct{}{}, map[string]struct{}{}, nil)
|
||||
|
||||
// Suppression has expired.
|
||||
sa.Until = time.Now().Add(-time.Minute)
|
||||
err = db.Update(ctxbg, &sa)
|
||||
err = EvalDB.Update(ctxbg, &sa)
|
||||
tcheckf(t, err, "update suppress address")
|
||||
test([]Evaluation{eval}, map[string]struct{}{"dmarcrpt@sender.example": {}}, map[string]struct{}{}, expFeedback)
|
||||
|
||||
|
17	dmarcdb/main_test.go	Normal file
@ -0,0 +1,17 @@
package dmarcdb

import (
	"fmt"
	"os"
	"testing"

	"github.com/mjl-/mox/metrics"
)

func TestMain(m *testing.M) {
	m.Run()
	if metrics.Panics.Load() > 0 {
		fmt.Println("unhandled panics encountered")
		os.Exit(2)
	}
}
@ -3,9 +3,6 @@ package dmarcdb
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
@ -15,13 +12,11 @@ import (
|
||||
|
||||
"github.com/mjl-/mox/dmarcrpt"
|
||||
"github.com/mjl-/mox/dns"
|
||||
"github.com/mjl-/mox/mox-"
|
||||
)
|
||||
|
||||
var (
|
||||
ReportsDBTypes = []any{DomainFeedback{}} // Types stored in DB.
|
||||
ReportsDB *bstore.DB // Exported for backups.
|
||||
reportsMutex sync.Mutex
|
||||
)
|
||||
|
||||
var (
|
||||
@ -59,38 +54,18 @@ type DomainFeedback struct {
|
||||
dmarcrpt.Feedback
|
||||
}
|
||||
|
||||
func reportsDB(ctx context.Context) (rdb *bstore.DB, rerr error) {
|
||||
reportsMutex.Lock()
|
||||
defer reportsMutex.Unlock()
|
||||
if ReportsDB == nil {
|
||||
p := mox.DataDirPath("dmarcrpt.db")
|
||||
os.MkdirAll(filepath.Dir(p), 0770)
|
||||
db, err := bstore.Open(ctx, p, &bstore.Options{Timeout: 5 * time.Second, Perm: 0660}, ReportsDBTypes...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ReportsDB = db
|
||||
}
|
||||
return ReportsDB, nil
|
||||
}
|
||||
|
||||
// AddReport adds a DMARC aggregate feedback report from an email to the database,
|
||||
// and updates prometheus metrics.
|
||||
//
|
||||
// fromDomain is the domain in the report message From header.
|
||||
func AddReport(ctx context.Context, f *dmarcrpt.Feedback, fromDomain dns.Domain) error {
|
||||
db, err := reportsDB(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d, err := dns.ParseDomain(f.PolicyPublished.Domain)
|
||||
if err != nil {
|
||||
return fmt.Errorf("parsing domain in report: %v", err)
|
||||
}
|
||||
|
||||
df := DomainFeedback{0, d.Name(), fromDomain.Name(), *f}
|
||||
if err := db.Insert(ctx, &df); err != nil {
|
||||
if err := ReportsDB.Insert(ctx, &df); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@ -129,38 +104,23 @@ func AddReport(ctx context.Context, f *dmarcrpt.Feedback, fromDomain dns.Domain)
|
||||
|
||||
// Records returns all reports in the database.
|
||||
func Records(ctx context.Context) ([]DomainFeedback, error) {
|
||||
db, err := reportsDB(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return bstore.QueryDB[DomainFeedback](ctx, db).List()
|
||||
return bstore.QueryDB[DomainFeedback](ctx, ReportsDB).List()
|
||||
}
|
||||
|
||||
// RecordID returns the report for the ID.
|
||||
func RecordID(ctx context.Context, id int64) (DomainFeedback, error) {
|
||||
db, err := reportsDB(ctx)
|
||||
if err != nil {
|
||||
return DomainFeedback{}, err
|
||||
}
|
||||
|
||||
e := DomainFeedback{ID: id}
|
||||
err = db.Get(ctx, &e)
|
||||
err := ReportsDB.Get(ctx, &e)
|
||||
return e, err
|
||||
}
|
||||
|
||||
// RecordsPeriodDomain returns the reports overlapping start and end, for the given
|
||||
// domain. If domain is empty, all records match for domain.
|
||||
func RecordsPeriodDomain(ctx context.Context, start, end time.Time, domain string) ([]DomainFeedback, error) {
|
||||
db, err := reportsDB(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
s := start.Unix()
|
||||
e := end.Unix()
|
||||
|
||||
q := bstore.QueryDB[DomainFeedback](ctx, db)
|
||||
q := bstore.QueryDB[DomainFeedback](ctx, ReportsDB)
|
||||
if domain != "" {
|
||||
q.FilterNonzero(DomainFeedback{Domain: domain})
|
||||
}
|
||||
|
@ -20,16 +20,12 @@ func TestDMARCDB(t *testing.T) {
|
||||
mox.ConfigStaticPath = filepath.FromSlash("../testdata/dmarcdb/mox.conf")
|
||||
mox.MustLoadConfig(true, false)
|
||||
|
||||
dbpath := mox.DataDirPath("dmarcrpt.db")
|
||||
os.MkdirAll(filepath.Dir(dbpath), 0770)
|
||||
|
||||
if err := Init(); err != nil {
|
||||
t.Fatalf("init database: %s", err)
|
||||
}
|
||||
defer os.Remove(dbpath)
|
||||
os.Remove(mox.DataDirPath("dmarcrpt.db"))
|
||||
err := Init()
|
||||
tcheckf(t, err, "init")
|
||||
defer func() {
|
||||
ReportsDB.Close()
|
||||
ReportsDB = nil
|
||||
err := Close()
|
||||
tcheckf(t, err, "close")
|
||||
}()
|
||||
|
||||
feedback := &dmarcrpt.Feedback{
|
||||
|
@ -52,7 +52,7 @@ func parseMessageReport(log mlog.Log, p message.Part) (*Feedback, error) {
|
||||
// content of the message.
|
||||
|
||||
if p.MediaType != "MULTIPART" {
|
||||
return parseReport(p)
|
||||
return parseReport(log, p)
|
||||
}
|
||||
|
||||
for {
|
||||
@ -72,7 +72,7 @@ func parseMessageReport(log mlog.Log, p message.Part) (*Feedback, error) {
|
||||
}
|
||||
}
|
||||
|
||||
func parseReport(p message.Part) (*Feedback, error) {
|
||||
func parseReport(log mlog.Log, p message.Part) (*Feedback, error) {
|
||||
ct := strings.ToLower(p.MediaType + "/" + p.MediaSubType)
|
||||
r := p.Reader()
|
||||
|
||||
@ -93,7 +93,7 @@ func parseReport(p message.Part) (*Feedback, error) {
|
||||
switch ct {
|
||||
case "application/zip":
|
||||
// Google sends messages with direct application/zip content-type.
|
||||
return parseZip(r)
|
||||
return parseZip(log, r)
|
||||
case "application/gzip", "application/x-gzip":
|
||||
gzr, err := gzip.NewReader(r)
|
||||
if err != nil {
|
||||
@ -106,7 +106,7 @@ func parseReport(p message.Part) (*Feedback, error) {
|
||||
return nil, ErrNoReport
|
||||
}
|
||||
|
||||
func parseZip(r io.Reader) (*Feedback, error) {
|
||||
func parseZip(log mlog.Log, r io.Reader) (*Feedback, error) {
|
||||
buf, err := io.ReadAll(r)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("reading feedback: %s", err)
|
||||
@ -122,6 +122,9 @@ func parseZip(r io.Reader) (*Feedback, error) {
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("opening file in zip: %s", err)
|
||||
}
|
||||
defer f.Close()
|
||||
defer func() {
|
||||
err := f.Close()
|
||||
log.Check(err, "closing report file in zip file")
|
||||
}()
|
||||
return ParseReport(f)
|
||||
}
|
||||
|
17	dns/dns.go
@ -5,6 +5,7 @@ package dns
import (
"errors"
"fmt"
"net"
"strings"

"golang.org/x/net/idna"
@ -19,6 +20,7 @@ var (
errTrailingDot = errors.New("dns name has trailing dot")
errUnderscore = errors.New("domain name with underscore")
errIDNA = errors.New("idna")
errIPNotName = errors.New("ip address while name required")
)

// Domain is a domain name, with one or more labels, with at least an ASCII
@ -95,6 +97,12 @@ func ParseDomain(s string) (Domain, error) {
return Domain{}, errTrailingDot
}

// IPv4 addresses would be accepted by idna lookups. TLDs cannot be all numerical,
// so IP addresses are not valid DNS names.
if net.ParseIP(s) != nil {
return Domain{}, errIPNotName
}

ascii, err := idna.Lookup.ToASCII(s)
if err != nil {
return Domain{}, fmt.Errorf("%w: to ascii: %v", errIDNA, err)
@ -148,7 +156,9 @@ func ParseDomainLax(s string) (Domain, error) {
return Domain{ASCII: s}, nil
}

// IsNotFound returns whether an error is an adns.DNSError with IsNotFound set.
// IsNotFound returns whether an error is an adns.DNSError or net.DNSError with
// IsNotFound set.
//
// IsNotFound means the requested type does not exist for the given domain (a
// nodata or nxdomain response). It doesn't necessarily mean no other types for
// that name exist.
@ -158,6 +168,7 @@ func ParseDomainLax(s string) (Domain, error) {
// The adns resolver (just like the Go resolver) returns an IsNotFound error for
// both cases, there is no need to explicitly check for zero entries.
func IsNotFound(err error) bool {
var dnsErr *adns.DNSError
return err != nil && errors.As(err, &dnsErr) && dnsErr.IsNotFound
var adnsErr *adns.DNSError
var dnsErr *net.DNSError
return err != nil && (errors.As(err, &adnsErr) && adnsErr.IsNotFound || errors.As(err, &dnsErr) && dnsErr.IsNotFound)
}
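The widened IsNotFound matters for code that resolves names through a plain *net.Resolver rather than adns: a nodata/nxdomain answer then surfaces as *net.DNSError, and can still be treated as "record simply absent" instead of a hard failure. A hedged sketch; the lookup and the policy around it are illustrative, not code from this change:

package example

import (
	"context"
	"net"

	"github.com/mjl-/mox/dns"
)

// lookupTXTOptional returns TXT records for name, and no error when the name
// or record type simply does not exist.
func lookupTXTOptional(ctx context.Context, resolver *net.Resolver, name string) ([]string, error) {
	txts, err := resolver.LookupTXT(ctx, name)
	if err != nil {
		if dns.IsNotFound(err) {
			return nil, nil // Absent record is not an error for the caller.
		}
		return nil, err // Temporary/other failure, let the caller decide.
	}
	return txts, nil
}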
@ -10,7 +10,7 @@
// looked up with a DNS "A" lookup of a name similar to an IPv4 address, but with
// 4-bit hexadecimal dot-separated characters, in reverse.
//
// The health of a DNSBL "zone" can be check through a lookup of 127.0.0.1
// The health of a DNSBL "zone" can be checked through a lookup of 127.0.0.1
// (must not be present) and 127.0.0.2 (must be present).
package dnsbl
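The health-check convention in that comment (127.0.0.1 must be absent, 127.0.0.2 must be listed, per RFC 5782) can be expressed directly with reversed-IP lookups. A sketch under those assumptions; mox's own dnsbl package presumably exposes its own helpers for this, so treat the code below as illustration only:

package example

import (
	"context"
	"fmt"
	"net"

	"github.com/mjl-/mox/dns"
)

// zoneHealthy reports whether a DNSBL zone behaves sanely: the test address
// 127.0.0.2 must be listed and 127.0.0.1 must not be.
func zoneHealthy(ctx context.Context, resolver *net.Resolver, zone string) (bool, error) {
	lookup := func(ip string) (bool, error) {
		// Reverse the IPv4 octets and query <reversed>.<zone>.
		o := net.ParseIP(ip).To4()
		name := fmt.Sprintf("%d.%d.%d.%d.%s", o[3], o[2], o[1], o[0], zone)
		_, err := resolver.LookupHost(ctx, name)
		if err != nil {
			if dns.IsNotFound(err) {
				return false, nil
			}
			return false, err
		}
		return true, nil
	}

	listed2, err := lookup("127.0.0.2")
	if err != nil {
		return false, err
	}
	listed1, err := lookup("127.0.0.1")
	if err != nil {
		return false, err
	}
	return listed2 && !listed1, nil
}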
280
doc.go
280
doc.go
@ -55,19 +55,30 @@ any parameters. Followed by the help and usage information for each command.
|
||||
mox export mbox [-single] dst-dir account-path [mailbox]
|
||||
mox localserve
|
||||
mox help [command ...]
|
||||
mox backup dest-dir
|
||||
mox backup destdir
|
||||
mox verifydata data-dir
|
||||
mox licenses
|
||||
mox config test
|
||||
mox config dnscheck domain
|
||||
mox config dnsrecords domain
|
||||
mox config describe-domains >domains.conf
|
||||
mox config describe-static >mox.conf
|
||||
mox config account list
|
||||
mox config account add account address
|
||||
mox config account rm account
|
||||
mox config account disable account message
|
||||
mox config account enable account
|
||||
mox config address add address account
|
||||
mox config address rm address
|
||||
mox config domain add domain account [localpart]
|
||||
mox config domain add [-disabled] domain account [localpart]
|
||||
mox config domain rm domain
|
||||
mox config domain disable domain
|
||||
mox config domain enable domain
|
||||
mox config tlspubkey list [account]
|
||||
mox config tlspubkey get fingerprint
|
||||
mox config tlspubkey add address [name] < cert.pem
|
||||
mox config tlspubkey rm fingerprint
|
||||
mox config tlspubkey gen stem
|
||||
mox config alias list domain
|
||||
mox config alias print alias
|
||||
mox config alias add alias@domain rcpt1@domain ...
|
||||
@ -79,6 +90,7 @@ any parameters. Followed by the help and usage information for each command.
|
||||
mox config printservice >mox.service
|
||||
mox config ensureacmehostprivatekeys
|
||||
mox config example [name]
|
||||
mox admin imapserve preauth-address
|
||||
mox checkupdate
|
||||
mox cid cid
|
||||
mox clientconfig domain
|
||||
@ -99,8 +111,10 @@ any parameters. Followed by the help and usage information for each command.
|
||||
mox dnsbl check zone ip
|
||||
mox dnsbl checkhealth zone
|
||||
mox mtasts lookup domain
|
||||
mox retrain accountname
|
||||
mox rdap domainage domain
|
||||
mox retrain [accountname]
|
||||
mox sendmail [-Fname] [ignoredflags] [-t] [<message]
|
||||
mox smtp dial host[:port]
|
||||
mox spf check domain ip
|
||||
mox spf lookup domain
|
||||
mox spf parse txtrecord
|
||||
@ -140,6 +154,8 @@ Quickstart writes configuration files, prints initial admin and account
|
||||
passwords, DNS records you should create. If you run it on Linux it writes a
|
||||
systemd service file and prints commands to enable and start mox as service.
|
||||
|
||||
All output is written to quickstart.log for later reference.
|
||||
|
||||
The user or uid is optional, defaults to "mox", and is the user or uid/gid mox
|
||||
will run as after initialization.
|
||||
|
||||
@ -173,7 +189,7 @@ output of "mox config describe-domains" and see the output of
|
||||
-hostname string
|
||||
hostname mox will run on, by default the hostname of the machine quickstart runs on; if specified, the IPs for the hostname are configured for the public listener
|
||||
-skipdial
|
||||
skip check for outgoing smtp (port 25) connectivity
|
||||
skip check for outgoing smtp (port 25) connectivity or for domain age with rdap
|
||||
|
||||
# mox stop
|
||||
|
||||
@ -695,6 +711,9 @@ recipients to be accepted, unless other reputation signals prevent that.
|
||||
Users can also import mailboxes/messages through the account web page by
|
||||
uploading a zip or tgz file with mbox and/or maildirs.
|
||||
|
||||
Messages are imported even if already present. Importing messages twice will
|
||||
result in duplicate messages.
|
||||
|
||||
Mailbox flags, like "seen", "answered", will be imported. An optional
|
||||
dovecot-keywords file can specify additional flags, like Forwarded/Junk/NotJunk.
|
||||
|
||||
@ -722,6 +741,9 @@ recipients to be accepted, unless other reputation signals prevent that.
|
||||
Users can also import mailboxes/messages through the account web page by
|
||||
uploading a zip or tgz file with mbox and/or maildirs.
|
||||
|
||||
Messages are imported even if already present. Importing messages twice will
|
||||
result in duplicate messages.
|
||||
|
||||
usage: mox import mbox accountname mailboxname mbox
|
||||
|
||||
# mox export maildir
|
||||
@ -805,13 +827,14 @@ If a single command matches, its usage and full help text is printed.
|
||||
|
||||
# mox backup
|
||||
|
||||
Creates a backup of the data directory.
|
||||
Creates a backup of the config and data directory.
|
||||
|
||||
Backup creates consistent snapshots of the databases and message files and
|
||||
copies other files in the data directory. Empty directories are not copied.
|
||||
These files can then be stored elsewhere for long-term storage, or used to fall
|
||||
back to should an upgrade fail. Simply copying files in the data directory
|
||||
while mox is running can result in unusable database files.
|
||||
Backup copies the config directory to <destdir>/config, and creates
|
||||
<destdir>/data with a consistent snapshot of the databases and message files
|
||||
and copies other files from the data directory. Empty directories are not
|
||||
copied. The backup can then be stored elsewhere for long-term storage, or used
|
||||
to fall back to should an upgrade fail. Simply copying files in the data
|
||||
directory while mox is running can result in unusable database files.
|
||||
|
||||
Message files never change (they are read-only, though can be removed) and are
|
||||
hard-linked so they don't consume additional space. If hardlinking fails, for
|
||||
@ -825,22 +848,27 @@ All files in the data directory that aren't recognized (i.e. other than known
|
||||
database files, message files, an acme directory, the "tmp" directory, etc),
|
||||
are stored, but with a warning.
|
||||
|
||||
A clean successful backup does not print any output by default. Use the
|
||||
-verbose flag for details, including timing.
|
||||
Remove files in the destination directory before doing another backup. The
|
||||
backup command will not overwrite files, but print and return errors.
|
||||
|
||||
Exit code 0 indicates the backup was successful. A clean successful backup does
|
||||
not print any output, but may print warnings. Use the -verbose flag for
|
||||
details, including timing.
|
||||
|
||||
To restore a backup, first shut down mox, move away the old data directory and
|
||||
move an earlier backed up directory in its place, run "mox verifydata",
|
||||
possibly with the "-fix" option, and restart mox. After the restore, you may
|
||||
also want to run "mox bumpuidvalidity" for each account for which messages in a
|
||||
mailbox changed, to force IMAP clients to synchronize mailbox state.
|
||||
move an earlier backed up directory in its place, run "mox verifydata
|
||||
<datadir>", possibly with the "-fix" option, and restart mox. After the
|
||||
restore, you may also want to run "mox bumpuidvalidity" for each account for
|
||||
which messages in a mailbox changed, to force IMAP clients to synchronize
|
||||
mailbox state.
|
||||
|
||||
Before upgrading, to check if the upgrade will likely succeed, first make a
|
||||
backup, then use the new mox binary to run "mox verifydata" on the backup. This
|
||||
can change the backup files (e.g. upgrade database files, move away
|
||||
backup, then use the new mox binary to run "mox verifydata <backupdir>/data".
|
||||
This can change the backup files (e.g. upgrade database files, move away
|
||||
unrecognized message files), so you should make a new backup before actually
|
||||
upgrading.
|
||||
|
||||
usage: mox backup dest-dir
|
||||
usage: mox backup destdir
|
||||
-verbose
|
||||
print progress
|
||||
|
||||
@ -870,6 +898,12 @@ possibly making them potentially no longer readable by the previous version.
|
||||
-skip-size-check
|
||||
skip the check for message size
|
||||
|
||||
# mox licenses
|
||||
|
||||
Print licenses of mox source code and dependencies.
|
||||
|
||||
usage: mox licenses
|
||||
|
||||
# mox config test
|
||||
|
||||
Parses and validates the configuration files.
|
||||
@ -921,6 +955,15 @@ may contain unfinished list items.
|
||||
|
||||
usage: mox config describe-static >mox.conf
|
||||
|
||||
# mox config account list
|
||||
|
||||
List all accounts.
|
||||
|
||||
Each account is printed on a line, with optional additional tab-separated
|
||||
information, such as "(disabled)".
|
||||
|
||||
usage: mox config account list
|
||||
|
||||
# mox config account add
|
||||
|
||||
Add an account with an email address and reload the configuration.
|
||||
@ -937,8 +980,30 @@ Remove an account and reload the configuration.
|
||||
Email addresses for this account will also be removed, and incoming email for
|
||||
these addresses will be rejected.
|
||||
|
||||
All data for the account will be removed.
|
||||
|
||||
usage: mox config account rm account
|
||||
|
||||
# mox config account disable
|
||||
|
||||
Disable login for an account, showing message to users when they try to login.
|
||||
|
||||
Incoming email will still be accepted for the account, and queued email from the
|
||||
account will still be delivered. No new login sessions are possible.
|
||||
|
||||
Message must be non-empty, ascii-only without control characters including
|
||||
newline, and maximum 256 characters because it is used in SMTP/IMAP.
|
||||
|
||||
usage: mox config account disable account message
|
||||
|
||||
# mox config account enable
|
||||
|
||||
Enable login again for an account.
|
||||
|
||||
Login attempts by the user no longer result in an error message.
|
||||
|
||||
usage: mox config account enable account
|
||||
|
||||
# mox config address add
|
||||
|
||||
Adds an address to an account and reloads the configuration.
|
||||
@ -964,7 +1029,13 @@ The account is used for the postmaster mailboxes the domain, including as DMARC
|
||||
TLS reporting. Localpart is the "username" at the domain for this account. It
|
||||
must be set if and only if account does not yet exist.
|
||||
|
||||
usage: mox config domain add domain account [localpart]
|
||||
The domain can be created in disabled mode, preventing automatically requesting
|
||||
TLS certificates with ACME, and rejecting incoming/outgoing messages involving
|
||||
the domain, but allowing further configuration of the domain.
|
||||
|
||||
usage: mox config domain add [-disabled] domain account [localpart]
|
||||
-disabled
|
||||
disable the new domain
|
||||
|
||||
# mox config domain rm
|
||||
|
||||
@ -975,27 +1046,103 @@ rejected.
|
||||
|
||||
usage: mox config domain rm domain
|
||||
|
||||
# mox config domain disable
|
||||
|
||||
Disable a domain and reload the configuration.
|
||||
|
||||
This is a dangerous operation. Incoming/outgoing messages involving this domain
|
||||
will be rejected.
|
||||
|
||||
usage: mox config domain disable domain
|
||||
|
||||
# mox config domain enable
|
||||
|
||||
Enable a domain and reload the configuration.
|
||||
|
||||
Incoming/outgoing messages involving this domain will be accepted again.
|
||||
|
||||
usage: mox config domain enable domain
|
||||
|
||||
# mox config tlspubkey list
|
||||
|
||||
List TLS public keys for TLS client certificate authentication.
|
||||
|
||||
If account is absent, the TLS public keys for all accounts are listed.
|
||||
|
||||
usage: mox config tlspubkey list [account]
|
||||
|
||||
# mox config tlspubkey get
|
||||
|
||||
Get a TLS public key for a fingerprint.
|
||||
|
||||
Prints the type, name, account and address for the key, and the certificate in
|
||||
PEM format.
|
||||
|
||||
usage: mox config tlspubkey get fingerprint
|
||||
|
||||
# mox config tlspubkey add
|
||||
|
||||
Add a TLS public key to the account of the given address.
|
||||
|
||||
The public key is read from the certificate.
|
||||
|
||||
The optional name is a human-readable descriptive name of the key. If absent,
|
||||
the CommonName from the certificate is used.
|
||||
|
||||
usage: mox config tlspubkey add address [name] < cert.pem
|
||||
-no-imap-preauth
|
||||
Don't automatically switch new IMAP connections authenticated with this key to "authenticated" state after the TLS handshake. For working around clients that ignore the untagged IMAP PREAUTH response and try to authenticate while already authenticated.
|
||||
|
||||
# mox config tlspubkey rm
|
||||
|
||||
Remove TLS public key for fingerprint.
|
||||
|
||||
usage: mox config tlspubkey rm fingerprint
|
||||
|
||||
# mox config tlspubkey gen
|
||||
|
||||
Generate an ed25519 private key and minimal certificate for use as a TLS public key and write to files starting with stem.
|
||||
|
||||
The private key is written to $stem.$timestamp.ed25519privatekey.pkcs8.pem.
|
||||
The certificate is written to $stem.$timestamp.certificate.pem.
|
||||
The private key and certificate are also written to
|
||||
$stem.$timestamp.ed25519privatekey-certificate.pem.
|
||||
|
||||
The certificate can be added to an account with "mox config account tlspubkey add".
|
||||
|
||||
The combined file can be used with "mox sendmail".
|
||||
|
||||
The private key is also written to standard error in raw-url-base64-encoded
|
||||
form, also for use with "mox sendmail". The fingerprint is written to standard
|
||||
error too, for reference.
|
||||
|
||||
usage: mox config tlspubkey gen stem
|
||||
|
||||
# mox config alias list
|
||||
|
||||
List aliases for domain.
|
||||
Show aliases (lists) for domain.
|
||||
|
||||
usage: mox config alias list domain
|
||||
|
||||
# mox config alias print
|
||||
|
||||
Print settings and members of alias.
|
||||
Print settings and members of alias (list).
|
||||
|
||||
usage: mox config alias print alias
|
||||
|
||||
# mox config alias add
|
||||
|
||||
Add new alias with one or more addresses.
|
||||
Add new alias (list) with one or more addresses and public posting enabled.
|
||||
|
||||
An alias is used for delivering incoming email to multiple recipients. If you
|
||||
want to add an address to an account, don't use an alias, just add the address
|
||||
to the account.
|
||||
|
||||
usage: mox config alias add alias@domain rcpt1@domain ...
|
||||
|
||||
# mox config alias update
|
||||
|
||||
Update alias configuration.
|
||||
Update alias (list) configuration.
|
||||
|
||||
usage: mox config alias update alias@domain [-postpublic false|true -listmembers false|true -allowmsgfrom false|true]
|
||||
-allowmsgfrom string
|
||||
@ -1007,19 +1154,19 @@ Update alias configuration.
|
||||
|
||||
# mox config alias rm
|
||||
|
||||
Remove alias.
|
||||
Remove alias (list).
|
||||
|
||||
usage: mox config alias rm alias@domain
|
||||
|
||||
# mox config alias addaddr
|
||||
|
||||
Add addresses to alias.
|
||||
Add addresses to alias (list).
|
||||
|
||||
usage: mox config alias addaddr alias@domain rcpt1@domain ...
|
||||
|
||||
# mox config alias rmaddr
|
||||
|
||||
Remove addresses from alias.
|
||||
Remove addresses from alias (list).
|
||||
|
||||
usage: mox config alias rmaddr alias@domain rcpt1@domain ...
|
||||
|
||||
@ -1070,6 +1217,18 @@ List available config examples, or print a specific example.
|
||||
|
||||
usage: mox config example [name]
|
||||
|
||||
# mox admin imapserve
|
||||
|
||||
Initiate a preauthenticated IMAP connection on file descriptor 0.
|
||||
|
||||
For use with tools that can do IMAP over tunneled connections, e.g. with SSH
|
||||
during migrations. TLS is not possible on the connection, and authentication
|
||||
does not require TLS.
|
||||
|
||||
usage: mox admin imapserve preauth-address
|
||||
-fd0
|
||||
write IMAP to file descriptor 0 instead of stdout
|
||||
|
||||
# mox checkupdate
|
||||
|
||||
Check if a newer version of mox is available.
|
||||
@ -1309,14 +1468,32 @@ should be used, and how long the policy can be cached.
|
||||
|
||||
usage: mox mtasts lookup domain
|
||||
|
||||
# mox rdap domainage
|
||||
|
||||
Lookup the age of domain in RDAP based on latest registration.
|
||||
|
||||
RDAP is the registration data access protocol. Registries run RDAP services for
|
||||
their top level domains, providing information such as the registration date of
|
||||
domains. This command looks up the "age" of a domain by looking at the most
|
||||
recent "registration", "reregistration" or "reinstantiation" event.
|
||||
|
||||
Email messages from recently registered domains are often treated with
|
||||
suspicion, and some mail systems are more likely to classify them as junk.
|
||||
|
||||
On each invocation, a bootstrap file with a list of registries (of top-level
|
||||
domains) is retrieved, without caching. Do not run this command too often with
|
||||
automation.
|
||||
|
||||
usage: mox rdap domainage domain
|
||||
|
||||
# mox retrain
|
||||
|
||||
Recreate and retrain the junk filter for the account.
|
||||
Recreate and retrain the junk filter for the account or all accounts.
|
||||
|
||||
Useful after having made changes to the junk filter configuration, or if the
|
||||
implementation has changed.
|
||||
|
||||
usage: mox retrain accountname
|
||||
usage: mox retrain [accountname]
|
||||
|
||||
# mox sendmail
|
||||
|
||||
@ -1351,6 +1528,51 @@ binary should be setgid that group:
|
||||
|
||||
usage: mox sendmail [-Fname] [ignoredflags] [-t] [<message]
|
||||
|
||||
# mox smtp dial
|
||||
|
||||
Dial the address, initialize the SMTP session, including using STARTTLS to enable TLS if the server supports it.
|
||||
|
||||
If no port is specified, SMTP port 25 is used.
|
||||
|
||||
Data is copied between connection and stdin/stdout until either side closes the
|
||||
connection.
|
||||
|
||||
The flags influence the TLS configuration, useful for debugging interoperability
|
||||
issues.
|
||||
|
||||
No MTA-STS or DANE verification is done.
|
||||
|
||||
Hint: Use "mox -loglevel trace smtp dial ..." to see the protocol messages
|
||||
exchanged during connection set up.
|
||||
|
||||
usage: mox smtp dial host[:port]
|
||||
-ehlohostname string
|
||||
our hostname to use during the SMTP EHLO command
|
||||
-forcetls
|
||||
use TLS, even if remote SMTP server does not announce STARTTLS extension
|
||||
-notls
|
||||
do not use TLS
|
||||
-remotehostname string
|
||||
remote hostname to use for TLS verification, if enabled; the hostname from the parameter is used by default
|
||||
-tlscerts string
|
||||
path to root ca certificates in pem form, for verification
|
||||
-tlsciphersuites string
|
||||
ciphersuites to allow, comma-separated, order is ignored, only for TLS 1.2 and earlier, empty value uses TLS stack defaults; values: tls_ecdhe_ecdsa_with_aes_128_cbc_sha, tls_ecdhe_ecdsa_with_aes_128_gcm_sha256, tls_ecdhe_ecdsa_with_aes_256_cbc_sha, tls_ecdhe_ecdsa_with_aes_256_gcm_sha384, tls_ecdhe_ecdsa_with_chacha20_poly1305_sha256, tls_ecdhe_rsa_with_aes_128_cbc_sha, tls_ecdhe_rsa_with_aes_128_gcm_sha256, tls_ecdhe_rsa_with_aes_256_cbc_sha, tls_ecdhe_rsa_with_aes_256_gcm_sha384, tls_ecdhe_rsa_with_chacha20_poly1305_sha256, and insecure: tls_ecdhe_ecdsa_with_aes_128_cbc_sha256, tls_ecdhe_ecdsa_with_rc4_128_sha, tls_ecdhe_rsa_with_3des_ede_cbc_sha, tls_ecdhe_rsa_with_aes_128_cbc_sha256, tls_ecdhe_rsa_with_rc4_128_sha, tls_rsa_with_3des_ede_cbc_sha, tls_rsa_with_aes_128_cbc_sha, tls_rsa_with_aes_128_cbc_sha256, tls_rsa_with_aes_128_gcm_sha256, tls_rsa_with_aes_256_cbc_sha, tls_rsa_with_aes_256_gcm_sha384, tls_rsa_with_rc4_128_sha
|
||||
-tlscurves string
|
||||
tls ecc key exchange mechanisms to allow, comma-separated, order is ignored, empty value uses TLS stack defaults; values: curvep256, curvep384, curvep521, x25519, x25519mlkem768
|
||||
-tlsnodynamicrecordsizing
|
||||
disable TLS dynamic record sizing
|
||||
-tlsnosessiontickets
|
||||
disable TLS session tickets
|
||||
-tlsrenegotiation string
|
||||
when to allow renegotiation; only applies to tls1.2 and earlier, not tls1.3; values: never, once, always (default "never")
|
||||
-tlsverify
|
||||
verify remote hostname during TLS
|
||||
-tlsversionmax string
|
||||
maximum TLS version, empty value uses TLS stack default; values: tls1.2, etc.
|
||||
-tlsversionmin string
|
||||
minimum TLS version, empty value uses TLS stack default; values: tls1.2, etc.
|
||||
|
||||
# mox spf check
|
||||
|
||||
Check the status of IP for the policy published in DNS for the domain.
|
||||
|
@ -1,13 +1,12 @@
version: '3.7'
services:
mox:
build:
context: .
dockerfile: Dockerfile.moximaptest
volumes:
- ./testdata/imaptest/config:/mox/config
- ./testdata/imaptest/data:/mox/data
- ./testdata/imaptest/imaptest.mbox:/mox/imaptest.mbox
- ./testdata/imaptest/config:/mox/config:z
- ./testdata/imaptest/data:/mox/data:z
- ./testdata/imaptest/imaptest.mbox:/mox/imaptest.mbox:z
working_dir: /mox
tty: true # For job control with set -m.
command: sh -c 'set -m; mox serve & sleep 1; echo testtest | mox setaccountpassword mjl; fg'
@ -24,7 +23,7 @@ services:
command: host=mox port=1143 'user=mjl@mox.example' pass=testtest mbox=/imaptest/imaptest.mbox
working_dir: /imaptest
volumes:
- ./testdata/imaptest:/imaptest
- ./testdata/imaptest:/imaptest:z
depends_on:
mox:
condition: service_healthy
@ -1,4 +1,3 @@
|
||||
version: '3.7'
|
||||
services:
|
||||
# We run integration_test.go from this container, it connects to the other mox instances.
|
||||
test:
|
||||
@ -9,11 +8,11 @@ services:
|
||||
# dials in integration_test.go succeed.
|
||||
command: ["sh", "-c", "set -ex; cat /integration/tmp-pebble-ca.pem /integration/tls/ca.pem >>/etc/ssl/certs/ca-certificates.crt; go test -tags integration"]
|
||||
volumes:
|
||||
- ./.go:/.go
|
||||
- ./testdata/integration/resolv.conf:/etc/resolv.conf
|
||||
- ./testdata/integration:/integration
|
||||
- ./testdata/integration/moxsubmit.conf:/etc/moxsubmit.conf
|
||||
- .:/mox
|
||||
- ./.go:/.go:z
|
||||
- ./testdata/integration/resolv.conf:/etc/resolv.conf:z
|
||||
- ./testdata/integration:/integration:z
|
||||
- ./testdata/integration/moxsubmit.conf:/etc/moxsubmit.conf:z
|
||||
- .:/mox:z
|
||||
environment:
|
||||
GOCACHE: /.go/.cache/go-build
|
||||
depends_on:
|
||||
@ -26,6 +25,8 @@ services:
|
||||
condition: service_healthy
|
||||
localserve:
|
||||
condition: service_healthy
|
||||
moxacmepebblealpn:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
mailnet1:
|
||||
ipv4_address: 172.28.1.50
|
||||
@ -39,8 +40,8 @@ services:
|
||||
MOX_UID: "${MOX_UID}"
|
||||
command: ["sh", "-c", "/integration/moxacmepebble.sh"]
|
||||
volumes:
|
||||
- ./testdata/integration/resolv.conf:/etc/resolv.conf
|
||||
- ./testdata/integration:/integration
|
||||
- ./testdata/integration/resolv.conf:/etc/resolv.conf:z
|
||||
- ./testdata/integration:/integration:z
|
||||
healthcheck:
|
||||
test: netstat -nlt | grep ':25 '
|
||||
interval: 1s
|
||||
@ -64,8 +65,8 @@ services:
|
||||
MOX_UID: "${MOX_UID}"
|
||||
command: ["sh", "-c", "/integration/moxmail2.sh"]
|
||||
volumes:
|
||||
- ./testdata/integration/resolv.conf:/etc/resolv.conf
|
||||
- ./testdata/integration:/integration
|
||||
- ./testdata/integration/resolv.conf:/etc/resolv.conf:z
|
||||
- ./testdata/integration:/integration:z
|
||||
healthcheck:
|
||||
test: netstat -nlt | grep ':25 '
|
||||
interval: 1s
|
||||
@ -83,15 +84,40 @@ services:
|
||||
mailnet1:
|
||||
ipv4_address: 172.28.1.20
|
||||
|
||||
# Third mox instance that uses ACME with pebble and has ALPN enabled.
|
||||
moxacmepebblealpn:
|
||||
hostname: moxacmepebblealpn.mox1.example
|
||||
domainname: mox1.example
|
||||
image: mox_integration_moxmail
|
||||
environment:
|
||||
MOX_UID: "${MOX_UID}"
|
||||
command: ["sh", "-c", "/integration/moxacmepebblealpn.sh"]
|
||||
volumes:
|
||||
- ./testdata/integration/resolv.conf:/etc/resolv.conf:z
|
||||
- ./testdata/integration:/integration:z
|
||||
healthcheck:
|
||||
test: netstat -nlt | grep ':25 '
|
||||
interval: 1s
|
||||
timeout: 1s
|
||||
retries: 10
|
||||
depends_on:
|
||||
dns:
|
||||
condition: service_healthy
|
||||
acmepebble:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
mailnet1:
|
||||
ipv4_address: 172.28.1.80
|
||||
|
||||
localserve:
|
||||
hostname: localserve.mox1.example
|
||||
domainname: mox1.example
|
||||
image: mox_integration_moxmail
|
||||
command: ["sh", "-c", "set -e; chmod o+r /etc/resolv.conf; mox -checkconsistency localserve -ip 172.28.1.60"]
|
||||
volumes:
|
||||
- ./.go:/.go
|
||||
- ./testdata/integration/resolv.conf:/etc/resolv.conf
|
||||
- .:/mox
|
||||
- ./.go:/.go:z
|
||||
- ./testdata/integration/resolv.conf:/etc/resolv.conf:z
|
||||
- .:/mox:z
|
||||
environment:
|
||||
GOCACHE: /.go/.cache/go-build
|
||||
healthcheck:
|
||||
@ -114,7 +140,7 @@ services:
|
||||
context: testdata/integration
|
||||
volumes:
|
||||
# todo: figure out how to mount files with a uid that the process in the container can read...
|
||||
- ./testdata/integration/resolv.conf:/etc/resolv.conf
|
||||
- ./testdata/integration/resolv.conf:/etc/resolv.conf:z
|
||||
command: ["sh", "-c", "set -e; chmod o+r /etc/resolv.conf; (echo 'maillog_file = /dev/stdout'; echo 'mydestination = $$myhostname, localhost.$$mydomain, localhost, $$mydomain'; echo 'smtp_tls_security_level = may') >>/etc/postfix/main.cf; echo 'root: postfix@mox1.example' >>/etc/postfix/aliases; newaliases; postfix start-fg"]
|
||||
healthcheck:
|
||||
test: netstat -nlt | grep ':25 '
|
||||
@ -135,8 +161,8 @@ services:
|
||||
# todo: figure out how to build from dockerfile with empty context without creating empty dirs in file system.
|
||||
context: testdata/integration
|
||||
volumes:
|
||||
- ./testdata/integration/resolv.conf:/etc/resolv.conf
|
||||
- ./testdata/integration:/integration
|
||||
- ./testdata/integration/resolv.conf:/etc/resolv.conf:z
|
||||
- ./testdata/integration:/integration:z
|
||||
# We start with a base example.zone, but moxacmepebble appends its records,
|
||||
# followed by moxmail2. They restart unbound after appending records.
|
||||
command: ["sh", "-c", "set -ex; ls -l /etc/resolv.conf; chmod o+r /etc/resolv.conf; install -m 640 -o unbound /integration/unbound.conf /etc/unbound/; chmod 755 /integration; chmod 644 /integration/*.zone; cp /integration/example.zone /integration/example-integration.zone; ls -ld /integration /integration/reverse.zone; unbound -d -p -v"]
|
||||
@ -156,8 +182,8 @@ services:
|
||||
hostname: acmepebble.example
|
||||
image: docker.io/letsencrypt/pebble:v2.3.1@sha256:fc5a537bf8fbc7cc63aa24ec3142283aa9b6ba54529f86eb8ff31fbde7c5b258
|
||||
volumes:
|
||||
- ./testdata/integration/resolv.conf:/etc/resolv.conf
|
||||
- ./testdata/integration:/integration
|
||||
- ./testdata/integration/resolv.conf:/etc/resolv.conf:z
|
||||
- ./testdata/integration:/integration:z
|
||||
command: ["sh", "-c", "set -ex; mount; ls -l /etc/resolv.conf; chmod o+r /etc/resolv.conf; pebble -config /integration/pebble-config.json"]
|
||||
ports:
|
||||
- 14000:14000 # ACME port
|
||||
|
@ -27,7 +27,6 @@
|
||||
# The -ip flag ensures connections to the published ports make it to mox, and it
|
||||
# prevents listening on ::1 (IPv6 is not enabled in docker by default).
|
||||
|
||||
version: '3.7'
|
||||
services:
|
||||
mox:
|
||||
# Replace "latest" with the version you want to run, see https://r.xmox.nl/r/mox/.
|
||||
@ -39,11 +38,11 @@ services:
|
||||
# machine, and the IPs of incoming connections for spam filtering.
|
||||
network_mode: 'host'
|
||||
volumes:
|
||||
- ./config:/mox/config
|
||||
- ./data:/mox/data
|
||||
- ./config:/mox/config:z
|
||||
- ./data:/mox/data:z
|
||||
# web is optional but recommended to bind in, useful for serving static files with
|
||||
# the webserver.
|
||||
- ./web:/mox/web
|
||||
- ./web:/mox/web:z
|
||||
working_dir: /mox
|
||||
restart: on-failure
|
||||
healthcheck:
|
||||
|
@@ -340,10 +340,7 @@ func (m *Message) Compose(log mlog.Log, smtputf8 bool) ([]byte, error) {
 	data := base64.StdEncoding.EncodeToString(headers)
 	for len(data) > 0 {
 		line := data
-		n := len(line)
-		if n > 78 {
-			n = 78
-		}
+		n := min(len(line), 76) // ../rfc/2045:1372
 		line, data = data[:n], data[n:]
 		if _, err := origp.Write([]byte(line + "\r\n")); err != nil {
 			return nil, err
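The hunk above tightens the wrapping of base64-encoded header data from 78 to 76 characters per line, the limit RFC 2045 sets for base64 Content-Transfer-Encoding. A standalone sketch of the same loop:

	package main

	import (
		"encoding/base64"
		"fmt"
	)

	// wrapBase64 encodes data and emits it in CRLF-terminated lines of at most
	// 76 characters, mirroring the loop in the hunk above.
	func wrapBase64(raw []byte) string {
		data := base64.StdEncoding.EncodeToString(raw)
		var out string
		for len(data) > 0 {
			n := min(len(data), 76)
			out += data[:n] + "\r\n"
			data = data[n:]
		}
		return out
	}

	func main() {
		fmt.Print(wrapBase64([]byte("Subject: a long header that needs base64 encoding...")))
	}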
@ -50,8 +50,8 @@ func tcheckType(t *testing.T, p *message.Part, mt, mst, cte string) {
|
||||
if !strings.EqualFold(p.MediaSubType, mst) {
|
||||
t.Fatalf("got mediasubtype %q, expected %q", p.MediaSubType, mst)
|
||||
}
|
||||
if !strings.EqualFold(p.ContentTransferEncoding, cte) {
|
||||
t.Fatalf("got content-transfer-encoding %q, expected %q", p.ContentTransferEncoding, cte)
|
||||
if !(cte == "" && p.ContentTransferEncoding == nil || cte != "" && p.ContentTransferEncoding != nil && strings.EqualFold(cte, *p.ContentTransferEncoding)) {
|
||||
t.Fatalf("got content-transfer-encoding %v, expected %v", p.ContentTransferEncoding, cte)
|
||||
}
|
||||
}
|
||||
|
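In the hunk above, ContentTransferEncoding has become a pointer so that a missing header can be distinguished from an empty one; the test now checks nil-ness and dereferences before comparing. A hypothetical helper showing the same comparison rule:

	package main

	import (
		"fmt"
		"strings"
	)

	// cteMatches reports whether the got pointer matches the expected value:
	// expecting "" means the header must be absent (nil), otherwise the pointed-to
	// value must match case-insensitively.
	func cteMatches(got *string, want string) bool {
		if want == "" {
			return got == nil
		}
		return got != nil && strings.EqualFold(*got, want)
	}

	func main() {
		enc := "base64"
		fmt.Println(cteMatches(&enc, "BASE64")) // true
		fmt.Println(cteMatches(nil, ""))        // true
		fmt.Println(cteMatches(nil, "base64"))  // false
	}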
||||
|
13	dsn/parse.go
@ -14,6 +14,7 @@ import (
|
||||
"github.com/mjl-/mox/message"
|
||||
"github.com/mjl-/mox/mlog"
|
||||
"github.com/mjl-/mox/smtp"
|
||||
"slices"
|
||||
)
|
||||
|
||||
// Parse reads a DSN message.
|
||||
@@ -217,15 +218,9 @@ func parseRecipientHeader(mr *textproto.Reader, utf8 bool) (Recipient, error) {
 		case "Action":
 			a := Action(strings.ToLower(v))
 			actions := []Action{Failed, Delayed, Delivered, Relayed, Expanded}
-			var ok bool
-			for _, x := range actions {
-				if a == x {
-					ok = true
-					r.Action = a
-					break
-				}
-			}
-			if !ok {
+			if slices.Contains(actions, a) {
+				r.Action = a
+			} else {
 				err = fmt.Errorf("unrecognized action %q", v)
 			}
 		case "Status":
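The hunk above replaces a hand-written membership loop with slices.Contains from the standard library (imported at the top of the file). A small, self-contained sketch of the same validation pattern, with the Action values assumed from RFC 3464 rather than taken from mox's dsn package:

	package main

	import (
		"fmt"
		"slices"
		"strings"
	)

	type Action string

	// DSN action values from RFC 3464; assumed here for illustration.
	const (
		Failed    Action = "failed"
		Delayed   Action = "delayed"
		Delivered Action = "delivered"
		Relayed   Action = "relayed"
		Expanded  Action = "expanded"
	)

	// parseAction accepts only recognized actions, like the hunk above.
	func parseAction(v string) (Action, error) {
		a := Action(strings.ToLower(v))
		actions := []Action{Failed, Delayed, Delivered, Relayed, Expanded}
		if slices.Contains(actions, a) {
			return a, nil
		}
		return "", fmt.Errorf("unrecognized action %q", v)
	}

	func main() {
		fmt.Println(parseAction("Delayed")) // delayed <nil>
		fmt.Println(parseAction("bounced")) // error: unrecognized action
	}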
@ -62,7 +62,8 @@ func xcmdExport(mbox, single bool, args []string, c *cmd) {
|
||||
}
|
||||
|
||||
dbpath := filepath.Join(accountDir, "index.db")
|
||||
db, err := bstore.Open(context.Background(), dbpath, &bstore.Options{Timeout: 5 * time.Second, Perm: 0660}, store.DBTypes...)
|
||||
opts := bstore.Options{Timeout: 5 * time.Second, Perm: 0660, RegisterLogger: c.log.Logger}
|
||||
db, err := bstore.Open(context.Background(), dbpath, &opts, store.DBTypes...)
|
||||
xcheckf(err, "open database %q", dbpath)
|
||||
defer func() {
|
||||
if err := db.Close(); err != nil {
|
||||
@ -71,7 +72,7 @@ func xcmdExport(mbox, single bool, args []string, c *cmd) {
|
||||
}()
|
||||
|
||||
a := store.DirArchiver{Dir: dst}
|
||||
err = store.ExportMessages(context.Background(), c.log, db, accountDir, a, !mbox, mailbox, !single)
|
||||
err = store.ExportMessages(context.Background(), c.log, db, accountDir, a, !mbox, mailbox, nil, !single)
|
||||
xcheckf(err, "exporting messages")
|
||||
err = a.Close()
|
||||
xcheckf(err, "closing archiver")
|
||||
|
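The export hunk above now builds a bstore.Options value (lock timeout, file permissions, and a logger) instead of passing an inline literal. A sketch of opening a database that way, using only the calls visible in the hunk; the Message type here is a stand-in for mox's store.DBTypes:

	package main

	import (
		"context"
		"log/slog"
		"os"
		"time"

		"github.com/mjl-/bstore"
	)

	// Message is a stand-in record type; mox passes store.DBTypes... here.
	type Message struct {
		ID int64
	}

	func main() {
		opts := bstore.Options{Timeout: 5 * time.Second, Perm: 0660, RegisterLogger: slog.Default()}
		db, err := bstore.Open(context.Background(), "index.db", &opts, Message{})
		if err != nil {
			slog.Error("open database", "err", err)
			os.Exit(1)
		}
		defer db.Close()
	}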
10	genapidoc.sh (new executable file)
@ -0,0 +1,10 @@
|
||||
#!/bin/sh
|
||||
set -eu
|
||||
|
||||
# we rewrite some dmarcprt and tlsrpt enums into untyped strings: real-world
|
||||
# reports have invalid values, and our loose Go typed strings accept all values,
|
||||
# but we don't want the typescript runtime checker to fail on those unrecognized
|
||||
# values.
|
||||
(cd webadmin && CGO_ENABLED=0 go run ../vendor/github.com/mjl-/sherpadoc/cmd/sherpadoc/*.go -adjust-function-names none -rename 'config Domain ConfigDomain,dmarc Policy DMARCPolicy,mtasts MX STSMX,tlsrptdb Record TLSReportRecord,tlsrptdb SuppressAddress TLSRPTSuppressAddress,dmarcrpt DKIMResult string,dmarcrpt SPFResult string,dmarcrpt SPFDomainScope string,dmarcrpt DMARCResult string,dmarcrpt PolicyOverride string,dmarcrpt Alignment string,dmarcrpt Disposition string,tlsrpt PolicyType string,tlsrpt ResultType string' Admin) >webadmin/api.json
|
||||
(cd webaccount && CGO_ENABLED=0 go run ../vendor/github.com/mjl-/sherpadoc/cmd/sherpadoc/*.go -adjust-function-names none Account) >webaccount/api.json
|
||||
(cd webmail && CGO_ENABLED=0 go run ../vendor/github.com/mjl-/sherpadoc/cmd/sherpadoc/*.go -adjust-function-names none Webmail) >webmail/api.json
|
@ -26,7 +26,7 @@ any parameters. Followed by the help and usage information for each command.
|
||||
|
||||
EOF
|
||||
|
||||
./mox 2>&1 | sed -e 's/^usage: */\t/' -e 's/^ */\t/'
|
||||
./mox 2>&1 | sed -e 's/^usage: */ /' -e 's/^ */ /'
|
||||
echo
|
||||
./mox helpall 2>&1
|
||||
|
||||
@ -80,14 +80,14 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
# mox.conf
|
||||
|
||||
EOF
|
||||
./mox config describe-static | sed 's/^/\t/'
|
||||
./mox config describe-static | sed 's/^/ /'
|
||||
|
||||
cat <<EOF
|
||||
|
||||
# domains.conf
|
||||
|
||||
EOF
|
||||
./mox config describe-domains | sed 's/^/\t/'
|
||||
./mox config describe-domains | sed 's/^/ /'
|
||||
|
||||
cat <<EOF
|
||||
|
||||
@ -102,7 +102,7 @@ EOF
|
||||
for ex in $(./mox config example); do
|
||||
echo '# Example '$ex
|
||||
echo
|
||||
./mox config example $ex | sed 's/^/\t/'
|
||||
./mox config example $ex | sed 's/^/ /'
|
||||
echo
|
||||
done
|
||||
|
||||
|
7	genlicenses.sh (new executable file)
@ -0,0 +1,7 @@
|
||||
#!/bin/sh
|
||||
rm -r licenses
|
||||
set -e
|
||||
for p in $(cd vendor && find . -iname '*license*' -or -iname '*licence*' -or -iname '*notice*' -or -iname '*patent*'); do
|
||||
(set +e; mkdir -p $(dirname licenses/$p))
|
||||
cp vendor/$p licenses/$p
|
||||
done
|
@ -30,7 +30,7 @@ import (
|
||||
|
||||
func cmdGentestdata(c *cmd) {
|
||||
c.unlisted = true
|
||||
c.params = "dest-dir"
|
||||
c.params = "destdir"
|
||||
c.help = `Generate a data directory populated, for testing upgrades.`
|
||||
args := c.Parse()
|
||||
if len(args) != 1 {
|
||||
@ -187,6 +187,12 @@ Accounts:
|
||||
err = os.WriteFile(filepath.Join(destDataDir, "moxversion"), []byte(moxvar.Version), 0660)
|
||||
xcheckf(err, "writing moxversion")
|
||||
|
||||
// Populate auth.db
|
||||
err = store.Init(ctxbg)
|
||||
xcheckf(err, "store init")
|
||||
err = store.TLSPublicKeyAdd(ctxbg, &store.TLSPublicKey{Name: "testkey", Fingerprint: "...", Type: "ecdsa-p256", CertDER: []byte("..."), Account: "test0", LoginAddress: "test0@mox.example"})
|
||||
xcheckf(err, "adding tlspubkey")
|
||||
|
||||
// Populate dmarc.db.
|
||||
err = dmarcdb.Init()
|
||||
xcheckf(err, "dmarcdb init")
|
||||
@ -228,8 +234,7 @@ Accounts:
|
||||
prefix := []byte{}
|
||||
mf := tempfile()
|
||||
xcheckf(err, "temp file for queue message")
|
||||
defer os.Remove(mf.Name())
|
||||
defer mf.Close()
|
||||
defer store.CloseRemoveTempFile(c.log, mf, "test message")
|
||||
const qmsg = "From: <test0@mox.example>\r\nTo: <other@remote.example>\r\nSubject: test\r\n\r\nthe message...\r\n"
|
||||
_, err = fmt.Fprint(mf, qmsg)
|
||||
xcheckf(err, "writing message")
|
||||
@ -239,7 +244,7 @@ Accounts:
|
||||
|
||||
// Create three accounts.
|
||||
// First account without messages.
|
||||
accTest0, err := store.OpenAccount(c.log, "test0")
|
||||
accTest0, err := store.OpenAccount(c.log, "test0", false)
|
||||
xcheckf(err, "open account test0")
|
||||
err = accTest0.ThreadingWait(c.log)
|
||||
xcheckf(err, "wait for threading to finish")
|
||||
@ -247,7 +252,7 @@ Accounts:
|
||||
xcheckf(err, "close account")
|
||||
|
||||
// Second account with one message.
|
||||
accTest1, err := store.OpenAccount(c.log, "test1")
|
||||
accTest1, err := store.OpenAccount(c.log, "test1", false)
|
||||
xcheckf(err, "open account test1")
|
||||
err = accTest1.ThreadingWait(c.log)
|
||||
xcheckf(err, "wait for threading to finish")
|
||||
@ -258,7 +263,6 @@ Accounts:
|
||||
m := store.Message{
|
||||
MailboxID: inbox.ID,
|
||||
MailboxOrigID: inbox.ID,
|
||||
MailboxDestinedID: inbox.ID,
|
||||
RemoteIP: "1.2.3.4",
|
||||
RemoteIPMasked1: "1.2.3.4",
|
||||
RemoteIPMasked2: "1.2.3.0",
|
||||
@ -283,20 +287,13 @@ Accounts:
|
||||
}
|
||||
mf := tempfile()
|
||||
xcheckf(err, "creating temp file for delivery")
|
||||
defer store.CloseRemoveTempFile(c.log, mf, "test message")
|
||||
_, err = fmt.Fprint(mf, msg)
|
||||
xcheckf(err, "writing deliver message to file")
|
||||
err = accTest1.DeliverMessage(c.log, tx, &m, mf, false, true, false, true)
|
||||
|
||||
mfname := mf.Name()
|
||||
xcheckf(err, "add message to account test1")
|
||||
err = mf.Close()
|
||||
xcheckf(err, "closing file")
|
||||
err = os.Remove(mfname)
|
||||
xcheckf(err, "removing temp message file")
|
||||
err = accTest1.MessageAdd(c.log, tx, &inbox, &m, mf, store.AddOpts{})
|
||||
xcheckf(err, "deliver message")
|
||||
|
||||
err = tx.Get(&inbox)
|
||||
xcheckf(err, "get inbox")
|
||||
inbox.Add(m.MailboxCounts())
|
||||
err = tx.Update(&inbox)
|
||||
xcheckf(err, "update inbox")
|
||||
|
||||
@ -307,7 +304,7 @@ Accounts:
|
||||
xcheckf(err, "close account")
|
||||
|
||||
// Third account with two messages and junkfilter.
|
||||
accTest2, err := store.OpenAccount(c.log, "test2")
|
||||
accTest2, err := store.OpenAccount(c.log, "test2", false)
|
||||
xcheckf(err, "open account test2")
|
||||
err = accTest2.ThreadingWait(c.log)
|
||||
xcheckf(err, "wait for threading to finish")
|
||||
@ -318,7 +315,6 @@ Accounts:
|
||||
m0 := store.Message{
|
||||
MailboxID: inbox.ID,
|
||||
MailboxOrigID: inbox.ID,
|
||||
MailboxDestinedID: inbox.ID,
|
||||
RemoteIP: "::1",
|
||||
RemoteIPMasked1: "::",
|
||||
RemoteIPMasked2: "::",
|
||||
@ -343,20 +339,11 @@ Accounts:
|
||||
}
|
||||
mf0 := tempfile()
|
||||
xcheckf(err, "creating temp file for delivery")
|
||||
defer store.CloseRemoveTempFile(c.log, mf0, "test message")
|
||||
_, err = fmt.Fprint(mf0, msg0)
|
||||
xcheckf(err, "writing deliver message to file")
|
||||
err = accTest2.DeliverMessage(c.log, tx, &m0, mf0, false, false, false, true)
|
||||
err = accTest2.MessageAdd(c.log, tx, &inbox, &m0, mf0, store.AddOpts{})
|
||||
xcheckf(err, "add message to account test2")
|
||||
|
||||
mf0name := mf0.Name()
|
||||
err = mf0.Close()
|
||||
xcheckf(err, "closing file")
|
||||
err = os.Remove(mf0name)
|
||||
xcheckf(err, "removing temp message file")
|
||||
|
||||
err = tx.Get(&inbox)
|
||||
xcheckf(err, "get inbox")
|
||||
inbox.Add(m0.MailboxCounts())
|
||||
err = tx.Update(&inbox)
|
||||
xcheckf(err, "update inbox")
|
||||
|
||||
@ -365,29 +352,19 @@ Accounts:
|
||||
const prefix1 = "Extra: test\r\n"
|
||||
const msg1 = "From: <other@remote.example>\r\nTo: <☹@xn--74h.example>\r\nSubject: test\r\n\r\nthe message...\r\n"
|
||||
m1 := store.Message{
|
||||
MailboxID: sent.ID,
|
||||
MailboxOrigID: sent.ID,
|
||||
MailboxDestinedID: sent.ID,
|
||||
Flags: store.Flags{Seen: true, Junk: true},
|
||||
Size: int64(len(prefix1) + len(msg1)),
|
||||
MsgPrefix: []byte(prefix1),
|
||||
MailboxID: sent.ID,
|
||||
MailboxOrigID: sent.ID,
|
||||
Flags: store.Flags{Seen: true, Junk: true},
|
||||
Size: int64(len(prefix1) + len(msg1)),
|
||||
MsgPrefix: []byte(prefix1),
|
||||
}
|
||||
mf1 := tempfile()
|
||||
xcheckf(err, "creating temp file for delivery")
|
||||
defer store.CloseRemoveTempFile(c.log, mf1, "test message")
|
||||
_, err = fmt.Fprint(mf1, msg1)
|
||||
xcheckf(err, "writing deliver message to file")
|
||||
err = accTest2.DeliverMessage(c.log, tx, &m1, mf1, false, false, false, true)
|
||||
err = accTest2.MessageAdd(c.log, tx, &sent, &m1, mf1, store.AddOpts{})
|
||||
xcheckf(err, "add message to account test2")
|
||||
|
||||
mf1name := mf1.Name()
|
||||
err = mf1.Close()
|
||||
xcheckf(err, "closing file")
|
||||
err = os.Remove(mf1name)
|
||||
xcheckf(err, "removing temp message file")
|
||||
|
||||
err = tx.Get(&sent)
|
||||
xcheckf(err, "get sent")
|
||||
sent.Add(m1.MailboxCounts())
|
||||
err = tx.Update(&sent)
|
||||
xcheckf(err, "update sent")
|
||||
|
||||
|
@ -24,7 +24,7 @@ mkdir html/features
|
||||
(
|
||||
cat features/index.md
|
||||
echo
|
||||
sed -n -e '/# FAQ/q' -e '/## Roadmap/,/# FAQ/p' < ../README.md
|
||||
sed -n -e 's/^# Roadmap/## Roadmap/' -e '/# FAQ/q' -e '/# Roadmap/,/# FAQ/p' < ../README.md
|
||||
echo
|
||||
echo 'Also see the [Protocols](../protocols/) page for implementation status, and (non)-plans.'
|
||||
) | go run website.go 'Features' >html/features/index.html
|
||||
|
30	go.mod
@ -1,23 +1,24 @@
|
||||
module github.com/mjl-/mox
|
||||
|
||||
go 1.21
|
||||
go 1.23.0
|
||||
|
||||
require (
|
||||
github.com/mjl-/adns v0.0.0-20240309142737-2a1aacf346dc
|
||||
github.com/mjl-/autocert v0.0.0-20231214125928-31b7400acb05
|
||||
github.com/mjl-/bstore v0.0.5
|
||||
github.com/mjl-/sconf v0.0.6
|
||||
github.com/mjl-/adns v0.0.0-20250321173553-ab04b05bdfea
|
||||
github.com/mjl-/autocert v0.0.0-20250321204043-abab2b936e31
|
||||
github.com/mjl-/bstore v0.0.9
|
||||
github.com/mjl-/flate v0.0.0-20250221133712-6372d09eb978
|
||||
github.com/mjl-/sconf v0.0.7
|
||||
github.com/mjl-/sherpa v0.6.7
|
||||
github.com/mjl-/sherpadoc v0.0.14
|
||||
github.com/mjl-/sherpadoc v0.0.16
|
||||
github.com/mjl-/sherpaprom v0.0.2
|
||||
github.com/mjl-/sherpats v0.0.6
|
||||
github.com/prometheus/client_golang v1.18.0
|
||||
github.com/russross/blackfriday/v2 v2.1.0
|
||||
go.etcd.io/bbolt v1.3.9
|
||||
golang.org/x/crypto v0.22.0
|
||||
golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f
|
||||
golang.org/x/net v0.24.0
|
||||
golang.org/x/text v0.14.0
|
||||
go.etcd.io/bbolt v1.3.11
|
||||
golang.org/x/crypto v0.37.0
|
||||
golang.org/x/net v0.39.0
|
||||
golang.org/x/sys v0.32.0
|
||||
golang.org/x/text v0.24.0
|
||||
rsc.io/qr v0.2.0
|
||||
)
|
||||
|
||||
@ -29,9 +30,8 @@ require (
|
||||
github.com/prometheus/client_model v0.5.0 // indirect
|
||||
github.com/prometheus/common v0.45.0 // indirect
|
||||
github.com/prometheus/procfs v0.12.0 // indirect
|
||||
golang.org/x/mod v0.17.0 // indirect
|
||||
golang.org/x/sync v0.7.0 // indirect
|
||||
golang.org/x/sys v0.19.0 // indirect
|
||||
golang.org/x/tools v0.20.0 // indirect
|
||||
golang.org/x/mod v0.24.0 // indirect
|
||||
golang.org/x/sync v0.13.0 // indirect
|
||||
golang.org/x/tools v0.32.0 // indirect
|
||||
google.golang.org/protobuf v1.31.0 // indirect
|
||||
)
|
||||
|
60	go.sum
@ -16,27 +16,29 @@ github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg=
|
||||
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k=
|
||||
github.com/mjl-/adns v0.0.0-20240309142737-2a1aacf346dc h1:ghTx3KsrO0hSJW0bCFCGwjSrYeXZ6Bj5hdv9FTTFV4M=
|
||||
github.com/mjl-/adns v0.0.0-20240309142737-2a1aacf346dc/go.mod h1:v47qUMJnipnmDTRGaHwpCwzE6oypa5K33mUvBfzZBn8=
|
||||
github.com/mjl-/autocert v0.0.0-20231214125928-31b7400acb05 h1:s6ay4bh4tmpPLdxjyeWG45mcwHfEluBMuGPkqxHWUJ4=
|
||||
github.com/mjl-/autocert v0.0.0-20231214125928-31b7400acb05/go.mod h1:taMFU86abMxKLPV4Bynhv8enbYmS67b8LG80qZv2Qus=
|
||||
github.com/mjl-/bstore v0.0.5 h1:Cx+LWEBnFBsqSxZNMxeVujkfc0kG10lUJaAU4vWSRHo=
|
||||
github.com/mjl-/bstore v0.0.5/go.mod h1:/cD25FNBaDfvL/plFRxI3Ba3E+wcB0XVOS8nJDqndg0=
|
||||
github.com/mjl-/sconf v0.0.6 h1:5Dt58488ZOoVx680zgK2K3vUrokLsp5mXDUACrJlrUc=
|
||||
github.com/mjl-/sconf v0.0.6/go.mod h1:uF8OdWtLT8La3i4ln176i1pB0ps9pXGCaABEU55ZkE0=
|
||||
github.com/mjl-/adns v0.0.0-20250321173553-ab04b05bdfea h1:8dftsVL1tHhRksXzFZRhSJ7gSlcy/t87Nvucs3JnTGE=
|
||||
github.com/mjl-/adns v0.0.0-20250321173553-ab04b05bdfea/go.mod h1:rWZMqGA2HoBm5b5q/A5J8u1sSVuEYh6zBz9tMoVs+RU=
|
||||
github.com/mjl-/autocert v0.0.0-20250321204043-abab2b936e31 h1:6MFGOLPGf6VzHWkKv8waSzJMMS98EFY2LVKPRHffCyo=
|
||||
github.com/mjl-/autocert v0.0.0-20250321204043-abab2b936e31/go.mod h1:taMFU86abMxKLPV4Bynhv8enbYmS67b8LG80qZv2Qus=
|
||||
github.com/mjl-/bstore v0.0.9 h1:j8HVXL10Arbk4ujeRGwns8gipH1N1TZn853inQ42FgY=
|
||||
github.com/mjl-/bstore v0.0.9/go.mod h1:xzIpSfcFosgPJ6h+vsdIt0pzCq4i8hhMuHPQJ0aHQhM=
|
||||
github.com/mjl-/flate v0.0.0-20250221133712-6372d09eb978 h1:Eg5DfI3/00URzGErujKus6a3O0kyXzF8vjoDZzH/gig=
|
||||
github.com/mjl-/flate v0.0.0-20250221133712-6372d09eb978/go.mod h1:QBkFtjai3AiQQuUu7pVh6PA06Vd3oa68E+vddf/UBOs=
|
||||
github.com/mjl-/sconf v0.0.7 h1:bdBcSFZCDFMm/UdBsgNCsjkYmKrSgYwp7rAOoufwHe4=
|
||||
github.com/mjl-/sconf v0.0.7/go.mod h1:uF8OdWtLT8La3i4ln176i1pB0ps9pXGCaABEU55ZkE0=
|
||||
github.com/mjl-/sherpa v0.6.7 h1:C5F8XQdV5nCuS4fvB+ye/ziUQrajEhOoj/t2w5T14BY=
|
||||
github.com/mjl-/sherpa v0.6.7/go.mod h1:dSpAOdgpwdqQZ72O4n3EHo/tR68eKyan8tYYraUMPNc=
|
||||
github.com/mjl-/sherpadoc v0.0.0-20190505200843-c0a7f43f5f1d/go.mod h1:5khTKxoKKNXcB8bkVUO6GlzC7PFtMmkHq578lPbmnok=
|
||||
github.com/mjl-/sherpadoc v0.0.14 h1:Xrdg8RhAmTDQXlEU+qDSlige4zfhMHr+VKBJNpPeWe4=
|
||||
github.com/mjl-/sherpadoc v0.0.14/go.mod h1:vh5zcsk3j/Tvm725EY+unTZb3EZcZcpiEQzrODSa6+I=
|
||||
github.com/mjl-/sherpadoc v0.0.16 h1:BdlFNXfnTaA7qO54kof4xpNFJxYBTY0cIObRk7QAP6M=
|
||||
github.com/mjl-/sherpadoc v0.0.16/go.mod h1:vh5zcsk3j/Tvm725EY+unTZb3EZcZcpiEQzrODSa6+I=
|
||||
github.com/mjl-/sherpaprom v0.0.2 h1:1dlbkScsNafM5jURI44uiWrZMSwfZtcOFEEq7vx2C1Y=
|
||||
github.com/mjl-/sherpaprom v0.0.2/go.mod h1:cl5nMNOvqhzMiQJ2FzccQ9ReivjHXe53JhOVkPfSvw4=
|
||||
github.com/mjl-/sherpats v0.0.6 h1:2lSoJbb+jkjLOdlvoMxItq0QQrrnkH+rnm3PMRfpbmA=
|
||||
@ -68,40 +70,38 @@ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
go.etcd.io/bbolt v1.3.9 h1:8x7aARPEXiXbHmtUwAIv7eV2fQFHrLLavdiJ3uzJXoI=
|
||||
go.etcd.io/bbolt v1.3.9/go.mod h1:zaO32+Ti0PK1ivdPtgMESzuzL2VPoIG1PCQNvOdo/dE=
|
||||
go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0=
|
||||
go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30=
|
||||
golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
|
||||
golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f h1:99ci1mjWVBWwJiEKYY6jWa4d2nTQVIEhZIptnrVb1XY=
|
||||
golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f/go.mod h1:/lliqkxwWAhPjf5oSOIJup2XcqJaw8RGS6k3TGEc7GI=
|
||||
golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE=
|
||||
golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc=
|
||||
golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
|
||||
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
|
||||
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU=
|
||||
golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w=
|
||||
golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
|
||||
golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY=
|
||||
golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
|
||||
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610=
|
||||
golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o=
|
||||
golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20=
|
||||
golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
|
||||
golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.20.0 h1:hz/CVckiOxybQvFw6h7b/q80NTr9IUQb4s1IIzW7KNY=
|
||||
golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg=
|
||||
golang.org/x/tools v0.32.0 h1:Q7N1vhpkQv7ybVzLFtTjvQya2ewbwNDZzUgfXGqtMWU=
|
||||
golang.org/x/tools v0.32.0/go.mod h1:ZxrU41P/wAbZD8EDa6dDCa6XfpkhJ7HFMjHJXfBDu8s=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
|
167	http/autoconf.go
@ -11,7 +11,8 @@ import (
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"rsc.io/qr"
|
||||
|
||||
"github.com/mjl-/mox/mox-"
|
||||
"github.com/mjl-/mox/admin"
|
||||
"github.com/mjl-/mox/dns"
|
||||
"github.com/mjl-/mox/smtp"
|
||||
)
|
||||
|
||||
@ -64,19 +65,35 @@ func autoconfHandle(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
email := r.FormValue("emailaddress")
|
||||
log.Debug("autoconfig request", slog.String("email", email))
|
||||
addr, err := smtp.ParseAddress(email)
|
||||
if err != nil {
|
||||
http.Error(w, "400 - bad request - invalid parameter emailaddress", http.StatusBadRequest)
|
||||
return
|
||||
var domain dns.Domain
|
||||
if email == "" {
|
||||
email = "%EMAILADDRESS%"
|
||||
// Declare this here rather than using := to avoid shadowing domain from
|
||||
// the outer scope.
|
||||
var err error
|
||||
domain, err = dns.ParseDomain(r.Host)
|
||||
if err != nil {
|
||||
http.Error(w, fmt.Sprintf("400 - bad request - invalid domain: %s", r.Host), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
domain.ASCII = strings.TrimPrefix(domain.ASCII, "autoconfig.")
|
||||
domain.Unicode = strings.TrimPrefix(domain.Unicode, "autoconfig.")
|
||||
} else {
|
||||
addr, err := smtp.ParseAddress(email)
|
||||
if err != nil {
|
||||
http.Error(w, "400 - bad request - invalid parameter emailaddress", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
domain = addr.Domain
|
||||
}
|
||||
|
||||
socketType := func(tlsMode mox.TLSMode) (string, error) {
|
||||
socketType := func(tlsMode admin.TLSMode) (string, error) {
|
||||
switch tlsMode {
|
||||
case mox.TLSModeImmediate:
|
||||
case admin.TLSModeImmediate:
|
||||
return "SSL", nil
|
||||
case mox.TLSModeSTARTTLS:
|
||||
case admin.TLSModeSTARTTLS:
|
||||
return "STARTTLS", nil
|
||||
case mox.TLSModeNone:
|
||||
case admin.TLSModeNone:
|
||||
return "plain", nil
|
||||
default:
|
||||
return "", fmt.Errorf("unknown tls mode %v", tlsMode)
|
||||
@ -84,7 +101,7 @@ func autoconfHandle(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
var imapTLS, submissionTLS string
|
||||
config, err := mox.ClientConfigDomain(addr.Domain)
|
||||
config, err := admin.ClientConfigDomain(domain)
|
||||
if err == nil {
|
||||
imapTLS, err = socketType(config.IMAP.TLSMode)
|
||||
}
|
||||
@ -99,37 +116,67 @@ func autoconfHandle(w http.ResponseWriter, r *http.Request) {
|
||||
// Thunderbird doesn't seem to allow U-labels, always return ASCII names.
|
||||
var resp autoconfigResponse
|
||||
resp.Version = "1.1"
|
||||
resp.EmailProvider.ID = addr.Domain.ASCII
|
||||
resp.EmailProvider.Domain = addr.Domain.ASCII
|
||||
resp.EmailProvider.ID = domain.ASCII
|
||||
resp.EmailProvider.Domain = domain.ASCII
|
||||
resp.EmailProvider.DisplayName = email
|
||||
resp.EmailProvider.DisplayShortName = addr.Domain.ASCII
|
||||
resp.EmailProvider.DisplayShortName = domain.ASCII
|
||||
|
||||
// todo: specify SCRAM-SHA-256 once thunderbird and autoconfig supports it. or perhaps that will fall under "password-encrypted" by then.
|
||||
// todo: let user configure they prefer or require tls client auth and specify "TLS-client-cert"
|
||||
|
||||
resp.EmailProvider.IncomingServer.Type = "imap"
|
||||
resp.EmailProvider.IncomingServer.Hostname = config.IMAP.Host.ASCII
|
||||
resp.EmailProvider.IncomingServer.Port = config.IMAP.Port
|
||||
resp.EmailProvider.IncomingServer.SocketType = imapTLS
|
||||
resp.EmailProvider.IncomingServer.Username = email
|
||||
resp.EmailProvider.IncomingServer.Authentication = "password-encrypted"
|
||||
incoming := incomingServer{
|
||||
"imap",
|
||||
config.IMAP.Host.ASCII,
|
||||
config.IMAP.Port,
|
||||
imapTLS,
|
||||
email,
|
||||
"password-encrypted",
|
||||
}
|
||||
resp.EmailProvider.IncomingServers = append(resp.EmailProvider.IncomingServers, incoming)
|
||||
if config.IMAP.EnabledOnHTTPS {
|
||||
tlsMode, _ := socketType(admin.TLSModeImmediate)
|
||||
incomingALPN := incomingServer{
|
||||
"imap",
|
||||
config.IMAP.Host.ASCII,
|
||||
443,
|
||||
tlsMode,
|
||||
email,
|
||||
"password-encrypted",
|
||||
}
|
||||
resp.EmailProvider.IncomingServers = append(resp.EmailProvider.IncomingServers, incomingALPN)
|
||||
}
|
||||
|
||||
resp.EmailProvider.OutgoingServer.Type = "smtp"
|
||||
resp.EmailProvider.OutgoingServer.Hostname = config.Submission.Host.ASCII
|
||||
resp.EmailProvider.OutgoingServer.Port = config.Submission.Port
|
||||
resp.EmailProvider.OutgoingServer.SocketType = submissionTLS
|
||||
resp.EmailProvider.OutgoingServer.Username = email
|
||||
resp.EmailProvider.OutgoingServer.Authentication = "password-encrypted"
|
||||
outgoing := outgoingServer{
|
||||
"smtp",
|
||||
config.Submission.Host.ASCII,
|
||||
config.Submission.Port,
|
||||
submissionTLS,
|
||||
email,
|
||||
"password-encrypted",
|
||||
}
|
||||
resp.EmailProvider.OutgoingServers = append(resp.EmailProvider.OutgoingServers, outgoing)
|
||||
if config.Submission.EnabledOnHTTPS {
|
||||
tlsMode, _ := socketType(admin.TLSModeImmediate)
|
||||
outgoingALPN := outgoingServer{
|
||||
"smtp",
|
||||
config.Submission.Host.ASCII,
|
||||
443,
|
||||
tlsMode,
|
||||
email,
|
||||
"password-encrypted",
|
||||
}
|
||||
resp.EmailProvider.OutgoingServers = append(resp.EmailProvider.OutgoingServers, outgoingALPN)
|
||||
}
|
||||
|
||||
// todo: should we put the email address in the URL?
|
||||
resp.ClientConfigUpdate.URL = fmt.Sprintf("https://autoconfig.%s/mail/config-v1.1.xml", addr.Domain.ASCII)
|
||||
resp.ClientConfigUpdate.URL = fmt.Sprintf("https://autoconfig.%s/mail/config-v1.1.xml", domain.ASCII)
|
||||
|
||||
w.Header().Set("Content-Type", "application/xml; charset=utf-8")
|
||||
enc := xml.NewEncoder(w)
|
||||
enc.Indent("", "\t")
|
||||
fmt.Fprint(w, xml.Header)
|
||||
if err := enc.Encode(resp); err != nil {
|
||||
log.Errorx("marshal autoconfig response", err)
|
||||
}
|
||||
err = enc.Encode(resp)
|
||||
log.Check(err, "write autoconfig xml response")
|
||||
}
|
||||
|
||||
// Autodiscover from Microsoft, also used by Thunderbird.
|
||||
@ -170,13 +217,13 @@ func autodiscoverHandle(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
// tlsmode returns the "ssl" and "encryption" fields.
|
||||
tlsmode := func(tlsMode mox.TLSMode) (string, string, error) {
|
||||
tlsmode := func(tlsMode admin.TLSMode) (string, string, error) {
|
||||
switch tlsMode {
|
||||
case mox.TLSModeImmediate:
|
||||
case admin.TLSModeImmediate:
|
||||
return "on", "TLS", nil
|
||||
case mox.TLSModeSTARTTLS:
|
||||
case admin.TLSModeSTARTTLS:
|
||||
return "on", "", nil
|
||||
case mox.TLSModeNone:
|
||||
case admin.TLSModeNone:
|
||||
return "off", "", nil
|
||||
default:
|
||||
return "", "", fmt.Errorf("unknown tls mode %v", tlsMode)
|
||||
@ -185,7 +232,7 @@ func autodiscoverHandle(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
var imapSSL, imapEncryption string
|
||||
var submissionSSL, submissionEncryption string
|
||||
config, err := mox.ClientConfigDomain(addr.Domain)
|
||||
config, err := admin.ClientConfigDomain(addr.Domain)
|
||||
if err == nil {
|
||||
imapSSL, imapEncryption, err = tlsmode(config.IMAP.TLSMode)
|
||||
}
|
||||
@ -208,6 +255,8 @@ func autodiscoverHandle(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
w.Header().Set("Content-Type", "application/xml; charset=utf-8")
|
||||
|
||||
// todo: let user configure they prefer or require tls client auth and add "AuthPackage" with value "certificate" to Protocol? see https://learn.microsoft.com/en-us/openspecs/exchange_server_protocols/ms-oxdscli/21fd2dd5-c4ee-485b-94fb-e7db5da93726
|
||||
|
||||
resp := autodiscoverResponse{}
|
||||
resp.XMLName.Local = "Autodiscover"
|
||||
resp.XMLName.Space = "http://schemas.microsoft.com/exchange/autodiscover/responseschema/2006"
|
||||
@ -242,9 +291,8 @@ func autodiscoverHandle(w http.ResponseWriter, r *http.Request) {
|
||||
enc := xml.NewEncoder(w)
|
||||
enc.Indent("", "\t")
|
||||
fmt.Fprint(w, xml.Header)
|
||||
if err := enc.Encode(resp); err != nil {
|
||||
log.Errorx("marshal autodiscover response", err)
|
||||
}
|
||||
err = enc.Encode(resp)
|
||||
log.Check(err, "marshal autodiscover xml response")
|
||||
}
|
||||
|
||||
// Thunderbird requests these URLs for autoconfig/autodiscover:
|
||||
@ -252,6 +300,22 @@ func autodiscoverHandle(w http.ResponseWriter, r *http.Request) {
|
||||
// https://autodiscover.example.org/autodiscover/autodiscover.xml
|
||||
// https://example.org/.well-known/autoconfig/mail/config-v1.1.xml?emailaddress=user%40example.org
|
||||
// https://example.org/autodiscover/autodiscover.xml
|
||||
type incomingServer struct {
|
||||
Type string `xml:"type,attr"`
|
||||
Hostname string `xml:"hostname"`
|
||||
Port int `xml:"port"`
|
||||
SocketType string `xml:"socketType"`
|
||||
Username string `xml:"username"`
|
||||
Authentication string `xml:"authentication"`
|
||||
}
|
||||
type outgoingServer struct {
|
||||
Type string `xml:"type,attr"`
|
||||
Hostname string `xml:"hostname"`
|
||||
Port int `xml:"port"`
|
||||
SocketType string `xml:"socketType"`
|
||||
Username string `xml:"username"`
|
||||
Authentication string `xml:"authentication"`
|
||||
}
|
||||
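The incomingServer/outgoingServer structs above replace the previous inline anonymous structs so that several entries can be emitted per provider (the extra entries advertise IMAP and submission on port 443 with ALPN). A small sketch, with hypothetical values, of what one such element marshals to:

	package main

	import (
		"encoding/xml"
		"fmt"
		"log"
	)

	// Same field layout as the incomingServer struct above.
	type incomingServer struct {
		Type           string `xml:"type,attr"`
		Hostname       string `xml:"hostname"`
		Port           int    `xml:"port"`
		SocketType     string `xml:"socketType"`
		Username       string `xml:"username"`
		Authentication string `xml:"authentication"`
	}

	func main() {
		// Hypothetical values for illustration.
		in := incomingServer{"imap", "mail.example.org", 993, "SSL", "user@example.org", "password-encrypted"}
		buf, err := xml.MarshalIndent(in, "", "\t")
		if err != nil {
			log.Fatal(err)
		}
		// Prints an <incomingServer type="imap"> element with hostname, port,
		// socketType, username and authentication child elements.
		fmt.Println(string(buf))
	}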
type autoconfigResponse struct {
|
||||
XMLName xml.Name `xml:"clientConfig"`
|
||||
Version string `xml:"version,attr"`
|
||||
@ -262,23 +326,8 @@ type autoconfigResponse struct {
|
||||
DisplayName string `xml:"displayName"`
|
||||
DisplayShortName string `xml:"displayShortName"`
|
||||
|
||||
IncomingServer struct {
|
||||
Type string `xml:"type,attr"`
|
||||
Hostname string `xml:"hostname"`
|
||||
Port int `xml:"port"`
|
||||
SocketType string `xml:"socketType"`
|
||||
Username string `xml:"username"`
|
||||
Authentication string `xml:"authentication"`
|
||||
} `xml:"incomingServer"`
|
||||
|
||||
OutgoingServer struct {
|
||||
Type string `xml:"type,attr"`
|
||||
Hostname string `xml:"hostname"`
|
||||
Port int `xml:"port"`
|
||||
SocketType string `xml:"socketType"`
|
||||
Username string `xml:"username"`
|
||||
Authentication string `xml:"authentication"`
|
||||
} `xml:"outgoingServer"`
|
||||
IncomingServers []incomingServer `xml:"incomingServer"`
|
||||
OutgoingServers []outgoingServer `xml:"outgoingServer"`
|
||||
} `xml:"emailProvider"`
|
||||
|
||||
ClientConfigUpdate struct {
|
||||
@ -324,6 +373,8 @@ type autodiscoverProtocol struct {
|
||||
// Serve a .mobileconfig file. This endpoint is not a standard place where Apple
|
||||
// devices look. We point to it from the account page.
|
||||
func mobileconfigHandle(w http.ResponseWriter, r *http.Request) {
|
||||
log := pkglog.WithContext(r.Context())
|
||||
|
||||
if r.Method != "GET" {
|
||||
http.Error(w, "405 - method not allowed - get required", http.StatusMethodNotAllowed)
|
||||
return
|
||||
@ -349,12 +400,15 @@ func mobileconfigHandle(w http.ResponseWriter, r *http.Request) {
|
||||
filename = strings.ReplaceAll(filename, "@", "-at-")
|
||||
filename = "email-account-" + filename + ".mobileconfig"
|
||||
h.Set("Content-Disposition", fmt.Sprintf(`attachment; filename="%s"`, filename))
|
||||
w.Write(buf)
|
||||
_, err = w.Write(buf)
|
||||
log.Check(err, "writing mobileconfig response")
|
||||
}
|
||||
|
||||
// Serve a png file with qrcode with the link to the .mobileconfig file, should be
|
||||
// helpful for mobile devices.
|
||||
func mobileconfigQRCodeHandle(w http.ResponseWriter, r *http.Request) {
|
||||
log := pkglog.WithContext(r.Context())
|
||||
|
||||
if r.Method != "GET" {
|
||||
http.Error(w, "405 - method not allowed - get required", http.StatusMethodNotAllowed)
|
||||
return
|
||||
@ -381,5 +435,6 @@ func mobileconfigQRCodeHandle(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
h := w.Header()
|
||||
h.Set("Content-Type", "image/png")
|
||||
w.Write(code.PNG())
|
||||
_, err = w.Write(code.PNG())
|
||||
log.Check(err, "writing mobileconfig qr code")
|
||||
}
|
||||
|
BIN	http/favicon.ico (new file, 823 B; binary not shown)
17	http/main_test.go (new file)
@ -0,0 +1,17 @@
|
||||
package http
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/mjl-/mox/metrics"
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
m.Run()
|
||||
if metrics.Panics.Load() > 0 {
|
||||
fmt.Println("unhandled panics encountered")
|
||||
os.Exit(2)
|
||||
}
|
||||
}
|
@ -6,13 +6,11 @@ import (
|
||||
"crypto/sha256"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"maps"
|
||||
"slices"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/exp/maps"
|
||||
|
||||
"github.com/mjl-/mox/mox-"
|
||||
"github.com/mjl-/mox/admin"
|
||||
"github.com/mjl-/mox/smtp"
|
||||
)
|
||||
|
||||
@ -39,8 +37,7 @@ func (m dict) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
|
||||
if err := e.EncodeToken(xml.StartElement{Name: xml.Name{Local: "dict"}}); err != nil {
|
||||
return err
|
||||
}
|
||||
l := maps.Keys(m)
|
||||
sort.Strings(l)
|
||||
l := slices.Sorted(maps.Keys(m))
|
||||
for _, k := range l {
|
||||
tokens := []xml.Token{
|
||||
xml.StartElement{Name: xml.Name{Local: "key"}},
|
||||
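The hunk above drops golang.org/x/exp/maps plus sort.Strings in favour of the standard library's iterator-based maps.Keys combined with slices.Sorted, available since Go 1.23 (matching the go.mod bump earlier in this change set). A minimal sketch of the pattern:

	package main

	import (
		"fmt"
		"maps"
		"slices"
	)

	func main() {
		m := map[string]string{"b": "2", "a": "1", "c": "3"}
		// slices.Sorted consumes the key iterator and returns a sorted slice,
		// giving deterministic iteration order over the map.
		for _, k := range slices.Sorted(maps.Keys(m)) {
			fmt.Println(k, m[k])
		}
	}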
@ -64,7 +61,7 @@ func (m dict) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
|
||||
case int:
|
||||
tokens = []xml.Token{
|
||||
xml.StartElement{Name: xml.Name{Local: "integer"}},
|
||||
xml.CharData([]byte(fmt.Sprintf("%d", v))),
|
||||
xml.CharData(fmt.Appendf(nil, "%d", v)),
|
||||
xml.EndElement{Name: xml.Name{Local: "integer"}},
|
||||
}
|
||||
case bool:
|
||||
@ -122,7 +119,7 @@ func MobileConfig(addresses []string, fullName string) ([]byte, error) {
|
||||
return nil, fmt.Errorf("parsing address: %v", err)
|
||||
}
|
||||
|
||||
config, err := mox.ClientConfigDomain(addr.Domain)
|
||||
config, err := admin.ClientConfigDomain(addr.Domain)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("getting config for domain: %v", err)
|
||||
}
|
||||
@ -175,12 +172,12 @@ func MobileConfig(addresses []string, fullName string) ([]byte, error) {
|
||||
"IncomingMailServerUsername": addresses[0],
|
||||
"IncomingMailServerHostName": config.IMAP.Host.ASCII,
|
||||
"IncomingMailServerPortNumber": config.IMAP.Port,
|
||||
"IncomingMailServerUseSSL": config.IMAP.TLSMode == mox.TLSModeImmediate,
|
||||
"IncomingMailServerUseSSL": config.IMAP.TLSMode == admin.TLSModeImmediate,
|
||||
"OutgoingMailServerAuthentication": "EmailAuthCRAMMD5", // SCRAM not an option at time of writing...
|
||||
"OutgoingMailServerHostName": config.Submission.Host.ASCII,
|
||||
"OutgoingMailServerPortNumber": config.Submission.Port,
|
||||
"OutgoingMailServerUsername": addresses[0],
|
||||
"OutgoingMailServerUseSSL": config.Submission.TLSMode == mox.TLSModeImmediate,
|
||||
"OutgoingMailServerUseSSL": config.Submission.TLSMode == admin.TLSModeImmediate,
|
||||
"OutgoingPasswordSameAsIncomingPassword": true,
|
||||
"PayloadIdentifier": reverseAddr + ".email.account",
|
||||
"PayloadType": "com.apple.mail.managed",
|
||||
|
783	http/web.go
@ -11,17 +11,20 @@ import (
|
||||
"io"
|
||||
golog "log"
|
||||
"log/slog"
|
||||
"maps"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"slices"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
_ "embed"
|
||||
_ "net/http/pprof"
|
||||
|
||||
"golang.org/x/exp/maps"
|
||||
"golang.org/x/net/http2"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
@ -30,9 +33,11 @@ import (
|
||||
"github.com/mjl-/mox/autotls"
|
||||
"github.com/mjl-/mox/config"
|
||||
"github.com/mjl-/mox/dns"
|
||||
"github.com/mjl-/mox/imapserver"
|
||||
"github.com/mjl-/mox/mlog"
|
||||
"github.com/mjl-/mox/mox-"
|
||||
"github.com/mjl-/mox/ratelimit"
|
||||
"github.com/mjl-/mox/smtpserver"
|
||||
"github.com/mjl-/mox/webaccount"
|
||||
"github.com/mjl-/mox/webadmin"
|
||||
"github.com/mjl-/mox/webapisrv"
|
||||
@ -74,6 +79,29 @@ var (
|
||||
)
|
||||
)
|
||||
|
||||
// We serve a favicon for the webaccount/webmail/webadmin/webapi services on
// account-related domains. They are configured as "service handlers", which have
// a lower priority than web handlers. Admins can configure a custom /favicon.ico
// route to override the builtin favicon. In the future, we may want to make it
// easier to customize the favicon, possibly per client settings domain.
//
|
||||
//go:embed favicon.ico
|
||||
var faviconIco string
|
||||
var faviconModTime = time.Now()
|
||||
|
||||
func init() {
|
||||
p, err := os.Executable()
|
||||
if err == nil {
|
||||
if st, err := os.Stat(p); err == nil {
|
||||
faviconModTime = st.ModTime()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func faviconHandle(w http.ResponseWriter, r *http.Request) {
|
||||
http.ServeContent(w, r, "favicon.ico", faviconModTime, strings.NewReader(faviconIco))
|
||||
}
|
||||
|
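faviconHandle above serves an embedded icon with http.ServeContent, using the executable's modification time so conditional requests keep working across restarts. A reduced sketch of the embed-and-serve pattern (it assumes a favicon.ico file sits next to the source file):

	package main

	import (
		_ "embed"
		"net/http"
		"strings"
		"time"
	)

	//go:embed favicon.ico
	var faviconIco string

	// A fixed timestamp here; mox derives it from the executable's mtime.
	var faviconModTime = time.Now()

	func main() {
		http.HandleFunc("/favicon.ico", func(w http.ResponseWriter, r *http.Request) {
			// ServeContent handles If-Modified-Since and range requests for us.
			http.ServeContent(w, r, "favicon.ico", faviconModTime, strings.NewReader(faviconIco))
		})
		_ = http.ListenAndServe("127.0.0.1:8080", nil)
	}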
||||
type responseWriterFlusher interface {
|
||||
http.ResponseWriter
|
||||
http.Flusher
|
||||
@ -324,7 +352,7 @@ func (w *loggingWriter) Done() {
|
||||
slog.Any("remoteaddr", w.R.RemoteAddr),
|
||||
slog.String("tlsinfo", tlsinfo),
|
||||
slog.String("useragent", w.R.Header.Get("User-Agent")),
|
||||
slog.String("referrr", w.R.Header.Get("Referrer")),
|
||||
slog.String("referer", w.R.Header.Get("Referer")),
|
||||
}
|
||||
if w.WebsocketRequest {
|
||||
attrs = append(attrs,
|
||||
@ -351,37 +379,45 @@ func (w *loggingWriter) Done() {
|
||||
pkglog.WithContext(w.R.Context()).Debugx("http request", err, attrs...)
|
||||
}
|
||||
|
||||
// Set some http headers that should prevent potential abuse. Better safe than sorry.
func safeHeaders(fn http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		h := w.Header()
		h.Set("X-Frame-Options", "deny")
		h.Set("X-Content-Type-Options", "nosniff")
		h.Set("Content-Security-Policy", "default-src 'self' 'unsafe-inline' data:")
		h.Set("Referrer-Policy", "same-origin")
		fn.ServeHTTP(w, r)
	})
}
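A usage sketch for the middleware above (later hunks show mox exposing it as mox.SafeHeaders); wrapping the top-level handler applies the headers to every response:

	package main

	import (
		"fmt"
		"net/http"
	)

	// withSafeHeaders mirrors the safeHeaders middleware above.
	func withSafeHeaders(fn http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			h := w.Header()
			h.Set("X-Frame-Options", "deny")
			h.Set("X-Content-Type-Options", "nosniff")
			h.Set("Referrer-Policy", "same-origin")
			fn.ServeHTTP(w, r)
		})
	}

	func main() {
		mux := http.NewServeMux()
		mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
			fmt.Fprintln(w, "hello")
		})
		// Wrapping the mux applies the defensive headers to every response.
		_ = http.ListenAndServe("127.0.0.1:8080", withSafeHeaders(mux))
	}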
|
||||
// Built-in handlers, e.g. mta-sts and autoconfig.
|
||||
type pathHandler struct {
|
||||
Name string // For logging/metrics.
|
||||
HostMatch func(dom dns.Domain) bool // If not nil, called to see if domain of requests matches. Only called if requested host is a valid domain.
|
||||
Path string // Path to register, like on http.ServeMux.
|
||||
Name string // For logging/metrics.
|
||||
HostMatch func(host dns.IPDomain) bool // If not nil, called to see if domain of requests matches. Host can be zero value for invalid domain/ip.
|
||||
Path string // Path to register, like on http.ServeMux.
|
||||
Handler http.Handler
|
||||
}
|
||||
|
||||
type serve struct {
|
||||
Kinds []string // Type of handler and protocol (e.g. acme-tls-alpn-01, account-http, admin-https).
|
||||
TLSConfig *tls.Config
|
||||
PathHandlers []pathHandler // Sorted, longest first.
|
||||
Webserver bool // Whether serving WebHandler. PathHandlers are always evaluated before WebHandlers.
|
||||
Kinds []string // Type of handler and protocol (e.g. acme-tls-alpn-01, account-http, admin-https, imap-https, smtp-https).
|
||||
TLSConfig *tls.Config
|
||||
NextProto tlsNextProtoMap // For HTTP server, when we do submission/imap with ALPN over the HTTPS port.
|
||||
Favicon bool
|
||||
Forwarded bool // Requests are coming from a reverse proxy, we'll use X-Forwarded-For for the IP address to ratelimit.
|
||||
RateLimitDisabled bool // Don't apply ratelimiting.
|
||||
|
||||
// SystemHandlers are for MTA-STS, autoconfig, ACME validation. They can't be
|
||||
// overridden by WebHandlers. WebHandlers are evaluated next, and the internal
|
||||
// service handlers from Listeners in mox.conf (for admin, account, webmail, webapi
|
||||
// interfaces) last. WebHandlers can also pass requests to the internal servers.
|
||||
// This order allows admins to serve other content on domains serving the mox.conf
|
||||
// internal services.
|
||||
SystemHandlers []pathHandler // Sorted, longest first.
|
||||
Webserver bool
|
||||
ServiceHandlers []pathHandler // Sorted, longest first.
|
||||
}
|
||||
|
||||
// Handle registers a named handler for a path and optional host. If path ends with
|
||||
// a slash, it is used as prefix match, otherwise a full path match is required. If
|
||||
// hostOpt is set, only requests to those host are handled by this handler.
|
||||
func (s *serve) Handle(name string, hostMatch func(dns.Domain) bool, path string, fn http.Handler) {
|
||||
s.PathHandlers = append(s.PathHandlers, pathHandler{name, hostMatch, path, fn})
|
||||
// SystemHandle registers a named system handler for a path and optional host. If
|
||||
// path ends with a slash, it is used as prefix match, otherwise a full path match
|
||||
// is required. If hostOpt is set, only requests to those host are handled by this
|
||||
// handler.
|
||||
func (s *serve) SystemHandle(name string, hostMatch func(dns.IPDomain) bool, path string, fn http.Handler) {
|
||||
s.SystemHandlers = append(s.SystemHandlers, pathHandler{name, hostMatch, path, fn})
|
||||
}
|
||||
|
||||
// Like SystemHandle, but for internal services "admin", "account", "webmail",
|
||||
// "webapi" configured in the mox.conf Listener.
|
||||
func (s *serve) ServiceHandle(name string, hostMatch func(dns.IPDomain) bool, path string, fn http.Handler) {
|
||||
s.ServiceHandlers = append(s.ServiceHandlers, pathHandler{name, hostMatch, path, fn})
|
||||
}
|
||||
|
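SystemHandle and ServiceHandle above register pathHandlers whose Path is either an exact match or, when it ends in a slash, a prefix match (the rule applied in ServeHTTP further below). A tiny sketch of that matching rule:

	package main

	import (
		"fmt"
		"strings"
	)

	// pathMatch mirrors the matching used for SystemHandlers and ServiceHandlers:
	// a registered path ending in "/" is a prefix match, anything else must match exactly.
	func pathMatch(registered, requested string) bool {
		return requested == registered ||
			strings.HasSuffix(registered, "/") && strings.HasPrefix(requested, registered)
	}

	func main() {
		fmt.Println(pathMatch("/mail/config-v1.1.xml", "/mail/config-v1.1.xml"))                    // true
		fmt.Println(pathMatch("/.well-known/acme-challenge/", "/.well-known/acme-challenge/token")) // true
		fmt.Println(pathMatch("/admin", "/admin/"))                                                 // false
	}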
||||
var (
|
||||
@ -404,23 +440,41 @@ var (
|
||||
// metrics.
|
||||
func (s *serve) ServeHTTP(xw http.ResponseWriter, r *http.Request) {
|
||||
now := time.Now()
|
||||
// Rate limiting as early as possible.
|
||||
ipstr, _, err := net.SplitHostPort(r.RemoteAddr)
|
||||
if err != nil {
|
||||
pkglog.Debugx("split host:port client remoteaddr", err, slog.Any("remoteaddr", r.RemoteAddr))
|
||||
} else if ip := net.ParseIP(ipstr); ip == nil {
|
||||
pkglog.Debug("parsing ip for client remoteaddr", slog.Any("remoteaddr", r.RemoteAddr))
|
||||
} else if !limiterConnectionrate.Add(ip, now, 1) {
|
||||
method := metricHTTPMethod(r.Method)
|
||||
proto := "http"
|
||||
if r.TLS != nil {
|
||||
proto = "https"
|
||||
}
|
||||
metricRequest.WithLabelValues("(ratelimited)", proto, method, "429").Observe(0)
|
||||
// No logging, that's just noise.
|
||||
|
||||
http.Error(xw, "429 - too many auth attempts", http.StatusTooManyRequests)
|
||||
return
|
||||
// Rate limiting as early as possible, if enabled.
|
||||
if !s.RateLimitDisabled {
|
||||
// If requests are coming from a reverse proxy, use the IP from X-Forwarded-For.
|
||||
// Otherwise the remote IP for this connection.
|
||||
var ipstr string
|
||||
if s.Forwarded {
|
||||
s := r.Header.Get("X-Forwarded-For")
|
||||
ipstr = strings.TrimSpace(strings.Split(s, ",")[0])
|
||||
if ipstr == "" {
|
||||
pkglog.Debug("ratelimit: no ip address in X-Forwarded-For header")
|
||||
}
|
||||
} else {
|
||||
var err error
|
||||
ipstr, _, err = net.SplitHostPort(r.RemoteAddr)
|
||||
if err != nil {
|
||||
pkglog.Debugx("ratelimit: parsing remote address", err, slog.String("remoteaddr", r.RemoteAddr))
|
||||
}
|
||||
}
|
||||
ip := net.ParseIP(ipstr)
|
||||
if ip == nil && ipstr != "" {
|
||||
pkglog.Debug("ratelimit: invalid ip", slog.String("ip", ipstr))
|
||||
}
|
||||
if ip != nil && !limiterConnectionrate.Add(ip, now, 1) {
|
||||
method := metricHTTPMethod(r.Method)
|
||||
proto := "http"
|
||||
if r.TLS != nil {
|
||||
proto = "https"
|
||||
}
|
||||
metricRequest.WithLabelValues("(ratelimited)", proto, method, "429").Observe(0)
|
||||
// No logging, that's just noise.
|
||||
|
||||
http.Error(xw, "429 - too many auth attempts", http.StatusTooManyRequests)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
ctx := context.WithValue(r.Context(), mlog.CidKey, mox.Cid())
|
||||
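The rate-limit hunk above picks the client IP differently depending on whether the listener sits behind a reverse proxy: the first entry of X-Forwarded-For when Forwarded is set, otherwise the connection's remote address. A self-contained sketch of that selection:

	package main

	import (
		"fmt"
		"net"
		"net/http"
		"strings"
	)

	// clientIP mirrors the logic above: behind a reverse proxy, take the first
	// (client-supplied) entry of X-Forwarded-For; otherwise use the connection's
	// remote address. Returns nil if the value is missing or invalid.
	func clientIP(r *http.Request, forwarded bool) net.IP {
		var ipstr string
		if forwarded {
			ipstr = strings.TrimSpace(strings.Split(r.Header.Get("X-Forwarded-For"), ",")[0])
		} else {
			ipstr, _, _ = net.SplitHostPort(r.RemoteAddr)
		}
		return net.ParseIP(ipstr)
	}

	func main() {
		r, _ := http.NewRequest("GET", "http://mox.example/", nil)
		r.RemoteAddr = "192.0.2.10:54321"
		r.Header.Set("X-Forwarded-For", "203.0.113.7, 192.0.2.1")
		fmt.Println(clientIP(r, false)) // 192.0.2.10
		fmt.Println(clientIP(r, true))  // 203.0.113.7
	}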
@ -452,28 +506,44 @@ func (s *serve) ServeHTTP(xw http.ResponseWriter, r *http.Request) {
|
||||
r.URL.Path += "/"
|
||||
}
|
||||
|
||||
var dom dns.Domain
|
||||
host := r.Host
|
||||
nhost, _, err := net.SplitHostPort(host)
|
||||
if err == nil {
|
||||
host = nhost
|
||||
}
|
||||
// host could be an IP, some handles may match, not an error.
|
||||
dom, domErr := dns.ParseDomain(host)
|
||||
ipdom := dns.IPDomain{IP: net.ParseIP(host)}
|
||||
if ipdom.IP == nil {
|
||||
dom, domErr := dns.ParseDomain(host)
|
||||
if domErr == nil {
|
||||
ipdom = dns.IPDomain{Domain: dom}
|
||||
}
|
||||
}
|
||||
|
||||
for _, h := range s.PathHandlers {
|
||||
if h.HostMatch != nil && (domErr != nil || !h.HostMatch(dom)) {
|
||||
continue
|
||||
handle := func(h pathHandler) bool {
|
||||
if h.HostMatch != nil && !h.HostMatch(ipdom) {
|
||||
return false
|
||||
}
|
||||
if r.URL.Path == h.Path || strings.HasSuffix(h.Path, "/") && strings.HasPrefix(r.URL.Path, h.Path) {
|
||||
nw.Handler = h.Name
|
||||
nw.Compress = true
|
||||
h.Handler.ServeHTTP(nw, r)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
for _, h := range s.SystemHandlers {
|
||||
if handle(h) {
|
||||
return
|
||||
}
|
||||
}
|
||||
if s.Webserver && domErr == nil {
|
||||
if WebHandle(nw, r, dom) {
|
||||
if s.Webserver {
|
||||
if WebHandle(nw, r, ipdom) {
|
||||
return
|
||||
}
|
||||
}
|
||||
for _, h := range s.ServiceHandlers {
|
||||
if handle(h) {
|
||||
return
|
||||
}
|
||||
}
|
||||
@ -481,256 +551,348 @@ func (s *serve) ServeHTTP(xw http.ResponseWriter, r *http.Request) {
|
||||
http.NotFound(nw, r)
|
||||
}
|
||||
|
||||
func redirectToTrailingSlash(srv *serve, hostMatch func(dns.IPDomain) bool, name, path string) {
// Helpfully redirect user to version with ending slash.
if path != "/" && strings.HasSuffix(path, "/") {
handler := mox.SafeHeaders(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
http.Redirect(w, r, path, http.StatusSeeOther)
}))
srv.ServiceHandle(name, hostMatch, strings.TrimRight(path, "/"), handler)
}
}

// Listen binds to sockets for HTTP listeners, including those required for ACME to
// generate TLS certificates. It stores the listeners so Serve can start serving them.
func Listen() {
redirectToTrailingSlash := func(srv *serve, name, path string) {
// Helpfully redirect user to version with ending slash.
if path != "/" && strings.HasSuffix(path, "/") {
handler := safeHeaders(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
http.Redirect(w, r, path, http.StatusSeeOther)
}))
srv.Handle(name, nil, path[:len(path)-1], handler)
// Initialize listeners in deterministic order for the same potential error
// messages.
names := slices.Sorted(maps.Keys(mox.Conf.Static.Listeners))
for _, name := range names {
l := mox.Conf.Static.Listeners[name]
portServe := portServes(name, l)

ports := slices.Sorted(maps.Keys(portServe))
for _, port := range ports {
srv := portServe[port]
for _, ip := range l.IPs {
listen1(ip, port, srv.TLSConfig, name, srv.Kinds, srv, srv.NextProto)
}
}
}
}

func portServes(name string, l config.Listener) map[int]*serve {
portServe := map[int]*serve{}

// For system/services, we serve on host localhost too, for SSH tunnel scenarios.
localhost := dns.Domain{ASCII: "localhost"}

ldom := l.HostnameDomain
if l.Hostname == "" {
ldom = mox.Conf.Static.HostnameDomain
}
listenerHostMatch := func(host dns.IPDomain) bool {
if host.IsIP() {
return true
}
return host.Domain == ldom || host.Domain == localhost
}
accountHostMatch := func(host dns.IPDomain) bool {
if listenerHostMatch(host) {
return true
}
return mox.Conf.IsClientSettingsDomain(host.Domain)
}

var ensureServe func(https, forwarded, noRateLimiting bool, port int, kind string, favicon bool) *serve
ensureServe = func(https, forwarded, rateLimitDisabled bool, port int, kind string, favicon bool) *serve {
s := portServe[port]
if s == nil {
s = &serve{nil, nil, tlsNextProtoMap{}, false, false, false, nil, false, nil}
portServe[port] = s
}
s.Kinds = append(s.Kinds, kind)
if favicon && !s.Favicon {
s.ServiceHandle("favicon", accountHostMatch, "/favicon.ico", mox.SafeHeaders(http.HandlerFunc(faviconHandle)))
s.Favicon = true
}
s.Forwarded = s.Forwarded || forwarded
s.RateLimitDisabled = s.RateLimitDisabled || rateLimitDisabled

// We clone TLS configs because we may modify them later on for this server, for
// ALPN. And we need copies because multiple listeners on http.Server where the
// config is used will try to modify it concurrently.
if https && l.TLS.ACME != "" {
s.TLSConfig = l.TLS.ACMEConfig.Clone()

tlsport := config.Port(mox.Conf.Static.ACME[l.TLS.ACME].Port, 443)
if portServe[tlsport] == nil || !slices.Contains(portServe[tlsport].Kinds, "acme-tls-alpn-01") {
ensureServe(true, false, false, tlsport, "acme-tls-alpn-01", false)
}
} else if https {
s.TLSConfig = l.TLS.Config.Clone()
}
return s
}

// If TLS with ACME is enabled on this plain HTTP port, and it hasn't been enabled
// yet, add the http-01 validation mechanism handler to the server.
ensureACMEHTTP01 := func(srv *serve) {
if l.TLS != nil && l.TLS.ACME != "" && !slices.Contains(srv.Kinds, "acme-http-01") {
m := mox.Conf.Static.ACME[l.TLS.ACME].Manager
srv.Kinds = append(srv.Kinds, "acme-http-01")
srv.SystemHandle("acme-http-01", nil, "/.well-known/acme-challenge/", m.Manager.HTTPHandler(nil))
}
}

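A quick illustration of the two host matchers defined above, following the semantics visible in the diff: an IP host always matches, the listener hostname and localhost match everywhere, and client-settings domains additionally match account-facing services. The hostnames below come from the test expectations later in this diff (mox.example, mail.mox.example); the code itself is a standalone sketch using strings, not the package's dns.IPDomain API.

package main

import "fmt"

// Sketch of the host-matching rules above using plain strings instead of dns.IPDomain.
func main() {
	listenerHost := "mox.example"                                // mox.Conf.Static.HostnameDomain in the real code
	clientSettings := map[string]bool{"mail.mox.example": true} // stand-in for IsClientSettingsDomain

	listenerHostMatch := func(host string, isIP bool) bool {
		return isIP || host == listenerHost || host == "localhost"
	}
	accountHostMatch := func(host string, isIP bool) bool {
		return listenerHostMatch(host, isIP) || clientSettings[host]
	}

	fmt.Println(listenerHostMatch("127.0.0.1", true))          // true: IPs always match
	fmt.Println(listenerHostMatch("mail.mox.example", false))  // false: admin/metrics stay off client-settings domains
	fmt.Println(accountHostMatch("mail.mox.example", false))   // true: webmail/account/webapi also serve client-settings domains
}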
|
||||
// Initialize listeners in deterministic order for the same potential error
|
||||
// messages.
|
||||
names := maps.Keys(mox.Conf.Static.Listeners)
|
||||
sort.Strings(names)
|
||||
for _, name := range names {
|
||||
l := mox.Conf.Static.Listeners[name]
|
||||
|
||||
portServe := map[int]*serve{}
|
||||
|
||||
var ensureServe func(https bool, port int, kind string) *serve
|
||||
ensureServe = func(https bool, port int, kind string) *serve {
|
||||
s := portServe[port]
|
||||
if s == nil {
|
||||
s = &serve{nil, nil, nil, false}
|
||||
portServe[port] = s
|
||||
}
|
||||
s.Kinds = append(s.Kinds, kind)
|
||||
if https && l.TLS.ACME != "" {
|
||||
s.TLSConfig = l.TLS.ACMEConfig
|
||||
} else if https {
|
||||
s.TLSConfig = l.TLS.Config
|
||||
if l.TLS.ACME != "" {
|
||||
tlsport := config.Port(mox.Conf.Static.ACME[l.TLS.ACME].Port, 443)
|
||||
ensureServe(true, tlsport, "acme-tls-alpn-01")
|
||||
}
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
if l.TLS != nil && l.TLS.ACME != "" && (l.SMTP.Enabled && !l.SMTP.NoSTARTTLS || l.Submissions.Enabled || l.IMAPS.Enabled) {
|
||||
port := config.Port(mox.Conf.Static.ACME[l.TLS.ACME].Port, 443)
|
||||
ensureServe(true, port, "acme-tls-alpn-01")
|
||||
}
|
||||
|
||||
if l.AccountHTTP.Enabled {
|
||||
port := config.Port(l.AccountHTTP.Port, 80)
|
||||
path := "/"
|
||||
if l.AccountHTTP.Path != "" {
|
||||
path = l.AccountHTTP.Path
|
||||
}
|
||||
srv := ensureServe(false, port, "account-http at "+path)
|
||||
handler := safeHeaders(http.StripPrefix(path[:len(path)-1], http.HandlerFunc(webaccount.Handler(path, l.AccountHTTP.Forwarded))))
|
||||
srv.Handle("account", nil, path, handler)
|
||||
redirectToTrailingSlash(srv, "account", path)
|
||||
}
|
||||
if l.AccountHTTPS.Enabled {
|
||||
port := config.Port(l.AccountHTTPS.Port, 443)
|
||||
path := "/"
|
||||
if l.AccountHTTPS.Path != "" {
|
||||
path = l.AccountHTTPS.Path
|
||||
}
|
||||
srv := ensureServe(true, port, "account-https at "+path)
|
||||
handler := safeHeaders(http.StripPrefix(path[:len(path)-1], http.HandlerFunc(webaccount.Handler(path, l.AccountHTTPS.Forwarded))))
|
||||
srv.Handle("account", nil, path, handler)
|
||||
redirectToTrailingSlash(srv, "account", path)
|
||||
}
|
||||
|
||||
if l.AdminHTTP.Enabled {
|
||||
port := config.Port(l.AdminHTTP.Port, 80)
|
||||
path := "/admin/"
|
||||
if l.AdminHTTP.Path != "" {
|
||||
path = l.AdminHTTP.Path
|
||||
}
|
||||
srv := ensureServe(false, port, "admin-http at "+path)
|
||||
handler := safeHeaders(http.StripPrefix(path[:len(path)-1], http.HandlerFunc(webadmin.Handler(path, l.AdminHTTP.Forwarded))))
|
||||
srv.Handle("admin", nil, path, handler)
|
||||
redirectToTrailingSlash(srv, "admin", path)
|
||||
}
|
||||
if l.AdminHTTPS.Enabled {
|
||||
port := config.Port(l.AdminHTTPS.Port, 443)
|
||||
path := "/admin/"
|
||||
if l.AdminHTTPS.Path != "" {
|
||||
path = l.AdminHTTPS.Path
|
||||
}
|
||||
srv := ensureServe(true, port, "admin-https at "+path)
|
||||
handler := safeHeaders(http.StripPrefix(path[:len(path)-1], http.HandlerFunc(webadmin.Handler(path, l.AdminHTTPS.Forwarded))))
|
||||
srv.Handle("admin", nil, path, handler)
|
||||
redirectToTrailingSlash(srv, "admin", path)
|
||||
if l.TLS != nil && l.TLS.ACME != "" && (l.SMTP.Enabled && !l.SMTP.NoSTARTTLS || l.Submissions.Enabled || l.IMAPS.Enabled) {
|
||||
port := config.Port(mox.Conf.Static.ACME[l.TLS.ACME].Port, 443)
|
||||
ensureServe(true, false, false, port, "acme-tls-alpn-01", false)
|
||||
}
|
||||
if l.Submissions.Enabled && l.Submissions.EnabledOnHTTPS {
|
||||
s := ensureServe(true, false, false, 443, "smtp-https", false)
|
||||
hostname := mox.Conf.Static.HostnameDomain
|
||||
if l.Hostname != "" {
|
||||
hostname = l.HostnameDomain
|
||||
}
|
||||
|
||||
maxMsgSize := l.SMTPMaxMessageSize
|
||||
if maxMsgSize == 0 {
|
||||
maxMsgSize = config.DefaultMaxMsgSize
|
||||
}
|
||||
requireTLS := !l.SMTP.NoRequireTLS
|
||||
|
||||
if l.WebAPIHTTP.Enabled {
|
||||
port := config.Port(l.WebAPIHTTP.Port, 80)
|
||||
path := "/webapi/"
|
||||
if l.WebAPIHTTP.Path != "" {
|
||||
path = l.WebAPIHTTP.Path
|
||||
}
|
||||
srv := ensureServe(false, port, "webapi-http at "+path)
|
||||
handler := safeHeaders(http.StripPrefix(path[:len(path)-1], webapisrv.NewServer(maxMsgSize, path, l.WebAPIHTTP.Forwarded)))
|
||||
srv.Handle("webapi", nil, path, handler)
|
||||
redirectToTrailingSlash(srv, "webapi", path)
|
||||
s.NextProto["smtp"] = func(_ *http.Server, conn *tls.Conn, _ http.Handler) {
|
||||
smtpserver.ServeTLSConn(name, hostname, conn, s.TLSConfig, true, true, maxMsgSize, requireTLS)
|
||||
}
|
||||
if l.WebAPIHTTPS.Enabled {
|
||||
port := config.Port(l.WebAPIHTTPS.Port, 443)
|
||||
path := "/webapi/"
|
||||
if l.WebAPIHTTPS.Path != "" {
|
||||
path = l.WebAPIHTTPS.Path
|
||||
}
|
||||
if l.IMAPS.Enabled && l.IMAPS.EnabledOnHTTPS {
|
||||
s := ensureServe(true, false, false, 443, "imap-https", false)
|
||||
s.NextProto["imap"] = func(_ *http.Server, conn *tls.Conn, _ http.Handler) {
|
||||
imapserver.ServeTLSConn(name, conn, s.TLSConfig)
|
||||
}
|
||||
}
|
||||
if l.AccountHTTP.Enabled {
|
||||
port := config.Port(l.AccountHTTP.Port, 80)
|
||||
path := "/"
|
||||
if l.AccountHTTP.Path != "" {
|
||||
path = l.AccountHTTP.Path
|
||||
}
|
||||
srv := ensureServe(false, l.AccountHTTP.Forwarded, false, port, "account-http at "+path, true)
|
||||
handler := mox.SafeHeaders(http.StripPrefix(strings.TrimRight(path, "/"), http.HandlerFunc(webaccount.Handler(path, l.AccountHTTP.Forwarded))))
|
||||
srv.ServiceHandle("account", accountHostMatch, path, handler)
|
||||
redirectToTrailingSlash(srv, accountHostMatch, "account", path)
|
||||
ensureACMEHTTP01(srv)
|
||||
}
|
||||
if l.AccountHTTPS.Enabled {
|
||||
port := config.Port(l.AccountHTTPS.Port, 443)
|
||||
path := "/"
|
||||
if l.AccountHTTPS.Path != "" {
|
||||
path = l.AccountHTTPS.Path
|
||||
}
|
||||
srv := ensureServe(true, l.AccountHTTPS.Forwarded, false, port, "account-https at "+path, true)
|
||||
handler := mox.SafeHeaders(http.StripPrefix(strings.TrimRight(path, "/"), http.HandlerFunc(webaccount.Handler(path, l.AccountHTTPS.Forwarded))))
|
||||
srv.ServiceHandle("account", accountHostMatch, path, handler)
|
||||
redirectToTrailingSlash(srv, accountHostMatch, "account", path)
|
||||
}
|
||||
|
||||
if l.AdminHTTP.Enabled {
|
||||
port := config.Port(l.AdminHTTP.Port, 80)
|
||||
path := "/admin/"
|
||||
if l.AdminHTTP.Path != "" {
|
||||
path = l.AdminHTTP.Path
|
||||
}
|
||||
srv := ensureServe(false, l.AdminHTTP.Forwarded, false, port, "admin-http at "+path, true)
|
||||
handler := mox.SafeHeaders(http.StripPrefix(strings.TrimRight(path, "/"), http.HandlerFunc(webadmin.Handler(path, l.AdminHTTP.Forwarded))))
|
||||
srv.ServiceHandle("admin", listenerHostMatch, path, handler)
|
||||
redirectToTrailingSlash(srv, listenerHostMatch, "admin", path)
|
||||
ensureACMEHTTP01(srv)
|
||||
}
|
||||
if l.AdminHTTPS.Enabled {
|
||||
port := config.Port(l.AdminHTTPS.Port, 443)
|
||||
path := "/admin/"
|
||||
if l.AdminHTTPS.Path != "" {
|
||||
path = l.AdminHTTPS.Path
|
||||
}
|
||||
srv := ensureServe(true, l.AdminHTTPS.Forwarded, false, port, "admin-https at "+path, true)
|
||||
handler := mox.SafeHeaders(http.StripPrefix(strings.TrimRight(path, "/"), http.HandlerFunc(webadmin.Handler(path, l.AdminHTTPS.Forwarded))))
|
||||
srv.ServiceHandle("admin", listenerHostMatch, path, handler)
|
||||
redirectToTrailingSlash(srv, listenerHostMatch, "admin", path)
|
||||
}
|
||||
|
||||
maxMsgSize := l.SMTPMaxMessageSize
|
||||
if maxMsgSize == 0 {
|
||||
maxMsgSize = config.DefaultMaxMsgSize
|
||||
}
|
||||
|
||||
if l.WebAPIHTTP.Enabled {
|
||||
port := config.Port(l.WebAPIHTTP.Port, 80)
|
||||
path := "/webapi/"
|
||||
if l.WebAPIHTTP.Path != "" {
|
||||
path = l.WebAPIHTTP.Path
|
||||
}
|
||||
srv := ensureServe(false, l.WebAPIHTTP.Forwarded, false, port, "webapi-http at "+path, true)
|
||||
handler := mox.SafeHeaders(http.StripPrefix(strings.TrimRight(path, "/"), webapisrv.NewServer(maxMsgSize, path, l.WebAPIHTTP.Forwarded)))
|
||||
srv.ServiceHandle("webapi", accountHostMatch, path, handler)
|
||||
redirectToTrailingSlash(srv, accountHostMatch, "webapi", path)
|
||||
ensureACMEHTTP01(srv)
|
||||
}
|
||||
if l.WebAPIHTTPS.Enabled {
|
||||
port := config.Port(l.WebAPIHTTPS.Port, 443)
|
||||
path := "/webapi/"
|
||||
if l.WebAPIHTTPS.Path != "" {
|
||||
path = l.WebAPIHTTPS.Path
|
||||
}
|
||||
srv := ensureServe(true, l.WebAPIHTTPS.Forwarded, false, port, "webapi-https at "+path, true)
|
||||
handler := mox.SafeHeaders(http.StripPrefix(strings.TrimRight(path, "/"), webapisrv.NewServer(maxMsgSize, path, l.WebAPIHTTPS.Forwarded)))
|
||||
srv.ServiceHandle("webapi", accountHostMatch, path, handler)
|
||||
redirectToTrailingSlash(srv, accountHostMatch, "webapi", path)
|
||||
}
|
||||
|
||||
if l.WebmailHTTP.Enabled {
|
||||
port := config.Port(l.WebmailHTTP.Port, 80)
|
||||
path := "/webmail/"
|
||||
if l.WebmailHTTP.Path != "" {
|
||||
path = l.WebmailHTTP.Path
|
||||
}
|
||||
srv := ensureServe(false, l.WebmailHTTP.Forwarded, false, port, "webmail-http at "+path, true)
|
||||
var accountPath string
|
||||
if l.AccountHTTP.Enabled {
|
||||
accountPath = "/"
|
||||
if l.AccountHTTP.Path != "" {
|
||||
accountPath = l.AccountHTTP.Path
|
||||
}
|
||||
srv := ensureServe(true, port, "webapi-https at "+path)
|
||||
handler := safeHeaders(http.StripPrefix(path[:len(path)-1], webapisrv.NewServer(maxMsgSize, path, l.WebAPIHTTPS.Forwarded)))
|
||||
srv.Handle("webapi", nil, path, handler)
|
||||
redirectToTrailingSlash(srv, "webapi", path)
|
||||
}
|
||||
handler := http.StripPrefix(strings.TrimRight(path, "/"), http.HandlerFunc(webmail.Handler(maxMsgSize, path, l.WebmailHTTP.Forwarded, accountPath)))
|
||||
srv.ServiceHandle("webmail", accountHostMatch, path, handler)
|
||||
redirectToTrailingSlash(srv, accountHostMatch, "webmail", path)
|
||||
ensureACMEHTTP01(srv)
|
||||
}
|
||||
if l.WebmailHTTPS.Enabled {
|
||||
port := config.Port(l.WebmailHTTPS.Port, 443)
|
||||
path := "/webmail/"
|
||||
if l.WebmailHTTPS.Path != "" {
|
||||
path = l.WebmailHTTPS.Path
|
||||
}
|
||||
srv := ensureServe(true, l.WebmailHTTPS.Forwarded, false, port, "webmail-https at "+path, true)
|
||||
var accountPath string
|
||||
if l.AccountHTTPS.Enabled {
|
||||
accountPath = "/"
|
||||
if l.AccountHTTPS.Path != "" {
|
||||
accountPath = l.AccountHTTPS.Path
|
||||
}
|
||||
}
|
||||
handler := http.StripPrefix(strings.TrimRight(path, "/"), http.HandlerFunc(webmail.Handler(maxMsgSize, path, l.WebmailHTTPS.Forwarded, accountPath)))
|
||||
srv.ServiceHandle("webmail", accountHostMatch, path, handler)
|
||||
redirectToTrailingSlash(srv, accountHostMatch, "webmail", path)
|
||||
}
|
||||
|
||||
if l.MetricsHTTP.Enabled {
|
||||
port := config.Port(l.MetricsHTTP.Port, 8010)
|
||||
srv := ensureServe(false, false, false, port, "metrics-http", false)
|
||||
srv.SystemHandle("metrics", nil, "/metrics", mox.SafeHeaders(promhttp.Handler()))
|
||||
srv.SystemHandle("metrics", nil, "/", mox.SafeHeaders(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.URL.Path != "/" {
|
||||
http.NotFound(w, r)
|
||||
return
|
||||
} else if r.Method != "GET" {
|
||||
http.Error(w, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Content-Type", "text/html")
|
||||
fmt.Fprint(w, `<html><body>see <a href="metrics">metrics</a></body></html>`)
|
||||
})))
|
||||
}
|
||||
if l.AutoconfigHTTPS.Enabled {
|
||||
port := config.Port(l.AutoconfigHTTPS.Port, 443)
|
||||
srv := ensureServe(!l.AutoconfigHTTPS.NonTLS, false, false, port, "autoconfig-https", false)
|
||||
if l.AutoconfigHTTPS.NonTLS {
|
||||
ensureACMEHTTP01(srv)
|
||||
}
|
||||
autoconfigMatch := func(ipdom dns.IPDomain) bool {
|
||||
dom := ipdom.Domain
|
||||
if dom.IsZero() {
|
||||
return false
|
||||
}
|
||||
// Thunderbird requests an autodiscovery URL at the email address domain name, so
|
||||
// autoconfig prefix is optional.
|
||||
if strings.HasPrefix(dom.ASCII, "autoconfig.") {
|
||||
dom.ASCII = strings.TrimPrefix(dom.ASCII, "autoconfig.")
|
||||
dom.Unicode = strings.TrimPrefix(dom.Unicode, "autoconfig.")
|
||||
}
|
||||
// Autodiscovery uses a SRV record. It shouldn't point to a CNAME. So we directly
|
||||
// use the mail server's host name.
|
||||
if dom == mox.Conf.Static.HostnameDomain || dom == mox.Conf.Static.Listeners["public"].HostnameDomain {
|
||||
return true
|
||||
}
|
||||
dc, ok := mox.Conf.Domain(dom)
|
||||
return ok && !dc.ReportsOnly
|
||||
}
|
||||
srv.SystemHandle("autoconfig", autoconfigMatch, "/mail/config-v1.1.xml", mox.SafeHeaders(http.HandlerFunc(autoconfHandle)))
|
||||
srv.SystemHandle("autodiscover", autoconfigMatch, "/autodiscover/autodiscover.xml", mox.SafeHeaders(http.HandlerFunc(autodiscoverHandle)))
|
||||
srv.SystemHandle("mobileconfig", autoconfigMatch, "/profile.mobileconfig", mox.SafeHeaders(http.HandlerFunc(mobileconfigHandle)))
|
||||
srv.SystemHandle("mobileconfigqrcodepng", autoconfigMatch, "/profile.mobileconfig.qrcode.png", mox.SafeHeaders(http.HandlerFunc(mobileconfigQRCodeHandle)))
|
||||
}
|
||||
if l.MTASTSHTTPS.Enabled {
|
||||
port := config.Port(l.MTASTSHTTPS.Port, 443)
|
||||
srv := ensureServe(!l.MTASTSHTTPS.NonTLS, false, false, port, "mtasts-https", false)
|
||||
if l.MTASTSHTTPS.NonTLS {
|
||||
ensureACMEHTTP01(srv)
|
||||
}
|
||||
mtastsMatch := func(ipdom dns.IPDomain) bool {
|
||||
// todo: may want to check this against the configured domains, could in theory be just a webserver.
|
||||
dom := ipdom.Domain
|
||||
if dom.IsZero() {
|
||||
return false
|
||||
}
|
||||
return strings.HasPrefix(dom.ASCII, "mta-sts.")
|
||||
}
|
||||
srv.SystemHandle("mtasts", mtastsMatch, "/.well-known/mta-sts.txt", mox.SafeHeaders(http.HandlerFunc(mtastsPolicyHandle)))
|
||||
}
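The mtastsMatch closure above only accepts hosts with an "mta-sts." prefix, and the policy is served at /.well-known/mta-sts.txt. For reference, a minimal standalone handler sketch follows; the policy body mirrors the output expected by the test later in this diff ("version: STSv1", "mode: enforce", "max_age: 86400", "mx: mox.example"), while mox itself generates the policy from its configuration rather than hardcoding it. The listen address is a placeholder.

package main

import (
	"fmt"
	"net/http"
)

// Minimal sketch of an MTA-STS policy endpoint, served on the mta-sts.<domain>
// host in practice (e.g. mta-sts.mox.example).
func main() {
	http.HandleFunc("/.well-known/mta-sts.txt", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "text/plain")
		fmt.Fprint(w, "version: STSv1\nmode: enforce\nmax_age: 86400\nmx: mox.example\n")
	})
	// Placeholder address for the sketch.
	http.ListenAndServe("127.0.0.1:8080", nil)
}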
|
||||
if l.PprofHTTP.Enabled {
|
||||
// Importing net/http/pprof registers handlers on the default serve mux.
|
||||
port := config.Port(l.PprofHTTP.Port, 8011)
|
||||
if _, ok := portServe[port]; ok {
|
||||
pkglog.Fatal("cannot serve pprof on same endpoint as other http services")
|
||||
}
|
||||
srv := &serve{[]string{"pprof-http"}, nil, nil, false, false, false, nil, false, nil}
|
||||
portServe[port] = srv
|
||||
srv.SystemHandle("pprof", nil, "/", http.DefaultServeMux)
|
||||
}
|
||||
if l.WebserverHTTP.Enabled {
|
||||
port := config.Port(l.WebserverHTTP.Port, 80)
|
||||
srv := ensureServe(false, false, l.WebserverHTTP.RateLimitDisabled, port, "webserver-http", false)
|
||||
srv.Webserver = true
|
||||
ensureACMEHTTP01(srv)
|
||||
}
|
||||
if l.WebserverHTTPS.Enabled {
|
||||
port := config.Port(l.WebserverHTTPS.Port, 443)
|
||||
srv := ensureServe(true, false, l.WebserverHTTPS.RateLimitDisabled, port, "webserver-https", false)
|
||||
srv.Webserver = true
|
||||
}
|
||||
|
||||
if l.TLS != nil && l.TLS.ACME != "" {
|
||||
m := mox.Conf.Static.ACME[l.TLS.ACME].Manager
|
||||
if ensureManagerHosts[m] == nil {
|
||||
ensureManagerHosts[m] = map[dns.Domain]struct{}{}
|
||||
}
|
||||
hosts := ensureManagerHosts[m]
|
||||
hosts[mox.Conf.Static.HostnameDomain] = struct{}{}
|
||||
|
||||
if l.HostnameDomain.ASCII != "" {
|
||||
hosts[l.HostnameDomain] = struct{}{}
|
||||
}
|
||||
|
||||
if l.WebmailHTTP.Enabled {
|
||||
port := config.Port(l.WebmailHTTP.Port, 80)
|
||||
path := "/webmail/"
|
||||
if l.WebmailHTTP.Path != "" {
|
||||
path = l.WebmailHTTP.Path
|
||||
}
|
||||
srv := ensureServe(false, port, "webmail-http at "+path)
|
||||
var accountPath string
|
||||
if l.AccountHTTP.Enabled {
|
||||
accountPath = "/"
|
||||
if l.AccountHTTP.Path != "" {
|
||||
accountPath = l.AccountHTTP.Path
|
||||
}
|
||||
}
|
||||
handler := http.StripPrefix(path[:len(path)-1], http.HandlerFunc(webmail.Handler(maxMsgSize, path, l.WebmailHTTP.Forwarded, accountPath)))
|
||||
srv.Handle("webmail", nil, path, handler)
|
||||
redirectToTrailingSlash(srv, "webmail", path)
|
||||
}
|
||||
if l.WebmailHTTPS.Enabled {
|
||||
port := config.Port(l.WebmailHTTPS.Port, 443)
|
||||
path := "/webmail/"
|
||||
if l.WebmailHTTPS.Path != "" {
|
||||
path = l.WebmailHTTPS.Path
|
||||
}
|
||||
srv := ensureServe(true, port, "webmail-https at "+path)
|
||||
var accountPath string
|
||||
if l.AccountHTTPS.Enabled {
|
||||
accountPath = "/"
|
||||
if l.AccountHTTPS.Path != "" {
|
||||
accountPath = l.AccountHTTPS.Path
|
||||
}
|
||||
}
|
||||
handler := http.StripPrefix(path[:len(path)-1], http.HandlerFunc(webmail.Handler(maxMsgSize, path, l.WebmailHTTPS.Forwarded, accountPath)))
|
||||
srv.Handle("webmail", nil, path, handler)
|
||||
redirectToTrailingSlash(srv, "webmail", path)
|
||||
}
|
||||
// All domains are served on all listeners. Gather autoconfig hostnames to ensure
|
||||
// presence of TLS certificates. Fetching a certificate on-demand may be too slow
|
||||
// for the timeouts of clients doing autoconfig.
|
||||
|
||||
if l.MetricsHTTP.Enabled {
|
||||
port := config.Port(l.MetricsHTTP.Port, 8010)
|
||||
srv := ensureServe(false, port, "metrics-http")
|
||||
srv.Handle("metrics", nil, "/metrics", safeHeaders(promhttp.Handler()))
|
||||
srv.Handle("metrics", nil, "/", safeHeaders(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.URL.Path != "/" {
|
||||
http.NotFound(w, r)
|
||||
return
|
||||
} else if r.Method != "GET" {
|
||||
http.Error(w, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Content-Type", "text/html")
|
||||
fmt.Fprint(w, `<html><body>see <a href="metrics">metrics</a></body></html>`)
|
||||
})))
|
||||
}
|
||||
if l.AutoconfigHTTPS.Enabled {
|
||||
port := config.Port(l.AutoconfigHTTPS.Port, 443)
|
||||
srv := ensureServe(!l.AutoconfigHTTPS.NonTLS, port, "autoconfig-https")
|
||||
autoconfigMatch := func(dom dns.Domain) bool {
|
||||
// Thunderbird requests an autodiscovery URL at the email address domain name, so
|
||||
// autoconfig prefix is optional.
|
||||
if strings.HasPrefix(dom.ASCII, "autoconfig.") {
|
||||
dom.ASCII = strings.TrimPrefix(dom.ASCII, "autoconfig.")
|
||||
dom.Unicode = strings.TrimPrefix(dom.Unicode, "autoconfig.")
|
||||
}
|
||||
// Autodiscovery uses a SRV record. It shouldn't point to a CNAME. So we directly
|
||||
// use the mail server's host name.
|
||||
if dom == mox.Conf.Static.HostnameDomain || dom == mox.Conf.Static.Listeners["public"].HostnameDomain {
|
||||
return true
|
||||
}
|
||||
dc, ok := mox.Conf.Domain(dom)
|
||||
return ok && !dc.ReportsOnly
|
||||
}
|
||||
srv.Handle("autoconfig", autoconfigMatch, "/mail/config-v1.1.xml", safeHeaders(http.HandlerFunc(autoconfHandle)))
|
||||
srv.Handle("autodiscover", autoconfigMatch, "/autodiscover/autodiscover.xml", safeHeaders(http.HandlerFunc(autodiscoverHandle)))
|
||||
srv.Handle("mobileconfig", autoconfigMatch, "/profile.mobileconfig", safeHeaders(http.HandlerFunc(mobileconfigHandle)))
|
||||
srv.Handle("mobileconfigqrcodepng", autoconfigMatch, "/profile.mobileconfig.qrcode.png", safeHeaders(http.HandlerFunc(mobileconfigQRCodeHandle)))
|
||||
}
|
||||
if l.MTASTSHTTPS.Enabled {
|
||||
port := config.Port(l.MTASTSHTTPS.Port, 443)
|
||||
srv := ensureServe(!l.MTASTSHTTPS.NonTLS, port, "mtasts-https")
|
||||
mtastsMatch := func(dom dns.Domain) bool {
|
||||
// todo: may want to check this against the configured domains, could in theory be just a webserver.
|
||||
return strings.HasPrefix(dom.ASCII, "mta-sts.")
|
||||
}
|
||||
srv.Handle("mtasts", mtastsMatch, "/.well-known/mta-sts.txt", safeHeaders(http.HandlerFunc(mtastsPolicyHandle)))
|
||||
}
|
||||
if l.PprofHTTP.Enabled {
|
||||
// Importing net/http/pprof registers handlers on the default serve mux.
|
||||
port := config.Port(l.PprofHTTP.Port, 8011)
|
||||
if _, ok := portServe[port]; ok {
|
||||
pkglog.Fatal("cannot serve pprof on same endpoint as other http services")
|
||||
}
|
||||
srv := &serve{[]string{"pprof-http"}, nil, nil, false}
|
||||
portServe[port] = srv
|
||||
srv.Handle("pprof", nil, "/", http.DefaultServeMux)
|
||||
}
|
||||
if l.WebserverHTTP.Enabled {
|
||||
port := config.Port(l.WebserverHTTP.Port, 80)
|
||||
srv := ensureServe(false, port, "webserver-http")
|
||||
srv.Webserver = true
|
||||
}
|
||||
if l.WebserverHTTPS.Enabled {
|
||||
port := config.Port(l.WebserverHTTPS.Port, 443)
|
||||
srv := ensureServe(true, port, "webserver-https")
|
||||
srv.Webserver = true
|
||||
}
|
||||
|
||||
if l.TLS != nil && l.TLS.ACME != "" {
|
||||
m := mox.Conf.Static.ACME[l.TLS.ACME].Manager
|
||||
|
||||
// If we are listening on port 80 for plain http, also register acme http-01
|
||||
// validation handler.
|
||||
if srv, ok := portServe[80]; ok && srv.TLSConfig == nil {
|
||||
srv.Kinds = append(srv.Kinds, "acme-http-01")
|
||||
srv.Handle("acme-http-01", nil, "/.well-known/acme-challenge/", m.Manager.HTTPHandler(nil))
|
||||
}
|
||||
|
||||
hosts := map[dns.Domain]struct{}{
|
||||
mox.Conf.Static.HostnameDomain: {},
|
||||
}
|
||||
if l.HostnameDomain.ASCII != "" {
|
||||
hosts[l.HostnameDomain] = struct{}{}
|
||||
}
|
||||
// All domains are served on all listeners. Gather autoconfig hostnames to ensure
|
||||
// presence of TLS certificates for.
|
||||
if l.AutoconfigHTTPS.Enabled && !l.AutoconfigHTTPS.NonTLS {
|
||||
for _, name := range mox.Conf.Domains() {
|
||||
if dom, err := dns.ParseDomain(name); err != nil {
|
||||
pkglog.Errorx("parsing domain from config", err)
|
||||
} else if d, _ := mox.Conf.Domain(dom); d.ReportsOnly {
|
||||
// Do not gather autoconfig name if we aren't accepting email for this domain.
|
||||
} else if d, _ := mox.Conf.Domain(dom); d.ReportsOnly || d.Disabled {
|
||||
// Do not gather autoconfig name if we aren't accepting email for this domain or when it is disabled.
|
||||
continue
|
||||
}
|
||||
|
||||
@ -741,29 +903,32 @@ func Listen() {
hosts[autoconfdom] = struct{}{}
}
}

ensureManagerHosts[m] = hosts
}

ports := maps.Keys(portServe)
sort.Ints(ports)
for _, port := range ports {
srv := portServe[port]
sort.Slice(srv.PathHandlers, func(i, j int) bool {
a := srv.PathHandlers[i].Path
b := srv.PathHandlers[j].Path
if len(a) == len(b) {
// For consistent order.
return a < b
}
// Longest paths first.
return len(a) > len(b)
})
for _, ip := range l.IPs {
listen1(ip, port, srv.TLSConfig, name, srv.Kinds, srv)
}
}
}

if s := portServe[443]; s != nil && s.TLSConfig != nil && len(s.NextProto) > 0 {
s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, slices.Collect(maps.Keys(s.NextProto))...)
}

for _, srv := range portServe {
sortPathHandlers(srv.SystemHandlers)
sortPathHandlers(srv.ServiceHandlers)
}

return portServe
}

func sortPathHandlers(l []pathHandler) {
sort.Slice(l, func(i, j int) bool {
a := l[i].Path
b := l[j].Path
if len(a) == len(b) {
// For consistent order.
return a < b
}
// Longest paths first.
return len(a) > len(b)
})
}

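Handlers are sorted longest-path-first so that a more specific registration (for example /.well-known/acme-challenge/) wins over a catch-all like /. A tiny standalone demonstration of the same comparison function, with made-up example paths:

package main

import (
	"fmt"
	"sort"
)

// Demonstrates the longest-path-first ordering used by sortPathHandlers above.
func main() {
	paths := []string{"/", "/metrics", "/.well-known/acme-challenge/", "/admin/"}
	sort.Slice(paths, func(i, j int) bool {
		a, b := paths[i], paths[j]
		if len(a) == len(b) {
			return a < b // consistent order for equal lengths
		}
		return len(a) > len(b) // longest paths first
	})
	fmt.Println(paths) // [/.well-known/acme-challenge/ /metrics /admin/ /]
}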
// functions to be launched in goroutine that will serve on a listener.
@ -776,8 +941,10 @@ var servers []func()
// the certificate to be given during the first https connection.
var ensureManagerHosts = map[*autotls.Manager]map[dns.Domain]struct{}{}

type tlsNextProtoMap = map[string]func(*http.Server, *tls.Conn, http.Handler)

// listen prepares a listener, and adds it to "servers", to be launched (if not running as root) through Serve.
func listen1(ip string, port int, tlsConfig *tls.Config, name string, kinds []string, handler http.Handler) {
func listen1(ip string, port int, tlsConfig *tls.Config, name string, kinds []string, handler http.Handler, nextProto tlsNextProtoMap) {
addr := net.JoinHostPort(ip, fmt.Sprintf("%d", port))

var protocol string
@ -811,12 +978,20 @@ func listen1(ip string, port int, tlsConfig *tls.Config, name string, kinds []st
}

server := &http.Server{
Handler: handler,
// Clone because our multiple Server.Serve calls modify config concurrently leading to data race.
TLSConfig: tlsConfig.Clone(),
Handler: handler,
TLSConfig: tlsConfig,
ReadHeaderTimeout: 30 * time.Second,
IdleTimeout: 65 * time.Second, // Chrome closes connections after 60 seconds, firefox after 115 seconds.
ErrorLog: golog.New(mlog.LogWriter(pkglog.With(slog.String("pkg", "net/http")), slog.LevelInfo, protocol+" error"), "", 0),
TLSNextProto: nextProto,
}
// By default, the Go 1.6 and above http.Server includes support for HTTP2.
// However, HTTP2 is negotiated via ALPN. Because we are configuring
// TLSNextProto above, we have to explicitly enable HTTP2 by importing http2
// and calling ConfigureServer.
err = http2.ConfigureServer(server, nil)
if err != nil {
pkglog.Fatalx("https: unable to configure http2", err)
}
serve := func() {
err := server.Serve(ln)
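Setting a non-nil TLSNextProto map disables Go's automatic HTTP/2 support, which is why the code above calls http2.ConfigureServer explicitly. A minimal standalone sketch of the same pattern; the certificate paths, listen address, and the "example-proto" ALPN name are placeholders, only the shape of the configuration is taken from the diff.

package main

import (
	"crypto/tls"
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	srv := &http.Server{
		Addr:    ":443",
		Handler: http.NewServeMux(),
		// Custom ALPN handlers, e.g. "smtp" or "imap" as in the diff above. A non-nil
		// map disables net/http's built-in HTTP/2 setup.
		TLSNextProto: map[string]func(*http.Server, *tls.Conn, http.Handler){
			"example-proto": func(_ *http.Server, conn *tls.Conn, _ http.Handler) { conn.Close() },
		},
	}
	// Re-enable HTTP/2 explicitly; ConfigureServer registers the "h2" handler and
	// adds "h2" to the server's TLS NextProtos.
	if err := http2.ConfigureServer(srv, nil); err != nil {
		log.Fatalf("configure http2: %v", err)
	}
	// cert.pem/key.pem are placeholders.
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}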
|
@ -6,10 +6,8 @@ import (
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/mjl-/mox/dns"
|
||||
"github.com/mjl-/mox/mox-"
|
||||
)
|
||||
|
||||
@ -19,20 +17,8 @@ func TestServeHTTP(t *testing.T) {
|
||||
mox.ConfigDynamicPath = filepath.Join(filepath.Dir(mox.ConfigStaticPath), "domains.conf")
|
||||
mox.MustLoadConfig(true, false)
|
||||
|
||||
srv := &serve{
|
||||
PathHandlers: []pathHandler{
|
||||
{
|
||||
HostMatch: func(dom dns.Domain) bool {
|
||||
return strings.HasPrefix(dom.ASCII, "mta-sts.")
|
||||
},
|
||||
Path: "/.well-known/mta-sts.txt",
|
||||
Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Write([]byte("mta-sts!"))
|
||||
}),
|
||||
},
|
||||
},
|
||||
Webserver: true,
|
||||
}
|
||||
portSrvs := portServes("local", mox.Conf.Static.Listeners["local"])
|
||||
srv := portSrvs[80]
|
||||
|
||||
test := func(method, target string, expCode int, expContent string, expHeaders map[string]string) {
|
||||
t.Helper()
|
||||
@ -43,22 +29,22 @@ func TestServeHTTP(t *testing.T) {
|
||||
srv.ServeHTTP(rw, req)
|
||||
resp := rw.Result()
|
||||
if resp.StatusCode != expCode {
|
||||
t.Fatalf("got statuscode %d, expected %d", resp.StatusCode, expCode)
|
||||
t.Errorf("got statuscode %d, expected %d", resp.StatusCode, expCode)
|
||||
}
|
||||
if expContent != "" {
|
||||
s := rw.Body.String()
|
||||
if s != expContent {
|
||||
t.Fatalf("got response data %q, expected %q", s, expContent)
|
||||
t.Errorf("got response data %q, expected %q", s, expContent)
|
||||
}
|
||||
}
|
||||
for k, v := range expHeaders {
|
||||
if xv := resp.Header.Get(k); xv != v {
|
||||
t.Fatalf("got %q for header %q, expected %q", xv, k, v)
|
||||
t.Errorf("got %q for header %q, expected %q", xv, k, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
test("GET", "http://mta-sts.mox.example/.well-known/mta-sts.txt", http.StatusOK, "mta-sts!", nil)
|
||||
test("GET", "http://mta-sts.mox.example/.well-known/mta-sts.txt", http.StatusOK, "version: STSv1\nmode: enforce\nmax_age: 86400\nmx: mox.example\n", nil)
|
||||
test("GET", "http://mox.example/.well-known/mta-sts.txt", http.StatusNotFound, "", nil) // mta-sts endpoint not in this domain.
|
||||
test("GET", "http://mta-sts.mox.example/static/", http.StatusNotFound, "", nil) // static not served on this domain.
|
||||
test("GET", "http://mta-sts.mox.example/other", http.StatusNotFound, "", nil)
|
||||
@ -66,4 +52,24 @@ func TestServeHTTP(t *testing.T) {
|
||||
test("GET", "http://mox.example/static/index.html", http.StatusOK, "html\n", map[string]string{"X-Test": "mox"})
|
||||
test("GET", "http://mox.example/static/dir/", http.StatusOK, "", map[string]string{"X-Test": "mox"}) // Dir listing.
|
||||
test("GET", "http://mox.example/other", http.StatusNotFound, "", nil)
|
||||
|
||||
// Webmail on IP, localhost, mail host, clientsettingsdomain, not others.
|
||||
test("GET", "http://127.0.0.1/webmail/", http.StatusOK, "", nil)
|
||||
test("GET", "http://localhost/webmail/", http.StatusOK, "", nil)
|
||||
test("GET", "http://mox.example/webmail/", http.StatusOK, "", nil)
|
||||
test("GET", "http://mail.mox.example/webmail/", http.StatusOK, "", nil)
|
||||
test("GET", "http://mail.other.example/webmail/", http.StatusNotFound, "", nil)
|
||||
test("GET", "http://remotehost/webmail/", http.StatusNotFound, "", nil)
|
||||
|
||||
// admin on IP, localhost, mail host, not clientsettingsdomain.
|
||||
test("GET", "http://127.0.0.1/admin/", http.StatusOK, "", nil)
|
||||
test("GET", "http://localhost/admin/", http.StatusOK, "", nil)
|
||||
test("GET", "http://mox.example/admin/", http.StatusPermanentRedirect, "", nil) // Override by WebHandler.
|
||||
test("GET", "http://mail.mox.example/admin/", http.StatusNotFound, "", nil)
|
||||
|
||||
// account is off.
|
||||
test("GET", "http://127.0.0.1/", http.StatusNotFound, "", nil)
|
||||
test("GET", "http://localhost/", http.StatusNotFound, "", nil)
|
||||
test("GET", "http://mox.example/", http.StatusNotFound, "", nil)
|
||||
test("GET", "http://mail.mox.example/", http.StatusNotFound, "", nil)
|
||||
}
|
||||
|
@ -30,7 +30,6 @@ import (
"github.com/mjl-/mox/dns"
"github.com/mjl-/mox/mlog"
"github.com/mjl-/mox/mox-"
"github.com/mjl-/mox/moxio"
)

func recvid(r *http.Request) string {
@ -46,13 +45,13 @@ func recvid(r *http.Request) string {
// WebHandle runs after the built-in handlers for mta-sts, autoconfig, etc.
// If no handler matched, false is returned.
// WebHandle sets w.Name to that of the matching handler.
func WebHandle(w *loggingWriter, r *http.Request, host dns.Domain) (handled bool) {
func WebHandle(w *loggingWriter, r *http.Request, host dns.IPDomain) (handled bool) {
conf := mox.Conf.DynamicConfig()
redirects := conf.WebDNSDomainRedirects
handlers := conf.WebHandlers

for from, to := range redirects {
if host != from {
if host.Domain != from {
continue
}
u := r.URL
@ -64,7 +63,7 @@ func WebHandle(w *loggingWriter, r *http.Request, host dns.Domain) (handled bool
}

for _, h := range handlers {
if host != h.DNSDomain {
if host.Domain != h.DNSDomain {
continue
}
loc := h.Path.FindStringIndex(r.URL.Path)
@ -99,6 +98,10 @@ func WebHandle(w *loggingWriter, r *http.Request, host dns.Domain) (handled bool
w.Handler = h.Name
return true
}
if h.WebInternal != nil && HandleInternal(h.WebInternal, w, r) {
w.Handler = h.Name
return true
}
}
w.Compress = false
return false
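WebHandle now receives a dns.IPDomain and compares its Domain field against configured redirects and handlers, so requests arriving by bare IP simply match nothing instead of being treated as an error. A standalone sketch of that dispatch shape; the domain names and paths are invented, and mox matches handler paths with a regexp via h.Path.FindStringIndex, simplified here to a prefix check.

package main

import (
	"fmt"
	"strings"
)

type webHandlerSketch struct {
	Domain string // stand-in for h.DNSDomain
	Prefix string // stand-in for the regexp in h.Path
	Name   string
}

// dispatch mirrors the loops in WebHandle: first domain redirects, then
// handlers by domain plus path match. Returns the matching handler name, if any.
func dispatch(host, path string, redirects map[string]string, handlers []webHandlerSketch) (string, bool) {
	if to, ok := redirects[host]; ok {
		return "redirect to " + to, true
	}
	for _, h := range handlers {
		if host != h.Domain {
			continue
		}
		if strings.HasPrefix(path, h.Prefix) {
			return h.Name, true
		}
	}
	return "", false
}

func main() {
	handlers := []webHandlerSketch{{Domain: "mox.example", Prefix: "/static/", Name: "static"}}
	redirects := map[string]string{"www.mox.example": "mox.example"}
	fmt.Println(dispatch("www.mox.example", "/", redirects, handlers))
	fmt.Println(dispatch("mox.example", "/static/index.html", redirects, handlers))
	fmt.Println(dispatch("10.0.0.1", "/static/", redirects, handlers)) // IP host: no match
}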
@ -211,31 +214,41 @@ func HandleStatic(h *config.WebStatic, compress bool, w http.ResponseWriter, r *
|
||||
}
|
||||
http.NotFound(w, r)
|
||||
return true
|
||||
} else if errors.Is(err, syscall.ENAMETOOLONG) {
|
||||
http.NotFound(w, r)
|
||||
return true
|
||||
} else if os.IsPermission(err) {
|
||||
// If we tried opening a directory, we may not have permission to read it, but
|
||||
// still access files inside it (execute bit), such as index.html. So try to serve it.
|
||||
index, err := os.Open(filepath.Join(fspath, "index.html"))
|
||||
if err == nil {
|
||||
defer index.Close()
|
||||
var ifi os.FileInfo
|
||||
ifi, err = index.Stat()
|
||||
if err != nil {
|
||||
log().Errorx("stat index.html in directory we cannot list", err, slog.Any("url", r.URL), slog.String("fspath", fspath))
|
||||
http.Error(w, "500 - internal server error"+recvid(r), http.StatusInternalServerError)
|
||||
return true
|
||||
}
|
||||
w.Header().Set("Content-Type", "text/html; charset=utf-8")
|
||||
serveFile("index.html", ifi, index)
|
||||
if err != nil {
|
||||
http.Error(w, "403 - permission denied", http.StatusForbidden)
|
||||
return true
|
||||
}
|
||||
http.Error(w, "403 - permission denied", http.StatusForbidden)
|
||||
defer func() {
|
||||
err := index.Close()
|
||||
log().Check(err, "closing index file for serving")
|
||||
}()
|
||||
var ifi os.FileInfo
|
||||
ifi, err = index.Stat()
|
||||
if err != nil {
|
||||
log().Errorx("stat index.html in directory we cannot list", err, slog.Any("url", r.URL), slog.String("fspath", fspath))
|
||||
http.Error(w, "500 - internal server error"+recvid(r), http.StatusInternalServerError)
|
||||
return true
|
||||
}
|
||||
w.Header().Set("Content-Type", "text/html; charset=utf-8")
|
||||
serveFile("index.html", ifi, index)
|
||||
return true
|
||||
}
|
||||
log().Errorx("open file for static file serving", err, slog.Any("url", r.URL), slog.String("fspath", fspath))
|
||||
http.Error(w, "500 - internal server error"+recvid(r), http.StatusInternalServerError)
|
||||
return true
|
||||
}
|
||||
defer f.Close()
|
||||
defer func() {
|
||||
if err := f.Close(); err != nil {
|
||||
log().Check(err, "closing file for static file serving")
|
||||
}
|
||||
}()
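Many changes in this part of the diff replace a bare deferred Close with a deferred closure that logs the close error via log().Check. A minimal standalone version of that pattern, with the standard library's slog standing in for mox's mlog-based logger and an example path:

package main

import (
	"log/slog"
	"os"
)

// Sketch of the "log errors from deferred Close" pattern used above.
func serveFileSketch(path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer func() {
		if err := f.Close(); err != nil {
			slog.Error("closing file for static file serving", "err", err)
		}
	}()
	// ... serve the file ...
	return nil
}

func main() {
	_ = serveFileSketch("/etc/hostname") // example path
}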
|
||||
|
||||
fi, err := f.Stat()
|
||||
if err != nil {
|
||||
@ -267,7 +280,12 @@ func HandleStatic(h *config.WebStatic, compress bool, w http.ResponseWriter, r *
|
||||
http.Error(w, "403 - permission denied", http.StatusForbidden)
|
||||
return true
|
||||
} else if err == nil {
|
||||
defer index.Close()
|
||||
defer func() {
|
||||
if err := index.Close(); err != nil {
|
||||
log().Check(err, "closing index file for serving")
|
||||
}
|
||||
}()
|
||||
|
||||
var ifi os.FileInfo
|
||||
ifi, err = index.Stat()
|
||||
if err == nil {
|
||||
@ -334,8 +352,8 @@ func HandleStatic(h *config.WebStatic, compress bool, w http.ResponseWriter, r *
|
||||
}
|
||||
}
|
||||
err = lsTemplate.Execute(w, map[string]any{"Files": files})
|
||||
if err != nil && !moxio.IsClosed(err) {
|
||||
log().Errorx("executing directory listing template", err)
|
||||
if err != nil {
|
||||
log().Check(err, "executing directory listing template")
|
||||
}
|
||||
return true
|
||||
}
|
||||
@ -396,6 +414,12 @@ func HandleRedirect(h *config.WebRedirect, w http.ResponseWriter, r *http.Reques
|
||||
return true
|
||||
}
|
||||
|
||||
// HandleInternal passes the request to an internal service.
|
||||
func HandleInternal(h *config.WebInternal, w http.ResponseWriter, r *http.Request) (handled bool) {
|
||||
h.Handler.ServeHTTP(w, r)
|
||||
return true
|
||||
}
|
||||
|
||||
// HandleForward handles a request by forwarding it to another webserver and
|
||||
// passing the response on. I.e. a reverse proxy. It handles websocket
|
||||
// connections by monitoring the websocket handshake and then just passing along the
|
||||
@ -582,7 +606,9 @@ func forwardWebsocket(h *config.WebForward, w http.ResponseWriter, r *http.Reque
|
||||
}
|
||||
defer func() {
|
||||
if beconn != nil {
|
||||
beconn.Close()
|
||||
if err := beconn.Close(); err != nil {
|
||||
log().Check(err, "closing backend websocket connection")
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
@ -598,7 +624,9 @@ func forwardWebsocket(h *config.WebForward, w http.ResponseWriter, r *http.Reque
|
||||
}
|
||||
defer func() {
|
||||
if cconn != nil {
|
||||
cconn.Close()
|
||||
if err := cconn.Close(); err != nil {
|
||||
log().Check(err, "closing client websocket connection")
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
@ -651,8 +679,12 @@ func forwardWebsocket(h *config.WebForward, w http.ResponseWriter, r *http.Reque
|
||||
// connection whose closing was already announced with a websocket frame.
|
||||
lw.error(<-errc)
|
||||
// Close connections so other goroutine stops as well.
|
||||
cconn.Close()
|
||||
beconn.Close()
|
||||
if err := cconn.Close(); err != nil {
|
||||
log().Check(err, "closing client websocket connection")
|
||||
}
|
||||
if err := beconn.Close(); err != nil {
|
||||
log().Check(err, "closing backend websocket connection")
|
||||
}
|
||||
// Wait for goroutine so it has updated the logWriter.Size*Client fields before we
|
||||
// continue with logging.
|
||||
<-errc
|
||||
@ -705,7 +737,9 @@ func websocketTransact(ctx context.Context, targetURL *url.URL, r *http.Request)
|
||||
}
|
||||
defer func() {
|
||||
if rerr != nil {
|
||||
conn.Close()
|
||||
if xerr := conn.Close(); xerr != nil {
|
||||
log().Check(xerr, "cleaning up websocket connection")
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
@ -732,7 +766,9 @@ func websocketTransact(ctx context.Context, targetURL *url.URL, r *http.Request)
|
||||
}
|
||||
defer func() {
|
||||
if rerr != nil {
|
||||
resp.Body.Close()
|
||||
if xerr := resp.Body.Close(); xerr != nil {
|
||||
log().Check(xerr, "closing response body after error")
|
||||
}
|
||||
}
|
||||
}()
|
||||
if err := conn.SetDeadline(time.Time{}); err != nil {
|
||||
|
@ -134,6 +134,10 @@ func TestWebserver(t *testing.T) {
|
||||
|
||||
test("GET", "http://mox.example/bogus", nil, http.StatusNotFound, "", nil) // path not registered.
|
||||
test("GET", "http://bogus.mox.example/static/", nil, http.StatusNotFound, "", nil) // domain not registered.
|
||||
test("GET", "http://mox.example/xadmin/", nil, http.StatusOK, "", nil) // internal admin service
|
||||
test("GET", "http://mox.example/xaccount/", nil, http.StatusOK, "", nil) // internal account service
|
||||
test("GET", "http://mox.example/xwebmail/", nil, http.StatusOK, "", nil) // internal webmail service
|
||||
test("GET", "http://mox.example/xwebapi/v0/", nil, http.StatusOK, "", nil) // internal webapi service
|
||||
|
||||
npaths := len(staticgzcache.paths)
|
||||
if npaths != 1 {
|
||||
@ -335,5 +339,4 @@ func TestWebsocket(t *testing.T) {
|
||||
w.WriteHeader(http.StatusSwitchingProtocols)
|
||||
})
|
||||
test("GET", wsreqhdrs, http.StatusSwitchingProtocols, wsresphdrs)
|
||||
|
||||
}
|
||||
|
@ -1,40 +1,102 @@
/*
Package imapclient provides an IMAP4 client, primarily for testing the IMAP4 server.
Package imapclient provides an IMAP4 client implementing IMAP4rev1 (RFC 3501),
IMAP4rev2 (RFC 9051) and various extensions.

Commands can be sent to the server free-form, but responses are parsed strictly.
Behaviour that may not be required by the IMAP4 specification may be expected by
this client.
Warning: Currently primarily for testing the mox IMAP4 server. Behaviour that
may not be required by the IMAP4 specification may be expected by this client.

See [Conn] for a high-level client for executing IMAP commands. Use its embedded
[Proto] for lower-level writing of commands and reading of responses.
*/
package imapclient

/*
- Try to keep the parsing method names and the types similar to the ABNF names in the RFCs.

- todo: have mode for imap4rev1 vs imap4rev2, refusing what is not allowed. we are accepting too much now.
- todo: stricter parsing. xnonspace() and xword() should be replaced by proper parsers.
*/

import (
|
||||
"bufio"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"io"
|
||||
"log/slog"
|
||||
"net"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/mjl-/mox/mlog"
|
||||
"github.com/mjl-/mox/moxio"
|
||||
)
|
||||
|
||||
// Conn is an IMAP connection to a server.
// Conn is a connection to an IMAP server.
//
// Method names on Conn are the names of IMAP commands. CloseMailbox, which
// executes the IMAP CLOSE command, is an exception. The Close method closes the
// connection.
//
// The methods starting with MSN are the original (old) IMAP commands. The variants
// starting with UID should almost always be used instead, if available.
//
// The methods on Conn typically return errors of type Error or Response. Error
// represents protocol and i/o level errors, including io.ErrDeadlineExceeded and
// various errors for closed connections. Response is returned as error if the IMAP
// result is NO or BAD instead of OK. The responses returned by the IMAP command
// methods can also be non-zero on errors. Callers may wish to process any untagged
// responses.
//
// The IMAP command methods defined on Conn don't interpret the untagged responses
// except for untagged CAPABILITY and untagged ENABLED responses, and the
// CAPABILITY response code. Fields CapAvailable and CapEnabled are updated when
// those untagged responses are received.
//
// Capabilities indicate which optional IMAP functionality is supported by a
// server. Capabilities are typically implicitly enabled when the client sends a
// command using syntax of an optional extension. For extensions without new syntax
// from the client to the server, but with new behaviour or syntax from the server
// to the client, the client needs to explicitly enable the capability with the
// ENABLE command; see the Enable method.
type Conn struct {
|
||||
conn net.Conn
|
||||
r *bufio.Reader
|
||||
panic bool
|
||||
// If true, server sent a PREAUTH tag and the connection is already authenticated,
|
||||
// e.g. based on TLS certificate authentication.
|
||||
Preauth bool
|
||||
|
||||
// Capabilities available at server, from CAPABILITY command or response code.
|
||||
CapAvailable []Capability
|
||||
// Capabilities marked as enabled by the server, typically after an ENABLE command.
|
||||
CapEnabled []Capability
|
||||
|
||||
// Proto provides lower-level functions for interacting with the IMAP connection,
|
||||
// such as reading and writing individual lines/commands/responses.
|
||||
Proto
|
||||
}
|
||||
|
||||
// Proto provides low-level operations for writing requests and reading responses
|
||||
// on an IMAP connection.
|
||||
//
|
||||
// To implement the IDLE command, write "IDLE" using [Proto.WriteCommandf], then
|
||||
// read a line with [Proto.Readline]. If it starts with "+ ", the connection is in
|
||||
// idle mode and untagged responses can be read using [Proto.ReadUntagged]. If the
|
||||
// line doesn't start with "+ ", use [ParseResult] to interpret it as a response to
|
||||
// IDLE, which should be a NO or BAD. To abort idle mode, write "DONE" using
|
||||
// [Proto.Writelinef] and wait until a result line has been read.
|
||||
type Proto struct {
|
||||
// Connection, may be original TCP or TLS connection. Reads go through c.br, and
|
||||
// writes through c.xbw. The "x" for the writes indicate that failed writes cause
|
||||
// an i/o panic, which is either turned into a returned error, or passed on (see
|
||||
// boolean panic). The reader and writer wrap a tracing reading/writer and may wrap
|
||||
// flate compression.
|
||||
conn net.Conn
|
||||
connBroken bool // If connection is broken, we won't flush (and write) again.
|
||||
br *bufio.Reader
|
||||
tr *moxio.TraceReader
|
||||
xbw *bufio.Writer
|
||||
compress bool // If compression is enabled, we must flush flateWriter and its target original bufio writer.
|
||||
xflateWriter *moxio.FlateWriter
|
||||
xflateBW *bufio.Writer
|
||||
xtw *moxio.TraceWriter
|
||||
|
||||
log mlog.Log
|
||||
errHandle func(err error) // If set, called for all errors. Can panic. Used for imapserver tests.
|
||||
tagGen int
|
||||
record bool // If true, bytes read are added to recordBuf. recorded() resets.
|
||||
recordBuf []byte
|
||||
|
||||
LastTag string
|
||||
CapAvailable map[Capability]struct{} // Capabilities available at server, from CAPABILITY command or response code.
|
||||
CapEnabled map[Capability]struct{} // Capabilities enabled through ENABLE command.
|
||||
lastTag string
|
||||
}
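The Proto doc comment above describes how to implement IDLE on top of the low-level primitives. Below is a hedged sketch of that flow using only the methods named in the comment (WriteCommandf, Readline, ReadUntagged, Writelinef, ReadResponse); the exact signatures of Readline and Writelinef are assumed here and should be checked against the package.

package main

import (
	"fmt"
	"strings"

	"github.com/mjl-/mox/imapclient"
)

// Sketch of IDLE as described in the Proto doc comment. Assumes Readline
// returns (string, error) and Writelinef returns an error.
func idleSketch(p *imapclient.Proto, stop <-chan struct{}) error {
	if err := p.WriteCommandf("", "idle"); err != nil {
		return err
	}
	line, err := p.Readline()
	if err != nil {
		return err
	}
	if !strings.HasPrefix(line, "+ ") {
		// Not in idle mode; the line should be a NO/BAD result (see ParseResult).
		return fmt.Errorf("idle not accepted: %q", line)
	}
	for {
		select {
		case <-stop:
			// Abort idle and wait for the tagged completion result.
			if err := p.Writelinef("DONE"); err != nil {
				return err
			}
			_, err := p.ReadResponse()
			return err
		default:
			// Blocks until the server sends an untagged response; the stop channel
			// is only checked in between responses in this simplified sketch.
			if _, err := p.ReadUntagged(); err != nil {
				return err
			}
		}
	}
}

func main() {} // sketch only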
|
||||
|
||||
// Error is a parse or other protocol error.
|
||||
@ -48,22 +110,52 @@ func (e Error) Unwrap() error {
|
||||
return e.err
|
||||
}
|
||||
|
||||
// New creates a new client on conn.
// Opts has optional fields that influence behaviour of a Conn.
type Opts struct {
Logger *slog.Logger

// Error is called for IMAP-level and connection-level errors during the IMAP
// command methods on Conn, not for errors in calls on Proto. Error is allowed to
// call panic.
Error func(err error)
}

// New initializes a new IMAP client on conn.
//
// If xpanic is true, functions that would return an error instead panic. For parse
// errors, the resulting stack traces typically show what was being parsed.
// Conn should normally be a TLS connection, typically connected to port 993 of an
// IMAP server. Alternatively, conn can be a plain TCP connection to port 143. TLS
// should be enabled on plain TCP connections with the [Conn.StartTLS] method.
//
// The initial untagged greeting response is read and must be "OK".
func New(conn net.Conn, xpanic bool) (client *Conn, rerr error) {
// The initial untagged greeting response is read and must be "OK" or
// "PREAUTH". If preauth, the connection is already in authenticated state,
// typically through TLS client certificate. This is indicated in Conn.Preauth.
//
// Logging is written to opts.Logger. In particular, IMAP protocol traces are
// written with prefixes "CR: " and "CW: " (client read/write) as quoted strings at
// levels Debug-4, with authentication messages at Debug-6 and (user) data at level
// Debug-8.
func New(conn net.Conn, opts *Opts) (client *Conn, rerr error) {
c := Conn{
|
||||
conn: conn,
|
||||
r: bufio.NewReader(conn),
|
||||
panic: xpanic,
|
||||
CapAvailable: map[Capability]struct{}{},
|
||||
CapEnabled: map[Capability]struct{}{},
|
||||
Proto: Proto{conn: conn},
|
||||
}
|
||||
|
||||
defer c.recover(&rerr)
|
||||
var clog *slog.Logger
|
||||
if opts != nil {
|
||||
c.errHandle = opts.Error
|
||||
clog = opts.Logger
|
||||
} else {
|
||||
clog = slog.Default()
|
||||
}
|
||||
c.log = mlog.New("imapclient", clog)
|
||||
|
||||
c.tr = moxio.NewTraceReader(c.log, "CR: ", &c)
|
||||
c.br = bufio.NewReader(c.tr)
|
||||
|
||||
// Writes are buffered and write to Conn, which may panic.
|
||||
c.xtw = moxio.NewTraceWriter(c.log, "CW: ", &c)
|
||||
c.xbw = bufio.NewWriter(c.xtw)
|
||||
|
||||
defer c.recoverErr(&rerr)
|
||||
tag := c.xnonspace()
|
||||
if tag != "*" {
|
||||
c.xerrorf("expected untagged *, got %q", tag)
|
||||
@ -75,9 +167,15 @@ func New(conn net.Conn, xpanic bool) (client *Conn, rerr error) {
|
||||
if x.Status != OK {
|
||||
c.xerrorf("greeting, got status %q, expected OK", x.Status)
|
||||
}
|
||||
if x.Code != nil {
|
||||
if caps, ok := x.Code.(CodeCapability); ok {
|
||||
c.CapAvailable = caps
|
||||
}
|
||||
}
|
||||
return &c, nil
|
||||
case UntaggedPreauth:
|
||||
c.xerrorf("greeting: unexpected preauth")
|
||||
c.Preauth = true
|
||||
return &c, nil
|
||||
case UntaggedBye:
|
||||
c.xerrorf("greeting: server sent bye")
|
||||
default:
|
||||
@ -86,8 +184,16 @@ func New(conn net.Conn, xpanic bool) (client *Conn, rerr error) {
|
||||
panic("not reached")
|
||||
}
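A hedged end-to-end sketch of the new Opts-based New: dial TLS to port 993, create the client, send a free-form command through the embedded Proto, and read the tagged response. The server address is a placeholder; New, WriteCommandf, ReadResponse, Close, and the Response fields used below are the ones shown in this diff.

package main

import (
	"crypto/tls"
	"log"
	"log/slog"

	"github.com/mjl-/mox/imapclient"
)

func main() {
	// Placeholder address; a real client would verify the server certificate.
	conn, err := tls.Dial("tcp", "imap.example.com:993", &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	c, err := imapclient.New(conn, &imapclient.Opts{Logger: slog.Default()})
	if err != nil {
		log.Fatalf("new client: %v", err)
	}
	defer c.Close()

	// Free-form command through the embedded Proto; NOOP is a harmless example.
	if err := c.WriteCommandf("", "noop"); err != nil {
		log.Fatalf("write command: %v", err)
	}
	resp, err := c.ReadResponse()
	if err != nil {
		log.Fatalf("read response: %v", err)
	}
	log.Printf("result: %v, %d untagged responses", resp.Result, len(resp.Untagged))
}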
|
||||
|
||||
func (c *Conn) recover(rerr *error) {
|
||||
if c.panic {
|
||||
func (c *Conn) recoverErr(rerr *error) {
|
||||
c.recover(rerr, nil)
|
||||
}
|
||||
|
||||
func (c *Conn) recover(rerr *error, resp *Response) {
|
||||
if *rerr != nil {
|
||||
if r, ok := (*rerr).(Response); ok && resp != nil {
|
||||
*resp = r
|
||||
}
|
||||
c.errHandle(*rerr)
|
||||
return
|
||||
}
|
||||
|
||||
@ -95,30 +201,163 @@ func (c *Conn) recover(rerr *error) {
|
||||
if x == nil {
|
||||
return
|
||||
}
|
||||
err, ok := x.(Error)
|
||||
if !ok {
|
||||
var err error
|
||||
switch e := x.(type) {
|
||||
case Error:
|
||||
err = e
|
||||
case Response:
|
||||
err = e
|
||||
if resp != nil {
|
||||
*resp = e
|
||||
}
|
||||
default:
|
||||
panic(x)
|
||||
}
|
||||
if c.errHandle != nil {
|
||||
c.errHandle(err)
|
||||
}
|
||||
*rerr = err
|
||||
}
|
||||
|
||||
func (c *Conn) xerrorf(format string, args ...any) {
|
||||
panic(Error{fmt.Errorf(format, args...)})
|
||||
}
|
||||
func (p *Proto) recover(rerr *error) {
|
||||
if *rerr != nil {
|
||||
return
|
||||
}
|
||||
|
||||
func (c *Conn) xcheckf(err error, format string, args ...any) {
|
||||
if err != nil {
|
||||
c.xerrorf("%s: %w", fmt.Sprintf(format, args...), err)
|
||||
x := recover()
|
||||
if x == nil {
|
||||
return
|
||||
}
|
||||
switch e := x.(type) {
|
||||
case Error:
|
||||
*rerr = e
|
||||
default:
|
||||
panic(x)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Conn) xcheck(err error) {
|
||||
func (p *Proto) xerrorf(format string, args ...any) {
|
||||
panic(Error{fmt.Errorf(format, args...)})
|
||||
}
|
||||
|
||||
func (p *Proto) xcheckf(err error, format string, args ...any) {
|
||||
if err != nil {
|
||||
p.xerrorf("%s: %w", fmt.Sprintf(format, args...), err)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Proto) xcheck(err error) {
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// TLSConnectionState returns the TLS connection state if the connection uses TLS.
|
||||
// xresponse sets resp if err is a Response and resp is not nil.
|
||||
func (p *Proto) xresponse(err error, resp *Response) {
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
if r, ok := err.(Response); ok && resp != nil {
|
||||
*resp = r
|
||||
}
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Write writes directly to underlying connection (TCP, TLS). For internal use
|
||||
// only, to implement io.Writer. Write errors do take the connection's panic mode
|
||||
// into account, i.e. Write can panic.
|
||||
func (p *Proto) Write(buf []byte) (n int, rerr error) {
|
||||
defer p.recover(&rerr)
|
||||
|
||||
n, rerr = p.conn.Write(buf)
|
||||
if rerr != nil {
|
||||
p.connBroken = true
|
||||
}
|
||||
p.xcheckf(rerr, "write")
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// Read reads directly from the underlying connection (TCP, TLS). For internal use
|
||||
// only, to implement io.Reader.
|
||||
func (p *Proto) Read(buf []byte) (n int, err error) {
|
||||
return p.conn.Read(buf)
|
||||
}
|
||||
|
||||
func (p *Proto) xflush() {
|
||||
// Not writing any more when connection is broken.
|
||||
if p.connBroken {
|
||||
return
|
||||
}
|
||||
|
||||
err := p.xbw.Flush()
|
||||
p.xcheckf(err, "flush")
|
||||
|
||||
// If compression is active, we need to flush the deflate stream.
|
||||
if p.compress {
|
||||
err := p.xflateWriter.Flush()
|
||||
p.xcheckf(err, "flush deflate")
|
||||
err = p.xflateBW.Flush()
|
||||
p.xcheckf(err, "flush deflate buffer")
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Proto) xtraceread(level slog.Level) func() {
|
||||
if p.tr == nil {
|
||||
// For ParseUntagged and other parse functions.
|
||||
return func() {}
|
||||
}
|
||||
p.tr.SetTrace(level)
|
||||
return func() {
|
||||
p.tr.SetTrace(mlog.LevelTrace)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Proto) xtracewrite(level slog.Level) func() {
|
||||
if p.xtw == nil {
|
||||
// For ParseUntagged and other parse functions.
|
||||
return func() {}
|
||||
}
|
||||
|
||||
p.xflush()
|
||||
p.xtw.SetTrace(level)
|
||||
return func() {
|
||||
p.xflush()
|
||||
p.xtw.SetTrace(mlog.LevelTrace)
|
||||
}
|
||||
}
|
||||
|
||||
// Close closes the connection, flushing and closing any compression and TLS layer.
//
// You may want to call Logout first. Closing a connection with a mailbox with
// deleted messages not yet expunged will not expunge those messages.
//
// Closing a TLS connection that is logged out, or closing a TLS connection with
// compression enabled (i.e. two layered streams), may cause spurious errors
// because the server may immediately close the underlying connection when it sees
// the connection is being closed.
func (c *Conn) Close() (rerr error) {
|
||||
defer c.recoverErr(&rerr)
|
||||
|
||||
if c.conn == nil {
|
||||
return nil
|
||||
}
|
||||
if !c.connBroken && c.xflateWriter != nil {
|
||||
err := c.xflateWriter.Close()
|
||||
c.xcheckf(err, "close deflate writer")
|
||||
err = c.xflateBW.Flush()
|
||||
c.xcheckf(err, "flush deflate buffer")
|
||||
c.xflateWriter = nil
|
||||
c.xflateBW = nil
|
||||
}
|
||||
err := c.conn.Close()
|
||||
c.xcheckf(err, "close connection")
|
||||
c.conn = nil
|
||||
return
|
||||
}
|
||||
|
||||
// TLSConnectionState returns the TLS connection state if the connection uses TLS,
|
||||
// either because the conn passed to [New] was a TLS connection, or because
|
||||
// [Conn.StartTLS] was called.
|
||||
func (c *Conn) TLSConnectionState() *tls.ConnectionState {
|
||||
if conn, ok := c.conn.(*tls.Conn); ok {
|
||||
cs := conn.ConnectionState()
|
||||
@ -127,177 +366,266 @@ func (c *Conn) TLSConnectionState() *tls.ConnectionState {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Commandf writes a free-form IMAP command to the server.
|
||||
// WriteCommandf writes a free-form IMAP command to the server. An ending \r\n is
|
||||
// written too.
|
||||
//
|
||||
// If tag is empty, a next unique tag is assigned.
|
||||
func (c *Conn) Commandf(tag string, format string, args ...any) (rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
func (p *Proto) WriteCommandf(tag string, format string, args ...any) (rerr error) {
|
||||
defer p.recover(&rerr)
|
||||
|
||||
if tag == "" {
|
||||
tag = c.nextTag()
|
||||
p.nextTag()
|
||||
} else {
|
||||
p.lastTag = tag
|
||||
}
|
||||
c.LastTag = tag
|
||||
|
||||
_, err := fmt.Fprintf(c.conn, "%s %s\r\n", tag, fmt.Sprintf(format, args...))
|
||||
c.xcheckf(err, "write command")
|
||||
fmt.Fprintf(p.xbw, "%s %s\r\n", p.lastTag, fmt.Sprintf(format, args...))
|
||||
p.xflush()
|
||||
return
|
||||
}
|
||||
|
||||
func (c *Conn) nextTag() string {
|
||||
c.tagGen++
|
||||
return fmt.Sprintf("x%03d", c.tagGen)
|
||||
func (p *Proto) nextTag() string {
|
||||
p.tagGen++
|
||||
p.lastTag = fmt.Sprintf("x%03d", p.tagGen)
|
||||
return p.lastTag
|
||||
}
|
||||
|
||||
// Response reads from the IMAP server until a tagged response line is found.
|
||||
// LastTag returns the tag last used for a command. For checking against a command
|
||||
// completion result.
|
||||
func (p *Proto) LastTag() string {
|
||||
return p.lastTag
|
||||
}
|
||||
|
||||
// LastTagSet sets a new last tag, as used for checking against a command completion result.
|
||||
func (p *Proto) LastTagSet(tag string) {
|
||||
p.lastTag = tag
|
||||
}
|
||||
|
||||
// ReadResponse reads from the IMAP server until a tagged response line is found.
|
||||
// The tag must be the same as the tag for the last written command.
|
||||
// Result holds the status of the command. The caller must check whether the status is OK.
|
||||
func (c *Conn) Response() (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
//
|
||||
// If an error is returned, resp can still be non-empty, and a caller may wish to
|
||||
// process resp.Untagged.
|
||||
//
|
||||
// Caller should check resp.Status for the result of the command too.
|
||||
//
|
||||
// Common types for the return error:
|
||||
// - Error, for protocol errors
|
||||
// - Various I/O errors from the underlying connection, including os.ErrDeadlineExceeded
|
||||
func (p *Proto) ReadResponse() (resp Response, rerr error) {
|
||||
defer p.recover(&rerr)
|
||||
|
||||
for {
|
||||
tag := c.xnonspace()
|
||||
c.xspace()
|
||||
tag := p.xnonspace()
|
||||
p.xspace()
|
||||
if tag == "*" {
|
||||
untagged = append(untagged, c.xuntagged())
|
||||
resp.Untagged = append(resp.Untagged, p.xuntagged())
|
||||
continue
|
||||
}
|
||||
|
||||
if tag != c.LastTag {
|
||||
c.xerrorf("got tag %q, expected %q", tag, c.LastTag)
|
||||
if tag != p.lastTag {
|
||||
p.xerrorf("got tag %q, expected %q", tag, p.lastTag)
|
||||
}
|
||||
|
||||
status := c.xstatus()
|
||||
c.xspace()
|
||||
result = c.xresult(status)
|
||||
c.xcrlf()
|
||||
status := p.xstatus()
|
||||
p.xspace()
|
||||
resp.Result = p.xresult(status)
|
||||
p.xcrlf()
|
||||
return
|
||||
}
|
||||
}
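
As a usage sketch of this write/read cycle, assuming a *Conn created and authenticated elsewhere (the constructor is outside this excerpt; a Conn provides these Proto methods): send a NOOP and inspect the untagged responses and tagged result. Names and error handling are illustrative.

package example

import (
	"log"

	"github.com/mjl-/mox/imapclient"
)

// exampleNoop writes a NOOP with an auto-generated tag and reads the response.
func exampleNoop(conn *imapclient.Conn) {
	if err := conn.WriteCommandf("", "noop"); err != nil {
		log.Fatalf("write noop: %v", err)
	}
	resp, err := conn.ReadResponse()
	if err != nil {
		log.Fatalf("read response: %v", err)
	}
	if resp.Status != imapclient.OK {
		log.Fatalf("noop: status %s, text %q", resp.Status, resp.Text)
	}
	log.Printf("noop ok, %d untagged responses", len(resp.Untagged))
}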
|
||||
|
||||
// ReadUntagged reads a single untagged response line.
|
||||
// Useful for reading lines from IDLE.
|
||||
func (c *Conn) ReadUntagged() (untagged Untagged, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
|
||||
tag := c.xnonspace()
|
||||
if tag != "*" {
|
||||
c.xerrorf("got tag %q, expected untagged", tag)
|
||||
// ParseCode parses a response code. The string must not have enclosing brackets.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// "APPENDUID 123 10"
|
||||
func ParseCode(s string) (code Code, rerr error) {
|
||||
p := Proto{br: bufio.NewReader(strings.NewReader(s + "]"))}
|
||||
defer p.recover(&rerr)
|
||||
code = p.xrespCode()
|
||||
p.xtake("]")
|
||||
buf, err := io.ReadAll(p.br)
|
||||
p.xcheckf(err, "read")
|
||||
if len(buf) != 0 {
|
||||
p.xerrorf("leftover data %q", buf)
|
||||
}
|
||||
c.xspace()
|
||||
ut := c.xuntagged()
|
||||
return code, nil
|
||||
}
|
||||
|
||||
// ParseResult parses a line, including required crlf, as a command result line.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// "tag1 OK [APPENDUID 123 10] message added\r\n"
|
||||
func ParseResult(s string) (tag string, result Result, rerr error) {
|
||||
p := Proto{br: bufio.NewReader(strings.NewReader(s))}
|
||||
defer p.recover(&rerr)
|
||||
tag = p.xnonspace()
|
||||
p.xspace()
|
||||
status := p.xstatus()
|
||||
p.xspace()
|
||||
result = p.xresult(status)
|
||||
p.xcrlf()
|
||||
return
|
||||
}
|
||||
|
||||
// ReadUntagged reads a single untagged response line.
|
||||
func (p *Proto) ReadUntagged() (untagged Untagged, rerr error) {
|
||||
defer p.recover(&rerr)
|
||||
return p.readUntagged()
|
||||
}
|
||||
|
||||
// ParseUntagged parses a line, including required crlf, as untagged response.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// "* BYE shutting down connection\r\n"
|
||||
func ParseUntagged(s string) (untagged Untagged, rerr error) {
|
||||
p := Proto{br: bufio.NewReader(strings.NewReader(s))}
|
||||
defer p.recover(&rerr)
|
||||
untagged, rerr = p.readUntagged()
|
||||
return
|
||||
}
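
The three parse helpers above are self-contained and can be exercised directly; a minimal program using the example inputs from their doc comments:

package main

import (
	"fmt"
	"log"

	"github.com/mjl-/mox/imapclient"
)

func main() {
	// Parse a response code, without the enclosing brackets.
	code, err := imapclient.ParseCode("APPENDUID 123 10")
	if err != nil {
		log.Fatalf("parse code: %v", err)
	}
	fmt.Println(code.CodeString())

	// Parse a full command completion result line, including CRLF.
	tag, result, err := imapclient.ParseResult("tag1 OK [APPENDUID 123 10] message added\r\n")
	if err != nil {
		log.Fatalf("parse result: %v", err)
	}
	fmt.Println(tag, result.Status, result.Text)

	// Parse a single untagged response line, including CRLF.
	untagged, err := imapclient.ParseUntagged("* BYE shutting down connection\r\n")
	if err != nil {
		log.Fatalf("parse untagged: %v", err)
	}
	fmt.Printf("%#v\n", untagged)
}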
|
||||
|
||||
func (p *Proto) readUntagged() (untagged Untagged, rerr error) {
|
||||
defer p.recover(&rerr)
|
||||
tag := p.xnonspace()
|
||||
if tag != "*" {
|
||||
p.xerrorf("got tag %q, expected untagged", tag)
|
||||
}
|
||||
p.xspace()
|
||||
ut := p.xuntagged()
|
||||
return ut, nil
|
||||
}
|
||||
|
||||
// Readline reads a line, including CRLF.
|
||||
// Used with IDLE and synchronous literals.
|
||||
func (c *Conn) Readline() (line string, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
func (p *Proto) Readline() (line string, rerr error) {
|
||||
defer p.recover(&rerr)
|
||||
|
||||
line, err := c.r.ReadString('\n')
|
||||
c.xcheckf(err, "read line")
|
||||
line, err := p.br.ReadString('\n')
|
||||
p.xcheckf(err, "read line")
|
||||
return line, nil
|
||||
}
|
||||
|
||||
// ReadContinuation reads a line. If it is a continuation, i.e. starts with a +, it
|
||||
// is returned without leading "+ " and without trailing crlf. Otherwise, a command
|
||||
// response is returned. A successfully read continuation can return an empty line.
|
||||
// Callers should check rerr and result.Status being empty to check if a
|
||||
// continuation was read.
|
||||
func (c *Conn) ReadContinuation() (line string, untagged []Untagged, result Result, rerr error) {
|
||||
if !c.peek('+') {
|
||||
untagged, result, rerr = c.Response()
|
||||
c.xcheckf(rerr, "reading non-continuation response")
|
||||
c.xerrorf("response status %q, expected OK", result.Status)
|
||||
func (c *Conn) readContinuation() (line string, rerr error) {
|
||||
defer c.recover(&rerr, nil)
|
||||
line, rerr = c.ReadContinuation()
|
||||
if rerr != nil {
|
||||
if resp, ok := rerr.(Response); ok {
|
||||
c.processUntagged(resp.Untagged)
|
||||
c.processResult(resp.Result)
|
||||
}
|
||||
}
|
||||
c.xtake("+ ")
|
||||
line, err := c.Readline()
|
||||
c.xcheckf(err, "read line")
|
||||
return
|
||||
}
|
||||
|
||||
// ReadContinuation reads a line. If it is a continuation, i.e. starts with "+", it
|
||||
// is returned without leading "+ " and without trailing crlf. Otherwise, an error
|
||||
// is returned, which can be a Response with Untagged that a caller may wish to
|
||||
// process. A successfully read continuation can return an empty line.
|
||||
func (p *Proto) ReadContinuation() (line string, rerr error) {
|
||||
defer p.recover(&rerr)
|
||||
|
||||
if !p.peek('+') {
|
||||
var resp Response
|
||||
resp, rerr = p.ReadResponse()
|
||||
if rerr == nil {
|
||||
rerr = resp
|
||||
}
|
||||
return "", rerr
|
||||
}
|
||||
p.xtake("+ ")
|
||||
line, err := p.Readline()
|
||||
p.xcheckf(err, "read line")
|
||||
line = strings.TrimSuffix(line, "\r\n")
|
||||
return
|
||||
}
|
||||
|
||||
// Writelinef writes the line formatted from format and args, adding CRLF.
|
||||
// Used with IDLE and synchronous literals.
|
||||
func (c *Conn) Writelinef(format string, args ...any) (rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
func (p *Proto) Writelinef(format string, args ...any) (rerr error) {
|
||||
defer p.recover(&rerr)
|
||||
|
||||
s := fmt.Sprintf(format, args...)
|
||||
_, err := fmt.Fprintf(c.conn, "%s\r\n", s)
|
||||
c.xcheckf(err, "writeline")
|
||||
fmt.Fprintf(p.xbw, "%s\r\n", s)
|
||||
p.xflush()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Write writes directly to the connection. Write errors do take the connection's
|
||||
// panic mode into account, i.e. Write can panic.
|
||||
func (c *Conn) Write(buf []byte) (n int, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
// WriteSyncLiteral first writes the synchronous literal size, then reads the
|
||||
// continuation "+" and finally writes the data. If the literal is not accepted, an
|
||||
// error is returned, which may be a Response.
|
||||
func (p *Proto) WriteSyncLiteral(s string) (rerr error) {
|
||||
defer p.recover(&rerr)
|
||||
|
||||
n, rerr = c.conn.Write(buf)
|
||||
c.xcheckf(rerr, "write")
|
||||
return n, nil
|
||||
}
|
||||
fmt.Fprintf(p.xbw, "{%d}\r\n", len(s))
|
||||
p.xflush()
|
||||
|
||||
// WriteSyncLiteral first writes the synchronous literal size, then reads the
|
||||
// continuation "+" and finally writes the data.
|
||||
func (c *Conn) WriteSyncLiteral(s string) (rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
plus, err := p.br.Peek(1)
|
||||
p.xcheckf(err, "read continuation")
|
||||
if plus[0] == '+' {
|
||||
_, err = p.Readline()
|
||||
p.xcheckf(err, "read continuation line")
|
||||
|
||||
_, err := fmt.Fprintf(c.conn, "{%d}\r\n", len(s))
|
||||
c.xcheckf(err, "write sync literal size")
|
||||
line, err := c.Readline()
|
||||
c.xcheckf(err, "read line")
|
||||
if !strings.HasPrefix(line, "+") {
|
||||
c.xerrorf("no continuation received for sync literal")
|
||||
defer p.xtracewrite(mlog.LevelTracedata)()
|
||||
_, err = p.xbw.Write([]byte(s))
|
||||
p.xcheckf(err, "write literal data")
|
||||
p.xtracewrite(mlog.LevelTrace)
|
||||
return nil
|
||||
}
|
||||
_, err = c.conn.Write([]byte(s))
|
||||
c.xcheckf(err, "write literal data")
|
||||
return nil
|
||||
var resp Response
|
||||
resp, rerr = p.ReadResponse()
|
||||
if rerr == nil {
|
||||
rerr = resp
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Transactf writes format and args as an IMAP command, using Commandf with an
|
||||
func (c *Conn) processUntagged(l []Untagged) {
|
||||
for _, ut := range l {
|
||||
switch e := ut.(type) {
|
||||
case UntaggedCapability:
|
||||
c.CapAvailable = []Capability(e)
|
||||
case UntaggedEnabled:
|
||||
c.CapEnabled = append(c.CapEnabled, e...)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Conn) processResult(r Result) {
|
||||
if r.Code == nil {
|
||||
return
|
||||
}
|
||||
switch e := r.Code.(type) {
|
||||
case CodeCapability:
|
||||
c.CapAvailable = []Capability(e)
|
||||
}
|
||||
}
|
||||
|
||||
// transactf writes format and args as an IMAP command, using WriteCommandf with an
|
||||
// empty tag. I.e. format must not contain a tag. Transactf then reads a response
|
||||
// using ReadResponse and checks the result status is OK.
|
||||
func (c *Conn) Transactf(format string, args ...any) (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
func (c *Conn) transactf(format string, args ...any) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
|
||||
err := c.Commandf("", format, args...)
|
||||
err := c.WriteCommandf("", format, args...)
|
||||
if err != nil {
|
||||
return nil, Result{}, err
|
||||
return Response{}, err
|
||||
}
|
||||
return c.ResponseOK()
|
||||
|
||||
return c.responseOK()
|
||||
}
|
||||
|
||||
func (c *Conn) ResponseOK() (untagged []Untagged, result Result, rerr error) {
|
||||
untagged, result, rerr = c.Response()
|
||||
if rerr != nil {
|
||||
return nil, Result{}, rerr
|
||||
}
|
||||
if result.Status != OK {
|
||||
c.xerrorf("response status %q, expected OK", result.Status)
|
||||
}
|
||||
return untagged, result, rerr
|
||||
}
|
||||
func (c *Conn) responseOK() (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
|
||||
func (c *Conn) xgetUntagged(l []Untagged, dst any) {
|
||||
if len(l) != 1 {
|
||||
c.xerrorf("got %d untagged, expected 1: %v", len(l), l)
|
||||
resp, rerr = c.ReadResponse()
|
||||
c.processUntagged(resp.Untagged)
|
||||
c.processResult(resp.Result)
|
||||
if rerr == nil && resp.Status != OK {
|
||||
rerr = resp
|
||||
}
|
||||
got := l[0]
|
||||
gotv := reflect.ValueOf(got)
|
||||
dstv := reflect.ValueOf(dst)
|
||||
if gotv.Type() != dstv.Type().Elem() {
|
||||
c.xerrorf("got %v, expected %v", gotv.Type(), dstv.Type().Elem())
|
||||
}
|
||||
dstv.Elem().Set(gotv)
|
||||
}
|
||||
|
||||
// Close closes the connection without writing anything to the server.
|
||||
// You may want to call Logout. Closing a connection with a mailbox with deleted
|
||||
// messages not yet expunged will not expunge those messages.
|
||||
func (c *Conn) Close() error {
|
||||
var err error
|
||||
if c.conn != nil {
|
||||
err = c.conn.Close()
|
||||
c.conn = nil
|
||||
}
|
||||
return err
|
||||
return
|
||||
}
|
||||
|
@ -6,73 +6,121 @@ import (
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"hash"
|
||||
"io"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/mjl-/flate"
|
||||
|
||||
"github.com/mjl-/mox/mlog"
|
||||
"github.com/mjl-/mox/moxio"
|
||||
"github.com/mjl-/mox/scram"
|
||||
)
|
||||
|
||||
// Capability requests a list of capabilities from the server. They are returned in
|
||||
// an UntaggedCapability response. The server also sends capabilities in initial
|
||||
// server greeting, in the response code.
|
||||
func (c *Conn) Capability() (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
return c.Transactf("capability")
|
||||
// Capability writes the IMAP4 "CAPABILITY" command, requesting a list of
|
||||
// capabilities from the server. They are returned in an UntaggedCapability
|
||||
// response. The server also sends capabilities in initial server greeting, in the
|
||||
// response code.
|
||||
func (c *Conn) Capability() (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
return c.transactf("capability")
|
||||
}
|
||||
|
||||
// Noop does nothing on its own, but a server will return any pending untagged
|
||||
// responses for new message delivery and changes to mailboxes.
|
||||
func (c *Conn) Noop() (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
return c.Transactf("noop")
|
||||
// Noop writes the IMAP4 "NOOP" command, which does nothing on its own, but a
|
||||
// server will return any pending untagged responses for new message delivery and
|
||||
// changes to mailboxes.
|
||||
func (c *Conn) Noop() (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
return c.transactf("noop")
|
||||
}
|
||||
|
||||
// Logout ends the IMAP session by writing a LOGOUT command. Close must still be
|
||||
// called on this client to close the socket.
|
||||
func (c *Conn) Logout() (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
return c.Transactf("logout")
|
||||
// Logout ends the IMAP4 session by writing an IMAP "LOGOUT" command. [Conn.Close]
|
||||
// must still be called on this client to close the socket.
|
||||
func (c *Conn) Logout() (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
return c.transactf("logout")
|
||||
}
|
||||
|
||||
// Starttls enables TLS on the connection with the STARTTLS command.
|
||||
func (c *Conn) Starttls(config *tls.Config) (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
untagged, result, rerr = c.Transactf("starttls")
|
||||
// StartTLS enables TLS on the connection with the IMAP4 "STARTTLS" command.
|
||||
func (c *Conn) StartTLS(config *tls.Config) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
resp, rerr = c.transactf("starttls")
|
||||
c.xcheckf(rerr, "starttls command")
|
||||
conn := tls.Client(c.conn, config)
|
||||
err := conn.Handshake()
|
||||
|
||||
conn := c.xprefixConn()
|
||||
tlsConn := tls.Client(conn, config)
|
||||
err := tlsConn.Handshake()
|
||||
c.xcheckf(err, "tls handshake")
|
||||
c.conn = conn
|
||||
c.r = bufio.NewReader(conn)
|
||||
return untagged, result, nil
|
||||
}
|
||||
|
||||
// Login authenticates with username and password
|
||||
func (c *Conn) Login(username, password string) (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
return c.Transactf("login %s %s", astring(username), astring(password))
|
||||
}
|
||||
|
||||
// Authenticate with plaintext password using AUTHENTICATE PLAIN.
|
||||
func (c *Conn) AuthenticatePlain(username, password string) (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
|
||||
untagged, result, rerr = c.Transactf("authenticate plain %s", base64.StdEncoding.EncodeToString(fmt.Appendf(nil, "\u0000%s\u0000%s", username, password)))
|
||||
c.conn = tlsConn
|
||||
return
|
||||
}
|
||||
|
||||
// Authenticate with SCRAM-SHA-256(-PLUS) or SCRAM-SHA-1(-PLUS). With SCRAM, the
|
||||
// password is not exchanged in plaintext form, but only derived hashes are
|
||||
// exchanged by both parties as proof of knowledge of password.
|
||||
// Login authenticates using the IMAP4 "LOGIN" command, sending the plain text
|
||||
// password to the server.
|
||||
//
|
||||
// Authentication is not allowed while the "LOGINDISABLED" capability is announced.
|
||||
// Call [Conn.StartTLS] first.
|
||||
//
|
||||
// See [Conn.AuthenticateSCRAM] for a better authentication mechanism.
|
||||
func (c *Conn) Login(username, password string) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
|
||||
fmt.Fprintf(c.xbw, "%s login %s ", c.nextTag(), astring(username))
|
||||
defer c.xtracewrite(mlog.LevelTraceauth)()
|
||||
fmt.Fprintf(c.xbw, "%s\r\n", astring(password))
|
||||
c.xtracewrite(mlog.LevelTrace) // Restore.
|
||||
return c.responseOK()
|
||||
}
|
||||
|
||||
// AuthenticatePlain executes the AUTHENTICATE command with SASL mechanism "PLAIN",
|
||||
// sending the password in plain text to the server.
|
||||
//
|
||||
// Required capability: "AUTH=PLAIN"
|
||||
//
|
||||
// Authentication is not allowed while the "LOGINDISABLED" capability is announced.
|
||||
// Call [Conn.StartTLS] first.
|
||||
//
|
||||
// See [Conn.AuthenticateSCRAM] for a better authentication mechanism.
|
||||
func (c *Conn) AuthenticatePlain(username, password string) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
|
||||
err := c.WriteCommandf("", "authenticate plain")
|
||||
c.xcheckf(err, "writing authenticate command")
|
||||
_, rerr = c.readContinuation()
|
||||
c.xresponse(rerr, &resp)
|
||||
|
||||
defer c.xtracewrite(mlog.LevelTraceauth)()
|
||||
xw := base64.NewEncoder(base64.StdEncoding, c.xbw)
|
||||
fmt.Fprintf(xw, "\u0000%s\u0000%s", username, password)
|
||||
xw.Close()
|
||||
c.xtracewrite(mlog.LevelTrace) // Restore.
|
||||
fmt.Fprintf(c.xbw, "\r\n")
|
||||
c.xflush()
|
||||
return c.responseOK()
|
||||
}
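
A sketch of a typical pre-login sequence with these methods; the connection is assumed to have been dialed elsewhere, and the host name and credentials are placeholders.

package example

import (
	"crypto/tls"
	"log"

	"github.com/mjl-/mox/imapclient"
)

// starttlsAndLogin upgrades the connection to TLS and authenticates with SASL PLAIN.
func starttlsAndLogin(conn *imapclient.Conn) {
	if _, err := conn.StartTLS(&tls.Config{ServerName: "mail.example.org"}); err != nil {
		log.Fatalf("starttls: %v", err)
	}
	if _, err := conn.AuthenticatePlain("mjl@example.org", "password"); err != nil {
		log.Fatalf("authenticate plain: %v", err)
	}
}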
|
||||
|
||||
// todo: implement cram-md5, write its credentials as traceauth.
|
||||
|
||||
// AuthenticateSCRAM executes the IMAP4 "AUTHENTICATE" command with one of the
|
||||
// following SASL mechanisms: SCRAM-SHA-256(-PLUS) or SCRAM-SHA-1(-PLUS).
|
||||
//
|
||||
// With SCRAM, the password is not sent to the server in plain text, but only
|
||||
// derived hashes are exchanged by both parties as proof of knowledge of password.
|
||||
//
|
||||
// Authentication is not allowed while the "LOGINDISABLED" capability is announced.
|
||||
// Call [Conn.StartTLS] first.
|
||||
//
|
||||
// Required capability: SCRAM-SHA-256-PLUS, SCRAM-SHA-256, SCRAM-SHA-1-PLUS,
|
||||
// SCRAM-SHA-1.
|
||||
//
|
||||
// The PLUS variants bind the authentication exchange to the TLS connection,
|
||||
// detecting MitM attacks.
|
||||
func (c *Conn) AuthenticateSCRAM(method string, h func() hash.Hash, username, password string) (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
func (c *Conn) AuthenticateSCRAM(mechanism string, h func() hash.Hash, username, password string) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
|
||||
var cs *tls.ConnectionState
|
||||
lmethod := strings.ToLower(method)
|
||||
if strings.HasSuffix(lmethod, "-plus") {
|
||||
lmech := strings.ToLower(mechanism)
|
||||
if strings.HasSuffix(lmech, "-plus") {
|
||||
tlsConn, ok := c.conn.(*tls.Conn)
|
||||
if !ok {
|
||||
c.xerrorf("cannot use scram plus without tls")
|
||||
@ -83,17 +131,14 @@ func (c *Conn) AuthenticateSCRAM(method string, h func() hash.Hash, username, pa
|
||||
sc := scram.NewClient(h, username, "", false, cs)
|
||||
clientFirst, err := sc.ClientFirst()
|
||||
c.xcheckf(err, "scram clientFirst")
|
||||
c.LastTag = c.nextTag()
|
||||
err = c.Writelinef("%s authenticate %s %s", c.LastTag, method, base64.StdEncoding.EncodeToString([]byte(clientFirst)))
|
||||
// todo: only send clientFirst if server has announced SASL-IR
|
||||
err = c.Writelinef("%s authenticate %s %s", c.nextTag(), mechanism, base64.StdEncoding.EncodeToString([]byte(clientFirst)))
|
||||
c.xcheckf(err, "writing command line")
|
||||
|
||||
xreadContinuation := func() []byte {
|
||||
var line string
|
||||
line, untagged, result, rerr = c.ReadContinuation()
|
||||
c.xcheckf(err, "read continuation")
|
||||
if result.Status != "" {
|
||||
c.xerrorf("unexpected status %q", result.Status)
|
||||
}
|
||||
line, rerr = c.readContinuation()
|
||||
c.xresponse(rerr, &resp)
|
||||
buf, err := base64.StdEncoding.DecodeString(line)
|
||||
c.xcheckf(err, "parsing base64 from remote")
|
||||
return buf
|
||||
@ -113,83 +158,131 @@ func (c *Conn) AuthenticateSCRAM(method string, h func() hash.Hash, username, pa
|
||||
err = c.Writelinef("%s", base64.StdEncoding.EncodeToString(nil))
|
||||
c.xcheckf(err, "scram client end")
|
||||
|
||||
return c.ResponseOK()
|
||||
return c.responseOK()
|
||||
}
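
A sketch of SCRAM authentication with SHA-256; the credentials are placeholders, and the -PLUS variants additionally require that the connection already uses TLS.

package example

import (
	"crypto/sha256"
	"log"

	"github.com/mjl-/mox/imapclient"
)

// authSCRAM authenticates with the SCRAM-SHA-256 mechanism.
func authSCRAM(conn *imapclient.Conn) {
	if _, err := conn.AuthenticateSCRAM("SCRAM-SHA-256", sha256.New, "mjl@example.org", "password"); err != nil {
		log.Fatalf("authenticate scram: %v", err)
	}
}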
|
||||
|
||||
// Enable enables capabilities for use with the connection, verifying the server has indeed enabled them.
|
||||
func (c *Conn) Enable(capabilities ...string) (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
// CompressDeflate enables compression with deflate on the connection by executing
|
||||
// the IMAP4 "COMPRESS=DEFLATE" command.
|
||||
//
|
||||
// Required capability: "COMPRESS=DEFLATE".
|
||||
//
|
||||
// State: Authenticated or selected.
|
||||
func (c *Conn) CompressDeflate() (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
|
||||
untagged, result, rerr = c.Transactf("enable %s", strings.Join(capabilities, " "))
|
||||
resp, rerr = c.transactf("compress deflate")
|
||||
c.xcheck(rerr)
|
||||
var enabled UntaggedEnabled
|
||||
c.xgetUntagged(untagged, &enabled)
|
||||
got := map[string]struct{}{}
|
||||
for _, cap := range enabled {
|
||||
got[cap] = struct{}{}
|
||||
}
|
||||
for _, cap := range capabilities {
|
||||
if _, ok := got[cap]; !ok {
|
||||
c.xerrorf("capability %q not enabled by server", cap)
|
||||
}
|
||||
}
|
||||
|
||||
c.xflateBW = bufio.NewWriter(c)
|
||||
fw0, err := flate.NewWriter(c.xflateBW, flate.DefaultCompression)
|
||||
c.xcheckf(err, "deflate") // Cannot happen.
|
||||
fw := moxio.NewFlateWriter(fw0)
|
||||
|
||||
c.compress = true
|
||||
c.xflateWriter = fw
|
||||
c.xtw = moxio.NewTraceWriter(mlog.New("imapclient", nil), "CW: ", fw)
|
||||
c.xbw = bufio.NewWriter(c.xtw)
|
||||
|
||||
rc := c.xprefixConn()
|
||||
fr := flate.NewReaderPartial(rc)
|
||||
c.tr = moxio.NewTraceReader(mlog.New("imapclient", nil), "CR: ", fr)
|
||||
c.br = bufio.NewReader(c.tr)
|
||||
|
||||
return
|
||||
}
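
A sketch of turning on compression after authenticating; the server must announce the "COMPRESS=DEFLATE" capability, and after success all further traffic on the connection is transparently compressed.

package example

import (
	"log"

	"github.com/mjl-/mox/imapclient"
)

// enableCompression enables deflate compression and verifies the connection
// still works by sending a NOOP over the compressed stream.
func enableCompression(conn *imapclient.Conn) {
	if _, err := conn.CompressDeflate(); err != nil {
		log.Printf("compress=deflate not enabled: %v", err)
		return
	}
	if _, err := conn.Noop(); err != nil {
		log.Fatalf("noop over compressed connection: %v", err)
	}
}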
|
||||
|
||||
// Select opens mailbox as active mailbox.
|
||||
func (c *Conn) Select(mailbox string) (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
return c.Transactf("select %s", astring(mailbox))
|
||||
// Enable enables capabilities for use with the connection by executing the IMAP4 "ENABLE" command.
|
||||
//
|
||||
// Required capability: "ENABLE" or "IMAP4rev2"
|
||||
func (c *Conn) Enable(capabilities ...Capability) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
|
||||
var caps strings.Builder
|
||||
for _, c := range capabilities {
|
||||
caps.WriteString(" " + string(c))
|
||||
}
|
||||
return c.transactf("enable%s", caps.String())
|
||||
}
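
A sketch of enabling extensions with the Capability constants defined later in this change, and checking what the server actually enabled; conn is assumed to be authenticated.

package example

import (
	"log"

	"github.com/mjl-/mox/imapclient"
)

// enableExtensions asks the server to enable IMAP4rev2 and QRESYNC and logs
// the untagged ENABLED response(s).
func enableExtensions(conn *imapclient.Conn) {
	resp, err := conn.Enable(imapclient.CapIMAP4rev2, imapclient.CapQresync)
	if err != nil {
		log.Fatalf("enable: %v", err)
	}
	for _, enabled := range imapclient.UntaggedResponseList[imapclient.UntaggedEnabled](resp) {
		log.Printf("enabled: %v", enabled)
	}
}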
|
||||
|
||||
// Examine opens mailbox as active mailbox read-only.
|
||||
func (c *Conn) Examine(mailbox string) (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
return c.Transactf("examine %s", astring(mailbox))
|
||||
// Select opens the mailbox with the IMAP4 "SELECT" command.
|
||||
//
|
||||
// If a mailbox is selected/active, it is automatically deselected before
|
||||
// selecting the mailbox, without permanently removing ("expunging") messages
|
||||
// marked \Deleted.
|
||||
//
|
||||
// If the mailbox cannot be opened, the connection is left in Authenticated state,
|
||||
// not Selected.
|
||||
func (c *Conn) Select(mailbox string) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
return c.transactf("select %s", astring(mailbox))
|
||||
}
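
A sketch of opening a mailbox; conn is assumed to be authenticated, and the untagged responses sent while selecting (EXISTS, FLAGS, UIDVALIDITY, ...) are simply logged.

package example

import (
	"log"

	"github.com/mjl-/mox/imapclient"
)

// openInbox selects INBOX and logs the untagged responses sent during SELECT.
func openInbox(conn *imapclient.Conn) {
	resp, err := conn.Select("INBOX")
	if err != nil {
		log.Fatalf("select inbox: %v", err)
	}
	for _, ut := range resp.Untagged {
		log.Printf("untagged during select: %#v", ut)
	}
}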
|
||||
|
||||
// Create makes a new mailbox on the server.
|
||||
func (c *Conn) Create(mailbox string) (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
return c.Transactf("create %s", astring(mailbox))
|
||||
// Examine opens the mailbox like [Conn.Select], but read-only, with the IMAP4
|
||||
// "EXAMINE" command.
|
||||
func (c *Conn) Examine(mailbox string) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
return c.transactf("examine %s", astring(mailbox))
|
||||
}
|
||||
|
||||
// Delete removes an entire mailbox and its messages.
|
||||
func (c *Conn) Delete(mailbox string) (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
return c.Transactf("delete %s", astring(mailbox))
|
||||
// Create makes a new mailbox on the server using the IMAP4 "CREATE" command.
|
||||
//
|
||||
// SpecialUse can only be used on servers that announced the "CREATE-SPECIAL-USE"
|
||||
// capability. Specify flags like \Archive, \Drafts, \Junk, \Sent, \Trash, \All.
|
||||
func (c *Conn) Create(mailbox string, specialUse []string) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
var useStr string
|
||||
if len(specialUse) > 0 {
|
||||
useStr = fmt.Sprintf(" USE (%s)", strings.Join(specialUse, " "))
|
||||
}
|
||||
return c.transactf("create %s%s", astring(mailbox), useStr)
|
||||
}
|
||||
|
||||
// Rename changes the name of a mailbox and all its child mailboxes.
|
||||
func (c *Conn) Rename(omailbox, nmailbox string) (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
return c.Transactf("rename %s %s", astring(omailbox), astring(nmailbox))
|
||||
// Delete removes an entire mailbox and its messages using the IMAP4 "DELETE"
|
||||
// command.
|
||||
func (c *Conn) Delete(mailbox string) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
return c.transactf("delete %s", astring(mailbox))
|
||||
}
|
||||
|
||||
// Subscribe marks a mailbox as subscribed. The mailbox does not have to exist. It
|
||||
// is not an error if the mailbox is already subscribed.
|
||||
func (c *Conn) Subscribe(mailbox string) (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
return c.Transactf("subscribe %s", astring(mailbox))
|
||||
// Rename changes the name of a mailbox and all its child mailboxes
|
||||
// using the IMAP4 "RENAME" command.
|
||||
func (c *Conn) Rename(omailbox, nmailbox string) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
return c.transactf("rename %s %s", astring(omailbox), astring(nmailbox))
|
||||
}
|
||||
|
||||
// Unsubscribe marks a mailbox as unsubscribed.
|
||||
func (c *Conn) Unsubscribe(mailbox string) (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
return c.Transactf("unsubscribe %s", astring(mailbox))
|
||||
// Subscribe marks a mailbox as subscribed using the IMAP4 "SUBSCRIBE" command.
|
||||
//
|
||||
// The mailbox does not have to exist. It is not an error if the mailbox is already
|
||||
// subscribed.
|
||||
func (c *Conn) Subscribe(mailbox string) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
return c.transactf("subscribe %s", astring(mailbox))
|
||||
}
|
||||
|
||||
// List lists mailboxes with the basic LIST syntax.
|
||||
// Unsubscribe marks a mailbox as unsubscribed using the IMAP4 "UNSUBSCRIBE"
|
||||
// command.
|
||||
func (c *Conn) Unsubscribe(mailbox string) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
return c.transactf("unsubscribe %s", astring(mailbox))
|
||||
}
|
||||
|
||||
// List lists mailboxes using the IMAP4 "LIST" command with the basic LIST syntax.
|
||||
// Pattern can contain * (match any) or % (match any except hierarchy delimiter).
|
||||
func (c *Conn) List(pattern string) (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
return c.Transactf(`list "" %s`, astring(pattern))
|
||||
func (c *Conn) List(pattern string) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
return c.transactf(`list "" %s`, astring(pattern))
|
||||
}
|
||||
|
||||
// ListFull lists mailboxes with the extended LIST syntax requesting all supported data.
|
||||
// ListFull lists mailboxes using the LIST command with the extended LIST
|
||||
// syntax requesting all supported data.
|
||||
//
|
||||
// Required capability: "LIST-EXTENDED". If "IMAP4rev2" is announced, the command
|
||||
// is also available but only with a single pattern.
|
||||
//
|
||||
// Pattern can contain * (match any) or % (match any except hierarchy delimiter).
|
||||
func (c *Conn) ListFull(subscribedOnly bool, patterns ...string) (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
func (c *Conn) ListFull(subscribedOnly bool, patterns ...string) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
var subscribedStr string
|
||||
if subscribedOnly {
|
||||
subscribedStr = "subscribed recursivematch"
|
||||
@ -197,115 +290,313 @@ func (c *Conn) ListFull(subscribedOnly bool, patterns ...string) (untagged []Unt
|
||||
for i, s := range patterns {
|
||||
patterns[i] = astring(s)
|
||||
}
|
||||
return c.Transactf(`list (%s) "" (%s) return (subscribed children special-use status (messages uidnext uidvalidity unseen deleted size recent appendlimit))`, subscribedStr, strings.Join(patterns, " "))
|
||||
return c.transactf(`list (%s) "" (%s) return (subscribed children special-use status (messages uidnext uidvalidity unseen deleted size recent appendlimit))`, subscribedStr, strings.Join(patterns, " "))
|
||||
}
|
||||
|
||||
// Namespace returns the hierarchy separator in an UntaggedNamespace response with personal/shared/other namespaces if present.
|
||||
func (c *Conn) Namespace() (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
return c.Transactf("namespace")
|
||||
// Namespace requests the hierarchy separator using the IMAP4 "NAMESPACE" command.
|
||||
//
|
||||
// Required capability: "NAMESPACE" or "IMAP4rev2".
|
||||
//
|
||||
// Server will return an UntaggedNamespace response with personal/shared/other
|
||||
// namespaces if present.
|
||||
func (c *Conn) Namespace() (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
return c.transactf("namespace")
|
||||
}
|
||||
|
||||
// Status requests information about a mailbox, such as number of messages, size,
|
||||
// etc. At least one attribute required.
|
||||
func (c *Conn) Status(mailbox string, attrs ...StatusAttr) (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
// Status requests information about a mailbox using the IMAP4 "STATUS" command. For
|
||||
// example, number of messages, size, etc. At least one attribute is required.
|
||||
func (c *Conn) Status(mailbox string, attrs ...StatusAttr) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
l := make([]string, len(attrs))
|
||||
for i, a := range attrs {
|
||||
l[i] = string(a)
|
||||
}
|
||||
return c.Transactf("status %s (%s)", astring(mailbox), strings.Join(l, " "))
|
||||
return c.transactf("status %s (%s)", astring(mailbox), strings.Join(l, " "))
|
||||
}
|
||||
|
||||
// Append adds message to mailbox with flags and optional receive time.
|
||||
func (c *Conn) Append(mailbox string, flags []string, received *time.Time, message []byte) (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
var date string
|
||||
if received != nil {
|
||||
date = ` "` + received.Format("_2-Jan-2006 15:04:05 -0700") + `"`
|
||||
// Append represents a parameter to the IMAP4 "APPEND" or "REPLACE" commands, for
|
||||
// adding a message to mailbox, or replacing a message with a new version in a
|
||||
// mailbox.
|
||||
type Append struct {
|
||||
Flags []string // Optional, flags for the new message.
|
||||
Received *time.Time // Optional, the INTERNALDATE field, typically time at which a message was received.
|
||||
Size int64
|
||||
Data io.Reader // Required, must return Size bytes.
|
||||
}
|
||||
|
||||
// Append adds message to mailbox with flags and optional receive time using the
|
||||
// IMAP4 "APPEND" command.
|
||||
func (c *Conn) Append(mailbox string, message Append) (resp Response, rerr error) {
|
||||
return c.MultiAppend(mailbox, message)
|
||||
}
|
||||
|
||||
// MultiAppend atomically adds multiple messages to the mailbox.
|
||||
//
|
||||
// Required capability: "MULTIAPPEND"
|
||||
func (c *Conn) MultiAppend(mailbox string, message Append, more ...Append) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
|
||||
fmt.Fprintf(c.xbw, "%s append %s", c.nextTag(), astring(mailbox))
|
||||
|
||||
msgs := append([]Append{message}, more...)
|
||||
for _, m := range msgs {
|
||||
var date string
|
||||
if m.Received != nil {
|
||||
date = ` "` + m.Received.Format("_2-Jan-2006 15:04:05 -0700") + `"`
|
||||
}
|
||||
|
||||
// todo: use literal8 if needed, with "UTF8()" if required.
|
||||
// todo: for larger messages, use a synchronizing literal.
|
||||
|
||||
fmt.Fprintf(c.xbw, " (%s)%s {%d+}\r\n", strings.Join(m.Flags, " "), date, m.Size)
|
||||
defer c.xtracewrite(mlog.LevelTracedata)()
|
||||
_, err := io.Copy(c.xbw, m.Data)
|
||||
c.xcheckf(err, "write message data")
|
||||
c.xtracewrite(mlog.LevelTrace) // Restore
|
||||
}
|
||||
return c.Transactf("append %s (%s)%s {%d+}\r\n%s", astring(mailbox), strings.Join(flags, " "), date, len(message), message)
|
||||
|
||||
fmt.Fprintf(c.xbw, "\r\n")
|
||||
c.xflush()
|
||||
return c.responseOK()
|
||||
}
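
A sketch of appending a single small message with the Append struct above; mailbox name and message text are placeholders, and Size must match the number of bytes Data yields.

package example

import (
	"log"
	"strings"
	"time"

	"github.com/mjl-/mox/imapclient"
)

// appendMessage adds one message, marked \Seen, to the Archive mailbox.
func appendMessage(conn *imapclient.Conn) {
	msg := "From: mjl@example.org\r\nSubject: test\r\n\r\nhello\r\n"
	received := time.Now()
	_, err := conn.Append("Archive", imapclient.Append{
		Flags:    []string{`\Seen`},
		Received: &received,
		Size:     int64(len(msg)),
		Data:     strings.NewReader(msg),
	})
	if err != nil {
		log.Fatalf("append: %v", err)
	}
}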
|
||||
|
||||
// note: No idle command. Idle is better implemented by writing the request and reading and handling the responses as they come in.
|
||||
// note: No Idle or Notify command. Idle/Notify is better implemented by
|
||||
// writing the request and reading and handling the responses as they come in.
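
A sketch of that pattern, assuming the server announces IDLE and conn has a mailbox selected: write the command, read the continuation, handle untagged updates as they arrive, then end with DONE and read the tagged result. Real code would set read deadlines and loop until it sees the update it cares about.

package example

import (
	"log"

	"github.com/mjl-/mox/imapclient"
)

// idleOnce waits for a single untagged update while idling, then stops idling.
func idleOnce(conn *imapclient.Conn) {
	if err := conn.WriteCommandf("", "idle"); err != nil {
		log.Fatalf("write idle: %v", err)
	}
	// The server acknowledges IDLE with a continuation before sending updates.
	if _, err := conn.ReadContinuation(); err != nil {
		log.Fatalf("idle continuation: %v", err)
	}
	ut, err := conn.ReadUntagged()
	if err != nil {
		log.Fatalf("read untagged during idle: %v", err)
	}
	log.Printf("update: %#v", ut)

	// End IDLE and read the tagged completion result.
	if err := conn.Writelinef("done"); err != nil {
		log.Fatalf("write done: %v", err)
	}
	if _, err := conn.ReadResponse(); err != nil {
		log.Fatalf("idle result: %v", err)
	}
}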
|
||||
|
||||
// CloseMailbox closes the currently selected/active mailbox, permanently removing
|
||||
// any messages marked with \Deleted.
|
||||
func (c *Conn) CloseMailbox() (untagged []Untagged, result Result, rerr error) {
|
||||
return c.Transactf("close")
|
||||
// CloseMailbox closes the selected/active mailbox using the IMAP4 "CLOSE" command,
|
||||
// permanently removing ("expunging") any messages marked with \Deleted.
|
||||
//
|
||||
// See [Conn.Unselect] for closing a mailbox without permanently removing messages.
|
||||
func (c *Conn) CloseMailbox() (resp Response, rerr error) {
|
||||
return c.transactf("close")
|
||||
}
|
||||
|
||||
// Unselect closes the currently selected/active mailbox, but unlike CloseMailbox
|
||||
// does not permanently remove any messages marked with \Deleted.
|
||||
func (c *Conn) Unselect() (untagged []Untagged, result Result, rerr error) {
|
||||
return c.Transactf("unselect")
|
||||
// Unselect closes the selected/active mailbox using the IMAP4 "UNSELECT" command,
|
||||
// but unlike [Conn.CloseMailbox] does not permanently remove ("expunge") any messages
|
||||
// marked with \Deleted.
|
||||
//
|
||||
// Required capability: "UNSELECT" or "IMAP4rev2".
|
||||
//
|
||||
// If Unselect is not available, call [Conn.Select] with a non-existent mailbox for
|
||||
// the same effect: Deselecting a mailbox without permanently removing messages
|
||||
// marked \Deleted.
|
||||
func (c *Conn) Unselect() (resp Response, rerr error) {
|
||||
return c.transactf("unselect")
|
||||
}
|
||||
|
||||
// Expunge removes messages marked as deleted for the selected mailbox.
|
||||
func (c *Conn) Expunge() (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
return c.Transactf("expunge")
|
||||
// Expunge removes all messages marked as deleted for the selected mailbox using
|
||||
// the IMAP4 "EXPUNGE" command. If other sessions marked messages as deleted, even
|
||||
// if they aren't visible in the session, they are removed as well.
|
||||
//
|
||||
// UIDExpunge gives more control over which messages are removed.
|
||||
func (c *Conn) Expunge() (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
return c.transactf("expunge")
|
||||
}
|
||||
|
||||
// UIDExpunge is like expunge, but only removes messages matching uidSet.
|
||||
func (c *Conn) UIDExpunge(uidSet NumSet) (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
return c.Transactf("uid expunge %s", uidSet.String())
|
||||
// UIDExpunge is like expunge, but only removes messages matching UID set, using
|
||||
// the IMAP4 "UID EXPUNGE" command.
|
||||
//
|
||||
// Required capability: "UIDPLUS" or "IMAP4rev2".
|
||||
func (c *Conn) UIDExpunge(uidSet NumSet) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
return c.transactf("uid expunge %s", uidSet.String())
|
||||
}
|
||||
|
||||
// Note: No search, fetch command yet due to its large syntax.
|
||||
|
||||
// StoreFlagsSet stores a new set of flags for messages from seqset with the STORE command.
|
||||
// If silent, no untagged responses with the updated flags will be sent by the server.
|
||||
func (c *Conn) StoreFlagsSet(seqset string, silent bool, flags ...string) (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
// MSNStoreFlagsSet stores a new set of flags for messages matching message
|
||||
// sequence numbers (MSNs) from sequence set with the IMAP4 "STORE" command.
|
||||
//
|
||||
// If silent, no untagged responses with the updated flags will be sent by the
|
||||
// server.
|
||||
//
|
||||
// Method [Conn.UIDStoreFlagsSet], which operates on a uid set, should be
|
||||
// preferred.
|
||||
func (c *Conn) MSNStoreFlagsSet(seqset string, silent bool, flags ...string) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
item := "flags"
|
||||
if silent {
|
||||
item += ".silent"
|
||||
}
|
||||
return c.Transactf("store %s %s (%s)", seqset, item, strings.Join(flags, " "))
|
||||
return c.transactf("store %s %s (%s)", seqset, item, strings.Join(flags, " "))
|
||||
}
|
||||
|
||||
// StoreFlagsAdd is like StoreFlagsSet, but only adds flags, leaving current flags on the message intact.
|
||||
func (c *Conn) StoreFlagsAdd(seqset string, silent bool, flags ...string) (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
// MSNStoreFlagsAdd is like [Conn.MSNStoreFlagsSet], but only adds flags, leaving
|
||||
// current flags on the message intact.
|
||||
//
|
||||
// Method [Conn.UIDStoreFlagsAdd], which operates on a uid set, should be
|
||||
// preferred.
|
||||
func (c *Conn) MSNStoreFlagsAdd(seqset string, silent bool, flags ...string) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
item := "+flags"
|
||||
if silent {
|
||||
item += ".silent"
|
||||
}
|
||||
return c.Transactf("store %s %s (%s)", seqset, item, strings.Join(flags, " "))
|
||||
return c.transactf("store %s %s (%s)", seqset, item, strings.Join(flags, " "))
|
||||
}
|
||||
|
||||
// StoreFlagsClear is like StoreFlagsSet, but only removes flags, leaving other flags on the message intact.
|
||||
func (c *Conn) StoreFlagsClear(seqset string, silent bool, flags ...string) (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
// MSNStoreFlagsClear is like [Conn.MSNStoreFlagsSet], but only removes flags,
|
||||
// leaving other flags on the message intact.
|
||||
//
|
||||
// Method [Conn.UIDStoreFlagsClear], which operates on a uid set, should be
|
||||
// preferred.
|
||||
func (c *Conn) MSNStoreFlagsClear(seqset string, silent bool, flags ...string) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
item := "-flags"
|
||||
if silent {
|
||||
item += ".silent"
|
||||
}
|
||||
return c.Transactf("store %s %s (%s)", seqset, item, strings.Join(flags, " "))
|
||||
return c.transactf("store %s %s (%s)", seqset, item, strings.Join(flags, " "))
|
||||
}
|
||||
|
||||
// Copy adds messages from the sequences in seqSet in the currently selected/active mailbox to dstMailbox.
|
||||
func (c *Conn) Copy(seqSet NumSet, dstMailbox string) (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
return c.Transactf("copy %s %s", seqSet.String(), astring(dstMailbox))
|
||||
// UIDStoreFlagsSet stores a new set of flags for messages matching UIDs from
|
||||
// uidSet with the IMAP4 "UID STORE" command.
|
||||
//
|
||||
// If silent, no untagged responses with the updated flags will be sent by the
|
||||
// server.
|
||||
//
|
||||
// Required capability: "UIDPLUS" or "IMAP4rev2".
|
||||
func (c *Conn) UIDStoreFlagsSet(uidSet string, silent bool, flags ...string) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
item := "flags"
|
||||
if silent {
|
||||
item += ".silent"
|
||||
}
|
||||
return c.transactf("uid store %s %s (%s)", uidSet, item, strings.Join(flags, " "))
|
||||
}
|
||||
|
||||
// UIDCopy is like copy, but operates on UIDs.
|
||||
func (c *Conn) UIDCopy(uidSet NumSet, dstMailbox string) (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
return c.Transactf("uid copy %s %s", uidSet.String(), astring(dstMailbox))
|
||||
// UIDStoreFlagsAdd is like UIDStoreFlagsSet, but only adds flags, leaving
|
||||
// current flags on the message intact.
|
||||
//
|
||||
// Required capability: "UIDPLUS" or "IMAP4rev2".
|
||||
func (c *Conn) UIDStoreFlagsAdd(uidSet string, silent bool, flags ...string) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
item := "+flags"
|
||||
if silent {
|
||||
item += ".silent"
|
||||
}
|
||||
return c.transactf("uid store %s %s (%s)", uidSet, item, strings.Join(flags, " "))
|
||||
}
|
||||
|
||||
// Move moves messages from the sequences in seqSet in the currently selected/active mailbox to dstMailbox.
|
||||
func (c *Conn) Move(seqSet NumSet, dstMailbox string) (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
return c.Transactf("move %s %s", seqSet.String(), astring(dstMailbox))
|
||||
// UIDStoreFlagsClear is like UIDStoreFlagsSet, but only removes flags, leaving
|
||||
// other flags on the message intact.
|
||||
//
|
||||
// Required capability: "UIDPLUS" or "IMAP4rev2".
|
||||
func (c *Conn) UIDStoreFlagsClear(uidSet string, silent bool, flags ...string) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
item := "-flags"
|
||||
if silent {
|
||||
item += ".silent"
|
||||
}
|
||||
return c.transactf("uid store %s %s (%s)", uidSet, item, strings.Join(flags, " "))
|
||||
}
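
A sketch of combining the UID store and expunge commands above; the UID set is a placeholder and conn must have a mailbox selected. Note that Expunge removes every message marked \Deleted, not just the ones flagged here; UIDExpunge would limit removal to a UID set.

package example

import (
	"log"

	"github.com/mjl-/mox/imapclient"
)

// deleteUIDs flags messages 1:3 (by UID) as \Deleted and expunges the mailbox.
func deleteUIDs(conn *imapclient.Conn) {
	if _, err := conn.UIDStoreFlagsAdd("1:3", true, `\Deleted`); err != nil {
		log.Fatalf("store \\Deleted: %v", err)
	}
	if _, err := conn.Expunge(); err != nil {
		log.Fatalf("expunge: %v", err)
	}
}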
|
||||
|
||||
// UIDMove is like move, but operates on UIDs.
|
||||
func (c *Conn) UIDMove(uidSet NumSet, dstMailbox string) (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
return c.Transactf("uid move %s %s", uidSet.String(), astring(dstMailbox))
|
||||
// MSNCopy adds messages from the sequences in the sequence set in the
|
||||
// selected/active mailbox to destMailbox using the IMAP4 "COPY" command.
|
||||
//
|
||||
// Method [Conn.UIDCopy], operating on UIDs instead of sequence numbers, should be
|
||||
// preferred.
|
||||
func (c *Conn) MSNCopy(seqSet string, destMailbox string) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
return c.transactf("copy %s %s", seqSet, astring(destMailbox))
|
||||
}
|
||||
|
||||
// UIDCopy is like copy, but operates on UIDs, using the IMAP4 "UID COPY" command.
|
||||
//
|
||||
// Required capability: "UIDPLUS" or "IMAP4rev2".
|
||||
func (c *Conn) UIDCopy(uidSet string, destMailbox string) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
return c.transactf("uid copy %s %s", uidSet, astring(destMailbox))
|
||||
}
|
||||
|
||||
// MSNSearch returns messages from the sequence set in the selected/active mailbox
|
||||
// that match the search criteria using the IMAP4 "SEARCH" command.
|
||||
//
|
||||
// Method [Conn.UIDSearch], operating on UIDs instead of sequence numbers, should be
|
||||
// preferred.
|
||||
func (c *Conn) MSNSearch(seqSet string, criteria string) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
return c.transactf("search %s %s", seqSet, criteria)
|
||||
}
|
||||
|
||||
// UIDSearch returns messages from the uid set in the selected/active mailbox that
|
||||
// match the search criteria using the IMAP4 "UID SEARCH" command.
|
||||
//
|
||||
// Criteria is a search program, see RFC 9051 and RFC 3501 for details.
|
||||
//
|
||||
// Required capability: "UIDPLUS" or "IMAP4rev2".
|
||||
func (c *Conn) UIDSearch(seqSet string, criteria string) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
return c.transactf("uid search %s %s", seqSet, criteria)
|
||||
}
|
||||
|
||||
// MSNMove moves messages from the sequence set in the selected/active mailbox to
|
||||
// destMailbox using the IMAP4 "MOVE" command.
|
||||
//
|
||||
// Required capability: "MOVE" or "IMAP4rev2".
|
||||
//
|
||||
// Method [Conn.UIDMove], operating on UIDs instead of sequence numbers, should be
|
||||
// preferred.
|
||||
func (c *Conn) MSNMove(seqSet string, destMailbox string) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
return c.transactf("move %s %s", seqSet, astring(destMailbox))
|
||||
}
|
||||
|
||||
// UIDMove is like move, but operates on UIDs, using the IMAP4 "UID MOVE" command.
|
||||
//
|
||||
// Required capability: "MOVE" or "IMAP4rev2".
|
||||
func (c *Conn) UIDMove(uidSet string, destMailbox string) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
return c.transactf("uid move %s %s", uidSet, astring(destMailbox))
|
||||
}
|
||||
|
||||
// MSNReplace is like the preferred [Conn.UIDReplace], but operates on a message
|
||||
// sequence number (MSN) instead of a UID.
|
||||
//
|
||||
// Required capability: "REPLACE".
|
||||
//
|
||||
// Method [Conn.UIDReplace], operating on UIDs instead of sequence numbers, should be
|
||||
// preferred.
|
||||
func (c *Conn) MSNReplace(msgseq string, mailbox string, msg Append) (resp Response, rerr error) {
|
||||
// todo: parse msgseq, must be nznumber, with a known msgseq. or "*" with at least one message.
|
||||
return c.replace("replace", msgseq, mailbox, msg)
|
||||
}
|
||||
|
||||
// UIDReplace uses the IMAP4 "UID REPLACE" command to replace a message from the
|
||||
// selected/active mailbox with a new/different version of the message in the named
|
||||
// mailbox, which may be the same or different than the selected mailbox.
|
||||
//
|
||||
// The replaced message is indicated by uid.
|
||||
//
|
||||
// Required capability: "REPLACE".
|
||||
func (c *Conn) UIDReplace(uid string, mailbox string, msg Append) (resp Response, rerr error) {
|
||||
// todo: parse uid, must be nznumber, with a known uid. or "*" with at least one message.
|
||||
return c.replace("uid replace", uid, mailbox, msg)
|
||||
}
|
||||
|
||||
func (c *Conn) replace(cmd string, num string, mailbox string, msg Append) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
|
||||
// todo: use synchronizing literal for larger messages.
|
||||
|
||||
var date string
|
||||
if msg.Received != nil {
|
||||
date = ` "` + msg.Received.Format("_2-Jan-2006 15:04:05 -0700") + `"`
|
||||
}
|
||||
// todo: only use literal8 if needed, possibly with "UTF8()"
|
||||
// todo: encode mailbox
|
||||
err := c.WriteCommandf("", "%s %s %s (%s)%s ~{%d+}", cmd, num, astring(mailbox), strings.Join(msg.Flags, " "), date, msg.Size)
|
||||
c.xcheckf(err, "writing replace command")
|
||||
|
||||
defer c.xtracewrite(mlog.LevelTracedata)()
|
||||
_, err = io.Copy(c.xbw, msg.Data)
|
||||
c.xcheckf(err, "write message data")
|
||||
c.xtracewrite(mlog.LevelTrace)
|
||||
|
||||
fmt.Fprintf(c.xbw, "\r\n")
|
||||
c.xflush()
|
||||
|
||||
return c.responseOK()
|
||||
}
|
||||
|
38
imapclient/fuzz_test.go
Normal file
@ -0,0 +1,38 @@
|
||||
package imapclient
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func FuzzParser(f *testing.F) {
|
||||
/*
|
||||
Gathering all untagged responses and command completion results from the RFCs:
|
||||
|
||||
cd ../rfc
|
||||
(
|
||||
grep ' S: \* [A-Z]' * | sed 's/^.*S: //g'
|
||||
grep -E ' S: [^ *]+ (OK|NO|BAD) ' * | sed 's/^.*S: //g'
|
||||
) | grep -v '\.\.\/' | sort | uniq >../testdata/imapclient/fuzzseed.txt
|
||||
*/
|
||||
buf, err := os.ReadFile("../testdata/imapclient/fuzzseed.txt")
|
||||
if err != nil {
|
||||
f.Fatalf("reading seed: %v", err)
|
||||
}
|
||||
for _, s := range strings.Split(string(buf), "\n") {
|
||||
f.Add(s + "\r\n")
|
||||
}
|
||||
f.Add("1:3")
|
||||
f.Add("3:1")
|
||||
f.Add("3,1")
|
||||
f.Add("*")
|
||||
|
||||
f.Fuzz(func(t *testing.T, data string) {
|
||||
ParseUntagged(data)
|
||||
ParseCode(data)
|
||||
ParseResult(data)
|
||||
ParseNumSet(data)
|
||||
ParseUIDRange(data)
|
||||
})
|
||||
}
|
1681
imapclient/parse.go
1681
imapclient/parse.go
File diff suppressed because it is too large
Load Diff
42
imapclient/parse_test.go
Normal file
42
imapclient/parse_test.go
Normal file
@ -0,0 +1,42 @@
|
||||
package imapclient
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func tcheckf(t *testing.T, err error, format string, args ...any) {
|
||||
if err != nil {
|
||||
t.Fatalf("%s: %s", fmt.Sprintf(format, args...), err)
|
||||
}
|
||||
}
|
||||
|
||||
func tcompare(t *testing.T, a, b any) {
|
||||
if !reflect.DeepEqual(a, b) {
|
||||
t.Fatalf("got:\n%#v\nexpected:\n%#v", a, b)
|
||||
}
|
||||
}
|
||||
|
||||
func uint32ptr(v uint32) *uint32 { return &v }
|
||||
|
||||
func TestParse(t *testing.T) {
|
||||
code, err := ParseCode("COPYUID 1 1:3 2:4")
|
||||
tcheckf(t, err, "parsing code")
|
||||
tcompare(t, code,
|
||||
CodeCopyUID{
|
||||
DestUIDValidity: 1,
|
||||
From: []NumRange{{First: 1, Last: uint32ptr(3)}},
|
||||
To: []NumRange{{First: 2, Last: uint32ptr(4)}},
|
||||
},
|
||||
)
|
||||
|
||||
ut, err := ParseUntagged("* BYE done\r\n")
|
||||
tcheckf(t, err, "parsing untagged")
|
||||
tcompare(t, ut, UntaggedBye{Text: "done"})
|
||||
|
||||
tag, result, err := ParseResult("tag1 OK [ALERT] Hello\r\n")
|
||||
tcheckf(t, err, "parsing result")
|
||||
tcompare(t, tag, "tag1")
|
||||
tcompare(t, result, Result{Status: OK, Code: CodeWord("ALERT"), Text: "Hello"})
|
||||
}
|
41
imapclient/prefixconn.go
Normal file
@ -0,0 +1,41 @@
|
||||
package imapclient
|
||||
|
||||
import (
|
||||
"io"
|
||||
"net"
|
||||
)
|
||||
|
||||
// prefixConn is a net.Conn with a buffer from which the first reads are satisfied.
|
||||
// Used for STARTTLS, where we may already have done a buffered read of the initial TLS data.
|
||||
type prefixConn struct {
|
||||
prefix []byte
|
||||
net.Conn
|
||||
}
|
||||
|
||||
func (c *prefixConn) Read(buf []byte) (int, error) {
|
||||
if len(c.prefix) > 0 {
|
||||
n := min(len(buf), len(c.prefix))
|
||||
copy(buf[:n], c.prefix[:n])
|
||||
c.prefix = c.prefix[n:]
|
||||
if len(c.prefix) == 0 {
|
||||
c.prefix = nil
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
return c.Conn.Read(buf)
|
||||
}
|
||||
|
||||
// xprefixConn checks if there are any buffered unconsumed reads. If not, it
|
||||
// returns c.conn. Otherwise, it returns a *prefixConn from which the buffered data
|
||||
// can be read followed by data from c.conn.
|
||||
func (c *Conn) xprefixConn() net.Conn {
|
||||
n := c.br.Buffered()
|
||||
if n == 0 {
|
||||
return c.conn
|
||||
}
|
||||
|
||||
buf := make([]byte, n)
|
||||
_, err := io.ReadFull(c.br, buf)
|
||||
c.xcheckf(err, "get buffered data")
|
||||
return &prefixConn{buf, c.conn}
|
||||
}
|
@ -2,35 +2,57 @@ package imapclient
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Capability is a known string for use with the ENABLED and CAPABILITY command.
|
||||
// Capability is a known string for use with the ENABLED command and response and
|
||||
// CAPABILITY responses. Servers could send unknown values. Always in upper case.
|
||||
type Capability string
|
||||
|
||||
const (
|
||||
CapIMAP4rev1 Capability = "IMAP4rev1"
|
||||
CapIMAP4rev2 Capability = "IMAP4rev2"
|
||||
CapLoginDisabled Capability = "LOGINDISABLED"
|
||||
CapStarttls Capability = "STARTTLS"
|
||||
CapAuthPlain Capability = "AUTH=PLAIN"
|
||||
CapLiteralPlus Capability = "LITERAL+"
|
||||
CapLiteralMinus Capability = "LITERAL-"
|
||||
CapIdle Capability = "IDLE"
|
||||
CapNamespace Capability = "NAMESPACE"
|
||||
CapBinary Capability = "BINARY"
|
||||
CapUnselect Capability = "UNSELECT"
|
||||
CapUidplus Capability = "UIDPLUS"
|
||||
CapEsearch Capability = "ESEARCH"
|
||||
CapEnable Capability = "ENABLE"
|
||||
CapSave Capability = "SAVE"
|
||||
CapListExtended Capability = "LIST-EXTENDED"
|
||||
CapSpecialUse Capability = "SPECIAL-USE"
|
||||
CapMove Capability = "MOVE"
|
||||
CapUTF8Only Capability = "UTF8=ONLY"
|
||||
CapUTF8Accept Capability = "UTF8=ACCEPT"
|
||||
CapID Capability = "ID" // ../rfc/2971:80
|
||||
CapIMAP4rev1 Capability = "IMAP4REV1" // ../rfc/3501:1310
|
||||
CapIMAP4rev2 Capability = "IMAP4REV2" // ../rfc/9051:1219
|
||||
CapLoginDisabled Capability = "LOGINDISABLED" // ../rfc/3501:3792 ../rfc/9051:5436
|
||||
CapStartTLS Capability = "STARTTLS" // ../rfc/3501:1327 ../rfc/9051:1238
|
||||
CapAuthPlain Capability = "AUTH=PLAIN" // ../rfc/3501:1327 ../rfc/9051:1238
|
||||
CapAuthExternal Capability = "AUTH=EXTERNAL" // ../rfc/4422:1575
|
||||
CapAuthSCRAMSHA256Plus Capability = "AUTH=SCRAM-SHA-256-PLUS" // ../rfc/7677:80
|
||||
CapAuthSCRAMSHA256 Capability = "AUTH=SCRAM-SHA-256"
|
||||
CapAuthSCRAMSHA1Plus Capability = "AUTH=SCRAM-SHA-1-PLUS" // ../rfc/5802:465
|
||||
CapAuthSCRAMSHA1 Capability = "AUTH=SCRAM-SHA-1"
|
||||
CapAuthCRAMMD5 Capability = "AUTH=CRAM-MD5" // ../rfc/2195:80
|
||||
CapLiteralPlus Capability = "LITERAL+" // ../rfc/2088:45
|
||||
CapLiteralMinus Capability = "LITERAL-" // ../rfc/7888:26 ../rfc/9051:847 Default since IMAP4rev2
|
||||
CapIdle Capability = "IDLE" // ../rfc/2177:69 ../rfc/9051:3542 Default since IMAP4rev2
|
||||
CapNamespace Capability = "NAMESPACE" // ../rfc/2342:130 ../rfc/9051:135 Default since IMAP4rev2
|
||||
CapBinary Capability = "BINARY" // ../rfc/3516:100
|
||||
CapUnselect Capability = "UNSELECT" // ../rfc/3691:78 ../rfc/9051:3667 Default since IMAP4rev2
|
||||
CapUidplus Capability = "UIDPLUS" // ../rfc/4315:36 ../rfc/9051:8015 Default since IMAP4rev2
|
||||
CapEsearch Capability = "ESEARCH" // ../rfc/4731:69 ../rfc/9051:8016 Default since IMAP4rev2
|
||||
CapEnable Capability = "ENABLE" // ../rfc/5161:52 ../rfc/9051:8016 Default since IMAP4rev2
|
||||
CapListExtended Capability = "LIST-EXTENDED" // ../rfc/5258:150 ../rfc/9051:7987 Syntax except multiple mailboxes default since IMAP4rev2
|
||||
CapSpecialUse Capability = "SPECIAL-USE" // ../rfc/6154:156 ../rfc/9051:8021 Special-use attributes in LIST responses by default since IMAP4rev2
|
||||
CapMove Capability = "MOVE" // ../rfc/6851:87 ../rfc/9051:8018 Default since IMAP4rev2
|
||||
CapUTF8Only Capability = "UTF8=ONLY"
|
||||
CapUTF8Accept Capability = "UTF8=ACCEPT"
|
||||
CapCondstore Capability = "CONDSTORE" // ../rfc/7162:411
|
||||
CapQresync Capability = "QRESYNC" // ../rfc/7162:1376
|
||||
CapID Capability = "ID" // ../rfc/2971:80
|
||||
CapMetadata Capability = "METADATA" // ../rfc/5464:124
|
||||
CapMetadataServer Capability = "METADATA-SERVER" // ../rfc/5464:124
|
||||
CapSaveDate Capability = "SAVEDATE" // ../rfc/8514
|
||||
CapCreateSpecialUse Capability = "CREATE-SPECIAL-USE" // ../rfc/6154:296
|
||||
CapCompressDeflate Capability = "COMPRESS=DEFLATE" // ../rfc/4978:65
|
||||
CapListMetadata Capability = "LIST-METADATA" // ../rfc/9590:73
|
||||
CapMultiAppend Capability = "MULTIAPPEND" // ../rfc/3502:33
|
||||
CapReplace Capability = "REPLACE" // ../rfc/8508:155
|
||||
CapPreview Capability = "PREVIEW" // ../rfc/8970:114
|
||||
CapMultiSearch Capability = "MULTISEARCH" // ../rfc/7377:187
|
||||
CapNotify Capability = "NOTIFY" // ../rfc/5465:195
|
||||
CapUIDOnly Capability = "UIDONLY" // ../rfc/9586:129
|
||||
)
|
||||
|
||||
// Status is the tagged final result of a command.
|
||||
@ -42,73 +64,144 @@ const (
|
||||
OK Status = "OK" // Command succeeded.
|
||||
)
|
||||
|
||||
// Response is a response to an IMAP command including any preceding untagged
|
||||
// responses. Response implements the error interface through result.
|
||||
//
|
||||
// See [UntaggedResponseGet] and [UntaggedResponseList] to retrieve specific types
|
||||
// of untagged responses.
|
||||
type Response struct {
|
||||
Untagged []Untagged
|
||||
Result
|
||||
}
|
||||
|
||||
var (
ErrMissing = errors.New("no response of type") // Returned by UntaggedResponseGet.
ErrMultiple = errors.New("multiple responses of type") // Idem.
)

// UntaggedResponseGet returns the single untagged response of type T. Only
// [ErrMissing] or [ErrMultiple] can be returned as error.
func UntaggedResponseGet[T Untagged](resp Response) (T, error) {
var t T
var have bool
for _, e := range resp.Untagged {
if tt, ok := e.(T); ok {
if have {
return t, ErrMultiple
}
t = tt
have = true
}
}
if !have {
return t, ErrMissing
}
return t, nil
}

// UntaggedResponseList returns all untagged responses of type T.
func UntaggedResponseList[T Untagged](resp Response) []T {
var l []T
for _, e := range resp.Untagged {
if tt, ok := e.(T); ok {
l = append(l, tt)
}
}
return l
}
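A minimal sketch of how these generic helpers might be used on a Response; only the types and helpers come from the code above, the Summarize function and its printing are illustrative:

package example

import (
    "errors"
    "fmt"

    "github.com/mjl-/mox/imapclient"
)

// Summarize prints the EXISTS count and any FETCH responses carried in a
// command's Response, using the generic helpers from the diff above.
func Summarize(resp imapclient.Response) {
    exists, err := imapclient.UntaggedResponseGet[imapclient.UntaggedExists](resp)
    if err == nil {
        fmt.Println("exists:", uint32(exists))
    } else if !errors.Is(err, imapclient.ErrMissing) {
        fmt.Println("unexpected:", err) // ErrMultiple would be surprising for EXISTS.
    }
    for _, f := range imapclient.UntaggedResponseList[imapclient.UntaggedFetch](resp) {
        fmt.Println("fetch seq", f.Seq, "attrs", len(f.Attrs))
    }
}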
|
||||
|
||||
// Result is the final response for a command, indicating success or failure.
|
||||
type Result struct {
|
||||
Status Status
|
||||
RespText
|
||||
Code Code // Set if response code is present.
|
||||
Text string // Any remaining text.
|
||||
}
|
||||
|
||||
// CodeArg represents a response code with arguments, i.e. the data between [] in the response line.
|
||||
type CodeArg interface {
|
||||
CodeString() string
|
||||
}
|
||||
|
||||
// CodeOther is a valid but unrecognized response code.
|
||||
type CodeOther struct {
|
||||
Code string
|
||||
Args []string
|
||||
}
|
||||
|
||||
func (c CodeOther) CodeString() string {
|
||||
return c.Code + " " + strings.Join(c.Args, " ")
|
||||
}
|
||||
|
||||
// CodeWords is a code with space-separated string parameters. E.g. CAPABILITY.
|
||||
type CodeWords struct {
|
||||
Code string
|
||||
Args []string
|
||||
}
|
||||
|
||||
func (c CodeWords) CodeString() string {
|
||||
s := c.Code
|
||||
for _, w := range c.Args {
|
||||
s += " " + w
|
||||
func (r Result) Error() string {
|
||||
s := fmt.Sprintf("IMAP result %s", r.Status)
|
||||
if r.Code != nil {
|
||||
s += "[" + r.Code.CodeString() + "]"
|
||||
}
|
||||
if r.Text != "" {
|
||||
s += " " + r.Text
|
||||
}
|
||||
return s
|
||||
}
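Sketch of how the reworked Result renders through Error(); the field names are taken from the new struct in this diff, while the surrounding demo function is hypothetical:

package example

import (
    "fmt"

    "github.com/mjl-/mox/imapclient"
)

// demoResultError builds a Result the way the parser would for a tagged
// "x OK [READ-WRITE] done" line and renders it via the error interface.
func demoResultError() {
    r := imapclient.Result{
        Status: imapclient.OK,
        Code:   imapclient.CodeWord("READ-WRITE"),
        Text:   "done",
    }
    fmt.Println(r.Error()) // "IMAP result OK[READ-WRITE] done", per Error above.
}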
|
||||
|
||||
// CodeList is a code with a list with space-separated strings as parameters. E.g. BADCHARSET, PERMANENTFLAGS.
|
||||
type CodeList struct {
|
||||
Code string
|
||||
Args []string // If nil, no list was present. List can also be empty.
|
||||
// Code represents a response code with optional arguments, i.e. the data between [] in the response line.
|
||||
type Code interface {
|
||||
CodeString() string
|
||||
}
|
||||
|
||||
func (c CodeList) CodeString() string {
|
||||
s := c.Code
|
||||
if c.Args == nil {
|
||||
// CodeWord is a response code without parameters, always in upper case.
|
||||
type CodeWord string
|
||||
|
||||
func (c CodeWord) CodeString() string {
|
||||
return string(c)
|
||||
}
|
||||
|
||||
// CodeOther is an unrecognized response code with parameters.
|
||||
type CodeParams struct {
|
||||
Code string // Always in upper case.
|
||||
Args []string
|
||||
}
|
||||
|
||||
func (c CodeParams) CodeString() string {
|
||||
return c.Code + " " + strings.Join(c.Args, " ")
|
||||
}
|
||||
|
||||
// CodeCapability is a CAPABILITY response code with the capabilities supported by the server.
|
||||
type CodeCapability []Capability
|
||||
|
||||
func (c CodeCapability) CodeString() string {
|
||||
var s string
|
||||
for _, c := range c {
|
||||
s += " " + string(c)
|
||||
}
|
||||
return "CAPABILITY" + s
|
||||
}
|
||||
|
||||
type CodeBadCharset []string
|
||||
|
||||
func (c CodeBadCharset) CodeString() string {
|
||||
s := "BADCHARSET"
|
||||
if len(c) == 0 {
|
||||
return s
|
||||
}
|
||||
return s + "(" + strings.Join(c.Args, " ") + ")"
|
||||
return s + " (" + strings.Join([]string(c), " ") + ")"
|
||||
}
|
||||
|
||||
// CodeUint is a code with a uint32 parameter, e.g. UIDNEXT and UIDVALIDITY.
|
||||
type CodeUint struct {
|
||||
Code string
|
||||
Num uint32
|
||||
type CodePermanentFlags []string
|
||||
|
||||
func (c CodePermanentFlags) CodeString() string {
|
||||
return "PERMANENTFLAGS (" + strings.Join([]string(c), " ") + ")"
|
||||
}
|
||||
|
||||
func (c CodeUint) CodeString() string {
|
||||
return fmt.Sprintf("%s %d", c.Code, c.Num)
|
||||
type CodeUIDNext uint32
|
||||
|
||||
func (c CodeUIDNext) CodeString() string {
|
||||
return fmt.Sprintf("UIDNEXT %d", c)
|
||||
}
|
||||
|
||||
type CodeUIDValidity uint32
|
||||
|
||||
func (c CodeUIDValidity) CodeString() string {
|
||||
return fmt.Sprintf("UIDVALIDITY %d", c)
|
||||
}
|
||||
|
||||
type CodeUnseen uint32
|
||||
|
||||
func (c CodeUnseen) CodeString() string {
|
||||
return fmt.Sprintf("UNSEEN %d", c)
|
||||
}
|
||||
|
||||
// "APPENDUID" response code.
|
||||
type CodeAppendUID struct {
|
||||
UIDValidity uint32
|
||||
UID uint32
|
||||
UIDs NumRange
|
||||
}
|
||||
|
||||
func (c CodeAppendUID) CodeString() string {
|
||||
return fmt.Sprintf("APPENDUID %d %d", c.UIDValidity, c.UID)
|
||||
return fmt.Sprintf("APPENDUID %d %s", c.UIDValidity, c.UIDs.String())
|
||||
}
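Illustration of the NumRange-based APPENDUID code introduced here; it assumes NumRange.String renders a range as first:last (as the COPYUID codes later in this diff suggest), and the demo function itself is hypothetical:

package example

import "github.com/mjl-/mox/imapclient"

// demoAppendUID builds the response code for two appended messages with
// UIDs 1 through 3; the expectation is that it renders as "APPENDUID 1 1:3".
func demoAppendUID() string {
    last := uint32(3)
    code := imapclient.CodeAppendUID{
        UIDValidity: 1,
        UIDs:        imapclient.NumRange{First: 1, Last: &last},
    }
    return code.CodeString()
}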
|
||||
|
||||
// "COPYUID" response code.
|
||||
@ -149,11 +242,66 @@ func (c CodeHighestModSeq) CodeString() string {
|
||||
return fmt.Sprintf("HIGHESTMODSEQ %d", c)
|
||||
}
|
||||
|
||||
// RespText represents a response line minus the leading tag.
|
||||
type RespText struct {
|
||||
Code string // The first word between [] after the status.
|
||||
CodeArg CodeArg // Set if code has a parameter.
|
||||
More string // Any remaining text.
|
||||
// "INPROGRESS" response code.
|
||||
type CodeInProgress struct {
|
||||
Tag string // Nil is empty string.
|
||||
Current *uint32
|
||||
Goal *uint32
|
||||
}
|
||||
|
||||
func (c CodeInProgress) CodeString() string {
|
||||
// ABNF allows inprogress-tag/state with all nil values. Doesn't seem useful enough
|
||||
// to keep track of.
|
||||
if c.Tag == "" && c.Current == nil && c.Goal == nil {
|
||||
return "INPROGRESS"
|
||||
}
|
||||
|
||||
// todo: quote tag properly
|
||||
current := "nil"
|
||||
goal := "nil"
|
||||
if c.Current != nil {
|
||||
current = fmt.Sprintf("%d", *c.Current)
|
||||
}
|
||||
if c.Goal != nil {
|
||||
goal = fmt.Sprintf("%d", *c.Goal)
|
||||
}
|
||||
return fmt.Sprintf("INPROGRESS (%q %s %s)", c.Tag, current, goal)
|
||||
}
|
||||
|
||||
// "BADEVENT" response code, with the events that are supported, for the NOTIFY
|
||||
// extension.
|
||||
type CodeBadEvent []string
|
||||
|
||||
func (c CodeBadEvent) CodeString() string {
|
||||
return fmt.Sprintf("BADEVENT (%s)", strings.Join([]string(c), " "))
|
||||
}
|
||||
|
||||
// "METADATA LONGENTRIES number" response for GETMETADATA command.
|
||||
type CodeMetadataLongEntries uint32
|
||||
|
||||
func (c CodeMetadataLongEntries) CodeString() string {
|
||||
return fmt.Sprintf("METADATA LONGENTRIES %d", c)
|
||||
}
|
||||
|
||||
// "METADATA (MAXSIZE number)" response for SETMETADATA command.
|
||||
type CodeMetadataMaxSize uint32
|
||||
|
||||
func (c CodeMetadataMaxSize) CodeString() string {
|
||||
return fmt.Sprintf("METADATA (MAXSIZE %d)", c)
|
||||
}
|
||||
|
||||
// "METADATA (TOOMANY)" response for SETMETADATA command.
|
||||
type CodeMetadataTooMany struct{}
|
||||
|
||||
func (c CodeMetadataTooMany) CodeString() string {
|
||||
return "METADATA (TOOMANY)"
|
||||
}
|
||||
|
||||
// "METADATA (NOPRIVATE)" response for SETMETADATA command.
|
||||
type CodeMetadataNoPrivate struct{}
|
||||
|
||||
func (c CodeMetadataNoPrivate) CodeString() string {
|
||||
return "METADATA (NOPRIVATE)"
|
||||
}
|
||||
|
||||
// atom or string.
|
||||
@ -194,17 +342,30 @@ func syncliteral(s string) string {
|
||||
// todo: make an interface that the untagged responses implement?
|
||||
type Untagged any
|
||||
|
||||
type UntaggedBye RespText
|
||||
type UntaggedPreauth RespText
|
||||
type UntaggedBye struct {
|
||||
Code Code // Set if response code is present.
|
||||
Text string // Any remaining text.
|
||||
}
|
||||
type UntaggedPreauth struct {
|
||||
Code Code // Set if response code is present.
|
||||
Text string // Any remaining text.
|
||||
}
|
||||
type UntaggedExpunge uint32
|
||||
type UntaggedExists uint32
|
||||
type UntaggedRecent uint32
|
||||
type UntaggedCapability []string
|
||||
type UntaggedEnabled []string
|
||||
|
||||
// UntaggedCapability lists all capabilities the server implements.
|
||||
type UntaggedCapability []Capability
|
||||
|
||||
// UntaggedEnabled indicates the capabilities that were enabled on the connection
|
||||
// by the server, typically in response to an ENABLE command.
|
||||
type UntaggedEnabled []Capability
|
||||
|
||||
type UntaggedResult Result
|
||||
type UntaggedFlags []string
|
||||
type UntaggedList struct {
|
||||
// ../rfc/9051:6690
|
||||
|
||||
Flags []string
|
||||
Separator byte // 0 for NIL
|
||||
Mailbox string
|
||||
@ -215,10 +376,19 @@ type UntaggedFetch struct {
|
||||
Seq uint32
|
||||
Attrs []FetchAttr
|
||||
}
|
||||
|
||||
// UntaggedUIDFetch is like UntaggedFetch, but with UIDs instead of message
|
||||
// sequence numbers, and returned instead of regular fetch responses when UIDONLY
|
||||
// is enabled.
|
||||
type UntaggedUIDFetch struct {
|
||||
UID uint32
|
||||
Attrs []FetchAttr
|
||||
}
|
||||
type UntaggedSearch []uint32
|
||||
|
||||
// ../rfc/7162:1101
|
||||
type UntaggedSearchModSeq struct {
|
||||
// ../rfc/7162:1101
|
||||
|
||||
Nums []uint32
|
||||
ModSeq int64
|
||||
}
|
||||
@ -227,9 +397,36 @@ type UntaggedStatus struct {
|
||||
Attrs map[StatusAttr]int64 // Upper case status attributes.
|
||||
}
|
||||
|
||||
// ../rfc/9051:7059 ../9208:712
|
||||
// Unsolicited response, indicating an annotation has changed.
|
||||
type UntaggedMetadataKeys struct {
|
||||
// ../rfc/5464:716
|
||||
|
||||
Mailbox string // Empty means not specific to mailbox.
|
||||
|
||||
// Keys that have changed. To get values (or determine absence), the server must be
|
||||
// queried.
|
||||
Keys []string
|
||||
}
|
||||
|
||||
// Annotation is a metadata server or mailbox annotation.
|
||||
type Annotation struct {
|
||||
Key string
|
||||
// Nil is represented by IsString false and a nil Value.
|
||||
IsString bool
|
||||
Value []byte
|
||||
}
|
||||
|
||||
type UntaggedMetadataAnnotations struct {
|
||||
// ../rfc/5464:683
|
||||
|
||||
Mailbox string // Empty means not specific to mailbox.
|
||||
Annotations []Annotation
|
||||
}
|
||||
|
||||
type StatusAttr string
|
||||
|
||||
// ../rfc/9051:7059 ../9208:712
|
||||
|
||||
const (
|
||||
StatusMessages StatusAttr = "MESSAGES"
|
||||
StatusUIDNext StatusAttr = "UIDNEXT"
|
||||
@ -248,6 +445,7 @@ type UntaggedNamespace struct {
|
||||
}
|
||||
type UntaggedLsub struct {
|
||||
// ../rfc/3501:4833
|
||||
|
||||
Flags []string
|
||||
Separator byte
|
||||
Mailbox string
|
||||
@ -255,15 +453,17 @@ type UntaggedLsub struct {
|
||||
|
||||
// Fields are optional and zero if absent.
|
||||
type UntaggedEsearch struct {
|
||||
// ../rfc/9051:6546
|
||||
Correlator string
|
||||
UID bool
|
||||
Min uint32
|
||||
Max uint32
|
||||
All NumSet
|
||||
Count *uint32
|
||||
ModSeq int64
|
||||
Exts []EsearchDataExt
|
||||
Tag string // ../rfc/9051:6546
|
||||
Mailbox string // For MULTISEARCH. ../rfc/7377:437
|
||||
UIDValidity uint32 // For MULTISEARCH, ../rfc/7377:438
|
||||
|
||||
UID bool
|
||||
Min uint32
|
||||
Max uint32
|
||||
All NumSet
|
||||
Count *uint32
|
||||
ModSeq int64
|
||||
Exts []EsearchDataExt
|
||||
}
|
||||
|
||||
// UntaggedVanished is used in QRESYNC to send UIDs that have been removed.
|
||||
@ -315,6 +515,7 @@ type EsearchDataExt struct {
|
||||
|
||||
type NamespaceDescr struct {
|
||||
// ../rfc/9051:6769
|
||||
|
||||
Prefix string
|
||||
Separator byte // If 0 then separator was absent.
|
||||
Exts []NamespaceExtension
|
||||
@ -322,13 +523,14 @@ type NamespaceDescr struct {
|
||||
|
||||
type NamespaceExtension struct {
|
||||
// ../rfc/9051:6773
|
||||
|
||||
Key string
|
||||
Values []string
|
||||
}
|
||||
|
||||
// FetchAttr represents a FETCH response attribute.
|
||||
type FetchAttr interface {
|
||||
Attr() string // Name of attribute.
|
||||
Attr() string // Name of attribute in upper case, e.g. "UID".
|
||||
}
|
||||
|
||||
type NumSet struct {
|
||||
@ -355,12 +557,19 @@ func (ns NumSet) String() string {
|
||||
}
|
||||
|
||||
func ParseNumSet(s string) (ns NumSet, rerr error) {
|
||||
c := Conn{r: bufio.NewReader(strings.NewReader(s))}
|
||||
c := Proto{br: bufio.NewReader(strings.NewReader(s))}
|
||||
defer c.recover(&rerr)
|
||||
ns = c.xsequenceSet()
|
||||
return
|
||||
}
|
||||
|
||||
func ParseUIDRange(s string) (nr NumRange, rerr error) {
c := Proto{br: bufio.NewReader(strings.NewReader(s))}
defer c.recover(&rerr)
nr = c.xuidrange()
return
}
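A small, assumed usage sketch of the two parse helpers; the exact String output is not spelled out in the diff, but parsing and re-serializing a set is expected to round-trip:

package example

import (
    "fmt"

    "github.com/mjl-/mox/imapclient"
)

func demoParse() {
    // Parse an IMAP sequence-set and a single UID range, then re-serialize them.
    ns, err := imapclient.ParseNumSet("1:3,5")
    if err == nil {
        fmt.Println(ns.String())
    }
    nr, err := imapclient.ParseUIDRange("4:7")
    if err == nil {
        fmt.Println(nr.String())
    }
}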
|
||||
|
||||
// NumRange is a single number or range.
|
||||
type NumRange struct {
|
||||
First uint32 // 0 for "*".
|
||||
@ -394,6 +603,7 @@ type TaggedExtComp struct {
|
||||
|
||||
type TaggedExtVal struct {
|
||||
// ../rfc/9051:7111
|
||||
|
||||
Number *int64
|
||||
SeqSet *NumSet
|
||||
Comp *TaggedExtComp // If SimpleNumber and SimpleSeqSet is nil, this is a Comp. But Comp is optional and can also be nil. Not great.
|
||||
@ -401,6 +611,7 @@ type TaggedExtVal struct {
|
||||
|
||||
type MboxListExtendedItem struct {
|
||||
// ../rfc/9051:6699
|
||||
|
||||
Tag string
|
||||
Val TaggedExtVal
|
||||
}
|
||||
@ -429,9 +640,21 @@ type Address struct {
|
||||
}
|
||||
|
||||
// "INTERNALDATE" fetch response.
|
||||
type FetchInternalDate string // todo: parsed time
|
||||
type FetchInternalDate struct {
|
||||
Date time.Time
|
||||
}
|
||||
|
||||
func (f FetchInternalDate) Attr() string { return "INTERNALDATE" }
|
||||
|
||||
// "SAVEDATE" fetch response.
|
||||
type FetchSaveDate struct {
|
||||
// ../rfc/8514:265
|
||||
|
||||
SaveDate *time.Time // nil means absent for message.
|
||||
}
|
||||
|
||||
func (f FetchSaveDate) Attr() string { return "SAVEDATE" }
|
||||
|
||||
// "RFC822.SIZE" fetch response.
|
||||
type FetchRFC822Size int64
|
||||
|
||||
@ -455,6 +678,7 @@ func (f FetchRFC822Text) Attr() string { return "RFC822.TEXT" }
|
||||
// "BODYSTRUCTURE" fetch response.
|
||||
type FetchBodystructure struct {
|
||||
// ../rfc/9051:6355
|
||||
|
||||
RespAttr string
|
||||
Body any // BodyType*
|
||||
}
|
||||
@ -464,6 +688,7 @@ func (f FetchBodystructure) Attr() string { return f.RespAttr }
|
||||
// "BODY" fetch response.
|
||||
type FetchBody struct {
|
||||
// ../rfc/9051:6756 ../rfc/9051:6985
|
||||
|
||||
RespAttr string
|
||||
Section string // todo: parse more ../rfc/9051:6985
|
||||
Offset int32
|
||||
@ -479,36 +704,96 @@ type BodyFields struct {
|
||||
Octets int32
|
||||
}
|
||||
|
||||
// BodyTypeMpart represents the body structure a multipart message, with subparts and the multipart media subtype. Used in a FETCH response.
|
||||
// BodyTypeMpart represents the body structure of a multipart message, with
|
||||
// subparts and the multipart media subtype. Used in a FETCH response.
|
||||
type BodyTypeMpart struct {
|
||||
// ../rfc/9051:6411
|
||||
|
||||
Bodies []any // BodyTypeBasic, BodyTypeMsg, BodyTypeText
|
||||
MediaSubtype string
|
||||
Ext *BodyExtensionMpart
|
||||
}
|
||||
|
||||
// BodyTypeBasic represents basic information about a part, used in a FETCH response.
|
||||
// BodyTypeBasic represents basic information about a part, used in a FETCH
|
||||
// response.
|
||||
type BodyTypeBasic struct {
|
||||
// ../rfc/9051:6407
|
||||
|
||||
MediaType, MediaSubtype string
|
||||
BodyFields BodyFields
|
||||
Ext *BodyExtension1Part
|
||||
}
|
||||
|
||||
// BodyTypeMsg represents an email message as a body structure, used in a FETCH response.
|
||||
// BodyTypeMsg represents an email message as a body structure, used in a FETCH
|
||||
// response.
|
||||
type BodyTypeMsg struct {
|
||||
// ../rfc/9051:6415
|
||||
|
||||
MediaType, MediaSubtype string
|
||||
BodyFields BodyFields
|
||||
Envelope Envelope
|
||||
Bodystructure any // One of the BodyType*
|
||||
Lines int64
|
||||
Ext *BodyExtension1Part
|
||||
}
|
||||
|
||||
// BodyTypeText represents a text part as a body structure, used in a FETCH response.
|
||||
// BodyTypeText represents a text part as a body structure, used in a FETCH
|
||||
// response.
|
||||
type BodyTypeText struct {
|
||||
// ../rfc/9051:6418
|
||||
|
||||
MediaType, MediaSubtype string
|
||||
BodyFields BodyFields
|
||||
Lines int64
|
||||
Ext *BodyExtension1Part
|
||||
}
|
||||
|
||||
// BodyExtensionMpart has the extensible form fields of a BODYSTRUCTURE for
// multiparts.
//
// Fields in this struct are optional in IMAP4, and can be NIL or contain a value.
// The first field is always present, otherwise the "parent" struct would have a
// nil *BodyExtensionMpart. The second and later fields are nil when absent. For
// non-reference types (e.g. strings), an IMAP4 NIL is represented as a pointer to
// (*T)(nil). For reference types (e.g. slices), an IMAP4 NIL is represented by a
// pointer to nil.
type BodyExtensionMpart struct {
// ../rfc/9051:5986 ../rfc/3501:4161 ../rfc/9051:6371 ../rfc/3501:4599

Params [][2]string
Disposition **string
DispositionParams *[][2]string
Language *[]string
Location **string
More []BodyExtension // Nil if absent.
}
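To make the NIL convention described above concrete, a hypothetical helper that interprets the Disposition field:

package example

import "github.com/mjl-/mox/imapclient"

// disposition decodes the **string convention documented above: a nil outer
// pointer means the field was absent, a non-nil outer pointer to a nil inner
// pointer means an explicit IMAP4 NIL, anything else is the actual value.
func disposition(ext *imapclient.BodyExtensionMpart) string {
    if ext == nil || ext.Disposition == nil {
        return "absent"
    }
    if *ext.Disposition == nil {
        return "NIL"
    }
    return **ext.Disposition
}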
|
||||
|
||||
// BodyExtension1Part has the extensible form fields of a BODYSTRUCTURE for
|
||||
// non-multiparts.
|
||||
//
|
||||
// Fields in this struct are optional in IMAP4, and can be NIL or contain a value.
|
||||
// The first field is always present, otherwise the "parent" struct would have a
|
||||
// nil *BodyExtensionMpart. The second and later fields are nil when absent. For
|
||||
// non-reference types (e.g. strings), an IMAP4 NIL is represented as a pointer to
|
||||
// (*T)(nil). For reference types (e.g. slices), an IMAP4 NIL is represented by a
|
||||
// pointer to nil.
|
||||
type BodyExtension1Part struct {
|
||||
// ../rfc/9051:6023 ../rfc/3501:4191 ../rfc/9051:6366 ../rfc/3501:4584
|
||||
|
||||
MD5 *string
|
||||
Disposition **string
|
||||
DispositionParams *[][2]string
|
||||
Language *[]string
|
||||
Location **string
|
||||
More []BodyExtension // Nil means absent.
|
||||
}
|
||||
|
||||
// BodyExtension has the additional extension fields for future expansion of
|
||||
// extensions.
|
||||
type BodyExtension struct {
|
||||
String *string
|
||||
Number *int64
|
||||
More []BodyExtension
|
||||
}
|
||||
|
||||
// "BINARY" fetch response.
|
||||
@ -538,3 +823,12 @@ func (f FetchUID) Attr() string { return "UID" }
|
||||
type FetchModSeq int64
|
||||
|
||||
func (f FetchModSeq) Attr() string { return "MODSEQ" }
|
||||
|
||||
// "PREVIEW" fetch response.
|
||||
type FetchPreview struct {
|
||||
Preview *string
|
||||
}
|
||||
|
||||
// ../rfc/8970:146
|
||||
|
||||
func (f FetchPreview) Attr() string { return "PREVIEW" }
|
||||
|
@ -7,22 +7,30 @@ import (
|
||||
)
|
||||
|
||||
func TestAppend(t *testing.T) {
|
||||
testAppend(t, false)
|
||||
}
|
||||
|
||||
func TestAppendUIDOnly(t *testing.T) {
|
||||
testAppend(t, true)
|
||||
}
|
||||
|
||||
func testAppend(t *testing.T, uidonly bool) {
|
||||
defer mockUIDValidity()()
|
||||
|
||||
tc := start(t) // note: with switchboard because this connection stays alive unlike tc2.
|
||||
tc := start(t, uidonly) // note: with switchboard because this connection stays alive unlike tc2.
|
||||
defer tc.close()
|
||||
|
||||
tc2 := startNoSwitchboard(t) // note: without switchboard because this connection will break during tests.
|
||||
defer tc2.close()
|
||||
tc2 := startNoSwitchboard(t, uidonly) // note: without switchboard because this connection will break during tests.
|
||||
defer tc2.closeNoWait()
|
||||
|
||||
tc3 := startNoSwitchboard(t)
|
||||
defer tc3.close()
|
||||
tc3 := startNoSwitchboard(t, uidonly)
|
||||
defer tc3.closeNoWait()
|
||||
|
||||
tc2.client.Login("mjl@mox.example", password0)
|
||||
tc2.login("mjl@mox.example", password0)
|
||||
tc2.client.Select("inbox")
|
||||
tc.client.Login("mjl@mox.example", password0)
|
||||
tc.login("mjl@mox.example", password0)
|
||||
tc.client.Select("inbox")
|
||||
tc3.client.Login("mjl@mox.example", password0)
|
||||
tc3.login("mjl@mox.example", password0)
|
||||
|
||||
tc2.transactf("bad", "append") // Missing params.
|
||||
tc2.transactf("bad", `append inbox`) // Missing message.
|
||||
@ -30,43 +38,44 @@ func TestAppend(t *testing.T) {
|
||||
|
||||
// Syntax error for line ending in literal causes connection abort.
|
||||
tc2.transactf("bad", "append inbox (\\Badflag) {1+}\r\nx") // Unknown flag.
|
||||
tc2 = startNoSwitchboard(t)
|
||||
defer tc2.close()
|
||||
tc2.client.Login("mjl@mox.example", password0)
|
||||
tc2 = startNoSwitchboard(t, uidonly)
|
||||
defer tc2.closeNoWait()
|
||||
tc2.login("mjl@mox.example", password0)
|
||||
tc2.client.Select("inbox")
|
||||
|
||||
tc2.transactf("bad", "append inbox () \"bad time\" {1+}\r\nx") // Bad time.
|
||||
tc2 = startNoSwitchboard(t)
|
||||
defer tc2.close()
|
||||
tc2.client.Login("mjl@mox.example", password0)
|
||||
tc2 = startNoSwitchboard(t, uidonly)
|
||||
defer tc2.closeNoWait()
|
||||
tc2.login("mjl@mox.example", password0)
|
||||
tc2.client.Select("inbox")
|
||||
|
||||
tc2.transactf("no", "append nobox (\\Seen) \" 1-Jan-2022 10:10:00 +0100\" {1}")
|
||||
tc2.xcode("TRYCREATE")
|
||||
tc2.xcodeWord("TRYCREATE")
|
||||
|
||||
tc2.transactf("no", "append expungebox (\\Seen) {1}")
|
||||
tc2.xcodeWord("TRYCREATE")
|
||||
|
||||
tc2.transactf("ok", "append inbox (\\Seen Label1 $label2) \" 1-Jan-2022 10:10:00 +0100\" {1+}\r\nx")
|
||||
tc2.xuntagged(imapclient.UntaggedExists(1))
|
||||
tc2.xcodeArg(imapclient.CodeAppendUID{UIDValidity: 1, UID: 1})
|
||||
tc2.xcode(imapclient.CodeAppendUID{UIDValidity: 1, UIDs: xparseUIDRange("1")})
|
||||
|
||||
tc.transactf("ok", "noop")
|
||||
uid1 := imapclient.FetchUID(1)
|
||||
flags := imapclient.FetchFlags{`\Seen`, "$label2", "label1"}
|
||||
tc.xuntagged(imapclient.UntaggedExists(1), imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, flags}})
|
||||
tc.xuntagged(imapclient.UntaggedExists(1), tc.untaggedFetch(1, 1, flags))
|
||||
tc3.transactf("ok", "noop")
|
||||
tc3.xuntagged() // Inbox is not selected, nothing to report.
|
||||
|
||||
tc2.transactf("ok", "append inbox (\\Seen) \" 1-Jan-2022 10:10:00 +0100\" UTF8 ({47+}\r\ncontent-type: just completely invalid;;\r\n\r\ntest)")
|
||||
tc2.transactf("ok", "append inbox (\\Seen) \" 1-Jan-2022 10:10:00 +0100\" UTF8 (~{47+}\r\ncontent-type: just completely invalid;;\r\n\r\ntest)")
|
||||
tc2.xuntagged(imapclient.UntaggedExists(2))
|
||||
tc2.xcodeArg(imapclient.CodeAppendUID{UIDValidity: 1, UID: 2})
|
||||
tc2.xcode(imapclient.CodeAppendUID{UIDValidity: 1, UIDs: xparseUIDRange("2")})
|
||||
|
||||
tc2.transactf("ok", "append inbox (\\Seen) \" 1-Jan-2022 10:10:00 +0100\" UTF8 ({31+}\r\ncontent-type: text/plain;\n\ntest)")
|
||||
tc2.transactf("ok", "append inbox (\\Seen) \" 1-Jan-2022 10:10:00 +0100\" UTF8 (~{31+}\r\ncontent-type: text/plain;\n\ntest)")
|
||||
tc2.xuntagged(imapclient.UntaggedExists(3))
|
||||
tc2.xcodeArg(imapclient.CodeAppendUID{UIDValidity: 1, UID: 3})
|
||||
tc2.xcode(imapclient.CodeAppendUID{UIDValidity: 1, UIDs: xparseUIDRange("3")})
|
||||
|
||||
// Messages that we cannot parse are marked as application/octet-stream. Perhaps
|
||||
// the imap client knows how to deal with them.
|
||||
tc2.transactf("ok", "uid fetch 2 body")
|
||||
uid2 := imapclient.FetchUID(2)
|
||||
xbs := imapclient.FetchBodystructure{
|
||||
RespAttr: "BODY",
|
||||
Body: imapclient.BodyTypeBasic{
|
||||
@ -77,16 +86,50 @@ func TestAppend(t *testing.T) {
|
||||
},
|
||||
},
|
||||
}
|
||||
tc2.xuntagged(imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, xbs}})
|
||||
tc2.xuntagged(tc.untaggedFetch(2, 2, xbs))
|
||||
|
||||
tclimit := startArgs(t, false, false, true, true, "limit")
|
||||
// Multiappend with two messages.
|
||||
tc.transactf("ok", "noop") // Flush pending untagged responses.
|
||||
tc.transactf("ok", "append inbox {6+}\r\ntest\r\n ~{6+}\r\ntost\r\n")
|
||||
tc.xuntagged(imapclient.UntaggedExists(5))
|
||||
tc.xcode(imapclient.CodeAppendUID{UIDValidity: 1, UIDs: xparseUIDRange("4:5")})
|
||||
|
||||
// Cancelled with zero-length message.
|
||||
tc.transactf("no", "append inbox {6+}\r\ntest\r\n {0+}\r\n")
|
||||
|
||||
tclimit := startArgs(t, uidonly, false, false, true, true, "limit")
|
||||
defer tclimit.close()
|
||||
tclimit.client.Login("limit@mox.example", password0)
|
||||
tclimit.login("limit@mox.example", password0)
|
||||
tclimit.client.Select("inbox")
|
||||
// First message of 1 byte is within limits.
|
||||
tclimit.transactf("ok", "append inbox (\\Seen Label1 $label2) \" 1-Jan-2022 10:10:00 +0100\" {1+}\r\nx")
|
||||
tclimit.xuntagged(imapclient.UntaggedExists(1))
|
||||
// Second message would take account past limit.
|
||||
tclimit.transactf("no", "append inbox (\\Seen Label1 $label2) \" 1-Jan-2022 10:10:00 +0100\" {1+}\r\nx")
|
||||
tclimit.xcode("OVERQUOTA")
|
||||
tclimit.xcodeWord("OVERQUOTA")
|
||||
|
||||
// Empty mailbox.
|
||||
if uidonly {
|
||||
tclimit.transactf("ok", `uid store 1 flags (\deleted)`)
|
||||
} else {
|
||||
tclimit.transactf("ok", `store 1 flags (\deleted)`)
|
||||
}
|
||||
tclimit.transactf("ok", "expunge")
|
||||
|
||||
// Multiappend with first message within quota, and second message with sync
|
||||
// literal causing quota error. Request should get error response immediately.
|
||||
tclimit.transactf("no", "append inbox {1+}\r\nx {100000}")
|
||||
tclimit.xcodeWord("OVERQUOTA")
|
||||
|
||||
// Again, but second message now with non-sync literal, which is fully consumed by server.
|
||||
tclimit.client.WriteCommandf("", "append inbox {1+}\r\nx {4000+}")
|
||||
buf := make([]byte, 4000, 4002)
|
||||
for i := range buf {
|
||||
buf[i] = 'x'
|
||||
}
|
||||
buf = append(buf, "\r\n"...)
|
||||
_, err := tclimit.client.Write(buf)
|
||||
tclimit.check(err, "write append message")
|
||||
tclimit.response("no")
|
||||
tclimit.xcodeWord("OVERQUOTA")
|
||||
}
|
||||
|
@ -1,65 +1,74 @@
|
||||
package imapserver
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/hmac"
|
||||
"crypto/md5"
|
||||
"crypto/sha1"
|
||||
"crypto/sha256"
|
||||
"crypto/tls"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"golang.org/x/text/secure/precis"
|
||||
|
||||
"github.com/mjl-/mox/imapclient"
|
||||
"github.com/mjl-/mox/mox-"
|
||||
"github.com/mjl-/mox/scram"
|
||||
"github.com/mjl-/mox/store"
|
||||
)
|
||||
|
||||
func TestAuthenticateLogin(t *testing.T) {
|
||||
// NFD username and PRECIS-cleaned password.
|
||||
tc := start(t)
|
||||
tc := start(t, false)
|
||||
tc.client.Login("mo\u0301x@mox.example", password1)
|
||||
tc.close()
|
||||
}
|
||||
|
||||
func TestAuthenticatePlain(t *testing.T) {
|
||||
tc := start(t)
|
||||
tc := start(t, false)
|
||||
|
||||
tc.transactf("no", "authenticate bogus ")
|
||||
tc.transactf("bad", "authenticate plain not base64...")
|
||||
tc.transactf("no", "authenticate plain %s", base64.StdEncoding.EncodeToString([]byte("\u0000baduser\u0000badpass")))
|
||||
tc.xcode("AUTHENTICATIONFAILED")
|
||||
tc.xcodeWord("AUTHENTICATIONFAILED")
|
||||
tc.transactf("no", "authenticate plain %s", base64.StdEncoding.EncodeToString([]byte("\u0000mjl@mox.example\u0000badpass")))
|
||||
tc.xcode("AUTHENTICATIONFAILED")
|
||||
tc.xcodeWord("AUTHENTICATIONFAILED")
|
||||
tc.transactf("no", "authenticate plain %s", base64.StdEncoding.EncodeToString([]byte("\u0000mjl\u0000badpass"))) // Need email, not account.
|
||||
tc.xcode("AUTHENTICATIONFAILED")
|
||||
tc.xcodeWord("AUTHENTICATIONFAILED")
|
||||
tc.transactf("no", "authenticate plain %s", base64.StdEncoding.EncodeToString([]byte("\u0000mjl@mox.example\u0000test")))
|
||||
tc.xcode("AUTHENTICATIONFAILED")
|
||||
tc.xcodeWord("AUTHENTICATIONFAILED")
|
||||
tc.transactf("no", "authenticate plain %s", base64.StdEncoding.EncodeToString([]byte("\u0000mjl@mox.example\u0000test"+password0)))
|
||||
tc.xcode("AUTHENTICATIONFAILED")
|
||||
tc.xcodeWord("AUTHENTICATIONFAILED")
|
||||
tc.transactf("bad", "authenticate plain %s", base64.StdEncoding.EncodeToString([]byte("\u0000")))
|
||||
tc.xcode("")
|
||||
tc.xcode(nil)
|
||||
tc.transactf("no", "authenticate plain %s", base64.StdEncoding.EncodeToString([]byte("other\u0000mjl@mox.example\u0000"+password0)))
|
||||
tc.xcode("AUTHORIZATIONFAILED")
|
||||
tc.xcodeWord("AUTHORIZATIONFAILED")
|
||||
tc.transactf("ok", "authenticate plain %s", base64.StdEncoding.EncodeToString([]byte("\u0000mjl@mox.example\u0000"+password0)))
|
||||
tc.close()
|
||||
|
||||
tc = start(t)
|
||||
tc = start(t, false)
|
||||
tc.transactf("ok", "authenticate plain %s", base64.StdEncoding.EncodeToString([]byte("mjl@mox.example\u0000mjl@mox.example\u0000"+password0)))
|
||||
tc.close()
|
||||
|
||||
// NFD username and PRECIS-cleaned password.
|
||||
tc = start(t)
|
||||
tc = start(t, false)
|
||||
tc.transactf("ok", "authenticate plain %s", base64.StdEncoding.EncodeToString([]byte("mo\u0301x@mox.example\u0000mo\u0301x@mox.example\u0000"+password1)))
|
||||
tc.close()
|
||||
|
||||
tc = start(t)
|
||||
tc = start(t, false)
|
||||
tc.client.AuthenticatePlain("mjl@mox.example", password0)
|
||||
tc.close()
|
||||
|
||||
tc = start(t)
|
||||
tc = start(t, false)
|
||||
defer tc.close()
|
||||
|
||||
tc.cmdf("", "authenticate plain")
|
||||
@ -73,6 +82,28 @@ func TestAuthenticatePlain(t *testing.T) {
|
||||
tc.readstatus("ok")
|
||||
}
|
||||
|
||||
func TestLoginDisabled(t *testing.T) {
|
||||
tc := start(t, false)
|
||||
defer tc.close()
|
||||
|
||||
acc, err := store.OpenAccount(pkglog, "disabled", false)
|
||||
tcheck(t, err, "open account")
|
||||
err = acc.SetPassword(pkglog, "test1234")
|
||||
tcheck(t, err, "set password")
|
||||
err = acc.Close()
|
||||
tcheck(t, err, "close account")
|
||||
|
||||
tc.transactf("no", "authenticate plain %s", base64.StdEncoding.EncodeToString([]byte("\u0000disabled@mox.example\u0000test1234")))
|
||||
tc.xcode(nil)
|
||||
tc.transactf("no", "authenticate plain %s", base64.StdEncoding.EncodeToString([]byte("\u0000disabled@mox.example\u0000bogus")))
|
||||
tc.xcodeWord("AUTHENTICATIONFAILED")
|
||||
|
||||
tc.transactf("no", "login disabled@mox.example test1234")
|
||||
tc.xcode(nil)
|
||||
tc.transactf("no", "login disabled@mox.example bogus")
|
||||
tc.xcodeWord("AUTHENTICATIONFAILED")
|
||||
}
|
||||
|
||||
func TestAuthenticateSCRAMSHA1(t *testing.T) {
|
||||
testAuthenticateSCRAM(t, false, "SCRAM-SHA-1", sha1.New)
|
||||
}
|
||||
@ -90,7 +121,7 @@ func TestAuthenticateSCRAMSHA256PLUS(t *testing.T) {
|
||||
}
|
||||
|
||||
func testAuthenticateSCRAM(t *testing.T, tls bool, method string, h func() hash.Hash) {
|
||||
tc := startArgs(t, true, tls, true, true, "mjl")
|
||||
tc := startArgs(t, false, true, tls, true, true, "mjl")
|
||||
tc.client.AuthenticateSCRAM(method, h, "mjl@mox.example", password0)
|
||||
tc.close()
|
||||
|
||||
@ -101,15 +132,11 @@ func testAuthenticateSCRAM(t *testing.T, tls bool, method string, h func() hash.
|
||||
sc := scram.NewClient(h, username, "", noServerPlus, tc.client.TLSConnectionState())
|
||||
clientFirst, err := sc.ClientFirst()
|
||||
tc.check(err, "scram clientFirst")
|
||||
tc.client.LastTag = "x001"
|
||||
tc.writelinef("%s authenticate %s %s", tc.client.LastTag, method, base64.StdEncoding.EncodeToString([]byte(clientFirst)))
|
||||
tc.client.WriteCommandf("", "authenticate %s %s", method, base64.StdEncoding.EncodeToString([]byte(clientFirst)))
|
||||
|
||||
xreadContinuation := func() []byte {
|
||||
line, _, result, rerr := tc.client.ReadContinuation()
|
||||
tc.check(rerr, "read continuation")
|
||||
if result.Status != "" {
|
||||
tc.t.Fatalf("expected continuation")
|
||||
}
|
||||
line, err := tc.client.ReadContinuation()
|
||||
tcheck(t, err, "read continuation")
|
||||
buf, err := base64.StdEncoding.DecodeString(line)
|
||||
tc.check(err, "parsing base64 from remote")
|
||||
return buf
|
||||
@ -132,14 +159,14 @@ func testAuthenticateSCRAM(t *testing.T, tls bool, method string, h func() hash.
|
||||
} else {
|
||||
tc.writelinef("")
|
||||
}
|
||||
_, result, err := tc.client.Response()
|
||||
resp, err := tc.client.ReadResponse()
|
||||
tc.check(err, "read response")
|
||||
if string(result.Status) != strings.ToUpper(status) {
|
||||
tc.t.Fatalf("got status %q, expected %q", result.Status, strings.ToUpper(status))
|
||||
if string(resp.Status) != strings.ToUpper(status) {
|
||||
tc.t.Fatalf("got status %q, expected %q", resp.Status, strings.ToUpper(status))
|
||||
}
|
||||
}
|
||||
|
||||
tc = startArgs(t, true, tls, true, true, "mjl")
|
||||
tc = startArgs(t, false, true, tls, true, true, "mjl")
|
||||
auth("no", scram.ErrInvalidProof, "mjl@mox.example", "badpass")
|
||||
auth("no", scram.ErrInvalidProof, "mjl@mox.example", "")
|
||||
// todo: server aborts due to invalid username. we should probably make client continue with fake deterministically generated salt and result in error in the end.
|
||||
@ -147,7 +174,7 @@ func testAuthenticateSCRAM(t *testing.T, tls bool, method string, h func() hash.
|
||||
|
||||
tc.transactf("no", "authenticate bogus ")
|
||||
tc.transactf("bad", "authenticate %s not base64...", method)
|
||||
tc.transactf("bad", "authenticate %s %s", method, base64.StdEncoding.EncodeToString([]byte("bad data")))
|
||||
tc.transactf("no", "authenticate %s %s", method, base64.StdEncoding.EncodeToString([]byte("bad data")))
|
||||
|
||||
// NFD username, with PRECIS-cleaned password.
|
||||
auth("ok", nil, "mo\u0301x@mox.example", password1)
|
||||
@ -156,7 +183,7 @@ func testAuthenticateSCRAM(t *testing.T, tls bool, method string, h func() hash.
|
||||
}
|
||||
|
||||
func TestAuthenticateCRAMMD5(t *testing.T) {
|
||||
tc := start(t)
|
||||
tc := start(t, false)
|
||||
|
||||
tc.transactf("no", "authenticate bogus ")
|
||||
tc.transactf("bad", "authenticate CRAM-MD5 not base64...")
|
||||
@ -166,15 +193,11 @@ func TestAuthenticateCRAMMD5(t *testing.T) {
|
||||
auth := func(status string, username, password string) {
|
||||
t.Helper()
|
||||
|
||||
tc.client.LastTag = "x001"
|
||||
tc.writelinef("%s authenticate CRAM-MD5", tc.client.LastTag)
|
||||
tc.client.WriteCommandf("", "authenticate CRAM-MD5")
|
||||
|
||||
xreadContinuation := func() []byte {
|
||||
line, _, result, rerr := tc.client.ReadContinuation()
|
||||
tc.check(rerr, "read continuation")
|
||||
if result.Status != "" {
|
||||
tc.t.Fatalf("expected continuation")
|
||||
}
|
||||
line, err := tc.client.ReadContinuation()
|
||||
tcheck(t, err, "read continuation")
|
||||
buf, err := base64.StdEncoding.DecodeString(line)
|
||||
tc.check(err, "parsing base64 from remote")
|
||||
return buf
|
||||
@ -187,13 +210,13 @@ func TestAuthenticateCRAMMD5(t *testing.T) {
|
||||
}
|
||||
h := hmac.New(md5.New, []byte(password))
|
||||
h.Write([]byte(chal))
|
||||
resp := fmt.Sprintf("%s %x", username, h.Sum(nil))
|
||||
tc.writelinef("%s", base64.StdEncoding.EncodeToString([]byte(resp)))
|
||||
data := fmt.Sprintf("%s %x", username, h.Sum(nil))
|
||||
tc.writelinef("%s", base64.StdEncoding.EncodeToString([]byte(data)))
|
||||
|
||||
_, result, err := tc.client.Response()
|
||||
resp, err := tc.client.ReadResponse()
|
||||
tc.check(err, "read response")
|
||||
if string(result.Status) != strings.ToUpper(status) {
|
||||
tc.t.Fatalf("got status %q, expected %q", result.Status, strings.ToUpper(status))
|
||||
if string(resp.Status) != strings.ToUpper(status) {
|
||||
tc.t.Fatalf("got status %q, expected %q", resp.Status, strings.ToUpper(status))
|
||||
}
|
||||
}
|
||||
|
||||
@ -206,7 +229,154 @@ func TestAuthenticateCRAMMD5(t *testing.T) {
|
||||
tc.close()
|
||||
|
||||
// NFD username, with PRECIS-cleaned password.
|
||||
tc = start(t)
|
||||
tc = start(t, false)
|
||||
auth("ok", "mo\u0301x@mox.example", password1)
|
||||
tc.close()
|
||||
}
|
||||
|
||||
func TestAuthenticateTLSClientCert(t *testing.T) {
|
||||
tc := startArgsMore(t, false, true, true, nil, nil, true, true, "mjl", nil)
|
||||
tc.transactf("no", "authenticate external ") // No TLS auth.
|
||||
tc.close()
|
||||
|
||||
// Create a certificate, register its public key with account, and make a tls
|
||||
// client config that sends the certificate.
|
||||
clientCert0 := fakeCert(t, true)
|
||||
clientConfig := tls.Config{
|
||||
InsecureSkipVerify: true,
|
||||
Certificates: []tls.Certificate{clientCert0},
|
||||
}
|
||||
|
||||
tlspubkey, err := store.ParseTLSPublicKeyCert(clientCert0.Certificate[0])
|
||||
tcheck(t, err, "parse certificate")
|
||||
tlspubkey.Account = "mjl"
|
||||
tlspubkey.LoginAddress = "mjl@mox.example"
|
||||
tlspubkey.NoIMAPPreauth = true
|
||||
|
||||
addClientCert := func() error {
|
||||
return store.TLSPublicKeyAdd(ctxbg, &tlspubkey)
|
||||
}
|
||||
|
||||
// No preauth, explicit authenticate with TLS.
|
||||
tc = startArgsMore(t, false, true, true, nil, &clientConfig, false, true, "mjl", addClientCert)
|
||||
if tc.client.Preauth {
|
||||
t.Fatalf("preauthentication while not configured for tls public key")
|
||||
}
|
||||
tc.transactf("ok", "authenticate external ")
|
||||
tc.close()
|
||||
|
||||
// External with explicit username.
|
||||
tc = startArgsMore(t, false, true, true, nil, &clientConfig, false, true, "mjl", addClientCert)
|
||||
if tc.client.Preauth {
|
||||
t.Fatalf("preauthentication while not configured for tls public key")
|
||||
}
|
||||
tc.transactf("ok", "authenticate external %s", base64.StdEncoding.EncodeToString([]byte("mjl@mox.example")))
|
||||
tc.close()
|
||||
|
||||
// No preauth, also allow other mechanisms.
|
||||
tc = startArgsMore(t, false, true, true, nil, &clientConfig, false, true, "mjl", addClientCert)
|
||||
tc.transactf("ok", "authenticate plain %s", base64.StdEncoding.EncodeToString([]byte("\u0000mjl@mox.example\u0000"+password0)))
|
||||
tc.close()
|
||||
|
||||
// No preauth, also allow other username for same account.
|
||||
tc = startArgsMore(t, false, true, true, nil, &clientConfig, false, true, "mjl", addClientCert)
|
||||
tc.transactf("ok", "authenticate plain %s", base64.StdEncoding.EncodeToString([]byte("\u0000móx@mox.example\u0000"+password0)))
|
||||
tc.close()
|
||||
|
||||
// No preauth, other mechanism must be for same account.
|
||||
acc, err := store.OpenAccount(pkglog, "other", false)
|
||||
tcheck(t, err, "open account")
|
||||
err = acc.SetPassword(pkglog, "test1234")
|
||||
tcheck(t, err, "set password")
|
||||
err = acc.Close()
|
||||
tcheck(t, err, "close account")
|
||||
tc = startArgsMore(t, false, true, true, nil, &clientConfig, false, true, "mjl", addClientCert)
|
||||
tc.transactf("no", "authenticate plain %s", base64.StdEncoding.EncodeToString([]byte("\u0000other@mox.example\u0000test1234")))
|
||||
tc.close()
|
||||
|
||||
// Starttls and external auth.
|
||||
tc = startArgsMore(t, false, true, false, nil, &clientConfig, false, true, "mjl", addClientCert)
|
||||
tc.client.StartTLS(&clientConfig)
|
||||
tc.transactf("ok", "authenticate external =")
|
||||
tc.close()
|
||||
|
||||
tlspubkey.NoIMAPPreauth = false
|
||||
err = store.TLSPublicKeyUpdate(ctxbg, &tlspubkey)
|
||||
tcheck(t, err, "update tls public key")
|
||||
|
||||
// With preauth, no authenticate command needed/allowed.
|
||||
// Already set up tls session ticket cache, for next test.
|
||||
serverConfig := tls.Config{
|
||||
Certificates: []tls.Certificate{fakeCert(t, false)},
|
||||
}
|
||||
ctx, cancel := context.WithCancel(ctxbg)
|
||||
defer cancel()
|
||||
mox.StartTLSSessionTicketKeyRefresher(ctx, pkglog, &serverConfig)
|
||||
clientConfig.ClientSessionCache = tls.NewLRUClientSessionCache(10)
|
||||
tc = startArgsMore(t, false, true, true, &serverConfig, &clientConfig, false, true, "mjl", addClientCert)
|
||||
if !tc.client.Preauth {
|
||||
t.Fatalf("not preauthentication while configured for tls public key")
|
||||
}
|
||||
cs := tc.conn.(*tls.Conn).ConnectionState()
|
||||
if cs.DidResume {
|
||||
t.Fatalf("tls connection was resumed")
|
||||
}
|
||||
tc.transactf("no", "authenticate external ") // Not allowed, already in authenticated state.
|
||||
tc.close()
|
||||
|
||||
// Authentication works with TLS resumption.
|
||||
tc = startArgsMore(t, false, true, true, &serverConfig, &clientConfig, false, true, "mjl", addClientCert)
|
||||
if !tc.client.Preauth {
|
||||
t.Fatalf("not preauthentication while configured for tls public key")
|
||||
}
|
||||
cs = tc.conn.(*tls.Conn).ConnectionState()
|
||||
if !cs.DidResume {
|
||||
t.Fatalf("tls connection was not resumed")
|
||||
}
|
||||
// Check that operations that require an account work.
|
||||
tc.client.Enable(imapclient.CapIMAP4rev2)
|
||||
received, err := time.Parse(time.RFC3339, "2022-11-16T10:01:00+01:00")
|
||||
tc.check(err, "parse time")
|
||||
tc.client.Append("inbox", makeAppendTime(exampleMsg, received))
|
||||
tc.client.Select("inbox")
|
||||
tc.close()
|
||||
|
||||
// Authentication with unknown key should fail.
|
||||
// todo: less duplication, change startArgs so this can be merged into it.
|
||||
err = store.Close()
|
||||
tcheck(t, err, "store close")
|
||||
os.RemoveAll("../testdata/imap/data")
|
||||
err = store.Init(ctxbg)
|
||||
tcheck(t, err, "store init")
|
||||
mox.ConfigStaticPath = filepath.FromSlash("../testdata/imap/mox.conf")
|
||||
mox.MustLoadConfig(true, false)
|
||||
switchStop := store.Switchboard()
|
||||
defer switchStop()
|
||||
|
||||
serverConn, clientConn := net.Pipe()
|
||||
defer clientConn.Close()
|
||||
|
||||
done := make(chan struct{})
|
||||
defer func() { <-done }()
|
||||
connCounter++
|
||||
cid := connCounter
|
||||
go func() {
|
||||
defer serverConn.Close()
|
||||
serve("test", cid, &serverConfig, serverConn, true, false, false, false, "")
|
||||
close(done)
|
||||
}()
|
||||
|
||||
clientConfig.ClientSessionCache = nil
|
||||
clientConn = tls.Client(clientConn, &clientConfig)
|
||||
// note: It's not enough to do a handshake and check if that was successful. If the
|
||||
// client cert is not acceptable, we only learn after the handshake, when the first
|
||||
// data messages are exchanged.
|
||||
buf := make([]byte, 100)
|
||||
_, err = clientConn.Read(buf)
|
||||
if err == nil {
|
||||
t.Fatalf("tls handshake with unknown client certificate succeeded")
|
||||
}
|
||||
if alert, ok := mox.AsTLSAlert(err); !ok || alert != 42 {
|
||||
t.Fatalf("got err %#v, expected tls 'bad certificate' alert", err)
|
||||
}
|
||||
}
|
||||
|
imapserver/compress_test.go (new file, 82 lines)
@ -0,0 +1,82 @@
|
||||
package imapserver
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"encoding/base64"
|
||||
"io"
|
||||
mathrand "math/rand/v2"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestCompress(t *testing.T) {
|
||||
tc := start(t, false)
|
||||
defer tc.close()
|
||||
|
||||
tc.login("mjl@mox.example", password0)
|
||||
|
||||
tc.transactf("bad", "compress")
|
||||
tc.transactf("bad", "compress bogus ")
|
||||
tc.transactf("no", "compress bogus")
|
||||
|
||||
tc.client.CompressDeflate()
|
||||
tc.transactf("no", "compress deflate") // Cannot have multiple.
|
||||
tc.xcodeWord("COMPRESSIONACTIVE")
|
||||
|
||||
tc.client.Select("inbox")
|
||||
tc.transactf("ok", "append inbox (\\seen) {%d+}\r\n%s", len(exampleMsg), exampleMsg)
|
||||
tc.transactf("ok", "noop")
|
||||
tc.transactf("ok", "fetch 1 body.peek[1]")
|
||||
}
|
||||
|
||||
func TestCompressStartTLS(t *testing.T) {
|
||||
tc := start(t, false)
|
||||
defer tc.close()
|
||||
|
||||
tc.client.StartTLS(&tls.Config{InsecureSkipVerify: true})
|
||||
tc.login("mjl@mox.example", password0)
|
||||
tc.client.CompressDeflate()
|
||||
tc.client.Select("inbox")
|
||||
tc.transactf("ok", "append inbox (\\seen) {%d+}\r\n%s", len(exampleMsg), exampleMsg)
|
||||
tc.transactf("ok", "noop")
|
||||
tc.transactf("ok", "fetch 1 body.peek[1]")
|
||||
}
|
||||
|
||||
func TestCompressBreak(t *testing.T) {
|
||||
// Close the client connection when the server is writing. That causes writes in
|
||||
// the server to fail (panic), jumping out of the flate writer and leaving its
|
||||
// state inconsistent. We must not call into the flate writer again because due to
|
||||
// its broken internal state it may cause array out of bounds accesses.
|
||||
|
||||
tc := start(t, false)
|
||||
defer tc.close()
|
||||
|
||||
msg := exampleMsg
|
||||
// Add random data (so it is not compressible). Don't know why, but only
|
||||
// reproducible with large writes. As if setting socket buffers had no effect.
|
||||
buf := make([]byte, 64*1024)
|
||||
_, err := io.ReadFull(mathrand.NewChaCha8([32]byte{}), buf)
|
||||
tcheck(t, err, "read random")
|
||||
text := base64.StdEncoding.EncodeToString(buf)
|
||||
for len(text) > 0 {
|
||||
n := min(76, len(text))
|
||||
msg += text[:n] + "\r\n"
|
||||
text = text[n:]
|
||||
}
|
||||
|
||||
tc.login("mjl@mox.example", password0)
|
||||
tc.client.CompressDeflate()
|
||||
tc.client.Select("inbox")
|
||||
tc.transactf("ok", "append inbox (\\seen) {%d+}\r\n%s", len(msg), msg)
|
||||
tc.transactf("ok", "noop")
|
||||
|
||||
// Write request. Close connection instead of reading data. Write will panic,
|
||||
// coming through flate writer leaving its state inconsistent. Server must not try
|
||||
// to Flush/Write again on flate writer or it may panic.
|
||||
tc.client.Writelinef("x fetch 1 body.peek[1]")
|
||||
|
||||
// Close client connection and prevent cleanup from closing the client again.
|
||||
time.Sleep(time.Second / 10)
|
||||
tc.client = nil
|
||||
tc.conn.Close() // Simulate client disappearing.
|
||||
}
|
File diff suppressed because it is too large
@ -7,17 +7,25 @@ import (
|
||||
)
|
||||
|
||||
func TestCopy(t *testing.T) {
|
||||
testCopy(t, false)
|
||||
}
|
||||
|
||||
func TestCopyUIDOnly(t *testing.T) {
|
||||
testCopy(t, true)
|
||||
}
|
||||
|
||||
func testCopy(t *testing.T, uidonly bool) {
|
||||
defer mockUIDValidity()()
|
||||
tc := start(t)
|
||||
tc := start(t, uidonly)
|
||||
defer tc.close()
|
||||
|
||||
tc2 := startNoSwitchboard(t)
|
||||
defer tc2.close()
|
||||
tc2 := startNoSwitchboard(t, uidonly)
|
||||
defer tc2.closeNoWait()
|
||||
|
||||
tc.client.Login("mjl@mox.example", password0)
|
||||
tc.login("mjl@mox.example", password0)
|
||||
tc.client.Select("inbox")
|
||||
|
||||
tc2.client.Login("mjl@mox.example", password0)
|
||||
tc2.login("mjl@mox.example", password0)
|
||||
tc2.client.Select("Trash")
|
||||
|
||||
tc.transactf("bad", "copy") // Missing params.
|
||||
@ -25,48 +33,53 @@ func TestCopy(t *testing.T) {
|
||||
tc.transactf("bad", "copy 1 inbox ") // Leftover.
|
||||
|
||||
// Seqs 1,2 and UIDs 3,4.
|
||||
tc.client.Append("inbox", nil, nil, []byte(exampleMsg))
|
||||
tc.client.Append("inbox", nil, nil, []byte(exampleMsg))
|
||||
tc.client.StoreFlagsSet("1:2", true, `\Deleted`)
|
||||
tc.client.Append("inbox", makeAppend(exampleMsg))
|
||||
tc.client.Append("inbox", makeAppend(exampleMsg))
|
||||
tc.transactf("ok", `Uid Store 1:2 +Flags.Silent (\Deleted)`)
|
||||
tc.client.Expunge()
|
||||
tc.client.Append("inbox", nil, nil, []byte(exampleMsg))
|
||||
tc.client.Append("inbox", nil, nil, []byte(exampleMsg))
|
||||
tc.client.Append("inbox", makeAppend(exampleMsg))
|
||||
tc.client.Append("inbox", makeAppend(exampleMsg))
|
||||
|
||||
tc.transactf("no", "copy 1 nonexistent")
|
||||
tc.xcode("TRYCREATE")
|
||||
if uidonly {
|
||||
tc.transactf("ok", "uid copy 3:* Trash")
|
||||
} else {
|
||||
tc.transactf("no", "copy 1 nonexistent")
|
||||
tc.xcodeWord("TRYCREATE")
|
||||
tc.transactf("no", "copy 1 expungebox")
|
||||
tc.xcodeWord("TRYCREATE")
|
||||
|
||||
tc.transactf("no", "copy 1 inbox") // Cannot copy to same mailbox.
|
||||
tc.transactf("no", "copy 1 inbox") // Cannot copy to same mailbox.
|
||||
|
||||
tc2.transactf("ok", "noop") // Drain.
|
||||
tc2.transactf("ok", "noop") // Drain.
|
||||
|
||||
tc.transactf("ok", "copy 1:* Trash")
|
||||
ptr := func(v uint32) *uint32 { return &v }
|
||||
tc.xcodeArg(imapclient.CodeCopyUID{DestUIDValidity: 1, From: []imapclient.NumRange{{First: 3, Last: ptr(4)}}, To: []imapclient.NumRange{{First: 1, Last: ptr(2)}}})
|
||||
tc.transactf("ok", "copy 1:* Trash")
|
||||
tc.xcode(mustParseCode("COPYUID 1 3:4 1:2"))
|
||||
}
|
||||
tc2.transactf("ok", "noop")
|
||||
tc2.xuntagged(
|
||||
imapclient.UntaggedExists(2),
|
||||
imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{imapclient.FetchUID(1), imapclient.FetchFlags(nil)}},
|
||||
imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{imapclient.FetchUID(2), imapclient.FetchFlags(nil)}},
|
||||
tc2.untaggedFetch(1, 1, imapclient.FetchFlags(nil)),
|
||||
tc2.untaggedFetch(2, 2, imapclient.FetchFlags(nil)),
|
||||
)
|
||||
|
||||
tc.transactf("no", "uid copy 1,2 Trash") // No match.
|
||||
tc.transactf("ok", "uid copy 4,3 Trash")
|
||||
tc.xcodeArg(imapclient.CodeCopyUID{DestUIDValidity: 1, From: []imapclient.NumRange{{First: 3, Last: ptr(4)}}, To: []imapclient.NumRange{{First: 3, Last: ptr(4)}}})
|
||||
tc.xcode(mustParseCode("COPYUID 1 3:4 3:4"))
|
||||
tc2.transactf("ok", "noop")
|
||||
tc2.xuntagged(
|
||||
imapclient.UntaggedExists(4),
|
||||
imapclient.UntaggedFetch{Seq: 3, Attrs: []imapclient.FetchAttr{imapclient.FetchUID(3), imapclient.FetchFlags(nil)}},
|
||||
imapclient.UntaggedFetch{Seq: 4, Attrs: []imapclient.FetchAttr{imapclient.FetchUID(4), imapclient.FetchFlags(nil)}},
|
||||
tc2.untaggedFetch(3, 3, imapclient.FetchFlags(nil)),
|
||||
tc2.untaggedFetch(4, 4, imapclient.FetchFlags(nil)),
|
||||
)
|
||||
|
||||
tclimit := startArgs(t, false, false, true, true, "limit")
|
||||
tclimit := startArgs(t, uidonly, false, false, true, true, "limit")
|
||||
defer tclimit.close()
|
||||
tclimit.client.Login("limit@mox.example", password0)
|
||||
tclimit.login("limit@mox.example", password0)
|
||||
tclimit.client.Select("inbox")
|
||||
// First message of 1 byte is within limits.
|
||||
tclimit.transactf("ok", "append inbox (\\Seen Label1 $label2) \" 1-Jan-2022 10:10:00 +0100\" {1+}\r\nx")
|
||||
tclimit.xuntagged(imapclient.UntaggedExists(1))
|
||||
// Second message would take account past limit.
|
||||
tclimit.transactf("no", "copy 1:* Trash")
|
||||
tclimit.xcode("OVERQUOTA")
|
||||
tclimit.transactf("no", "uid copy 1:* Trash")
|
||||
tclimit.xcodeWord("OVERQUOTA")
|
||||
}
|
||||
|
@ -7,24 +7,42 @@ import (
|
||||
)
|
||||
|
||||
func TestCreate(t *testing.T) {
|
||||
tc := start(t)
|
||||
testCreate(t, false)
|
||||
}
|
||||
|
||||
func TestCreateUIDOnly(t *testing.T) {
|
||||
testCreate(t, true)
|
||||
}
|
||||
|
||||
func testCreate(t *testing.T, uidonly bool) {
|
||||
tc := start(t, uidonly)
|
||||
defer tc.close()
|
||||
|
||||
tc2 := startNoSwitchboard(t)
|
||||
defer tc2.close()
|
||||
tc2 := startNoSwitchboard(t, uidonly)
|
||||
defer tc2.closeNoWait()
|
||||
|
||||
tc.client.Login("mjl@mox.example", password0)
|
||||
tc2.client.Login("mjl@mox.example", password0)
|
||||
tc.login("mjl@mox.example", password0)
|
||||
tc2.login("mjl@mox.example", password0)
|
||||
|
||||
tc.transactf("no", "create inbox") // Already exists and not allowed. ../rfc/9051:1913
|
||||
tc.transactf("no", "create Inbox") // Idem.
|
||||
|
||||
// Don't allow names that can cause trouble when exporting to directories.
|
||||
tc.transactf("no", "create .")
|
||||
tc.transactf("no", "create ..")
|
||||
tc.transactf("no", "create legit/..")
|
||||
tc.transactf("ok", "create ...") // No special meaning.
|
||||
|
||||
// ../rfc/9051:1937
|
||||
tc.transactf("ok", "create inbox/a/c")
|
||||
tc.xuntagged(imapclient.UntaggedList{Flags: []string{`\Subscribed`}, Separator: '/', Mailbox: "Inbox/a"}, imapclient.UntaggedList{Flags: []string{`\Subscribed`}, Separator: '/', Mailbox: "Inbox/a/c"})
|
||||
|
||||
tc2.transactf("ok", "noop")
|
||||
tc2.xuntagged(imapclient.UntaggedList{Flags: []string{`\Subscribed`}, Separator: '/', Mailbox: "Inbox/a"}, imapclient.UntaggedList{Flags: []string{`\Subscribed`}, Separator: '/', Mailbox: "Inbox/a/c"})
|
||||
tc2.xuntagged(
|
||||
imapclient.UntaggedList{Flags: []string{`\Subscribed`}, Separator: '/', Mailbox: "..."},
|
||||
imapclient.UntaggedList{Flags: []string{`\Subscribed`}, Separator: '/', Mailbox: "Inbox/a"},
|
||||
imapclient.UntaggedList{Flags: []string{`\Subscribed`}, Separator: '/', Mailbox: "Inbox/a/c"},
|
||||
)
|
||||
|
||||
tc.transactf("no", "create inbox/a/c") // Exists.
|
||||
|
||||
@ -39,7 +57,7 @@ func TestCreate(t *testing.T) {
|
||||
tc.xuntagged(imapclient.UntaggedList{Flags: []string{`\Subscribed`}, Separator: '/', Mailbox: "mailbox"})
|
||||
|
||||
// OldName is only set for IMAP4rev2 or NOTIFY.
|
||||
tc.client.Enable("imap4rev2")
|
||||
tc.client.Enable(imapclient.CapIMAP4rev2)
|
||||
tc.transactf("ok", "create mailbox2/")
|
||||
tc.xuntagged(imapclient.UntaggedList{Flags: []string{`\Subscribed`}, Separator: '/', Mailbox: "mailbox2", OldName: "mailbox2/"})
|
||||
|
||||
@ -72,8 +90,19 @@ func TestCreate(t *testing.T) {
|
||||
tc.transactf("no", `create "#"`) // Leading hash not allowed.
|
||||
tc.transactf("ok", `create "test#"`)
|
||||
|
||||
// Create with flags.
|
||||
tc.transactf("no", `create "newwithflags" (use (\unknown))`)
|
||||
tc.transactf("no", `create "newwithflags" (use (\all))`)
|
||||
tc.transactf("ok", `create "newwithflags" (use (\archive))`)
|
||||
tc.transactf("ok", "noop")
|
||||
tc.xuntagged()
|
||||
tc.transactf("ok", `create "newwithflags2" (use (\archive) use (\drafts \sent))`)
|
||||
|
||||
// UTF-7 checks are only for IMAP4 before rev2 and without UTF8=ACCEPT.
|
||||
tc.transactf("ok", `create "&"`) // Interpreted as UTF-8, no UTF-7.
|
||||
tc2.transactf("bad", `create "&"`) // Bad UTF-7.
|
||||
tc2.transactf("ok", `create "&Jjo-"`) // ☺, valid UTF-7.
|
||||
|
||||
tc.transactf("ok", "create expungebox") // Existed in past.
|
||||
tc.transactf("ok", "delete expungebox") // Gone again.
|
||||
}
|
||||
|
@ -7,36 +7,45 @@ import (
|
||||
)
|
||||
|
||||
func TestDelete(t *testing.T) {
|
||||
tc := start(t)
|
||||
testDelete(t, false)
|
||||
}
|
||||
|
||||
func TestDeleteUIDOnly(t *testing.T) {
|
||||
testDelete(t, true)
|
||||
}

func testDelete(t *testing.T, uidonly bool) {
tc := start(t, uidonly)
defer tc.close()

tc2 := startNoSwitchboard(t)
defer tc2.close()
tc2 := startNoSwitchboard(t, uidonly)
defer tc2.closeNoWait()

tc3 := startNoSwitchboard(t)
defer tc3.close()
tc3 := startNoSwitchboard(t, uidonly)
defer tc3.closeNoWait()

tc.client.Login("mjl@mox.example", password0)
tc2.client.Login("mjl@mox.example", password0)
tc3.client.Login("mjl@mox.example", password0)
tc.login("mjl@mox.example", password0)
tc2.login("mjl@mox.example", password0)
tc3.login("mjl@mox.example", password0)

tc.transactf("bad", "delete") // Missing mailbox.
tc.transactf("no", "delete inbox") // Cannot delete inbox.
tc.transactf("no", "delete nonexistent") // Cannot delete mailbox that does not exist.
tc.transactf("no", `delete "nonexistent"`) // Again, with quoted string syntax.
tc.transactf("no", `delete "expungebox"`) // Already removed.

tc.client.Subscribe("x")
tc.transactf("no", "delete x") // Subscription does not mean there is a mailbox that can be deleted.

tc.client.Create("a/b")
tc.client.Create("a/b", nil)
tc2.transactf("ok", "noop") // Drain changes.
tc3.transactf("ok", "noop")

// ../rfc/9051:2000
tc.transactf("no", "delete a") // Still has child.
tc.xcode("HASCHILDREN")
tc.xcodeWord("HASCHILDREN")

tc3.client.Enable("IMAP4rev2") // For \NonExistent support.
tc3.client.Enable(imapclient.CapIMAP4rev2) // For \NonExistent support.
tc.transactf("ok", "delete a/b")
tc2.transactf("ok", "noop")
tc2.xuntagged() // No IMAP4rev2, no \NonExistent.
@@ -53,12 +62,12 @@ func TestDelete(t *testing.T) {
)

// Let's try again with a message present.
tc.client.Create("msgs")
tc.client.Append("msgs", nil, nil, []byte(exampleMsg))
tc.client.Create("msgs", nil)
tc.client.Append("msgs", makeAppend(exampleMsg))
tc.transactf("ok", "delete msgs")

// Delete for inbox/* is allowed.
tc.client.Create("inbox/a")
tc.client.Create("inbox/a", nil)
tc.transactf("ok", "delete inbox/a")

}
@@ -57,3 +57,9 @@ func xsyntaxErrorf(format string, args ...any) {
err := errors.New(errmsg)
panic(syntaxError{"", "", errmsg, err})
}

func xsyntaxCodeErrorf(code, format string, args ...any) {
errmsg := fmt.Sprintf(format, args...)
err := errors.New(errmsg)
panic(syntaxError{"", code, errmsg, err})
}
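The new xsyntaxCodeErrorf follows the same panic-based control flow as the other x-prefixed helpers: build the error, panic with a typed value, and let the command loop recover it into a tagged response. A rough standalone sketch of that pattern (the struct fields and the serve loop here are assumptions for illustration, not the repository's actual definitions):

package main

import (
	"errors"
	"fmt"
)

type syntaxError struct {
	line, code, errmsg string
	err                error
}

func xsyntaxCodeErrorf(code, format string, args ...any) {
	errmsg := fmt.Sprintf(format, args...)
	panic(syntaxError{"", code, errmsg, errors.New(errmsg)})
}

// serve stands in for the command loop: run a command, and turn a recovered
// syntaxError into a tagged BAD response with its response code.
func serve(cmd func()) (result string) {
	defer func() {
		if x := recover(); x != nil {
			if serr, ok := x.(syntaxError); ok {
				result = fmt.Sprintf("tag BAD [%s] %s", serr.code, serr.errmsg)
				return
			}
			panic(x)
		}
	}()
	cmd()
	return "tag OK done"
}

func main() {
	fmt.Println(serve(func() { xsyntaxCodeErrorf("TOOBIG", "max size exceeded") }))
}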
@@ -7,17 +7,25 @@ import (
)

func TestExpunge(t *testing.T) {
testExpunge(t, false)
}

func TestExpungeUIDOnly(t *testing.T) {
testExpunge(t, true)
}

func testExpunge(t *testing.T, uidonly bool) {
defer mockUIDValidity()()
tc := start(t)
tc := start(t, uidonly)
defer tc.close()

tc2 := startNoSwitchboard(t)
defer tc2.close()
tc2 := startNoSwitchboard(t, uidonly)
defer tc2.closeNoWait()

tc.client.Login("mjl@mox.example", password0)
tc.login("mjl@mox.example", password0)
tc.client.Select("inbox")

tc2.client.Login("mjl@mox.example", password0)
tc2.login("mjl@mox.example", password0)
tc2.client.Select("inbox")

tc.transactf("bad", "expunge leftover") // Leftover data.
@@ -31,35 +39,43 @@ func TestExpunge(t *testing.T) {

tc.client.Unselect()
tc.client.Select("inbox")
tc.client.Append("inbox", nil, nil, []byte(exampleMsg))
tc.client.Append("inbox", nil, nil, []byte(exampleMsg))
tc.client.Append("inbox", nil, nil, []byte(exampleMsg))
tc.client.Append("inbox", makeAppend(exampleMsg))
tc.client.Append("inbox", makeAppend(exampleMsg))
tc.client.Append("inbox", makeAppend(exampleMsg))
tc.transactf("ok", "expunge") // Still nothing to remove.
tc.xuntagged()

tc.client.StoreFlagsAdd("1,3", true, `\Deleted`)
tc.transactf("ok", `uid store 1,3 +flags.silent \Deleted`)

tc2.transactf("ok", "noop") // Drain.

tc.transactf("ok", "expunge")
tc.xuntagged(imapclient.UntaggedExpunge(1), imapclient.UntaggedExpunge(2))
if uidonly {
tc.xuntagged(imapclient.UntaggedVanished{UIDs: xparseNumSet("1,3")})
} else {
tc.xuntagged(imapclient.UntaggedExpunge(1), imapclient.UntaggedExpunge(2))
}

tc2.transactf("ok", "noop") // Drain.
tc2.xuntagged(imapclient.UntaggedExpunge(1), imapclient.UntaggedExpunge(2))
if uidonly {
tc2.xuntagged(imapclient.UntaggedVanished{UIDs: xparseNumSet("1,3")})
} else {
tc2.xuntagged(imapclient.UntaggedExpunge(1), imapclient.UntaggedExpunge(2))
}

tc.transactf("ok", "expunge") // Nothing to remove anymore.
tc.xuntagged()

// Only UID 2 is still left. We'll add 3 more. Getting us to UIDs 2,4,5,6.
tc.client.Append("inbox", nil, nil, []byte(exampleMsg))
tc.client.Append("inbox", nil, nil, []byte(exampleMsg))
tc.client.Append("inbox", nil, nil, []byte(exampleMsg))
tc.client.Append("inbox", makeAppend(exampleMsg))
tc.client.Append("inbox", makeAppend(exampleMsg))
tc.client.Append("inbox", makeAppend(exampleMsg))

tc.transactf("bad", "uid expunge") // Missing uid set.
tc.transactf("bad", "uid expunge 1 leftover") // Leftover data.
tc.transactf("bad", "uid expunge 1 leftover") // Leftover data.

tc.client.StoreFlagsAdd("1,2,4", true, `\Deleted`) // Marks UID 2,4,6 as deleted.
tc.transactf("ok", `uid store 2,4,6 +flags.silent \Deleted`)

tc.transactf("ok", "uid expunge 1")
tc.xuntagged() // No match.
@@ -67,8 +83,16 @@ func TestExpunge(t *testing.T) {
tc2.transactf("ok", "noop") // Drain.

tc.transactf("ok", "uid expunge 4:6") // Removes UID 4,6 at seqs 2,4.
tc.xuntagged(imapclient.UntaggedExpunge(2), imapclient.UntaggedExpunge(3))
if uidonly {
tc.xuntagged(imapclient.UntaggedVanished{UIDs: xparseNumSet("4,6")})
} else {
tc.xuntagged(imapclient.UntaggedExpunge(2), imapclient.UntaggedExpunge(3))
}

tc2.transactf("ok", "noop")
tc.xuntagged(imapclient.UntaggedExpunge(2), imapclient.UntaggedExpunge(3))
if uidonly {
tc2.xuntagged(imapclient.UntaggedVanished{UIDs: xparseNumSet("4,6")})
} else {
tc2.xuntagged(imapclient.UntaggedExpunge(2), imapclient.UntaggedExpunge(3))
}
}
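The uidonly branches above expect `* VANISHED` responses carrying the removed UIDs as an IMAP sequence set (e.g. xparseNumSet("4,6")), and the server-side fetch code below mentions emitting vanished UIDs in "compact numset form". As a rough standalone sketch (helper name invented, not the repository's compactUIDSet), this turns a sorted UID list into that compact form:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// compact collapses consecutive runs of UIDs into start:end ranges.
func compact(uids []uint32) string {
	var parts []string
	for i := 0; i < len(uids); {
		j := i
		for j+1 < len(uids) && uids[j+1] == uids[j]+1 {
			j++
		}
		if i == j {
			parts = append(parts, strconv.FormatUint(uint64(uids[i]), 10))
		} else {
			parts = append(parts, fmt.Sprintf("%d:%d", uids[i], uids[j]))
		}
		i = j + 1
	}
	return strings.Join(parts, ",")
}

func main() {
	fmt.Println(compact([]uint32{1, 2, 3, 5, 7, 8, 9})) // 1:3,5,7:9
}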
@@ -4,19 +4,20 @@ package imapserver

import (
"bytes"
"errors"
"context"
"fmt"
"io"
"log/slog"
"maps"
"mime"
"net/textproto"
"sort"
"slices"
"strings"

"golang.org/x/exp/maps"

"github.com/mjl-/bstore"

"github.com/mjl-/mox/message"
"github.com/mjl-/mox/mlog"
"github.com/mjl-/mox/mox-"
"github.com/mjl-/mox/moxio"
"github.com/mjl-/mox/store"
@@ -25,18 +26,20 @@ import (
// functions to handle fetch attribute requests are defined on fetchCmd.
type fetchCmd struct {
conn *conn
mailboxID int64
uid store.UID
tx *bstore.Tx // Writable tx, for storing message when first parsed as mime parts.
changes []store.Change // For updated Seen flag.
markSeen bool
needFlags bool
needModseq bool // Whether untagged responses need modseq.
expungeIssued bool // Set if a message cannot be read. Can happen for expunged messages.
modseq store.ModSeq // Initialized on first change, for marking messages as seen.
isUID bool // If this is a UID FETCH command.
hasChangedSince bool // Whether CHANGEDSINCE was set. Enables MODSEQ in response.
deltaCounts store.MailboxCounts // By marking \Seen, the number of unread/unseen messages will go down. We update counts at the end.
isUID bool // If this is a UID FETCH command.
rtx *bstore.Tx // Read-only transaction, kept open while processing all messages.
updateSeen []store.UID // To mark as seen after processing all messages. UID instead of message ID since moved messages keep their ID and insert a new ID in the original mailbox.
hasChangedSince bool // Whether CHANGEDSINCE was set. Enables MODSEQ in response.
expungeIssued bool // Set if any message has been expunged. Can happen for expunged messages.

// For the message currently being processed.
mailboxID int64
uid store.UID

markSeen bool
needFlags bool
needModseq bool // Whether untagged responses need modseq.
newPreviews map[store.UID]string // Save with messages when done.

// Loaded when first needed, closed when message was processed.
m *store.Message // Message currently being processed.
@@ -76,7 +79,7 @@ func (c *conn) cmdxFetch(isUID bool, tag, cmdstr string, p *parser) {
p.xspace()
nums := p.xnumSet()
p.xspace()
atts := p.xfetchAtts(isUID)
atts := p.xfetchAtts()
var changedSince int64
var haveChangedSince bool
var vanished bool
@@ -125,42 +128,66 @@ func (c *conn) cmdxFetch(isUID bool, tag, cmdstr string, p *parser) {
}
p.xempty()

// We don't use c.account.WithRLock because we write to the client while reading messages.
// We get the rlock, then we check the mailbox, release the lock and read the messages.
// The db transaction still locks out any changes to the database...
c.account.RLock()
runlock := c.account.RUnlock
// Note: we call runlock in a closure because we replace it below.
// We only keep a wlock, only for initial checks and listing the uids. Then we
// unlock and work without a lock. So changes to the store can happen, and we need
// to deal with that. If we need to mark messages as seen, we do so after
// processing the fetch for all messages, in a single write transaction. We don't
// send untagged changes for those \seen flag changes before finishing this
// command, because we have to sequence all changes properly, and since we don't
// (want to) hold a wlock while processing messages (can be many!), other changes
// may have happened to the store. So instead, we'll silently mark messages as seen
// (the client should know this is happening anyway!), then broadcast the changes
// to everyone, including ourselves. A noop/idle command that may come next will
// return the \seen flag changes, in the correct order, with the correct modseq. We
// also cannot just apply pending changes while processing. It is not allowed at
// all for non-uid-fetch. It would also make life more complicated, e.g. we would
// perhaps have to check if newly added messages also match uid fetch set that was
// requested.

var uids []store.UID
var vanishedUIDs []store.UID

cmd := &fetchCmd{conn: c, isUID: isUID, hasChangedSince: haveChangedSince, mailboxID: c.mailboxID, newPreviews: map[store.UID]string{}}

defer func() {
runlock()
if cmd.rtx == nil {
return
}
err := cmd.rtx.Rollback()
c.log.Check(err, "rollback rtx")
cmd.rtx = nil
}()

var vanishedUIDs []store.UID
cmd := &fetchCmd{conn: c, mailboxID: c.mailboxID, isUID: isUID, hasChangedSince: haveChangedSince}
c.xdbwrite(func(tx *bstore.Tx) {
cmd.tx = tx
c.account.WithRLock(func() {
var err error
cmd.rtx, err = c.account.DB.Begin(context.TODO(), false)
cmd.xcheckf(err, "begin transaction")

// Ensure the mailbox still exists.
mb := c.xmailboxID(tx, c.mailboxID)

var uids []store.UID
c.xmailboxID(cmd.rtx, c.mailboxID)

// With changedSince, the client is likely asking for a small set of changes. Use a
// database query to trim down the uids we need to look at.
// ../rfc/7162:871
// database query to trim down the uids we need to look at. We need to go through
// the database for "VANISHED (EARLIER)" anyway, to see UIDs that aren't in the
// session anymore. Vanished must be used with changedSince. ../rfc/7162:871
if changedSince > 0 {
q := bstore.QueryTx[store.Message](tx)
q := bstore.QueryTx[store.Message](cmd.rtx)
q.FilterNonzero(store.Message{MailboxID: c.mailboxID})
q.FilterGreater("ModSeq", store.ModSeqFromClient(changedSince))
if !vanished {
q.FilterEqual("Expunged", false)
}
err := q.ForEach(func(m store.Message) error {
if m.Expunged {
vanishedUIDs = append(vanishedUIDs, m.UID)
} else if isUID {
if nums.containsUID(m.UID, c.uids, c.searchResult) {
uids = append(uids, m.UID)
if m.UID >= c.uidnext {
return nil
}
if isUID {
if nums.xcontainsKnownUID(m.UID, c.searchResult, func() store.UID { return c.uidnext - 1 }) {
if m.Expunged {
vanishedUIDs = append(vanishedUIDs, m.UID)
} else {
uids = append(uids, m.UID)
}
}
} else {
seq := c.sequence(m.UID)
@@ -171,115 +198,196 @@ func (c *conn) cmdxFetch(isUID bool, tag, cmdstr string, p *parser) {
return nil
})
xcheckf(err, "looking up messages with changedsince")
} else {
uids = c.xnumSetUIDs(isUID, nums)
}

// Send vanished for all missing requested UIDs. ../rfc/7162:1718
|
||||
if vanished {
|
||||
delModSeq, err := c.account.HighestDeletedModSeq(tx)
|
||||
// In case of vanished where we don't have the full history, we must send VANISHED
|
||||
// for all uids matching nums. ../rfc/7162:1718
|
||||
delModSeq, err := c.account.HighestDeletedModSeq(cmd.rtx)
|
||||
xcheckf(err, "looking up highest deleted modseq")
|
||||
if changedSince < delModSeq.Client() {
|
||||
// First sort the uids we already found, for fast lookup.
|
||||
sort.Slice(vanishedUIDs, func(i, j int) bool {
|
||||
return vanishedUIDs[i] < vanishedUIDs[j]
|
||||
})
|
||||
|
||||
// We'll be gathering any more vanished uids in more.
|
||||
more := map[store.UID]struct{}{}
|
||||
checkVanished := func(uid store.UID) {
|
||||
if uidSearch(c.uids, uid) <= 0 && uidSearch(vanishedUIDs, uid) <= 0 {
|
||||
more[uid] = struct{}{}
|
||||
}
|
||||
}
|
||||
// Now look through the requested uids. We may have a searchResult, handle it
|
||||
// separately from a numset with potential stars, over which we can more easily
|
||||
// iterate.
|
||||
if nums.searchResult {
|
||||
for _, uid := range c.searchResult {
|
||||
checkVanished(uid)
|
||||
}
|
||||
} else {
|
||||
iter := nums.interpretStar(c.uids).newIter()
|
||||
for {
|
||||
num, ok := iter.Next()
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
checkVanished(store.UID(num))
|
||||
}
|
||||
}
|
||||
vanishedUIDs = append(vanishedUIDs, maps.Keys(more)...)
|
||||
if !vanished || changedSince >= delModSeq.Client() {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Release the account lock.
|
||||
runlock()
|
||||
runlock = func() {} // Prevent defer from unlocking again.
|
||||
// We'll iterate through all UIDs in the numset, and add anything that isn't
|
||||
// already in uids and vanishedUIDs. First sort the uids we already found, for fast
|
||||
// lookup. We'll gather new UIDs in more, so we don't break the binary search.
|
||||
slices.Sort(vanishedUIDs)
|
||||
slices.Sort(uids)
|
||||
|
||||
// First report all vanished UIDs. ../rfc/7162:1714
|
||||
if len(vanishedUIDs) > 0 {
|
||||
// Mention all vanished UIDs in compact numset form.
|
||||
// ../rfc/7162:1985
|
||||
sort.Slice(vanishedUIDs, func(i, j int) bool {
|
||||
return vanishedUIDs[i] < vanishedUIDs[j]
|
||||
})
|
||||
// No hard limit on response sizes, but clients are recommended to not send more
|
||||
// than 8k. We send a more conservative max 4k.
|
||||
for _, s := range compactUIDSet(vanishedUIDs).Strings(4*1024 - 32) {
|
||||
c.bwritelinef("* VANISHED (EARLIER) %s", s)
|
||||
more := map[store.UID]struct{}{} // We'll add them at the end.
|
||||
checkVanished := func(uid store.UID) {
|
||||
if uid < c.uidnext && uidSearch(uids, uid) <= 0 && uidSearch(vanishedUIDs, uid) <= 0 {
|
||||
more[uid] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
// Now look through the requested uids. We may have a searchResult, handle it
|
||||
// separately from a numset with potential stars, over which we can more easily
|
||||
// iterate.
|
||||
if nums.searchResult {
|
||||
for _, uid := range c.searchResult {
|
||||
checkVanished(uid)
|
||||
}
|
||||
} else {
|
||||
xlastUID := c.newCachedLastUID(cmd.rtx, c.mailboxID, func(xerr error) { xuserErrorf("%s", xerr) })
|
||||
iter := nums.xinterpretStar(xlastUID).newIter()
|
||||
for {
|
||||
num, ok := iter.Next()
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
checkVanished(store.UID(num))
|
||||
}
|
||||
}
|
||||
vanishedUIDs = slices.AppendSeq(vanishedUIDs, maps.Keys(more))
|
||||
slices.Sort(vanishedUIDs)
|
||||
} else {
|
||||
uids = c.xnumSetEval(cmd.rtx, isUID, nums)
|
||||
}
|
||||
|
||||
for _, uid := range uids {
|
||||
cmd.uid = uid
|
||||
cmd.conn.log.Debug("processing uid", slog.Any("uid", uid))
|
||||
cmd.process(atts)
|
||||
}
|
||||
|
||||
var zeromc store.MailboxCounts
|
||||
if cmd.deltaCounts != zeromc {
|
||||
mb.Add(cmd.deltaCounts) // Unseen/Unread will be <= 0.
|
||||
err := tx.Update(&mb)
|
||||
xcheckf(err, "updating mailbox counts")
|
||||
cmd.changes = append(cmd.changes, mb.ChangeCounts())
|
||||
// No need to update account total message size.
|
||||
}
|
||||
})
|
||||
// We are continuing without a lock, working off our snapshot of uids to process.
|
||||
|
||||
if len(cmd.changes) > 0 {
|
||||
// Broadcast seen updates to other connections.
|
||||
c.broadcast(cmd.changes)
|
||||
// First report all vanished UIDs. ../rfc/7162:1714
|
||||
if len(vanishedUIDs) > 0 {
|
||||
// Mention all vanished UIDs in compact numset form.
|
||||
// ../rfc/7162:1985
|
||||
// No hard limit on response sizes, but clients are recommended to not send more
|
||||
// than 8k. We send a more conservative max 4k.
|
||||
for _, s := range compactUIDSet(vanishedUIDs).Strings(4*1024 - 32) {
|
||||
c.xbwritelinef("* VANISHED (EARLIER) %s", s)
|
||||
}
|
||||
}
|
||||
|
||||
defer cmd.msgclose() // In case of panic.
|
||||
|
||||
for _, cmd.uid = range uids {
|
||||
cmd.conn.log.Debug("processing uid", slog.Any("uid", cmd.uid))
|
||||
data, err := cmd.process(atts)
|
||||
if err != nil {
|
||||
cmd.conn.log.Infox("processing fetch attribute", err, slog.Any("uid", cmd.uid))
|
||||
xuserErrorf("processing fetch attribute: %v", err)
|
||||
}
|
||||
|
||||
// UIDFETCH in case of uidonly. ../rfc/9586:181
|
||||
if c.uidonly {
|
||||
fmt.Fprintf(cmd.conn.xbw, "* %d UIDFETCH ", cmd.uid)
|
||||
} else {
|
||||
fmt.Fprintf(cmd.conn.xbw, "* %d FETCH ", cmd.conn.xsequence(cmd.uid))
|
||||
}
|
||||
data.xwriteTo(cmd.conn, cmd.conn.xbw)
|
||||
cmd.conn.xbw.Write([]byte("\r\n"))
|
||||
|
||||
cmd.msgclose()
|
||||
}
|
||||
|
||||
// We've returned all data. Now we mark messages as seen in one go, in a new write
|
||||
// transaction. We don't send untagged messages for the changes, since there may be
|
||||
// unprocessed pending changes. Instead, we broadcast them to ourselves too, so a
|
||||
// next noop/idle will return the flags to the client.
|
||||
|
||||
err := cmd.rtx.Rollback()
|
||||
c.log.Check(err, "fetch read tx rollback")
|
||||
cmd.rtx = nil
|
||||
|
||||
// ../rfc/9051:4432 We mark all messages that need it as seen at the end of the
|
||||
// command, in a single transaction.
|
||||
if len(cmd.updateSeen) > 0 || len(cmd.newPreviews) > 0 {
|
||||
c.account.WithWLock(func() {
|
||||
changes := make([]store.Change, 0, len(cmd.updateSeen)+1)
|
||||
|
||||
c.xdbwrite(func(wtx *bstore.Tx) {
|
||||
mb, err := store.MailboxID(wtx, c.mailboxID)
|
||||
if err == store.ErrMailboxExpunged {
|
||||
xusercodeErrorf("NONEXISTENT", "mailbox has been expunged")
|
||||
}
|
||||
xcheckf(err, "get mailbox for updating counts after marking as seen")
|
||||
|
||||
var modseq store.ModSeq
|
||||
|
||||
for _, uid := range cmd.updateSeen {
|
||||
m, err := bstore.QueryTx[store.Message](wtx).FilterNonzero(store.Message{MailboxID: c.mailboxID, UID: uid}).Get()
|
||||
xcheckf(err, "get message")
|
||||
if m.Expunged {
|
||||
// Message has been deleted in the mean time.
|
||||
cmd.expungeIssued = true
|
||||
continue
|
||||
}
|
||||
if m.Seen {
|
||||
// Message already marked as seen by another process.
|
||||
continue
|
||||
}
|
||||
|
||||
if modseq == 0 {
|
||||
modseq, err = c.account.NextModSeq(wtx)
|
||||
xcheckf(err, "get next mod seq")
|
||||
}
|
||||
|
||||
oldFlags := m.Flags
|
||||
mb.Sub(m.MailboxCounts())
|
||||
m.Seen = true
|
||||
mb.Add(m.MailboxCounts())
|
||||
changes = append(changes, m.ChangeFlags(oldFlags, mb))
|
||||
|
||||
m.ModSeq = modseq
|
||||
err = wtx.Update(&m)
|
||||
xcheckf(err, "mark message as seen")
|
||||
}
|
||||
|
||||
changes = append(changes, mb.ChangeCounts())
|
||||
|
||||
for uid, s := range cmd.newPreviews {
|
||||
m, err := bstore.QueryTx[store.Message](wtx).FilterNonzero(store.Message{MailboxID: c.mailboxID, UID: uid}).Get()
|
||||
xcheckf(err, "get message")
|
||||
if m.Expunged {
|
||||
// Message has been deleted in the mean time.
|
||||
cmd.expungeIssued = true
|
||||
continue
|
||||
}
|
||||
|
||||
// note: we are not updating modseq.
|
||||
|
||||
m.Preview = &s
|
||||
err = wtx.Update(&m)
|
||||
xcheckf(err, "saving preview with message")
|
||||
}
|
||||
|
||||
if modseq > 0 {
|
||||
mb.ModSeq = modseq
|
||||
err = wtx.Update(&mb)
|
||||
xcheckf(err, "update mailbox with counts and modseq")
|
||||
}
|
||||
})
|
||||
|
||||
// Broadcast these changes also to ourselves, so we'll send the updated flags, but
|
||||
// in the correct order, after other changes.
|
||||
store.BroadcastChanges(c.account, changes)
|
||||
})
|
||||
}
|
||||
|
||||
if cmd.expungeIssued {
|
||||
// ../rfc/2180:343
|
||||
c.writeresultf("%s NO [EXPUNGEISSUED] at least one message was expunged", tag)
|
||||
// ../rfc/9051:5102
|
||||
c.xwriteresultf("%s OK [EXPUNGEISSUED] at least one message was expunged", tag)
|
||||
} else {
|
||||
c.ok(tag, cmdstr)
|
||||
}
|
||||
}
|
||||
|
||||
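The comments in cmdxFetch above describe deferring \Seen updates: stream all FETCH responses from a read-only snapshot, then mark messages seen in a single write step afterwards and broadcast the changes (including to this connection) so a later NOOP/IDLE delivers the flag updates in order. A rough standalone sketch of that shape, with all names invented (this is not mox code):

package main

import "fmt"

type UID uint32

type mailbox struct {
	seen map[UID]bool
}

// fetch streams message data (elided) without holding a write lock, and only
// records which UIDs still need the \Seen flag.
func fetch(mb *mailbox, uids []UID) (updateSeen []UID) {
	for _, uid := range uids {
		// ... write the FETCH response for uid to the client here ...
		if !mb.seen[uid] {
			updateSeen = append(updateSeen, uid)
		}
	}
	return updateSeen
}

// applySeen stands in for the single write transaction run after all responses
// were written; the resulting changes would then be broadcast to all sessions.
func applySeen(mb *mailbox, updateSeen []UID) (changes []UID) {
	for _, uid := range updateSeen {
		if mb.seen[uid] {
			continue // Already marked by another session in the meantime.
		}
		mb.seen[uid] = true
		changes = append(changes, uid)
	}
	return changes
}

func main() {
	mb := &mailbox{seen: map[UID]bool{2: true}}
	pending := fetch(mb, []UID{1, 2, 3})
	fmt.Println(applySeen(mb, pending)) // [1 3]
}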
func (cmd *fetchCmd) xmodseq() store.ModSeq {
|
||||
if cmd.modseq == 0 {
|
||||
var err error
|
||||
cmd.modseq, err = cmd.conn.account.NextModSeq(cmd.tx)
|
||||
cmd.xcheckf(err, "assigning next modseq")
|
||||
}
|
||||
return cmd.modseq
|
||||
}
|
||||
|
||||
func (cmd *fetchCmd) xensureMessage() *store.Message {
|
||||
if cmd.m != nil {
|
||||
return cmd.m
|
||||
}
|
||||
|
||||
q := bstore.QueryTx[store.Message](cmd.tx)
|
||||
// We do not filter by Expunged, the message may have been deleted in other
|
||||
// sessions, but not in ours.
|
||||
q := bstore.QueryTx[store.Message](cmd.rtx)
|
||||
q.FilterNonzero(store.Message{MailboxID: cmd.mailboxID, UID: cmd.uid})
|
||||
q.FilterEqual("Expunged", false)
|
||||
m, err := q.Get()
|
||||
cmd.xcheckf(err, "get message for uid %d", cmd.uid)
|
||||
cmd.m = &m
|
||||
if m.Expunged {
|
||||
cmd.expungeIssued = true
|
||||
}
|
||||
return cmd.m
|
||||
}
|
||||
|
||||
@ -305,16 +413,20 @@ func (cmd *fetchCmd) xensureParsed() (*store.MsgReader, *message.Part) {
|
||||
return cmd.msgr, cmd.part
|
||||
}
|
||||
|
||||
func (cmd *fetchCmd) process(atts []fetchAtt) {
|
||||
defer func() {
|
||||
cmd.m = nil
|
||||
cmd.part = nil
|
||||
if cmd.msgr != nil {
|
||||
err := cmd.msgr.Close()
|
||||
cmd.conn.xsanity(err, "closing messagereader")
|
||||
cmd.msgr = nil
|
||||
}
|
||||
// msgclose must be called after processing a message (after having written/used
|
||||
// its data), even in the case of a panic.
|
||||
func (cmd *fetchCmd) msgclose() {
|
||||
cmd.m = nil
|
||||
cmd.part = nil
|
||||
if cmd.msgr != nil {
|
||||
err := cmd.msgr.Close()
|
||||
cmd.conn.xsanity(err, "closing messagereader")
|
||||
cmd.msgr = nil
|
||||
}
|
||||
}
|
||||
|
||||
func (cmd *fetchCmd) process(atts []fetchAtt) (rdata listspace, rerr error) {
|
||||
defer func() {
|
||||
x := recover()
|
||||
if x == nil {
|
||||
return
|
||||
@ -322,16 +434,15 @@ func (cmd *fetchCmd) process(atts []fetchAtt) {
|
||||
err, ok := x.(attrError)
|
||||
if !ok {
|
||||
panic(x)
|
||||
} else if rerr == nil {
|
||||
rerr = err
|
||||
}
|
||||
if errors.Is(err, bstore.ErrAbsent) {
|
||||
cmd.expungeIssued = true
|
||||
return
|
||||
}
|
||||
cmd.conn.log.Infox("processing fetch attribute", err, slog.Any("uid", cmd.uid))
|
||||
xuserErrorf("processing fetch attribute: %v", err)
|
||||
}()
|
||||
|
||||
data := listspace{bare("UID"), number(cmd.uid)}
|
||||
var data listspace
|
||||
if !cmd.conn.uidonly {
|
||||
data = append(data, bare("UID"), number(cmd.uid))
|
||||
}
|
||||
|
||||
cmd.markSeen = false
|
||||
cmd.needFlags = false
|
||||
@ -342,17 +453,7 @@ func (cmd *fetchCmd) process(atts []fetchAtt) {
|
||||
}
|
||||
|
||||
if cmd.markSeen {
|
||||
m := cmd.xensureMessage()
|
||||
cmd.deltaCounts.Sub(m.MailboxCounts())
|
||||
origFlags := m.Flags
|
||||
m.Seen = true
|
||||
cmd.deltaCounts.Add(m.MailboxCounts())
|
||||
m.ModSeq = cmd.xmodseq()
|
||||
err := cmd.tx.Update(m)
|
||||
xcheckf(err, "marking message as seen")
|
||||
// No need to update account total message size.
|
||||
|
||||
cmd.changes = append(cmd.changes, m.ChangeFlags(origFlags))
|
||||
cmd.updateSeen = append(cmd.updateSeen, cmd.uid)
|
||||
}
|
||||
|
||||
if cmd.needFlags {
|
||||
@ -375,15 +476,12 @@ func (cmd *fetchCmd) process(atts []fetchAtt) {
|
||||
// other mentioning of cases elsewhere in the RFC would be too superfluous.
|
||||
//
|
||||
// ../rfc/7162:877 ../rfc/7162:388 ../rfc/7162:909 ../rfc/7162:1426
|
||||
if cmd.needModseq || cmd.hasChangedSince || cmd.conn.enabled[capQresync] && (cmd.isUID || cmd.markSeen) {
|
||||
if cmd.needModseq || cmd.hasChangedSince || cmd.conn.enabled[capQresync] && cmd.isUID {
|
||||
m := cmd.xensureMessage()
|
||||
data = append(data, bare("MODSEQ"), listspace{bare(fmt.Sprintf("%d", m.ModSeq.Client()))})
|
||||
}
|
||||
|
||||
// Write errors are turned into panics because we write through c.
|
||||
fmt.Fprintf(cmd.conn.bw, "* %d FETCH ", cmd.conn.xsequence(cmd.uid))
|
||||
data.writeTo(cmd.conn, cmd.conn.bw)
|
||||
cmd.conn.bw.Write([]byte("\r\n"))
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// result for one attribute. if processing fails, e.g. because data was requested
|
||||
@ -392,8 +490,12 @@ func (cmd *fetchCmd) process(atts []fetchAtt) {
|
||||
func (cmd *fetchCmd) xprocessAtt(a fetchAtt) []token {
|
||||
switch a.field {
|
||||
case "UID":
|
||||
// Always present.
|
||||
return nil
|
||||
// Present by default without uidonly. For uidonly, we only add it when explicitly
|
||||
// requested. ../rfc/9586:184
|
||||
if cmd.conn.uidonly {
|
||||
return []token{bare("UID"), number(cmd.uid)}
|
||||
}
|
||||
|
||||
case "ENVELOPE":
|
||||
_, part := cmd.xensureParsed()
|
||||
envelope := xenvelope(part)
|
||||
@ -404,9 +506,20 @@ func (cmd *fetchCmd) xprocessAtt(a fetchAtt) []token {
|
||||
m := cmd.xensureMessage()
|
||||
return []token{bare("INTERNALDATE"), dquote(m.Received.Format("_2-Jan-2006 15:04:05 -0700"))}
|
||||
|
||||
case "SAVEDATE":
|
||||
m := cmd.xensureMessage()
|
||||
// For messages in storage from before we implemented this extension, we don't have
|
||||
// a savedate, and we return nil. This is normally meant to be per mailbox, but
|
||||
// returning it per message should be fine. ../rfc/8514:191
|
||||
var savedate token = nilt
|
||||
if m.SaveDate != nil {
|
||||
savedate = dquote(m.SaveDate.Format("_2-Jan-2006 15:04:05 -0700"))
|
||||
}
|
||||
return []token{bare("SAVEDATE"), savedate}
|
||||
|
||||
case "BODYSTRUCTURE":
|
||||
_, part := cmd.xensureParsed()
|
||||
bs := xbodystructure(part)
|
||||
bs := xbodystructure(cmd.conn.log, part, true)
|
||||
return []token{bare("BODYSTRUCTURE"), bs}
|
||||
|
||||
case "BODY":
|
||||
@ -487,6 +600,37 @@ func (cmd *fetchCmd) xprocessAtt(a fetchAtt) []token {
|
||||
case "MODSEQ":
|
||||
cmd.needModseq = true
|
||||
|
||||
case "PREVIEW":
|
||||
m := cmd.xensureMessage()
|
||||
preview := m.Preview
|
||||
// We ignore "lazy", generating the preview is fast enough.
|
||||
if preview == nil {
|
||||
// Get the preview. We'll save all generated previews in a single transaction at
|
||||
// the end.
|
||||
_, p := cmd.xensureParsed()
|
||||
s, err := p.Preview(cmd.conn.log)
|
||||
cmd.xcheckf(err, "generating preview")
|
||||
preview = &s
|
||||
cmd.newPreviews[m.UID] = s
|
||||
}
|
||||
var t token = nilt
|
||||
if preview != nil {
|
||||
s := *preview
|
||||
|
||||
// Limit to 200 characters (not bytes). ../rfc/8970:206
|
||||
var n, o int
|
||||
for o = range s {
|
||||
n++
|
||||
if n > 200 {
|
||||
s = s[:o]
|
||||
break
|
||||
}
|
||||
}
|
||||
s = strings.TrimSpace(s)
|
||||
t = string0(s)
|
||||
}
|
||||
return []token{bare(a.field), t}
|
||||
|
||||
default:
|
||||
xserverErrorf("field %q not yet implemented", a.field)
|
||||
}
|
||||
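The PREVIEW branch above limits the preview to 200 characters rather than bytes by walking rune offsets. A small standalone sketch of that truncation idea (function name invented for the example):

package main

import (
	"fmt"
	"strings"
)

// truncatePreview cuts after max characters (not bytes), so multi-byte UTF-8
// sequences are never split.
func truncatePreview(s string, max int) string {
	n := 0
	for o := range s { // o is the byte offset of each rune.
		n++
		if n > max {
			s = s[:o]
			break
		}
	}
	return strings.TrimSpace(s)
}

func main() {
	fmt.Println(truncatePreview("héllo wörld", 5)) // héllo
}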
@ -632,11 +776,15 @@ func (cmd *fetchCmd) xbinary(a fetchAtt) (string, token) {
|
||||
cmd.xerrorf("binary only allowed on leaf parts, not multipart/* or message/rfc822 or message/global")
|
||||
}
|
||||
|
||||
switch p.ContentTransferEncoding {
|
||||
var cte string
|
||||
if p.ContentTransferEncoding != nil {
|
||||
cte = *p.ContentTransferEncoding
|
||||
}
|
||||
switch cte {
|
||||
case "", "7BIT", "8BIT", "BINARY", "BASE64", "QUOTED-PRINTABLE":
|
||||
default:
|
||||
// ../rfc/9051:5913
|
||||
xusercodeErrorf("UNKNOWN-CTE", "unknown Content-Transfer-Encoding %q", p.ContentTransferEncoding)
|
||||
xusercodeErrorf("UNKNOWN-CTE", "unknown Content-Transfer-Encoding %q", cte)
|
||||
}
|
||||
|
||||
r := p.Reader()
|
||||
@ -660,7 +808,7 @@ func (cmd *fetchCmd) xbody(a fetchAtt) (string, token) {
|
||||
|
||||
if a.section == nil {
|
||||
// Non-extensible form of BODYSTRUCTURE.
|
||||
return a.field, xbodystructure(part)
|
||||
return a.field, xbodystructure(cmd.conn.log, part, false)
|
||||
}
|
||||
|
||||
cmd.peekOrSeen(a.peek)
|
||||
@ -672,16 +820,13 @@ func (cmd *fetchCmd) xbody(a fetchAtt) (string, token) {
|
||||
var offset int64
|
||||
count := m.Size
|
||||
if a.partial != nil {
|
||||
offset = int64(a.partial.offset)
|
||||
if offset > m.Size {
|
||||
offset = m.Size
|
||||
}
|
||||
offset = min(int64(a.partial.offset), m.Size)
|
||||
count = int64(a.partial.count)
|
||||
if offset+count > m.Size {
|
||||
count = m.Size - offset
|
||||
}
|
||||
}
|
||||
return respField, readerSizeSyncliteral{&moxio.AtReader{R: msgr, Offset: offset}, count}
|
||||
return respField, readerSizeSyncliteral{&moxio.AtReader{R: msgr, Offset: offset}, count, false}
|
||||
}
|
||||
|
||||
sr := cmd.xsection(a.section, part)
|
||||
@ -720,35 +865,40 @@ func (cmd *fetchCmd) xpartnumsDeref(nums []uint32, p *message.Part) *message.Par
|
||||
}
|
||||
|
||||
func (cmd *fetchCmd) xsection(section *sectionSpec, p *message.Part) io.Reader {
|
||||
// msgtext is not nil, i.e. HEADER* or TEXT (not MIME), for the top-level part (a message).
|
||||
if section.part == nil {
|
||||
return cmd.xsectionMsgtext(section.msgtext, p)
|
||||
}
|
||||
|
||||
p = cmd.xpartnumsDeref(section.part.part, p)
|
||||
|
||||
// If there is no sectionMsgText, then this isn't for HEADER*, TEXT or MIME, i.e. a
|
||||
// part body, e.g. "BODY[1]".
|
||||
if section.part.text == nil {
|
||||
return p.RawReader()
|
||||
}
|
||||
|
||||
// ../rfc/9051:4535
|
||||
if p.Message != nil {
|
||||
// MIME is defined for all parts. Otherwise it's HEADER* or TEXT, which is only
|
||||
// defined for parts that are messages. ../rfc/9051:4500 ../rfc/9051:4517
|
||||
if !section.part.text.mime {
|
||||
if p.Message == nil {
|
||||
cmd.xerrorf("part is not a message, cannot request header* or text")
|
||||
}
|
||||
|
||||
err := p.SetMessageReaderAt()
|
||||
cmd.xcheckf(err, "preparing submessage")
|
||||
p = p.Message
|
||||
}
|
||||
|
||||
if !section.part.text.mime {
|
||||
return cmd.xsectionMsgtext(section.part.text.msgtext, p)
|
||||
}
|
||||
|
||||
// MIME header, see ../rfc/9051:4534 ../rfc/2045:1645
|
||||
// MIME header, see ../rfc/9051:4514 ../rfc/2045:1652
|
||||
h, err := io.ReadAll(p.HeaderReader())
|
||||
cmd.xcheckf(err, "reading header")
|
||||
|
||||
matchesFields := func(line []byte) bool {
|
||||
k := textproto.CanonicalMIMEHeaderKey(string(bytes.TrimRight(bytes.SplitN(line, []byte(":"), 2)[0], " \t")))
|
||||
// Only add MIME-Version and additional CRLF for messages, not other parts. ../rfc/2045:1645 ../rfc/2045:1652
|
||||
return (p.Envelope != nil && k == "Mime-Version") || strings.HasPrefix(k, "Content-")
|
||||
return strings.HasPrefix(k, "Content-")
|
||||
}
|
||||
|
||||
var match bool
|
||||
@ -762,7 +912,7 @@ func (cmd *fetchCmd) xsection(section *sectionSpec, p *message.Part) io.Reader {
|
||||
h = h[len(line):]
|
||||
|
||||
match = matchesFields(line) || match && (bytes.HasPrefix(line, []byte(" ")) || bytes.HasPrefix(line, []byte("\t")))
|
||||
if match || len(line) == 2 {
|
||||
if match {
|
||||
hb.Write(line)
|
||||
}
|
||||
}
|
||||
@ -770,11 +920,10 @@ func (cmd *fetchCmd) xsection(section *sectionSpec, p *message.Part) io.Reader {
|
||||
}
|
||||
|
||||
func (cmd *fetchCmd) xsectionMsgtext(smt *sectionMsgtext, p *message.Part) io.Reader {
|
||||
if smt.s == "HEADER" {
|
||||
return p.HeaderReader()
|
||||
}
|
||||
|
||||
switch smt.s {
|
||||
case "HEADER":
|
||||
return p.HeaderReader()
|
||||
|
||||
case "HEADER.FIELDS":
|
||||
return cmd.xmodifiedHeader(p, smt.headers, false)
|
||||
|
||||
@ -782,8 +931,8 @@ func (cmd *fetchCmd) xsectionMsgtext(smt *sectionMsgtext, p *message.Part) io.Re
|
||||
return cmd.xmodifiedHeader(p, smt.headers, true)
|
||||
|
||||
case "TEXT":
|
||||
// It appears imap clients expect to get the body of the message, not a "text body"
|
||||
// which sounds like it means a text/* part of a message. ../rfc/9051:4517
|
||||
// TEXT the body (excluding headers) of a message, either the top-level message, or
|
||||
// a nested as message/rfc822 or message/global. ../rfc/9051:4517
|
||||
return p.RawReader()
|
||||
}
|
||||
panic(serverError{fmt.Errorf("missing case")})
|
||||
@ -834,27 +983,24 @@ func (cmd *fetchCmd) sectionMsgtextName(smt *sectionMsgtext) string {
|
||||
return s
|
||||
}
|
||||
|
||||
func bodyFldParams(params map[string]string) token {
|
||||
if len(params) == 0 {
|
||||
func bodyFldParams(p *message.Part) token {
|
||||
if len(p.ContentTypeParams) == 0 {
|
||||
return nilt
|
||||
}
|
||||
params := make(listspace, 0, 2*len(p.ContentTypeParams))
|
||||
// Ensure same ordering, easier for testing.
|
||||
var keys []string
|
||||
for k := range params {
|
||||
keys = append(keys, k)
|
||||
for _, k := range slices.Sorted(maps.Keys(p.ContentTypeParams)) {
|
||||
v := p.ContentTypeParams[k]
|
||||
params = append(params, string0(strings.ToUpper(k)), string0(v))
|
||||
}
|
||||
sort.Strings(keys)
|
||||
l := make(listspace, 2*len(keys))
|
||||
i := 0
|
||||
for _, k := range keys {
|
||||
l[i] = string0(strings.ToUpper(k))
|
||||
l[i+1] = string0(params[k])
|
||||
i += 2
|
||||
}
|
||||
return l
|
||||
return params
|
||||
}
|
||||
|
||||
func bodyFldEnc(s string) token {
|
||||
func bodyFldEnc(cte *string) token {
|
||||
var s string
|
||||
if cte != nil {
|
||||
s = *cte
|
||||
}
|
||||
up := strings.ToUpper(s)
|
||||
switch up {
|
||||
case "7BIT", "8BIT", "BINARY", "BASE64", "QUOTED-PRINTABLE":
|
||||
@ -863,25 +1009,92 @@ func bodyFldEnc(s string) token {
|
||||
return string0(s)
|
||||
}
|
||||
|
||||
func bodyFldMd5(p *message.Part) token {
|
||||
if p.ContentMD5 == nil {
|
||||
return nilt
|
||||
}
|
||||
return string0(*p.ContentMD5)
|
||||
}
|
||||
|
||||
func bodyFldDisp(log mlog.Log, p *message.Part) token {
|
||||
if p.ContentDisposition == nil {
|
||||
return nilt
|
||||
}
|
||||
|
||||
// ../rfc/9051:5989
|
||||
// mime.ParseMediaType recombines parameter value continuations like "title*0" and
|
||||
// "title*1" into "title". ../rfc/2231:147
|
||||
// And decodes character sets and removes language tags, like
|
||||
// "title*0*=us-ascii'en'hello%20world. ../rfc/2231:210
|
||||
|
||||
disp, params, err := mime.ParseMediaType(*p.ContentDisposition)
|
||||
if err != nil {
|
||||
log.Debugx("parsing content-disposition, ignoring", err, slog.String("header", *p.ContentDisposition))
|
||||
return nilt
|
||||
} else if len(params) == 0 {
|
||||
log.Debug("content-disposition has no parameters, ignoring", slog.String("header", *p.ContentDisposition))
|
||||
return nilt
|
||||
}
|
||||
var fields listspace
|
||||
for _, k := range slices.Sorted(maps.Keys(params)) {
|
||||
fields = append(fields, string0(k), string0(params[k]))
|
||||
}
|
||||
return listspace{string0(disp), fields}
|
||||
}
|
||||
|
||||
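The comment in bodyFldDisp above relies on mime.ParseMediaType recombining RFC 2231 parameter value continuations and decoding charset/language-tagged values. A quick standalone illustration (the example header is made up):

package main

import (
	"fmt"
	"mime"
)

func main() {
	// "title*0*=us-ascii'en'hello%20world" carries charset, language tag and
	// percent-encoding; ParseMediaType folds it into a plain "title" parameter.
	disp, params, err := mime.ParseMediaType(`attachment; title*0*=us-ascii'en'hello%20world`)
	if err != nil {
		panic(err)
	}
	fmt.Println(disp, params["title"]) // attachment hello world
}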
func bodyFldLang(p *message.Part) token {
|
||||
// todo: ../rfc/3282:86 ../rfc/5646:218 we currently just split on comma and trim space, should properly parse header.
|
||||
if p.ContentLanguage == nil {
|
||||
return nilt
|
||||
}
|
||||
var l listspace
|
||||
for _, s := range strings.Split(*p.ContentLanguage, ",") {
|
||||
s = strings.TrimSpace(s)
|
||||
if s == "" {
|
||||
return string0(*p.ContentLanguage)
|
||||
}
|
||||
l = append(l, string0(s))
|
||||
}
|
||||
return l
|
||||
}
|
||||
|
||||
func bodyFldLoc(p *message.Part) token {
|
||||
if p.ContentLocation == nil {
|
||||
return nilt
|
||||
}
|
||||
return string0(*p.ContentLocation)
|
||||
}
|
||||
|
||||
// xbodystructure returns a "body".
|
||||
// calls itself for multipart messages and message/{rfc822,global}.
|
||||
func xbodystructure(p *message.Part) token {
|
||||
func xbodystructure(log mlog.Log, p *message.Part, extensible bool) token {
|
||||
if p.MediaType == "MULTIPART" {
|
||||
// Multipart, ../rfc/9051:6355 ../rfc/9051:6411
|
||||
var bodies concat
|
||||
for i := range p.Parts {
|
||||
bodies = append(bodies, xbodystructure(&p.Parts[i]))
|
||||
bodies = append(bodies, xbodystructure(log, &p.Parts[i], extensible))
|
||||
}
|
||||
return listspace{bodies, string0(p.MediaSubType)}
|
||||
r := listspace{bodies, string0(p.MediaSubType)}
|
||||
// ../rfc/9051:6371
|
||||
if extensible {
|
||||
r = append(r,
|
||||
bodyFldParams(p),
|
||||
bodyFldDisp(log, p),
|
||||
bodyFldLang(p),
|
||||
bodyFldLoc(p),
|
||||
)
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
// ../rfc/9051:6355
|
||||
var r listspace
|
||||
if p.MediaType == "TEXT" {
|
||||
// ../rfc/9051:6404 ../rfc/9051:6418
|
||||
return listspace{
|
||||
r = listspace{
|
||||
dquote("TEXT"), string0(p.MediaSubType), // ../rfc/9051:6739
|
||||
// ../rfc/9051:6376
|
||||
bodyFldParams(p.ContentTypeParams), // ../rfc/9051:6401
|
||||
bodyFldParams(p), // ../rfc/9051:6401
|
||||
nilOrString(p.ContentID),
|
||||
nilOrString(p.ContentDescription),
|
||||
bodyFldEnc(p.ContentTransferEncoding),
|
||||
@ -891,34 +1104,45 @@ func xbodystructure(p *message.Part) token {
|
||||
} else if p.MediaType == "MESSAGE" && (p.MediaSubType == "RFC822" || p.MediaSubType == "GLOBAL") {
|
||||
// ../rfc/9051:6415
|
||||
// note: we don't have to prepare p.Message for reading, because we aren't going to read from it.
|
||||
return listspace{
|
||||
r = listspace{
|
||||
dquote("MESSAGE"), dquote(p.MediaSubType), // ../rfc/9051:6732
|
||||
// ../rfc/9051:6376
|
||||
bodyFldParams(p.ContentTypeParams), // ../rfc/9051:6401
|
||||
bodyFldParams(p), // ../rfc/9051:6401
|
||||
nilOrString(p.ContentID),
|
||||
nilOrString(p.ContentDescription),
|
||||
bodyFldEnc(p.ContentTransferEncoding),
|
||||
number(p.EndOffset - p.BodyOffset),
|
||||
xenvelope(p.Message),
|
||||
xbodystructure(p.Message),
|
||||
xbodystructure(log, p.Message, extensible),
|
||||
number(p.RawLineCount), // todo: or mp.RawLineCount?
|
||||
}
|
||||
} else {
|
||||
var media token
|
||||
switch p.MediaType {
|
||||
case "APPLICATION", "AUDIO", "IMAGE", "FONT", "MESSAGE", "MODEL", "VIDEO":
|
||||
media = dquote(p.MediaType)
|
||||
default:
|
||||
media = string0(p.MediaType)
|
||||
}
|
||||
// ../rfc/9051:6404 ../rfc/9051:6407
|
||||
r = listspace{
|
||||
media, string0(p.MediaSubType), // ../rfc/9051:6723
|
||||
// ../rfc/9051:6376
|
||||
bodyFldParams(p), // ../rfc/9051:6401
|
||||
nilOrString(p.ContentID),
|
||||
nilOrString(p.ContentDescription),
|
||||
bodyFldEnc(p.ContentTransferEncoding),
|
||||
number(p.EndOffset - p.BodyOffset),
|
||||
}
|
||||
}
|
||||
var media token
|
||||
switch p.MediaType {
|
||||
case "APPLICATION", "AUDIO", "IMAGE", "FONT", "MESSAGE", "MODEL", "VIDEO":
|
||||
media = dquote(p.MediaType)
|
||||
default:
|
||||
media = string0(p.MediaType)
|
||||
}
|
||||
// ../rfc/9051:6404 ../rfc/9051:6407
|
||||
return listspace{
|
||||
media, string0(p.MediaSubType), // ../rfc/9051:6723
|
||||
// ../rfc/9051:6376
|
||||
bodyFldParams(p.ContentTypeParams), // ../rfc/9051:6401
|
||||
nilOrString(p.ContentID),
|
||||
nilOrString(p.ContentDescription),
|
||||
bodyFldEnc(p.ContentTransferEncoding),
|
||||
number(p.EndOffset - p.BodyOffset),
|
||||
if extensible {
|
||||
// ../rfc/9051:6366
|
||||
r = append(r,
|
||||
bodyFldMd5(p),
|
||||
bodyFldDisp(log, p),
|
||||
bodyFldLang(p),
|
||||
bodyFldLoc(p),
|
||||
)
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
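The rewritten bodyFldParams above iterates parameters with slices.Sorted(maps.Keys(...)) so output ordering is stable for testing. A minimal standalone example of that stdlib pattern (Go 1.23+; sample data made up):

package main

import (
	"fmt"
	"maps"
	"slices"
	"strings"
)

func main() {
	params := map[string]string{"charset": "us-ascii", "boundary": "xyz"}
	var out []string
	// maps.Keys returns an iterator; slices.Sorted collects and sorts it.
	for _, k := range slices.Sorted(maps.Keys(params)) {
		out = append(out, strings.ToUpper(k)+" "+params[k])
	}
	fmt.Println(out) // [BOUNDARY xyz CHARSET us-ascii]
}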
@ -5,22 +5,33 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/mjl-/bstore"
|
||||
|
||||
"github.com/mjl-/mox/imapclient"
|
||||
"github.com/mjl-/mox/store"
|
||||
)
|
||||
|
||||
func TestFetch(t *testing.T) {
|
||||
tc := start(t)
|
||||
testFetch(t, false)
|
||||
}
|
||||
|
||||
func TestFetchUIDOnly(t *testing.T) {
|
||||
testFetch(t, true)
|
||||
}
|
||||
|
||||
func testFetch(t *testing.T, uidonly bool) {
|
||||
tc := start(t, uidonly)
|
||||
defer tc.close()
|
||||
|
||||
tc.client.Login("mjl@mox.example", password0)
|
||||
tc.client.Enable("imap4rev2")
|
||||
tc.login("mjl@mox.example", password0)
|
||||
tc.client.Enable(imapclient.CapIMAP4rev2)
|
||||
received, err := time.Parse(time.RFC3339, "2022-11-16T10:01:00+01:00")
|
||||
tc.check(err, "parse time")
|
||||
tc.client.Append("inbox", nil, &received, []byte(exampleMsg))
|
||||
tc.client.Append("inbox", makeAppendTime(exampleMsg, received))
|
||||
tc.client.Select("inbox")
|
||||
|
||||
uid1 := imapclient.FetchUID(1)
|
||||
date1 := imapclient.FetchInternalDate("16-Nov-2022 10:01:00 +0100")
|
||||
date1 := imapclient.FetchInternalDate{Date: received}
|
||||
rfcsize1 := imapclient.FetchRFC822Size(len(exampleMsg))
|
||||
env1 := imapclient.FetchEnvelope{
|
||||
Date: "Mon, 7 Feb 1994 21:52:25 -0800",
|
||||
@ -32,20 +43,29 @@ func TestFetch(t *testing.T) {
|
||||
MessageID: "<B27397-0100000@Blurdybloop.example>",
|
||||
}
|
||||
noflags := imapclient.FetchFlags(nil)
|
||||
bodystructbody1 := imapclient.BodyTypeText{
|
||||
MediaType: "TEXT",
|
||||
MediaSubtype: "PLAIN",
|
||||
BodyFields: imapclient.BodyFields{
|
||||
Params: [][2]string{[...]string{"CHARSET", "US-ASCII"}},
|
||||
Octets: 57,
|
||||
},
|
||||
Lines: 2,
|
||||
}
|
||||
bodyxstructure1 := imapclient.FetchBodystructure{
|
||||
RespAttr: "BODY",
|
||||
Body: imapclient.BodyTypeText{
|
||||
MediaType: "TEXT",
|
||||
MediaSubtype: "PLAIN",
|
||||
BodyFields: imapclient.BodyFields{
|
||||
Params: [][2]string{[...]string{"CHARSET", "US-ASCII"}},
|
||||
Octets: 57,
|
||||
},
|
||||
Lines: 2,
|
||||
},
|
||||
Body: bodystructbody1,
|
||||
}
|
||||
bodystructure1 := bodyxstructure1
|
||||
bodystructure1.RespAttr = "BODYSTRUCTURE"
|
||||
bodyext1 := imapclient.BodyExtension1Part{
|
||||
Disposition: ptr((*string)(nil)),
|
||||
DispositionParams: ptr([][2]string(nil)),
|
||||
Language: ptr([]string(nil)),
|
||||
Location: ptr((*string)(nil)),
|
||||
}
|
||||
bodystructbody1.Ext = &bodyext1
|
||||
bodystructure1.Body = bodystructbody1
|
||||
|
||||
split := strings.SplitN(exampleMsg, "\r\n\r\n", 2)
|
||||
exampleMsgHeader := split[0] + "\r\n\r\n"
|
||||
@ -72,136 +92,188 @@ func TestFetch(t *testing.T) {
|
||||
headerSplit := strings.SplitN(exampleMsgHeader, "\r\n", 2)
|
||||
dateheader1 := imapclient.FetchBody{RespAttr: "BODY[HEADER.FIELDS (Date)]", Section: "HEADER.FIELDS (Date)", Body: headerSplit[0] + "\r\n\r\n"}
|
||||
nodateheader1 := imapclient.FetchBody{RespAttr: "BODY[HEADER.FIELDS.NOT (Date)]", Section: "HEADER.FIELDS.NOT (Date)", Body: headerSplit[1]}
|
||||
date1header1 := imapclient.FetchBody{RespAttr: "BODY[1.HEADER.FIELDS (Date)]", Section: "1.HEADER.FIELDS (Date)", Body: headerSplit[0] + "\r\n\r\n"}
|
||||
nodate1header1 := imapclient.FetchBody{RespAttr: "BODY[1.HEADER.FIELDS.NOT (Date)]", Section: "1.HEADER.FIELDS.NOT (Date)", Body: headerSplit[1]}
|
||||
mime1 := imapclient.FetchBody{RespAttr: "BODY[1.MIME]", Section: "1.MIME", Body: "MIME-Version: 1.0\r\nContent-Type: TEXT/PLAIN; CHARSET=US-ASCII\r\n\r\n"}
|
||||
mime1 := imapclient.FetchBody{RespAttr: "BODY[1.MIME]", Section: "1.MIME", Body: "Content-Type: TEXT/PLAIN; CHARSET=US-ASCII\r\n"}
|
||||
|
||||
flagsSeen := imapclient.FetchFlags{`\Seen`}
|
||||
|
||||
tc.transactf("ok", "fetch 1 all")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, date1, rfcsize1, env1, noflags}})
|
||||
if !uidonly {
|
||||
tc.transactf("ok", "fetch 1 all")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, date1, rfcsize1, env1, noflags))
|
||||
|
||||
tc.transactf("ok", "fetch 1 fast")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, date1, rfcsize1, noflags}})
|
||||
tc.transactf("ok", "fetch 1 fast")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, date1, rfcsize1, noflags))
|
||||
|
||||
tc.transactf("ok", "fetch 1 full")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, date1, rfcsize1, env1, bodyxstructure1, noflags}})
|
||||
tc.transactf("ok", "fetch 1 full")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, date1, rfcsize1, env1, bodyxstructure1, noflags))
|
||||
|
||||
tc.transactf("ok", "fetch 1 flags")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, noflags}})
|
||||
tc.transactf("ok", "fetch 1 flags")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, noflags))
|
||||
|
||||
tc.transactf("ok", "fetch 1 bodystructure")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, bodystructure1}})
|
||||
tc.transactf("ok", "fetch 1 bodystructure")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, bodystructure1))
|
||||
|
||||
// Should be returned unmodified, because there is no content-transfer-encoding.
|
||||
tc.transactf("ok", "fetch 1 binary[]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, binary1, flagsSeen}})
|
||||
// Should be returned unmodified, because there is no content-transfer-encoding.
|
||||
tc.transactf("ok", "fetch 1 binary[]")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, binary1, noflags))
|
||||
tc.transactf("ok", "noop")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, flagsSeen))
|
||||
|
||||
tc.transactf("ok", "fetch 1 binary[1]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, binarypart1}}) // Seen flag not changed.
|
||||
tc.transactf("ok", "fetch 1 binary[1]")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, binarypart1)) // Seen flag not changed.
|
||||
|
||||
tc.client.StoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "fetch 1 binary[]<1.1>")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, binarypartial1, flagsSeen}})
|
||||
tc.client.MSNStoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "uid fetch 1 binary[]<1.1>")
|
||||
tc.xuntagged(
|
||||
tc.untaggedFetch(1, 1, binarypartial1, noflags),
|
||||
tc.untaggedFetch(1, 1, flagsSeen), // For UID FETCH, we get the flags during the command.
|
||||
)
|
||||
|
||||
tc.client.StoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "fetch 1 binary[1]<1.1>")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, binarypartpartial1, flagsSeen}})
|
||||
tc.client.MSNStoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "fetch 1 binary[1]<1.1>")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, binarypartpartial1, noflags))
|
||||
tc.transactf("ok", "noop")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, flagsSeen))
|
||||
|
||||
tc.client.StoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "fetch 1 binary[]<10000.10001>")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, binaryend1, flagsSeen}})
|
||||
tc.client.MSNStoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "fetch 1 binary[]<10000.10001>")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, binaryend1, noflags))
|
||||
tc.transactf("ok", "noop")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, flagsSeen))
|
||||
|
||||
tc.client.StoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "fetch 1 binary[1]<10000.10001>")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, binarypartend1, flagsSeen}})
|
||||
tc.client.MSNStoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "fetch 1 binary[1]<10000.10001>")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, binarypartend1, noflags))
|
||||
tc.transactf("ok", "noop")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, flagsSeen))
|
||||
|
||||
tc.client.StoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "fetch 1 binary.size[]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, binarysize1}})
|
||||
tc.transactf("ok", "fetch 1 binary.size[]")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, binarysize1))
|
||||
|
||||
tc.transactf("ok", "fetch 1 binary.size[1]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, binarysizepart1}})
|
||||
tc.transactf("ok", "fetch 1 binary.size[1]")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, binarysizepart1))
|
||||
|
||||
tc.client.StoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "fetch 1 body[]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, body1, flagsSeen}})
|
||||
tc.transactf("ok", "fetch 1 body[]<1.2>")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, bodyoff1}}) // Already seen.
|
||||
tc.client.MSNStoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "fetch 1 body[]")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, body1, noflags))
|
||||
tc.transactf("ok", "noop")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, flagsSeen))
|
||||
tc.transactf("ok", "fetch 1 body[]<1.2>")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, bodyoff1)) // Already seen.
|
||||
tc.transactf("ok", "noop")
|
||||
tc.xuntagged() // Already seen.
|
||||
|
||||
tc.client.StoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "fetch 1 body[1]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, bodypart1, flagsSeen}})
|
||||
tc.client.MSNStoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "fetch 1 body[1]")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, bodypart1, noflags))
|
||||
tc.transactf("ok", "noop")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, flagsSeen))
|
||||
|
||||
tc.client.StoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "fetch 1 body[1]<1.2>")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, body1off1, flagsSeen}})
|
||||
tc.client.MSNStoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "fetch 1 body[1]<1.2>")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, body1off1, noflags))
|
||||
tc.transactf("ok", "noop")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, flagsSeen))
|
||||
|
||||
tc.client.StoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "fetch 1 body[1]<100000.100000>")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, bodyend1, flagsSeen}})
|
||||
tc.client.MSNStoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "fetch 1 body[1]<100000.100000>")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, bodyend1, noflags))
|
||||
tc.transactf("ok", "noop")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, flagsSeen))
|
||||
|
||||
tc.client.StoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "fetch 1 body[header]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, bodyheader1, flagsSeen}})
|
||||
tc.client.MSNStoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "fetch 1 body[header]")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, bodyheader1, noflags))
|
||||
tc.transactf("ok", "noop")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, flagsSeen))
|
||||
|
||||
tc.client.StoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "fetch 1 body[text]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, bodytext1, flagsSeen}})
|
||||
tc.client.MSNStoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "fetch 1 body[text]")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, bodytext1, noflags))
|
||||
tc.transactf("ok", "noop")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, flagsSeen))
|
||||
|
||||
// equivalent to body.peek[header], ../rfc/3501:3183
|
||||
tc.client.StoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "fetch 1 rfc822.header")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, rfcheader1}})
|
||||
// equivalent to body.peek[header], ../rfc/3501:3183
|
||||
tc.transactf("ok", "fetch 1 rfc822.header")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, rfcheader1))
|
||||
|
||||
// equivalent to body[text], ../rfc/3501:3199
|
||||
tc.client.StoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "fetch 1 rfc822.text")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, rfctext1, flagsSeen}})
|
||||
// equivalent to body[text], ../rfc/3501:3199
|
||||
tc.client.MSNStoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "fetch 1 rfc822.text")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, rfctext1, noflags))
|
||||
tc.transactf("ok", "noop")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, flagsSeen))
|
||||
|
||||
// equivalent to body[], ../rfc/3501:3179
|
||||
tc.client.StoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "fetch 1 rfc822")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, rfc1, flagsSeen}})
|
||||
// equivalent to body[], ../rfc/3501:3179
|
||||
tc.client.MSNStoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "fetch 1 rfc822")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, rfc1, noflags))
|
||||
tc.transactf("ok", "noop")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, flagsSeen))
|
||||
|
||||
// With PEEK, we should not get the \Seen flag.
|
||||
tc.client.StoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "fetch 1 body.peek[]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, body1}})
|
||||
// With PEEK, we should not get the \Seen flag.
|
||||
tc.client.MSNStoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "fetch 1 body.peek[]")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, body1))
|
||||
|
||||
tc.transactf("ok", "fetch 1 binary.peek[]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, binary1}})
|
||||
tc.transactf("ok", "fetch 1 binary.peek[]")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, binary1))
|
||||
|
||||
// HEADER.FIELDS and .NOT
|
||||
tc.transactf("ok", "fetch 1 body.peek[header.fields (date)]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, dateheader1}})
|
||||
tc.transactf("ok", "fetch 1 body.peek[header.fields.not (date)]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, nodateheader1}})
|
||||
// For non-multipart messages, 1 means the whole message. ../rfc/9051:4481
|
||||
tc.transactf("ok", "fetch 1 body.peek[1.header.fields (date)]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, date1header1}})
|
||||
tc.transactf("ok", "fetch 1 body.peek[1.header.fields.not (date)]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, nodate1header1}})
|
||||
// HEADER.FIELDS and .NOT
|
||||
tc.transactf("ok", "fetch 1 body.peek[header.fields (date)]")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, dateheader1))
|
||||
tc.transactf("ok", "fetch 1 body.peek[header.fields.not (date)]")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, nodateheader1))
|
||||
// For non-multipart messages, 1 means the whole message, but since it's not of
|
||||
// type message/{rfc822,global} (a message), you can't get the message headers.
|
||||
// ../rfc/9051:4481
|
||||
tc.transactf("no", "fetch 1 body.peek[1.header]")
|
||||
|
||||
// MIME, part 1 for non-multipart messages is the message itself. ../rfc/9051:4481
|
||||
tc.transactf("ok", "fetch 1 body.peek[1.mime]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, mime1}})
|
||||
// MIME, part 1 for non-multipart messages is the message itself. ../rfc/9051:4481
|
||||
tc.transactf("ok", "fetch 1 body.peek[1.mime]")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, mime1))
|
||||
|
||||
// Missing sequence number. ../rfc/9051:7018
|
||||
tc.transactf("bad", "fetch 2 body[]")
|
||||
// Missing sequence number. ../rfc/9051:7018
|
||||
tc.transactf("bad", "fetch 2 body[]")
|
||||
|
||||
tc.transactf("ok", "fetch 1:1 body[]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, body1, flagsSeen}})
|
||||
tc.client.MSNStoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "fetch 1:1 body[]")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, body1, noflags))
|
||||
tc.transactf("ok", "noop")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, flagsSeen))
|
||||
} else {
|
||||
tc.client.UIDStoreFlagsAdd("1", true, `\Seen`)
|
||||
tc.transactf("ok", "noop")
|
||||
}
|
||||
|
||||
// UID fetch
|
||||
tc.transactf("ok", "uid fetch 1 body[]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, body1}})
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, body1))
|
||||
|
||||
// UID fetch
|
||||
tc.transactf("ok", "uid fetch 2 body[]")
|
||||
tc.xuntagged()
|
||||
|
||||
// Test some invalid syntax.
|
||||
// SAVEDATE
|
||||
tc.transactf("ok", "uid fetch 1 savedate")
|
||||
// Fetch exact SaveDate we'll be expecting from server.
|
||||
var saveDate time.Time
|
||||
err = tc.account.DB.Read(ctxbg, func(tx *bstore.Tx) error {
|
||||
inbox, err := tc.account.MailboxFind(tx, "Inbox")
|
||||
tc.check(err, "get inbox")
|
||||
if inbox == nil {
|
||||
t.Fatalf("missing inbox")
|
||||
}
|
||||
m, err := bstore.QueryTx[store.Message](tx).FilterNonzero(store.Message{MailboxID: inbox.ID, UID: store.UID(uid1)}).Get()
|
||||
tc.check(err, "get message")
|
||||
if m.SaveDate == nil {
|
||||
t.Fatalf("zero savedate for message")
|
||||
}
|
||||
saveDate = m.SaveDate.Truncate(time.Second)
|
||||
return nil
|
||||
})
|
||||
tc.check(err, "get savedate")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, imapclient.FetchSaveDate{SaveDate: &saveDate}))
|
||||
|
||||
// Test some invalid syntax. Also invalid for uidonly.
|
||||
tc.transactf("bad", "fetch")
|
||||
tc.transactf("bad", "fetch ")
|
||||
tc.transactf("bad", "fetch ")
|
||||
@ -224,25 +296,38 @@ func TestFetch(t *testing.T) {
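The hunk below fills in the expected BODYSTRUCTURE for a more complex multipart message, using a ptr(...) helper for the optional body-extension fields. The helper's definition is not part of this diff; presumably it is a small generic function along these lines (an assumption based only on its call sites):

// Assumed helper: returns a pointer to any value. It lets the test write
// "present but nil" extension fields as ptr((*string)(nil)) and doubly
// optional ones as ptr(ptr("inline")).
func ptr[T any](v T) *T { return &v }

With that, ptr(ptr("inline")) yields a **string whose outer pointer is non-nil, which is how the expected values distinguish an absent disposition from one that is present but NIL.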
|
||||
tc.transactf("bad", "fetch 1 body[header.fields.not ()]") // List must be non-empty.
|
||||
tc.transactf("bad", "fetch 1 body[mime]") // MIME must be prefixed with a number. ../rfc/9051:4497
|
||||
|
||||
tc.transactf("no", "fetch 1 body[2]") // No such part.
|
||||
if !uidonly {
|
||||
tc.transactf("no", "fetch 1 body[2]") // No such part.
|
||||
}
|
||||
|
||||
// Add more complex message.
|
||||
|
||||
uid2 := imapclient.FetchUID(2)
|
||||
bodystructure2 := imapclient.FetchBodystructure{
|
||||
RespAttr: "BODYSTRUCTURE",
|
||||
Body: imapclient.BodyTypeMpart{
|
||||
Bodies: []any{
|
||||
imapclient.BodyTypeBasic{BodyFields: imapclient.BodyFields{Octets: 275}},
|
||||
imapclient.BodyTypeText{MediaType: "TEXT", MediaSubtype: "PLAIN", BodyFields: imapclient.BodyFields{Params: [][2]string{{"CHARSET", "US-ASCII"}}, Octets: 114}, Lines: 3},
|
||||
imapclient.BodyTypeBasic{BodyFields: imapclient.BodyFields{Octets: 275}, Ext: &bodyext1},
|
||||
imapclient.BodyTypeText{MediaType: "TEXT", MediaSubtype: "PLAIN", BodyFields: imapclient.BodyFields{Params: [][2]string{{"CHARSET", "US-ASCII"}}, Octets: 114}, Lines: 3, Ext: &bodyext1},
|
||||
imapclient.BodyTypeMpart{
|
||||
Bodies: []any{
|
||||
imapclient.BodyTypeBasic{MediaType: "AUDIO", MediaSubtype: "BASIC", BodyFields: imapclient.BodyFields{CTE: "BASE64", Octets: 22}},
|
||||
imapclient.BodyTypeBasic{MediaType: "IMAGE", MediaSubtype: "JPEG", BodyFields: imapclient.BodyFields{CTE: "BASE64"}},
|
||||
imapclient.BodyTypeBasic{MediaType: "AUDIO", MediaSubtype: "BASIC", BodyFields: imapclient.BodyFields{CTE: "BASE64", Octets: 22}, Ext: &bodyext1},
|
||||
imapclient.BodyTypeBasic{MediaType: "IMAGE", MediaSubtype: "JPEG", BodyFields: imapclient.BodyFields{CTE: "BASE64"}, Ext: &imapclient.BodyExtension1Part{
|
||||
Disposition: ptr(ptr("inline")),
|
||||
DispositionParams: ptr([][2]string{{"filename", "image.jpg"}}),
|
||||
Language: ptr([]string(nil)),
|
||||
Location: ptr((*string)(nil)),
|
||||
}},
|
||||
},
|
||||
MediaSubtype: "PARALLEL",
|
||||
Ext: &imapclient.BodyExtensionMpart{
|
||||
Params: [][2]string{{"BOUNDARY", "unique-boundary-2"}},
|
||||
Disposition: ptr((*string)(nil)), // Present but nil.
|
||||
DispositionParams: ptr([][2]string(nil)),
|
||||
Language: ptr([]string(nil)),
|
||||
Location: ptr((*string)(nil)),
|
||||
},
|
||||
},
|
||||
imapclient.BodyTypeText{MediaType: "TEXT", MediaSubtype: "ENRICHED", BodyFields: imapclient.BodyFields{Octets: 145}, Lines: 5},
|
||||
imapclient.BodyTypeText{MediaType: "TEXT", MediaSubtype: "ENRICHED", BodyFields: imapclient.BodyFields{Octets: 145}, Lines: 5, Ext: &bodyext1},
|
||||
imapclient.BodyTypeMsg{
|
||||
MediaType: "MESSAGE",
|
||||
MediaSubtype: "RFC822",
|
||||
@ -255,49 +340,64 @@ func TestFetch(t *testing.T) {
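Further down, the same BODYSTRUCTURE is fetched with several sequence sets that all select the same messages: 1:2, 1,2, 2:1, 1:*, *:1, and *. The equivalence follows from IMAP sequence-set semantics (range endpoints may appear in either order, and * stands for the highest message sequence number). The sketch below restates those rules in Go for reference; it is not mox's parser and handles errors only minimally.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// expandSeqSet expands an IMAP sequence set such as "2:1" or "1:*" into
// message sequence numbers, given the highest sequence number in the mailbox.
func expandSeqSet(set string, highest uint32) []uint32 {
	num := func(s string) uint32 {
		if s == "*" {
			return highest
		}
		v, _ := strconv.ParseUint(s, 10, 32)
		return uint32(v)
	}
	var seqs []uint32
	for _, part := range strings.Split(set, ",") {
		lo, hi := num(part), num(part)
		if s0, s1, ok := strings.Cut(part, ":"); ok {
			lo, hi = num(s0), num(s1)
			if lo > hi {
				lo, hi = hi, lo // "2:1" and "*:1" select the same messages as "1:2" and "1:*".
			}
		}
		for i := lo; i <= hi; i++ {
			seqs = append(seqs, i)
		}
	}
	return seqs
}

func main() {
	fmt.Println(expandSeqSet("2:1", 2)) // [1 2]
	fmt.Println(expandSeqSet("*:1", 2)) // [1 2]
	fmt.Println(expandSeqSet("*", 2))   // [2]
}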
|
||||
To: []imapclient.Address{{Name: "mox", Adl: "", Mailbox: "info", Host: "mox.example"}},
|
||||
},
|
||||
Bodystructure: imapclient.BodyTypeText{
|
||||
MediaType: "TEXT", MediaSubtype: "PLAIN", BodyFields: imapclient.BodyFields{Params: [][2]string{{"CHARSET", "ISO-8859-1"}}, CTE: "QUOTED-PRINTABLE", Octets: 51}, Lines: 1},
|
||||
MediaType: "TEXT", MediaSubtype: "PLAIN", BodyFields: imapclient.BodyFields{Params: [][2]string{{"CHARSET", "ISO-8859-1"}}, CTE: "QUOTED-PRINTABLE", Octets: 51}, Lines: 1, Ext: &bodyext1},
|
||||
Lines: 7,
|
||||
Ext: &imapclient.BodyExtension1Part{
|
||||
MD5: ptr("MDEyMzQ1Njc4OWFiY2RlZjAxMjM0NTY3ODlhYmNkZWY="),
|
||||
Disposition: ptr((*string)(nil)),
|
||||
DispositionParams: ptr([][2]string(nil)),
|
||||
Language: ptr([]string{"en", "de"}),
|
||||
Location: ptr(ptr("http://localhost")),
|
||||
},
|
||||
},
|
||||
},
|
||||
MediaSubtype: "MIXED",
|
||||
Ext: &imapclient.BodyExtensionMpart{
|
||||
Params: [][2]string{{"BOUNDARY", "unique-boundary-1"}},
|
||||
Disposition: ptr((*string)(nil)), // Present but nil.
|
||||
DispositionParams: ptr([][2]string(nil)),
|
||||
Language: ptr([]string(nil)),
|
||||
Location: ptr((*string)(nil)),
|
||||
},
|
||||
},
|
||||
}
|
||||
tc.client.Append("inbox", nil, &received, []byte(nestedMessage))
|
||||
tc.transactf("ok", "fetch 2 bodystructure")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, bodystructure2}})
|
||||
tc.client.Append("inbox", makeAppendTime(nestedMessage, received))
|
||||
tc.transactf("ok", "uid fetch 2 bodystructure")
|
||||
tc.xuntagged(tc.untaggedFetch(2, 2, bodystructure2))
|
||||
|
||||
// Multiple responses.
|
||||
tc.transactf("ok", "fetch 1:2 bodystructure")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, bodystructure1}}, imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, bodystructure2}})
|
||||
tc.transactf("ok", "fetch 1,2 bodystructure")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, bodystructure1}}, imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, bodystructure2}})
|
||||
tc.transactf("ok", "fetch 2:1 bodystructure")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, bodystructure1}}, imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, bodystructure2}})
|
||||
tc.transactf("ok", "fetch 1:* bodystructure")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, bodystructure1}}, imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, bodystructure2}})
|
||||
tc.transactf("ok", "fetch *:1 bodystructure")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, bodystructure1}}, imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, bodystructure2}})
|
||||
tc.transactf("ok", "fetch *:2 bodystructure")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, bodystructure2}})
|
||||
|
||||
tc.transactf("ok", "fetch * bodystructure") // Highest msgseq.
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, bodystructure2}})
|
||||
if !uidonly {
|
||||
tc.transactf("ok", "fetch 1:2 bodystructure")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, bodystructure1), tc.untaggedFetch(2, 2, bodystructure2))
|
||||
tc.transactf("ok", "fetch 1,2 bodystructure")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, bodystructure1), tc.untaggedFetch(2, 2, bodystructure2))
|
||||
tc.transactf("ok", "fetch 2:1 bodystructure")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, bodystructure1), tc.untaggedFetch(2, 2, bodystructure2))
|
||||
tc.transactf("ok", "fetch 1:* bodystructure")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, bodystructure1), tc.untaggedFetch(2, 2, bodystructure2))
|
||||
tc.transactf("ok", "fetch *:1 bodystructure")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, bodystructure1), tc.untaggedFetch(2, 2, bodystructure2))
|
||||
tc.transactf("ok", "fetch *:2 bodystructure")
|
||||
tc.xuntagged(tc.untaggedFetch(2, 2, bodystructure2))
|
||||
tc.transactf("ok", "fetch * bodystructure") // Highest msgseq.
|
||||
tc.xuntagged(tc.untaggedFetch(2, 2, bodystructure2))
|
||||
}
|
||||
|
||||
tc.transactf("ok", "uid fetch 1:* bodystructure")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, bodystructure1}}, imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, bodystructure2}})
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, bodystructure1), tc.untaggedFetch(2, 2, bodystructure2))
|
||||
|
||||
tc.transactf("ok", "uid fetch 1:2 bodystructure")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, bodystructure1}}, imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, bodystructure2}})
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, bodystructure1), tc.untaggedFetch(2, 2, bodystructure2))
|
||||
|
||||
tc.transactf("ok", "uid fetch 1,2 bodystructure")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, bodystructure1}}, imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, bodystructure2}})
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, bodystructure1), tc.untaggedFetch(2, 2, bodystructure2))
|
||||
|
||||
tc.transactf("ok", "uid fetch 2:2 bodystructure")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, bodystructure2}})
|
||||
tc.xuntagged(tc.untaggedFetch(2, 2, bodystructure2))
|
||||
|
||||
// todo: read the bodies/headers of the parts, and of the nested message.
|
||||
tc.transactf("ok", "fetch 2 body.peek[]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, imapclient.FetchBody{RespAttr: "BODY[]", Body: nestedMessage}}})
|
||||
tc.transactf("ok", "uid fetch 2 body.peek[]")
|
||||
tc.xuntagged(tc.untaggedFetch(2, 2, imapclient.FetchBody{RespAttr: "BODY[]", Body: nestedMessage}))
|
||||
|
||||
part1 := tocrlf(` ... Some text appears here ...
|
||||
|
||||
@ -307,22 +407,22 @@ func TestFetch(t *testing.T) {
|
||||
It could have been done with explicit typing as in the
|
||||
next part.]
|
||||
`)
|
||||
tc.transactf("ok", "fetch 2 body.peek[1]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, imapclient.FetchBody{RespAttr: "BODY[1]", Section: "1", Body: part1}}})
|
||||
tc.transactf("ok", "uid fetch 2 body.peek[1]")
|
||||
tc.xuntagged(tc.untaggedFetch(2, 2, imapclient.FetchBody{RespAttr: "BODY[1]", Section: "1", Body: part1}))
|
||||
|
||||
tc.transactf("no", "fetch 2 binary.peek[3]") // Only allowed on leaf parts, not multiparts.
|
||||
tc.transactf("no", "fetch 2 binary.peek[5]") // Only allowed on leaf parts, not messages.
|
||||
tc.transactf("no", "uid fetch 2 binary.peek[3]") // Only allowed on leaf parts, not multiparts.
|
||||
tc.transactf("no", "uid fetch 2 binary.peek[5]") // Only allowed on leaf parts, not messages.
|
||||
|
||||
part31 := "aGVsbG8NCndvcmxkDQo=\r\n"
|
||||
part31dec := "hello\r\nworld\r\n"
|
||||
tc.transactf("ok", "fetch 2 binary.size[3.1]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, imapclient.FetchBinarySize{RespAttr: "BINARY.SIZE[3.1]", Parts: []uint32{3, 1}, Size: int64(len(part31dec))}}})
|
||||
tc.transactf("ok", "uid fetch 2 binary.size[3.1]")
|
||||
tc.xuntagged(tc.untaggedFetch(2, 2, imapclient.FetchBinarySize{RespAttr: "BINARY.SIZE[3.1]", Parts: []uint32{3, 1}, Size: int64(len(part31dec))}))
|
||||
|
||||
tc.transactf("ok", "fetch 2 body.peek[3.1]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, imapclient.FetchBody{RespAttr: "BODY[3.1]", Section: "3.1", Body: part31}}})
|
||||
tc.transactf("ok", "uid fetch 2 body.peek[3.1]")
|
||||
tc.xuntagged(tc.untaggedFetch(2, 2, imapclient.FetchBody{RespAttr: "BODY[3.1]", Section: "3.1", Body: part31}))
|
||||
|
||||
tc.transactf("ok", "fetch 2 binary.peek[3.1]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, imapclient.FetchBinary{RespAttr: "BINARY[3.1]", Parts: []uint32{3, 1}, Data: part31dec}}})
|
||||
tc.transactf("ok", "uid fetch 2 binary.peek[3.1]")
|
||||
tc.xuntagged(tc.untaggedFetch(2, 2, imapclient.FetchBinary{RespAttr: "BINARY[3.1]", Parts: []uint32{3, 1}, Data: part31dec}))
|
||||
|
||||
part3 := tocrlf(`--unique-boundary-2
|
||||
Content-Type: audio/basic
|
||||
@ -333,19 +433,18 @@ aGVsbG8NCndvcmxkDQo=
|
||||
--unique-boundary-2
|
||||
Content-Type: image/jpeg
|
||||
Content-Transfer-Encoding: base64
|
||||
Content-Disposition: inline; filename=image.jpg
|
||||
|
||||
|
||||
--unique-boundary-2--
|
||||
|
||||
`)
|
||||
tc.transactf("ok", "fetch 2 body.peek[3]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, imapclient.FetchBody{RespAttr: "BODY[3]", Section: "3", Body: part3}}})
|
||||
tc.transactf("ok", "uid fetch 2 body.peek[3]")
|
||||
tc.xuntagged(tc.untaggedFetch(2, 2, imapclient.FetchBody{RespAttr: "BODY[3]", Section: "3", Body: part3}))
|
||||
|
||||
part2mime := tocrlf(`Content-type: text/plain; charset=US-ASCII
|
||||
|
||||
`)
|
||||
tc.transactf("ok", "fetch 2 body.peek[2.mime]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, imapclient.FetchBody{RespAttr: "BODY[2.MIME]", Section: "2.MIME", Body: part2mime}}})
|
||||
part2mime := "Content-type: text/plain; charset=US-ASCII\r\n"
|
||||
tc.transactf("ok", "uid fetch 2 body.peek[2.mime]")
|
||||
tc.xuntagged(tc.untaggedFetch(2, 2, imapclient.FetchBody{RespAttr: "BODY[2.MIME]", Section: "2.MIME", Body: part2mime}))
|
||||
|
||||
part5 := tocrlf(`From: info@mox.example
|
||||
To: mox <info@mox.example>
|
||||
@ -355,8 +454,8 @@ Content-Transfer-Encoding: Quoted-printable
|
||||
|
||||
... Additional text in ISO-8859-1 goes here ...
|
||||
`)
|
||||
tc.transactf("ok", "fetch 2 body.peek[5]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, imapclient.FetchBody{RespAttr: "BODY[5]", Section: "5", Body: part5}}})
|
||||
tc.transactf("ok", "uid fetch 2 body.peek[5]")
|
||||
tc.xuntagged(tc.untaggedFetch(2, 2, imapclient.FetchBody{RespAttr: "BODY[5]", Section: "5", Body: part5}))
|
||||
|
||||
part5header := tocrlf(`From: info@mox.example
|
||||
To: mox <info@mox.example>
|
||||
@ -365,39 +464,101 @@ Content-Type: Text/plain; charset=ISO-8859-1
|
||||
Content-Transfer-Encoding: Quoted-printable
|
||||
|
||||
`)
|
||||
tc.transactf("ok", "fetch 2 body.peek[5.header]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, imapclient.FetchBody{RespAttr: "BODY[5.HEADER]", Section: "5.HEADER", Body: part5header}}})
|
||||
|
||||
part5mime := tocrlf(`Content-Type: Text/plain; charset=ISO-8859-1
|
||||
Content-Transfer-Encoding: Quoted-printable
|
||||
tc.transactf("ok", "uid fetch 2 body.peek[5.header]")
|
||||
tc.xuntagged(tc.untaggedFetch(2, 2, imapclient.FetchBody{RespAttr: "BODY[5.HEADER]", Section: "5.HEADER", Body: part5header}))
|
||||
|
||||
part5mime := tocrlf(`Content-Type: message/rfc822
|
||||
Content-MD5: MDEyMzQ1Njc4OWFiY2RlZjAxMjM0NTY3ODlhYmNkZWY=
|
||||
Content-Language: en,de
|
||||
Content-Location: http://localhost
|
||||
`)
|
||||
tc.transactf("ok", "fetch 2 body.peek[5.mime]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, imapclient.FetchBody{RespAttr: "BODY[5.MIME]", Section: "5.MIME", Body: part5mime}}})
|
||||
tc.transactf("ok", "uid fetch 2 body.peek[5.mime]")
|
||||
tc.xuntagged(tc.untaggedFetch(2, 2, imapclient.FetchBody{RespAttr: "BODY[5.MIME]", Section: "5.MIME", Body: part5mime}))
|
||||
|
||||
part5text := " ... Additional text in ISO-8859-1 goes here ...\r\n"
|
||||
tc.transactf("ok", "fetch 2 body.peek[5.text]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, imapclient.FetchBody{RespAttr: "BODY[5.TEXT]", Section: "5.TEXT", Body: part5text}}})
|
||||
|
||||
tc.transactf("ok", "fetch 2 body.peek[5.1]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, imapclient.FetchBody{RespAttr: "BODY[5.1]", Section: "5.1", Body: part5text}}})
|
||||
tc.transactf("ok", "uid fetch 2 body.peek[5.text]")
|
||||
tc.xuntagged(tc.untaggedFetch(2, 2, imapclient.FetchBody{RespAttr: "BODY[5.TEXT]", Section: "5.TEXT", Body: part5text}))
|
||||
|
||||
part5body := " ... Additional text in ISO-8859-1 goes here ...\r\n"
|
||||
tc.transactf("ok", "uid fetch 2 body.peek[5.1]")
|
||||
tc.xuntagged(tc.untaggedFetch(2, 2, imapclient.FetchBody{RespAttr: "BODY[5.1]", Section: "5.1", Body: part5body}))
|
||||
|
||||
// 5.1 addresses the embedded message's text as a plain part rather than as a
// message/rfc822 part, so we cannot request a header for it.
|
||||
tc.transactf("no", "uid fetch 2 body.peek[5.1.header]")
|
||||
|
||||
// In case of EXAMINE instead of SELECT, we should not be seeing any changed \Seen flags for non-peek commands.
|
||||
tc.client.StoreFlagsClear("1", true, `\Seen`)
|
||||
tc.client.UIDStoreFlagsClear("1", true, `\Seen`)
|
||||
tc.client.Unselect()
|
||||
tc.client.Examine("inbox")
|
||||
|
||||
tc.transactf("ok", "fetch 1 binary[]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, binary1}})
|
||||
// Preview
|
||||
preview := "Hello Joe, do you think we can meet at 3:30 tomorrow?"
|
||||
tc.transactf("ok", "uid fetch 1 preview")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, imapclient.FetchPreview{Preview: &preview}))
|
||||
|
||||
tc.transactf("ok", "fetch 1 body[]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, body1}})
|
||||
tc.transactf("ok", "uid fetch 1 preview (lazy)")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, imapclient.FetchPreview{Preview: &preview}))
|
||||
|
||||
tc.transactf("ok", "fetch 1 rfc822.text")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, rfctext1}})
|
||||
// On-demand preview and saving on first request.
|
||||
err = tc.account.DB.Write(ctxbg, func(tx *bstore.Tx) error {
|
||||
m := store.Message{ID: 1}
|
||||
err := tx.Get(&m)
|
||||
tcheck(t, err, "get message")
|
||||
if m.UID != 1 {
|
||||
t.Fatalf("uid %d instead of 1", m.UID)
|
||||
}
|
||||
m.Preview = nil
|
||||
err = tx.Update(&m)
|
||||
tcheck(t, err, "remove preview from message")
|
||||
return nil
|
||||
})
|
||||
tcheck(t, err, "remove preview from database")
|
||||
|
||||
tc.transactf("ok", "fetch 1 rfc822")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, rfc1}})
|
||||
tc.transactf("ok", "uid fetch 1 preview")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, imapclient.FetchPreview{Preview: &preview}))
|
||||
m := store.Message{ID: 1}
|
||||
err = tc.account.DB.Get(ctxbg, &m)
|
||||
tcheck(t, err, "get message")
|
||||
if m.Preview == nil {
|
||||
t.Fatalf("preview missing")
|
||||
} else if *m.Preview != preview+"\n" {
|
||||
t.Fatalf("got preview %q, expected %q", *m.Preview, preview+"\n")
|
||||
}
|
||||
|
||||
tc.transactf("bad", "uid fetch 1 preview (bogus)")
|
||||
|
||||
// Start a second session. Use it to remove the message. First session should still
|
||||
// be able to access the messages.
|
||||
tc2 := startNoSwitchboard(t, uidonly)
|
||||
defer tc2.closeNoWait()
|
||||
tc2.login("mjl@mox.example", password0)
|
||||
tc2.client.Select("inbox")
|
||||
tc2.client.UIDStoreFlagsSet("1", true, `\Deleted`)
|
||||
tc2.client.Expunge()
|
||||
tc2.client.Logout()
|
||||
|
||||
if uidonly {
|
||||
tc.transactf("ok", "uid fetch 1 binary[]")
|
||||
tc.xuntagged(
|
||||
tc.untaggedFetch(1, 1, imapclient.FetchFlags{`\Deleted`}),
|
||||
imapclient.UntaggedVanished{UIDs: xparseNumSet("1")},
|
||||
)
|
||||
// Message no longer available in session.
|
||||
} else {
|
||||
tc.transactf("ok", "fetch 1 binary[]")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, binary1))
|
||||
|
||||
tc.transactf("ok", "fetch 1 body[]")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, body1))
|
||||
|
||||
tc.transactf("ok", "fetch 1 rfc822.text")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, rfctext1))
|
||||
|
||||
tc.transactf("ok", "fetch 1 rfc822")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, rfc1))
|
||||
}
|
||||
|
||||
tc.client.Logout()
|
||||
}
|
||||
|
@ -5,6 +5,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log/slog"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
@ -59,33 +60,11 @@ func FuzzServer(f *testing.F) {
|
||||
f.Add(tag + cmd)
|
||||
}
|
||||
|
||||
log := mlog.New("imapserver", nil)
|
||||
mox.Context = ctxbg
|
||||
mox.ConfigStaticPath = filepath.FromSlash("../testdata/imapserverfuzz/mox.conf")
|
||||
mox.MustLoadConfig(true, false)
|
||||
dataDir := mox.ConfigDirPath(mox.Conf.Static.DataDir)
|
||||
os.RemoveAll(dataDir)
|
||||
acc, err := store.OpenAccount(log, "mjl")
|
||||
if err != nil {
|
||||
f.Fatalf("open account: %v", err)
|
||||
}
|
||||
defer func() {
|
||||
acc.Close()
|
||||
acc.CheckClosed()
|
||||
}()
|
||||
err = acc.SetPassword(log, password0)
|
||||
if err != nil {
|
||||
f.Fatalf("set password: %v", err)
|
||||
}
|
||||
defer store.Switchboard()()
|
||||
|
||||
comm := store.RegisterComm(acc)
|
||||
defer comm.Unregister()
|
||||
|
||||
var cid int64 = 1
|
||||
|
||||
var fl *os.File
|
||||
if false {
|
||||
var err error
|
||||
fl, err = os.OpenFile("fuzz.log", os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
|
||||
if err != nil {
|
||||
f.Fatalf("fuzz log")
|
||||
@ -99,6 +78,34 @@ func FuzzServer(f *testing.F) {
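The hunk below moves all account and store setup from the top of FuzzServer into the f.Fuzz callback: the data directory is removed, store.Init is called, and the account is opened and closed (with WaitClosed) per fuzz input. The generic shape of that pattern, with placeholder fixture code since the mox-specific calls are shown in the hunk itself (assuming the usual os, path/filepath and testing imports):

func FuzzExample(f *testing.F) {
	f.Add("NOOP")
	f.Fuzz(func(t *testing.T, input string) {
		// Per-input setup: rebuild the fixture inside the callback so one
		// input cannot leave state behind for the next. t.TempDir is the
		// stand-in here for removing and re-initializing the data directory.
		dir := t.TempDir()
		if err := os.WriteFile(filepath.Join(dir, "input.txt"), []byte(input), 0o600); err != nil {
			t.Fatalf("write input: %v", err)
		}
		// ... feed input to the system under test ...
	})
}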
|
||||
}
|
||||
|
||||
f.Fuzz(func(t *testing.T, s string) {
|
||||
log := mlog.New("imapserver", nil)
|
||||
mox.ConfigStaticPath = filepath.FromSlash("../testdata/imapserverfuzz/mox.conf")
|
||||
mox.MustLoadConfig(true, false)
|
||||
store.Close() // May not be open, we ignore error.
|
||||
dataDir := mox.ConfigDirPath(mox.Conf.Static.DataDir)
|
||||
os.RemoveAll(dataDir)
|
||||
err := store.Init(ctxbg)
|
||||
if err != nil {
|
||||
t.Fatalf("store init: %v", err)
|
||||
}
|
||||
defer store.Switchboard()()
|
||||
|
||||
acc, err := store.OpenAccount(log, "mjl", false)
|
||||
if err != nil {
|
||||
t.Fatalf("open account: %v", err)
|
||||
}
|
||||
defer func() {
|
||||
acc.Close()
|
||||
acc.WaitClosed()
|
||||
}()
|
||||
err = acc.SetPassword(log, password0)
|
||||
if err != nil {
|
||||
t.Fatalf("set password: %v", err)
|
||||
}
|
||||
|
||||
comm := store.RegisterComm(acc)
|
||||
defer comm.Unregister()
|
||||
|
||||
run := func(cmds []string) {
|
||||
limitersInit() // Reset rate limiters.
|
||||
serverConn, clientConn := net.Pipe()
|
||||
@ -121,19 +128,23 @@ func FuzzServer(f *testing.F) {
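The hunk below tracks an imapclient API change: New now takes a pointer to an Opts struct instead of a bool, and the Commandf/Response pair becomes WriteCommandf/ReadResponse. Pulled out of the diff for readability, the new call pattern used by the fuzzer is roughly:

opts := imapclient.Opts{
	Logger: slog.Default().With("cid", mox.Cid()),
	Error:  func(err error) { panic(err) }, // In the fuzzer, any client-side error is fatal.
}
client, _ := imapclient.New(clientConn, &opts)

client.WriteCommandf("", "%s", cmd) // An empty tag lets the client pick one, as with the old Commandf.
client.ReadResponse()               // Read untagged responses up to and including the tagged result.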
|
||||
|
||||
err := clientConn.SetDeadline(time.Now().Add(time.Second))
|
||||
flog(err, "set client deadline")
|
||||
client, _ := imapclient.New(clientConn, true)
|
||||
opts := imapclient.Opts{
|
||||
Logger: slog.Default().With("cid", mox.Cid()),
|
||||
Error: func(err error) { panic(err) },
|
||||
}
|
||||
client, _ := imapclient.New(clientConn, &opts)
|
||||
|
||||
for _, cmd := range cmds {
|
||||
client.Commandf("", "%s", cmd)
|
||||
client.Response()
|
||||
client.WriteCommandf("", "%s", cmd)
|
||||
client.ReadResponse()
|
||||
}
|
||||
client.Commandf("", "%s", s)
|
||||
client.Response()
|
||||
client.WriteCommandf("", "%s", s)
|
||||
client.ReadResponse()
|
||||
}()
|
||||
|
||||
err = serverConn.SetDeadline(time.Now().Add(time.Second))
|
||||
flog(err, "set server deadline")
|
||||
serve("test", cid, nil, serverConn, false, true)
|
||||
serve("test", cid, nil, serverConn, false, false, true, false, "")
|
||||
cid++
|
||||
}
|
||||
|
||||
|
@ -9,13 +9,14 @@ import (
|
||||
)
|
||||
|
||||
func TestIdle(t *testing.T) {
|
||||
tc1 := start(t)
|
||||
tc1 := start(t, false)
|
||||
defer tc1.close()
|
||||
tc1.client.Login("mjl@mox.example", password0)
|
||||
|
||||
tc2 := startNoSwitchboard(t)
|
||||
defer tc2.close()
|
||||
tc2.client.Login("mjl@mox.example", password0)
|
||||
tc2 := startNoSwitchboard(t, false)
|
||||
defer tc2.closeNoWait()
|
||||
|
||||
tc1.login("mjl@mox.example", password0)
|
||||
tc2.login("mjl@mox.example", password0)
|
||||
|
||||
tc1.transactf("ok", "select inbox")
|
||||
tc2.transactf("ok", "select inbox")
|
||||
|
@ -1,12 +1,14 @@
|
||||
package imapserver
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"path"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/mjl-/bstore"
|
||||
|
||||
"github.com/mjl-/mox/mox-"
|
||||
"github.com/mjl-/mox/store"
|
||||
)
|
||||
|
||||
@ -60,6 +62,7 @@ func (c *conn) cmdList(tag, cmd string, p *parser) {
|
||||
isExtended = isExtended || isList
|
||||
var retSubscribed, retChildren bool
|
||||
var retStatusAttrs []string
|
||||
var retMetadata []string
|
||||
if p.take(" RETURN (") {
|
||||
isExtended = true
|
||||
// ../rfc/9051:6613 ../rfc/9051:6915 ../rfc/9051:7072 ../rfc/9051:6821 ../rfc/5819:95
|
||||
@ -90,6 +93,18 @@ func (c *conn) cmdList(tag, cmd string, p *parser) {
|
||||
retStatusAttrs = append(retStatusAttrs, p.xstatusAtt())
|
||||
}
|
||||
p.xtake(")")
|
||||
case "METADATA":
|
||||
// ../rfc/9590:167
|
||||
p.xspace()
|
||||
p.xtake("(")
|
||||
for {
|
||||
s := p.xmetadataKey()
|
||||
retMetadata = append(retMetadata, s)
|
||||
if !p.space() {
|
||||
break
|
||||
}
|
||||
}
|
||||
p.xtake(")")
|
||||
default:
|
||||
// ../rfc/9051:2398
|
||||
xsyntaxErrorf("bad list return option %q", w)
|
||||
@ -100,7 +115,7 @@ func (c *conn) cmdList(tag, cmd string, p *parser) {
|
||||
|
||||
if !isExtended && reference == "" && patterns[0] == "" {
|
||||
// ../rfc/9051:2277 ../rfc/3501:2221
|
||||
c.bwritelinef(`* LIST () "/" ""`)
|
||||
c.xbwritelinef(`* LIST () "/" ""`)
|
||||
c.ok(tag, cmd)
|
||||
return
|
||||
}
|
||||
@ -117,6 +132,7 @@ func (c *conn) cmdList(tag, cmd string, p *parser) {
|
||||
}
|
||||
re := xmailboxPatternMatcher(reference, patterns)
|
||||
var responseLines []string
|
||||
var respMetadata []concatspace
|
||||
|
||||
c.account.WithRLock(func() {
|
||||
c.xdbread(func(tx *bstore.Tx) {
|
||||
@ -130,10 +146,11 @@ func (c *conn) cmdList(tag, cmd string, p *parser) {
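In the hunk below, walking up the mailbox hierarchy switches from path.Dir (which returns "." at the top level and applies path cleaning) to mox.ParentMailboxName, which terminates with an empty string. That function is not shown in this diff; from its use here it presumably behaves like this sketch (uses the standard strings package):

// parentMailboxName returns the parent of a "/"-separated mailbox name,
// or "" for a top-level mailbox. Illustrative stand-in for mox.ParentMailboxName.
func parentMailboxName(name string) string {
	i := strings.LastIndex(name, "/")
	if i < 0 {
		return ""
	}
	return name[:i]
}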
|
||||
var nameList []string
|
||||
|
||||
q := bstore.QueryTx[store.Mailbox](tx)
|
||||
q.FilterEqual("Expunged", false)
|
||||
err := q.ForEach(func(mb store.Mailbox) error {
|
||||
names[mb.Name] = info{mailbox: &mb}
|
||||
nameList = append(nameList, mb.Name)
|
||||
for p := path.Dir(mb.Name); p != "."; p = path.Dir(p) {
|
||||
for p := mox.ParentMailboxName(mb.Name); p != ""; p = mox.ParentMailboxName(p) {
|
||||
hasChild[p] = true
|
||||
}
|
||||
return nil
|
||||
@ -148,7 +165,7 @@ func (c *conn) cmdList(tag, cmd string, p *parser) {
|
||||
if !ok {
|
||||
nameList = append(nameList, sub.Name)
|
||||
}
|
||||
for p := path.Dir(sub.Name); p != "."; p = path.Dir(p) {
|
||||
for p := mox.ParentMailboxName(sub.Name); p != ""; p = mox.ParentMailboxName(p) {
|
||||
hasSubscribedChild[p] = true
|
||||
}
|
||||
return nil
|
||||
@ -191,39 +208,64 @@ func (c *conn) cmdList(tag, cmd string, p *parser) {
|
||||
flags = append(flags, bare(`\Subscribed`))
|
||||
}
|
||||
if info.mailbox != nil {
|
||||
if info.mailbox.Archive {
|
||||
flags = append(flags, bare(`\Archive`))
|
||||
}
|
||||
if info.mailbox.Draft {
|
||||
flags = append(flags, bare(`\Drafts`))
|
||||
}
|
||||
if info.mailbox.Junk {
|
||||
flags = append(flags, bare(`\Junk`))
|
||||
}
|
||||
if info.mailbox.Sent {
|
||||
flags = append(flags, bare(`\Sent`))
|
||||
}
|
||||
if info.mailbox.Trash {
|
||||
flags = append(flags, bare(`\Trash`))
|
||||
add := func(b bool, v string) {
|
||||
if b {
|
||||
flags = append(flags, bare(v))
|
||||
}
|
||||
}
|
||||
mb := info.mailbox
|
||||
add(mb.Archive, `\Archive`)
|
||||
add(mb.Draft, `\Drafts`)
|
||||
add(mb.Junk, `\Junk`)
|
||||
add(mb.Sent, `\Sent`)
|
||||
add(mb.Trash, `\Trash`)
|
||||
}
|
||||
|
||||
var extStr string
|
||||
if extended != nil {
|
||||
extStr = " " + extended.pack(c)
|
||||
}
|
||||
line := fmt.Sprintf(`* LIST %s "/" %s%s`, flags.pack(c), astring(c.encodeMailbox(name)).pack(c), extStr)
|
||||
line := fmt.Sprintf(`* LIST %s "/" %s%s`, flags.pack(c), mailboxt(name).pack(c), extStr)
|
||||
responseLines = append(responseLines, line)
|
||||
|
||||
if retStatusAttrs != nil && info.mailbox != nil {
|
||||
responseLines = append(responseLines, c.xstatusLine(tx, *info.mailbox, retStatusAttrs))
|
||||
}
|
||||
|
||||
// ../rfc/9590:101
|
||||
if info.mailbox != nil && len(retMetadata) > 0 {
|
||||
var meta listspace
|
||||
for _, k := range retMetadata {
|
||||
q := bstore.QueryTx[store.Annotation](tx)
|
||||
q.FilterNonzero(store.Annotation{MailboxID: info.mailbox.ID, Key: k})
|
||||
q.FilterEqual("Expunged", false)
|
||||
a, err := q.Get()
|
||||
var v token
|
||||
if err == bstore.ErrAbsent {
|
||||
v = nilt
|
||||
} else {
|
||||
xcheckf(err, "get annotation")
|
||||
if a.IsString {
|
||||
v = string0(string(a.Value))
|
||||
} else {
|
||||
v = readerSizeSyncliteral{bytes.NewReader(a.Value), int64(len(a.Value)), true}
|
||||
}
|
||||
}
|
||||
meta = append(meta, astring(k), v)
|
||||
}
|
||||
line := concatspace{bare("*"), bare("METADATA"), mailboxt(info.mailbox.Name), meta}
|
||||
respMetadata = append(respMetadata, line)
|
||||
}
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
for _, line := range responseLines {
|
||||
c.bwritelinef("%s", line)
|
||||
c.xbwritelinef("%s", line)
|
||||
}
|
||||
for _, meta := range respMetadata {
|
||||
meta.xwriteTo(c, c.xbw)
|
||||
c.xbwritelinef("")
|
||||
}
|
||||
c.ok(tag, cmd)
|
||||
}
|
||||
|
@ -8,10 +8,18 @@ import (
|
||||
)
|
||||
|
||||
func TestListBasic(t *testing.T) {
|
||||
tc := start(t)
|
||||
testListBasic(t, false)
|
||||
}
|
||||
|
||||
func TestListBasicUIDOnly(t *testing.T) {
|
||||
testListBasic(t, true)
|
||||
}
|
||||
|
||||
func testListBasic(t *testing.T, uidonly bool) {
|
||||
tc := start(t, uidonly)
|
||||
defer tc.close()
|
||||
|
||||
tc.client.Login("mjl@mox.example", password0)
|
||||
tc.login("mjl@mox.example", password0)
|
||||
|
||||
ulist := func(name string, flags ...string) imapclient.UntaggedList {
|
||||
if len(flags) == 0 {
|
||||
@ -26,6 +34,9 @@ func TestListBasic(t *testing.T) {
|
||||
tc.last(tc.client.List("Inbox"))
|
||||
tc.xuntagged(ulist("Inbox"))
|
||||
|
||||
tc.last(tc.client.List("expungebox"))
|
||||
tc.xuntagged()
|
||||
|
||||
tc.last(tc.client.List("%"))
|
||||
tc.xuntagged(ulist("Archive", `\Archive`), ulist("Drafts", `\Drafts`), ulist("Inbox"), ulist("Junk", `\Junk`), ulist("Sent", `\Sent`), ulist("Trash", `\Trash`))
|
||||
|
||||
@ -35,7 +46,7 @@ func TestListBasic(t *testing.T) {
|
||||
tc.last(tc.client.List("A*"))
|
||||
tc.xuntagged(ulist("Archive", `\Archive`))
|
||||
|
||||
tc.client.Create("Inbox/todo")
|
||||
tc.client.Create("Inbox/todo", nil)
|
||||
|
||||
tc.last(tc.client.List("Inbox*"))
|
||||
tc.xuntagged(ulist("Inbox"), ulist("Inbox/todo"))
|
||||
@ -56,12 +67,20 @@ func TestListBasic(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestListExtended(t *testing.T) {
|
||||
testListExtended(t, false)
|
||||
}
|
||||
|
||||
func TestListExtendedUIDOnly(t *testing.T) {
|
||||
testListExtended(t, true)
|
||||
}
|
||||
|
||||
func testListExtended(t *testing.T, uidonly bool) {
|
||||
defer mockUIDValidity()()
|
||||
|
||||
tc := start(t)
|
||||
tc := start(t, uidonly)
|
||||
defer tc.close()
|
||||
|
||||
tc.client.Login("mjl@mox.example", password0)
|
||||
tc.login("mjl@mox.example", password0)
|
||||
|
||||
ulist := func(name string, flags ...string) imapclient.UntaggedList {
|
||||
if len(flags) == 0 {
|
||||
@ -78,7 +97,7 @@ func TestListExtended(t *testing.T) {
|
||||
for _, name := range store.DefaultInitialMailboxes.Regular {
|
||||
uidvals[name] = 1
|
||||
}
|
||||
var uidvalnext uint32 = 2
|
||||
var uidvalnext uint32 = 3
|
||||
uidval := func(name string) uint32 {
|
||||
v, ok := uidvals[name]
|
||||
if !ok {
|
||||
@ -146,7 +165,7 @@ func TestListExtended(t *testing.T) {
|
||||
tc.last(tc.client.ListFull(false, "A*", "Junk"))
|
||||
tc.xuntagged(xlist("Archive", Farchive), ustatus("Archive"), xlist("Junk", Fjunk), ustatus("Junk"))
|
||||
|
||||
tc.client.Create("Inbox/todo")
|
||||
tc.client.Create("Inbox/todo", nil)
|
||||
|
||||
tc.last(tc.client.ListFull(false, "Inbox*"))
|
||||
tc.xuntagged(ulist("Inbox", Fhaschildren, Fsubscribed), ustatus("Inbox"), xlist("Inbox/todo"), ustatus("Inbox/todo"))
|
||||
@ -204,7 +223,7 @@ func TestListExtended(t *testing.T) {
|
||||
tc.transactf("ok", `list (remote) "inbox" "a"`)
|
||||
tc.xuntagged()
|
||||
|
||||
tc.client.Create("inbox/a")
|
||||
tc.client.Create("inbox/a", nil)
|
||||
tc.transactf("ok", `list (remote) "inbox" "a"`)
|
||||
tc.xuntagged(ulist("Inbox/a"))
|
||||
|
||||
@ -216,4 +235,21 @@ func TestListExtended(t *testing.T) {
|
||||
tc.transactf("bad", `list (recursivematch remote) "" "*"`) // "remote" is not a base selection option.
|
||||
tc.transactf("bad", `list (unknown) "" "*"`) // Unknown selection options must result in BAD.
|
||||
tc.transactf("bad", `list () "" "*" return (unknown)`) // Unknown return options must result in BAD.
|
||||
|
||||
// Return metadata.
|
||||
tc.transactf("ok", `setmetadata inbox (/private/comment "y")`)
|
||||
tc.transactf("ok", `list () "" ("inbox") return (metadata (/private/comment /shared/comment))`)
|
||||
tc.xuntagged(
|
||||
ulist("Inbox"),
|
||||
imapclient.UntaggedMetadataAnnotations{
|
||||
Mailbox: "Inbox",
|
||||
Annotations: []imapclient.Annotation{
|
||||
{Key: "/private/comment", IsString: true, Value: []byte("y")},
|
||||
{Key: "/shared/comment"},
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
tc.transactf("bad", `list () "" ("inbox") return (metadata ())`) // Metadata list must be non-empty.
|
||||
tc.transactf("bad", `list () "" ("inbox") return (metadata (/shared/comment "/private/comment" ))`) // Extra space.
|
||||
}
|
||||
|
@ -7,10 +7,18 @@ import (
|
||||
)
|
||||
|
||||
func TestLsub(t *testing.T) {
|
||||
tc := start(t)
|
||||
testLsub(t, false)
|
||||
}
|
||||
|
||||
func TestLsubUIDOnly(t *testing.T) {
|
||||
testLsub(t, true)
|
||||
}
|
||||
|
||||
func testLsub(t *testing.T, uidonly bool) {
|
||||
tc := start(t, uidonly)
|
||||
defer tc.close()
|
||||
|
||||
tc.client.Login("mjl@mox.example", password0)
|
||||
tc.login("mjl@mox.example", password0)
|
||||
|
||||
tc.transactf("bad", "lsub") // Missing params.
|
||||
tc.transactf("bad", `lsub ""`) // Missing param.
|
||||
@ -19,6 +27,9 @@ func TestLsub(t *testing.T) {
|
||||
tc.transactf("ok", `lsub "" x*`)
|
||||
tc.xuntagged()
|
||||
|
||||
tc.transactf("ok", `lsub "" expungebox`)
|
||||
tc.xuntagged(imapclient.UntaggedLsub{Separator: '/', Mailbox: "expungebox"})
|
||||
|
||||
tc.transactf("ok", "create a/b/c")
|
||||
tc.transactf("ok", `lsub "" a/*`)
|
||||
tc.xuntagged(imapclient.UntaggedLsub{Separator: '/', Mailbox: "a/b"}, imapclient.UntaggedLsub{Separator: '/', Mailbox: "a/b/c"})
|
||||
|
17 imapserver/main_test.go (new file)
@ -0,0 +1,17 @@
package imapserver

import (
	"fmt"
	"os"
	"testing"

	"github.com/mjl-/mox/metrics"
)

func TestMain(m *testing.M) {
	m.Run()
	if metrics.Panics.Load() > 0 {
		fmt.Println("unhandled panics encountered")
		os.Exit(2)
	}
}
317 imapserver/metadata.go (new file)
@ -0,0 +1,317 @@
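The new file below implements the METADATA extension (RFC 5464): GETMETADATA with MAXSIZE and DEPTH options, SETMETADATA including removal via NIL values, and a global limit on the number and total size of annotations. The DEPTH matching inside cmdGetmetadata is the least obvious part; restated as a standalone sketch of its intent (uses the standard strings package; a summary of the code below, not the code itself):

// matchesDepth reports whether annotation key is selected by a requested
// entry name under the given DEPTH option: "0" (or absent) selects only the
// entry itself, "1" also its direct children, "INFINITY" all descendants.
func matchesDepth(entry, key, depth string) bool {
	if key == entry {
		return true
	}
	prefix := entry
	if entry != "/" {
		prefix += "/"
	}
	if depth == "" || depth == "0" || !strings.HasPrefix(key, prefix) {
		return false
	}
	if depth == "INFINITY" {
		return true
	}
	// depth "1": exactly one more path element below the entry.
	return !strings.Contains(key[len(prefix):], "/")
}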
|
||||
package imapserver
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/mjl-/bstore"
|
||||
|
||||
"github.com/mjl-/mox/store"
|
||||
)
|
||||
|
||||
// Changed during tests.
|
||||
var metadataMaxKeys = 1000
|
||||
var metadataMaxSize = 1000 * 1000
|
||||
|
||||
// Metadata errata:
|
||||
// ../rfc/5464:183 ../rfc/5464-eid1691
|
||||
// ../rfc/5464:564 ../rfc/5464-eid1692
|
||||
// ../rfc/5464:494 ../rfc/5464-eid2785 ../rfc/5464-eid2786
|
||||
// ../rfc/5464:698 ../rfc/5464-eid3868
|
||||
|
||||
// Note: We do not tie the special-use mailbox flags to a (synthetic) private
|
||||
// per-mailbox annotation. ../rfc/6154:303
|
||||
|
||||
// For registration of names, see https://www.iana.org/assignments/imap-metadata/imap-metadata.xhtml
|
||||
|
||||
// Get metadata annotations, per mailbox or globally.
|
||||
//
|
||||
// State: Authenticated and selected.
|
||||
func (c *conn) cmdGetmetadata(tag, cmd string, p *parser) {
|
||||
// Command: ../rfc/5464:412
|
||||
|
||||
// Request syntax: ../rfc/5464:792
|
||||
|
||||
p.xspace()
|
||||
var optMaxSize int64 = -1
|
||||
var optDepth string
|
||||
if p.take("(") {
|
||||
for {
|
||||
if p.take("MAXSIZE") {
|
||||
// ../rfc/5464:804
|
||||
p.xspace()
|
||||
v := p.xnumber()
|
||||
if optMaxSize >= 0 {
|
||||
p.xerrorf("only a single maxsize option accepted")
|
||||
}
|
||||
optMaxSize = int64(v)
|
||||
} else if p.take("DEPTH") {
|
||||
// ../rfc/5464:823
|
||||
p.xspace()
|
||||
s := p.xtakelist("0", "1", "INFINITY")
|
||||
if optDepth != "" {
|
||||
p.xerrorf("only single depth option accepted")
|
||||
}
|
||||
optDepth = s
|
||||
} else {
|
||||
// ../rfc/5464:800 We don't do any further parsing for future extensions.
|
||||
p.xerrorf("unknown option for getmetadata, expected maxsize or depth")
|
||||
}
|
||||
|
||||
if p.take(")") {
|
||||
break
|
||||
}
|
||||
p.xspace()
|
||||
}
|
||||
p.xspace()
|
||||
}
|
||||
mailboxName := p.xmailbox()
|
||||
if mailboxName != "" {
|
||||
mailboxName = xcheckmailboxname(mailboxName, true)
|
||||
}
|
||||
p.xspace()
|
||||
// Entries ../rfc/5464:768
|
||||
entryNames := map[string]struct{}{}
|
||||
if p.take("(") {
|
||||
for {
|
||||
s := p.xmetadataKey()
|
||||
entryNames[s] = struct{}{}
|
||||
if p.take(")") {
|
||||
break
|
||||
}
|
||||
p.xtake(" ")
|
||||
}
|
||||
} else {
|
||||
s := p.xmetadataKey()
|
||||
entryNames[s] = struct{}{}
|
||||
}
|
||||
p.xempty()
|
||||
|
||||
var annotations []store.Annotation
|
||||
longentries := -1 // Size of largest value skipped due to optMaxSize. ../rfc/5464:482
|
||||
|
||||
c.account.WithRLock(func() {
|
||||
c.xdbread(func(tx *bstore.Tx) {
|
||||
q := bstore.QueryTx[store.Annotation](tx)
|
||||
if mailboxName == "" {
|
||||
q.FilterEqual("MailboxID", 0)
|
||||
} else {
|
||||
mb := c.xmailbox(tx, mailboxName, "TRYCREATE")
|
||||
q.FilterNonzero(store.Annotation{MailboxID: mb.ID})
|
||||
}
|
||||
q.FilterEqual("Expunged", false)
|
||||
q.SortAsc("MailboxID", "Key") // For tests.
|
||||
err := q.ForEach(func(a store.Annotation) error {
|
||||
// ../rfc/5464:516
|
||||
switch optDepth {
|
||||
case "", "0":
|
||||
if _, ok := entryNames[a.Key]; !ok {
|
||||
return nil
|
||||
}
|
||||
case "1", "INFINITY":
|
||||
// Go through all keys, matching depth.
|
||||
if _, ok := entryNames[a.Key]; ok {
|
||||
break
|
||||
}
|
||||
var match bool
|
||||
for s := range entryNames {
|
||||
prefix := s
|
||||
if s != "/" {
|
||||
prefix += "/"
|
||||
}
|
||||
if !strings.HasPrefix(a.Key, prefix) {
|
||||
continue
|
||||
}
|
||||
if optDepth == "INFINITY" {
|
||||
match = true
|
||||
break
|
||||
}
|
||||
suffix := a.Key[len(prefix):]
|
||||
t := strings.SplitN(suffix, "/", 2)
|
||||
if len(t) == 1 {
|
||||
match = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !match {
|
||||
return nil
|
||||
}
|
||||
default:
|
||||
xcheckf(fmt.Errorf("%q", optDepth), "missing case for depth")
|
||||
}
|
||||
|
||||
if optMaxSize >= 0 && int64(len(a.Value)) > optMaxSize {
|
||||
longentries = max(longentries, len(a.Value))
|
||||
} else {
|
||||
annotations = append(annotations, a)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
xcheckf(err, "looking up annotations")
|
||||
})
|
||||
})
|
||||
|
||||
// Response syntax: ../rfc/5464:807 ../rfc/5464:778
|
||||
// We can only send untagged responses when we have any matches.
|
||||
if len(annotations) > 0 {
|
||||
fmt.Fprintf(c.xbw, "* METADATA %s (", mailboxt(mailboxName).pack(c))
|
||||
for i, a := range annotations {
|
||||
if i > 0 {
|
||||
fmt.Fprint(c.xbw, " ")
|
||||
}
|
||||
astring(a.Key).xwriteTo(c, c.xbw)
|
||||
fmt.Fprint(c.xbw, " ")
|
||||
if a.IsString {
|
||||
string0(string(a.Value)).xwriteTo(c, c.xbw)
|
||||
} else {
|
||||
v := readerSizeSyncliteral{bytes.NewReader(a.Value), int64(len(a.Value)), true}
|
||||
v.xwriteTo(c, c.xbw)
|
||||
}
|
||||
}
|
||||
c.xbwritelinef(")")
|
||||
}
|
||||
|
||||
if longentries >= 0 {
|
||||
c.xbwritelinef("%s OK [METADATA LONGENTRIES %d] getmetadata done", tag, longentries)
|
||||
} else {
|
||||
c.ok(tag, cmd)
|
||||
}
|
||||
}
|
||||
|
||||
// Set metadata annotation, per mailbox or globally.
|
||||
//
|
||||
// We allow both /private/* and /shared/*, we store them in the same way since we
|
||||
// don't have ACL extension support yet or another mechanism for access control.
|
||||
//
|
||||
// State: Authenticated and selected.
|
||||
func (c *conn) cmdSetmetadata(tag, cmd string, p *parser) {
|
||||
// Command: ../rfc/5464:547
|
||||
|
||||
// Request syntax: ../rfc/5464:826
|
||||
|
||||
p.xspace()
|
||||
mailboxName := p.xmailbox()
|
||||
// Empty name means a global (per-account) annotation, not for a mailbox.
|
||||
if mailboxName != "" {
|
||||
mailboxName = xcheckmailboxname(mailboxName, true)
|
||||
}
|
||||
p.xspace()
|
||||
p.xtake("(")
|
||||
var l []store.Annotation
|
||||
for {
|
||||
key, isString, value := p.xmetadataKeyValue()
|
||||
l = append(l, store.Annotation{Key: key, IsString: isString, Value: value})
|
||||
if p.take(")") {
|
||||
break
|
||||
}
|
||||
p.xspace()
|
||||
}
|
||||
p.xempty()
|
||||
|
||||
// Additional checks on entry names.
|
||||
for _, a := range l {
|
||||
// ../rfc/5464:217
|
||||
if !strings.HasPrefix(a.Key, "/private/") && !strings.HasPrefix(a.Key, "/shared/") {
|
||||
// ../rfc/5464:346
|
||||
xuserErrorf("only /private/* and /shared/* entry names allowed")
|
||||
}
|
||||
|
||||
// We also enforce that /private/vendor/ is followed by at least 2 elements.
|
||||
// ../rfc/5464:234
|
||||
switch {
|
||||
case a.Key == "/private/vendor",
|
||||
strings.HasPrefix(a.Key, "/private/vendor/"),
|
||||
a.Key == "/shared/vendor", strings.HasPrefix(a.Key, "/shared/vendor/"):
|
||||
|
||||
t := strings.SplitN(a.Key[1:], "/", 4)
|
||||
if len(t) < 4 {
|
||||
xuserErrorf("entry names starting with /private/vendor or /shared/vendor must have at least 4 components")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Store the annotations, possibly removing/inserting/updating them.
|
||||
c.account.WithWLock(func() {
|
||||
var changes []store.Change
|
||||
var modseq store.ModSeq
|
||||
|
||||
c.xdbwrite(func(tx *bstore.Tx) {
|
||||
var mb store.Mailbox // mb.ID as 0 is used in query below.
|
||||
if mailboxName != "" {
|
||||
mb = c.xmailbox(tx, mailboxName, "TRYCREATE")
|
||||
}
|
||||
|
||||
for _, a := range l {
|
||||
q := bstore.QueryTx[store.Annotation](tx)
|
||||
q.FilterNonzero(store.Annotation{Key: a.Key})
|
||||
q.FilterEqual("MailboxID", mb.ID) // Can be zero.
|
||||
q.FilterEqual("Expunged", false)
|
||||
oa, err := q.Get()
|
||||
// Nil means remove. ../rfc/5464:579
|
||||
if err == bstore.ErrAbsent && a.Value == nil {
|
||||
continue
|
||||
}
|
||||
if modseq == 0 {
|
||||
var err error
|
||||
modseq, err = c.account.NextModSeq(tx)
|
||||
xcheckf(err, "get next modseq")
|
||||
}
|
||||
if err == bstore.ErrAbsent {
|
||||
a.MailboxID = mb.ID
|
||||
a.CreateSeq = modseq
|
||||
a.ModSeq = modseq
|
||||
err = tx.Insert(&a)
|
||||
xcheckf(err, "inserting annotation")
|
||||
changes = append(changes, a.Change(mailboxName))
|
||||
} else {
|
||||
xcheckf(err, "get metadata")
|
||||
oa.ModSeq = modseq
|
||||
if a.Value == nil {
|
||||
oa.Expunged = true
|
||||
}
|
||||
oa.IsString = a.IsString
|
||||
oa.Value = a.Value
|
||||
err = tx.Update(&oa)
|
||||
xcheckf(err, "updating metdata")
|
||||
changes = append(changes, oa.Change(mailboxName))
|
||||
}
|
||||
}
|
||||
|
||||
c.xcheckMetadataSize(tx)
|
||||
|
||||
// ../rfc/7162:1335
|
||||
if mb.ID != 0 && modseq != 0 {
|
||||
mb.ModSeq = modseq
|
||||
err := tx.Update(&mb)
|
||||
xcheckf(err, "updating mailbox with modseq")
|
||||
}
|
||||
})
|
||||
|
||||
c.broadcast(changes)
|
||||
})
|
||||
|
||||
c.ok(tag, cmd)
|
||||
}
|
||||
|
||||
func (c *conn) xcheckMetadataSize(tx *bstore.Tx) {
|
||||
// Check for total size. We allow a total of 1000 entries, with total capacity of 1MB.
|
||||
// ../rfc/5464:383
|
||||
var n int
|
||||
var size int
|
||||
err := bstore.QueryTx[store.Annotation](tx).FilterEqual("Expunged", false).ForEach(func(a store.Annotation) error {
|
||||
n++
|
||||
if n > metadataMaxKeys {
|
||||
// ../rfc/5464:590
|
||||
xusercodeErrorf("METADATA (TOOMANY)", "too many metadata entries, 1000 allowed in total")
|
||||
}
|
||||
size += len(a.Key) + len(a.Value)
|
||||
if size > metadataMaxSize {
|
||||
// ../rfc/5464:585 We only have a max total size limit, not per entry. We'll
|
||||
// mention the max total size.
|
||||
xusercodeErrorf(fmt.Sprintf("METADATA (MAXSIZE %d)", metadataMaxSize), "metadata entry values too large, total maximum size is 1MB")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
xcheckf(err, "checking metadata annotation size")
|
||||
}
|
296 imapserver/metadata_test.go (new file)
@ -0,0 +1,296 @@
|
||||
package imapserver
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/mjl-/mox/imapclient"
|
||||
)
|
||||
|
||||
func TestMetadata(t *testing.T) {
|
||||
testMetadata(t, false)
|
||||
}
|
||||
|
||||
func TestMetadataUIDOnly(t *testing.T) {
|
||||
testMetadata(t, true)
|
||||
}
|
||||
|
||||
func testMetadata(t *testing.T, uidonly bool) {
|
||||
tc := start(t, uidonly)
|
||||
defer tc.close()
|
||||
|
||||
tc.login("mjl@mox.example", password0)
|
||||
|
||||
tc.transactf("ok", `getmetadata "" /private/comment`)
|
||||
tc.xuntagged()
|
||||
|
||||
tc.transactf("ok", `getmetadata inbox (/private/comment)`)
|
||||
tc.xuntagged()
|
||||
|
||||
tc.transactf("ok", `setmetadata "" (/PRIVATE/COMMENT "global value")`)
|
||||
tc.transactf("ok", `setmetadata inbox (/private/comment "mailbox value")`)
|
||||
|
||||
tc.transactf("ok", `create metabox`)
|
||||
tc.transactf("ok", `setmetadata metabox (/private/comment "mailbox value")`)
|
||||
tc.transactf("ok", `setmetadata metabox (/shared/comment "mailbox value")`)
|
||||
tc.transactf("ok", `setmetadata metabox (/shared/comment nil)`) // Remove.
|
||||
tc.transactf("ok", `delete metabox`) // Delete mailbox with live and expunged metadata.
|
||||
|
||||
tc.transactf("no", `setmetadata expungebox (/private/comment "mailbox value")`)
|
||||
tc.xcodeWord("TRYCREATE")
|
||||
|
||||
tc.transactf("ok", `getmetadata "" ("/private/comment")`)
|
||||
tc.xuntagged(imapclient.UntaggedMetadataAnnotations{
|
||||
Mailbox: "",
|
||||
Annotations: []imapclient.Annotation{
|
||||
{Key: "/private/comment", IsString: true, Value: []byte("global value")},
|
||||
},
|
||||
})
|
||||
|
||||
tc.transactf("ok", `setmetadata Inbox (/shared/comment "share")`)
|
||||
|
||||
tc.transactf("ok", `getmetadata inbox (/private/comment /private/unknown /shared/comment)`)
|
||||
tc.xuntagged(imapclient.UntaggedMetadataAnnotations{
|
||||
Mailbox: "Inbox",
|
||||
Annotations: []imapclient.Annotation{
|
||||
{Key: "/private/comment", IsString: true, Value: []byte("mailbox value")},
|
||||
{Key: "/shared/comment", IsString: true, Value: []byte("share")},
|
||||
},
|
||||
})
|
||||
|
||||
tc.transactf("no", `setmetadata doesnotexist (/private/comment "test")`) // Bad mailbox.
|
||||
tc.transactf("no", `setmetadata Inbox (/badprefix/comment "")`)
|
||||
tc.transactf("no", `setmetadata Inbox (/private/vendor "")`) // /*/vendor must have more components.
|
||||
tc.transactf("no", `setmetadata Inbox (/private/vendor/stillbad "")`) // /*/vendor must have more components.
|
||||
tc.transactf("ok", `setmetadata Inbox (/private/vendor/a/b "")`)
|
||||
tc.transactf("bad", `setmetadata Inbox (/private/no* "")`)
|
||||
tc.transactf("bad", `setmetadata Inbox (/private/no%% "")`)
|
||||
tc.transactf("bad", `setmetadata Inbox (/private/notrailingslash/ "")`)
|
||||
tc.transactf("bad", `setmetadata Inbox (/private//nodupslash "")`)
|
||||
tc.transactf("bad", "setmetadata Inbox (/private/\001 \"\")")
|
||||
tc.transactf("bad", "setmetadata Inbox (/private/\u007f \"\")")
|
||||
tc.transactf("bad", `getmetadata (depth 0 depth 0) inbox (/private/a)`) // Duplicate option.
|
||||
tc.transactf("bad", `getmetadata (depth badvalue) inbox (/private/a)`)
|
||||
tc.transactf("bad", `getmetadata (maxsize invalid) inbox (/private/a)`)
|
||||
tc.transactf("bad", `getmetadata (badoption) inbox (/private/a)`)
|
||||
|
||||
// Update existing annotation by key.
|
||||
tc.transactf("ok", `setmetadata "" (/PRIVATE/COMMENT "global updated")`)
|
||||
tc.transactf("ok", `setmetadata inbox (/private/comment "mailbox updated")`)
|
||||
tc.transactf("ok", `getmetadata "" (/private/comment)`)
|
||||
tc.xuntagged(imapclient.UntaggedMetadataAnnotations{
|
||||
Mailbox: "",
|
||||
Annotations: []imapclient.Annotation{
|
||||
{Key: "/private/comment", IsString: true, Value: []byte("global updated")},
|
||||
},
|
||||
})
|
||||
tc.transactf("ok", `getmetadata inbox (/private/comment)`)
|
||||
tc.xuntagged(imapclient.UntaggedMetadataAnnotations{
|
||||
Mailbox: "Inbox",
|
||||
Annotations: []imapclient.Annotation{
|
||||
{Key: "/private/comment", IsString: true, Value: []byte("mailbox updated")},
|
||||
},
|
||||
})
|
||||
|
||||
// Delete annotation with nil value.
|
||||
tc.transactf("ok", `setmetadata "" (/private/comment nil)`)
|
||||
tc.transactf("ok", `setmetadata inbox (/private/comment nil)`)
|
||||
tc.transactf("ok", `getmetadata "" (/private/comment)`)
|
||||
tc.xuntagged()
|
||||
tc.transactf("ok", `getmetadata inbox (/private/comment)`)
|
||||
tc.xuntagged()
|
||||
|
||||
// Create a literal8 value, not a string.
|
||||
tc.transactf("ok", "setmetadata inbox (/private/comment ~{4+}\r\ntest)")
|
||||
tc.transactf("ok", `getmetadata inbox (/private/comment)`)
|
||||
tc.xuntagged(imapclient.UntaggedMetadataAnnotations{
|
||||
Mailbox: "Inbox",
|
||||
Annotations: []imapclient.Annotation{
|
||||
{Key: "/private/comment", IsString: false, Value: []byte("test")},
|
||||
},
|
||||
})
|
||||
|
||||
// Request with a maximum size, we don't get anything larger.
|
||||
tc.transactf("ok", `setmetadata inbox (/private/another "longer")`)
|
||||
tc.transactf("ok", `getmetadata (maxsize 4) inbox (/private/comment /private/another)`)
|
||||
tc.xcode(imapclient.CodeMetadataLongEntries(6))
|
||||
tc.xuntagged(imapclient.UntaggedMetadataAnnotations{
|
||||
Mailbox: "Inbox",
|
||||
Annotations: []imapclient.Annotation{
|
||||
{Key: "/private/comment", IsString: false, Value: []byte("test")},
|
||||
},
|
||||
})
|
||||
|
||||
// Request with various depth values.
|
||||
tc.transactf("ok", `setmetadata inbox (/private/a "x" /private/a/b "x" /private/a/b/c "x" /private/a/b/c/d "x")`)
|
||||
tc.transactf("ok", `getmetadata (depth 0) inbox (/private/a)`)
|
||||
tc.xuntagged(imapclient.UntaggedMetadataAnnotations{
|
||||
Mailbox: "Inbox",
|
||||
Annotations: []imapclient.Annotation{
|
||||
{Key: "/private/a", IsString: true, Value: []byte("x")},
|
||||
},
|
||||
})
|
||||
tc.transactf("ok", `getmetadata (depth 1) inbox (/private/a)`)
|
||||
tc.xuntagged(imapclient.UntaggedMetadataAnnotations{
|
||||
Mailbox: "Inbox",
|
||||
Annotations: []imapclient.Annotation{
|
||||
{Key: "/private/a", IsString: true, Value: []byte("x")},
|
||||
{Key: "/private/a/b", IsString: true, Value: []byte("x")},
|
||||
},
|
||||
})
|
||||
tc.transactf("ok", `getmetadata (depth infinity) inbox (/private/a)`)
|
||||
tc.xuntagged(imapclient.UntaggedMetadataAnnotations{
|
||||
Mailbox: "Inbox",
|
||||
Annotations: []imapclient.Annotation{
|
||||
{Key: "/private/a", IsString: true, Value: []byte("x")},
|
||||
{Key: "/private/a/b", IsString: true, Value: []byte("x")},
|
||||
{Key: "/private/a/b/c", IsString: true, Value: []byte("x")},
|
||||
{Key: "/private/a/b/c/d", IsString: true, Value: []byte("x")},
|
||||
},
|
||||
})
|
||||
// Same as previous, but ask for everything below /.
|
||||
tc.transactf("ok", `getmetadata (depth infinity) inbox ("")`)
|
||||
tc.xuntagged(imapclient.UntaggedMetadataAnnotations{
|
||||
Mailbox: "Inbox",
|
||||
Annotations: []imapclient.Annotation{
|
||||
{Key: "/private/a", IsString: true, Value: []byte("x")},
|
||||
{Key: "/private/a/b", IsString: true, Value: []byte("x")},
|
||||
{Key: "/private/a/b/c", IsString: true, Value: []byte("x")},
|
||||
{Key: "/private/a/b/c/d", IsString: true, Value: []byte("x")},
|
||||
{Key: "/private/another", IsString: true, Value: []byte("longer")},
|
||||
{Key: "/private/comment", IsString: false, Value: []byte("test")},
|
||||
{Key: "/private/vendor/a/b", IsString: true, Value: []byte("")},
|
||||
{Key: "/shared/comment", IsString: true, Value: []byte("share")},
|
||||
},
|
||||
})
|
||||
|
||||
// Deleting a mailbox with an annotation should work and annotations should not
|
||||
// come back when recreating mailbox.
|
||||
tc.transactf("ok", "create testbox")
|
||||
tc.transactf("ok", `setmetadata testbox (/private/a "x")`)
|
||||
tc.transactf("ok", "delete testbox")
|
||||
tc.transactf("ok", "create testbox")
|
||||
tc.transactf("ok", `getmetadata testbox (/private/a)`)
|
||||
tc.xuntagged()
|
||||
|
||||
// When renaming mailbox, annotations must be copied to destination mailbox.
|
||||
tc.transactf("ok", "rename inbox newbox")
|
||||
tc.transactf("ok", `getmetadata newbox (/private/a)`)
|
||||
tc.xuntagged(imapclient.UntaggedMetadataAnnotations{
|
||||
Mailbox: "newbox",
|
||||
Annotations: []imapclient.Annotation{
|
||||
{Key: "/private/a", IsString: true, Value: []byte("x")},
|
||||
},
|
||||
})
|
||||
tc.transactf("ok", `getmetadata inbox (/private/a)`)
|
||||
tc.xuntagged(imapclient.UntaggedMetadataAnnotations{
|
||||
Mailbox: "Inbox",
|
||||
Annotations: []imapclient.Annotation{
|
||||
{Key: "/private/a", IsString: true, Value: []byte("x")},
|
||||
},
|
||||
})
|
||||
|
||||
// Broadcast should not happen when metadata capability is not enabled.
|
||||
tc2 := startNoSwitchboard(t, uidonly)
|
||||
defer tc2.closeNoWait()
|
||||
tc2.login("mjl@mox.example", password0)
|
||||
tc2.client.Select("inbox")
|
||||
|
||||
tc2.cmdf("", "idle")
|
||||
tc2.readprefixline("+ ")
|
||||
done := make(chan error)
|
||||
go func() {
|
||||
defer func() {
|
||||
x := recover()
|
||||
if x != nil {
|
||||
done <- fmt.Errorf("%v", x)
|
||||
}
|
||||
}()
|
||||
untagged, _ := tc2.client.ReadUntagged()
|
||||
var exists imapclient.UntaggedExists
|
||||
tuntagged(tc2.t, untagged, &exists)
|
||||
tc2.writelinef("done")
|
||||
tc2.response("ok")
|
||||
done <- nil
|
||||
}()
|
||||
|
||||
// Should not cause idle to return.
|
||||
tc.transactf("ok", `setmetadata inbox (/private/a "y")`)
|
||||
// Cause to return.
|
||||
tc.transactf("ok", "append inbox {4+}\r\ntest")
|
||||
|
||||
timer := time.NewTimer(time.Second)
|
||||
defer timer.Stop()
|
||||
select {
|
||||
case err := <-done:
|
||||
tc.check(err, "idle")
|
||||
case <-timer.C:
|
||||
t.Fatalf("idle did not finish")
|
||||
}
|
||||
|
||||
// Broadcast should happen when metadata capability is enabled.
|
||||
tc2.client.Enable(imapclient.CapMetadata)
|
||||
tc2.cmdf("", "idle")
|
||||
tc2.readprefixline("+ ")
|
||||
done = make(chan error)
|
||||
go func() {
|
||||
defer func() {
|
||||
x := recover()
|
||||
if x != nil {
|
||||
done <- fmt.Errorf("%v", x)
|
||||
}
|
||||
}()
|
||||
untagged, _ := tc2.client.ReadUntagged()
|
||||
var metadataKeys imapclient.UntaggedMetadataKeys
|
||||
tuntagged(tc2.t, untagged, &metadataKeys)
|
||||
tc2.writelinef("done")
|
||||
tc2.response("ok")
|
||||
done <- nil
|
||||
}()
|
||||
|
||||
// Should cause idle to return.
|
||||
tc.transactf("ok", `setmetadata inbox (/private/a "z")`)
|
||||
|
||||
timer = time.NewTimer(time.Second)
|
||||
defer timer.Stop()
|
||||
select {
|
||||
case err := <-done:
|
||||
tc.check(err, "idle")
|
||||
case <-timer.C:
|
||||
t.Fatalf("idle did not finish")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMetadataLimit(t *testing.T) {
|
||||
tc := start(t, false)
|
||||
defer tc.close()
|
||||
|
||||
tc.login("mjl@mox.example", password0)
|
||||
|
||||
maxKeys, maxSize := metadataMaxKeys, metadataMaxSize
|
||||
defer func() {
|
||||
metadataMaxKeys = maxKeys
|
||||
metadataMaxSize = maxSize
|
||||
}()
|
||||
metadataMaxKeys = 10
|
||||
metadataMaxSize = 1000
|
||||
|
||||
// Reach max total size limit.
|
||||
buf := make([]byte, metadataMaxSize+1)
|
||||
for i := range buf {
|
||||
buf[i] = 'x'
|
||||
}
|
||||
tc.cmdf("", "setmetadata inbox (/private/large ~{%d+}", len(buf))
|
||||
tc.client.Write(buf)
|
||||
tc.client.Writelinef(")")
|
||||
tc.response("no")
|
||||
tc.xcode(imapclient.CodeMetadataMaxSize(metadataMaxSize))
|
||||
|
||||
// Reach limit for max number.
|
||||
for i := 1; i <= metadataMaxKeys; i++ {
|
||||
tc.transactf("ok", `setmetadata inbox (/private/key%d "test")`, i)
|
||||
}
|
||||
tc.transactf("no", `setmetadata inbox (/private/toomany "test")`)
|
||||
tc.xcode(imapclient.CodeMetadataTooMany{})
|
||||
}
|
@ -7,23 +7,31 @@ import (
|
||||
)
|
||||
|
||||
func TestMove(t *testing.T) {
|
||||
testMove(t, false)
|
||||
}
|
||||
|
||||
func TestMoveUIDOnly(t *testing.T) {
|
||||
testMove(t, true)
|
||||
}
|
||||
|
||||
func testMove(t *testing.T, uidonly bool) {
|
||||
defer mockUIDValidity()()
|
||||
tc := start(t)
|
||||
tc := start(t, uidonly)
|
||||
defer tc.close()
|
||||
|
||||
tc2 := startNoSwitchboard(t)
|
||||
defer tc2.close()
|
||||
tc2 := startNoSwitchboard(t, uidonly)
|
||||
defer tc2.closeNoWait()
|
||||
|
||||
tc3 := startNoSwitchboard(t)
|
||||
defer tc3.close()
|
||||
tc3 := startNoSwitchboard(t, uidonly)
|
||||
defer tc3.closeNoWait()
|
||||
|
||||
tc.client.Login("mjl@mox.example", password0)
|
||||
tc.login("mjl@mox.example", password0)
|
||||
tc.client.Select("inbox")
|
||||
|
||||
tc2.client.Login("mjl@mox.example", password0)
|
||||
tc2.login("mjl@mox.example", password0)
|
||||
tc2.client.Select("Trash")
|
||||
|
||||
tc3.client.Login("mjl@mox.example", password0)
|
||||
tc3.login("mjl@mox.example", password0)
|
||||
tc3.client.Select("inbox")
|
||||
|
||||
tc.transactf("bad", "move") // Missing params.
|
||||
@ -31,62 +39,79 @@ func TestMove(t *testing.T) {
|
||||
tc.transactf("bad", "move 1 inbox ") // Leftover.
|
||||
|
||||
// Seqs 1,2 and UIDs 3,4.
|
||||
tc.client.Append("inbox", nil, nil, []byte(exampleMsg))
|
||||
tc.client.Append("inbox", nil, nil, []byte(exampleMsg))
|
||||
tc.client.StoreFlagsSet("1:2", true, `\Deleted`)
|
||||
tc.client.Append("inbox", makeAppend(exampleMsg))
|
||||
tc.client.Append("inbox", makeAppend(exampleMsg))
|
||||
tc.client.UIDStoreFlagsSet("1:2", true, `\Deleted`)
|
||||
tc.client.Expunge()
|
||||
tc.client.Append("inbox", nil, nil, []byte(exampleMsg))
|
||||
tc.client.Append("inbox", nil, nil, []byte(exampleMsg))
|
||||
tc.client.Append("inbox", makeAppend(exampleMsg))
|
||||
tc.client.Append("inbox", makeAppend(exampleMsg))
|
||||
|
||||
tc.client.Unselect()
|
||||
tc.client.Examine("inbox")
|
||||
tc.transactf("no", "move 1 Trash") // Opened readonly.
|
||||
tc.client.Unselect()
|
||||
tc.client.Select("inbox")
|
||||
if uidonly {
|
||||
tc.transactf("ok", "uid move 1:* Trash")
|
||||
} else {
|
||||
tc.client.Unselect()
|
||||
tc.client.Examine("inbox")
|
||||
tc.transactf("no", "move 1 Trash") // Opened readonly.
|
||||
tc.client.Unselect()
|
||||
tc.client.Select("inbox")
|
||||
|
||||
tc.transactf("no", "move 1 nonexistent")
|
||||
tc.xcode("TRYCREATE")
|
||||
tc.transactf("no", "move 1 nonexistent")
|
||||
tc.xcodeWord("TRYCREATE")
|
||||
|
||||
tc.transactf("no", "move 1 inbox") // Cannot move to same mailbox.
|
||||
tc.transactf("no", "move 1 expungebox")
|
||||
tc.xcodeWord("TRYCREATE")
|
||||
|
||||
tc2.transactf("ok", "noop") // Drain.
|
||||
tc3.transactf("ok", "noop") // Drain.
|
||||
tc.transactf("no", "move 1 inbox") // Cannot move to same mailbox.
|
||||
|
||||
tc.transactf("ok", "move 1:* Trash")
|
||||
ptr := func(v uint32) *uint32 { return &v }
|
||||
tc.xuntagged(
|
||||
imapclient.UntaggedResult{Status: "OK", RespText: imapclient.RespText{Code: "COPYUID", CodeArg: imapclient.CodeCopyUID{DestUIDValidity: 1, From: []imapclient.NumRange{{First: 3, Last: ptr(4)}}, To: []imapclient.NumRange{{First: 1, Last: ptr(2)}}}, More: "moved"}},
|
||||
imapclient.UntaggedExpunge(1),
|
||||
imapclient.UntaggedExpunge(1),
|
||||
)
|
||||
tc2.transactf("ok", "noop")
|
||||
tc2.xuntagged(
|
||||
imapclient.UntaggedExists(2),
|
||||
imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{imapclient.FetchUID(1), imapclient.FetchFlags(nil)}},
|
||||
imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{imapclient.FetchUID(2), imapclient.FetchFlags(nil)}},
|
||||
)
|
||||
tc3.transactf("ok", "noop")
|
||||
tc3.xuntagged(imapclient.UntaggedExpunge(1), imapclient.UntaggedExpunge(1))
|
||||
tc2.transactf("ok", "noop") // Drain.
|
||||
tc3.transactf("ok", "noop") // Drain.
|
||||
|
||||
tc.transactf("ok", "move 1:* Trash")
|
||||
tc.xuntagged(
|
||||
imapclient.UntaggedResult{Status: "OK", Code: imapclient.CodeCopyUID{DestUIDValidity: 1, From: []imapclient.NumRange{{First: 3, Last: uint32ptr(4)}}, To: []imapclient.NumRange{{First: 1, Last: uint32ptr(2)}}}, Text: "moved"},
|
||||
imapclient.UntaggedExpunge(1),
|
||||
imapclient.UntaggedExpunge(1),
|
||||
)
|
||||
tc2.transactf("ok", "noop")
|
||||
tc2.xuntagged(
|
||||
imapclient.UntaggedExists(2),
|
||||
tc.untaggedFetch(1, 1, imapclient.FetchFlags(nil)),
|
||||
tc.untaggedFetch(2, 2, imapclient.FetchFlags(nil)),
|
||||
)
|
||||
tc3.transactf("ok", "noop")
|
||||
tc3.xuntagged(imapclient.UntaggedExpunge(1), imapclient.UntaggedExpunge(1))
|
||||
}
|
||||
|
||||
// UIDs 5,6
|
||||
tc.client.Append("inbox", nil, nil, []byte(exampleMsg))
|
||||
tc.client.Append("inbox", nil, nil, []byte(exampleMsg))
|
||||
tc.client.Append("inbox", makeAppend(exampleMsg))
|
||||
tc.client.Append("inbox", makeAppend(exampleMsg))
|
||||
tc2.transactf("ok", "noop") // Drain.
|
||||
tc3.transactf("ok", "noop") // Drain.
|
||||
|
||||
tc.transactf("no", "uid move 1:4 Trash") // No match.
|
||||
tc.transactf("ok", "uid move 6:5 Trash")
|
||||
tc.xuntagged(
|
||||
imapclient.UntaggedResult{Status: "OK", RespText: imapclient.RespText{Code: "COPYUID", CodeArg: imapclient.CodeCopyUID{DestUIDValidity: 1, From: []imapclient.NumRange{{First: 5, Last: ptr(6)}}, To: []imapclient.NumRange{{First: 3, Last: ptr(4)}}}, More: "moved"}},
|
||||
imapclient.UntaggedExpunge(1),
|
||||
imapclient.UntaggedExpunge(1),
|
||||
)
|
||||
if uidonly {
|
||||
tc.xuntagged(
|
||||
imapclient.UntaggedResult{Status: "OK", Code: imapclient.CodeCopyUID{DestUIDValidity: 1, From: []imapclient.NumRange{{First: 5, Last: uint32ptr(6)}}, To: []imapclient.NumRange{{First: 3, Last: uint32ptr(4)}}}, Text: "moved"},
|
||||
imapclient.UntaggedVanished{UIDs: xparseNumSet("5:6")},
|
||||
)
|
||||
} else {
|
||||
tc.xuntagged(
|
||||
imapclient.UntaggedResult{Status: "OK", Code: imapclient.CodeCopyUID{DestUIDValidity: 1, From: []imapclient.NumRange{{First: 5, Last: uint32ptr(6)}}, To: []imapclient.NumRange{{First: 3, Last: uint32ptr(4)}}}, Text: "moved"},
|
||||
imapclient.UntaggedExpunge(1),
|
||||
imapclient.UntaggedExpunge(1),
|
||||
)
|
||||
}
|
||||
tc2.transactf("ok", "noop")
|
||||
tc2.xuntagged(
|
||||
imapclient.UntaggedExists(4),
|
||||
imapclient.UntaggedFetch{Seq: 3, Attrs: []imapclient.FetchAttr{imapclient.FetchUID(3), imapclient.FetchFlags(nil)}},
|
||||
imapclient.UntaggedFetch{Seq: 4, Attrs: []imapclient.FetchAttr{imapclient.FetchUID(4), imapclient.FetchFlags(nil)}},
|
||||
tc2.untaggedFetch(3, 3, imapclient.FetchFlags(nil)),
|
||||
tc2.untaggedFetch(4, 4, imapclient.FetchFlags(nil)),
|
||||
)
|
||||
tc3.transactf("ok", "noop")
|
||||
tc3.xuntagged(imapclient.UntaggedExpunge(1), imapclient.UntaggedExpunge(1))
|
||||
if uidonly {
|
||||
tc3.xuntagged(imapclient.UntaggedVanished{UIDs: xparseNumSet("5:6")})
|
||||
} else {
|
||||
tc3.xuntagged(imapclient.UntaggedExpunge(1), imapclient.UntaggedExpunge(1))
|
||||
}
|
||||
}
|
||||
|
329 imapserver/notify.go (Normal file)
@@ -0,0 +1,329 @@
|
||||
package imapserver

import (
	"fmt"
	"slices"
	"strings"

	"github.com/mjl-/bstore"

	"github.com/mjl-/mox/store"
)

// Max number of pending changes for selected-delayed mailbox before we write a
// NOTIFICATIONOVERFLOW message, flush changes and stop gathering more changes.
// Changed during tests.
var selectedDelayedChangesMax = 1000

// notify represents a configuration as passed to the notify command.
type notify struct {
	// "NOTIFY NONE" results in an empty list, matching no events.
	EventGroups []eventGroup

	// Changes for the selected mailbox in case of SELECTED-DELAYED, when we don't send
	// events asynchronously. These must still be processed later on for their
	// ChangeRemoveUIDs, to erase expunged message files. At the end of a command (e.g.
	// NOOP) or immediately upon IDLE we will send untagged responses for these
	// changes. If the connection breaks, we still process the ChangeRemoveUIDs.
	Delayed []store.Change
}
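A minimal sketch (not part of the diff) of how a parsed "NOTIFY SET ..." command could map onto this struct. The variable name is hypothetical; it assumes the eventGroup, mailboxSpecifier and notifyEvent types and the event-kind constants referenced elsewhere in this change, with eventKind backed by string as the conversions in match suggest.

	// Illustrative only: roughly what
	//   NOTIFY SET (SELECTED (MessageNew MessageExpunge)) (PERSONAL (MessageNew MessageExpunge MailboxName))
	// would parse into. Kind strings are stored upper case by the parser.
	var exampleNotifyConfig = notify{
		EventGroups: []eventGroup{
			{
				MailboxSpecifier: mailboxSpecifier{Kind: mbspecSelected},
				Events: []notifyEvent{
					{Kind: string(eventMessageNew)},
					{Kind: string(eventMessageExpunge)},
				},
			},
			{
				MailboxSpecifier: mailboxSpecifier{Kind: mbspecPersonal},
				Events: []notifyEvent{
					{Kind: string(eventMessageNew)},
					{Kind: string(eventMessageExpunge)},
					{Kind: string(eventMailboxName)},
				},
			},
		},
	}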

// match checks if an event for a mailbox id/name (optional depending on type)
// should be turned into a notification to the client.
func (n notify) match(c *conn, xtxfn func() *bstore.Tx, mailboxID int64, mailbox string, kind eventKind) (mailboxSpecifier, notifyEvent, bool) {
	// We look through the event groups, and won't stop looking until we've found a
	// confirmation the event should be notified. ../rfc/5465:756

	// Non-message-related events are only matched by non-"selected" mailbox
	// specifiers. ../rfc/5465:268
	// If you read the mailboxes matching paragraph in isolation, you would think only
	// "SELECTED" and "SELECTED-DELAYED" can match events for the selected mailbox. But
	// a few other places hint that that only applies to message events, not to mailbox
	// events, such as subscriptions and mailbox metadata changes. With a strict
	// interpretation, clients couldn't request notifications for such events for the
	// selected mailbox. ../rfc/5465:752
|
||||
|
||||
for _, eg := range n.EventGroups {
|
||||
switch eg.MailboxSpecifier.Kind {
|
||||
case mbspecSelected, mbspecSelectedDelayed: // ../rfc/5465:800
|
||||
if mailboxID != c.mailboxID || !slices.Contains(messageEventKinds, kind) {
|
||||
continue
|
||||
}
|
||||
for _, ev := range eg.Events {
|
||||
if eventKind(ev.Kind) == kind {
|
||||
return eg.MailboxSpecifier, ev, true
|
||||
}
|
||||
}
|
||||
// We can only have a single selected for notify, so no point in continuing the search.
|
||||
return mailboxSpecifier{}, notifyEvent{}, false
|
||||
|
||||
default:
|
||||
// The selected mailbox can only match for non-message events for specifiers other
|
||||
// than "selected"/"selected-delayed".
|
||||
if c.mailboxID == mailboxID && slices.Contains(messageEventKinds, kind) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
var match bool
|
||||
Match:
|
||||
switch eg.MailboxSpecifier.Kind {
|
||||
case mbspecPersonal: // ../rfc/5465:817
|
||||
match = true
|
||||
|
||||
case mbspecInboxes: // ../rfc/5465:822
|
||||
if mailbox == "Inbox" || strings.HasPrefix(mailbox, "Inbox/") {
|
||||
match = true
|
||||
break Match
|
||||
}
|
||||
|
||||
if mailbox == "" {
|
||||
break Match
|
||||
}
|
||||
|
||||
// Include mailboxes we may deliver to based on destinations, or based on rulesets,
|
||||
// not including deliveries for mailing lists.
|
||||
conf, _ := c.account.Conf()
|
||||
for _, dest := range conf.Destinations {
|
||||
if dest.Mailbox == mailbox {
|
||||
match = true
|
||||
break Match
|
||||
}
|
||||
|
||||
for _, rs := range dest.Rulesets {
|
||||
if rs.ListAllowDomain == "" && rs.Mailbox == mailbox {
|
||||
match = true
|
||||
break Match
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
case mbspecSubscribed: // ../rfc/5465:831
|
||||
sub := store.Subscription{Name: mailbox}
|
||||
err := xtxfn().Get(&sub)
|
||||
if err != bstore.ErrAbsent {
|
||||
xcheckf(err, "lookup subscription")
|
||||
}
|
||||
match = err == nil
|
||||
|
||||
case mbspecSubtree: // ../rfc/5465:847
|
||||
for _, name := range eg.MailboxSpecifier.Mailboxes {
|
||||
if mailbox == name || strings.HasPrefix(mailbox, name+"/") {
|
||||
match = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
case mbspecSubtreeOne: // ../rfc/7377:274
|
||||
ntoken := len(strings.Split(mailbox, "/"))
|
||||
for _, name := range eg.MailboxSpecifier.Mailboxes {
|
||||
if mailbox == name || (strings.HasPrefix(mailbox, name+"/") && len(strings.Split(name, "/"))+1 == ntoken) {
|
||||
match = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
case mbspecMailboxes: // ../rfc/5465:853
|
||||
match = slices.Contains(eg.MailboxSpecifier.Mailboxes, mailbox)
|
||||
|
||||
default:
|
||||
panic("missing case for " + string(eg.MailboxSpecifier.Kind))
|
||||
}
|
||||
|
||||
if !match {
|
||||
continue
|
||||
}
|
||||
|
||||
// NONE is the signal we shouldn't return events for this mailbox. ../rfc/5465:455
|
||||
if len(eg.Events) == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
// If event kind matches, we will be notifying about this change. If not, we'll
|
||||
// look again at next mailbox specifiers.
|
||||
for _, ev := range eg.Events {
|
||||
if eventKind(ev.Kind) == kind {
|
||||
return eg.MailboxSpecifier, ev, true
|
||||
}
|
||||
}
|
||||
}
|
||||
return mailboxSpecifier{}, notifyEvent{}, false
|
||||
}
|
||||
|
||||
// Notify enables continuous notifications from the server to the client, without
|
||||
// the client issuing an IDLE command. The mailboxes and events to notify about are
|
||||
// specified in the account. When notify is enabled, instead of being blocked
|
||||
// waiting for a command from the client, we also wait for events from the account,
|
||||
// and send events about it.
|
||||
//
|
||||
// State: Authenticated and selected.
|
||||
func (c *conn) cmdNotify(tag, cmd string, p *parser) {
|
||||
// Command: ../rfc/5465:203
|
||||
// Request syntax: ../rfc/5465:923
|
||||
|
||||
p.xspace()
|
||||
|
||||
// NONE indicates client doesn't want any events, also not the "normal" events
|
||||
// without notify. ../rfc/5465:234
|
||||
// ../rfc/5465:930
|
||||
if p.take("NONE") {
|
||||
p.xempty()
|
||||
|
||||
// If we have delayed changes for the selected mailbox, we are no longer going to
|
||||
// notify about them. The client can't know anymore whether messages still exist,
|
||||
// and trying to read them can cause errors if the messages have been expunged and
|
||||
// erased.
|
||||
var changes []store.Change
|
||||
if c.notify != nil {
|
||||
changes = c.notify.Delayed
|
||||
}
|
||||
c.notify = ¬ify{}
|
||||
c.flushChanges(changes)
|
||||
|
||||
c.ok(tag, cmd)
|
||||
return
|
||||
}
|
||||
|
||||
var n notify
|
||||
var status bool
|
||||
|
||||
// ../rfc/5465:926
|
||||
p.xtake("SET")
|
||||
p.xspace()
|
||||
if p.take("STATUS") {
|
||||
status = true
|
||||
p.xspace()
|
||||
}
|
||||
for {
|
||||
eg := p.xeventGroup()
|
||||
n.EventGroups = append(n.EventGroups, eg)
|
||||
if !p.space() {
|
||||
break
|
||||
}
|
||||
}
|
||||
p.xempty()
|
||||
|
||||
for _, eg := range n.EventGroups {
|
||||
var hasNew, hasExpunge, hasFlag, hasAnnotation bool
|
||||
for _, ev := range eg.Events {
|
||||
switch eventKind(ev.Kind) {
|
||||
case eventMessageNew:
|
||||
hasNew = true
|
||||
case eventMessageExpunge:
|
||||
hasExpunge = true
|
||||
case eventFlagChange:
|
||||
hasFlag = true
|
||||
case eventMailboxName, eventSubscriptionChange, eventMailboxMetadataChange, eventServerMetadataChange:
|
||||
// Nothing special.
|
||||
default: // Including eventAnnotationChange.
|
||||
hasAnnotation = true // Ineffective, we don't implement message annotations yet.
|
||||
// Result must be NO instead of BAD, and we must include BADEVENT and the events we
|
||||
// support. ../rfc/5465:343
|
||||
// ../rfc/5465:1033
|
||||
xusercodeErrorf("BADEVENT (MessageNew MessageExpunge FlagChange MailboxName SubscriptionChange MailboxMetadataChange ServerMetadataChange)", "unimplemented event %s", ev.Kind)
|
||||
}
|
||||
}
|
||||
if hasNew != hasExpunge {
|
||||
// ../rfc/5465:443 ../rfc/5465:987
|
||||
xsyntaxErrorf("MessageNew and MessageExpunge must be specified together")
|
||||
}
|
||||
if (hasFlag || hasAnnotation) && !hasNew {
|
||||
// ../rfc/5465:439
|
||||
xsyntaxErrorf("FlagChange and/or AnnotationChange requires MessageNew and MessageExpunge")
|
||||
}
|
||||
}
|
||||
|
||||
for _, eg := range n.EventGroups {
|
||||
for i, name := range eg.MailboxSpecifier.Mailboxes {
|
||||
eg.MailboxSpecifier.Mailboxes[i] = xcheckmailboxname(name, true)
|
||||
}
|
||||
}
|
||||
|
||||
// Only one selected/selected-delay mailbox filter is allowed. ../rfc/5465:779
|
||||
// Only message events are allowed for selected/selected-delayed. ../rfc/5465:796
|
||||
var haveSelected bool
|
||||
for _, eg := range n.EventGroups {
|
||||
switch eg.MailboxSpecifier.Kind {
|
||||
case mbspecSelected, mbspecSelectedDelayed:
|
||||
if haveSelected {
|
||||
xsyntaxErrorf("cannot have multiple selected/selected-delayed mailbox filters")
|
||||
}
|
||||
haveSelected = true
|
||||
|
||||
// Only events from message-event are allowed with selected mailbox specifiers.
|
||||
// ../rfc/5465:977
|
||||
for _, ev := range eg.Events {
|
||||
if !slices.Contains(messageEventKinds, eventKind(ev.Kind)) {
|
||||
xsyntaxErrorf("selected/selected-delayed is only allowed with message events, not %s", ev.Kind)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// We must apply any changes for delayed select. ../rfc/5465:248
|
||||
if c.notify != nil {
|
||||
delayed := c.notify.Delayed
|
||||
c.notify.Delayed = nil
|
||||
c.xapplyChangesNotify(delayed, true)
|
||||
}
|
||||
|
||||
if status {
|
||||
var statuses []string
|
||||
|
||||
// Flush new pending changes before we read the current state from the database.
|
||||
// Don't allow any concurrent changes for a consistent snapshot.
|
||||
c.account.WithRLock(func() {
|
||||
select {
|
||||
case <-c.comm.Pending:
|
||||
overflow, changes := c.comm.Get()
|
||||
c.xapplyChanges(overflow, changes, true)
|
||||
default:
|
||||
}
|
||||
|
||||
c.xdbread(func(tx *bstore.Tx) {
|
||||
// Send STATUS responses for all matching mailboxes. ../rfc/5465:271
|
||||
q := bstore.QueryTx[store.Mailbox](tx)
|
||||
q.FilterEqual("Expunged", false)
|
||||
q.SortAsc("Name")
|
||||
for mb, err := range q.All() {
|
||||
xcheckf(err, "list mailboxes for status")
|
||||
|
||||
if mb.ID == c.mailboxID {
|
||||
continue
|
||||
}
|
||||
_, _, ok := n.match(c, func() *bstore.Tx { return tx }, mb.ID, mb.Name, eventMessageNew)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
list := listspace{
|
||||
bare("MESSAGES"), number(mb.MessageCountIMAP()),
|
||||
bare("UIDNEXT"), number(mb.UIDNext),
|
||||
bare("UIDVALIDITY"), number(mb.UIDValidity),
|
||||
// Unseen is not mentioned for STATUS, but clients are able to parse it due to
|
||||
// FlagChange, and it will be useful to have.
|
||||
bare("UNSEEN"), number(mb.MailboxCounts.Unseen),
|
||||
}
|
||||
if c.enabled[capCondstore] || c.enabled[capQresync] {
|
||||
list = append(list, bare("HIGHESTMODSEQ"), number(mb.ModSeq))
|
||||
}
|
||||
|
||||
status := fmt.Sprintf("* STATUS %s %s", mailboxt(mb.Name).pack(c), list.pack(c))
|
||||
statuses = append(statuses, status)
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
// Write outside of db transaction and lock.
|
||||
for _, s := range statuses {
|
||||
c.xbwritelinef("%s", s)
|
||||
}
|
||||
}
|
||||
|
||||
// We replace the previous notify config. ../rfc/5465:245
|
||||
c.notify = &n
|
||||
|
||||
// Writing OK will flush any other pending changes for the account according to the
|
||||
// new filters.
|
||||
c.ok(tag, cmd)
|
||||
}
|
516 imapserver/notify_test.go (Normal file)
@@ -0,0 +1,516 @@
|
||||
package imapserver
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/mjl-/mox/imapclient"
|
||||
"github.com/mjl-/mox/store"
|
||||
)
|
||||
|
||||
func TestNotify(t *testing.T) {
|
||||
testNotify(t, false)
|
||||
}
|
||||
|
||||
func TestNotifyUIDOnly(t *testing.T) {
|
||||
testNotify(t, true)
|
||||
}
|
||||
|
||||
func testNotify(t *testing.T, uidonly bool) {
|
||||
defer mockUIDValidity()()
|
||||
tc := start(t, uidonly)
|
||||
defer tc.close()
|
||||
tc.login("mjl@mox.example", password0)
|
||||
tc.client.Select("inbox")
|
||||
|
||||
// Check for some invalid syntax.
|
||||
tc.transactf("bad", "Notify")
|
||||
tc.transactf("bad", "Notify bogus")
|
||||
tc.transactf("bad", "Notify None ") // Trailing space.
|
||||
tc.transactf("bad", "Notify Set")
|
||||
tc.transactf("bad", "Notify Set ")
|
||||
tc.transactf("bad", "Notify Set Status")
|
||||
tc.transactf("bad", "Notify Set Status ()") // Empty list.
|
||||
tc.transactf("bad", "Notify Set Status (UnknownSpecifier (messageNew))")
|
||||
tc.transactf("bad", "Notify Set Status (Personal messageNew)") // Missing list around events.
|
||||
tc.transactf("bad", "Notify Set Status (Personal (messageNew) )") // Trailing space.
|
||||
tc.transactf("bad", "Notify Set Status (Personal (messageNew)) ") // Trailing space.
|
||||
|
||||
tc.transactf("bad", "Notify Set Status (Selected (mailboxName))") // MailboxName not allowed on Selected.
|
||||
tc.transactf("bad", "Notify Set Status (Selected (messageNew))") // MessageNew must come with MessageExpunge.
|
||||
tc.transactf("bad", "Notify Set Status (Selected (flagChange))") // flagChange must come with MessageNew and MessageExpunge.
|
||||
tc.transactf("bad", "Notify Set Status (Selected (mailboxName)) (Selected-Delayed (mailboxName))") // Duplicate selected.
|
||||
tc.transactf("no", "Notify Set Status (Selected (annotationChange))") // We don't implement annotation change.
|
||||
tc.xcode(imapclient.CodeBadEvent{"MessageNew", "MessageExpunge", "FlagChange", "MailboxName", "SubscriptionChange", "MailboxMetadataChange", "ServerMetadataChange"})
|
||||
tc.transactf("no", "Notify Set Status (Personal (unknownEvent))")
|
||||
tc.xcode(imapclient.CodeBadEvent{"MessageNew", "MessageExpunge", "FlagChange", "MailboxName", "SubscriptionChange", "MailboxMetadataChange", "ServerMetadataChange"})
|
||||
|
||||
tc2 := startNoSwitchboard(t, uidonly)
|
||||
defer tc2.closeNoWait()
|
||||
tc2.login("mjl@mox.example", password0)
|
||||
tc2.client.Select("inbox")
|
||||
|
||||
var modseq uint32 = 4
|
||||
|
||||
// Check that we don't get pending changes when we set "notify none". We first make
|
||||
// changes that we drain with noop. Then add new pending changes and execute
|
||||
// "notify none". Server should still process changes to the message sequence
|
||||
// numbers of the selected mailbox.
|
||||
tc2.client.Append("inbox", makeAppend(searchMsg)) // Results in exists and fetch.
|
||||
modseq++
|
||||
tc2.client.Append("Junk", makeAppend(searchMsg)) // Not selected, not mentioned.
|
||||
modseq++
|
||||
tc.transactf("ok", "noop")
|
||||
tc.xuntagged(
|
||||
imapclient.UntaggedExists(1),
|
||||
tc.untaggedFetch(1, 1, imapclient.FetchFlags(nil)),
|
||||
)
|
||||
tc2.client.UIDStoreFlagsAdd("1:*", true, `\Deleted`)
|
||||
modseq++
|
||||
tc2.client.Expunge()
|
||||
modseq++
|
||||
tc.transactf("ok", "Notify None")
|
||||
tc.xuntagged() // No untagged responses for delete/expunge.
|
||||
|
||||
// Enable notify, will first result in a the pending changes, then status.
|
||||
tc.transactf("ok", "Notify Set Status (Selected (messageNew (Uid Modseq Bodystructure Preview) messageExpunge flagChange)) (personal (messageNew messageExpunge flagChange mailboxName subscriptionChange mailboxMetadataChange serverMetadataChange))")
|
||||
tc.xuntagged(
|
||||
imapclient.UntaggedResult{Status: imapclient.OK, Code: imapclient.CodeHighestModSeq(modseq), Text: "after condstore-enabling command"},
|
||||
// note: no status for Inbox since it is selected.
|
||||
imapclient.UntaggedStatus{Mailbox: "Drafts", Attrs: map[imapclient.StatusAttr]int64{imapclient.StatusMessages: 0, imapclient.StatusUIDNext: 1, imapclient.StatusUIDValidity: 1, imapclient.StatusUnseen: 0, imapclient.StatusHighestModSeq: 2}},
|
||||
imapclient.UntaggedStatus{Mailbox: "Sent", Attrs: map[imapclient.StatusAttr]int64{imapclient.StatusMessages: 0, imapclient.StatusUIDNext: 1, imapclient.StatusUIDValidity: 1, imapclient.StatusUnseen: 0, imapclient.StatusHighestModSeq: 2}},
|
||||
imapclient.UntaggedStatus{Mailbox: "Archive", Attrs: map[imapclient.StatusAttr]int64{imapclient.StatusMessages: 0, imapclient.StatusUIDNext: 1, imapclient.StatusUIDValidity: 1, imapclient.StatusUnseen: 0, imapclient.StatusHighestModSeq: 2}},
|
||||
imapclient.UntaggedStatus{Mailbox: "Trash", Attrs: map[imapclient.StatusAttr]int64{imapclient.StatusMessages: 0, imapclient.StatusUIDNext: 1, imapclient.StatusUIDValidity: 1, imapclient.StatusUnseen: 0, imapclient.StatusHighestModSeq: 2}},
|
||||
imapclient.UntaggedStatus{Mailbox: "Junk", Attrs: map[imapclient.StatusAttr]int64{imapclient.StatusMessages: 1, imapclient.StatusUIDNext: 2, imapclient.StatusUIDValidity: 1, imapclient.StatusUnseen: 1, imapclient.StatusHighestModSeq: int64(modseq - 2)}},
|
||||
)
|
||||
|
||||
// Selecting the mailbox again results in a refresh of the message sequence
|
||||
// numbers, with the deleted message gone (it wasn't acknowledged yet due to
|
||||
// "notify none").
|
||||
tc.client.Select("inbox")
|
||||
|
||||
// Add message, should result in EXISTS and FETCH with the configured attributes.
|
||||
tc2.client.Append("inbox", makeAppend(searchMsg))
|
||||
modseq++
|
||||
tc.readuntagged(
|
||||
imapclient.UntaggedExists(1),
|
||||
tc.untaggedFetchUID(1, 2,
|
||||
imapclient.FetchBodystructure{
|
||||
RespAttr: "BODYSTRUCTURE",
|
||||
Body: imapclient.BodyTypeMpart{
|
||||
Bodies: []any{
|
||||
imapclient.BodyTypeText{
|
||||
MediaType: "TEXT",
|
||||
MediaSubtype: "PLAIN",
|
||||
BodyFields: imapclient.BodyFields{
|
||||
Params: [][2]string{[...]string{"CHARSET", "utf-8"}},
|
||||
Octets: 21,
|
||||
},
|
||||
Lines: 1,
|
||||
Ext: &imapclient.BodyExtension1Part{
|
||||
Disposition: ptr((*string)(nil)),
|
||||
DispositionParams: ptr([][2]string(nil)),
|
||||
Language: ptr([]string(nil)),
|
||||
Location: ptr((*string)(nil)),
|
||||
},
|
||||
},
|
||||
imapclient.BodyTypeText{
|
||||
MediaType: "TEXT",
|
||||
MediaSubtype: "HTML",
|
||||
BodyFields: imapclient.BodyFields{
|
||||
Params: [][2]string{[...]string{"CHARSET", "utf-8"}},
|
||||
Octets: 15,
|
||||
},
|
||||
Lines: 1,
|
||||
Ext: &imapclient.BodyExtension1Part{
|
||||
Disposition: ptr((*string)(nil)),
|
||||
DispositionParams: ptr([][2]string(nil)),
|
||||
Language: ptr([]string(nil)),
|
||||
Location: ptr((*string)(nil)),
|
||||
},
|
||||
},
|
||||
},
|
||||
MediaSubtype: "ALTERNATIVE",
|
||||
Ext: &imapclient.BodyExtensionMpart{
|
||||
Params: [][2]string{{"BOUNDARY", "x"}},
|
||||
Disposition: ptr((*string)(nil)), // Present but nil.
|
||||
DispositionParams: ptr([][2]string(nil)),
|
||||
Language: ptr([]string(nil)),
|
||||
Location: ptr((*string)(nil)),
|
||||
},
|
||||
},
|
||||
},
|
||||
imapclient.FetchPreview{Preview: ptr("this is plain text.")},
|
||||
imapclient.FetchModSeq(modseq),
|
||||
),
|
||||
)
|
||||
|
||||
// Change flags.
|
||||
tc2.client.UIDStoreFlagsAdd("1:*", true, `\Deleted`)
|
||||
modseq++
|
||||
tc.readuntagged(tc.untaggedFetch(1, 2, imapclient.FetchFlags{`\Deleted`}, imapclient.FetchModSeq(modseq)))
|
||||
|
||||
// Remove message.
|
||||
tc2.client.Expunge()
|
||||
modseq++
|
||||
if uidonly {
|
||||
tc.readuntagged(imapclient.UntaggedVanished{UIDs: xparseNumSet("2")})
|
||||
} else {
|
||||
tc.readuntagged(imapclient.UntaggedExpunge(1))
|
||||
}
|
||||
|
||||
// MailboxMetadataChange for mailbox annotation.
|
||||
tc2.transactf("ok", `setmetadata Archive (/private/comment "test")`)
|
||||
modseq++
|
||||
tc.readuntagged(
|
||||
imapclient.UntaggedMetadataKeys{Mailbox: "Archive", Keys: []string{"/private/comment"}},
|
||||
)
|
||||
|
||||
// MailboxMetadataChange also for the selected Inbox.
|
||||
tc2.transactf("ok", `setmetadata Inbox (/private/comment "test")`)
|
||||
modseq++
|
||||
tc.readuntagged(
|
||||
imapclient.UntaggedMetadataKeys{Mailbox: "Inbox", Keys: []string{"/private/comment"}},
|
||||
)
|
||||
|
||||
// ServerMetadataChange for server annotation.
|
||||
tc2.transactf("ok", `setmetadata "" (/private/vendor/other/x "test")`)
|
||||
modseq++
|
||||
tc.readuntagged(
|
||||
imapclient.UntaggedMetadataKeys{Mailbox: "", Keys: []string{"/private/vendor/other/x"}},
|
||||
)
|
||||
|
||||
// SubscriptionChange for new subscription.
|
||||
tc2.client.Subscribe("doesnotexist")
|
||||
tc.readuntagged(
|
||||
imapclient.UntaggedList{Mailbox: "doesnotexist", Separator: '/', Flags: []string{`\Subscribed`, `\NonExistent`}},
|
||||
)
|
||||
|
||||
// SubscriptionChange for removed subscription.
|
||||
tc2.client.Unsubscribe("doesnotexist")
|
||||
tc.readuntagged(
|
||||
imapclient.UntaggedList{Mailbox: "doesnotexist", Separator: '/', Flags: []string{`\NonExistent`}},
|
||||
)
|
||||
|
||||
// SubscriptionChange for selected mailbox.
|
||||
tc2.client.Unsubscribe("Inbox")
|
||||
tc2.client.Subscribe("Inbox")
|
||||
tc.readuntagged(
|
||||
imapclient.UntaggedList{Mailbox: "Inbox", Separator: '/'},
|
||||
imapclient.UntaggedList{Mailbox: "Inbox", Separator: '/', Flags: []string{`\Subscribed`}},
|
||||
)
|
||||
|
||||
// MailboxName for creating mailbox.
|
||||
tc2.client.Create("newbox", nil)
|
||||
modseq++
|
||||
tc.readuntagged(
|
||||
imapclient.UntaggedList{Mailbox: "newbox", Separator: '/', Flags: []string{`\Subscribed`}},
|
||||
)
|
||||
|
||||
// MailboxName for renaming mailbox.
|
||||
tc2.client.Rename("newbox", "oldbox")
|
||||
modseq++
|
||||
tc.readuntagged(
|
||||
imapclient.UntaggedList{Mailbox: "oldbox", Separator: '/', OldName: "newbox"},
|
||||
)
|
||||
|
||||
// MailboxName for deleting mailbox.
|
||||
tc2.client.Delete("oldbox")
|
||||
modseq++
|
||||
tc.readuntagged(
|
||||
imapclient.UntaggedList{Mailbox: "oldbox", Separator: '/', Flags: []string{`\NonExistent`}},
|
||||
)
|
||||
|
||||
// Add message again to check for modseq. First set notify again with fewer fetch
|
||||
// attributes for simpler checking.
|
||||
tc.transactf("ok", "Notify Set (personal (messageNew messageExpunge flagChange mailboxName subscriptionChange mailboxMetadataChange serverMetadataChange)) (Selected (messageNew (Uid Modseq) messageExpunge flagChange))")
|
||||
tc2.client.Append("inbox", makeAppend(searchMsg))
|
||||
modseq++
|
||||
tc.readuntagged(
|
||||
imapclient.UntaggedExists(1),
|
||||
tc.untaggedFetchUID(1, 3, imapclient.FetchModSeq(modseq)),
|
||||
)
|
||||
|
||||
// Next round of events must be ignored. We shouldn't get anything until we add a
|
||||
// message to "testbox".
|
||||
tc.transactf("ok", "Notify Set (Selected None) (mailboxes testbox (messageNew messageExpunge)) (personal None)")
|
||||
tc2.client.Append("inbox", makeAppend(searchMsg)) // MessageNew
|
||||
modseq++
|
||||
tc2.client.UIDStoreFlagsAdd("1:*", true, `\Deleted`) // FlagChange
|
||||
modseq++
|
||||
tc2.client.Expunge() // MessageExpunge
|
||||
modseq++
|
||||
tc2.transactf("ok", `setmetadata Archive (/private/comment "test2")`) // MailboxMetadataChange
|
||||
modseq++
|
||||
tc2.transactf("ok", `setmetadata "" (/private/vendor/other/x "test2")`) // ServerMetadataChange
|
||||
modseq++
|
||||
tc2.client.Subscribe("doesnotexist2") // SubscriptionChange
|
||||
tc2.client.Unsubscribe("doesnotexist2") // SubscriptionChange
|
||||
tc2.client.Create("newbox2", nil) // MailboxName
|
||||
modseq++
|
||||
tc2.client.Rename("newbox2", "oldbox2") // MailboxName
|
||||
modseq++
|
||||
tc2.client.Delete("oldbox2") // MailboxName
|
||||
modseq++
|
||||
// Now trigger receiving a notification.
|
||||
tc2.client.Create("testbox", nil) // MailboxName
|
||||
modseq++
|
||||
tc2.client.Append("testbox", makeAppend(searchMsg)) // MessageNew
|
||||
modseq++
|
||||
tc.readuntagged(
|
||||
imapclient.UntaggedStatus{Mailbox: "testbox", Attrs: map[imapclient.StatusAttr]int64{imapclient.StatusMessages: 1, imapclient.StatusUIDNext: 2, imapclient.StatusUnseen: 1, imapclient.StatusHighestModSeq: int64(modseq)}},
|
||||
)
|
||||
|
||||
// Test filtering per mailbox specifier. We create two mailboxes.
|
||||
tc.client.Create("inbox/a/b", nil)
|
||||
modseq++
|
||||
tc.client.Create("other/a/b", nil)
|
||||
modseq++
|
||||
tc.client.Unsubscribe("other/a/b")
|
||||
|
||||
// Inboxes
|
||||
tc3 := startNoSwitchboard(t, uidonly)
|
||||
defer tc3.closeNoWait()
|
||||
tc3.login("mjl@mox.example", password0)
|
||||
tc3.transactf("ok", "Notify Set (Inboxes (messageNew messageExpunge))")
|
||||
|
||||
// Subscribed
|
||||
tc4 := startNoSwitchboard(t, uidonly)
|
||||
defer tc4.closeNoWait()
|
||||
tc4.login("mjl@mox.example", password0)
|
||||
tc4.transactf("ok", "Notify Set (Subscribed (messageNew messageExpunge))")
|
||||
|
||||
// Subtree
|
||||
tc5 := startNoSwitchboard(t, uidonly)
|
||||
defer tc5.closeNoWait()
|
||||
tc5.login("mjl@mox.example", password0)
|
||||
tc5.transactf("ok", "Notify Set (Subtree (Nonexistent inbox) (messageNew messageExpunge))")
|
||||
|
||||
// Subtree-One
|
||||
tc6 := startNoSwitchboard(t, uidonly)
|
||||
defer tc6.closeNoWait()
|
||||
tc6.login("mjl@mox.example", password0)
|
||||
tc6.transactf("ok", "Notify Set (Subtree-One (Nonexistent Inbox/a other) (messageNew messageExpunge))")
|
||||
|
||||
// We append to other/a/b first. It would normally come first in the notifications,
|
||||
// but we check we only get the second event.
|
||||
tc2.client.Append("other/a/b", makeAppend(searchMsg))
|
||||
modseq++
|
||||
tc2.client.Append("inbox/a/b", makeAppend(searchMsg))
|
||||
modseq++
|
||||
|
||||
// No highestmodseq, these connections don't have CONDSTORE enabled.
|
||||
tc3.readuntagged(
|
||||
imapclient.UntaggedStatus{Mailbox: "Inbox/a/b", Attrs: map[imapclient.StatusAttr]int64{imapclient.StatusMessages: 1, imapclient.StatusUIDNext: 2, imapclient.StatusUnseen: 1}},
|
||||
)
|
||||
tc4.readuntagged(
|
||||
imapclient.UntaggedStatus{Mailbox: "Inbox/a/b", Attrs: map[imapclient.StatusAttr]int64{imapclient.StatusMessages: 1, imapclient.StatusUIDNext: 2, imapclient.StatusUnseen: 1}},
|
||||
)
|
||||
tc5.readuntagged(
|
||||
imapclient.UntaggedStatus{Mailbox: "Inbox/a/b", Attrs: map[imapclient.StatusAttr]int64{imapclient.StatusMessages: 1, imapclient.StatusUIDNext: 2, imapclient.StatusUnseen: 1}},
|
||||
)
|
||||
tc6.readuntagged(
|
||||
imapclient.UntaggedStatus{Mailbox: "Inbox/a/b", Attrs: map[imapclient.StatusAttr]int64{imapclient.StatusMessages: 1, imapclient.StatusUIDNext: 2, imapclient.StatusUnseen: 1}},
|
||||
)
|
||||
|
||||
// Test for STATUS events on non-selected mailbox for message events.
|
||||
tc.transactf("ok", "notify set (personal (messageNew messageExpunge flagChange))")
|
||||
tc.client.Unselect()
|
||||
tc2.client.Create("statusbox", nil)
|
||||
modseq++
|
||||
tc2.client.Append("statusbox", makeAppend(searchMsg))
|
||||
modseq++
|
||||
tc.readuntagged(
|
||||
imapclient.UntaggedStatus{Mailbox: "statusbox", Attrs: map[imapclient.StatusAttr]int64{imapclient.StatusMessages: 1, imapclient.StatusUIDNext: 2, imapclient.StatusUnseen: 1, imapclient.StatusHighestModSeq: int64(modseq)}},
|
||||
)
|
||||
|
||||
// With Selected-Delayed, we only get the events for the selected mailbox for
|
||||
// explicit commands. We still get other events.
|
||||
tc.transactf("ok", "notify set (selected-delayed (messageNew messageExpunge flagChange)) (personal (messageNew messageExpunge flagChange))")
|
||||
tc.client.Select("statusbox")
|
||||
tc2.client.Append("inbox", makeAppend(searchMsg))
|
||||
modseq++
|
||||
tc2.client.UIDStoreFlagsSet("*", true, `\Seen`)
|
||||
modseq++
|
||||
tc2.client.Append("statusbox", imapclient.Append{Flags: []string{"newflag"}, Size: int64(len(searchMsg)), Data: strings.NewReader(searchMsg)})
|
||||
modseq++
|
||||
tc2.client.Select("statusbox")
|
||||
|
||||
tc.readuntagged(
|
||||
imapclient.UntaggedStatus{Mailbox: "Inbox", Attrs: map[imapclient.StatusAttr]int64{imapclient.StatusMessages: 1, imapclient.StatusUIDNext: 6, imapclient.StatusUnseen: 1, imapclient.StatusHighestModSeq: int64(modseq - 2)}},
|
||||
imapclient.UntaggedStatus{Mailbox: "Inbox", Attrs: map[imapclient.StatusAttr]int64{imapclient.StatusUIDValidity: 1, imapclient.StatusUnseen: 0, imapclient.StatusHighestModSeq: int64(modseq - 1)}},
|
||||
)
|
||||
|
||||
tc.transactf("ok", "noop")
|
||||
tc.xuntagged(
|
||||
imapclient.UntaggedExists(2),
|
||||
tc.untaggedFetch(2, 2, imapclient.FetchFlags{"newflag"}, imapclient.FetchModSeq(modseq)),
|
||||
imapclient.UntaggedFlags{`\Seen`, `\Answered`, `\Flagged`, `\Deleted`, `\Draft`, `$Forwarded`, `$Junk`, `$NotJunk`, `$Phishing`, `$MDNSent`, `newflag`},
|
||||
)
|
||||
|
||||
tc2.client.UIDStoreFlagsSet("2", true, `\Deleted`)
|
||||
modseq++
|
||||
tc2.client.Expunge()
|
||||
modseq++
|
||||
tc.transactf("ok", "noop")
|
||||
if uidonly {
|
||||
tc.xuntagged(
|
||||
tc.untaggedFetch(2, 2, imapclient.FetchFlags{`\Deleted`}, imapclient.FetchModSeq(modseq-1)),
|
||||
imapclient.UntaggedVanished{UIDs: xparseNumSet("2")},
|
||||
)
|
||||
} else {
|
||||
tc.xuntagged(
|
||||
tc.untaggedFetch(2, 2, imapclient.FetchFlags{`\Deleted`}, imapclient.FetchModSeq(modseq-1)),
|
||||
imapclient.UntaggedExpunge(2),
|
||||
)
|
||||
}
|
||||
|
||||
// With Selected-Delayed, we should get events for selected mailboxes immediately when using IDLE.
|
||||
tc2.client.UIDStoreFlagsSet("*", true, `\Answered`)
|
||||
modseq++
|
||||
tc2.client.Select("inbox")
|
||||
tc2.client.UIDStoreFlagsClear("*", true, `\Seen`)
|
||||
modseq++
|
||||
tc2.client.Select("statusbox")
|
||||
|
||||
tc.readuntagged(
|
||||
imapclient.UntaggedStatus{Mailbox: "Inbox", Attrs: map[imapclient.StatusAttr]int64{imapclient.StatusUIDValidity: 1, imapclient.StatusUnseen: 1, imapclient.StatusHighestModSeq: int64(modseq)}},
|
||||
)
|
||||
|
||||
tc.conn.SetReadDeadline(time.Now().Add(3 * time.Second))
|
||||
tc.cmdf("", "idle")
|
||||
tc.readprefixline("+ ")
|
||||
tc.readuntagged(tc.untaggedFetch(1, 1, imapclient.FetchFlags{`\Answered`}, imapclient.FetchModSeq(modseq-1)))
|
||||
tc.writelinef("done")
|
||||
tc.response("ok")
|
||||
tc.conn.SetReadDeadline(time.Now().Add(30 * time.Second))
|
||||
|
||||
// If any event matches, we normally return it. But NONE prevents looking further.
|
||||
tc.client.Unselect()
|
||||
tc.transactf("ok", "notify set (mailboxes statusbox NONE) (personal (mailboxName))")
|
||||
tc2.client.UIDStoreFlagsSet("*", true, `\Answered`) // Matches NONE, ignored.
|
||||
//modseq++
|
||||
tc2.client.Create("eventbox", nil)
|
||||
//modseq++
|
||||
tc.readuntagged(
|
||||
imapclient.UntaggedList{Mailbox: "eventbox", Separator: '/', Flags: []string{`\Subscribed`}},
|
||||
)
|
||||
|
||||
// Check we can return message contents.
|
||||
tc.transactf("ok", "notify set (selected (messageNew (body[header] body[text]) messageExpunge))")
|
||||
tc.client.Select("statusbox")
|
||||
tc2.client.Append("statusbox", makeAppend(searchMsg))
|
||||
// modseq++
|
||||
offset := strings.Index(searchMsg, "\r\n\r\n")
|
||||
tc.readuntagged(
|
||||
imapclient.UntaggedExists(2),
|
||||
tc.untaggedFetch(2, 3,
|
||||
imapclient.FetchBody{
|
||||
RespAttr: "BODY[HEADER]",
|
||||
Section: "HEADER",
|
||||
Body: searchMsg[:offset+4],
|
||||
},
|
||||
imapclient.FetchBody{
|
||||
RespAttr: "BODY[TEXT]",
|
||||
Section: "TEXT",
|
||||
Body: searchMsg[offset+4:],
|
||||
},
|
||||
imapclient.FetchFlags(nil),
|
||||
),
|
||||
)
|
||||
|
||||
// If we encounter an error during fetch, an untagged NO is returned.
|
||||
// We ask for the 2nd part of a message, and we add a message with just 1 part.
|
||||
tc.transactf("ok", "notify set (selected (messageNew (body[2]) messageExpunge))")
|
||||
tc2.client.Append("statusbox", makeAppend(exampleMsg))
|
||||
// modseq++
|
||||
tc.readuntagged(
|
||||
imapclient.UntaggedExists(3),
|
||||
imapclient.UntaggedResult{Status: "NO", Text: "generating notify fetch response: requested part does not exist"},
|
||||
tc.untaggedFetchUID(3, 4),
|
||||
)
|
||||
|
||||
// When adding new tests, uncomment modseq++ lines above.
|
||||
}
|
||||
|
||||
func TestNotifyOverflow(t *testing.T) {
|
||||
testNotifyOverflow(t, false)
|
||||
}
|
||||
|
||||
func TestNotifyOverflowUIDOnly(t *testing.T) {
|
||||
testNotifyOverflow(t, true)
|
||||
}
|
||||
|
||||
func testNotifyOverflow(t *testing.T, uidonly bool) {
|
||||
orig := store.CommPendingChangesMax
|
||||
store.CommPendingChangesMax = 3
|
||||
defer func() {
|
||||
store.CommPendingChangesMax = orig
|
||||
}()
|
||||
|
||||
defer mockUIDValidity()()
|
||||
tc := start(t, uidonly)
|
||||
defer tc.close()
|
||||
tc.login("mjl@mox.example", password0)
|
||||
tc.client.Select("inbox")
|
||||
tc.transactf("ok", "noop")
|
||||
|
||||
tc2 := startNoSwitchboard(t, uidonly)
|
||||
defer tc2.closeNoWait()
|
||||
tc2.login("mjl@mox.example", password0)
|
||||
tc2.client.Select("inbox")
|
||||
|
||||
// Generates 4 changes, crossing max 3.
|
||||
tc2.client.Append("inbox", makeAppend(searchMsg))
|
||||
tc2.client.Append("inbox", makeAppend(searchMsg))
|
||||
|
||||
tc.transactf("ok", "noop")
|
||||
tc.xuntagged(imapclient.UntaggedResult{Status: "OK", Code: imapclient.CodeWord("NOTIFICATIONOVERFLOW"), Text: "out of sync after too many pending changes"})
|
||||
|
||||
// Won't be getting any more notifications until we enable them again with NOTIFY.
|
||||
tc2.client.Append("inbox", makeAppend(searchMsg))
|
||||
tc.transactf("ok", "noop")
|
||||
tc.xuntagged()
|
||||
|
||||
// Enable notify again. Without uidonly, we won't get a notification because the
|
||||
// message isn't known in the session.
|
||||
tc.transactf("ok", "notify set (selected (messageNew messageExpunge flagChange))")
|
||||
tc2.client.UIDStoreFlagsAdd("1", true, `\Seen`)
|
||||
if uidonly {
|
||||
tc.readuntagged(tc.untaggedFetch(1, 1, imapclient.FetchFlags{`\Seen`}))
|
||||
} else {
|
||||
tc.transactf("ok", "noop")
|
||||
tc.xuntagged()
|
||||
}
|
||||
|
||||
// Reselect to get the message visible in the session.
|
||||
tc.client.Select("inbox")
|
||||
tc2.client.UIDStoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "noop")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, imapclient.FetchFlags(nil)))
|
||||
|
||||
// Trigger overflow for changes for "selected-delayed".
|
||||
store.CommPendingChangesMax = 10
|
||||
delayedMax := selectedDelayedChangesMax
|
||||
selectedDelayedChangesMax = 1
|
||||
defer func() {
|
||||
selectedDelayedChangesMax = delayedMax
|
||||
}()
|
||||
tc.transactf("ok", "notify set (selected-delayed (messageNew messageExpunge flagChange))")
|
||||
tc2.client.UIDStoreFlagsAdd("1", true, `\Seen`)
|
||||
tc2.client.UIDStoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "noop")
|
||||
tc.xuntagged(imapclient.UntaggedResult{Status: "OK", Code: imapclient.CodeWord("NOTIFICATIONOVERFLOW"), Text: "out of sync after too many pending changes for selected mailbox"})
|
||||
|
||||
// Again, no new notifications until we select and enable again.
|
||||
tc2.client.UIDStoreFlagsAdd("1", true, `\Seen`)
|
||||
tc.transactf("ok", "noop")
|
||||
tc.xuntagged()
|
||||
|
||||
tc.client.Select("inbox")
|
||||
tc.transactf("ok", "notify set (selected-delayed (messageNew messageExpunge flagChange))")
|
||||
tc2.client.UIDStoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "noop")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, imapclient.FetchFlags(nil)))
|
||||
}
|
@ -9,7 +9,7 @@ import (
|
||||
|
||||
type token interface {
|
||||
pack(c *conn) string
|
||||
writeTo(c *conn, w io.Writer)
|
||||
xwriteTo(c *conn, xw io.Writer) // Writes to xw panic on error.
|
||||
}
|
||||
|
||||
type bare string
|
||||
@ -18,8 +18,8 @@ func (t bare) pack(c *conn) string {
|
||||
return string(t)
|
||||
}
|
||||
|
||||
func (t bare) writeTo(c *conn, w io.Writer) {
|
||||
w.Write([]byte(t.pack(c)))
|
||||
func (t bare) xwriteTo(c *conn, xw io.Writer) {
|
||||
xw.Write([]byte(t.pack(c)))
|
||||
}
|
||||
|
||||
type niltoken struct{}
|
||||
@ -30,15 +30,15 @@ func (t niltoken) pack(c *conn) string {
|
||||
return "NIL"
|
||||
}
|
||||
|
||||
func (t niltoken) writeTo(c *conn, w io.Writer) {
|
||||
w.Write([]byte(t.pack(c)))
|
||||
func (t niltoken) xwriteTo(c *conn, xw io.Writer) {
|
||||
xw.Write([]byte(t.pack(c)))
|
||||
}
|
||||
|
||||
func nilOrString(s string) token {
|
||||
if s == "" {
|
||||
func nilOrString(s *string) token {
|
||||
if s == nil {
|
||||
return nilt
|
||||
}
|
||||
return string0(s)
|
||||
return string0(*s)
|
||||
}
|
||||
|
||||
type string0 string
|
||||
@ -60,8 +60,8 @@ func (t string0) pack(c *conn) string {
|
||||
return r
|
||||
}
|
||||
|
||||
func (t string0) writeTo(c *conn, w io.Writer) {
|
||||
w.Write([]byte(t.pack(c)))
|
||||
func (t string0) xwriteTo(c *conn, xw io.Writer) {
|
||||
xw.Write([]byte(t.pack(c)))
|
||||
}
|
||||
|
||||
type dquote string
|
||||
@ -78,8 +78,8 @@ func (t dquote) pack(c *conn) string {
|
||||
return r
|
||||
}
|
||||
|
||||
func (t dquote) writeTo(c *conn, w io.Writer) {
|
||||
w.Write([]byte(t.pack(c)))
|
||||
func (t dquote) xwriteTo(c *conn, xw io.Writer) {
|
||||
xw.Write([]byte(t.pack(c)))
|
||||
}
|
||||
|
||||
type syncliteral string
|
||||
@ -88,15 +88,16 @@ func (t syncliteral) pack(c *conn) string {
|
||||
return fmt.Sprintf("{%d}\r\n", len(t)) + string(t)
|
||||
}
|
||||
|
||||
func (t syncliteral) writeTo(c *conn, w io.Writer) {
|
||||
fmt.Fprintf(w, "{%d}\r\n", len(t))
|
||||
w.Write([]byte(t))
|
||||
func (t syncliteral) xwriteTo(c *conn, xw io.Writer) {
|
||||
fmt.Fprintf(xw, "{%d}\r\n", len(t))
|
||||
xw.Write([]byte(t))
|
||||
}
|
||||
|
||||
// data from reader with known size.
|
||||
type readerSizeSyncliteral struct {
|
||||
r io.Reader
|
||||
size int64
|
||||
lit8 bool
|
||||
}
|
||||
|
||||
func (t readerSizeSyncliteral) pack(c *conn) string {
|
||||
@ -104,13 +105,21 @@ func (t readerSizeSyncliteral) pack(c *conn) string {
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return fmt.Sprintf("{%d}\r\n", t.size) + string(buf)
|
||||
var lit string
|
||||
if t.lit8 {
|
||||
lit = "~"
|
||||
}
|
||||
return fmt.Sprintf("%s{%d}\r\n", lit, t.size) + string(buf)
|
||||
}
|
||||
|
||||
func (t readerSizeSyncliteral) writeTo(c *conn, w io.Writer) {
|
||||
fmt.Fprintf(w, "{%d}\r\n", t.size)
|
||||
defer c.xtrace(mlog.LevelTracedata)()
|
||||
if _, err := io.Copy(w, io.LimitReader(t.r, t.size)); err != nil {
|
||||
func (t readerSizeSyncliteral) xwriteTo(c *conn, xw io.Writer) {
|
||||
var lit string
|
||||
if t.lit8 {
|
||||
lit = "~"
|
||||
}
|
||||
fmt.Fprintf(xw, "%s{%d}\r\n", lit, t.size)
|
||||
defer c.xtracewrite(mlog.LevelTracedata)()
|
||||
if _, err := io.Copy(xw, io.LimitReader(t.r, t.size)); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
@ -128,17 +137,14 @@ func (t readerSyncliteral) pack(c *conn) string {
|
||||
return fmt.Sprintf("{%d}\r\n", len(buf)) + string(buf)
|
||||
}
|
||||
|
||||
func (t readerSyncliteral) writeTo(c *conn, w io.Writer) {
|
||||
func (t readerSyncliteral) xwriteTo(c *conn, xw io.Writer) {
|
||||
buf, err := io.ReadAll(t.r)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
fmt.Fprintf(w, "{%d}\r\n", len(buf))
|
||||
defer c.xtrace(mlog.LevelTracedata)()
|
||||
_, err = w.Write(buf)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
fmt.Fprintf(xw, "{%d}\r\n", len(buf))
|
||||
defer c.xtracewrite(mlog.LevelTracedata)()
|
||||
xw.Write(buf)
|
||||
}
|
||||
|
||||
// list with tokens space-separated
|
||||
@ -156,15 +162,38 @@ func (t listspace) pack(c *conn) string {
|
||||
return s
|
||||
}
|
||||
|
||||
func (t listspace) writeTo(c *conn, w io.Writer) {
|
||||
fmt.Fprint(w, "(")
|
||||
func (t listspace) xwriteTo(c *conn, xw io.Writer) {
|
||||
fmt.Fprint(xw, "(")
|
||||
for i, e := range t {
|
||||
if i > 0 {
|
||||
fmt.Fprint(w, " ")
|
||||
fmt.Fprint(xw, " ")
|
||||
}
|
||||
e.writeTo(c, w)
|
||||
e.xwriteTo(c, xw)
|
||||
}
|
||||
fmt.Fprint(xw, ")")
|
||||
}
|
||||
|
||||
// concatenate tokens space-separated
|
||||
type concatspace []token
|
||||
|
||||
func (t concatspace) pack(c *conn) string {
|
||||
var s string
|
||||
for i, e := range t {
|
||||
if i > 0 {
|
||||
s += " "
|
||||
}
|
||||
s += e.pack(c)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (t concatspace) xwriteTo(c *conn, xw io.Writer) {
|
||||
for i, e := range t {
|
||||
if i > 0 {
|
||||
fmt.Fprint(xw, " ")
|
||||
}
|
||||
e.xwriteTo(c, xw)
|
||||
}
|
||||
fmt.Fprint(w, ")")
|
||||
}
|
||||
|
||||
// Concatenated tokens, no spaces or list syntax.
|
||||
@ -178,9 +207,9 @@ func (t concat) pack(c *conn) string {
|
||||
return s
|
||||
}
|
||||
|
||||
func (t concat) writeTo(c *conn, w io.Writer) {
|
||||
func (t concat) xwriteTo(c *conn, xw io.Writer) {
|
||||
for _, e := range t {
|
||||
e.writeTo(c, w)
|
||||
e.xwriteTo(c, xw)
|
||||
}
|
||||
}
|
||||
|
||||
@ -202,8 +231,23 @@ next:
|
||||
return string(t)
|
||||
}
|
||||
|
||||
func (t astring) writeTo(c *conn, w io.Writer) {
|
||||
w.Write([]byte(t.pack(c)))
|
||||
func (t astring) xwriteTo(c *conn, xw io.Writer) {
|
||||
xw.Write([]byte(t.pack(c)))
|
||||
}
|
||||
|
||||
// mailbox with utf7 encoding if connection requires it, or utf8 otherwise.
|
||||
type mailboxt string
|
||||
|
||||
func (t mailboxt) pack(c *conn) string {
|
||||
s := string(t)
|
||||
if !c.utf8strings() {
|
||||
s = utf7encode(s)
|
||||
}
|
||||
return astring(s).pack(c)
|
||||
}
|
||||
|
||||
func (t mailboxt) xwriteTo(c *conn, xw io.Writer) {
|
||||
xw.Write([]byte(t.pack(c)))
|
||||
}
|
||||
|
||||
type number uint32
|
||||
@ -212,6 +256,6 @@ func (t number) pack(c *conn) string {
|
||||
return fmt.Sprintf("%d", t)
|
||||
}
|
||||
|
||||
func (t number) writeTo(c *conn, w io.Writer) {
|
||||
w.Write([]byte(t.pack(c)))
|
||||
func (t number) xwriteTo(c *conn, xw io.Writer) {
|
||||
xw.Write([]byte(t.pack(c)))
|
||||
}
|
||||
|
@ -48,11 +48,13 @@ type parser struct {
|
||||
// Orig is the line in original casing, and upper in upper casing. We often match
|
||||
// against upper for easy case insensitive handling as IMAP requires, but sometimes
|
||||
// return from orig to keep the original case.
|
||||
orig string
|
||||
upper string
|
||||
o int // Current offset in parsing.
|
||||
contexts []string // What we're parsing, for error messages.
|
||||
conn *conn
|
||||
orig string
|
||||
upper string
|
||||
o int // Current offset in parsing.
|
||||
contexts []string // What we're parsing, for error messages.
|
||||
literals int // Literals in command, for limit.
|
||||
literalSize int64 // Total size of literals in command, for limit.
|
||||
conn *conn
|
||||
}
|
||||
|
||||
// toUpper upper cases bytes that are a-z. strings.ToUpper does too much. and
|
||||
@ -70,7 +72,7 @@ func toUpper(s string) string {
|
||||
}
|
||||
|
||||
func newParser(s string, conn *conn) *parser {
|
||||
return &parser{s, toUpper(s), 0, nil, conn}
|
||||
return &parser{s, toUpper(s), 0, nil, 0, 0, conn}
|
||||
}
|
||||
|
||||
func (p *parser) xerrorf(format string, args ...any) {
|
||||
@ -302,11 +304,11 @@ func (p *parser) xstring() (r string) {
|
||||
}
|
||||
p.xerrorf("missing closing dquote in string")
|
||||
}
|
||||
size, sync := p.xliteralSize(100*1024, false)
|
||||
s := p.conn.xreadliteral(size, sync)
|
||||
line := p.conn.readline(false)
|
||||
size, sync := p.xliteralSize(false, true)
|
||||
buf := p.conn.xreadliteral(size, sync)
|
||||
line := p.conn.xreadline(false)
|
||||
p.orig, p.upper, p.o = line, toUpper(line), 0
|
||||
return s
|
||||
return string(buf)
|
||||
}
|
||||
|
||||
func (p *parser) xnil() {
|
||||
@ -573,11 +575,13 @@ func (p *parser) xsectionBinary() (r []uint32) {
|
||||
var fetchAttWords = []string{
|
||||
"ENVELOPE", "FLAGS", "INTERNALDATE", "RFC822.SIZE", "BODYSTRUCTURE", "UID", "BODY.PEEK", "BODY", "BINARY.PEEK", "BINARY.SIZE", "BINARY",
|
||||
"RFC822.HEADER", "RFC822.TEXT", "RFC822", // older IMAP
|
||||
"MODSEQ", // CONDSTORE extension.
|
||||
"MODSEQ", // CONDSTORE extension.
|
||||
"SAVEDATE", // SAVEDATE extension, ../rfc/8514:186
|
||||
"PREVIEW", // ../rfc/8970:345
|
||||
}
|
||||
|
||||
// ../rfc/9051:6557 ../rfc/3501:4751 ../rfc/7162:2483
|
||||
func (p *parser) xfetchAtt(isUID bool) (r fetchAtt) {
|
||||
func (p *parser) xfetchAtt() (r fetchAtt) {
|
||||
defer p.context("fetchAtt")()
|
||||
f := p.xtakelist(fetchAttWords...)
|
||||
r.peek = strings.HasSuffix(f, ".PEEK")
|
||||
@ -605,12 +609,14 @@ func (p *parser) xfetchAtt(isUID bool) (r fetchAtt) {
|
||||
// The wording about when to respond with a MODSEQ attribute could be more clear. ../rfc/7162:923 ../rfc/7162:388
|
||||
// MODSEQ attribute is a CONDSTORE-enabling parameter. ../rfc/7162:377
|
||||
p.conn.xensureCondstore(nil)
|
||||
case "PREVIEW":
|
||||
r.previewLazy = p.take(" (LAZY)")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// ../rfc/9051:6553 ../rfc/3501:4748
|
||||
func (p *parser) xfetchAtts(isUID bool) []fetchAtt {
|
||||
func (p *parser) xfetchAtts() []fetchAtt {
|
||||
defer p.context("fetchAtts")()
|
||||
|
||||
fields := func(l ...string) []fetchAtt {
|
||||
@ -634,13 +640,13 @@ func (p *parser) xfetchAtts(isUID bool) []fetchAtt {
|
||||
}
|
||||
|
||||
if !p.hasPrefix("(") {
|
||||
return []fetchAtt{p.xfetchAtt(isUID)}
|
||||
return []fetchAtt{p.xfetchAtt()}
|
||||
}
|
||||
|
||||
l := []fetchAtt{}
|
||||
p.xtake("(")
|
||||
for {
|
||||
l = append(l, p.xfetchAtt(isUID))
|
||||
l = append(l, p.xfetchAtt())
|
||||
if !p.take(" ") {
|
||||
break
|
||||
}
|
||||
@ -741,23 +747,47 @@ func (p *parser) xdateTime() time.Time {
|
||||
}
|
||||
|
||||
// ../rfc/9051:6655 ../rfc/7888:330 ../rfc/3501:4801
|
||||
func (p *parser) xliteralSize(maxSize int64, lit8 bool) (size int64, sync bool) {
|
||||
func (p *parser) xliteralSize(lit8 bool, checkSize bool) (size int64, sync bool) {
|
||||
// todo: enforce that we get non-binary when ~ isn't present?
|
||||
if lit8 {
|
||||
p.take("~")
|
||||
}
|
||||
p.xtake("{")
|
||||
size = p.xnumber64()
|
||||
if maxSize > 0 && size > maxSize {
|
||||
// ../rfc/7888:249
|
||||
line := fmt.Sprintf("* BYE [ALERT] Max literal size %d is larger than allowed %d in this context", size, maxSize)
|
||||
err := errors.New("literal too big")
|
||||
panic(syntaxError{line, "TOOBIG", err.Error(), err})
|
||||
}
|
||||
|
||||
sync = !p.take("+")
|
||||
p.xtake("}")
|
||||
p.xempty()
|
||||
|
||||
if checkSize {
|
||||
// ../rfc/7888:249
|
||||
var errmsg string
|
||||
const (
|
||||
litSizeMax = 100 * 1024
|
||||
totalLitSizeMax = 10 * litSizeMax
|
||||
litMax = 1000
|
||||
)
|
||||
p.literalSize += size
|
||||
p.literals++
|
||||
if size > litSizeMax {
|
||||
errmsg = fmt.Sprintf("max literal size %d is larger than allowed %d", size, litSizeMax)
|
||||
} else if p.literalSize > totalLitSizeMax {
|
||||
errmsg = fmt.Sprintf("max total literal size for command %d is larger than allowed %d", p.literalSize, totalLitSizeMax)
|
||||
} else if p.literals > litMax {
|
||||
errmsg = fmt.Sprintf("max literals for command %d is larger than allowed %d", p.literals, litMax)
|
||||
}
|
||||
if errmsg != "" {
|
||||
// ../rfc/9051:357 ../rfc/3501:347
|
||||
err := errors.New("literal too big: " + errmsg)
|
||||
if sync {
|
||||
errmsg = ""
|
||||
} else {
|
||||
errmsg = "* BYE [ALERT] " + errmsg
|
||||
}
|
||||
panic(syntaxError{errmsg, "TOOBIG", err.Error(), err})
|
||||
}
|
||||
}
|
||||
|
||||
return size, sync
|
||||
}
|
||||
|
||||
@ -766,6 +796,7 @@ var searchKeyWords = []string{
|
||||
"BEFORE", "BODY",
|
||||
"CC", "DELETED", "FLAGGED",
|
||||
"FROM", "KEYWORD",
|
||||
"OLDER", "YOUNGER", // WITHIN extension, ../rfc/5032:72
|
||||
"NEW", "OLD", "ON", "RECENT", "SEEN",
|
||||
"SINCE", "SUBJECT",
|
||||
"TEXT", "TO",
|
||||
@ -777,7 +808,8 @@ var searchKeyWords = []string{
|
||||
"SENTBEFORE", "SENTON",
|
||||
"SENTSINCE", "SMALLER",
|
||||
"UID", "UNDRAFT",
|
||||
"MODSEQ", // CONDSTORE extension.
|
||||
"MODSEQ", // CONDSTORE extension.
|
||||
"SAVEDBEFORE", "SAVEDON", "SAVEDSINCE", "SAVEDATESUPPORTED", // SAVEDATE extension, ../rfc/8514:203
|
||||
}
|
||||
|
||||
// ../rfc/9051:6923 ../rfc/3501:4957, MODSEQ ../rfc/7162:2492
|
||||
@ -901,31 +933,19 @@ func (p *parser) xsearchKey() *searchKey {
|
||||
sk.clientModseq = &v
|
||||
// MODSEQ is a CONDSTORE-enabling parameter. ../rfc/7162:377
|
||||
p.conn.enabled[capCondstore] = true
|
||||
case "SAVEDBEFORE", "SAVEDON", "SAVEDSINCE":
|
||||
p.xspace()
|
||||
sk.date = p.xdate() // ../rfc/8514:267
|
||||
case "SAVEDATESUPPORTED":
|
||||
case "OLDER", "YOUNGER":
|
||||
p.xspace()
|
||||
sk.number = int64(p.xnznumber())
|
||||
default:
|
||||
p.xerrorf("missing case for op %q", sk.op)
|
||||
}
|
||||
return sk
|
||||
}
|
||||
|
||||
// hasModseq returns whether there is a modseq filter anywhere in the searchkey.
|
||||
func (sk searchKey) hasModseq() bool {
|
||||
if sk.clientModseq != nil {
|
||||
return true
|
||||
}
|
||||
for _, e := range sk.searchKeys {
|
||||
if e.hasModseq() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
if sk.searchKey != nil && sk.searchKey.hasModseq() {
|
||||
return true
|
||||
}
|
||||
if sk.searchKey2 != nil && sk.searchKey2.hasModseq() {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// ../rfc/9051:6489 ../rfc/3501:4692
|
||||
func (p *parser) xdateDay() int {
|
||||
d := p.xdigit()
|
||||
@ -948,3 +968,195 @@ func (p *parser) xdate() time.Time {
|
||||
}
|
||||
return time.Date(year, mon, day, 0, 0, 0, 0, time.UTC)
|
||||
}
|
||||
|
||||
// Parse and validate a metadata key (entry name), returned as lower-case.
|
||||
//
|
||||
// ../rfc/5464:190
|
||||
func (p *parser) xmetadataKey() string {
|
||||
// ../rfc/5464:772
|
||||
s := p.xastring()
|
||||
|
||||
// ../rfc/5464:192
|
||||
if strings.Contains(s, "//") {
|
||||
p.xerrorf("entry name must not contain two slashes")
|
||||
}
|
||||
// We allow a single slash, so it can be used with option "(depth infinity)" to get
|
||||
// all annotations.
|
||||
if s != "/" && strings.HasSuffix(s, "/") {
|
||||
p.xerrorf("entry name must not end with slash")
|
||||
}
|
||||
// ../rfc/5464:202
|
||||
if strings.Contains(s, "*") || strings.Contains(s, "%") {
|
||||
p.xerrorf("entry name must not contain * or %%")
|
||||
}
|
||||
for _, c := range s {
|
||||
if c < ' ' || c >= 0x7f {
|
||||
p.xerrorf("entry name must only contain non-control ascii characters")
|
||||
}
|
||||
}
|
||||
return strings.ToLower(s)
|
||||
}
|
||||
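A minimal standalone sketch of the entry-name rules enforced by xmetadataKey above (the helper name is made up; the real parser additionally reads the name as an astring):

package main

import (
	"errors"
	"fmt"
	"strings"
)

// validateMetadataKey applies the same checks as xmetadataKey: no double slash,
// no trailing slash (except the bare "/"), no wildcards, printable ASCII only,
// and the result is lower-cased.
func validateMetadataKey(s string) (string, error) {
	if strings.Contains(s, "//") {
		return "", errors.New("entry name must not contain two slashes")
	}
	if s != "/" && strings.HasSuffix(s, "/") {
		return "", errors.New("entry name must not end with slash")
	}
	if strings.ContainsAny(s, "*%") {
		return "", errors.New("entry name must not contain * or %")
	}
	for _, c := range s {
		if c < ' ' || c >= 0x7f {
			return "", errors.New("entry name must only contain non-control ascii characters")
		}
	}
	return strings.ToLower(s), nil
}

func main() {
	fmt.Println(validateMetadataKey("/private/comment")) // "/private/comment" <nil>
	fmt.Println(validateMetadataKey("/private//x"))      // error: two slashes
}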
|
||||
// ../rfc/5464:776
|
||||
func (p *parser) xmetadataKeyValue() (key string, isString bool, value []byte) {
|
||||
key = p.xmetadataKey()
|
||||
p.xspace()
|
||||
|
||||
if p.hasPrefix("~{") {
|
||||
size, sync := p.xliteralSize(true, true)
|
||||
value = p.conn.xreadliteral(size, sync)
|
||||
line := p.conn.xreadline(false)
|
||||
p.orig, p.upper, p.o = line, toUpper(line), 0
|
||||
} else if p.hasPrefix(`"`) {
|
||||
value = []byte(p.xstring())
|
||||
isString = true
|
||||
} else if p.take("NIL") {
|
||||
value = nil
|
||||
} else {
|
||||
p.xerrorf("expected metadata value")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
type eventGroup struct {
|
||||
MailboxSpecifier mailboxSpecifier
|
||||
Events []notifyEvent // NONE is represented by an empty list.
|
||||
}
|
||||
|
||||
type mbspecKind string
|
||||
|
||||
const (
|
||||
mbspecSelected mbspecKind = "SELECTED"
|
||||
mbspecSelectedDelayed mbspecKind = "SELECTED-DELAYED" // Only for NOTIFY.
|
||||
mbspecInboxes mbspecKind = "INBOXES"
|
||||
mbspecPersonal mbspecKind = "PERSONAL"
|
||||
mbspecSubscribed mbspecKind = "SUBSCRIBED"
|
||||
mbspecSubtreeOne mbspecKind = "SUBTREE-ONE" // For ESEARCH, we allow it for NOTIFY too.
|
||||
mbspecSubtree mbspecKind = "SUBTREE"
|
||||
mbspecMailboxes mbspecKind = "MAILBOXES"
|
||||
)
|
||||
|
||||
// Used by both the ESEARCH and NOTIFY commands.
|
||||
type mailboxSpecifier struct {
|
||||
Kind mbspecKind
|
||||
Mailboxes []string
|
||||
}
|
||||
|
||||
type notifyEvent struct {
|
||||
// Kind is always upper case. It should be one of the eventKind values; anything
// else must result in a BADEVENT response code.
|
||||
Kind string
|
||||
|
||||
FetchAtt []fetchAtt // Only for MessageNew
|
||||
}
|
||||
|
||||
// ../rfc/5465:943
|
||||
func (p *parser) xeventGroup() (eg eventGroup) {
|
||||
p.xtake("(")
|
||||
eg.MailboxSpecifier = p.xfilterMailbox(mbspecsNotify)
|
||||
p.xspace()
|
||||
if p.take("NONE") {
|
||||
p.xtake(")")
|
||||
return eg
|
||||
}
|
||||
p.xtake("(")
|
||||
for {
|
||||
e := p.xnotifyEvent()
|
||||
eg.Events = append(eg.Events, e)
|
||||
if !p.space() {
|
||||
break
|
||||
}
|
||||
}
|
||||
p.xtake(")")
|
||||
p.xtake(")")
|
||||
return eg
|
||||
}
|
||||
|
||||
var mbspecsEsearch = []mbspecKind{
|
||||
mbspecSelected, // selected-delayed is only for NOTIFY.
|
||||
mbspecInboxes,
|
||||
mbspecPersonal,
|
||||
mbspecSubscribed,
|
||||
mbspecSubtreeOne, // Must come before Subtree due to eager parsing.
|
||||
mbspecSubtree,
|
||||
mbspecMailboxes,
|
||||
}
|
||||
|
||||
var mbspecsNotify = []mbspecKind{
|
||||
	mbspecSelectedDelayed, // Must come before mbspecSelected due to eager parsing.
|
||||
mbspecSelected,
|
||||
mbspecInboxes,
|
||||
mbspecPersonal,
|
||||
mbspecSubscribed,
|
||||
mbspecSubtreeOne, // From ESEARCH, we also allow it in NOTIFY.
|
||||
mbspecSubtree,
|
||||
mbspecMailboxes,
|
||||
}
|
||||
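A minimal sketch of why SUBTREE-ONE (and SELECTED-DELAYED) must be listed before their shorter prefixes when matching eagerly, as the comments above note; the helper is a simplified, made-up stand-in for the parser's take-by-prefix loop:

package main

import (
	"fmt"
	"strings"
)

// takeFirst returns the first option that is a prefix of the input, like an
// eager parser taking tokens in list order.
func takeFirst(input string, options []string) string {
	for _, o := range options {
		if strings.HasPrefix(input, o) {
			return o
		}
	}
	return ""
}

func main() {
	in := "SUBTREE-ONE (a)"
	fmt.Println(takeFirst(in, []string{"SUBTREE-ONE", "SUBTREE"})) // SUBTREE-ONE
	fmt.Println(takeFirst(in, []string{"SUBTREE", "SUBTREE-ONE"})) // SUBTREE (wrong specifier)
}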
|
||||
// Parses a mailbox specifier restricted to the given kinds: the ESEARCH set (with
// "subtree-one") or the NOTIFY set (with "selected-delayed").
|
||||
func (p *parser) xfilterMailbox(allowed []mbspecKind) (ms mailboxSpecifier) {
|
||||
var kind mbspecKind
|
||||
for _, s := range allowed {
|
||||
if p.take(string(s)) {
|
||||
kind = s
|
||||
break
|
||||
}
|
||||
}
|
||||
if kind == mbspecKind("") {
|
||||
xsyntaxErrorf("expected mailbox specifier")
|
||||
}
|
||||
|
||||
ms.Kind = kind
|
||||
switch kind {
|
||||
case "SUBTREE", "SUBTREE-ONE", "MAILBOXES":
|
||||
p.xtake(" ")
|
||||
// One or more mailboxes. Multiple start with a list. ../rfc/5465:937
|
||||
if p.take("(") {
|
||||
for {
|
||||
ms.Mailboxes = append(ms.Mailboxes, p.xmailbox())
|
||||
if !p.take(" ") {
|
||||
break
|
||||
}
|
||||
}
|
||||
p.xtake(")")
|
||||
} else {
|
||||
ms.Mailboxes = []string{p.xmailbox()}
|
||||
}
|
||||
}
|
||||
return ms
|
||||
}
|
||||
|
||||
type eventKind string
|
||||
|
||||
const (
|
||||
eventMessageNew eventKind = "MESSAGENEW"
|
||||
eventMessageExpunge eventKind = "MESSAGEEXPUNGE"
|
||||
eventFlagChange eventKind = "FLAGCHANGE"
|
||||
eventAnnotationChange eventKind = "ANNOTATIONCHANGE"
|
||||
eventMailboxName eventKind = "MAILBOXNAME"
|
||||
eventSubscriptionChange eventKind = "SUBSCRIPTIONCHANGE"
|
||||
eventMailboxMetadataChange eventKind = "MAILBOXMETADATACHANGE"
|
||||
eventServerMetadataChange eventKind = "SERVERMETADATACHANGE"
|
||||
)
|
||||
|
||||
var messageEventKinds = []eventKind{eventMessageNew, eventMessageExpunge, eventFlagChange, eventAnnotationChange}
|
||||
|
||||
// ../rfc/5465:974
|
||||
func (p *parser) xnotifyEvent() notifyEvent {
|
||||
s := strings.ToUpper(p.xatom())
|
||||
e := notifyEvent{Kind: s}
|
||||
if eventKind(e.Kind) == eventMessageNew {
|
||||
if p.take(" (") {
|
||||
for {
|
||||
a := p.xfetchAtt()
|
||||
e.FetchAtt = append(e.FetchAtt, a)
|
||||
if !p.take(" ") {
|
||||
break
|
||||
}
|
||||
}
|
||||
p.xtake(")")
|
||||
}
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
@ -1,6 +1,8 @@
|
||||
package imapserver
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"io"
|
||||
"net"
|
||||
)
|
||||
|
||||
@ -13,10 +15,7 @@ type prefixConn struct {
|
||||
|
||||
func (c *prefixConn) Read(buf []byte) (int, error) {
|
||||
if len(c.prefix) > 0 {
|
||||
n := len(buf)
|
||||
if n > len(c.prefix) {
|
||||
n = len(c.prefix)
|
||||
}
|
||||
n := min(len(buf), len(c.prefix))
|
||||
copy(buf[:n], c.prefix[:n])
|
||||
c.prefix = c.prefix[n:]
|
||||
if len(c.prefix) == 0 {
|
||||
@ -26,3 +25,18 @@ func (c *prefixConn) Read(buf []byte) (int, error) {
|
||||
}
|
||||
return c.Conn.Read(buf)
|
||||
}
|
||||
|
||||
// xprefixConn returns either the original net.Conn passed as parameter, or returns
|
||||
// a *prefixConn returning the buffered data available in br followed by data from the
|
||||
// net.Conn passed in.
|
||||
func xprefixConn(c net.Conn, br *bufio.Reader) net.Conn {
|
||||
n := br.Buffered()
|
||||
if n == 0 {
|
||||
return c
|
||||
}
|
||||
|
||||
buf := make([]byte, n)
|
||||
_, err := io.ReadFull(c, buf)
|
||||
xcheckf(err, "get buffered data")
|
||||
return &prefixConn{buf, c}
|
||||
}
|
||||
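A minimal, self-contained sketch of how such a wrapped connection behaves, using net.Pipe and a bufio.Reader that buffered more bytes than were consumed; the Read method is condensed from the code above, the rest is illustrative and not part of the change:

package main

import (
	"bufio"
	"fmt"
	"net"
)

type prefixConn struct {
	prefix []byte
	net.Conn
}

// Read serves the remembered prefix first, then falls through to the wrapped conn.
func (c *prefixConn) Read(buf []byte) (int, error) {
	if len(c.prefix) > 0 {
		n := min(len(buf), len(c.prefix))
		copy(buf[:n], c.prefix[:n])
		c.prefix = c.prefix[n:]
		return n, nil
	}
	return c.Conn.Read(buf)
}

func main() {
	client, server := net.Pipe()
	go func() {
		client.Write([]byte("greeting\nleftover"))
		client.Close()
	}()
	br := bufio.NewReader(server)
	line, _ := br.ReadString('\n') // Consume one line; bufio buffers the rest.
	buffered := make([]byte, br.Buffered())
	br.Read(buffered) // Drain what bufio already holds.
	conn := &prefixConn{prefix: buffered, Conn: server}
	rest := make([]byte, 16)
	n, _ := conn.Read(rest)
	fmt.Printf("line=%q prefix-read=%q\n", line, rest[:n])
}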
|
@ -32,17 +32,26 @@ func (ss numSet) containsSeq(seq msgseq, uids []store.UID, searchResult []store.
|
||||
uid := uids[int(seq)-1]
|
||||
return uidSearch(searchResult, uid) > 0 && uidSearch(uids, uid) > 0
|
||||
}
|
||||
return ss.containsSeqCount(seq, uint32(len(uids)))
|
||||
}
|
||||
|
||||
// containsSeqCount returns whether seq is contained in ss, which must not be a
|
||||
// searchResult, given the total message count of the mailbox.
|
||||
func (ss numSet) containsSeqCount(seq msgseq, msgCount uint32) bool {
|
||||
if msgCount == 0 {
|
||||
return false
|
||||
}
|
||||
for _, r := range ss.ranges {
|
||||
first := r.first.number
|
||||
if r.first.star || first > uint32(len(uids)) {
|
||||
first = uint32(len(uids))
|
||||
if r.first.star || first > msgCount {
|
||||
first = msgCount
|
||||
}
|
||||
|
||||
last := first
|
||||
if r.last != nil {
|
||||
last = r.last.number
|
||||
if r.last.star || last > uint32(len(uids)) {
|
||||
last = uint32(len(uids))
|
||||
if r.last.star || last > msgCount {
|
||||
last = msgCount
|
||||
}
|
||||
}
|
||||
if first > last {
|
||||
@ -56,35 +65,77 @@ func (ss numSet) containsSeq(seq msgseq, uids []store.UID, searchResult []store.
|
||||
return false
|
||||
}
|
||||
|
||||
func (ss numSet) containsUID(uid store.UID, uids []store.UID, searchResult []store.UID) bool {
|
||||
if len(uids) == 0 {
|
||||
return false
|
||||
}
|
||||
// xcontainsKnownUID returns whether uid, which is known to exist, matches the numSet.
// xhighestUID must return the highest/last UID in the mailbox, or raise an error. A last
// UID must exist, otherwise this method wouldn't have been called with a known uid.
// xhighestUID is needed for interpreting UID sets like "<num>:*" where num is
// higher than the uid to check.
|
||||
func (ss numSet) xcontainsKnownUID(uid store.UID, searchResult []store.UID, xhighestUID func() store.UID) bool {
|
||||
if ss.searchResult {
|
||||
return uidSearch(searchResult, uid) > 0 && uidSearch(uids, uid) > 0
|
||||
return uidSearch(searchResult, uid) > 0
|
||||
}
|
||||
|
||||
for _, r := range ss.ranges {
|
||||
first := store.UID(r.first.number)
|
||||
if r.first.star || first > uids[len(uids)-1] {
|
||||
first = uids[len(uids)-1]
|
||||
}
|
||||
last := first
|
||||
a := store.UID(r.first.number)
|
||||
// Num in <num>:* can be larger than last, but it still matches the last...
|
||||
// Similar for *:<num>. ../rfc/9051:4814
|
||||
if r.first.star {
|
||||
if r.last != nil && uid >= store.UID(r.last.number) {
|
||||
return true
|
||||
}
|
||||
a = xhighestUID()
|
||||
}
|
||||
b := a
|
||||
if r.last != nil {
|
||||
last = store.UID(r.last.number)
|
||||
if r.last.star || last > uids[len(uids)-1] {
|
||||
last = uids[len(uids)-1]
|
||||
b = store.UID(r.last.number)
|
||||
if r.last.star {
|
||||
if uid >= a {
|
||||
return true
|
||||
}
|
||||
b = xhighestUID()
|
||||
}
|
||||
}
|
||||
if a > b {
|
||||
a, b = b, a
|
||||
}
|
||||
if uid >= a && uid <= b {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
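A minimal sketch of the "<num>:*" interpretation described above, where the star stands for the highest existing UID and a range like 10:* still matches the last message even when 10 is larger than that UID (../rfc/9051:4814); the helper name is made up:

package main

import "fmt"

// uidInStarRange reports whether an existing uid falls in the set num:*, given
// the highest UID currently in the mailbox.
func uidInStarRange(uid, num, highest uint32) bool {
	lo, hi := num, highest
	if lo > hi {
		lo, hi = hi, lo
	}
	return uid >= lo && uid <= hi
}

func main() {
	fmt.Println(uidInStarRange(3, 1, 3))  // true: 1:* with highest UID 3
	fmt.Println(uidInStarRange(3, 10, 3)) // true: 10:* still matches the last message
	fmt.Println(uidInStarRange(2, 10, 3)) // false
}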
|
||||
// xinterpretStar returns a numset that interprets stars in a uid set using
|
||||
// xlastUID, returning a new uid set without stars, with increasing first/last, and
|
||||
// without unneeded ranges (first.number != last.number).
|
||||
// If there are no messages in the mailbox, xlastUID must return zero and the
|
||||
// returned numSet will include 0.
|
||||
func (s numSet) xinterpretStar(xlastUID func() store.UID) numSet {
|
||||
var ns numSet
|
||||
|
||||
for _, r := range s.ranges {
|
||||
first := r.first.number
|
||||
if r.first.star {
|
||||
first = uint32(xlastUID())
|
||||
}
|
||||
last := first
|
||||
if r.last != nil {
|
||||
if r.last.star {
|
||||
last = uint32(xlastUID())
|
||||
} else {
|
||||
last = r.last.number
|
||||
}
|
||||
}
|
||||
if first > last {
|
||||
first, last = last, first
|
||||
}
|
||||
if uid >= first && uid <= last && uidSearch(uids, uid) > 0 {
|
||||
return true
|
||||
nr := numRange{first: setNumber{number: first}}
|
||||
if first != last {
|
||||
nr.last = &setNumber{number: last}
|
||||
}
|
||||
ns.ranges = append(ns.ranges, nr)
|
||||
}
|
||||
return false
|
||||
return ns
|
||||
}
|
||||
|
||||
// contains returns whether the numset contains the number.
|
||||
@ -158,38 +209,6 @@ func (ss numSet) String() string {
|
||||
return l[0]
|
||||
}
|
||||
|
||||
// interpretStar returns a numset that interprets stars in a numset, returning a new
|
||||
// numset without stars with increasing first/last.
|
||||
func (s numSet) interpretStar(uids []store.UID) numSet {
|
||||
var ns numSet
|
||||
if len(uids) == 0 {
|
||||
return ns
|
||||
}
|
||||
|
||||
for _, r := range s.ranges {
|
||||
first := r.first.number
|
||||
if r.first.star || first > uint32(uids[len(uids)-1]) {
|
||||
first = uint32(uids[len(uids)-1])
|
||||
}
|
||||
last := first
|
||||
if r.last != nil {
|
||||
last = r.last.number
|
||||
if r.last.star || last > uint32(uids[len(uids)-1]) {
|
||||
last = uint32(uids[len(uids)-1])
|
||||
}
|
||||
}
|
||||
if first > last {
|
||||
first, last = last, first
|
||||
}
|
||||
nr := numRange{first: setNumber{number: first}}
|
||||
if first != last {
|
||||
nr.last = &setNumber{number: last}
|
||||
}
|
||||
ns.ranges = append(ns.ranges, nr)
|
||||
}
|
||||
return ns
|
||||
}
|
||||
|
||||
// whether numSet only has numbers (no star/search), and is strictly increasing.
|
||||
func (s *numSet) isBasicIncreasing() bool {
|
||||
if s.searchResult {
|
||||
@ -307,13 +326,15 @@ type fetchAtt struct {
|
||||
section *sectionSpec
|
||||
sectionBinary []uint32
|
||||
partial *partial
|
||||
previewLazy bool // Not regular "PREVIEW", but "PREVIEW (LAZY)".
|
||||
}
|
||||
|
||||
type searchKey struct {
|
||||
// Only one of searchKeys, seqSet and op can be non-nil/non-empty.
|
||||
searchKeys []searchKey // In case of nested/multiple keys. Also for the top-level command.
|
||||
seqSet *numSet // In case of bare sequence set. For op UID, field uidSet contains the parameter.
|
||||
op string // Determines which of the fields below are set.
|
||||
searchKeys []searchKey // In case of nested/multiple keys. Also for the top-level command.
|
||||
seqSet *numSet // In case of bare sequence set. For op UID, field uidSet contains the parameter.
|
||||
op string // Determines which of the fields below are set.
|
||||
|
||||
headerField string
|
||||
astring string
|
||||
date time.Time
|
||||
@ -325,6 +346,40 @@ type searchKey struct {
|
||||
clientModseq *int64
|
||||
}
|
||||
|
||||
// Whether we need message sequence numbers to evaluate. Sequence numbers are not
|
||||
// allowed with UIDONLY. And if we need sequence numbers we cannot optimize
|
||||
// searching for MAX with a query in reverse order.
|
||||
func (sk *searchKey) hasSequenceNumbers() bool {
|
||||
for _, k := range sk.searchKeys {
|
||||
if k.hasSequenceNumbers() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
if sk.searchKey != nil && sk.searchKey.hasSequenceNumbers() || sk.searchKey2 != nil && sk.searchKey2.hasSequenceNumbers() {
|
||||
return true
|
||||
}
|
||||
return sk.seqSet != nil && !sk.seqSet.searchResult
|
||||
}
|
||||
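The comment above notes that needing sequence numbers rules out the reverse-order optimization for MAX. A minimal sketch of that decision, mirroring the forward/reverse computation in cmdxSearch later in this diff (the helper name is made up):

package main

import "fmt"

// scanDirections reproduces the decision in cmdxSearch: a reverse scan is only
// worthwhile when MAX is requested, any other requested return option is at most
// MIN, and no message sequence numbers are needed (which forces forward order).
// returnOpts is nil for an old-style SEARCH response.
func scanDirections(returnOpts map[string]bool, needSeq bool) (forward, reverse bool) {
	var min1, max1 int
	if returnOpts["MIN"] {
		min1 = 1
	}
	if returnOpts["MAX"] {
		max1 = 1
	}
	forward = returnOpts == nil || max1 == 0 || len(returnOpts) != 1 || needSeq
	reverse = max1 == 1 && (len(returnOpts) == 1 || min1+max1 == len(returnOpts)) && !needSeq
	return
}

func main() {
	fmt.Println(scanDirections(map[string]bool{"MAX": true}, false))              // false true: reverse scan only
	fmt.Println(scanDirections(map[string]bool{"MIN": true, "MAX": true}, false)) // true true: both directions
	fmt.Println(scanDirections(map[string]bool{"MAX": true}, true))               // true false: sequence numbers force forward
	fmt.Println(scanDirections(nil, false))                                       // true false: plain search
}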
|
||||
// hasModseq returns whether there is a modseq filter anywhere in the searchkey.
|
||||
func (sk *searchKey) hasModseq() bool {
|
||||
if sk.clientModseq != nil {
|
||||
return true
|
||||
}
|
||||
for _, e := range sk.searchKeys {
|
||||
if e.hasModseq() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
if sk.searchKey != nil && sk.searchKey.hasModseq() {
|
||||
return true
|
||||
}
|
||||
if sk.searchKey2 != nil && sk.searchKey2.hasModseq() {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func compactUIDSet(l []store.UID) (r numSet) {
|
||||
for len(l) > 0 {
|
||||
e := 1
|
||||
|
@ -23,16 +23,10 @@ func TestNumSetContains(t *testing.T) {
|
||||
check(ss0.containsSeq(1, []store.UID{2}, []store.UID{2}))
|
||||
check(!ss0.containsSeq(1, []store.UID{2}, []store.UID{}))
|
||||
|
||||
check(ss0.containsUID(1, []store.UID{1}, []store.UID{1}))
|
||||
check(ss0.containsUID(2, []store.UID{1, 2, 3}, []store.UID{2}))
|
||||
check(!ss0.containsUID(2, []store.UID{1, 2, 3}, []store.UID{}))
|
||||
check(!ss0.containsUID(2, []store.UID{}, []store.UID{2}))
|
||||
|
||||
ss1 := numSet{false, []numRange{{*num(1), nil}}} // Single number 1.
|
||||
check(ss1.containsSeq(1, []store.UID{2}, nil))
|
||||
check(!ss1.containsSeq(2, []store.UID{1, 2}, nil))
|
||||
|
||||
check(ss1.containsUID(1, []store.UID{1}, nil))
|
||||
check(ss1.containsSeq(1, []store.UID{2}, nil))
|
||||
check(!ss1.containsSeq(2, []store.UID{1, 2}, nil))
|
||||
|
||||
@ -44,15 +38,6 @@ func TestNumSetContains(t *testing.T) {
|
||||
check(ss2.containsSeq(3, []store.UID{4, 5, 6}, nil))
|
||||
check(!ss2.containsSeq(4, []store.UID{4, 5, 6}, nil))
|
||||
|
||||
check(ss2.containsUID(2, []store.UID{2}, nil))
|
||||
check(!ss2.containsUID(1, []store.UID{1, 2, 3}, nil))
|
||||
check(ss2.containsUID(3, []store.UID{1, 2, 3}, nil))
|
||||
check(!ss2.containsUID(2, []store.UID{4, 5}, nil))
|
||||
check(!ss2.containsUID(2, []store.UID{1}, nil))
|
||||
|
||||
check(ss2.containsUID(2, []store.UID{2, 6}, nil))
|
||||
check(ss2.containsUID(6, []store.UID{2, 6}, nil))
|
||||
|
||||
// *:2, same as 2:*
|
||||
ss3 := numSet{false, []numRange{{*star, num(2)}}}
|
||||
check(ss3.containsSeq(1, []store.UID{2}, nil))
|
||||
@ -60,15 +45,6 @@ func TestNumSetContains(t *testing.T) {
|
||||
check(ss3.containsSeq(2, []store.UID{4, 5}, nil))
|
||||
check(ss3.containsSeq(3, []store.UID{4, 5, 6}, nil))
|
||||
check(!ss3.containsSeq(4, []store.UID{4, 5, 6}, nil))
|
||||
|
||||
check(ss3.containsUID(2, []store.UID{2}, nil))
|
||||
check(!ss3.containsUID(1, []store.UID{1, 2, 3}, nil))
|
||||
check(ss3.containsUID(3, []store.UID{1, 2, 3}, nil))
|
||||
check(!ss3.containsUID(2, []store.UID{4, 5}, nil))
|
||||
check(!ss3.containsUID(2, []store.UID{1}, nil))
|
||||
|
||||
check(ss3.containsUID(2, []store.UID{2, 6}, nil))
|
||||
check(ss3.containsUID(6, []store.UID{2, 6}, nil))
|
||||
}
|
||||
|
||||
func TestNumSetInterpret(t *testing.T) {
|
||||
@ -77,38 +53,34 @@ func TestNumSetInterpret(t *testing.T) {
|
||||
return p.xnumSet0(true, false)
|
||||
}
|
||||
|
||||
checkEqual := func(uids []store.UID, a, s string) {
|
||||
checkEqual := func(lastUID store.UID, a, s string) {
|
||||
t.Helper()
|
||||
n := parseNumSet(a).interpretStar(uids)
|
||||
n := parseNumSet(a).xinterpretStar(func() store.UID { return lastUID })
|
||||
ns := n.String()
|
||||
if ns != s {
|
||||
t.Fatalf("%s != %s", ns, s)
|
||||
}
|
||||
}
|
||||
|
||||
checkEqual([]store.UID{}, "1:*", "")
|
||||
checkEqual([]store.UID{1}, "1:*", "1")
|
||||
checkEqual([]store.UID{1, 3}, "1:*", "1:3")
|
||||
checkEqual([]store.UID{1, 3}, "4:*", "3")
|
||||
checkEqual([]store.UID{1, 3}, "*:4", "3")
|
||||
checkEqual([]store.UID{2, 3}, "*:4", "3")
|
||||
checkEqual([]store.UID{2, 3}, "*:1", "1:3")
|
||||
checkEqual([]store.UID{2, 3}, "1:*", "1:3")
|
||||
checkEqual([]store.UID{1, 2, 3}, "1,2,3", "1,2,3")
|
||||
checkEqual([]store.UID{}, "1,2,3", "")
|
||||
checkEqual([]store.UID{}, "1:3", "")
|
||||
checkEqual([]store.UID{}, "3:1", "")
|
||||
checkEqual(0, "1:*", "0:1")
|
||||
checkEqual(1, "1:*", "1")
|
||||
checkEqual(3, "1:*", "1:3")
|
||||
checkEqual(3, "4:*", "3:4")
|
||||
checkEqual(3, "*:4", "3:4")
|
||||
checkEqual(3, "*:4", "3:4")
|
||||
checkEqual(3, "*:1", "1:3")
|
||||
checkEqual(3, "1:*", "1:3")
|
||||
checkEqual(3, "1,2,3", "1,2,3")
|
||||
checkEqual(0, "1,2,3", "1,2,3")
|
||||
checkEqual(0, "1:3", "1:3")
|
||||
checkEqual(0, "3:1", "1:3")
|
||||
|
||||
iter := parseNumSet("1:3").interpretStar([]store.UID{}).newIter()
|
||||
if _, ok := iter.Next(); ok {
|
||||
t.Fatalf("expected immediate end for empty iter")
|
||||
}
|
||||
|
||||
iter = parseNumSet("3:1").interpretStar([]store.UID{1, 2}).newIter()
|
||||
iter := parseNumSet("3:1").xinterpretStar(func() store.UID { return 2 }).newIter()
|
||||
v0, _ := iter.Next()
|
||||
v1, _ := iter.Next()
|
||||
v2, _ := iter.Next()
|
||||
_, ok := iter.Next()
|
||||
if v0 != 1 || v1 != 2 || ok {
|
||||
t.Fatalf("got %v %v %v, expected 1, 2, false", v0, v1, ok)
|
||||
if v0 != 1 || v1 != 2 || v2 != 3 || ok {
|
||||
t.Fatalf("got %v %v %v %v, expected 1, 2, 3 false", v0, v1, v2, ok)
|
||||
}
|
||||
}
|
||||
|
@ -7,10 +7,10 @@ import (
|
||||
)
|
||||
|
||||
func TestQuota1(t *testing.T) {
|
||||
tc := start(t)
|
||||
tc := start(t, false)
|
||||
defer tc.close()
|
||||
|
||||
tc.client.Login("mjl@mox.example", password0)
|
||||
tc.login("mjl@mox.example", password0)
|
||||
|
||||
// We don't implement setquota.
|
||||
tc.transactf("bad", `setquota "" (STORAGE 123)`)
|
||||
@ -35,10 +35,10 @@ func TestQuota1(t *testing.T) {
|
||||
tc.xuntagged(imapclient.UntaggedStatus{Mailbox: "Inbox", Attrs: map[imapclient.StatusAttr]int64{imapclient.StatusDeletedStorage: 0}})
|
||||
|
||||
// tclimit does have a limit.
|
||||
tclimit := startArgs(t, false, false, true, true, "limit")
|
||||
tclimit := startArgs(t, false, false, false, true, true, "limit")
|
||||
defer tclimit.close()
|
||||
|
||||
tclimit.client.Login("limit@mox.example", password0)
|
||||
tclimit.login("limit@mox.example", password0)
|
||||
|
||||
tclimit.transactf("ok", "getquotaroot inbox")
|
||||
tclimit.xuntagged(
|
||||
|
@ -6,29 +6,39 @@ import (
|
||||
"github.com/mjl-/mox/imapclient"
|
||||
)
|
||||
|
||||
// todo: check that UIDValidity is indeed updated properly.
|
||||
func TestRename(t *testing.T) {
|
||||
tc := start(t)
|
||||
testRename(t, false)
|
||||
}
|
||||
|
||||
func TestRenameUIDOnly(t *testing.T) {
|
||||
testRename(t, true)
|
||||
}
|
||||
|
||||
// todo: check that UIDValidity is indeed updated properly.
|
||||
func testRename(t *testing.T, uidonly bool) {
|
||||
tc := start(t, uidonly)
|
||||
defer tc.close()
|
||||
|
||||
tc2 := startNoSwitchboard(t)
|
||||
defer tc2.close()
|
||||
tc2 := startNoSwitchboard(t, uidonly)
|
||||
defer tc2.closeNoWait()
|
||||
|
||||
tc.client.Login("mjl@mox.example", password0)
|
||||
tc2.client.Login("mjl@mox.example", password0)
|
||||
tc.login("mjl@mox.example", password0)
|
||||
tc2.login("mjl@mox.example", password0)
|
||||
|
||||
tc.transactf("bad", "rename") // Missing parameters.
|
||||
tc.transactf("bad", "rename x") // Missing destination.
|
||||
tc.transactf("bad", "rename x y ") // Leftover data.
|
||||
|
||||
tc.transactf("no", "rename doesnotexist newbox") // Does not exist.
|
||||
tc.xcode("NONEXISTENT") // ../rfc/9051:5140
|
||||
tc.transactf("no", `rename "Sent" "Trash"`) // Already exists.
|
||||
tc.xcode("ALREADYEXISTS")
|
||||
tc.xcodeWord("NONEXISTENT") // ../rfc/9051:5140
|
||||
tc.transactf("no", "rename expungebox newbox") // No longer exists.
|
||||
tc.xcodeWord("NONEXISTENT")
|
||||
tc.transactf("no", `rename "Sent" "Trash"`) // Already exists.
|
||||
tc.xcodeWord("ALREADYEXISTS")
|
||||
|
||||
tc.client.Create("x")
|
||||
tc.client.Create("x", nil)
|
||||
tc.client.Subscribe("sub")
|
||||
tc.client.Create("a/b/c")
|
||||
tc.client.Create("a/b/c", nil)
|
||||
tc.client.Subscribe("x/y/c") // For later rename, but not affected by rename of x.
|
||||
tc2.transactf("ok", "noop") // Drain.
|
||||
|
||||
@ -37,7 +47,7 @@ func TestRename(t *testing.T) {
|
||||
tc2.xuntagged(imapclient.UntaggedList{Separator: '/', Mailbox: "z"})
|
||||
|
||||
// OldName is only set for IMAP4rev2 or NOTIFY.
|
||||
tc2.client.Enable("IMAP4rev2")
|
||||
tc2.client.Enable(imapclient.CapIMAP4rev2)
|
||||
tc.transactf("ok", "rename z y")
|
||||
tc2.transactf("ok", "noop")
|
||||
tc2.xuntagged(imapclient.UntaggedList{Separator: '/', Mailbox: "y", OldName: "z"})
|
||||
@ -49,16 +59,16 @@ func TestRename(t *testing.T) {
|
||||
|
||||
// Cannot rename a child to a parent. It already exists.
|
||||
tc.transactf("no", "rename a/b/c a/b")
|
||||
tc.xcode("ALREADYEXISTS")
|
||||
tc.xcodeWord("ALREADYEXISTS")
|
||||
tc.transactf("no", "rename a/b a")
|
||||
tc.xcode("ALREADYEXISTS")
|
||||
tc.xcodeWord("ALREADYEXISTS")
|
||||
|
||||
tc2.transactf("ok", "noop") // Drain.
|
||||
tc.transactf("ok", "rename a/b x/y") // This will cause new parent "x" to be created, and a/b and a/b/c to be renamed.
|
||||
tc2.transactf("ok", "noop")
|
||||
tc2.xuntagged(imapclient.UntaggedList{Flags: []string{`\Subscribed`}, Separator: '/', Mailbox: "x"}, imapclient.UntaggedList{Separator: '/', Mailbox: "x/y", OldName: "a/b"}, imapclient.UntaggedList{Flags: []string{`\Subscribed`}, Separator: '/', Mailbox: "x/y/c", OldName: "a/b/c"})
|
||||
|
||||
tc.client.Create("k/l")
|
||||
tc.client.Create("k/l", nil)
|
||||
tc.transactf("ok", "rename k/l k/l/m") // With "l" renamed, a new "k" will be created.
|
||||
tc.transactf("ok", `list "" "k*" return (subscribed)`)
|
||||
tc.xuntagged(imapclient.UntaggedList{Flags: []string{`\Subscribed`}, Separator: '/', Mailbox: "k"}, imapclient.UntaggedList{Flags: []string{`\Subscribed`}, Separator: '/', Mailbox: "k/l"}, imapclient.UntaggedList{Separator: '/', Mailbox: "k/l/m"})
|
||||
@ -70,28 +80,51 @@ func TestRename(t *testing.T) {
|
||||
tc.client.Unsubscribe("k")
|
||||
tc.transactf("ok", "rename k/l k/l/m") // With "l" renamed, a new "k" will be created.
|
||||
tc.transactf("ok", `list "" "k*" return (subscribed)`)
|
||||
tc.xuntagged(imapclient.UntaggedList{Separator: '/', Mailbox: "k"}, imapclient.UntaggedList{Flags: []string{"\\Subscribed"}, Separator: '/', Mailbox: "k/l"}, imapclient.UntaggedList{Separator: '/', Mailbox: "k/l/m"})
|
||||
tc.xuntagged(
|
||||
imapclient.UntaggedList{Separator: '/', Mailbox: "k"},
|
||||
imapclient.UntaggedList{Flags: []string{"\\Subscribed"}, Separator: '/', Mailbox: "k/l"},
|
||||
imapclient.UntaggedList{Separator: '/', Mailbox: "k/l/m"},
|
||||
)
|
||||
|
||||
tc.transactf("ok", "rename k/l/m k/l/x/y/m") // k/l/x and k/l/x/y will be created.
|
||||
tc.transactf("ok", `list "" "k/l/x*" return (subscribed)`)
|
||||
tc.xuntagged(
|
||||
imapclient.UntaggedList{Separator: '/', Mailbox: "k/l/x"},
|
||||
imapclient.UntaggedList{Separator: '/', Mailbox: "k/l/x/y"},
|
||||
imapclient.UntaggedList{Separator: '/', Mailbox: "k/l/x/y/m"},
|
||||
)
|
||||
|
||||
// Renaming inbox keeps inbox in existence, moves messages, and does not rename children.
|
||||
tc.transactf("ok", "create inbox/a")
|
||||
// To check if UIDs are renumbered properly, we add UIDs 1 and 2. Expunge 1,
|
||||
// keeping only 2. Then rename the inbox, which should renumber UID 2 in the old
|
||||
// inbox to UID 1 in the newly created mailbox.
|
||||
tc.transactf("ok", "append inbox (\\deleted) \" 1-Jan-2022 10:10:00 +0100\" {1+}\r\nx")
|
||||
tc.transactf("ok", "append inbox (label1) \" 1-Jan-2022 10:10:00 +0100\" {1+}\r\nx")
|
||||
tc.transactf("ok", "append inbox (\\deleted) {1+}\r\nx")
|
||||
tc.transactf("ok", "append inbox (label1) {1+}\r\nx")
|
||||
tc.transactf("ok", `select inbox`)
|
||||
tc.transactf("ok", "expunge")
|
||||
tc.transactf("ok", "rename inbox minbox")
|
||||
tc.transactf("ok", `list "" (inbox inbox/a minbox)`)
|
||||
tc.xuntagged(imapclient.UntaggedList{Separator: '/', Mailbox: "Inbox"}, imapclient.UntaggedList{Separator: '/', Mailbox: "Inbox/a"}, imapclient.UntaggedList{Separator: '/', Mailbox: "minbox"})
|
||||
tc.transactf("ok", `select minbox`)
|
||||
tc.transactf("ok", "rename inbox x/minbox")
|
||||
tc.transactf("ok", `list "" (inbox inbox/a x/minbox)`)
|
||||
tc.xuntagged(
|
||||
imapclient.UntaggedList{Separator: '/', Mailbox: "Inbox"},
|
||||
imapclient.UntaggedList{Separator: '/', Mailbox: "Inbox/a"},
|
||||
imapclient.UntaggedList{Separator: '/', Mailbox: "x/minbox"},
|
||||
)
|
||||
tc.transactf("ok", `select x/minbox`)
|
||||
tc.transactf("ok", `uid fetch 1:* flags`)
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{imapclient.FetchUID(1), imapclient.FetchFlags{"label1"}}})
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, imapclient.FetchFlags{"label1"}))
|
||||
|
||||
	// Renaming to a new hierarchy that does not have any subscriptions.
|
||||
tc.transactf("ok", "rename minbox w/w")
|
||||
tc.transactf("ok", "rename x/minbox w/w")
|
||||
tc.transactf("ok", `list "" "w*"`)
|
||||
tc.xuntagged(imapclient.UntaggedList{Separator: '/', Mailbox: "w"}, imapclient.UntaggedList{Separator: '/', Mailbox: "w/w"})
|
||||
|
||||
tc.transactf("ok", "rename inbox misc/old/inbox")
|
||||
tc.transactf("ok", `list "" (misc misc/old/inbox)`)
|
||||
tc.xuntagged(
|
||||
imapclient.UntaggedList{Separator: '/', Mailbox: "misc"},
|
||||
imapclient.UntaggedList{Separator: '/', Mailbox: "misc/old/inbox"},
|
||||
)
|
||||
|
||||
// todo: test create+delete+rename of/to a name results in a higher uidvalidity.
|
||||
}
|
||||
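A minimal sketch of the renumbering the rename test above relies on: after the rename, the remaining messages are delivered to the new mailbox with fresh consecutive UIDs starting at 1 (the helper name is made up):

package main

import "fmt"

// renumber maps the surviving old UIDs, in mailbox order, to new consecutive UIDs.
func renumber(oldUIDs []uint32) map[uint32]uint32 {
	m := make(map[uint32]uint32, len(oldUIDs))
	for i, uid := range oldUIDs {
		m[uid] = uint32(i + 1)
	}
	return m
}

func main() {
	// UID 1 was expunged before the rename; only UID 2 remains and becomes UID 1.
	fmt.Println(renumber([]uint32{2})) // map[2:1]
}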
|
369
imapserver/replace.go
Normal file
@ -0,0 +1,369 @@
|
||||
package imapserver
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/mjl-/bstore"
|
||||
|
||||
"github.com/mjl-/mox/message"
|
||||
"github.com/mjl-/mox/mlog"
|
||||
"github.com/mjl-/mox/store"
|
||||
)
|
||||
|
||||
// Replace replaces a message with another, atomically, possibly in another mailbox,
|
||||
// without needing a sequence of: append message, store \deleted flag, expunge.
|
||||
//
|
||||
// State: Selected
|
||||
func (c *conn) cmdxReplace(isUID bool, tag, cmd string, p *parser) {
|
||||
// Command: ../rfc/8508:158 ../rfc/8508:198
|
||||
|
||||
// Request syntax: ../rfc/8508:471
|
||||
p.xspace()
|
||||
star := p.take("*")
|
||||
var num uint32
|
||||
if !star {
|
||||
num = p.xnznumber()
|
||||
}
|
||||
p.xspace()
|
||||
name := p.xmailbox()
|
||||
|
||||
// ../rfc/4466:473
|
||||
p.xspace()
|
||||
var storeFlags store.Flags
|
||||
var keywords []string
|
||||
if p.hasPrefix("(") {
|
||||
// Error must be a syntax error, to properly abort the connection due to literal.
|
||||
var err error
|
||||
storeFlags, keywords, err = store.ParseFlagsKeywords(p.xflagList())
|
||||
if err != nil {
|
||||
xsyntaxErrorf("parsing flags: %v", err)
|
||||
}
|
||||
p.xspace()
|
||||
}
|
||||
|
||||
var tm time.Time
|
||||
if p.hasPrefix(`"`) {
|
||||
tm = p.xdateTime()
|
||||
p.xspace()
|
||||
} else {
|
||||
tm = time.Now()
|
||||
}
|
||||
|
||||
	// todo: only with utf8 should we accept message headers with utf-8. we currently always accept them.
|
||||
// todo: this is only relevant if we also support the CATENATE extension?
|
||||
// ../rfc/6855:204
|
||||
utf8 := p.take("UTF8 (")
|
||||
if utf8 {
|
||||
p.xtake("~")
|
||||
}
|
||||
// Always allow literal8, for binary extension. ../rfc/4466:486
|
||||
// For utf8, we already consumed the required ~ above.
|
||||
size, synclit := p.xliteralSize(!utf8, false)
|
||||
|
||||
	// Check the request, including the old message in the database and whether the
	// new message fits in the quota. If a non-nil func is returned, an error was
	// found. Calling the function aborts handling this command.
|
||||
var uidOld store.UID
|
||||
checkMessage := func(tx *bstore.Tx) func() {
|
||||
if c.readonly {
|
||||
return func() { xuserErrorf("mailbox open in read-only mode") }
|
||||
}
|
||||
|
||||
mb, err := c.account.MailboxFind(tx, name)
|
||||
if err != nil {
|
||||
return func() { xserverErrorf("finding mailbox: %v", err) }
|
||||
}
|
||||
if mb == nil {
|
||||
return func() { xusercodeErrorf("TRYCREATE", "%w", store.ErrUnknownMailbox) }
|
||||
}
|
||||
|
||||
// Resolve "*" for UID or message sequence.
|
||||
if star {
|
||||
if c.uidonly {
|
||||
q := bstore.QueryTx[store.Message](tx)
|
||||
q.FilterNonzero(store.Message{MailboxID: c.mailboxID})
|
||||
q.FilterEqual("Expunged", false)
|
||||
q.FilterLess("UID", c.uidnext)
|
||||
q.SortDesc("UID")
|
||||
q.Limit(1)
|
||||
m, err := q.Get()
|
||||
if err == bstore.ErrAbsent {
|
||||
return func() { xsyntaxErrorf("cannot use * on empty mailbox") }
|
||||
}
|
||||
xcheckf(err, "get last message in mailbox")
|
||||
num = uint32(m.UID)
|
||||
} else if c.exists == 0 {
|
||||
return func() { xsyntaxErrorf("cannot use * on empty mailbox") }
|
||||
} else if isUID {
|
||||
num = uint32(c.uids[c.exists-1])
|
||||
} else {
|
||||
num = uint32(c.exists)
|
||||
}
|
||||
star = false
|
||||
}
|
||||
|
||||
// Find or verify UID of message to replace.
|
||||
if isUID {
|
||||
uidOld = store.UID(num)
|
||||
} else if num > c.exists {
|
||||
return func() { xuserErrorf("invalid msgseq") }
|
||||
} else {
|
||||
uidOld = c.uids[int(num)-1]
|
||||
}
|
||||
|
||||
// Check the message still exists in the database. If it doesn't, it may have been
|
||||
// deleted just now and we won't check the quota. We'll raise an error later on,
|
||||
// when we are not possibly reading a sync literal and can respond with unsolicited
|
||||
// expunges.
|
||||
q := bstore.QueryTx[store.Message](tx)
|
||||
q.FilterNonzero(store.Message{MailboxID: c.mailboxID, UID: uidOld})
|
||||
q.FilterEqual("Expunged", false)
|
||||
q.FilterLess("UID", c.uidnext)
|
||||
_, err = q.Get()
|
||||
if err == bstore.ErrAbsent {
|
||||
return nil
|
||||
}
|
||||
if err != nil {
|
||||
return func() { xserverErrorf("get message to replace: %v", err) }
|
||||
}
|
||||
|
||||
// Check if we can add size bytes. We can't necessarily remove the current message yet.
|
||||
ok, maxSize, err := c.account.CanAddMessageSize(tx, size)
|
||||
if err != nil {
|
||||
return func() { xserverErrorf("check quota: %v", err) }
|
||||
}
|
||||
if !ok {
|
||||
// ../rfc/9208:472
|
||||
return func() { xusercodeErrorf("OVERQUOTA", "account over maximum total message size %d", maxSize) }
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var errfn func()
|
||||
if synclit {
|
||||
// Check request, if it cannot succeed, fail it now before client is sending the data.
|
||||
|
||||
name = xcheckmailboxname(name, true)
|
||||
|
||||
c.account.WithRLock(func() {
|
||||
c.xdbread(func(tx *bstore.Tx) {
|
||||
errfn = checkMessage(tx)
|
||||
if errfn != nil {
|
||||
errfn()
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
c.xwritelinef("+ ")
|
||||
} else {
|
||||
var err error
|
||||
name, _, err = store.CheckMailboxName(name, true)
|
||||
if err != nil {
|
||||
errfn = func() { xusercodeErrorf("CANNOT", "%s", err) }
|
||||
} else {
|
||||
c.account.WithRLock(func() {
|
||||
c.xdbread(func(tx *bstore.Tx) {
|
||||
errfn = checkMessage(tx)
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
var file *os.File
|
||||
var newID int64 // Delivered message ID, file removed on error.
|
||||
var f io.Writer
|
||||
var commit bool
|
||||
|
||||
if errfn != nil {
|
||||
// We got a non-sync literal, we will consume some data, but abort if there's too
|
||||
// much. We draw the line at 1mb. Client should have used synchronizing literal.
|
||||
if size > 1000*1000 {
|
||||
// ../rfc/9051:357 ../rfc/3501:347
|
||||
err := errors.New("error condition and non-synchronizing literal too big")
|
||||
bye := "* BYE [ALERT] " + err.Error()
|
||||
panic(syntaxError{bye, "TOOBIG", err.Error(), err})
|
||||
}
|
||||
// Message will not be accepted.
|
||||
f = io.Discard
|
||||
} else {
|
||||
// Read the message into a temporary file.
|
||||
var err error
|
||||
file, err = store.CreateMessageTemp(c.log, "imap-replace")
|
||||
xcheckf(err, "creating temp file for message")
|
||||
defer store.CloseRemoveTempFile(c.log, file, "temporary message file")
|
||||
f = file
|
||||
|
||||
defer func() {
|
||||
if !commit && newID != 0 {
|
||||
p := c.account.MessagePath(newID)
|
||||
err := os.Remove(p)
|
||||
c.xsanity(err, "remove message file for replace after error")
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Read the message data.
|
||||
defer c.xtraceread(mlog.LevelTracedata)()
|
||||
mw := message.NewWriter(f)
|
||||
msize, err := io.Copy(mw, io.LimitReader(c.br, size))
|
||||
c.xtraceread(mlog.LevelTrace) // Restore.
|
||||
if err != nil {
|
||||
// Cannot use xcheckf due to %w handling of errIO.
|
||||
c.xbrokenf("reading literal message: %s (%w)", err, errIO)
|
||||
}
|
||||
if msize != size {
|
||||
c.xbrokenf("read %d bytes for message, expected %d (%w)", msize, size, errIO)
|
||||
}
|
||||
|
||||
// Finish reading the command.
|
||||
line := c.xreadline(false)
|
||||
p = newParser(line, c)
|
||||
if utf8 {
|
||||
p.xtake(")")
|
||||
}
|
||||
p.xempty()
|
||||
|
||||
// If an error was found earlier, abort the command now that we've read the message.
|
||||
if errfn != nil {
|
||||
errfn()
|
||||
}
|
||||
|
||||
var oldMsgExpunged bool
|
||||
|
||||
var om, nm store.Message
|
||||
var mbSrc, mbDst store.Mailbox // Src and dst mailboxes can be different. ../rfc/8508:263
|
||||
var overflow bool
|
||||
var pendingChanges []store.Change
|
||||
defer func() {
|
||||
// In case of panic.
|
||||
c.flushChanges(pendingChanges)
|
||||
}()
|
||||
|
||||
c.account.WithWLock(func() {
|
||||
var changes []store.Change
|
||||
|
||||
c.xdbwrite(func(tx *bstore.Tx) {
|
||||
mbSrc = c.xmailboxID(tx, c.mailboxID)
|
||||
|
||||
// Get old message. If it has been expunged, we should have a pending change for
|
||||
// it. We'll send untagged responses and fail the command.
|
||||
var err error
|
||||
qom := bstore.QueryTx[store.Message](tx)
|
||||
qom.FilterNonzero(store.Message{MailboxID: mbSrc.ID, UID: uidOld})
|
||||
om, err = qom.Get()
|
||||
xcheckf(err, "get old message to replace from database")
|
||||
if om.Expunged {
|
||||
oldMsgExpunged = true
|
||||
return
|
||||
}
|
||||
|
||||
// Check quota for addition of new message. We can't necessarily yet remove the old message.
|
||||
ok, maxSize, err := c.account.CanAddMessageSize(tx, mw.Size)
|
||||
xcheckf(err, "checking quota")
|
||||
if !ok {
|
||||
// ../rfc/9208:472
|
||||
xusercodeErrorf("OVERQUOTA", "account over maximum total message size %d", maxSize)
|
||||
}
|
||||
|
||||
modseq, err := c.account.NextModSeq(tx)
|
||||
xcheckf(err, "get next mod seq")
|
||||
|
||||
chremuids, _, err := c.account.MessageRemove(c.log, tx, modseq, &mbSrc, store.RemoveOpts{}, om)
|
||||
xcheckf(err, "expunge old message")
|
||||
changes = append(changes, chremuids)
|
||||
// Note: we only add a mbSrc counts change later on, if it is not equal to mbDst.
|
||||
|
||||
err = tx.Update(&mbSrc)
|
||||
xcheckf(err, "updating source mailbox counts")
|
||||
|
||||
mbDst = c.xmailbox(tx, name, "TRYCREATE")
|
||||
mbDst.ModSeq = modseq
|
||||
|
||||
nkeywords := len(mbDst.Keywords)
|
||||
|
||||
// Make new message to deliver.
|
||||
nm = store.Message{
|
||||
MailboxID: mbDst.ID,
|
||||
MailboxOrigID: mbDst.ID,
|
||||
Received: tm,
|
||||
Flags: storeFlags,
|
||||
Keywords: keywords,
|
||||
Size: mw.Size,
|
||||
ModSeq: modseq,
|
||||
CreateSeq: modseq,
|
||||
}
|
||||
|
||||
err = c.account.MessageAdd(c.log, tx, &mbDst, &nm, file, store.AddOpts{})
|
||||
xcheckf(err, "delivering message")
|
||||
newID = nm.ID
|
||||
|
||||
changes = append(changes, nm.ChangeAddUID(mbDst), mbDst.ChangeCounts())
|
||||
if nkeywords != len(mbDst.Keywords) {
|
||||
changes = append(changes, mbDst.ChangeKeywords())
|
||||
}
|
||||
|
||||
err = tx.Update(&mbDst)
|
||||
xcheckf(err, "updating destination mailbox")
|
||||
})
|
||||
|
||||
// Fetch pending changes, possibly with new UIDs, so we can apply them before adding our own new UID.
|
||||
overflow, pendingChanges = c.comm.Get()
|
||||
|
||||
if oldMsgExpunged {
|
||||
return
|
||||
}
|
||||
|
||||
// Success, make sure messages aren't cleaned up anymore.
|
||||
commit = true
|
||||
|
||||
// Broadcast the change to other connections.
|
||||
if mbSrc.ID != mbDst.ID {
|
||||
changes = append(changes, mbSrc.ChangeCounts())
|
||||
}
|
||||
c.broadcast(changes)
|
||||
})
|
||||
|
||||
// Must update our msgseq/uids tracking with latest pending changes.
|
||||
l := pendingChanges
|
||||
pendingChanges = nil
|
||||
c.xapplyChanges(overflow, l, false)
|
||||
|
||||
// If we couldn't find the message, send a NO response. We've just applied pending
|
||||
// changes, which should have expunged the absent message.
|
||||
if oldMsgExpunged {
|
||||
xuserErrorf("message to be replaced has been expunged")
|
||||
}
|
||||
|
||||
// If the destination mailbox is our currently selected mailbox, we register and
|
||||
// announce the new message.
|
||||
if mbDst.ID == c.mailboxID {
|
||||
c.uidAppend(nm.UID)
|
||||
// We send an untagged OK with APPENDUID, for sane bookkeeping in clients. ../rfc/8508:401
|
||||
c.xbwritelinef("* OK [APPENDUID %d %d] ", mbDst.UIDValidity, nm.UID)
|
||||
c.xbwritelinef("* %d EXISTS", c.exists)
|
||||
}
|
||||
|
||||
// We must return vanished instead of expunge, and also highestmodseq, when qresync
|
||||
// was enabled. ../rfc/8508:422 ../rfc/7162:1883
|
||||
qresync := c.enabled[capQresync]
|
||||
|
||||
// Now that we are in sync with msgseq, we can find our old msgseq and say it is
|
||||
// expunged or vanished. ../rfc/7162:1900
|
||||
var oseq msgseq
|
||||
if c.uidonly {
|
||||
c.exists--
|
||||
} else {
|
||||
oseq = c.xsequence(om.UID)
|
||||
c.sequenceRemove(oseq, om.UID)
|
||||
}
|
||||
if qresync || c.uidonly {
|
||||
c.xbwritelinef("* VANISHED %d", om.UID)
|
||||
// ../rfc/7162:1916
|
||||
} else {
|
||||
c.xbwritelinef("* %d EXPUNGE", oseq)
|
||||
}
|
||||
c.xwriteresultf("%s OK [HIGHESTMODSEQ %d] replaced", tag, nm.ModSeq.Client())
|
||||
}
|
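A minimal sketch of how checkMessage above resolves the replace target ("*", a UID, or a message sequence number) to a UID; the helper name and simplified signature are made up:

package main

import "fmt"

// resolveReplaceTarget picks the UID of the message to replace: "*" means the
// last message, a UID is used as-is, and a message sequence number is translated
// via the session's uids slice (in mailbox order).
func resolveReplaceTarget(isUID, star bool, num uint32, uids []uint32) (uint32, error) {
	if star {
		if len(uids) == 0 {
			return 0, fmt.Errorf("cannot use * on empty mailbox")
		}
		return uids[len(uids)-1], nil
	}
	if isUID {
		return num, nil
	}
	if num == 0 || int(num) > len(uids) {
		return 0, fmt.Errorf("invalid msgseq")
	}
	return uids[num-1], nil
}

func main() {
	uids := []uint32{2, 3} // UID 1 was expunged earlier.
	fmt.Println(resolveReplaceTarget(false, true, 0, uids))  // 3 <nil>: "*" resolves to the last message
	fmt.Println(resolveReplaceTarget(false, false, 2, uids)) // 3 <nil>: msgseq 2 has UID 3
	fmt.Println(resolveReplaceTarget(true, false, 2, uids))  // 2 <nil>: a UID is taken as-is
}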
233
imapserver/replace_test.go
Normal file
@ -0,0 +1,233 @@
|
||||
package imapserver
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/mjl-/mox/imapclient"
|
||||
)
|
||||
|
||||
func TestReplace(t *testing.T) {
|
||||
testReplace(t, false)
|
||||
}
|
||||
|
||||
func TestReplaceUIDOnly(t *testing.T) {
|
||||
testReplace(t, true)
|
||||
}
|
||||
|
||||
func testReplace(t *testing.T, uidonly bool) {
|
||||
defer mockUIDValidity()()
|
||||
|
||||
tc := start(t, uidonly)
|
||||
defer tc.close()
|
||||
|
||||
tc2 := startNoSwitchboard(t, uidonly)
|
||||
defer tc2.closeNoWait()
|
||||
|
||||
tc.login("mjl@mox.example", password0)
|
||||
tc.client.Select("inbox")
|
||||
|
||||
// Star not allowed on empty mailbox.
|
||||
tc.transactf("bad", "uid replace * inbox {1}")
|
||||
if !uidonly {
|
||||
tc.transactf("bad", "replace * inbox {1}")
|
||||
}
|
||||
|
||||
// Append 3 messages, remove first. Leaves msgseq 1,2 with uid 2,3.
|
||||
tc.client.MultiAppend("inbox", makeAppend(exampleMsg), makeAppend(exampleMsg), makeAppend(exampleMsg))
|
||||
tc.client.UIDStoreFlagsSet("1", true, `\deleted`)
|
||||
tc.client.Expunge()
|
||||
|
||||
tc.transactf("no", "uid replace 1 expungebox {1}") // Mailbox no longer exists.
|
||||
tc.xcodeWord("TRYCREATE")
|
||||
|
||||
tc2.login("mjl@mox.example", password0)
|
||||
tc2.client.Select("inbox")
|
||||
|
||||
// Replace last message (msgseq 2, uid 3) in same mailbox.
|
||||
if uidonly {
|
||||
tc.lastResponse, tc.lastErr = tc.client.UIDReplace("3", "INBOX", makeAppend(searchMsg))
|
||||
} else {
|
||||
tc.lastResponse, tc.lastErr = tc.client.MSNReplace("2", "INBOX", makeAppend(searchMsg))
|
||||
}
|
||||
tcheck(tc.t, tc.lastErr, "read imap response")
|
||||
if uidonly {
|
||||
tc.xuntagged(
|
||||
imapclient.UntaggedResult{Status: "OK", Code: imapclient.CodeAppendUID{UIDValidity: 1, UIDs: xparseUIDRange("4")}, Text: ""},
|
||||
imapclient.UntaggedExists(3),
|
||||
imapclient.UntaggedVanished{UIDs: xparseNumSet("3")},
|
||||
)
|
||||
} else {
|
||||
tc.xuntagged(
|
||||
imapclient.UntaggedResult{Status: "OK", Code: imapclient.CodeAppendUID{UIDValidity: 1, UIDs: xparseUIDRange("4")}, Text: ""},
|
||||
imapclient.UntaggedExists(3),
|
||||
imapclient.UntaggedExpunge(2),
|
||||
)
|
||||
}
|
||||
tc.xcode(imapclient.CodeHighestModSeq(8))
|
||||
|
||||
// Check that other client sees Exists and Expunge.
|
||||
tc2.transactf("ok", "noop")
|
||||
if uidonly {
|
||||
tc2.xuntagged(
|
||||
imapclient.UntaggedVanished{UIDs: xparseNumSet("3")},
|
||||
imapclient.UntaggedExists(2),
|
||||
tc.untaggedFetch(2, 4, imapclient.FetchFlags(nil)),
|
||||
)
|
||||
} else {
|
||||
tc2.xuntagged(
|
||||
imapclient.UntaggedExpunge(2),
|
||||
imapclient.UntaggedExists(2),
|
||||
tc.untaggedFetch(2, 4, imapclient.FetchFlags(nil)),
|
||||
)
|
||||
}
|
||||
|
||||
// Enable qresync, replace uid 2 (msgseq 1) to different mailbox, see that we get vanished instead of expunged.
|
||||
tc.transactf("ok", "enable qresync")
|
||||
tc.lastResponse, tc.lastErr = tc.client.UIDReplace("2", "INBOX", makeAppend(searchMsg))
|
||||
tcheck(tc.t, tc.lastErr, "read imap response")
|
||||
tc.xuntagged(
|
||||
imapclient.UntaggedResult{Status: "OK", Code: imapclient.CodeAppendUID{UIDValidity: 1, UIDs: xparseUIDRange("5")}, Text: ""},
|
||||
imapclient.UntaggedExists(3),
|
||||
imapclient.UntaggedVanished{UIDs: xparseNumSet("2")},
|
||||
)
|
||||
tc.xcode(imapclient.CodeHighestModSeq(9))
|
||||
|
||||
// Use "*" for replacing.
|
||||
tc.transactf("ok", "uid replace * inbox {1+}\r\nx")
|
||||
tc.xuntagged(
|
||||
imapclient.UntaggedResult{Status: "OK", Code: imapclient.CodeAppendUID{UIDValidity: 1, UIDs: xparseUIDRange("6")}, Text: ""},
|
||||
imapclient.UntaggedExists(3),
|
||||
imapclient.UntaggedVanished{UIDs: xparseNumSet("5")},
|
||||
)
|
||||
if !uidonly {
|
||||
tc.transactf("ok", "replace * inbox {1+}\r\ny")
|
||||
tc.xuntagged(
|
||||
imapclient.UntaggedResult{Status: "OK", Code: imapclient.CodeAppendUID{UIDValidity: 1, UIDs: xparseUIDRange("7")}, Text: ""},
|
||||
imapclient.UntaggedExists(3),
|
||||
imapclient.UntaggedVanished{UIDs: xparseNumSet("6")},
|
||||
)
|
||||
}
|
||||
|
||||
// Non-existent mailbox with non-synchronizing literal should consume the literal.
|
||||
if uidonly {
|
||||
tc.transactf("no", "uid replace 1 bogusbox {1+}\r\nx")
|
||||
} else {
|
||||
tc.transactf("no", "replace 1 bogusbox {1+}\r\nx")
|
||||
}
|
||||
|
||||
// Leftover data.
|
||||
tc.transactf("bad", "replace 1 inbox () {6+}\r\ntest\r\n ")
|
||||
}
|
||||
|
||||
func TestReplaceBigNonsyncLit(t *testing.T) {
|
||||
tc := start(t, false)
|
||||
defer tc.close()
|
||||
|
||||
tc.login("mjl@mox.example", password0)
|
||||
tc.client.Select("inbox")
|
||||
|
||||
	// Replacing a non-existent message with a >1mb non-sync literal should abort the entire connection.
|
||||
tc.transactf("bad", "replace 12345 inbox {2000000+}")
|
||||
tc.xuntagged(
|
||||
imapclient.UntaggedBye{Code: imapclient.CodeWord("ALERT"), Text: "error condition and non-synchronizing literal too big"},
|
||||
)
|
||||
tc.xcodeWord("TOOBIG")
|
||||
}
|
||||
|
||||
func TestReplaceQuota(t *testing.T) {
|
||||
testReplaceQuota(t, false)
|
||||
}
|
||||
|
||||
func TestReplaceQuotaUIDOnly(t *testing.T) {
|
||||
testReplaceQuota(t, true)
|
||||
}
|
||||
|
||||
func testReplaceQuota(t *testing.T, uidonly bool) {
|
||||
// with quota limit
|
||||
tc := startArgs(t, uidonly, true, false, true, true, "limit")
|
||||
defer tc.close()
|
||||
|
||||
tc.login("limit@mox.example", password0)
|
||||
tc.client.Select("inbox")
|
||||
tc.client.Append("inbox", makeAppend("x"))
|
||||
|
||||
// Synchronizing literal, we get failure immediately.
|
||||
tc.transactf("no", "uid replace 1 inbox {6}\r\n")
|
||||
tc.xcodeWord("OVERQUOTA")
|
||||
|
||||
// Synchronizing literal to non-existent mailbox, we get failure immediately.
|
||||
tc.transactf("no", "uid replace 1 badbox {6}\r\n")
|
||||
tc.xcodeWord("TRYCREATE")
|
||||
|
||||
buf := make([]byte, 4000, 4002)
|
||||
for i := range buf {
|
||||
buf[i] = 'x'
|
||||
}
|
||||
buf = append(buf, "\r\n"...)
|
||||
|
||||
// Non-synchronizing literal. We get to write our data.
|
||||
tc.client.WriteCommandf("", "uid replace 1 inbox ~{4000+}")
|
||||
_, err := tc.client.Write(buf)
|
||||
tc.check(err, "write replace message")
|
||||
tc.response("no")
|
||||
tc.xcodeWord("OVERQUOTA")
|
||||
|
||||
// Non-synchronizing literal to bad mailbox.
|
||||
tc.client.WriteCommandf("", "uid replace 1 badbox {4000+}")
|
||||
_, err = tc.client.Write(buf)
|
||||
tc.check(err, "write replace message")
|
||||
tc.response("no")
|
||||
tc.xcodeWord("TRYCREATE")
|
||||
}
|
||||
|
||||
func TestReplaceExpunged(t *testing.T) {
|
||||
testReplaceExpunged(t, false)
|
||||
}
|
||||
|
||||
func TestReplaceExpungedUIDOnly(t *testing.T) {
|
||||
testReplaceExpunged(t, true)
|
||||
}
|
||||
|
||||
func testReplaceExpunged(t *testing.T, uidonly bool) {
|
||||
tc := start(t, uidonly)
|
||||
defer tc.close()
|
||||
|
||||
tc.login("mjl@mox.example", password0)
|
||||
tc.client.Select("inbox")
|
||||
tc.client.Append("inbox", makeAppend(exampleMsg))
|
||||
|
||||
// We start the command, but don't write data yet.
|
||||
tc.client.WriteCommandf("", "uid replace 1 inbox {4000}")
|
||||
|
||||
// Get in with second client and remove the message we are replacing.
|
||||
tc2 := startNoSwitchboard(t, uidonly)
|
||||
defer tc2.closeNoWait()
|
||||
tc2.login("mjl@mox.example", password0)
|
||||
tc2.client.Select("inbox")
|
||||
tc2.client.UIDStoreFlagsSet("1", true, `\Deleted`)
|
||||
tc2.client.Expunge()
|
||||
tc2.client.Unselect()
|
||||
tc2.client.Close()
|
||||
|
||||
// Now continue trying to replace the message. We should get an error and an expunge.
|
||||
tc.readprefixline("+ ")
|
||||
buf := make([]byte, 4000, 4002)
|
||||
for i := range buf {
|
||||
buf[i] = 'x'
|
||||
}
|
||||
buf = append(buf, "\r\n"...)
|
||||
_, err := tc.client.Write(buf)
|
||||
tc.check(err, "write replace message")
|
||||
tc.response("no")
|
||||
if uidonly {
|
||||
tc.xuntagged(
|
||||
tc.untaggedFetch(1, 1, imapclient.FetchFlags{`\Deleted`}),
|
||||
imapclient.UntaggedVanished{UIDs: xparseNumSet("1")},
|
||||
)
|
||||
} else {
|
||||
tc.xuntagged(
|
||||
tc.untaggedFetch(1, 1, imapclient.FetchFlags{`\Deleted`}),
|
||||
imapclient.UntaggedExpunge(1),
|
||||
)
|
||||
}
|
||||
}
|
@ -1,10 +1,14 @@
|
||||
package imapserver
|
||||
|
||||
import (
|
||||
"cmp"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"maps"
|
||||
"net/textproto"
|
||||
"slices"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/mjl-/bstore"
|
||||
|
||||
@ -12,22 +16,54 @@ import (
|
||||
"github.com/mjl-/mox/store"
|
||||
)
|
||||
|
||||
// If last search output was this long ago, we write an untagged inprogress
|
||||
// response. Changed during tests. ../rfc/9585:109
|
||||
var inProgressPeriod = time.Duration(10 * time.Second)
|
||||
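A minimal sketch of the throttling this variable drives, with a shortened period so it runs quickly; the printed line stands in for the untagged OK [INPROGRESS ...] response the server writes at these points:

package main

import (
	"fmt"
	"time"
)

func main() {
	inProgressPeriod := 50 * time.Millisecond // Shortened for the example.
	last := time.Now()
	var progress, goal uint32 = 0, 1000
	for ; progress < goal; progress++ {
		time.Sleep(time.Millisecond) // Stand-in for examining one message.
		if time.Since(last) < inProgressPeriod {
			continue
		}
		last = time.Now()
		fmt.Printf("progress %d/%d\n", progress, goal)
	}
}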
|
||||
// ESEARCH allows searching multiple mailboxes, referenced through mailbox filters
|
||||
// borrowed from the NOTIFY extension. Unlike the regular extended SEARCH/UID
|
||||
// SEARCH command that always returns an ESEARCH response, the ESEARCH command only
|
||||
// returns ESEARCH responses when there were matches in a mailbox.
|
||||
//
|
||||
// ../rfc/7377:159
|
||||
func (c *conn) cmdEsearch(tag, cmd string, p *parser) {
|
||||
c.cmdxSearch(true, true, tag, cmd, p)
|
||||
}
|
||||
|
||||
// Search returns messages matching criteria specified in parameters.
|
||||
//
|
||||
// State: Selected
|
||||
func (c *conn) cmdxSearch(isUID bool, tag, cmd string, p *parser) {
|
||||
// Command: ../rfc/9051:3716 ../rfc/4731:31 ../rfc/4466:354 ../rfc/3501:2723
|
||||
// Examples: ../rfc/9051:3986 ../rfc/4731:153 ../rfc/3501:2975
|
||||
// Syntax: ../rfc/9051:6918 ../rfc/4466:611 ../rfc/3501:4954
|
||||
// State: Selected for SEARCH and UID SEARCH, Authenticated or selected for ESEARCH.
|
||||
func (c *conn) cmdxSearch(isUID, isE bool, tag, cmd string, p *parser) {
|
||||
// Command: ../rfc/9051:3716 ../rfc/7377:159 ../rfc/6237:142 ../rfc/4731:31 ../rfc/4466:354 ../rfc/3501:2723
|
||||
// Examples: ../rfc/9051:3986 ../rfc/7377:385 ../rfc/6237:323 ../rfc/4731:153 ../rfc/3501:2975
|
||||
// Syntax: ../rfc/9051:6918 ../rfc/7377:462 ../rfc/6237:403 ../rfc/4466:611 ../rfc/3501:4954
|
||||
|
||||
// We will respond with ESEARCH instead of SEARCH if "RETURN" is present or for IMAP4rev2.
|
||||
// We will respond with ESEARCH instead of SEARCH if "RETURN" is present or for IMAP4rev2 or for isE (ESEARCH command).
|
||||
var eargs map[string]bool // Options except SAVE. Nil means old-style SEARCH response.
|
||||
var save bool // For SAVE option. Kept separately for easier handling of MIN/MAX later.
|
||||
|
||||
// IMAP4rev2 always returns ESEARCH, even with absent RETURN.
|
||||
if c.enabled[capIMAP4rev2] {
|
||||
if c.enabled[capIMAP4rev2] || isE {
|
||||
eargs = map[string]bool{}
|
||||
}
|
||||
|
||||
// The ESEARCH command has various ways to specify which mailboxes are to be
|
||||
// searched. We parse and gather the request first, and evaluate them to mailboxes
|
||||
// after parsing, when we start and have a DB transaction.
|
||||
var mailboxSpecs []mailboxSpecifier
|
||||
|
||||
// ../rfc/7377:468
|
||||
if isE && p.take(" IN (") {
|
||||
for {
|
||||
ms := p.xfilterMailbox(mbspecsEsearch)
|
||||
mailboxSpecs = append(mailboxSpecs, ms)
|
||||
|
||||
if !p.take(" ") {
|
||||
break
|
||||
}
|
||||
}
|
||||
p.xtake(")")
|
||||
// We are not parsing the scope-options since there aren't any defined yet. ../rfc/7377:469
|
||||
}
|
||||
// ../rfc/9051:6967
|
||||
if p.take(" RETURN (") {
|
||||
eargs = map[string]bool{}
|
||||
@ -71,6 +107,11 @@ func (c *conn) cmdxSearch(isUID bool, tag, cmd string, p *parser) {
|
||||
sk.searchKeys = append(sk.searchKeys, *p.xsearchKey())
|
||||
}
|
||||
|
||||
// Sequence set search program must be rejected with UIDONLY enabled. ../rfc/9586:220
|
||||
if c.uidonly && sk.hasSequenceNumbers() {
|
||||
xsyntaxCodeErrorf("UIDREQUIRED", "cannot search message sequence numbers in search program with uidonly enabled")
|
||||
}
|
||||
|
||||
// Even in case of error, we ensure search result is changed.
|
||||
if save {
|
||||
c.searchResult = []store.UID{}
|
||||
@ -125,68 +166,344 @@ func (c *conn) cmdxSearch(isUID bool, tag, cmd string, p *parser) {
|
||||
|
||||
// If we only have a MIN and/or MAX, we can stop processing as soon as we
|
||||
// have those matches.
|
||||
var min, max int
|
||||
var min1, max1 int
|
||||
if eargs["MIN"] {
|
||||
min = 1
|
||||
min1 = 1
|
||||
}
|
||||
if eargs["MAX"] {
|
||||
max = 1
|
||||
max1 = 1
|
||||
}
|
||||
|
||||
var expungeIssued bool
|
||||
var maxModSeq store.ModSeq
|
||||
// We'll have one Result per mailbox we are searching. For regular (UID) SEARCH
|
||||
// commands, we'll have just one, for the selected mailbox.
|
||||
type Result struct {
|
||||
Mailbox store.Mailbox
|
||||
MaxModSeq store.ModSeq
|
||||
UIDs []store.UID
|
||||
}
|
||||
var results []Result
|
||||
|
||||
// We periodically send an untagged OK with INPROGRESS code while searching, to let
|
||||
// clients doing slow searches know we're still working.
|
||||
inProgressLast := time.Now()
|
||||
// Only respond with tag if it can't be confused as end of response code. ../rfc/9585:122
|
||||
inProgressTag := "nil"
|
||||
if !strings.Contains(tag, "]") {
|
||||
inProgressTag = dquote(tag).pack(c)
|
||||
}
|
||||
|
||||
var uids []store.UID
|
||||
c.xdbread(func(tx *bstore.Tx) {
|
||||
c.xmailboxID(tx, c.mailboxID) // Validate.
|
||||
// Gather mailboxes to operate on. Usually just the selected mailbox. But with the
|
||||
// ESEARCH command, we may be searching multiple.
|
||||
var mailboxes []store.Mailbox
|
||||
if len(mailboxSpecs) > 0 {
|
||||
// While gathering, we deduplicate mailboxes. ../rfc/7377:312
|
||||
m := map[int64]store.Mailbox{}
|
||||
for _, ms := range mailboxSpecs {
|
||||
switch ms.Kind {
|
||||
case mbspecSelected:
|
||||
// ../rfc/7377:306
|
||||
if c.state != stateSelected {
|
||||
xsyntaxErrorf("cannot use ESEARCH with selected when state is not selected")
|
||||
}
|
||||
|
||||
mb := c.xmailboxID(tx, c.mailboxID) // Validate.
|
||||
m[mb.ID] = mb
|
||||
|
||||
case mbspecInboxes:
|
||||
				// Inbox and everything below. And we look at destinations and rulesets. We add all
|
||||
// mailboxes from the destinations, and all from the rulesets except when
|
||||
// ListAllowDomain is non-empty.
|
||||
// ../rfc/5465:822
|
||||
q := bstore.QueryTx[store.Mailbox](tx)
|
||||
q.FilterEqual("Expunged", false)
|
||||
q.FilterGreaterEqual("Name", "Inbox")
|
||||
q.SortAsc("Name")
|
||||
for mb, err := range q.All() {
|
||||
xcheckf(err, "list mailboxes")
|
||||
if mb.Name != "Inbox" && !strings.HasPrefix(mb.Name, "Inbox/") {
|
||||
break
|
||||
}
|
||||
m[mb.ID] = mb
|
||||
}
|
||||
|
||||
conf, _ := c.account.Conf()
|
||||
for _, dest := range conf.Destinations {
|
||||
if dest.Mailbox != "" && dest.Mailbox != "Inbox" {
|
||||
mb, err := c.account.MailboxFind(tx, dest.Mailbox)
|
||||
xcheckf(err, "find mailbox from destination")
|
||||
if mb != nil {
|
||||
m[mb.ID] = *mb
|
||||
}
|
||||
}
|
||||
|
||||
for _, rs := range dest.Rulesets {
|
||||
if rs.ListAllowDomain != "" || rs.Mailbox == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
mb, err := c.account.MailboxFind(tx, rs.Mailbox)
|
||||
xcheckf(err, "find mailbox from ruleset")
|
||||
if mb != nil {
|
||||
m[mb.ID] = *mb
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
case mbspecPersonal:
|
||||
// All mailboxes in the personal namespace. Which is all mailboxes for us.
|
||||
// ../rfc/5465:817
|
||||
for mb, err := range bstore.QueryTx[store.Mailbox](tx).FilterEqual("Expunged", false).All() {
|
||||
xcheckf(err, "list mailboxes")
|
||||
m[mb.ID] = mb
|
||||
}
|
||||
|
||||
case mbspecSubscribed:
|
||||
				// Mailboxes that are subscribed. Will typically be the same as personal, since we
				// subscribe to all mailboxes. But the user can manage subscriptions differently.
|
||||
// ../rfc/5465:831
|
||||
for mb, err := range bstore.QueryTx[store.Mailbox](tx).FilterEqual("Expunged", false).All() {
|
||||
xcheckf(err, "list mailboxes")
|
||||
if err := tx.Get(&store.Subscription{Name: mb.Name}); err == nil {
|
||||
m[mb.ID] = mb
|
||||
} else if err != bstore.ErrAbsent {
|
||||
xcheckf(err, "lookup subscription for mailbox")
|
||||
}
|
||||
}
|
||||
|
||||
case mbspecSubtree, mbspecSubtreeOne:
|
||||
// The mailbox name itself, and children. ../rfc/5465:847
|
||||
// SUBTREE is arbitrarily deep, SUBTREE-ONE is one level deeper than requested
|
||||
// mailbox. The mailbox itself is included too ../rfc/7377:274
|
||||
|
||||
// We don't have to worry about loops. Mailboxes are not in the file system.
|
||||
// ../rfc/7377:291
|
||||
|
||||
for _, name := range ms.Mailboxes {
|
||||
name = xcheckmailboxname(name, true)
|
||||
|
||||
one := ms.Kind == mbspecSubtreeOne
|
||||
var ntoken int
|
||||
if one {
|
||||
ntoken = len(strings.Split(name, "/")) + 1
|
||||
}
|
||||
|
||||
q := bstore.QueryTx[store.Mailbox](tx)
|
||||
q.FilterEqual("Expunged", false)
|
||||
q.FilterGreaterEqual("Name", name)
|
||||
q.SortAsc("Name")
|
||||
for mb, err := range q.All() {
|
||||
xcheckf(err, "list mailboxes")
|
||||
if mb.Name != name && !strings.HasPrefix(mb.Name, name+"/") {
|
||||
break
|
||||
}
|
||||
if !one || mb.Name == name || len(strings.Split(mb.Name, "/")) == ntoken {
|
||||
m[mb.ID] = mb
|
||||
}
|
||||
}
|
||||
}
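// Example of the depth check above: for SUBTREE-ONE of "Lists", ntoken is 2, so
// "Lists" and "Lists/go" are included but "Lists/go/dev" is not; with SUBTREE all
// three would be included.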

case mbspecMailboxes:
// Just the specified mailboxes. ../rfc/5465:853
for _, name := range ms.Mailboxes {
name = xcheckmailboxname(name, true)

// If a mailbox doesn't exist, we don't treat it as an error. Seems reasonable
// given we are searching. Messages may not exist. And likewise for the mailbox.
// Just results in no hits.
mb, err := c.account.MailboxFind(tx, name)
xcheckf(err, "looking up mailbox")
if mb != nil {
m[mb.ID] = *mb
}
}

default:
panic("missing case")
}
}
mailboxes = slices.Collect(maps.Values(m))
slices.SortFunc(mailboxes, func(a, b store.Mailbox) int {
return cmp.Compare(a.Name, b.Name)
})

// If no source mailboxes were specified (no mailboxSpecs), the selected mailbox is
// used below. ../rfc/7377:298
} else {
mb := c.xmailboxID(tx, c.mailboxID) // Validate.
mailboxes = []store.Mailbox{mb}
}

if save && !(len(mailboxes) == 1 && mailboxes[0].ID == c.mailboxID) {
// ../rfc/7377:319
xsyntaxErrorf("can only use SAVE on selected mailbox")
}

runlock()
runlock = func() {}

// Normal forward search when we don't have MAX only.
var lastIndex = -1
if eargs == nil || max == 0 || len(eargs) != 1 {
for i, uid := range c.uids {
lastIndex = i
if match, modseq := c.searchMatch(tx, msgseq(i+1), uid, *sk, bodySearch, textSearch, &expungeIssued); match {
uids = append(uids, uid)
if modseq > maxModSeq {
maxModSeq = modseq
// Determine if search has a sequence set without search results. If so, we need
// sequence numbers for matching, and we must always go through the messages in
// forward order. No reverse search for MAX only.
needSeq := (len(mailboxes) > 1 || len(mailboxes) == 1 && mailboxes[0].ID != c.mailboxID) && sk.hasSequenceNumbers()

forward := eargs == nil || max1 == 0 || len(eargs) != 1 || needSeq
reverse := max1 == 1 && (len(eargs) == 1 || min1+max1 == len(eargs)) && !needSeq
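// Example: a search with only RETURN (MAX) and no sequence-set keys skips the forward
// pass entirely and only scans from the highest UID downwards until the first match.
// With RETURN (MIN MAX), both passes run: the forward pass stops at the first match
// (the MIN), the reverse pass at the first match from the top (the MAX).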

// We set a worst-case "goal" of having gone through all messages in all mailboxes.
// Sometimes, we can be faster, when we only do a MIN and/or MAX query and we can
// stop early. We'll account for that as we go. For the selected mailbox, we'll
// only look at those the session has already seen.
goal := "nil"
var total uint32
for _, mb := range mailboxes {
if mb.ID == c.mailboxID && !c.uidonly {
total += c.exists
} else {
total += uint32(mb.Total + mb.Deleted)
}
}
if total > 0 {
// Goal is always non-zero. ../rfc/9585:232
goal = fmt.Sprintf("%d", total)
}

var progress uint32
for _, mb := range mailboxes {
var lastUID store.UID

result := Result{Mailbox: mb}

msgCount := uint32(mb.MailboxCounts.Total + mb.MailboxCounts.Deleted)
if mb.ID == c.mailboxID && !c.uidonly {
msgCount = c.exists
}

// Used for interpreting UID sets with a star, like "1:*" and "10:*". Only called
// for UIDs that are higher than the number, since "10:*" evaluates to "10:5" if 5
// is the highest UID, and UID 5-10 would all match.
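// Example: with messages at UIDs 3 and 20, a UID range "25:*" is evaluated against
// 20:25, so the message with UID 20 still matches; the lookup below is cached so the
// query runs at most once per mailbox.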
var cachedHighestUID store.UID
xhighestUID := func() store.UID {
if cachedHighestUID > 0 {
return cachedHighestUID
}

q := bstore.QueryTx[store.Message](tx)
q.FilterNonzero(store.Message{MailboxID: mb.ID})
q.FilterEqual("Expunged", false)
if mb.ID == c.mailboxID {
q.FilterLess("UID", c.uidnext)
}
q.SortDesc("UID")
q.Limit(1)
m, err := q.Get()
if err == bstore.ErrAbsent {
xuserErrorf("cannot use * on empty mailbox")
}
xcheckf(err, "get last uid")
cachedHighestUID = m.UID
return cachedHighestUID
}

progressOrig := progress

if forward {
// We track this for non-selected mailboxes. searchMatch will look up the message
// sequence number for this session if we are searching the selected mailbox.
var seq msgseq = 1

q := bstore.QueryTx[store.Message](tx)
q.FilterNonzero(store.Message{MailboxID: mb.ID})
q.FilterEqual("Expunged", false)
if mb.ID == c.mailboxID {
q.FilterLess("UID", c.uidnext)
}
q.SortAsc("UID")
for m, err := range q.All() {
xcheckf(err, "list messages in mailbox")

// We track this for the "reverse" case, we'll stop before seeing lastUID.
lastUID = m.UID

if time.Since(inProgressLast) > inProgressPeriod {
c.xwritelinef("* OK [INPROGRESS (%s %d %s)] still searching", inProgressTag, progress, goal)
inProgressLast = time.Now()
}
if min == 1 && min+max == len(eargs) {
progress++

if c.searchMatch(tx, msgCount, seq, m, *sk, bodySearch, textSearch, xhighestUID) {
result.UIDs = append(result.UIDs, m.UID)
result.MaxModSeq = max(result.MaxModSeq, m.ModSeq)
if min1 == 1 && min1+max1 == len(eargs) {
if !needSeq {
break
}
// We only need a MIN and a MAX, but we also need sequence numbers so we are
// walking through and collecting all UIDs. Correct for that, keeping only the MIN
// (first) and MAX (second).
if len(result.UIDs) == 3 {
result.UIDs[1] = result.UIDs[2]
result.UIDs = result.UIDs[:2]
}
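// Example: matches at UIDs 3, 8 and 20 leave result.UIDs as [3 20]: the first match
// is the MIN, and the last element is continuously replaced by the latest match, so
// it ends up being the MAX.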
}
}
seq++
}
}
// And reverse search for MAX if we have only MAX or MAX combined with MIN, and
// don't need sequence numbers. We just need a single match, then we stop.
if reverse {
q := bstore.QueryTx[store.Message](tx)
q.FilterNonzero(store.Message{MailboxID: mb.ID})
q.FilterEqual("Expunged", false)
q.FilterGreater("UID", lastUID)
if mb.ID == c.mailboxID {
q.FilterLess("UID", c.uidnext)
}
q.SortDesc("UID")
for m, err := range q.All() {
xcheckf(err, "list messages in mailbox")

if time.Since(inProgressLast) > inProgressPeriod {
c.xwritelinef("* OK [INPROGRESS (%s %d %s)] still searching", inProgressTag, progress, goal)
inProgressLast = time.Now()
}
progress++

var seq msgseq // Filled in by searchMatch for messages in selected mailbox.
if c.searchMatch(tx, msgCount, seq, m, *sk, bodySearch, textSearch, xhighestUID) {
result.UIDs = append(result.UIDs, m.UID)
result.MaxModSeq = max(result.MaxModSeq, m.ModSeq)
break
}
}
}
}
// And reverse search for MAX if we have only MAX or MAX combined with MIN.
if max == 1 && (len(eargs) == 1 || min+max == len(eargs)) {
for i := len(c.uids) - 1; i > lastIndex; i-- {
if match, modseq := c.searchMatch(tx, msgseq(i+1), c.uids[i], *sk, bodySearch, textSearch, &expungeIssued); match {
uids = append(uids, c.uids[i])
if modseq > maxModSeq {
maxModSeq = modseq
}
break
}
}

// We could have finished searching the mailbox with fewer messages processed than
// its total, e.g. when stopping early for MIN/MAX. Count the remainder towards the
// progress total.
mailboxProcessed := progress - progressOrig
mailboxTotal := uint32(mb.MailboxCounts.Total + mb.MailboxCounts.Deleted)
progress += max(0, mailboxTotal-mailboxProcessed)

results = append(results, result)
}
})

if eargs == nil {
// We'll only have a result for the one selected mailbox.
result := results[0]

// In IMAP4rev1, an untagged SEARCH response is required. ../rfc/3501:2728
if len(uids) == 0 {
c.bwritelinef("* SEARCH")
if len(result.UIDs) == 0 {
c.xbwritelinef("* SEARCH")
}

// Old-style SEARCH response. We must spell out each number. So we may be splitting
// into multiple responses. ../rfc/9051:6809 ../rfc/3501:4833
for len(uids) > 0 {
n := len(uids)
if n > 100 {
n = 100
}
for len(result.UIDs) > 0 {
n := min(100, len(result.UIDs))
s := ""
for _, v := range uids[:n] {
for _, v := range result.UIDs[:n] {
if !isUID {
v = store.UID(c.xsequence(v))
}
@@ -202,91 +519,111 @@ func (c *conn) cmdxSearch(isUID bool, tag, cmd string, p *parser) {
var modseq string
if sk.hasModseq() {
// ../rfc/7162:2557
modseq = fmt.Sprintf(" (MODSEQ %d)", maxModSeq.Client())
modseq = fmt.Sprintf(" (MODSEQ %d)", result.MaxModSeq.Client())
}

c.bwritelinef("* SEARCH%s%s", s, modseq)
uids = uids[n:]
c.xbwritelinef("* SEARCH%s%s", s, modseq)
result.UIDs = result.UIDs[n:]
}
} else {
// New-style ESEARCH response syntax: ../rfc/9051:6546 ../rfc/4466:522

if save {
// ../rfc/9051:3784 ../rfc/5182:13
c.searchResult = uids
if sanityChecks {
checkUIDs(c.searchResult)
}
c.searchResult = results[0].UIDs
c.checkUIDs(c.searchResult, false)
}

// No untagged ESEARCH response if nothing was requested. ../rfc/9051:4160
if len(eargs) > 0 {
// The tag was originally a string, became an astring in IMAP4rev2, better stick to
// string. ../rfc/4466:707 ../rfc/5259:1163 ../rfc/9051:7087
resp := fmt.Sprintf(`* ESEARCH (TAG "%s")`, tag)
if isUID {
resp += " UID"
}

// NOTE: we are converting UIDs to msgseq in the uids slice (if needed) while
// keeping the "uids" name!
if !isUID {
// If searchResult is hanging on to the slice, we need to work on a copy.
if save {
nuids := make([]store.UID, len(uids))
copy(nuids, uids)
uids = nuids
for _, result := range results {
// For the ESEARCH command, we must not return a response if there were no matching
// messages. This is unlike the later IMAP4rev2, where an ESEARCH response must be
// sent if there were no matches. ../rfc/7377:243 ../rfc/9051:3775
if isE && len(result.UIDs) == 0 {
continue
}
for i, uid := range uids {
uids[i] = store.UID(c.xsequence(uid))

// The tag was originally a string, became an astring in IMAP4rev2, better stick to
// string. ../rfc/4466:707 ../rfc/5259:1163 ../rfc/9051:7087
if isE {
fmt.Fprintf(c.xbw, `* ESEARCH (TAG "%s" MAILBOX %s UIDVALIDITY %d)`, tag, result.Mailbox.Name, result.Mailbox.UIDValidity)
} else {
fmt.Fprintf(c.xbw, `* ESEARCH (TAG "%s")`, tag)
}
if isUID {
fmt.Fprintf(c.xbw, " UID")
}
}

// If no matches, then no MIN/MAX response. ../rfc/4731:98 ../rfc/9051:3758
if eargs["MIN"] && len(uids) > 0 {
resp += fmt.Sprintf(" MIN %d", uids[0])
}
if eargs["MAX"] && len(uids) > 0 {
resp += fmt.Sprintf(" MAX %d", uids[len(uids)-1])
}
if eargs["COUNT"] {
resp += fmt.Sprintf(" COUNT %d", len(uids))
}
if eargs["ALL"] && len(uids) > 0 {
resp += fmt.Sprintf(" ALL %s", compactUIDSet(uids).String())
}
// NOTE: we are potentially converting UIDs to msgseq, but keep the store.UID type
// for convenience.
nums := result.UIDs
if !isUID {
// If searchResult is hanging on to the slice, we need to work on a copy.
if save {
nums = slices.Clone(nums)
}
for i, uid := range nums {
nums[i] = store.UID(c.xsequence(uid))
}
}

// Interaction between ESEARCH and CONDSTORE: ../rfc/7162:1211 ../rfc/4731:273
// Summary: send the highest modseq of the returned messages.
if sk.hasModseq() && len(uids) > 0 {
resp += fmt.Sprintf(" MODSEQ %d", maxModSeq.Client())
}
// If no matches, then no MIN/MAX response. ../rfc/4731:98 ../rfc/9051:3758
if eargs["MIN"] && len(nums) > 0 {
fmt.Fprintf(c.xbw, " MIN %d", nums[0])
}
if eargs["MAX"] && len(result.UIDs) > 0 {
fmt.Fprintf(c.xbw, " MAX %d", nums[len(nums)-1])
}
if eargs["COUNT"] {
fmt.Fprintf(c.xbw, " COUNT %d", len(nums))
}
if eargs["ALL"] && len(nums) > 0 {
fmt.Fprintf(c.xbw, " ALL %s", compactUIDSet(nums).String())
}

c.bwritelinef("%s", resp)
// Interaction between ESEARCH and CONDSTORE: ../rfc/7162:1211 ../rfc/4731:273
// Summary: send the highest modseq of the returned messages.
if sk.hasModseq() && len(nums) > 0 {
fmt.Fprintf(c.xbw, " MODSEQ %d", result.MaxModSeq.Client())
}

c.xbwritelinef("")
}
}
}
if expungeIssued {
// ../rfc/9051:5102
c.writeresultf("%s OK [EXPUNGEISSUED] done", tag)
} else {
c.ok(tag, cmd)
}

c.ok(tag, cmd)
}
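// Editor's note: a minimal standalone sketch (not part of mox) of the old-style
// untagged SEARCH response chunking used above, where at most 100 numbers are
// written per "* SEARCH" line; it assumes plain uint32 message numbers.
func exampleSearchResponseLines(nums []uint32) []string {
	if len(nums) == 0 {
		// The server writes a single bare "* SEARCH" when there are no matches.
		return []string{"* SEARCH"}
	}
	var lines []string
	for len(nums) > 0 {
		n := min(100, len(nums))
		s := "* SEARCH"
		for _, v := range nums[:n] {
			s += fmt.Sprintf(" %d", v)
		}
		lines = append(lines, s)
		nums = nums[n:]
	}
	return lines
}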

type search struct {
c *conn
tx *bstore.Tx
seq msgseq
uid store.UID
mr *store.MsgReader
m store.Message
p *message.Part
expungeIssued *bool
hasModseq bool
c *conn
tx *bstore.Tx
msgCount uint32 // Number of messages in mailbox (or session when selected).
seq msgseq // Can be 0, for other mailboxes than selected in case of MAX.
m store.Message
mr *store.MsgReader
p *message.Part
xhighestUID func() store.UID
}
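// Editor's note: a hypothetical sketch (not mox's actual numSet implementation) of
// how a single "a:b" entry from a message sequence set can be matched against a
// sequence number when only the mailbox's message count is known, as containsSeqCount
// below is used for non-selected mailboxes; here 0 stands for "*", which resolves to
// the message count.
type exampleSeqRange struct{ first, last uint32 } // 0 means "*".

func exampleSeqRangeContains(r exampleSeqRange, seq, msgCount uint32) bool {
	resolve := func(v uint32) uint32 {
		if v == 0 {
			return msgCount
		}
		return v
	}
	lo, hi := resolve(r.first), resolve(r.last)
	if lo > hi {
		lo, hi = hi, lo // "10:*" with 5 messages is treated as 5:10.
	}
	return seq >= lo && seq <= hi
}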

func (c *conn) searchMatch(tx *bstore.Tx, seq msgseq, uid store.UID, sk searchKey, bodySearch, textSearch *store.WordSearch, expungeIssued *bool) (bool, store.ModSeq) {
s := search{c: c, tx: tx, seq: seq, uid: uid, expungeIssued: expungeIssued, hasModseq: sk.hasModseq()}
func (c *conn) searchMatch(tx *bstore.Tx, msgCount uint32, seq msgseq, m store.Message, sk searchKey, bodySearch, textSearch *store.WordSearch, xhighestUID func() store.UID) bool {
if m.MailboxID == c.mailboxID {
// If session doesn't know about the message yet, don't return it.
if c.uidonly {
if m.UID >= c.uidnext {
return false
}
} else {
// Set seq for use in evaluations.
seq = c.sequence(m.UID)
if seq == 0 {
return false
}
}
}

s := search{c: c, tx: tx, msgCount: msgCount, seq: seq, m: m, xhighestUID: xhighestUID}
defer func() {
if s.mr != nil {
err := s.mr.Close()
@@ -297,18 +634,7 @@ func (c *conn) searchMatch(tx *bstore.Tx, seq msgseq, uid store.UID, sk searchKe
return s.match(sk, bodySearch, textSearch)
}

func (s *search) match(sk searchKey, bodySearch, textSearch *store.WordSearch) (match bool, modseq store.ModSeq) {
// Instead of littering all the cases in match0 with calls to get modseq, we do it once
// here in case of a match.
defer func() {
if match && s.hasModseq {
if s.m.ID == 0 {
match = s.xensureMessage()
}
modseq = s.m.ModSeq
}
}()

func (s *search) match(sk searchKey, bodySearch, textSearch *store.WordSearch) (match bool) {
match = s.match0(sk)
if match && bodySearch != nil {
if !s.xensurePart() {
@@ -331,24 +657,6 @@ func (s *search) match(sk searchKey, bodySearch, textSearch *store.WordSearch) (
return
}

func (s *search) xensureMessage() bool {
if s.m.ID > 0 {
return true
}

q := bstore.QueryTx[store.Message](s.tx)
q.FilterNonzero(store.Message{MailboxID: s.c.mailboxID, UID: s.uid})
m, err := q.Get()
if err == bstore.ErrAbsent || err == nil && m.Expunged {
// ../rfc/2180:607
*s.expungeIssued = true
return false
}
xcheckf(err, "get message")
s.m = m
return true
}

// ensure message, reader and part are loaded. returns whether that was
// successful.
func (s *search) xensurePart() bool {
@@ -356,10 +664,6 @@ func (s *search) xensurePart() bool {
return s.p != nil
}

if !s.xensureMessage() {
return false
}

// Closed by searchMatch after all (recursive) search.match calls are finished.
s.mr = s.c.account.MessageReader(s.m)

@@ -386,14 +690,23 @@ func (s *search) match0(sk searchKey) bool {
}
return true
} else if sk.seqSet != nil {
return sk.seqSet.containsSeq(s.seq, c.uids, c.searchResult)
if sk.seqSet.searchResult {
// Interpreting search results on a mailbox that isn't selected during multisearch
// is likely a mistake. No mention about it in the RFC. ../rfc/7377:257
if s.m.MailboxID != c.mailboxID {
xuserErrorf("can only use search result with the selected mailbox")
}
return uidSearch(c.searchResult, s.m.UID) > 0
}
// For multisearch, we have arranged to have a seq for non-selected mailboxes too.
return sk.seqSet.containsSeqCount(s.seq, s.msgCount)
}

filterHeader := func(field, value string) bool {
lower := strings.ToLower(value)
h, err := s.p.Header()
if err != nil {
c.log.Debugx("parsing message header", err, slog.Any("uid", s.uid))
c.log.Debugx("parsing message header", err, slog.Any("uid", s.m.UID), slog.Int64("msgid", s.m.ID))
return false
}
for _, v := range h.Values(field) {
@@ -423,7 +736,12 @@ func (s *search) match0(sk searchKey) bool {
case "OR":
return s.match0(*sk.searchKey) || s.match0(*sk.searchKey2)
case "UID":
return sk.uidSet.containsUID(s.uid, c.uids, c.searchResult)
if sk.uidSet.searchResult && s.m.MailboxID != c.mailboxID {
// Interpreting search results on a mailbox that isn't selected during multisearch
// is likely a mistake. No mention about it in the RFC. ../rfc/7377:257
xuserErrorf("cannot use search result from another mailbox")
}
return sk.uidSet.xcontainsKnownUID(s.m.UID, c.searchResult, s.xhighestUID)
}

// Parsed part.
@@ -453,12 +771,7 @@ func (s *search) match0(sk searchKey) bool {
case "$mdnsent":
return s.m.MDNSent
default:
for _, k := range s.m.Keywords {
if k == kw {
return true
}
}
return false
return slices.Contains(s.m.Keywords, kw)
}
case "SEEN":
return s.m.Seen
@@ -482,12 +795,7 @@ func (s *search) match0(sk searchKey) bool {
case "$mdnsent":
return !s.m.MDNSent
default:
for _, k := range s.m.Keywords {
if k == kw {
return false
}
}
return true
return !slices.Contains(s.m.Keywords, kw)
}
case "UNSEEN":
return !s.m.Seen
@@ -514,10 +822,42 @@ func (s *search) match0(sk searchKey) bool {
case "MODSEQ":
// ../rfc/7162:1045
return s.m.ModSeq.Client() >= *sk.clientModseq
case "SAVEDBEFORE", "SAVEDON", "SAVEDSINCE":
// If we don't have a savedate for this message (for messages received before we
// implemented this feature), we use the "internal date" (received timestamp) of
// the message. ../rfc/8514:237
rt := s.m.Received
if s.m.SaveDate != nil {
rt = *s.m.SaveDate
}

skdt := sk.date.Format("2006-01-02")
rdt := rt.Format("2006-01-02")
switch sk.op {
case "SAVEDBEFORE":
return rdt < skdt
case "SAVEDON":
return rdt == skdt
case "SAVEDSINCE":
return rdt >= skdt
}
panic("missing case")
case "SAVEDATESUPPORTED":
// We return whether we have a savedate for this message. We support it on all
// mailboxes, but we only have this metadata from the time we implemented this
// feature.
return s.m.SaveDate != nil
case "OLDER":
// ../rfc/5032:76
seconds := int64(time.Since(s.m.Received) / time.Second)
return seconds >= sk.number
case "YOUNGER":
seconds := int64(time.Since(s.m.Received) / time.Second)
return seconds <= sk.number
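// Example: OLDER 3600 matches messages received at least an hour ago, while
// YOUNGER 3600 matches those received within the past hour.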
}

if s.p == nil {
c.log.Info("missing parsed message, not matching", slog.Any("uid", s.uid))
c.log.Info("missing parsed message, not matching", slog.Any("uid", s.m.UID), slog.Int64("msgid", s.m.ID))
return false
}

@@ -546,7 +886,7 @@ func (s *search) match0(sk searchKey) bool {
lower := strings.ToLower(sk.astring)
h, err := s.p.Header()
if err != nil {
c.log.Errorx("parsing header for search", err, slog.Any("uid", s.uid))
c.log.Errorx("parsing header for search", err, slog.Any("uid", s.m.UID), slog.Int64("msgid", s.m.ID))
return false
}
k := textproto.CanonicalMIMEHeaderKey(sk.headerField)
File diff suppressed because it is too large.
Some files were not shown because too many files have changed in this diff.