Mirror of https://github.com/mjl-/mox.git, synced 2025-06-28 12:18:16 +03:00.

Compare commits: no commits in common; "main" and "v0.0.10" have entirely different histories.
.github/workflows/build-test.yml (vendored, 4 lines changed)
@@ -17,6 +17,7 @@ jobs:
with:
node-version: 16
cache: 'npm'
- run: npm ci
- run: 'touch */*.ts'

- uses: actions/setup-go@v4
@@ -27,9 +28,8 @@ jobs:
# Need to run tests with a temp dir on same file system for os.Rename to succeed.
- run: 'mkdir -p tmp && TMPDIR=$PWD/tmp make test'

- uses: actions/upload-artifact@v4
- uses: actions/upload-artifact@v3
with:
name: coverage-${{ matrix.go-version }}
path: cover.html

# Format code, we check below if nothing changed.
.gitignore (vendored, 2 lines changed)
@@ -5,7 +5,7 @@
/local/
/testdata/check/
/testdata/*/data/
/testdata/ctl/config/dkim/
/testdata/ctl/dkim/
/testdata/empty/
/testdata/exportmaildir/
/testdata/exportmbox/
Makefile (149 lines changed)
@@ -7,7 +7,9 @@ build0:
CGO_ENABLED=0 go build
CGO_ENABLED=0 go vet ./...
./gendoc.sh
./genapidoc.sh
(cd webadmin && CGO_ENABLED=0 go run ../vendor/github.com/mjl-/sherpadoc/cmd/sherpadoc/*.go -adjust-function-names none Admin) >webadmin/api.json
(cd webaccount && CGO_ENABLED=0 go run ../vendor/github.com/mjl-/sherpadoc/cmd/sherpadoc/*.go -adjust-function-names none Account) >webaccount/api.json
(cd webmail && CGO_ENABLED=0 go run ../vendor/github.com/mjl-/sherpadoc/cmd/sherpadoc/*.go -adjust-function-names none Webmail) >webmail/api.json
./gents.sh webadmin/api.json webadmin/api.ts
./gents.sh webaccount/api.json webaccount/api.ts
./gents.sh webmail/api.json webmail/api.ts
@@ -16,35 +18,19 @@ build1:
# build again, api json files above are embedded and new frontend code generated
CGO_ENABLED=0 go build

install: build0 frontend
CGO_ENABLED=0 go install

race: build0
go build -race

test:
CGO_ENABLED=0 go test -fullpath -shuffle=on -coverprofile cover.out ./...
CGO_ENABLED=0 go test -shuffle=on -coverprofile cover.out ./...
go tool cover -html=cover.out -o cover.html

test-race:
CGO_ENABLED=1 go test -fullpath -race -shuffle=on -covermode atomic -coverprofile cover.out ./...
CGO_ENABLED=1 go test -race -shuffle=on -covermode atomic -coverprofile cover.out ./...
go tool cover -html=cover.out -o cover.html

test-more:
TZ= CGO_ENABLED=0 go test -fullpath -shuffle=on -count 2 ./...

# note: if testdata/upgradetest.mbox.gz exists, its messages will be imported
# during tests. helpful for performance/resource consumption tests.
test-upgrade: build
nice ./test-upgrade.sh

# needed for "check" target
install-staticcheck:
CGO_ENABLED=0 go install honnef.co/go/tools/cmd/staticcheck@latest

install-ineffassign:
CGO_ENABLED=0 go install github.com/gordonklaus/ineffassign@v0.1.0

check:
CGO_ENABLED=0 go vet -tags integration
CGO_ENABLED=0 go vet -tags website website/website.go
@@ -52,108 +38,91 @@ check:
CGO_ENABLED=0 go vet -tags errata rfc/errata.go
CGO_ENABLED=0 go vet -tags xr rfc/xr.go
GOARCH=386 CGO_ENABLED=0 go vet ./...
CGO_ENABLED=0 ineffassign ./...
CGO_ENABLED=0 staticcheck ./...
CGO_ENABLED=0 staticcheck -tags integration
CGO_ENABLED=0 staticcheck -tags website website/website.go
CGO_ENABLED=0 staticcheck -tags link rfc/link.go
CGO_ENABLED=0 staticcheck -tags errata rfc/errata.go
CGO_ENABLED=0 staticcheck -tags xr rfc/xr.go

# needed for check-shadow
install-shadow:
CGO_ENABLED=0 go install golang.org/x/tools/go/analysis/passes/shadow/cmd/shadow@latest
staticcheck ./...
staticcheck -tags integration
staticcheck -tags website website/website.go
staticcheck -tags link rfc/link.go
staticcheck -tags errata rfc/errata.go
staticcheck -tags xr rfc/xr.go

# having "err" shadowed is common, best to not have others
check-shadow:
CGO_ENABLED=0 go vet -vettool=$$(which shadow) ./... 2>&1 | grep -v '"err"'
CGO_ENABLED=0 go vet -tags integration -vettool=$$(which shadow) 2>&1 | grep -v '"err"'
CGO_ENABLED=0 go vet -tags website -vettool=$$(which shadow) website/website.go 2>&1 | grep -v '"err"'
CGO_ENABLED=0 go vet -tags link -vettool=$$(which shadow) rfc/link.go 2>&1 | grep -v '"err"'
CGO_ENABLED=0 go vet -tags errata -vettool=$$(which shadow) rfc/errata.go 2>&1 | grep -v '"err"'
CGO_ENABLED=0 go vet -tags xr -vettool=$$(which shadow) rfc/xr.go 2>&1 | grep -v '"err"'
go vet -vettool=$$(which shadow) ./... 2>&1 | grep -v '"err"'
go vet -tags integration -vettool=$$(which shadow) 2>&1 | grep -v '"err"'
go vet -tags website -vettool=$$(which shadow) website/website.go 2>&1 | grep -v '"err"'
go vet -tags link -vettool=$$(which shadow) rfc/link.go 2>&1 | grep -v '"err"'
go vet -tags errata -vettool=$$(which shadow) rfc/errata.go 2>&1 | grep -v '"err"'
go vet -tags xr -vettool=$$(which shadow) rfc/xr.go 2>&1 | grep -v '"err"'

fuzz:
go test -fullpath -fuzz FuzzParseSignature -fuzztime 5m ./dkim
go test -fullpath -fuzz FuzzParseRecord -fuzztime 5m ./dkim
go test -fullpath -fuzz . -fuzztime 5m ./dmarc
go test -fullpath -fuzz . -fuzztime 5m ./dmarcrpt
go test -fullpath -fuzz . -parallel 1 -fuzztime 5m ./imapserver
go test -fullpath -fuzz . -fuzztime 5m ./imapclient
go test -fullpath -fuzz . -parallel 1 -fuzztime 5m ./junk
go test -fullpath -fuzz FuzzParseRecord -fuzztime 5m ./mtasts
go test -fullpath -fuzz FuzzParsePolicy -fuzztime 5m ./mtasts
go test -fullpath -fuzz . -fuzztime 5m ./smtp
go test -fullpath -fuzz . -parallel 1 -fuzztime 5m ./smtpserver
go test -fullpath -fuzz . -fuzztime 5m ./spf
go test -fullpath -fuzz FuzzParseRecord -fuzztime 5m ./tlsrpt
go test -fullpath -fuzz FuzzParseMessage -fuzztime 5m ./tlsrpt
go test -fuzz FuzzParseSignature -fuzztime 5m ./dkim
go test -fuzz FuzzParseRecord -fuzztime 5m ./dkim
go test -fuzz . -fuzztime 5m ./dmarc
go test -fuzz . -fuzztime 5m ./dmarcrpt
go test -fuzz . -parallel 1 -fuzztime 5m ./imapserver
go test -fuzz . -parallel 1 -fuzztime 5m ./junk
go test -fuzz FuzzParseRecord -fuzztime 5m ./mtasts
go test -fuzz FuzzParsePolicy -fuzztime 5m ./mtasts
go test -fuzz . -parallel 1 -fuzztime 5m ./smtpserver
go test -fuzz . -fuzztime 5m ./spf
go test -fuzz FuzzParseRecord -fuzztime 5m ./tlsrpt
go test -fuzz FuzzParseMessage -fuzztime 5m ./tlsrpt

govendor:
go mod tidy
go mod vendor
./genlicenses.sh

test-integration:
-docker compose -f docker-compose-integration.yml kill
-docker compose -f docker-compose-integration.yml down
docker image build --pull --no-cache -f Dockerfile -t mox_integration_moxmail .
docker image build --pull --no-cache -f testdata/integration/Dockerfile.test -t mox_integration_test testdata/integration
-rm -rf testdata/integration/moxacmepebble/data
-rm -rf testdata/integration/moxmail2/data
-rm -f testdata/integration/tmp-pebble-ca.pem
MOX_UID=$$(id -u) docker compose -f docker-compose-integration.yml run test
docker compose -f docker-compose-integration.yml kill
MOX_UID=$$(id -u) docker-compose -f docker-compose-integration.yml run test
docker-compose -f docker-compose-integration.yml down --timeout 1


imaptest-build:
-docker compose -f docker-compose-imaptest.yml build --no-cache --pull mox
-docker-compose -f docker-compose-imaptest.yml build --no-cache --pull mox

imaptest-run:
-rm -r testdata/imaptest/data
mkdir testdata/imaptest/data
docker compose -f docker-compose-imaptest.yml run --entrypoint /usr/local/bin/imaptest imaptest host=mox port=1143 user=mjl@mox.example pass=testtest mbox=imaptest.mbox
docker compose -f docker-compose-imaptest.yml down
docker-compose -f docker-compose-imaptest.yml run --entrypoint /usr/local/bin/imaptest imaptest host=mox port=1143 user=mjl@mox.example pass=testtest mbox=imaptest.mbox
docker-compose -f docker-compose-imaptest.yml down


fmt:
go fmt ./...
gofmt -w -s *.go */*.go

tswatch:
jswatch:
bash -c 'while true; do inotifywait -q -e close_write *.ts webadmin/*.ts webaccount/*.ts webmail/*.ts; make frontend; done'

node_modules/.bin/tsc:
jsinstall:
-mkdir -p node_modules/.bin
npm ci --ignore-scripts
npm ci

install-js: node_modules/.bin/tsc

install-js0:
jsinstall0:
-mkdir -p node_modules/.bin
npm install --ignore-scripts --save-dev --save-exact typescript@5.1.6
npm install --save-dev --save-exact typescript@5.1.6

webmail/webmail.js: lib.ts webmail/api.ts webmail/lib.ts webmail/webmail.ts
./tsc.sh $@ lib.ts webmail/api.ts webmail/lib.ts webmail/webmail.ts
./tsc.sh $@ $^

webmail/msg.js: lib.ts webmail/api.ts webmail/lib.ts webmail/msg.ts
./tsc.sh $@ lib.ts webmail/api.ts webmail/lib.ts webmail/msg.ts
./tsc.sh $@ $^

webmail/text.js: lib.ts webmail/api.ts webmail/lib.ts webmail/text.ts
./tsc.sh $@ lib.ts webmail/api.ts webmail/lib.ts webmail/text.ts
./tsc.sh $@ $^

webadmin/admin.js: lib.ts webadmin/api.ts webadmin/admin.ts
./tsc.sh $@ lib.ts webadmin/api.ts webadmin/admin.ts
./tsc.sh $@ $^

webaccount/account.js: lib.ts webaccount/api.ts webaccount/account.ts
./tsc.sh $@ lib.ts webaccount/api.ts webaccount/account.ts
./tsc.sh $@ $^

frontend: node_modules/.bin/tsc webadmin/admin.js webaccount/account.js webmail/webmail.js webmail/msg.js webmail/text.js

install-apidiff:
CGO_ENABLED=0 go install golang.org/x/exp/cmd/apidiff@v0.0.0-20231206192017-f3f8817b8deb
frontend: webadmin/admin.js webaccount/account.js webmail/webmail.js webmail/msg.js webmail/text.js

genapidiff:
# needs file next.txt containing next version number, and golang.org/x/exp/cmd/apidiff@v0.0.0-20231206192017-f3f8817b8deb installed
./apidiff.sh

docker:
@@ -166,17 +135,17 @@ genwebsite:
./genwebsite.sh

buildall:
CGO_ENABLED=0 GOOS=linux GOARCH=arm go build
CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build
CGO_ENABLED=0 GOOS=linux GOARCH=386 go build
CGO_ENABLED=0 GOOS=openbsd GOARCH=amd64 go build
CGO_ENABLED=0 GOOS=freebsd GOARCH=amd64 go build
CGO_ENABLED=0 GOOS=netbsd GOARCH=amd64 go build
CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 go build
CGO_ENABLED=0 GOOS=dragonfly GOARCH=amd64 go build
CGO_ENABLED=0 GOOS=illumos GOARCH=amd64 go build
CGO_ENABLED=0 GOOS=solaris GOARCH=amd64 go build
CGO_ENABLED=0 GOOS=aix GOARCH=ppc64 go build
CGO_ENABLED=0 GOOS=windows GOARCH=amd64 go build
GOOS=linux GOARCH=arm go build
GOOS=linux GOARCH=arm64 go build
GOOS=linux GOARCH=amd64 go build
GOOS=linux GOARCH=386 go build
GOOS=openbsd GOARCH=amd64 go build
GOOS=freebsd GOARCH=amd64 go build
GOOS=netbsd GOARCH=amd64 go build
GOOS=darwin GOARCH=amd64 go build
GOOS=dragonfly GOARCH=amd64 go build
GOOS=illumos GOARCH=amd64 go build
GOOS=solaris GOARCH=amd64 go build
GOOS=aix GOARCH=ppc64 go build
GOOS=windows GOARCH=amd64 go build
# no plan9 for now
README.md (144 lines changed)
@@ -19,22 +19,20 @@ See Quickstart below to get started.
(similar to greylisting). Rejected emails are stored in a mailbox called Rejects
for a short period, helping with misclassified legitimate synchronous
signup/login/transactional emails.
- Internationalized email (EAI), with unicode in email address usernames
- Internationalized email, with unicode in email address usernames
("localparts"), and in domain names (IDNA).
- Automatic TLS with ACME, for use with Let's Encrypt and other CA's.
- DANE and MTA-STS for inbound and outbound delivery over SMTP with STARTTLS,
including REQUIRETLS and with incoming/outgoing TLSRPT reporting.
- Web admin interface that helps you set up your domains, accounts and list
aliases (instructions to create DNS records, configure
SPF/DKIM/DMARC/TLSRPT/MTA-STS), for status information, and modifying the
configuration file.
- Web admin interface that helps you set up your domains and accounts
(instructions to create DNS records, configure
SPF/DKIM/DMARC/TLSRPT/MTA-STS), for status information, managing
accounts/domains, and modifying the configuration file.
- Account autodiscovery (with SRV records, Microsoft-style, Thunderbird-style,
and Apple device management profiles) for easy account setup (though client
support is limited).
- Webserver with serving static files and forwarding requests (reverse
proxy), so port 443 can also be used to serve websites.
- Simple HTTP/JSON API for sending transactional email and receiving delivery
events and incoming messages (webapi and webhooks).
- Prometheus metrics and structured logging for operational insight.
- "mox localserve" subcommand for running mox locally for email-related
testing/developing, including pedantic mode.
@@ -99,7 +97,7 @@ for other platforms.
# Compiling

You can easily (cross) compile mox yourself. You need a recent Go toolchain
installed. Run `go version`, it must be >= 1.23. Download the latest version
installed. Run `go version`, it must be >= 1.20. Download the latest version
from https://go.dev/dl/ or see https://go.dev/doc/manage-install.

To download the source code of the latest release, and compile it to binary "mox":
@@ -125,53 +123,40 @@ It is important to run with docker host networking, so mox can use the public
IPs and has correct remote IP information for incoming connections (important
for junk filtering and rate-limiting).

# Development
# Future/development

See develop.txt for instructions/tips for developing on mox.
Mox will receive funding for essentially full-time continued work from August
2023 to August 2024 through NLnet/EU's NGI0 Entrust, see
https://nlnet.nl/project/Mox/.

# Sponsors
## Roadmap

Thanks to NLnet foundation, the European Commission's NGI programme, and the
Netherlands Ministry of the Interior and Kingdom Relations for financial
support:

- 2024/2025, NLnet NGI0 Zero Core, https://nlnet.nl/project/Mox-Automation/
- 2024, NLnet e-Commons Fund, https://nlnet.nl/project/Mox-API/
- 2023/2024, NLnet NGI0 Entrust, https://nlnet.nl/project/Mox/

# Roadmap

- "mox setup" command, using admin web interface for interactive setup
- Automate DNS management, for setup and maintenance, such as DANE/DKIM key rotation
- Config options for "transactional email domains", for which mox will only
send messages
- Encrypted storage of files (email messages, TLS keys), also with per account keys
- Recognize common deliverability issues and help postmasters solve them
- JMAP, IMAP OBJECTID extension, IMAP JMAPACCESS extension
- Webmail improvements
- HTTP-based API for sending messages and receiving delivery feedback
- Calendaring with CalDAV/iCal
- Introbox, to which first-time senders are delivered
- More IMAP extensions (PREVIEW, WITHIN, IMPORTANT, COMPRESS=DEFLATE,
CREATE-SPECIAL-USE, SAVEDATE, UNAUTHENTICATE, REPLACE, QUOTA, NOTIFY,
MULTIAPPEND, OBJECTID, MULTISEARCH, THREAD, SORT)
- ARC, with forwarded email from trusted source
- Forwarding (to an external address)
- Add special IMAP mailbox ("Queue?") that contains queued but
undelivered messages, updated with IMAP flags/keywords/tags and message headers.
- External addresses in aliases/lists.
- Sieve for filtering (for now see Rulesets in the account config)
- Autoresponder (out of office/vacation)
- Mailing list manager
- IMAP extensions for "online"/non-syncing/webmail clients (SORT (including
DISPLAYFROM, DISPLAYTO), THREAD, PARTIAL, CONTEXT=SEARCH CONTEXT=SORT ESORT,
FILTERS)
- IMAP ACL support, for account sharing (interacts with many extensions and code)
- Improve support for mobile clients with extensions: IMAP URLAUTH, SMTP
CHUNKING and BINARYMIME, IMAP CATENATE
- OAUTH2 support, for single sign on
- Privilege separation, isolating parts of the application to more restricted
sandbox (e.g. new unauthenticated connections)
- Using mox as backup MX
- Sieve for filtering (for now see Rulesets in the account config)
- ARC, with forwarded email from trusted source
- JMAP
- Milter support, for integration with external tools
- SMTP DSN extension
- IMAP extensions for "online"/non-syncing/webmail clients (SORT (including
DISPLAYFROM, DISPLAYTO), THREAD, PARTIAL, CONTEXT=SEARCH CONTEXT=SORT ESORT,
FILTERS)
- IMAP Sieve extension, to run Sieve scripts after message changes (not only
new deliveries)
- OAUTH2 support, for single sign on
- Forwarding (to an external address)
- Improve support for mobile clients with extensions: IMAP URLAUTH, SMTP
CHUNKING and BINARYMIME, IMAP CATENATE
- Mailing list manager

There are many smaller improvements to make as well, search for "todo" in the code.
@@ -180,10 +165,12 @@ There are many smaller improvements to make as well, search for "todo" in the code.
There is currently no plan to implement the following. Though this may
change in the future.

- Functioning as an SMTP relay without authentication
- Functioning as SMTP relay
- POP3
- Delivery to (unix) OS system users (mbox/Maildir)
- Delivery to (unix) OS system users
- Support for pluggable delivery mechanisms
- iOS Mail push notifications (with XAPPLEPUSHSERVICE undocumented imap
extension and hard to get APNS certificate)


# FAQ - Frequently Asked Questions
@@ -295,8 +282,7 @@ MIT license (like mox), and have the rights to do so.

## Where can I discuss mox?

Join #mox on irc.oftc.net, or #mox:matrix.org (https://matrix.to/#/#mox:matrix.org),
or #mox on the "Gopher slack".
Join #mox on irc.oftc.net, or #mox:matrix.org, or #mox on the "Gopher slack".

For bug reports, please file an issue at https://github.com/mjl-/mox/issues/new.
@@ -354,18 +340,15 @@ in place and restart. If manual actions are required, the release notes mention
them. Check the release notes of all versions between your current installation
and the release you're upgrading to.

Before upgrading, make a backup of the config & data directory with `mox backup
<destdir>`. This copies all files from the config directory to
`<destdir>/config`, and creates `<destdir>/data` with consistent snapshots of
the database files, and message files from the outgoing queue and accounts.
Using the new mox binary, run `mox verifydata <destdir>/data` (do NOT use the
"live" data directory!) for a dry run. If this fails, an upgrade will probably
fail too.

Important: verifydata with the new mox binary can modify the database files
(due to automatic schema upgrades). So make a fresh backup again before the
actual upgrade. See the help output of the "backup" and "verifydata" commands
for more details.
Before upgrading, make a backup of the data directory with `mox backup
<destdir>`. This writes consistent snapshots of the database files, and
duplicates message files from the outgoing queue and accounts. Using the new
mox binary, run `mox verifydata <backupdir>` (do NOT use the "live" data
directory!) for a dry run. If this fails, an upgrade will probably fail too.
Important: verifydata with the new mox binary can modify the database files (due
to automatic schema upgrades). So make a fresh backup again before the actual
upgrade. See the help output of the "backup" and "verifydata" commands for more
details.

During backup, message files are hardlinked if possible, and copied otherwise.
Using a destination directory like `data/tmp/backup` increases the odds
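Sketched as shell commands, the backup-verify-upgrade dance described above looks roughly like this (the "mox-new" binary name and directory names are illustrative, not prescribed by the README):

    # illustrative sketch; ./mox is the running binary, ./mox-new the release being upgraded to
    ./mox backup data/tmp/backup                # consistent snapshot of config and data
    ./mox-new verifydata data/tmp/backup/data   # dry run with the NEW binary, never against the live data directory
    ./mox backup data/tmp/backup2               # verifydata may have modified the first copy, so back up again
    # then stop mox, replace the binary, and restart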
@@ -444,41 +427,6 @@ the SMTP transaction that explains why. In the case of big email providers the
error message often has instructions on how to prove to them you are a
legitimate sender.

## Can mox deliver through a smarthost?

Yes, you can configure a "Transport" in mox.conf and configure "Routes" in
domains.conf to send some or all messages through the transport. A transport
can be an SMTP relay or authenticated submission, or making mox make outgoing
connections through a SOCKS proxy.

For an example, see https://www.xmox.nl/config/#hdr-example-transport. For
details about Transports and Routes, see
https://www.xmox.nl/config/#cfg-mox-conf-Transports and
https://www.xmox.nl/config/#cfg-domains-conf-Routes.

Remember to add the IP addresses of the transport to the SPF records of your
domains. Keep in mind some 3rd party submission servers may mishandle your
messages, for example by replacing your Message-Id header and thereby
invalidating your DKIM-signatures, or rejecting messages with more than one
DKIM-signature.
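For orientation, a transport plus route can look like the sketch below. The field names here are an assumption from memory, not taken from this README; the config reference linked above is authoritative:

    # mox.conf (sketch, field names assumed): an authenticated submission relay
    Transports:
        relay:
            Submissions:
                Host: smtp.relay.example
                Port: 465
                Auth:
                    Username: user@relay.example
                    Password: secret

    # domains.conf (sketch): send matching outgoing messages via the transport named above
    Routes:
        -
            Transport: relay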
## Can I use mox to send transactional email?

Yes. While you can use SMTP submission to send messages you've composed
yourself, and monitor a mailbox for DSNs, a more convenient option is to use
the mox HTTP/JSON-based webapi and webhooks.

The mox webapi can be used to send outgoing messages that mox composes. The web
api can also be used to deal with messages stored in an account, like changing
message flags, retrieving messages in parsed form or individual parts of
multipart messages, or moving messages to another mailbox or deleting messages
altogether.

Mox webhooks can be used to receive updates about incoming and outgoing
deliveries. Mox can automatically manage per account suppression lists.

See https://www.xmox.nl/features/#hdr-webapi-and-webhooks for details.

## Can I use existing TLS certificates/keys?

Yes. The quickstart command creates a config that uses ACME with Let's Encrypt,
@@ -540,13 +488,3 @@ ensuring they don't become too large. The message index database file for an
account is at `data/accounts/<account>/index.db`, accessed with the bstore
database library, which uses bbolt (formerly BoltDB) for storage, a
transactional key/value library/file format inspired by LMDB.

## How do I block IPs with authentication failures with fail2ban?

Mox includes a rate limiter for IPs/networks that cause too many authentication
failures. It automatically unblocks such IPs/networks after a while. So you may
not need fail2ban. If you want to use fail2ban, you could use this snippet:

    [Definition]
    failregex = .*failed authentication attempt.*remote=<HOST>
    ignoreregex =
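A jail that uses that filter could look like the following; the file locations, log path and limits are illustrative assumptions, not mox defaults:

    # /etc/fail2ban/filter.d/mox.conf: the [Definition] snippet above
    # /etc/fail2ban/jail.d/mox.local (illustrative values):
    [mox]
    enabled  = true
    filter   = mox
    logpath  = /home/mox/mox.log
    port     = smtp,submission,submissions,imap,imaps,http,https
    maxretry = 10
    findtime = 10m
    bantime  = 1h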
admin/admin.go (1158 lines changed; file diff suppressed because it is too large)
@ -1,175 +0,0 @@
|
||||
package admin
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"maps"
|
||||
"slices"
|
||||
|
||||
"github.com/mjl-/mox/config"
|
||||
"github.com/mjl-/mox/dns"
|
||||
"github.com/mjl-/mox/mox-"
|
||||
)
|
||||
|
||||
type TLSMode uint8
|
||||
|
||||
const (
|
||||
TLSModeImmediate TLSMode = 0
|
||||
TLSModeSTARTTLS TLSMode = 1
|
||||
TLSModeNone TLSMode = 2
|
||||
)
|
||||
|
||||
type ProtocolConfig struct {
|
||||
Host dns.Domain
|
||||
Port int
|
||||
TLSMode TLSMode
|
||||
EnabledOnHTTPS bool
|
||||
}
|
||||
|
||||
type ClientConfig struct {
|
||||
IMAP ProtocolConfig
|
||||
Submission ProtocolConfig
|
||||
}
|
||||
|
||||
// ClientConfigDomain returns a single IMAP and Submission client configuration for
|
||||
// a domain.
|
||||
func ClientConfigDomain(d dns.Domain) (rconfig ClientConfig, rerr error) {
|
||||
var haveIMAP, haveSubmission bool
|
||||
|
||||
domConf, ok := mox.Conf.Domain(d)
|
||||
if !ok {
|
||||
return ClientConfig{}, fmt.Errorf("%w: unknown domain", ErrRequest)
|
||||
}
|
||||
|
||||
gather := func(l config.Listener) (done bool) {
|
||||
host := mox.Conf.Static.HostnameDomain
|
||||
if l.Hostname != "" {
|
||||
host = l.HostnameDomain
|
||||
}
|
||||
if domConf.ClientSettingsDomain != "" {
|
||||
host = domConf.ClientSettingsDNSDomain
|
||||
}
|
||||
if !haveIMAP && l.IMAPS.Enabled {
|
||||
rconfig.IMAP.Host = host
|
||||
rconfig.IMAP.Port = config.Port(l.IMAPS.Port, 993)
|
||||
rconfig.IMAP.TLSMode = TLSModeImmediate
|
||||
rconfig.IMAP.EnabledOnHTTPS = l.IMAPS.EnabledOnHTTPS
|
||||
haveIMAP = true
|
||||
}
|
||||
if !haveIMAP && l.IMAP.Enabled {
|
||||
rconfig.IMAP.Host = host
|
||||
rconfig.IMAP.Port = config.Port(l.IMAP.Port, 143)
|
||||
rconfig.IMAP.TLSMode = TLSModeSTARTTLS
|
||||
if l.TLS == nil {
|
||||
rconfig.IMAP.TLSMode = TLSModeNone
|
||||
}
|
||||
haveIMAP = true
|
||||
}
|
||||
if !haveSubmission && l.Submissions.Enabled {
|
||||
rconfig.Submission.Host = host
|
||||
rconfig.Submission.Port = config.Port(l.Submissions.Port, 465)
|
||||
rconfig.Submission.TLSMode = TLSModeImmediate
|
||||
rconfig.Submission.EnabledOnHTTPS = l.Submissions.EnabledOnHTTPS
|
||||
haveSubmission = true
|
||||
}
|
||||
if !haveSubmission && l.Submission.Enabled {
|
||||
rconfig.Submission.Host = host
|
||||
rconfig.Submission.Port = config.Port(l.Submission.Port, 587)
|
||||
rconfig.Submission.TLSMode = TLSModeSTARTTLS
|
||||
if l.TLS == nil {
|
||||
rconfig.Submission.TLSMode = TLSModeNone
|
||||
}
|
||||
haveSubmission = true
|
||||
}
|
||||
return haveIMAP && haveSubmission
|
||||
}
|
||||
|
||||
// Look at the public listener first. Most likely the intended configuration.
|
||||
if public, ok := mox.Conf.Static.Listeners["public"]; ok {
|
||||
if gather(public) {
|
||||
return
|
||||
}
|
||||
}
|
||||
// Go through the other listeners in consistent order.
|
||||
names := slices.Sorted(maps.Keys(mox.Conf.Static.Listeners))
|
||||
for _, name := range names {
|
||||
if gather(mox.Conf.Static.Listeners[name]) {
|
||||
return
|
||||
}
|
||||
}
|
||||
return ClientConfig{}, fmt.Errorf("%w: no listeners found for imap and/or submission", ErrRequest)
|
||||
}
|
||||
|
||||
// ClientConfigs holds the client configuration for IMAP/Submission for a
|
||||
// domain.
|
||||
type ClientConfigs struct {
|
||||
Entries []ClientConfigsEntry
|
||||
}
|
||||
|
||||
type ClientConfigsEntry struct {
|
||||
Protocol string
|
||||
Host dns.Domain
|
||||
Port int
|
||||
Listener string
|
||||
Note string
|
||||
}
|
||||
|
||||
// ClientConfigsDomain returns the client configs for IMAP/Submission for a
|
||||
// domain.
|
||||
func ClientConfigsDomain(d dns.Domain) (ClientConfigs, error) {
|
||||
domConf, ok := mox.Conf.Domain(d)
|
||||
if !ok {
|
||||
return ClientConfigs{}, fmt.Errorf("%w: unknown domain", ErrRequest)
|
||||
}
|
||||
|
||||
c := ClientConfigs{}
|
||||
c.Entries = []ClientConfigsEntry{}
|
||||
var listeners []string
|
||||
|
||||
for name := range mox.Conf.Static.Listeners {
|
||||
listeners = append(listeners, name)
|
||||
}
|
||||
slices.Sort(listeners)
|
||||
|
||||
note := func(tls bool, requiretls bool) string {
|
||||
if !tls {
|
||||
return "plain text, no STARTTLS configured"
|
||||
}
|
||||
if requiretls {
|
||||
return "STARTTLS required"
|
||||
}
|
||||
return "STARTTLS optional"
|
||||
}
|
||||
|
||||
for _, name := range listeners {
|
||||
l := mox.Conf.Static.Listeners[name]
|
||||
host := mox.Conf.Static.HostnameDomain
|
||||
if l.Hostname != "" {
|
||||
host = l.HostnameDomain
|
||||
}
|
||||
if domConf.ClientSettingsDomain != "" {
|
||||
host = domConf.ClientSettingsDNSDomain
|
||||
}
|
||||
if l.Submissions.Enabled {
|
||||
note := "with TLS"
|
||||
if l.Submissions.EnabledOnHTTPS {
|
||||
note += "; also served on port 443 with TLS ALPN \"smtp\""
|
||||
}
|
||||
c.Entries = append(c.Entries, ClientConfigsEntry{"Submission (SMTP)", host, config.Port(l.Submissions.Port, 465), name, note})
|
||||
}
|
||||
if l.IMAPS.Enabled {
|
||||
note := "with TLS"
|
||||
if l.IMAPS.EnabledOnHTTPS {
|
||||
note += "; also served on port 443 with TLS ALPN \"imap\""
|
||||
}
|
||||
c.Entries = append(c.Entries, ClientConfigsEntry{"IMAP", host, config.Port(l.IMAPS.Port, 993), name, note})
|
||||
}
|
||||
if l.Submission.Enabled {
|
||||
c.Entries = append(c.Entries, ClientConfigsEntry{"Submission (SMTP)", host, config.Port(l.Submission.Port, 587), name, note(l.TLS != nil, !l.Submission.NoRequireSTARTTLS)})
|
||||
}
|
||||
if l.IMAP.Enabled {
|
||||
c.Entries = append(c.Entries, ClientConfigsEntry{"IMAP", host, config.Port(l.IMAPS.Port, 143), name, note(l.TLS != nil, !l.IMAP.NoRequireSTARTTLS)})
|
||||
}
|
||||
}
|
||||
|
||||
return c, nil
|
||||
}
|
@ -1,318 +0,0 @@
|
||||
package admin
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/ed25519"
|
||||
"crypto/rsa"
|
||||
"crypto/sha256"
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"github.com/mjl-/adns"
|
||||
|
||||
"github.com/mjl-/mox/config"
|
||||
"github.com/mjl-/mox/dkim"
|
||||
"github.com/mjl-/mox/dmarc"
|
||||
"github.com/mjl-/mox/dns"
|
||||
"github.com/mjl-/mox/mox-"
|
||||
"github.com/mjl-/mox/smtp"
|
||||
"github.com/mjl-/mox/spf"
|
||||
"github.com/mjl-/mox/tlsrpt"
|
||||
"slices"
|
||||
)
|
||||
|
||||
// todo: find a way to automatically create the dns records as it would greatly simplify setting up email for a domain. we could also dynamically make changes, e.g. providing grace periods after disabling a dkim key, only automatically removing the dkim dns key after a few days. but this requires some kind of api and authentication to the dns server. there doesn't appear to be a single commonly used api for dns management. each of the numerous cloud providers have their own APIs and rather large SKDs to use them. we don't want to link all of them in.
|
||||
|
||||
// DomainRecords returns text lines describing DNS records required for configuring
|
||||
// a domain.
|
||||
//
|
||||
// If certIssuerDomainName is set, CAA records to limit TLS certificate issuance to
|
||||
// that caID will be suggested. If acmeAccountURI is also set, CAA records also
|
||||
// restricting issuance to that account ID will be suggested.
|
||||
func DomainRecords(domConf config.Domain, domain dns.Domain, hasDNSSEC bool, certIssuerDomainName, acmeAccountURI string) ([]string, error) {
|
||||
d := domain.ASCII
|
||||
h := mox.Conf.Static.HostnameDomain.ASCII
|
||||
|
||||
// The first line with ";" is used by ../testdata/integration/moxacmepebble.sh and
|
||||
// ../testdata/integration/moxmail2.sh for selecting DNS records
|
||||
records := []string{
|
||||
"; Time To Live of 5 minutes, may be recognized if importing as a zone file.",
|
||||
"; Once your setup is working, you may want to increase the TTL.",
|
||||
"$TTL 300",
|
||||
"",
|
||||
}
|
||||
|
||||
if public, ok := mox.Conf.Static.Listeners["public"]; ok && public.TLS != nil && (len(public.TLS.HostPrivateRSA2048Keys) > 0 || len(public.TLS.HostPrivateECDSAP256Keys) > 0) {
|
||||
records = append(records,
|
||||
`; DANE: These records indicate that a remote mail server trying to deliver email`,
|
||||
`; with SMTP (TCP port 25) must verify the TLS certificate with DANE-EE (3), based`,
|
||||
`; on the certificate public key ("SPKI", 1) that is SHA2-256-hashed (1) to the`,
|
||||
`; hexadecimal hash. DANE-EE verification means only the certificate or public`,
|
||||
`; key is verified, not whether the certificate is signed by a (centralized)`,
|
||||
`; certificate authority (CA), is expired, or matches the host name.`,
|
||||
`;`,
|
||||
`; NOTE: Create the records below only once: They are for the machine, and apply`,
|
||||
`; to all hosted domains.`,
|
||||
)
|
||||
if !hasDNSSEC {
|
||||
records = append(records,
|
||||
";",
|
||||
"; WARNING: Domain does not appear to be DNSSEC-signed. To enable DANE, first",
|
||||
"; enable DNSSEC on your domain, then add the TLSA records. Records below have been",
|
||||
"; commented out.",
|
||||
)
|
||||
}
|
||||
addTLSA := func(privKey crypto.Signer) error {
|
||||
spkiBuf, err := x509.MarshalPKIXPublicKey(privKey.Public())
|
||||
if err != nil {
|
||||
return fmt.Errorf("marshal SubjectPublicKeyInfo for DANE record: %v", err)
|
||||
}
|
||||
sum := sha256.Sum256(spkiBuf)
|
||||
tlsaRecord := adns.TLSA{
|
||||
Usage: adns.TLSAUsageDANEEE,
|
||||
Selector: adns.TLSASelectorSPKI,
|
||||
MatchType: adns.TLSAMatchTypeSHA256,
|
||||
CertAssoc: sum[:],
|
||||
}
|
||||
var s string
|
||||
if hasDNSSEC {
|
||||
s = fmt.Sprintf("_25._tcp.%-*s TLSA %s", 20+len(d)-len("_25._tcp."), h+".", tlsaRecord.Record())
|
||||
} else {
|
||||
s = fmt.Sprintf(";; _25._tcp.%-*s TLSA %s", 20+len(d)-len(";; _25._tcp."), h+".", tlsaRecord.Record())
|
||||
}
|
||||
records = append(records, s)
|
||||
return nil
|
||||
}
|
||||
for _, privKey := range public.TLS.HostPrivateECDSAP256Keys {
|
||||
if err := addTLSA(privKey); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
for _, privKey := range public.TLS.HostPrivateRSA2048Keys {
|
||||
if err := addTLSA(privKey); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
records = append(records, "")
|
||||
}
|
||||
|
||||
if d != h {
|
||||
records = append(records,
|
||||
"; For the machine, only needs to be created once, for the first domain added:",
|
||||
"; ",
|
||||
"; SPF-allow host for itself, resulting in relaxed DMARC pass for (postmaster)",
|
||||
"; messages (DSNs) sent from host:",
|
||||
fmt.Sprintf(`%-*s TXT "v=spf1 a -all"`, 20+len(d), h+"."), // ../rfc/7208:2263 ../rfc/7208:2287
|
||||
"",
|
||||
)
|
||||
}
|
||||
if d != h && mox.Conf.Static.HostTLSRPT.ParsedLocalpart != "" {
|
||||
uri := url.URL{
|
||||
Scheme: "mailto",
|
||||
Opaque: smtp.NewAddress(mox.Conf.Static.HostTLSRPT.ParsedLocalpart, mox.Conf.Static.HostnameDomain).Pack(false),
|
||||
}
|
||||
tlsrptr := tlsrpt.Record{Version: "TLSRPTv1", RUAs: [][]tlsrpt.RUA{{tlsrpt.RUA(uri.String())}}}
|
||||
records = append(records,
|
||||
"; For the machine, only needs to be created once, for the first domain added:",
|
||||
"; ",
|
||||
"; Request reporting about success/failures of TLS connections to (MX) host, for DANE.",
|
||||
fmt.Sprintf(`_smtp._tls.%-*s TXT "%s"`, 20+len(d)-len("_smtp._tls."), h+".", tlsrptr.String()),
|
||||
"",
|
||||
)
|
||||
}
|
||||
|
||||
records = append(records,
|
||||
"; Deliver email for the domain to this host.",
|
||||
fmt.Sprintf("%s. MX 10 %s.", d, h),
|
||||
"",
|
||||
|
||||
"; Outgoing messages will be signed with the first two DKIM keys. The other two",
|
||||
"; configured for backup, switching to them is just a config change.",
|
||||
)
|
||||
var selectors []string
|
||||
for name := range domConf.DKIM.Selectors {
|
||||
selectors = append(selectors, name)
|
||||
}
|
||||
slices.Sort(selectors)
|
||||
for _, name := range selectors {
|
||||
sel := domConf.DKIM.Selectors[name]
|
||||
dkimr := dkim.Record{
|
||||
Version: "DKIM1",
|
||||
Hashes: []string{"sha256"},
|
||||
PublicKey: sel.Key.Public(),
|
||||
}
|
||||
if _, ok := sel.Key.(ed25519.PrivateKey); ok {
|
||||
dkimr.Key = "ed25519"
|
||||
} else if _, ok := sel.Key.(*rsa.PrivateKey); !ok {
|
||||
return nil, fmt.Errorf("unrecognized private key for DKIM selector %q: %T", name, sel.Key)
|
||||
}
|
||||
txt, err := dkimr.Record()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("making DKIM DNS TXT record: %v", err)
|
||||
}
|
||||
|
||||
if len(txt) > 100 {
|
||||
records = append(records,
|
||||
"; NOTE: The following is a single long record split over several lines for use",
|
||||
"; in zone files. When adding through a DNS operator web interface, combine the",
|
||||
"; strings into a single string, without ().",
|
||||
)
|
||||
}
|
||||
s := fmt.Sprintf("%s._domainkey.%s. TXT %s", name, d, mox.TXTStrings(txt))
|
||||
records = append(records, s)
|
||||
|
||||
}
|
||||
dmarcr := dmarc.DefaultRecord
|
||||
dmarcr.Policy = "reject"
|
||||
if domConf.DMARC != nil {
|
||||
uri := url.URL{
|
||||
Scheme: "mailto",
|
||||
Opaque: smtp.NewAddress(domConf.DMARC.ParsedLocalpart, domConf.DMARC.DNSDomain).Pack(false),
|
||||
}
|
||||
dmarcr.AggregateReportAddresses = []dmarc.URI{
|
||||
{Address: uri.String(), MaxSize: 10, Unit: "m"},
|
||||
}
|
||||
}
|
||||
dspfr := spf.Record{Version: "spf1"}
|
||||
for _, ip := range mox.DomainSPFIPs() {
|
||||
mech := "ip4"
|
||||
if ip.To4() == nil {
|
||||
mech = "ip6"
|
||||
}
|
||||
dspfr.Directives = append(dspfr.Directives, spf.Directive{Mechanism: mech, IP: ip})
|
||||
}
|
||||
dspfr.Directives = append(dspfr.Directives,
|
||||
spf.Directive{Mechanism: "mx"},
|
||||
spf.Directive{Qualifier: "~", Mechanism: "all"},
|
||||
)
|
||||
dspftxt, err := dspfr.Record()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("making domain spf record: %v", err)
|
||||
}
|
||||
records = append(records,
|
||||
"",
|
||||
|
||||
"; Specify the MX host is allowed to send for our domain and for itself (for DSNs).",
|
||||
"; ~all means softfail for anything else, which is done instead of -all to prevent older",
|
||||
"; mail servers from rejecting the message because they never get to looking for a dkim/dmarc pass.",
|
||||
fmt.Sprintf(`%s. TXT "%s"`, d, dspftxt),
|
||||
"",
|
||||
|
||||
"; Emails that fail the DMARC check (without aligned DKIM and without aligned SPF)",
|
||||
"; should be rejected, and request reports. If you email through mailing lists that",
|
||||
"; strip DKIM-Signature headers and don't rewrite the From header, you may want to",
|
||||
"; set the policy to p=none.",
|
||||
fmt.Sprintf(`_dmarc.%s. TXT "%s"`, d, dmarcr.String()),
|
||||
"",
|
||||
)
|
||||
|
||||
if sts := domConf.MTASTS; sts != nil {
|
||||
records = append(records,
|
||||
"; Remote servers can use MTA-STS to verify our TLS certificate with the",
|
||||
"; WebPKI pool of CA's (certificate authorities) when delivering over SMTP with",
|
||||
"; STARTTLS.",
|
||||
fmt.Sprintf(`mta-sts.%s. CNAME %s.`, d, h),
|
||||
fmt.Sprintf(`_mta-sts.%s. TXT "v=STSv1; id=%s"`, d, sts.PolicyID),
|
||||
"",
|
||||
)
|
||||
} else {
|
||||
records = append(records,
|
||||
"; Note: No MTA-STS to indicate TLS should be used. Either because disabled for the",
|
||||
"; domain or because mox.conf does not have a listener with MTA-STS configured.",
|
||||
"",
|
||||
)
|
||||
}
|
||||
|
||||
if domConf.TLSRPT != nil {
|
||||
uri := url.URL{
|
||||
Scheme: "mailto",
|
||||
Opaque: smtp.NewAddress(domConf.TLSRPT.ParsedLocalpart, domConf.TLSRPT.DNSDomain).Pack(false),
|
||||
}
|
||||
tlsrptr := tlsrpt.Record{Version: "TLSRPTv1", RUAs: [][]tlsrpt.RUA{{tlsrpt.RUA(uri.String())}}}
|
||||
records = append(records,
|
||||
"; Request reporting about TLS failures.",
|
||||
fmt.Sprintf(`_smtp._tls.%s. TXT "%s"`, d, tlsrptr.String()),
|
||||
"",
|
||||
)
|
||||
}
|
||||
|
||||
if domConf.ClientSettingsDomain != "" && domConf.ClientSettingsDNSDomain != mox.Conf.Static.HostnameDomain {
|
||||
records = append(records,
|
||||
"; Client settings will reference a subdomain of the hosted domain, making it",
|
||||
"; easier to migrate to a different server in the future by not requiring settings",
|
||||
"; in all clients to be updated.",
|
||||
fmt.Sprintf(`%-*s CNAME %s.`, 20+len(d), domConf.ClientSettingsDNSDomain.ASCII+".", h),
|
||||
"",
|
||||
)
|
||||
}
|
||||
|
||||
records = append(records,
|
||||
"; Autoconfig is used by Thunderbird. Autodiscover is (in theory) used by Microsoft.",
|
||||
fmt.Sprintf(`autoconfig.%s. CNAME %s.`, d, h),
|
||||
fmt.Sprintf(`_autodiscover._tcp.%s. SRV 0 1 443 %s.`, d, h),
|
||||
"",
|
||||
|
||||
// ../rfc/6186:133 ../rfc/8314:692
|
||||
"; For secure IMAP and submission autoconfig, point to mail host.",
|
||||
fmt.Sprintf(`_imaps._tcp.%s. SRV 0 1 993 %s.`, d, h),
|
||||
fmt.Sprintf(`_submissions._tcp.%s. SRV 0 1 465 %s.`, d, h),
|
||||
"",
|
||||
// ../rfc/6186:242
|
||||
"; Next records specify POP3 and non-TLS ports are not to be used.",
|
||||
"; These are optional and safe to leave out (e.g. if you have to click a lot in a",
|
||||
"; DNS admin web interface).",
|
||||
fmt.Sprintf(`_imap._tcp.%s. SRV 0 0 0 .`, d),
|
||||
fmt.Sprintf(`_submission._tcp.%s. SRV 0 0 0 .`, d),
|
||||
fmt.Sprintf(`_pop3._tcp.%s. SRV 0 0 0 .`, d),
|
||||
fmt.Sprintf(`_pop3s._tcp.%s. SRV 0 0 0 .`, d),
|
||||
)
|
||||
|
||||
if certIssuerDomainName != "" {
|
||||
// ../rfc/8659:18 for CAA records.
|
||||
records = append(records,
|
||||
"",
|
||||
"; Optional:",
|
||||
"; You could mark Let's Encrypt as the only Certificate Authority allowed to",
|
||||
"; sign TLS certificates for your domain.",
|
||||
fmt.Sprintf(`%s. CAA 0 issue "%s"`, d, certIssuerDomainName),
|
||||
)
|
||||
if acmeAccountURI != "" {
|
||||
// ../rfc/8657:99 for accounturi.
|
||||
// ../rfc/8657:147 for validationmethods.
|
||||
records = append(records,
|
||||
";",
|
||||
"; Optionally limit certificates for this domain to the account ID and methods used by mox.",
|
||||
fmt.Sprintf(`;; %s. CAA 0 issue "%s; accounturi=%s; validationmethods=tls-alpn-01,http-01"`, d, certIssuerDomainName, acmeAccountURI),
|
||||
";",
|
||||
"; Or alternatively only limit for email-specific subdomains, so you can use",
|
||||
"; other accounts/methods for other subdomains.",
|
||||
fmt.Sprintf(`;; autoconfig.%s. CAA 0 issue "%s; accounturi=%s; validationmethods=tls-alpn-01,http-01"`, d, certIssuerDomainName, acmeAccountURI),
|
||||
fmt.Sprintf(`;; mta-sts.%s. CAA 0 issue "%s; accounturi=%s; validationmethods=tls-alpn-01,http-01"`, d, certIssuerDomainName, acmeAccountURI),
|
||||
)
|
||||
if domConf.ClientSettingsDomain != "" && domConf.ClientSettingsDNSDomain != mox.Conf.Static.HostnameDomain {
|
||||
records = append(records,
|
||||
fmt.Sprintf(`;; %-*s CAA 0 issue "%s; accounturi=%s; validationmethods=tls-alpn-01,http-01"`, 20-3+len(d), domConf.ClientSettingsDNSDomain.ASCII, certIssuerDomainName, acmeAccountURI),
|
||||
)
|
||||
}
|
||||
if strings.HasSuffix(h, "."+d) {
|
||||
records = append(records,
|
||||
";",
|
||||
"; And the mail hostname.",
|
||||
fmt.Sprintf(`;; %-*s CAA 0 issue "%s; accounturi=%s; validationmethods=tls-alpn-01,http-01"`, 20-3+len(d), h+".", certIssuerDomainName, acmeAccountURI),
|
||||
)
|
||||
}
|
||||
} else {
|
||||
// The string "will be suggested" is used by
|
||||
// ../testdata/integration/moxacmepebble.sh and ../testdata/integration/moxmail2.sh
|
||||
// as end of DNS records.
|
||||
records = append(records,
|
||||
";",
|
||||
"; Note: After starting up, once an ACME account has been created, CAA records",
|
||||
"; that restrict issuance to the account will be suggested.",
|
||||
)
|
||||
}
|
||||
}
|
||||
return records, nil
|
||||
}
|
apidiff.sh (33 lines changed)
@ -2,37 +2,24 @@
|
||||
set -e
|
||||
|
||||
prevversion=$(go list -mod=readonly -m -f '{{ .Version }}' github.com/mjl-/mox@latest)
|
||||
nextversion=$(cat next.txt)
|
||||
if ! test -d tmp/mox-$prevversion; then
|
||||
mkdir -p tmp/mox-$prevversion
|
||||
git archive --format=tar $prevversion | tar -C tmp/mox-$prevversion -xf -
|
||||
fi
|
||||
(rm -r tmp/apidiff || exit 0)
|
||||
mkdir -p tmp/apidiff/$prevversion tmp/apidiff/next
|
||||
(rm apidiff/next.txt.new 2>/dev/null || exit 0)
|
||||
touch apidiff/next.txt.new
|
||||
(rm apidiff/$nextversion.txt || exit 0)
|
||||
(
|
||||
echo "Below are the incompatible changes between $prevversion and $nextversion, per package."
|
||||
echo
|
||||
) >>apidiff/$nextversion.txt
|
||||
for p in $(cat apidiff/packages.txt); do
|
||||
if ! test -d tmp/mox-$prevversion/$p; then
|
||||
continue
|
||||
fi
|
||||
(cd tmp/mox-$prevversion && apidiff -w ../apidiff/$prevversion/$p.api ./$p)
|
||||
apidiff -w tmp/apidiff/next/$p.api ./$p
|
||||
apidiff -incompatible tmp/apidiff/$prevversion/$p.api tmp/apidiff/next/$p.api >$p.diff
|
||||
if test -s $p.diff; then
|
||||
(
|
||||
echo '#' $p
|
||||
cat $p.diff
|
||||
echo
|
||||
) >>apidiff/next.txt.new
|
||||
fi
|
||||
rm $p.diff
|
||||
done
|
||||
if test -s apidiff/next.txt.new; then
|
||||
(
|
||||
echo "Below are the incompatible changes between $prevversion and next, per package."
|
||||
echo '#' $p
|
||||
apidiff -incompatible tmp/apidiff/$prevversion/$p.api tmp/apidiff/next/$p.api
|
||||
echo
|
||||
cat apidiff/next.txt.new
|
||||
) >apidiff/next.txt
|
||||
rm apidiff/next.txt.new
|
||||
else
|
||||
mv apidiff/next.txt.new apidiff/next.txt
|
||||
fi
|
||||
) >>apidiff/$nextversion.txt
|
||||
done
|
||||
|
@ -1,5 +0,0 @@
|
||||
Below are the incompatible changes between v0.0.15 and next, per package.
|
||||
|
||||
# smtpclient
|
||||
- GatherDestinations: changed from func(context.Context, *log/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.IPDomain) (bool, bool, bool, github.com/mjl-/mox/dns.Domain, []github.com/mjl-/mox/dns.IPDomain, bool, error) to func(context.Context, *log/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.IPDomain) (bool, bool, bool, github.com/mjl-/mox/dns.Domain, []HostPref, bool, error)
|
||||
|
@ -16,5 +16,3 @@ spf
|
||||
subjectpass
|
||||
tlsrpt
|
||||
updates
|
||||
webapi
|
||||
webhook
|
||||
|
@ -1,45 +0,0 @@
|
||||
Below are the incompatible changes between v0.0.10 and v0.0.11, per package.
|
||||
|
||||
# dane
|
||||
|
||||
# dmarc
|
||||
- DMARCPolicy: removed
|
||||
|
||||
# dmarcrpt
|
||||
|
||||
# dns
|
||||
|
||||
# dnsbl
|
||||
|
||||
# iprev
|
||||
|
||||
# message
|
||||
- (*Composer).TextPart: changed from func(string) ([]byte, string, string) to func(string, string) ([]byte, string, string)
|
||||
- From: changed from func(*log/slog.Logger, bool, io.ReaderAt) (github.com/mjl-/mox/smtp.Address, *Envelope, net/textproto.MIMEHeader, error) to func(*log/slog.Logger, bool, io.ReaderAt, *Part) (github.com/mjl-/mox/smtp.Address, *Envelope, net/textproto.MIMEHeader, error)
|
||||
- NewComposer: changed from func(io.Writer, int64) *Composer to func(io.Writer, int64, bool) *Composer
|
||||
|
||||
# mtasts
|
||||
- STSMX: removed
|
||||
|
||||
# publicsuffix
|
||||
|
||||
# ratelimit
|
||||
|
||||
# sasl
|
||||
|
||||
# scram
|
||||
|
||||
# smtp
|
||||
- SeMsg6ConversoinUnsupported3: removed
|
||||
|
||||
# smtpclient
|
||||
- GatherIPs: changed from func(context.Context, *log/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.IPDomain, map[string][]net.IP) (bool, bool, github.com/mjl-/mox/dns.Domain, []net.IP, bool, error) to func(context.Context, *log/slog.Logger, github.com/mjl-/mox/dns.Resolver, string, github.com/mjl-/mox/dns.IPDomain, map[string][]net.IP) (bool, bool, github.com/mjl-/mox/dns.Domain, []net.IP, bool, error)
|
||||
|
||||
# spf
|
||||
|
||||
# subjectpass
|
||||
|
||||
# tlsrpt
|
||||
|
||||
# updates
|
||||
|
@ -1,43 +0,0 @@
|
||||
Below are the incompatible changes between v0.0.11 and next, per package.
|
||||
|
||||
# dane
|
||||
|
||||
# dmarc
|
||||
|
||||
# dmarcrpt
|
||||
|
||||
# dns
|
||||
|
||||
# dnsbl
|
||||
|
||||
# iprev
|
||||
|
||||
# message
|
||||
- (*HeaderWriter).AddWrap: changed from func([]byte) to func([]byte, bool)
|
||||
|
||||
# mtasts
|
||||
|
||||
# publicsuffix
|
||||
|
||||
# ratelimit
|
||||
|
||||
# sasl
|
||||
|
||||
# scram
|
||||
|
||||
# smtp
|
||||
|
||||
# smtpclient
|
||||
|
||||
# spf
|
||||
|
||||
# subjectpass
|
||||
|
||||
# tlsrpt
|
||||
|
||||
# updates
|
||||
|
||||
# webapi
|
||||
|
||||
# webhook
|
||||
|
@ -1,5 +0,0 @@
|
||||
Below are the incompatible changes between v0.0.13 and next, per package.
|
||||
|
||||
# webhook
|
||||
- PartStructure: removed
|
||||
|
@ -1,7 +0,0 @@
|
||||
Below are the incompatible changes between v0.0.14 and next, per package.
|
||||
|
||||
# message
|
||||
- Part.ContentDescription: changed from string to *string
|
||||
- Part.ContentID: changed from string to *string
|
||||
- Part.ContentTransferEncoding: changed from string to *string
|
||||
|
@ -42,24 +42,6 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
metricMissingServerName = promauto.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Name: "mox_autotls_missing_servername_total",
|
||||
Help: "Number of failed TLS connection attempts with missing SNI where no fallback hostname was configured.",
|
||||
},
|
||||
)
|
||||
metricUnknownServerName = promauto.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Name: "mox_autotls_unknown_servername_total",
|
||||
Help: "Number of failed TLS connection attempts with an unrecognized SNI name where no fallback hostname was configured.",
|
||||
},
|
||||
)
|
||||
metricCertRequestErrors = promauto.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Name: "mox_autotls_cert_request_errors_total",
|
||||
Help: "Number of errors trying to retrieve a certificate for a hostname, possibly ACME verification errors.",
|
||||
},
|
||||
)
|
||||
metricCertput = promauto.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Name: "mox_autotls_certput_total",
|
||||
@ -72,6 +54,7 @@ var (
|
||||
// certificates for allowlisted hosts.
|
||||
type Manager struct {
|
||||
ACMETLSConfig *tls.Config // For serving HTTPS on port 443, which is required for certificate requests to succeed.
|
||||
TLSConfig *tls.Config // For all TLS servers not used for validating ACME requests. Like SMTP and IMAP (including with STARTTLS) and HTTPS on ports other than 443.
|
||||
Manager *autocert.Manager
|
||||
|
||||
shutdown <-chan struct{}
|
||||
@ -94,7 +77,7 @@ type Manager struct {
|
||||
// host, or a newly generated key.
|
||||
//
|
||||
// When shutdown is closed, no new TLS connections can be created.
|
||||
func Load(log mlog.Log, name, acmeDir, contactEmail, directoryURL string, eabKeyID string, eabKey []byte, getPrivateKey func(host string, keyType autocert.KeyType) (crypto.Signer, error), shutdown <-chan struct{}) (*Manager, error) {
|
||||
func Load(name, acmeDir, contactEmail, directoryURL string, eabKeyID string, eabKey []byte, getPrivateKey func(host string, keyType autocert.KeyType) (crypto.Signer, error), shutdown <-chan struct{}) (*Manager, error) {
|
||||
if directoryURL == "" {
|
||||
return nil, fmt.Errorf("empty ACME directory URL")
|
||||
}
|
||||
@ -107,10 +90,7 @@ func Load(log mlog.Log, name, acmeDir, contactEmail, directoryURL string, eabKey
|
||||
var key crypto.Signer
|
||||
f, err := os.Open(p)
|
||||
if f != nil {
|
||||
defer func() {
|
||||
err := f.Close()
|
||||
log.Check(err, "closing identify key file")
|
||||
}()
|
||||
defer f.Close()
|
||||
}
|
||||
if err != nil && os.IsNotExist(err) {
|
||||
key, err = ecdsa.GenerateKey(elliptic.P256(), cryptorand.Reader)
|
||||
@ -178,109 +158,46 @@ func Load(log mlog.Log, name, acmeDir, contactEmail, directoryURL string, eabKey
|
||||
}
|
||||
}
|
||||
|
||||
a := &Manager{
|
||||
Manager: m,
|
||||
shutdown: shutdown,
|
||||
hosts: map[dns.Domain]struct{}{},
|
||||
}
|
||||
m.HostPolicy = a.HostPolicy
|
||||
acmeTLSConfig := *m.TLSConfig()
|
||||
acmeTLSConfig.GetCertificate = func(hello *tls.ClientHelloInfo) (*tls.Certificate, error) {
|
||||
return a.loggingGetCertificate(hello, dns.Domain{}, false, false)
|
||||
}
|
||||
a.ACMETLSConfig = &acmeTLSConfig
|
||||
return a, nil
|
||||
}
|
||||
loggingGetCertificate := func(hello *tls.ClientHelloInfo) (*tls.Certificate, error) {
|
||||
log := mlog.New("autotls", nil).WithContext(hello.Context())
|
||||
|
||||
// loggingGetCertificate is a helper to implement crypto/tls.Config.GetCertificate,
|
||||
// optionally falling back to a certificate for fallbackHostname in case SNI is
|
||||
// absent or for an unknown hostname.
|
||||
func (m *Manager) loggingGetCertificate(hello *tls.ClientHelloInfo, fallbackHostname dns.Domain, fallbackNoSNI, fallbackUnknownSNI bool) (*tls.Certificate, error) {
|
||||
log := mlog.New("autotls", nil).WithContext(hello.Context()).With(
slog.Any("localaddr", hello.Conn.LocalAddr()),
slog.Any("supportedprotos", hello.SupportedProtos),
slog.String("servername", hello.ServerName),
)

// If we can't find a certificate (depending on fallback parameters), we return a
// nil certificate and nil error, which crypto/tls turns into a TLS alert
// "unrecognized name", which can be interpreted by clients as a hint that they are
// using the wrong hostname, or a certificate is missing. ../rfc/9325:578

// IP addresses for ServerName are not allowed, but happen in practice. If we
// should be lenient (fallbackUnknownSNI), we switch to the fallback hostname,
// otherwise we return an error. We don't want to pass IP addresses to
// GetCertificate because it will return an error for IPv6 addresses.
// ../rfc/6066:367 ../rfc/4366:535
if net.ParseIP(hello.ServerName) != nil {
if fallbackUnknownSNI {
hello.ServerName = fallbackHostname.ASCII
log = log.With(slog.String("servername", hello.ServerName))
} else {
log.Debug("tls request with ip for server name, rejecting")
return nil, fmt.Errorf("invalid ip address for sni server name")
}
}

if hello.ServerName == "" && fallbackNoSNI {
hello.ServerName = fallbackHostname.ASCII
log = log.With(slog.String("servername", hello.ServerName))
}

// Handle missing SNI to prevent logging an error below.
if hello.ServerName == "" {
metricMissingServerName.Inc()
log.Debug("tls request without sni server name, rejecting")
return nil, nil
}

cert, err := m.Manager.GetCertificate(hello)
if err != nil && errors.Is(err, errHostNotAllowed) {
if !fallbackUnknownSNI {
metricUnknownServerName.Inc()
log.Debugx("requesting certificate", err)
return nil, nil
// Handle missing SNI to prevent logging an error below.
// At startup, during config initialization, we already adjust the tls config to
// inject the listener hostname if there isn't one in the TLS client hello. This is
// common for SMTP STARTTLS connections, which often do not care about the
// verification of the certificate.
if hello.ServerName == "" {
log.Debug("tls request without sni servername, rejecting", slog.Any("localaddr", hello.Conn.LocalAddr()), slog.Any("supportedprotos", hello.SupportedProtos))
return nil, fmt.Errorf("sni server name required")
}

// Some legitimate email deliveries over SMTP use an unknown SNI, e.g. a bare
// domain instead of the MX hostname. We "should" return an error, but that would
// break email delivery, so we use the fallback name if it is configured.
// ../rfc/9325:589

log = log.With(slog.String("servername", hello.ServerName))
log.Debug("certificate for unknown hostname, using fallback hostname")
hello.ServerName = fallbackHostname.ASCII
cert, err = m.Manager.GetCertificate(hello)
cert, err := m.GetCertificate(hello)
if err != nil {
metricCertRequestErrors.Inc()
log.Errorx("requesting certificate for fallback hostname", err)
} else {
log.Debug("using certificate for fallback hostname")
if errors.Is(err, errHostNotAllowed) {
log.Debugx("requesting certificate", err, slog.String("host", hello.ServerName))
} else {
log.Errorx("requesting certificate", err, slog.String("host", hello.ServerName))
}
}
return cert, err
} else if err != nil {
metricCertRequestErrors.Inc()
log.Errorx("requesting certificate", err)
}
return cert, err
}

// TLSConfig returns a TLS server config that optionally returns a certificate for
// fallbackHostname if no SNI was done, or for an unknown hostname.
//
// If fallbackNoSNI is set, TLS connections without SNI will use a certificate for
// fallbackHostname. Otherwise, connections without SNI will fail with a message
// that no TLS certificate is available.
//
// If fallbackUnknownSNI is set, TLS connections with an SNI hostname that is not
// allowlisted will instead use a certificate for fallbackHostname. Otherwise, such
// TLS connections will fail.
func (m *Manager) TLSConfig(fallbackHostname dns.Domain, fallbackNoSNI, fallbackUnknownSNI bool) *tls.Config {
return &tls.Config{
GetCertificate: func(hello *tls.ClientHelloInfo) (*tls.Certificate, error) {
return m.loggingGetCertificate(hello, fallbackHostname, fallbackNoSNI, fallbackUnknownSNI)
},
acmeTLSConfig := *m.TLSConfig()
acmeTLSConfig.GetCertificate = loggingGetCertificate

tlsConfig := tls.Config{
GetCertificate: loggingGetCertificate,
}

a := &Manager{
ACMETLSConfig: &acmeTLSConfig,
TLSConfig: &tlsConfig,
Manager: m,
shutdown: shutdown,
hosts: map[dns.Domain]struct{}{},
}
m.HostPolicy = a.HostPolicy
return a, nil
}
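The fallback behaviour documented above comes down to rewriting hello.ServerName before asking the underlying certificate manager for a certificate. As a rough illustration of that idea outside mox (all names below are invented for the sketch; this is not the project's code), a plain crypto/tls config could do the same:

package example

import (
	"crypto/tls"
	"fmt"
)

// Sketch only: serve a certificate by SNI name, falling back to a fixed
// hostname when the client sent no SNI or an unknown name, instead of
// returning (nil, nil), which crypto/tls turns into an "unrecognized name" alert.
func fallbackTLSConfig(certs map[string]*tls.Certificate, fallbackHostname string) *tls.Config {
	return &tls.Config{
		GetCertificate: func(hello *tls.ClientHelloInfo) (*tls.Certificate, error) {
			name := hello.ServerName
			if _, ok := certs[name]; !ok {
				name = fallbackHostname
			}
			if cert, ok := certs[name]; ok {
				return cert, nil
			}
			return nil, fmt.Errorf("no certificate for %q and no usable fallback", name)
		},
	}
}
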

// CertAvailable checks whether a non-expired ECDSA certificate is available in the
@ -363,12 +280,12 @@ func (m *Manager) SetAllowedHostnames(log mlog.Log, resolver dns.Resolver, hostn
for _, h := range added {
ips, _, err := resolver.LookupIP(ctx, "ip", h.ASCII+".")
if err != nil {
log.Warnx("acme tls cert validation for host may fail due to dns lookup error", err, slog.Any("host", h))
log.Errorx("warning: acme tls cert validation for host may fail due to dns lookup error", err, slog.Any("host", h))
continue
}
for _, ip := range ips {
if _, ok := publicIPstrs[ip.String()]; !ok {
log.Warn("acme tls cert validation for host is likely to fail because not all its ips are being listened on",
log.Error("warning: acme tls cert validation for host is likely to fail because not all its ips are being listened on",
slog.Any("hostname", h),
slog.Any("listenedips", publicIPs),
slog.Any("hostips", ips),
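The hunk above only logs a warning; the underlying check is to resolve each newly allowed hostname and verify that every address it resolves to is one of the public IPs being listened on, since ACME validation traffic would otherwise not reach this server. A standalone approximation (names invented for the sketch, not mox code):

package example

import (
	"context"
	"log/slog"
	"net"
)

// Sketch only: warn when a host resolves to addresses we do not listen on,
// because ACME tls-alpn-01 validation for that host is then likely to fail.
func warnUnlistenedIPs(ctx context.Context, host string, listened map[string]bool) {
	ips, err := net.DefaultResolver.LookupIP(ctx, "ip", host+".")
	if err != nil {
		slog.Warn("dns lookup failed, acme validation may fail", "host", host, "err", err)
		return
	}
	for _, ip := range ips {
		if !listened[ip.String()] {
			slog.Warn("not listening on all ips of host, acme validation may fail", "host", host, "ip", ip.String())
		}
	}
}
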
@ -25,7 +25,7 @@ func TestAutotls(t *testing.T) {
getPrivateKey := func(host string, keyType autocert.KeyType) (crypto.Signer, error) {
return nil, fmt.Errorf("not used")
}
m, err := Load(log, "test", "../testdata/autotls", "mox@localhost", "https://localhost/", "", nil, getPrivateKey, shutdown)
m, err := Load("test", "../testdata/autotls", "mox@localhost", "https://localhost/", "", nil, getPrivateKey, shutdown)
if err != nil {
t.Fatalf("load manager: %v", err)
}
@ -82,7 +82,7 @@ func TestAutotls(t *testing.T) {

key0 := m.Manager.Client.Key

m, err = Load(log, "test", "../testdata/autotls", "mox@localhost", "https://localhost/", "", nil, getPrivateKey, shutdown)
m, err = Load("test", "../testdata/autotls", "mox@localhost", "https://localhost/", "", nil, getPrivateKey, shutdown)
if err != nil {
t.Fatalf("load manager again: %v", err)
}
@ -95,7 +95,7 @@ func TestAutotls(t *testing.T) {
t.Fatalf("hostpolicy, got err %v, expected no error", err)
}

m2, err := Load(log, "test2", "../testdata/autotls", "mox@localhost", "https://localhost/", "", nil, nil, shutdown)
m2, err := Load("test2", "../testdata/autotls", "mox@localhost", "https://localhost/", "", nil, nil, shutdown)
if err != nil {
t.Fatalf("load another manager: %v", err)
}

254
backup.go
@ -10,10 +10,7 @@ import (
"log/slog"
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
"syscall"
"time"

"github.com/mjl-/bstore"
@ -27,7 +24,7 @@ import (
"github.com/mjl-/mox/tlsrptdb"
)

func xbackupctl(ctx context.Context, xctl *ctl) {
func backupctl(ctx context.Context, ctl *ctl) {
/* protocol:
> "backup"
> destdir
@ -41,14 +38,14 @@ func xbackupctl(ctx context.Context, xctl *ctl) {
// "src" or "dst" are incomplete paths relative to the source or destination data
// directories.

dstDir := xctl.xread()
verbose := xctl.xread() == "verbose"
dstDataDir := ctl.xread()
verbose := ctl.xread() == "verbose"

// Set when an error is encountered. At the end, we warn if set.
var incomplete bool

// We'll be writing output, and logging both to mox and the ctl stream.
xwriter := xctl.writer()
writer := ctl.writer()

// Format easily readable output for the user.
formatLog := func(prefix, text string, err error, attrs ...slog.Attr) []byte {
@ -67,8 +64,10 @@ func xbackupctl(ctx context.Context, xctl *ctl) {

// Log an error to both the mox service as the user running "mox backup".
pkglogx := func(prefix, text string, err error, attrs ...slog.Attr) {
xctl.log.Errorx(text, err, attrs...)
xwriter.Write(formatLog(prefix, text, err, attrs...))
ctl.log.Errorx(text, err, attrs...)

_, werr := writer.Write(formatLog(prefix, text, err, attrs...))
ctl.xcheck(werr, "write to ctl")
}

// Log an error but don't mark backup as failed.
@ -85,100 +84,15 @@ func xbackupctl(ctx context.Context, xctl *ctl) {

// If verbose is enabled, log to the cli command. Always log as info level.
xvlog := func(text string, attrs ...slog.Attr) {
xctl.log.Info(text, attrs...)
ctl.log.Info(text, attrs...)
if verbose {
xwriter.Write(formatLog("", text, nil, attrs...))
_, werr := writer.Write(formatLog("", text, nil, attrs...))
ctl.xcheck(werr, "write to ctl")
}
}

dstConfigDir := filepath.Join(dstDir, "config")
dstDataDir := filepath.Join(dstDir, "data")

// Warn if directories already exist, will likely cause failures when trying to
// write files that already exist.
if _, err := os.Stat(dstConfigDir); err == nil {
xwarnx("destination config directory already exists", nil, slog.String("configdir", dstConfigDir))
}
if _, err := os.Stat(dstDataDir); err == nil {
xwarnx("destination data directory already exists", nil, slog.String("datadir", dstDataDir))
}

os.MkdirAll(dstDir, 0770)
os.MkdirAll(dstConfigDir, 0770)
os.MkdirAll(dstDataDir, 0770)

// Copy all files in the config dir.
srcConfigDir := filepath.Clean(mox.ConfigDirPath("."))
err := filepath.WalkDir(srcConfigDir, func(srcPath string, d fs.DirEntry, err error) error {
if err != nil {
return err
}

if srcConfigDir == srcPath {
return nil
}

// Trim directory and separator.
relPath := srcPath[len(srcConfigDir)+1:]

destPath := filepath.Join(dstConfigDir, relPath)

if d.IsDir() {
if info, err := os.Stat(srcPath); err != nil {
return fmt.Errorf("stat config dir %s: %v", srcPath, err)
} else if err := os.Mkdir(destPath, info.Mode()&0777); err != nil {
return fmt.Errorf("mkdir %s: %v", destPath, err)
}
return nil
}
if d.Type()&fs.ModeSymlink != 0 {
linkDest, err := os.Readlink(srcPath)
if err != nil {
return fmt.Errorf("reading symlink %s: %v", srcPath, err)
}
if err := os.Symlink(linkDest, destPath); err != nil {
return fmt.Errorf("creating symlink %s: %v", destPath, err)
}
return nil
}
if !d.Type().IsRegular() {
xwarnx("skipping non-regular/dir/symlink file in config dir", nil, slog.String("path", srcPath))
return nil
}

sf, err := os.Open(srcPath)
if err != nil {
return fmt.Errorf("open config file %s: %v", srcPath, err)
}
info, err := sf.Stat()
if err != nil {
return fmt.Errorf("stat config file %s: %v", srcPath, err)
}
df, err := os.OpenFile(destPath, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0777&info.Mode())
if err != nil {
return fmt.Errorf("create destination config file %s: %v", destPath, err)
}
defer func() {
if df != nil {
err := df.Close()
xctl.log.Check(err, "closing file")
}
}()
defer func() {
err := sf.Close()
xctl.log.Check(err, "closing file")
}()
if _, err := io.Copy(df, sf); err != nil {
return fmt.Errorf("copying config file %s to %s: %v", srcPath, destPath, err)
}
if err := df.Close(); err != nil {
return fmt.Errorf("closing destination config file %s: %v", srcPath, err)
}
df = nil
return nil
})
if err != nil {
xerrx("storing config directory", err)
xwarnx("destination data directory already exists", nil, slog.String("dir", dstDataDir))
}

srcDataDir := filepath.Clean(mox.DataDirPath("."))
@ -208,10 +122,7 @@ func xbackupctl(ctx context.Context, xctl *ctl) {
xerrx("open source file (not backed up)", err, slog.String("srcpath", srcpath), slog.String("dstpath", dstpath))
return
}
defer func() {
err := sf.Close()
xctl.log.Check(err, "closing source file")
}()
defer sf.Close()

ensureDestDir(dstpath)
df, err := os.OpenFile(dstpath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0660)
@ -221,8 +132,7 @@ func xbackupctl(ctx context.Context, xctl *ctl) {
}
defer func() {
if df != nil {
err := df.Close()
xctl.log.Check(err, "closing destination file")
df.Close()
}
}()
if _, err := io.Copy(df, sf); err != nil {
@ -264,9 +174,18 @@ func xbackupctl(ctx context.Context, xctl *ctl) {
xvlog("backed up directory", slog.String("dir", dir), slog.Duration("duration", time.Since(tmDir)))
}

// Backup a database by copying it in a readonly transaction. Wrapped by backupDB
// which logs and returns just a bool.
backupDB0 := func(db *bstore.DB, path string) error {
// Backup a database by copying it in a readonly transaction.
// Always logs on error, so caller doesn't have to, but also returns the error so
// callers can see result.
backupDB := func(db *bstore.DB, path string) (rerr error) {
defer func() {
if rerr != nil {
xerrx("backing up database", rerr, slog.String("path", path))
}
}()

tmDB := time.Now()

dstpath := filepath.Join(dstDataDir, path)
ensureDestDir(dstpath)
df, err := os.OpenFile(dstpath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0660)
@ -275,8 +194,7 @@ func xbackupctl(ctx context.Context, xctl *ctl) {
}
defer func() {
if df != nil {
err := df.Close()
xctl.log.Check(err, "closing destination database file")
df.Close()
}
}()
err = db.Read(ctx, func(tx *bstore.Tx) error {
@ -301,20 +219,10 @@ func xbackupctl(ctx context.Context, xctl *ctl) {
if err != nil {
return fmt.Errorf("closing destination database after copy: %v", err)
}
xvlog("backed up database file", slog.String("path", path), slog.Duration("duration", time.Since(tmDB)))
return nil
}

backupDB := func(db *bstore.DB, path string) bool {
start := time.Now()
err := backupDB0(db, path)
if err != nil {
xerrx("backing up database", err, slog.String("path", path), slog.Duration("duration", time.Since(start)))
return false
}
xvlog("backed up database file", slog.String("path", path), slog.Duration("duration", time.Since(start)))
return true
}
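backupDB copies each bstore database while holding a read-only transaction, so the destination file is a consistent snapshot even while the server keeps running. bstore is built on bbolt; the same idea expressed directly against bbolt looks roughly like this (illustrative sketch, not the mox implementation):

package example

import (
	"os"

	bolt "go.etcd.io/bbolt"
)

// Sketch only: copy a bolt database file inside a read-only transaction so the
// copy is a consistent snapshot of the database.
func backupBoltDB(db *bolt.DB, dstpath string) error {
	df, err := os.OpenFile(dstpath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0660)
	if err != nil {
		return err
	}
	if err := db.View(func(tx *bolt.Tx) error {
		_, err := tx.WriteTo(df)
		return err
	}); err != nil {
		df.Close()
		return err
	}
	return df.Close()
}
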

// Try to create a hardlink. Fall back to copying the file (e.g. when on different file system).
warnedHardlink := false // We warn once about failing to hardlink.
linkOrCopy := func(srcpath, dstpath string) (bool, error) {
@ -326,11 +234,7 @@ func xbackupctl(ctx context.Context, xctl *ctl) {
// No point in trying with regular copy, we would warn twice.
return false, err
} else if !warnedHardlink {
var hardlinkHint string
if runtime.GOOS == "linux" && errors.Is(err, syscall.EXDEV) {
hardlinkHint = " (hint: if running under systemd, ReadWritePaths in mox.service may cause multiple mountpoints; consider merging paths into a single parent directory to prevent cross-device/mountpoint hardlinks)"
}
xwarnx("creating hardlink to message failed, will be doing regular file copies and not warn again"+hardlinkHint, err, slog.String("srcpath", srcpath), slog.String("dstpath", dstpath))
xwarnx("creating hardlink to message failed, will be doing regular file copies and not warn again", err, slog.String("srcpath", srcpath), slog.String("dstpath", dstpath))
warnedHardlink = true
}

@ -341,7 +245,7 @@ func xbackupctl(ctx context.Context, xctl *ctl) {
}
defer func() {
err := sf.Close()
xctl.log.Check(err, "closing copied source file")
ctl.log.Check(err, "closing copied source file")
}()

df, err := os.OpenFile(dstpath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0660)
@ -351,7 +255,7 @@ func xbackupctl(ctx context.Context, xctl *ctl) {
defer func() {
if df != nil {
err := df.Close()
xctl.log.Check(err, "closing partial destination file")
ctl.log.Check(err, "closing partial destination file")
}
}()
if _, err := io.Copy(df, sf); err != nil {
@ -368,16 +272,16 @@ func xbackupctl(ctx context.Context, xctl *ctl) {
// Start making the backup.
tmStart := time.Now()

xctl.log.Print("making backup", slog.String("destdir", dstDataDir))
ctl.log.Print("making backup", slog.String("destdir", dstDataDir))

if err := os.MkdirAll(dstDataDir, 0770); err != nil {
err := os.MkdirAll(dstDataDir, 0770)
if err != nil {
xerrx("creating destination data directory", err)
}

if err := os.WriteFile(filepath.Join(dstDataDir, "moxversion"), []byte(moxvar.Version), 0660); err != nil {
xerrx("writing moxversion", err)
}
backupDB(store.AuthDB, "auth.db")
backupDB(dmarcdb.ReportsDB, "dmarcrpt.db")
backupDB(dmarcdb.EvalDB, "dmarceval.db")
backupDB(mtastsdb.DB, "mtasts.db")
@ -389,7 +293,7 @@ func xbackupctl(ctx context.Context, xctl *ctl) {
srcAcmeDir := filepath.Join(srcDataDir, "acme")
if _, err := os.Stat(srcAcmeDir); err == nil {
backupDir("acme")
} else if !os.IsNotExist(err) {
} else if err != nil && !os.IsNotExist(err) {
xerrx("copying acme/", err)
}

@ -397,13 +301,13 @@ func xbackupctl(ctx context.Context, xctl *ctl) {
backupQueue := func(path string) {
tmQueue := time.Now()

if !backupDB(queue.DB, path) {
if err := backupDB(queue.DB, path); err != nil {
xerrx("queue not backed up", err, slog.String("path", path), slog.Duration("duration", time.Since(tmQueue)))
return
}

dstdbpath := filepath.Join(dstDataDir, path)
opts := bstore.Options{MustExist: true, RegisterLogger: xctl.log.Logger}
db, err := bstore.Open(ctx, dstdbpath, &opts, queue.DBTypes...)
db, err := bstore.Open(ctx, dstdbpath, &bstore.Options{MustExist: true}, queue.DBTypes...)
if err != nil {
xerrx("open copied queue database", err, slog.String("dstpath", dstdbpath), slog.Duration("duration", time.Since(tmQueue)))
return
@ -412,20 +316,17 @@ func xbackupctl(ctx context.Context, xctl *ctl) {
defer func() {
if db != nil {
err := db.Close()
xctl.log.Check(err, "closing new queue db")
ctl.log.Check(err, "closing new queue db")
}
}()

// Link/copy known message files. If a message has been removed while we read the
// database, our backup is not consistent and the backup will be marked failed.
// Link/copy known message files. Warn if files are missing or unexpected
// (though a message file could have been removed just now due to delivery, or a
// new message may have been queued).
tmMsgs := time.Now()
seen := map[string]struct{}{}
var nlinked, ncopied int
var maxID int64
err = bstore.QueryDB[queue.Msg](ctx, db).ForEach(func(m queue.Msg) error {
if m.ID > maxID {
maxID = m.ID
}
mp := store.MessagePath(m.ID)
seen[mp] = struct{}{}
srcpath := filepath.Join(srcDataDir, "queue", mp)
@ -448,9 +349,7 @@ func xbackupctl(ctx context.Context, xctl *ctl) {
slog.Duration("duration", time.Since(tmMsgs)))
}

// Read through all files in queue directory and warn about anything we haven't
// handled yet. Message files that are newer than we expect from our consistent
// database snapshot are ignored.
// Read through all files in queue directory and warn about anything we haven't handled yet.
tmWalk := time.Now()
srcqdir := filepath.Join(srcDataDir, "queue")
err = filepath.WalkDir(srcqdir, func(srcqpath string, d fs.DirEntry, err error) error {
@ -468,12 +367,6 @@ func xbackupctl(ctx context.Context, xctl *ctl) {
if p == "index.db" {
return nil
}
// Skip any messages that were added since we started on our consistent snapshot.
// We don't want to cause spurious backup warnings.
if id, err := strconv.ParseInt(filepath.Base(p), 10, 64); err == nil && maxID > 0 && id > maxID && p == store.MessagePath(id) {
return nil
}

qp := filepath.Join("queue", p)
xwarnx("backing up unrecognized file in queue directory", nil, slog.String("path", qp))
backupFile(qp)
@ -490,21 +383,21 @@ func xbackupctl(ctx context.Context, xctl *ctl) {
backupQueue(filepath.FromSlash("queue/index.db"))

backupAccount := func(acc *store.Account) {
defer func() {
err := acc.Close()
xctl.log.Check(err, "closing account")
}()
defer acc.Close()

tmAccount := time.Now()

// Copy database file.
dbpath := filepath.Join("accounts", acc.Name, "index.db")
backupDB(acc.DB, dbpath)
err := backupDB(acc.DB, dbpath)
if err != nil {
xerrx("copying account database", err, slog.String("path", dbpath), slog.Duration("duration", time.Since(tmAccount)))
}

// todo: should document/check not taking a rlock on account.

// Copy junkfilter files, if configured.
if jf, _, err := acc.OpenJunkFilter(ctx, xctl.log); err != nil {
if jf, _, err := acc.OpenJunkFilter(ctx, ctl.log); err != nil {
if !errors.Is(err, store.ErrNoJunkFilter) {
xerrx("opening junk filter for account (not backed up)", err)
}
@ -514,13 +407,13 @@ func xbackupctl(ctx context.Context, xctl *ctl) {
backupDB(db, jfpath)
bloompath := filepath.Join("accounts", acc.Name, "junkfilter.bloom")
backupFile(bloompath)
db = nil
err := jf.Close()
xctl.log.Check(err, "closing junkfilter")
ctl.log.Check(err, "closing junkfilter")
}

dstdbpath := filepath.Join(dstDataDir, dbpath)
opts := bstore.Options{MustExist: true, RegisterLogger: xctl.log.Logger}
db, err := bstore.Open(ctx, dstdbpath, &opts, store.DBTypes...)
db, err := bstore.Open(ctx, dstdbpath, &bstore.Options{MustExist: true}, store.DBTypes...)
if err != nil {
xerrx("open copied account database", err, slog.String("dstpath", dstdbpath), slog.Duration("duration", time.Since(tmAccount)))
return
@ -529,19 +422,17 @@ func xbackupctl(ctx context.Context, xctl *ctl) {
defer func() {
if db != nil {
err := db.Close()
xctl.log.Check(err, "close account database")
ctl.log.Check(err, "close account database")
}
}()

// Link/copy known message files.
// Link/copy known message files. Warn if files are missing or unexpected (though a
// message file could have been added just now due to delivery, or a message have
// been removed).
tmMsgs := time.Now()
seen := map[string]struct{}{}
var maxID int64
var nlinked, ncopied int
err = bstore.QueryDB[store.Message](ctx, db).FilterEqual("Expunged", false).ForEach(func(m store.Message) error {
if m.ID > maxID {
maxID = m.ID
}
mp := store.MessagePath(m.ID)
seen[mp] = struct{}{}
amp := filepath.Join("accounts", acc.Name, "msg", mp)
@ -565,18 +456,7 @@ func xbackupctl(ctx context.Context, xctl *ctl) {
slog.Duration("duration", time.Since(tmMsgs)))
}

eraseIDs := map[int64]struct{}{}
err = bstore.QueryDB[store.MessageErase](ctx, db).ForEach(func(me store.MessageErase) error {
eraseIDs[me.ID] = struct{}{}
return nil
})
if err != nil {
xerrx("listing erased messages", err)
}

// Read through all files in queue directory and warn about anything we haven't
// handled yet. Message files that are newer than we expect from our consistent
// database snapshot are ignored.
// Read through all files in account directory and warn about anything we haven't handled yet.
tmWalk := time.Now()
srcadir := filepath.Join(srcDataDir, "accounts", acc.Name)
err = filepath.WalkDir(srcadir, func(srcapath string, d fs.DirEntry, err error) error {
@ -594,16 +474,6 @@ func xbackupctl(ctx context.Context, xctl *ctl) {
if _, ok := seen[mp]; ok {
return nil
}

// Skip any messages that were added since we started on our consistent snapshot,
// or messages that will be erased. We don't want to cause spurious backup
// warnings.
id, err := strconv.ParseInt(l[len(l)-1], 10, 64)
if err == nil && id > maxID && mp == store.MessagePath(id) {
return nil
} else if _, ok := eraseIDs[id]; err == nil && ok {
return nil
}
}
switch p {
case "index.db", "junkfilter.db", "junkfilter.bloom":
@ -632,7 +502,7 @@ func xbackupctl(ctx context.Context, xctl *ctl) {
// account directories when handling "all other files" below.
accounts := map[string]struct{}{}
for _, accName := range mox.Conf.Accounts() {
acc, err := store.OpenAccount(xctl.log, accName, false)
acc, err := store.OpenAccount(ctl.log, accName)
if err != nil {
xerrx("opening account for copying (will try to copy as regular files later)", err, slog.String("account", accName))
continue
@ -670,7 +540,7 @@ func xbackupctl(ctx context.Context, xctl *ctl) {
}

switch p {
case "auth.db", "dmarcrpt.db", "dmarceval.db", "mtasts.db", "tlsrpt.db", "tlsrptresult.db", "receivedid.key", "ctl":
case "dmarcrpt.db", "dmarceval.db", "mtasts.db", "tlsrpt.db", "tlsrptresult.db", "receivedid.key", "ctl":
// Already handled.
return nil
case "lastknownversion": // Optional file, not yet handled.
@ -688,11 +558,11 @@ func xbackupctl(ctx context.Context, xctl *ctl) {

xvlog("backup finished", slog.Duration("duration", time.Since(tmStart)))

xwriter.xclose()
writer.xclose()

if incomplete {
xctl.xwrite("errors were encountered during backup")
ctl.xwrite("errors were encountered during backup")
} else {
xctl.xwriteok()
ctl.xwriteok()
}
}
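Message files are backed up with the link-or-copy strategy from linkOrCopy above: try a hardlink first and fall back to a regular copy when that fails, for example when source and destination are on different file systems. Stripped of the logging and one-time warning, the pattern is roughly (sketch only, simplified from the diff above):

package example

import (
	"io"
	"os"
)

// Sketch only: hardlink srcpath to dstpath, falling back to a plain copy,
// e.g. when source and destination are on different file systems.
func linkOrCopyFile(srcpath, dstpath string) error {
	if err := os.Link(srcpath, dstpath); err == nil {
		return nil
	}
	sf, err := os.Open(srcpath)
	if err != nil {
		return err
	}
	defer sf.Close()
	df, err := os.OpenFile(dstpath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0660)
	if err != nil {
		return err
	}
	if _, err := io.Copy(df, sf); err != nil {
		df.Close()
		return err
	}
	return df.Close()
}
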

290
config/config.go
@ -5,7 +5,6 @@ import (
"crypto/tls"
"crypto/x509"
"net"
"net/http"
"net/url"
"reflect"
"regexp"
@ -61,11 +60,11 @@ type Static struct {
HostTLSRPT struct {
Account string `sconf-doc:"Account to deliver TLS reports to. Typically same account as for postmaster."`
Mailbox string `sconf-doc:"Mailbox to deliver TLS reports to. Recommended value: TLSRPT."`
Localpart string `sconf-doc:"Localpart at hostname to accept TLS reports at. Recommended value: tlsreports."`
Localpart string `sconf-doc:"Localpart at hostname to accept TLS reports at. Recommended value: tls-reports."`

ParsedLocalpart smtp.Localpart `sconf:"-"`
} `sconf:"optional" sconf-doc:"Destination for per-host TLS reports (TLSRPT). TLS reports can be per recipient domain (for MTA-STS), or per MX host (for DANE). The per-domain TLS reporting configuration is in domains.conf. This is the TLS reporting configuration for this host. If absent, no host-based TLSRPT address is configured, and no host TLSRPT DNS record is suggested."`
InitialMailboxes InitialMailboxes `sconf:"optional" sconf-doc:"Mailboxes to create for new accounts. Inbox is always created. Mailboxes can be given a 'special-use' role, which are understood by most mail clients. If absent/empty, the following additional mailboxes are created: Sent, Archive, Trash, Drafts and Junk."`
InitialMailboxes InitialMailboxes `sconf:"optional" sconf-doc:"Mailboxes to create for new accounts. Inbox is always created. Mailboxes can be given a 'special-use' role, which are understood by most mail clients. If absent/empty, the following mailboxes are created: Sent, Archive, Trash, Drafts and Junk."`
DefaultMailboxes []string `sconf:"optional" sconf-doc:"Deprecated in favor of InitialMailboxes. Mailboxes to create when adding an account. Inbox is always created. If no mailboxes are specified, the following are automatically created: Sent, Archive, Trash, Drafts and Junk."`
Transports map[string]Transport `sconf:"optional" sconf-doc:"Transport are mechanisms for delivering messages. Transports can be referenced from Routes in accounts, domains and the global configuration. There is always an implicit/fallback delivery transport doing direct delivery with SMTP from the outgoing message queue. Transports are typically only configured when using smarthosts, i.e. when delivering through another SMTP server. Zero or one transport methods must be set in a transport, never multiple. When using an external party to send email for a domain, keep in mind you may have to add their IP address to your domain's SPF record, and possibly additional DKIM records."`
// Awkward naming of fields to get intended default behaviour for zero values.
@ -74,7 +73,7 @@ type Static struct {
OutgoingTLSReportsForAllSuccess bool `sconf:"optional" sconf-doc:"Also send TLS reports if there were no SMTP STARTTLS connection failures. By default, reports are only sent when at least one failure occurred. If a report is sent, it does always include the successful connection counts as well."`
QuotaMessageSize int64 `sconf:"optional" sconf-doc:"Default maximum total message size in bytes for each individual account, only applicable if greater than zero. Can be overridden per account. Attempting to add new messages to an account beyond its maximum total size will result in an error. Useful to prevent a single account from filling storage. The quota only applies to the email message files, not to any file system overhead and also not the message index database file (account for approximately 15% overhead)."`

// All IPs that were explicitly listened on for external SMTP. Only set when there
// All IPs that were explicitly listen on for external SMTP. Only set when there
// are no unspecified external SMTP listeners and there is at most one for IPv4 and
// at most one for IPv6. Used for setting the local address when making outgoing
// connections. Those IPs are assumed to be in an SPF record for the domain,
@ -110,20 +109,19 @@ type Dynamic struct {
Domains map[string]Domain `sconf-doc:"NOTE: This config file is in 'sconf' format. Indent with tabs. Comments must be on their own line, they don't end a line. Do not escape or quote strings. Details: https://pkg.go.dev/github.com/mjl-/sconf.\n\n\nDomains for which email is accepted. For internationalized domains, use their IDNA names in UTF-8."`
Accounts map[string]Account `sconf-doc:"Accounts represent mox users, each with a password and email address(es) to which email can be delivered (possibly at different domains). Each account has its own on-disk directory holding its messages and index database. An account name is not an email address."`
WebDomainRedirects map[string]string `sconf:"optional" sconf-doc:"Redirect all requests from domain (key) to domain (value). Always redirects to HTTPS. For plain HTTP redirects, use a WebHandler with a WebRedirect."`
WebHandlers []WebHandler `sconf:"optional" sconf-doc:"Handle webserver requests by serving static files, redirecting, reverse-proxying HTTP(s) or passing the request to an internal service. The first matching WebHandler will handle the request. Built-in system handlers, e.g. for ACME validation, autoconfig and mta-sts always run first. Built-in handlers for admin, account, webmail and webapi are evaluated after all handlers, including webhandlers (allowing for overrides of internal services for some domains). If no handler matches, the response status code is file not found (404). If webserver features are missing, forward the requests to an application that provides the needed functionality itself."`
WebHandlers []WebHandler `sconf:"optional" sconf-doc:"Handle webserver requests by serving static files, redirecting or reverse-proxying HTTP(s). The first matching WebHandler will handle the request. Built-in handlers, e.g. for account, admin, autoconfig and mta-sts always run first. If no handler matches, the response status code is file not found (404). If functionality you need is missng, simply forward the requests to an application that can provide the needed functionality."`
Routes []Route `sconf:"optional" sconf-doc:"Routes for delivering outgoing messages through the queue. Each delivery attempt evaluates account routes, domain routes and finally these global routes. The transport of the first matching route is used in the delivery attempt. If no routes match, which is the default with no configured routes, messages are delivered directly from the queue."`
MonitorDNSBLs []string `sconf:"optional" sconf-doc:"DNS blocklists to periodically check with if IPs we send from are present, without using them for checking incoming deliveries.. Also see DNSBLs in SMTP listeners in mox.conf, which specifies DNSBLs to use both for incoming deliveries and for checking our IPs against. Example DNSBLs: sbl.spamhaus.org, bl.spamcop.net."`

WebDNSDomainRedirects map[dns.Domain]dns.Domain `sconf:"-" json:"-"`
MonitorDNSBLZones []dns.Domain `sconf:"-"`
ClientSettingDomains map[dns.Domain]struct{} `sconf:"-" json:"-"`
}

type ACME struct {
DirectoryURL string `sconf-doc:"For letsencrypt, use https://acme-v02.api.letsencrypt.org/directory."`
RenewBefore time.Duration `sconf:"optional" sconf-doc:"How long before expiration to renew the certificate. Default is 30 days."`
ContactEmail string `sconf-doc:"Email address to register at ACME provider. The provider can email you when certificates are about to expire. If you configure an address for which email is delivered by this server, keep in mind that TLS misconfigurations could result in such notification emails not arriving."`
Port int `sconf:"optional" sconf-doc:"TLS port for ACME validation, 443 by default. You should only override this if you cannot listen on port 443 directly. ACME will make requests to port 443, so you'll have to add an external mechanism to get the tls connection here, e.g. by configuring firewall-level port forwarding. Validation over the https port uses tls-alpn-01 with application-layer protocol negotiation, which essentially means the original tls connection must make it here unmodified, an https reverse proxy will not work."`
Port int `sconf:"optional" sconf-doc:"TLS port for ACME validation, 443 by default. You should only override this if you cannot listen on port 443 directly. ACME will make requests to port 443, so you'll have to add an external mechanism to get the connection here, e.g. by configuring port forwarding."`
IssuerDomainName string `sconf:"optional" sconf-doc:"If set, used for suggested CAA DNS records, for restricting TLS certificate issuance to a Certificate Authority. If empty and DirectyURL is for Let's Encrypt, this value is set automatically to letsencrypt.org."`
ExternalAccountBinding *ExternalAccountBinding `sconf:"optional" sconf-doc:"ACME providers can require that a request for a new ACME account reference an existing non-ACME account known to the provider. External account binding references that account by a key id, and authorizes new ACME account requests by signing it with a key known both by the ACME client and ACME provider."`
// ../rfc/8555:2111
@ -137,10 +135,10 @@ type ExternalAccountBinding struct {
}

type Listener struct {
IPs []string `sconf-doc:"Use 0.0.0.0 to listen on all IPv4 and/or :: to listen on all IPv6 addresses, but it is better to explicitly specify the IPs you want to use for email, as mox will make sure outgoing connections will only be made from one of those IPs. If both outgoing IPv4 and IPv6 connectivity is possible, and only one family has explicitly configured addresses, both address families are still used for outgoing connections. Use the \"direct\" transport to limit address families for outgoing connections."`
IPs []string `sconf-doc:"Use 0.0.0.0 to listen on all IPv4 and/or :: to listen on all IPv6 addresses, but it is better to explicitly specify the IPs you want to use for email, as mox will make sure outgoing connections will only be made from one of those IPs."`
NATIPs []string `sconf:"optional" sconf-doc:"If set, the mail server is configured behind a NAT and field IPs are internal instead of the public IPs, while NATIPs lists the public IPs. Used during IP-related DNS self-checks, such as for iprev, mx, spf, autoconfig, autodiscover, and for autotls."`
IPsNATed bool `sconf:"optional" sconf-doc:"Deprecated, use NATIPs instead. If set, IPs are not the public IPs, but are NATed. Skips IP-related DNS self-checks."`
Hostname string `sconf:"optional" sconf-doc:"If empty, the config global Hostname is used. The internal services webadmin, webaccount, webmail and webapi only match requests to IPs, this hostname, \"localhost\". All except webadmin also match for any client settings domain."`
Hostname string `sconf:"optional" sconf-doc:"If empty, the config global Hostname is used."`
HostnameDomain dns.Domain `sconf:"-" json:"-"` // Set when parsing config.

TLS *TLS `sconf:"optional" sconf-doc:"For SMTP/IMAP STARTTLS, direct TLS and HTTPS connections."`
@ -158,8 +156,6 @@ type Listener struct {

FirstTimeSenderDelay *time.Duration `sconf:"optional" sconf-doc:"Delay before accepting a message from a first-time sender for the destination account. Default: 15s."`

TLSSessionTicketsDisabled *bool `sconf:"optional" sconf-doc:"Override default setting for enabling TLS session tickets. Disabling session tickets may work around TLS interoperability issues."`

DNSBLZones []dns.Domain `sconf:"-"`
} `sconf:"optional"`
Submission struct {
@ -168,9 +164,8 @@ type Listener struct {
NoRequireSTARTTLS bool `sconf:"optional" sconf-doc:"Do not require STARTTLS. Since users must login, this means password may be sent without encryption. Not recommended."`
} `sconf:"optional" sconf-doc:"SMTP for submitting email, e.g. by email applications. Starts out in plain text, can be upgraded to TLS with the STARTTLS command. Prefer using Submissions which is always a TLS connection."`
Submissions struct {
Enabled bool
Port int `sconf:"optional" sconf-doc:"Default 465."`
EnabledOnHTTPS bool `sconf:"optional" sconf-doc:"Additionally enable submission on HTTPS port 443 via TLS ALPN. TLS Application Layer Protocol Negotiation allows clients to request a specific protocol from the server as part of the TLS connection setup. When this setting is enabled and a client requests the 'smtp' protocol after TLS, it will be able to talk SMTP to Mox on port 443. This is meant to be useful as a censorship circumvention technique for Delta Chat."`
Enabled bool
Port int `sconf:"optional" sconf-doc:"Default 465."`
} `sconf:"optional" sconf-doc:"SMTP over TLS for submitting email, by email applications. Requires a TLS config."`
IMAP struct {
Enabled bool
@ -178,9 +173,8 @@ type Listener struct {
NoRequireSTARTTLS bool `sconf:"optional" sconf-doc:"Enable this only when the connection is otherwise encrypted (e.g. through a VPN)."`
} `sconf:"optional" sconf-doc:"IMAP for reading email, by email applications. Starts out in plain text, can be upgraded to TLS with the STARTTLS command. Prefer using IMAPS instead which is always a TLS connection."`
IMAPS struct {
Enabled bool
Port int `sconf:"optional" sconf-doc:"Default 993."`
EnabledOnHTTPS bool `sconf:"optional" sconf-doc:"Additionally enable IMAP on HTTPS port 443 via TLS ALPN. TLS Application Layer Protocol Negotiation allows clients to request a specific protocol from the server as part of the TLS connection setup. When this setting is enabled and a client requests the 'imap' protocol after TLS, it will be able to talk IMAP to Mox on port 443. This is meant to be useful as a censorship circumvention technique for Delta Chat."`
Enabled bool
Port int `sconf:"optional" sconf-doc:"Default 993."`
} `sconf:"optional" sconf-doc:"IMAP over TLS for reading email, by email applications. Requires a TLS config."`
AccountHTTP WebService `sconf:"optional" sconf-doc:"Account web interface, for email users wanting to change their accounts, e.g. set new password, set new delivery rulesets. Default path is /."`
AccountHTTPS WebService `sconf:"optional" sconf-doc:"Account web interface listener like AccountHTTP, but for HTTPS. Requires a TLS config."`
@ -188,8 +182,6 @@ type Listener struct {
AdminHTTPS WebService `sconf:"optional" sconf-doc:"Admin web interface listener like AdminHTTP, but for HTTPS. Requires a TLS config."`
WebmailHTTP WebService `sconf:"optional" sconf-doc:"Webmail client, for reading email. Default path is /webmail/."`
WebmailHTTPS WebService `sconf:"optional" sconf-doc:"Webmail client, like WebmailHTTP, but for HTTPS. Requires a TLS config."`
WebAPIHTTP WebService `sconf:"optional" sconf-doc:"Like WebAPIHTTP, but with plain HTTP, without TLS."`
WebAPIHTTPS WebService `sconf:"optional" sconf-doc:"WebAPI, a simple HTTP/JSON-based API for email, with HTTPS (requires a TLS config). Default path is /webapi/."`
MetricsHTTP struct {
Enabled bool
Port int `sconf:"optional" sconf-doc:"Default 8010."`
@ -209,22 +201,20 @@ type Listener struct {
NonTLS bool `sconf:"optional" sconf-doc:"If set, plain HTTP instead of HTTPS is spoken on the configured port. Can be useful when the mta-sts domain is reverse proxied."`
} `sconf:"optional" sconf-doc:"Serve MTA-STS policies describing SMTP TLS requirements. Requires a TLS config."`
WebserverHTTP struct {
Enabled bool
Port int `sconf:"optional" sconf-doc:"Port for plain HTTP (non-TLS) webserver."`
RateLimitDisabled bool `sconf:"optional" sconf-doc:"Disable rate limiting for all requests to this port."`
Enabled bool
Port int `sconf:"optional" sconf-doc:"Port for plain HTTP (non-TLS) webserver."`
} `sconf:"optional" sconf-doc:"All configured WebHandlers will serve on an enabled listener."`
WebserverHTTPS struct {
Enabled bool
Port int `sconf:"optional" sconf-doc:"Port for HTTPS webserver."`
RateLimitDisabled bool `sconf:"optional" sconf-doc:"Disable rate limiting for all requests to this port."`
Enabled bool
Port int `sconf:"optional" sconf-doc:"Port for HTTPS webserver."`
} `sconf:"optional" sconf-doc:"All configured WebHandlers will serve on an enabled listener. Either ACME must be configured, or for each WebHandler domain a TLS certificate must be configured."`
}

// WebService is an internal web interface: webmail, webaccount, webadmin, webapi.
// WebService is an internal web interface: webmail, account, admin.
type WebService struct {
Enabled bool
Port int `sconf:"optional" sconf-doc:"Default 80 for HTTP and 443 for HTTPS. See Hostname at Listener for hostname matching behaviour."`
Path string `sconf:"optional" sconf-doc:"Path to serve requests on. Should end with a slash, related to cookie paths."`
Port int `sconf:"optional" sconf-doc:"Default 80 for HTTP and 443 for HTTPS."`
Path string `sconf:"optional" sconf-doc:"Path to serve requests on."`
Forwarded bool `sconf:"optional" sconf-doc:"If set, X-Forwarded-* headers are used for the remote IP address for rate limiting and for the \"secure\" status of cookies."`
}

@ -232,12 +222,10 @@ type WebService struct {
// be non-nil. The non-nil field represents the type of transport. For a
// transport with all fields nil, regular email delivery is done.
type Transport struct {
Submissions *TransportSMTP `sconf:"optional" sconf-doc:"Submission SMTP over a TLS connection to submit email to a remote queue."`
Submission *TransportSMTP `sconf:"optional" sconf-doc:"Submission SMTP over a plain TCP connection (possibly with STARTTLS) to submit email to a remote queue."`
SMTP *TransportSMTP `sconf:"optional" sconf-doc:"SMTP over a plain connection (possibly with STARTTLS), typically for old-fashioned unauthenticated relaying to a remote queue."`
Socks *TransportSocks `sconf:"optional" sconf-doc:"Like regular direct delivery, but makes outgoing connections through a SOCKS proxy."`
Direct *TransportDirect `sconf:"optional" sconf-doc:"Like regular direct delivery, but allows to tweak outgoing connections."`
Fail *TransportFail `sconf:"optional" sconf-doc:"Immediately fails the delivery attempt."`
Submissions *TransportSMTP `sconf:"optional" sconf-doc:"Submission SMTP over a TLS connection to submit email to a remote queue."`
Submission *TransportSMTP `sconf:"optional" sconf-doc:"Submission SMTP over a plain TCP connection (possibly with STARTTLS) to submit email to a remote queue."`
SMTP *TransportSMTP `sconf:"optional" sconf-doc:"SMTP over a plain connection (possibly with STARTTLS), typically for old-fashioned unauthenticated relaying to a remote queue."`
Socks *TransportSocks `sconf:"optional" sconf-doc:"Like regular direct delivery, but makes outgoing connections through a SOCKS proxy."`
}
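Per the comments above, at most one of the transport fields may be set; with none set, the default direct delivery is used. A sketch of how such a constraint could be checked when parsing the config (illustrative only, using the four fields both versions of the struct share; this is not the actual mox validation code):

package config

import "fmt"

// Sketch only: reject a Transport that configures more than one mechanism.
// Zero mechanisms is fine and means regular direct delivery.
func checkTransport(t Transport) error {
	n := 0
	if t.Submissions != nil {
		n++
	}
	if t.Submission != nil {
		n++
	}
	if t.SMTP != nil {
		n++
	}
	if t.Socks != nil {
		n++
	}
	if n > 1 {
		return fmt.Errorf("transport: %d mechanisms configured, at most one allowed", n)
	}
	return nil
}
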
|
||||
|
||||
// TransportSMTP delivers messages by "submission" (SMTP, typically
|
||||
@ -274,112 +262,67 @@ type TransportSocks struct {
|
||||
Hostname dns.Domain `sconf:"-" json:"-"` // Parsed form of RemoteHostname
|
||||
}
|
||||
|
||||
type TransportDirect struct {
|
||||
DisableIPv4 bool `sconf:"optional" sconf-doc:"If set, outgoing SMTP connections will *NOT* use IPv4 addresses to connect to remote SMTP servers."`
|
||||
DisableIPv6 bool `sconf:"optional" sconf-doc:"If set, outgoing SMTP connections will *NOT* use IPv6 addresses to connect to remote SMTP servers."`
|
||||
|
||||
IPFamily string `sconf:"-" json:"-"`
|
||||
}
|
||||
|
||||
// TransportFail is a transport that fails all delivery attempts.
|
||||
type TransportFail struct {
|
||||
SMTPCode int `sconf:"optional" sconf-doc:"SMTP error code and optional enhanced error code to use for the failure. If empty, 554 is used (transaction failed)."`
|
||||
SMTPMessage string `sconf:"optional" sconf-doc:"Message to include for the rejection. It will be shown in the DSN."`
|
||||
|
||||
// Effective values to use, set when parsing.
|
||||
Code int `sconf:"-"`
|
||||
Message string `sconf:"-"`
|
||||
}
|
||||
|
||||
type Domain struct {
|
||||
Disabled bool `sconf:"optional" sconf-doc:"Disabled domains can be useful during/before migrations. Domains that are disabled can still be configured like normal, including adding addresses using the domain to accounts. However, disabled domains: 1. Do not try to fetch ACME certificates. TLS connections to host names involving the email domain will fail. A TLS certificate for the hostname (that wil be used as MX) itself will be requested. 2. Incoming deliveries over SMTP are rejected with a temporary error '450 4.2.1 recipient domain temporarily disabled'. 3. Submissions over SMTP using an (envelope) SMTP MAIL FROM address or message 'From' address of a disabled domain will be rejected with a temporary error '451 4.3.0 sender domain temporarily disabled'. Note that accounts with addresses at disabled domains can still log in and read email (unless the account itself is disabled)."`
|
||||
Description string `sconf:"optional" sconf-doc:"Free-form description of domain."`
|
||||
ClientSettingsDomain string `sconf:"optional" sconf-doc:"Hostname for client settings instead of the mail server hostname. E.g. mail.<domain>. For future migration to another mail operator without requiring all clients to update their settings, it is convenient to have client settings that reference a subdomain of the hosted domain instead of the hostname of the server where the mail is currently hosted. If empty, the hostname of the mail server is used for client configurations. Unicode name."`
|
||||
LocalpartCatchallSeparator string `sconf:"optional" sconf-doc:"If not empty, only the string before the separator is used to for email delivery decisions. For example, if set to \"+\", you+anything@example.com will be delivered to you@example.com."`
|
||||
LocalpartCatchallSeparators []string `sconf:"optional" sconf-doc:"Similar to LocalpartCatchallSeparator, but in case multiple are needed. For example both \"+\" and \"-\". Only of one LocalpartCatchallSeparator or LocalpartCatchallSeparators can be set. If set, the first separator is used to make unique addresses for outgoing SMTP connections with FromIDLoginAddresses."`
|
||||
LocalpartCaseSensitive bool `sconf:"optional" sconf-doc:"If set, upper/lower case is relevant for email delivery."`
|
||||
DKIM DKIM `sconf:"optional" sconf-doc:"With DKIM signing, a domain is taking responsibility for (content of) emails it sends, letting receiving mail servers build up a (hopefully positive) reputation of the domain, which can help with mail delivery."`
|
||||
DMARC *DMARC `sconf:"optional" sconf-doc:"With DMARC, a domain publishes, in DNS, a policy on how other mail servers should handle incoming messages with the From-header matching this domain and/or subdomain (depending on the configured alignment). Receiving mail servers use this to build up a reputation of this domain, which can help with mail delivery. A domain can also publish an email address to which reports about DMARC verification results can be sent by verifying mail servers, useful for monitoring. Incoming DMARC reports are automatically parsed, validated, added to metrics and stored in the reporting database for later display in the admin web pages."`
|
||||
MTASTS *MTASTS `sconf:"optional" sconf-doc:"MTA-STS is a mechanism that allows publishing a policy with requirements for WebPKI-verified SMTP STARTTLS connections for email delivered to a domain. Existence of a policy is announced in a DNS TXT record (often unprotected/unverified, MTA-STS's weak spot). If a policy exists, it is fetched with a WebPKI-verified HTTPS request. The policy can indicate that WebPKI-verified SMTP STARTTLS is required, and which MX hosts (optionally with a wildcard pattern) are allowd. MX hosts to deliver to are still taken from DNS (again, not necessarily protected/verified), but messages will only be delivered to domains matching the MX hosts from the published policy. Mail servers look up the MTA-STS policy when first delivering to a domain, then keep a cached copy, periodically checking the DNS record if a new policy is available, and fetching and caching it if so. To update a policy, first serve a new policy with an updated policy ID, then update the DNS record (not the other way around). To remove an enforced policy, publish an updated policy with mode \"none\" for a long enough period so all cached policies have been refreshed (taking DNS TTL and policy max age into account), then remove the policy from DNS, wait for TTL to expire, and stop serving the policy."`
|
||||
TLSRPT *TLSRPT `sconf:"optional" sconf-doc:"With TLSRPT a domain specifies in DNS where reports about encountered SMTP TLS behaviour should be sent. Useful for monitoring. Incoming TLS reports are automatically parsed, validated, added to metrics and stored in the reporting database for later display in the admin web pages."`
|
||||
Routes []Route `sconf:"optional" sconf-doc:"Routes for delivering outgoing messages through the queue. Each delivery attempt evaluates account routes, these domain routes and finally global routes. The transport of the first matching route is used in the delivery attempt. If no routes match, which is the default with no configured routes, messages are delivered directly from the queue."`
|
||||
Aliases map[string]Alias `sconf:"optional" sconf-doc:"Aliases that cause messages to be delivered to one or more locally configured addresses. Keys are localparts (encoded, as they appear in email addresses)."`
|
||||
Description string `sconf:"optional" sconf-doc:"Free-form description of domain."`
|
||||
ClientSettingsDomain string `sconf:"optional" sconf-doc:"Hostname for client settings instead of the mail server hostname. E.g. mail.<domain>. For future migration to another mail operator without requiring all clients to update their settings, it is convenient to have client settings that reference a subdomain of the hosted domain instead of the hostname of the server where the mail is currently hosted. If empty, the hostname of the mail server is used for client configurations. Unicode name."`
|
||||
LocalpartCatchallSeparator string `sconf:"optional" sconf-doc:"If not empty, only the string before the separator is used to for email delivery decisions. For example, if set to \"+\", you+anything@example.com will be delivered to you@example.com."`
|
||||
LocalpartCaseSensitive bool `sconf:"optional" sconf-doc:"If set, upper/lower case is relevant for email delivery."`
|
||||
DKIM DKIM `sconf:"optional" sconf-doc:"With DKIM signing, a domain is taking responsibility for (content of) emails it sends, letting receiving mail servers build up a (hopefully positive) reputation of the domain, which can help with mail delivery."`
|
||||
DMARC *DMARC `sconf:"optional" sconf-doc:"With DMARC, a domain publishes, in DNS, a policy on how other mail servers should handle incoming messages with the From-header matching this domain and/or subdomain (depending on the configured alignment). Receiving mail servers use this to build up a reputation of this domain, which can help with mail delivery. A domain can also publish an email address to which reports about DMARC verification results can be sent by verifying mail servers, useful for monitoring. Incoming DMARC reports are automatically parsed, validated, added to metrics and stored in the reporting database for later display in the admin web pages."`
|
||||
MTASTS *MTASTS `sconf:"optional" sconf-doc:"With MTA-STS a domain publishes, in DNS, presence of a policy for using/requiring TLS for SMTP connections. The policy is served over HTTPS."`
|
||||
TLSRPT *TLSRPT `sconf:"optional" sconf-doc:"With TLSRPT a domain specifies in DNS where reports about encountered SMTP TLS behaviour should be sent. Useful for monitoring. Incoming TLS reports are automatically parsed, validated, added to metrics and stored in the reporting database for later display in the admin web pages."`
|
||||
Routes []Route `sconf:"optional" sconf-doc:"Routes for delivering outgoing messages through the queue. Each delivery attempt evaluates account routes, these domain routes and finally global routes. The transport of the first matching route is used in the delivery attempt. If no routes match, which is the default with no configured routes, messages are delivered directly from the queue."`
|
||||
|
||||
Domain dns.Domain `sconf:"-"`
|
||||
Domain dns.Domain `sconf:"-" json:"-"`
|
||||
ClientSettingsDNSDomain dns.Domain `sconf:"-" json:"-"`
|
||||
|
||||
// Set when DMARC and TLSRPT (when set) has an address with different domain (we're
|
||||
// hosting the reporting), and there are no destination addresses configured for
|
||||
// the domain. Disables some functionality related to hosting a domain.
|
||||
ReportsOnly bool `sconf:"-" json:"-"`
|
||||
LocalpartCatchallSeparatorsEffective []string `sconf:"-"` // Either LocalpartCatchallSeparators, the value of LocalpartCatchallSeparator, or empty.
|
||||
}
|
||||
|
||||
// todo: allow external addresses as members of aliases. we would add messages for them to the queue for outgoing delivery. we should require an admin addresses to which delivery failures will be delivered (locally, and to use in smtp mail from, so dsns go there). also take care to evaluate smtputf8 (if external address requires utf8 and incoming transaction didn't).
|
||||
// todo: as alternative to PostPublic, allow specifying a list of addresses (dmarc-like verified) that are (the only addresses) allowed to post to the list. if msgfrom is an external address, require a valid dkim signature to prevent dmarc-policy-related issues when delivering to remote members.
|
||||
// todo: add option to require messages sent to an alias have that alias as From or Reply-To address?
|
||||
|
||||
type Alias struct {
|
||||
Addresses []string `sconf-doc:"Expanded addresses to deliver to. These must currently be of addresses of local accounts. To prevent duplicate messages, a member address that is also an explicit recipient in the SMTP transaction will only have the message delivered once. If the address in the message From header is a member, that member also won't receive the message."`
|
||||
PostPublic bool `sconf:"optional" sconf-doc:"If true, anyone can send messages to the list. Otherwise only members, based on message From address, which is assumed to be DMARC-like-verified."`
|
||||
ListMembers bool `sconf:"optional" sconf-doc:"If true, members can see addresses of members."`
|
||||
AllowMsgFrom bool `sconf:"optional" sconf-doc:"If true, members are allowed to send messages with this alias address in the message From header."`
|
||||
|
||||
LocalpartStr string `sconf:"-"` // In encoded form.
|
||||
Domain dns.Domain `sconf:"-"`
|
||||
ParsedAddresses []AliasAddress `sconf:"-"` // Matches addresses.
|
||||
}
|
||||
|
||||
type AliasAddress struct {
|
||||
Address smtp.Address // Parsed address.
|
||||
AccountName string // Looked up.
|
||||
Destination Destination // Belonging to address.
|
||||
ReportsOnly bool `sconf:"-" json:"-"`
|
||||
}

type DMARC struct {
Localpart string `sconf-doc:"Address-part before the @ that accepts DMARC reports. Must be non-internationalized. Recommended value: dmarcreports."`
Domain string `sconf:"optional" sconf-doc:"Alternative domain for reporting address, for incoming reports. Typically empty, causing the domain wherein this config exists to be used. Can be used to receive reports for domains that aren't fully hosted on this server. Configure such a domain as a hosted domain without making all the DNS changes, and configure this field with a domain that is fully hosted on this server, so the localpart and the domain of this field form a reporting address. Then only update the DMARC DNS record for the not fully hosted domain, ensuring the reporting address is specified in its \"rua\" field as shown in the suggested DNS settings. Unicode name."`
Localpart string `sconf-doc:"Address-part before the @ that accepts DMARC reports. Must be non-internationalized. Recommended value: dmarc-reports."`
Domain string `sconf:"optional" sconf-doc:"Alternative domain for report recipient address. Can be used to receive reports for other domains. Unicode name."`
Account string `sconf-doc:"Account to deliver to."`
Mailbox string `sconf-doc:"Mailbox to deliver to, e.g. DMARC."`

ParsedLocalpart smtp.Localpart `sconf:"-"` // Lower-case if case-sensitivity is not configured for domain. Not "canonical" for catchall separators for backwards compatibility.
ParsedLocalpart smtp.Localpart `sconf:"-"`
DNSDomain dns.Domain `sconf:"-"` // Effective domain, always set based on Domain field or Domain where this is configured.
}

type MTASTS struct {
PolicyID string `sconf-doc:"Policies are versioned. The version must be specified in the DNS record. If you change a policy, first change it here to update the served policy, then update the DNS record with the updated policy ID."`
Mode mtasts.Mode `sconf-doc:"If set to \"enforce\", a remote SMTP server will not deliver email to us if it cannot make a WebPKI-verified SMTP STARTTLS connection. In mode \"testing\", deliveries can be done without verified TLS, but errors will be reported through TLS reporting. In mode \"none\", verified TLS is not required, used for phasing out an MTA-STS policy."`
PolicyID string `sconf-doc:"Policies are versioned. The version must be specified in the DNS record. If you change a policy, first change it in mox, then update the DNS record."`
Mode mtasts.Mode `sconf-doc:"testing, enforce or none. If set to enforce, a remote SMTP server will not deliver email to us if it cannot make a TLS connection."`
MaxAge time.Duration `sconf-doc:"How long a remote mail server is allowed to cache a policy. Typically 1 or several weeks."`
MX []string `sconf:"optional" sconf-doc:"List of server names allowed for SMTP. If empty, the configured hostname is set. Host names can contain a wildcard (*) as a leading label (matching a single label, e.g. *.example matches host.example, not sub.host.example)."`
// todo: parse mx as valid mtasts.Policy.MX, with dns.ParseDomain but taking wildcard into account
}
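
// The MX wildcard rule documented above (a leading "*" label matches exactly
// one label, so *.example matches host.example but not sub.host.example) can
// be sketched as follows. A standalone illustration of that stated rule, not
// mox's implementation.
package sketch

import "strings"

func mxHostMatches(pattern, host string) bool {
	if !strings.HasPrefix(pattern, "*.") {
		return strings.EqualFold(pattern, host)
	}
	_, rest, ok := strings.Cut(host, ".")
	if !ok {
		return false
	}
	// The wildcard consumes the first label; the remainder must match exactly.
	return strings.EqualFold(pattern[2:], rest)
}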

type TLSRPT struct {
Localpart string `sconf-doc:"Address-part before the @ that accepts TLSRPT reports. Recommended value: tlsreports."`
Domain string `sconf:"optional" sconf-doc:"Alternative domain for reporting address, for incoming reports. Typically empty, causing the domain wherein this config exists to be used. Can be used to receive reports for domains that aren't fully hosted on this server. Configure such a domain as a hosted domain without making all the DNS changes, and configure this field with a domain that is fully hosted on this server, so the localpart and the domain of this field form a reporting address. Then only update the TLSRPT DNS record for the not fully hosted domain, ensuring the reporting address is specified in its \"rua\" field as shown in the suggested DNS settings. Unicode name."`
Localpart string `sconf-doc:"Address-part before the @ that accepts TLSRPT reports. Recommended value: tls-reports."`
Domain string `sconf:"optional" sconf-doc:"Alternative domain for report recipient address. Can be used to receive reports for other domains. Unicode name."`
Account string `sconf-doc:"Account to deliver to."`
Mailbox string `sconf-doc:"Mailbox to deliver to, e.g. TLSRPT."`

ParsedLocalpart smtp.Localpart `sconf:"-"` // Lower-case if case-sensitivity is not configured for domain. Not "canonical" for catchall separators for backwards compatibility.
ParsedLocalpart smtp.Localpart `sconf:"-"`
DNSDomain dns.Domain `sconf:"-"` // Effective domain, always set based on Domain field or Domain where this is configured.
}

type Canonicalization struct {
HeaderRelaxed bool `sconf-doc:"If set, some modifications to the headers (mostly whitespace) are allowed."`
BodyRelaxed bool `sconf-doc:"If set, some whitespace modifications to the message body are allowed."`
}

type Selector struct {
Hash string `sconf:"optional" sconf-doc:"sha256 (default) or (older, not recommended) sha1."`
HashEffective string `sconf:"-"`
Canonicalization Canonicalization `sconf:"optional"`
Headers []string `sconf:"optional" sconf-doc:"Headers to sign with DKIM. If empty, a reasonable default set of headers is selected."`
HeadersEffective []string `sconf:"-"` // Used when signing. Based on Headers from config, or the reasonable default.
DontSealHeaders bool `sconf:"optional" sconf-doc:"If set, don't prevent duplicate headers from being added. Not recommended."`
Expiration string `sconf:"optional" sconf-doc:"Period a signature is valid after signing, as duration, e.g. 72h. The period should be enough for delivery at the final destination, potentially with several hops/relays. In the order of days at least."`
PrivateKeyFile string `sconf-doc:"Either an RSA or ed25519 private key file in PKCS8 PEM form."`
Hash string `sconf:"optional" sconf-doc:"sha256 (default) or (older, not recommended) sha1"`
HashEffective string `sconf:"-"`
Canonicalization struct {
HeaderRelaxed bool `sconf-doc:"If set, some modifications to the headers (mostly whitespace) are allowed."`
BodyRelaxed bool `sconf-doc:"If set, some whitespace modifications to the message body are allowed."`
} `sconf:"optional"`
Headers []string `sconf:"optional" sconf-doc:"Headers to sign with DKIM. If empty, a reasonable default set of headers is selected."`
HeadersEffective []string `sconf:"-"` // Used when signing. Based on Headers from config, or the reasonable default.
DontSealHeaders bool `sconf:"optional" sconf-doc:"If set, don't prevent duplicate headers from being added. Not recommended."`
Expiration string `sconf:"optional" sconf-doc:"Period a signature is valid after signing, as duration, e.g. 72h. The period should be enough for delivery at the final destination, potentially with several hops/relays. In the order of days at least."`
PrivateKeyFile string `sconf-doc:"Either an RSA or ed25519 private key file in PKCS8 PEM form."`

Algorithm string `sconf:"-"` // "ed25519", "rsa-*", based on private key.
ExpirationSeconds int `sconf:"-" json:"-"` // Parsed from Expiration.
Key crypto.Signer `sconf:"-" json:"-"` // As parsed with x509.ParsePKCS8PrivateKey.
Domain dns.Domain `sconf:"-" json:"-"` // Of selector only, not FQDN.
|
||||
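// The Key and Algorithm fields above are derived from PrivateKeyFile. A
// minimal standalone sketch of that derivation, assuming a PKCS8 PEM file and
// using x509.ParsePKCS8PrivateKey as the comment on Key indicates; the
// "rsa-<bits>" naming and the file path are assumptions, and this is not mox's
// actual loader.
package sketch

import (
	"crypto"
	"crypto/ed25519"
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
)

func loadSelectorKey(path string) (crypto.Signer, string, error) {
	buf, err := os.ReadFile(path)
	if err != nil {
		return nil, "", err
	}
	block, _ := pem.Decode(buf)
	if block == nil {
		return nil, "", fmt.Errorf("no PEM block in %s", path)
	}
	key, err := x509.ParsePKCS8PrivateKey(block.Bytes)
	if err != nil {
		return nil, "", err
	}
	switch k := key.(type) {
	case ed25519.PrivateKey:
		return k, "ed25519", nil
	case *rsa.PrivateKey:
		return k, fmt.Sprintf("rsa-%d", k.N.BitLen()), nil
	}
	return nil, "", fmt.Errorf("unsupported private key type %T", key)
}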
@ -405,66 +348,32 @@ type Route struct {

// todo: move RejectsMailbox to store.Mailbox.SpecialUse, possibly with "X" prefix?

// note: outgoing hook events are in ../queue/hooks.go, ../mox-/config.go, ../queue.go and ../webapi/gendoc.sh. keep in sync.

type OutgoingWebhook struct {
URL string `sconf-doc:"URL to POST webhooks."`
Authorization string `sconf:"optional" sconf-doc:"If not empty, value of Authorization header to add to HTTP requests."`
Events []string `sconf:"optional" sconf-doc:"Events to send outgoing delivery notifications for. If absent, all events are sent. Valid values: delivered, suppressed, delayed, failed, relayed, expanded, canceled, unrecognized."`
}

type IncomingWebhook struct {
URL string `sconf-doc:"URL to POST webhooks to for incoming deliveries over SMTP."`
Authorization string `sconf:"optional" sconf-doc:"If not empty, value of Authorization header to add to HTTP requests."`
}

type SubjectPass struct {
Period time.Duration `sconf-doc:"How long unique values are accepted after generating, e.g. 12h."` // todo: have a reasonable default for this?
}

type AutomaticJunkFlags struct {
Enabled bool `sconf-doc:"If enabled, junk/nonjunk flags will be set automatically if they match some of the regular expressions. When two of the three mailbox regular expressions are set, the remaining one will match all unmatched messages. Messages are matched in the order 'junk', 'neutral', 'not junk', and the search stops on the first match. Mailboxes are lowercased before matching."`
JunkMailboxRegexp string `sconf:"optional" sconf-doc:"Example: ^(junk|spam)."`
NeutralMailboxRegexp string `sconf:"optional" sconf-doc:"Example: ^(inbox|neutral|postmaster|dmarc|tlsrpt|rejects), and you may wish to add trash depending on how you use it, or leave this empty."`
NotJunkMailboxRegexp string `sconf:"optional" sconf-doc:"Example: .* or an empty string."`
}
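
// How the regular expressions above could be applied, per the documentation on
// Enabled: the mailbox name is lowercased and tested in the order junk,
// neutral, not-junk, stopping at the first match. A simplified standalone
// sketch (the documented fallback where the one unset regexp matches all
// unmatched messages is omitted); not mox's code.
package sketch

import (
	"regexp"
	"strings"
)

type junkFlagMatchers struct {
	junk, neutral, notJunk *regexp.Regexp // Each may be nil if not configured.
}

// classify returns "junk", "neutral", "notjunk" or "" for a mailbox name.
func (m junkFlagMatchers) classify(mailbox string) string {
	name := strings.ToLower(mailbox)
	if m.junk != nil && m.junk.MatchString(name) {
		return "junk"
	}
	if m.neutral != nil && m.neutral.MatchString(name) {
		return "neutral"
	}
	if m.notJunk != nil && m.notJunk.MatchString(name) {
		return "notjunk"
	}
	return ""
}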
|
||||
|
||||
type Account struct {
|
||||
OutgoingWebhook *OutgoingWebhook `sconf:"optional" sconf-doc:"Webhooks for events about outgoing deliveries."`
|
||||
IncomingWebhook *IncomingWebhook `sconf:"optional" sconf-doc:"Webhooks for events about incoming deliveries over SMTP."`
|
||||
FromIDLoginAddresses []string `sconf:"optional" sconf-doc:"Login addresses that cause outgoing email to be sent with SMTP MAIL FROM addresses with a unique id after the localpart catchall separator (which must be enabled when addresses are specified here). Any delivery status notifications (DSN, e.g. for bounces), can be related to the original message and recipient with unique id's. You can login to an account with any valid email address, including variants with the localpart catchall separator. You can use this mechanism to both send outgoing messages with and without unique fromid for a given email address. With the webapi and webmail, a unique id will be generated. For submission, the id from the SMTP MAIL FROM command is used if present, and a unique id is generated otherwise."`
|
||||
KeepRetiredMessagePeriod time.Duration `sconf:"optional" sconf-doc:"Period to keep messages retired from the queue (delivered or failed) around. Keeping retired messages is useful for maintaining the suppression list for transactional email, for matching incoming DSNs to sent messages, and for debugging. The time at which to clean up (remove) is calculated at retire time. E.g. 168h (1 week)."`
|
||||
KeepRetiredWebhookPeriod time.Duration `sconf:"optional" sconf-doc:"Period to keep webhooks retired from the queue (delivered or failed) around. Useful for debugging. The time at which to clean up (remove) is calculated at retire time. E.g. 168h (1 week)."`
|
||||
Domain string `sconf-doc:"Default domain for account. Deprecated behaviour: If a destination is not a full address but only a localpart, this domain is added to form a full address."`
|
||||
Description string `sconf:"optional" sconf-doc:"Free form description, e.g. full name or alternative contact info."`
|
||||
FullName string `sconf:"optional" sconf-doc:"Full name, to use in message From header when composing messages in webmail. Can be overridden per destination."`
|
||||
Destinations map[string]Destination `sconf:"optional" sconf-doc:"Destinations, keys are email addresses (with IDNA domains). All destinations are allowed for logging in with IMAP/SMTP/webmail. If no destinations are configured, the account can not login. If the address is of the form '@domain', i.e. with localpart missing, it serves as a catchall for the domain, matching all messages that are not explicitly configured. Deprecated behaviour: If the address is not a full address but a localpart, it is combined with Domain to form a full address."`
|
||||
SubjectPass struct {
|
||||
Period time.Duration `sconf-doc:"How long unique values are accepted after generating, e.g. 12h."` // todo: have a reasonable default for this?
|
||||
} `sconf:"optional" sconf-doc:"If configured, messages classified as weakly spam are rejected with instructions to retry delivery, but this time with a signed token added to the subject. During the next delivery attempt, the signed token will bypass the spam filter. Messages with a clear spam signal, such as a known bad reputation, are rejected/delayed without a signed token."`
|
||||
QuotaMessageSize int64 `sconf:"optional" sconf-doc:"Default maximum total message size in bytes for the account, overriding any globally configured default maximum size if non-zero. A negative value can be used to have no limit in case there is a limit by default. Attempting to add new messages to an account beyond its maximum total size will result in an error. Useful to prevent a single account from filling storage."`
|
||||
RejectsMailbox string `sconf:"optional" sconf-doc:"Mail that looks like spam will be rejected, but a copy can be stored temporarily in a mailbox, e.g. Rejects. If mail isn't coming in when you expect, you can look there. The mail still isn't accepted, so the remote mail server may retry (hopefully, if legitimate), or give up (hopefully, if indeed a spammer). Messages are automatically removed from this mailbox, so do not set it to a mailbox that has messages you want to keep."`
|
||||
KeepRejects bool `sconf:"optional" sconf-doc:"Don't automatically delete mail in the RejectsMailbox listed above. This can be useful, e.g. for future spam training."`
|
||||
AutomaticJunkFlags struct {
|
||||
Enabled bool `sconf-doc:"If enabled, flags will be set automatically if they match a regular expression below. When two of the three mailbox regular expressions are set, the remaining one will match all unmatched messages. Messages are matched in the order specified and the search stops on the first match. Mailboxes are lowercased before matching."`
|
||||
JunkMailboxRegexp string `sconf:"optional" sconf-doc:"Example: ^(junk|spam)."`
|
||||
NeutralMailboxRegexp string `sconf:"optional" sconf-doc:"Example: ^(inbox|neutral|postmaster|dmarc|tlsrpt|rejects), and you may wish to add trash depending on how you use it, or leave this empty."`
|
||||
NotJunkMailboxRegexp string `sconf:"optional" sconf-doc:"Example: .* or an empty string."`
|
||||
} `sconf:"optional" sconf-doc:"Automatically set $Junk and $NotJunk flags based on mailbox messages are delivered/moved/copied to. Email clients typically have too limited functionality to conveniently set these flags, especially $NonJunk, but they can all move messages to a different mailbox, so this helps them."`
|
||||
JunkFilter *JunkFilter `sconf:"optional" sconf-doc:"Content-based filtering, using the junk-status of individual messages to rank words in such messages as spam or ham. It is recommended you always set the applicable (non)-junk status on messages, and that you do not empty your Trash because those messages contain valuable ham/spam training information."` // todo: sane defaults for junkfilter
|
||||
MaxOutgoingMessagesPerDay int `sconf:"optional" sconf-doc:"Maximum number of outgoing messages for this account in a 24 hour window. This limits the damage to recipients and the reputation of this mail server in case of account compromise. Default 1000."`
|
||||
MaxFirstTimeRecipientsPerDay int `sconf:"optional" sconf-doc:"Maximum number of first-time recipients in outgoing messages for this account in a 24 hour window. This limits the damage to recipients and the reputation of this mail server in case of account compromise. Default 200."`
|
||||
Routes []Route `sconf:"optional" sconf-doc:"Routes for delivering outgoing messages through the queue. Each delivery attempt evaluates these account routes, domain routes and finally global routes. The transport of the first matching route is used in the delivery attempt. If no routes match, which is the default with no configured routes, messages are delivered directly from the queue."`
|
||||
|
||||
LoginDisabled string `sconf:"optional" sconf-doc:"If non-empty, login attempts on all protocols (e.g. SMTP/IMAP, web interfaces) are rejected with this error message. Useful during migrations. Incoming deliveries for addresses of this account are still accepted as normal."`
|
||||
Domain string `sconf-doc:"Default domain for account. Deprecated behaviour: If a destination is not a full address but only a localpart, this domain is added to form a full address."`
|
||||
Description string `sconf:"optional" sconf-doc:"Free form description, e.g. full name or alternative contact info."`
|
||||
FullName string `sconf:"optional" sconf-doc:"Full name, to use in message From header when composing messages in webmail. Can be overridden per destination."`
|
||||
Destinations map[string]Destination `sconf:"optional" sconf-doc:"Destinations, keys are email addresses (with IDNA domains). All destinations are allowed for logging in with IMAP/SMTP/webmail. If no destinations are configured, the account can not login. If the address is of the form '@domain', i.e. with localpart missing, it serves as a catchall for the domain, matching all messages that are not explicitly configured. Deprecated behaviour: If the address is not a full address but a localpart, it is combined with Domain to form a full address."`
|
||||
SubjectPass SubjectPass `sconf:"optional" sconf-doc:"If configured, messages classified as weakly spam are rejected with instructions to retry delivery, but this time with a signed token added to the subject. During the next delivery attempt, the signed token will bypass the spam filter. Messages with a clear spam signal, such as a known bad reputation, are rejected/delayed without a signed token."`
|
||||
QuotaMessageSize int64 `sconf:"optional" sconf-doc:"Default maximum total message size in bytes for the account, overriding any globally configured default maximum size if non-zero. A negative value can be used to have no limit in case there is a limit by default. Attempting to add new messages to an account beyond its maximum total size will result in an error. Useful to prevent a single account from filling storage."`
|
||||
RejectsMailbox string `sconf:"optional" sconf-doc:"Mail that looks like spam will be rejected, but a copy can be stored temporarily in a mailbox, e.g. Rejects. If mail isn't coming in when you expect, you can look there. The mail still isn't accepted, so the remote mail server may retry (hopefully, if legitimate), or give up (hopefully, if indeed a spammer). Messages are automatically removed from this mailbox, so do not set it to a mailbox that has messages you want to keep."`
|
||||
KeepRejects bool `sconf:"optional" sconf-doc:"Don't automatically delete mail in the RejectsMailbox listed above. This can be useful, e.g. for future spam training. It can also cause storage to fill up."`
|
||||
AutomaticJunkFlags AutomaticJunkFlags `sconf:"optional" sconf-doc:"Automatically set $Junk and $NotJunk flags based on mailbox messages are delivered/moved/copied to. Email clients typically have too limited functionality to conveniently set these flags, especially $NonJunk, but they can all move messages to a different mailbox, so this helps them."`
|
||||
JunkFilter *JunkFilter `sconf:"optional" sconf-doc:"Content-based filtering, using the junk-status of individual messages to rank words in such messages as spam or ham. It is recommended you always set the applicable (non)-junk status on messages, and that you do not empty your Trash because those messages contain valuable ham/spam training information."` // todo: sane defaults for junkfilter
|
||||
MaxOutgoingMessagesPerDay int `sconf:"optional" sconf-doc:"Maximum number of outgoing messages for this account in a 24 hour window. This limits the damage to recipients and the reputation of this mail server in case of account compromise. Default 1000."`
|
||||
MaxFirstTimeRecipientsPerDay int `sconf:"optional" sconf-doc:"Maximum number of first-time recipients in outgoing messages for this account in a 24 hour window. This limits the damage to recipients and the reputation of this mail server in case of account compromise. Default 200."`
|
||||
NoFirstTimeSenderDelay bool `sconf:"optional" sconf-doc:"Do not apply a delay to SMTP connections before accepting an incoming message from a first-time sender. Can be useful for accounts that send automated responses and want instant replies."`
|
||||
NoCustomPassword bool `sconf:"optional" sconf-doc:"If set, this account cannot set a password of their own choice, but can only set a new randomly generated password, preventing password reuse across services and use of weak passwords. Custom account passwords can be set by the admin."`
|
||||
Routes []Route `sconf:"optional" sconf-doc:"Routes for delivering outgoing messages through the queue. Each delivery attempt evaluates these account routes, domain routes and finally global routes. The transport of the first matching route is used in the delivery attempt. If no routes match, which is the default with no configured routes, messages are delivered directly from the queue."`
|
||||
|
||||
DNSDomain dns.Domain `sconf:"-"` // Parsed form of Domain.
|
||||
JunkMailbox *regexp.Regexp `sconf:"-" json:"-"`
|
||||
NeutralMailbox *regexp.Regexp `sconf:"-" json:"-"`
|
||||
NotJunkMailbox *regexp.Regexp `sconf:"-" json:"-"`
|
||||
ParsedFromIDLoginAddresses []smtp.Address `sconf:"-" json:"-"`
|
||||
Aliases []AddressAlias `sconf:"-"`
|
||||
}
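
// FromIDLoginAddresses above causes outgoing SMTP MAIL FROM addresses to carry
// a unique id after the localpart catchall separator. A rough standalone
// sketch of composing such an address; the id format and names here are
// assumptions, not mox's implementation.
package sketch

import (
	"crypto/rand"
	"encoding/base64"
	"fmt"
)

func mailFromWithID(localpart, domain, catchallSeparator string) (string, error) {
	buf := make([]byte, 9)
	if _, err := rand.Read(buf); err != nil {
		return "", err
	}
	fromID := base64.RawURLEncoding.EncodeToString(buf)
	return fmt.Sprintf("%s%s%s@%s", localpart, catchallSeparator, fromID, domain), nil
}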
|
||||
|
||||
type AddressAlias struct {
|
||||
SubscriptionAddress string
|
||||
Alias Alias // Without members.
|
||||
MemberAddresses []string // Only if allowed to see.
|
||||
DNSDomain dns.Domain `sconf:"-"` // Parsed form of Domain.
|
||||
JunkMailbox *regexp.Regexp `sconf:"-" json:"-"`
|
||||
NeutralMailbox *regexp.Regexp `sconf:"-" json:"-"`
|
||||
NotJunkMailbox *regexp.Regexp `sconf:"-" json:"-"`
|
||||
}
|
||||
|
||||
type JunkFilter struct {
|
||||
@ -473,19 +382,13 @@ type JunkFilter struct {
|
||||
}
|
||||
|
||||
type Destination struct {
Mailbox string `sconf:"optional" sconf-doc:"Mailbox to deliver to if none of Rulesets match. Default: Inbox."`
Rulesets []Ruleset `sconf:"optional" sconf-doc:"Delivery rules based on message and SMTP transaction. You may want to match each mailing list by SMTP MailFrom address, VerifiedDomain and/or List-ID header (typically <listname.example.org> if the list address is listname@example.org), delivering them to their own mailbox."`
SMTPError string `sconf:"optional" sconf-doc:"If non-empty, incoming delivery attempts to this destination will be rejected during SMTP RCPT TO with this error response line. Useful when a catchall address is configured for the domain and messages to some addresses should be rejected. The response line must start with an error code. Currently the following error response codes are allowed: 421 (temporary local error), 550 (user not found). If the line consists of only an error code, an appropriate error message is added. Rejecting messages with a 4xx code invites later retries by the remote, while 5xx codes should prevent further delivery attempts."`
MessageAuthRequiredSMTPError string `sconf:"optional" sconf-doc:"If non-empty, an additional DMARC-like message authentication check is done for incoming messages, validating the domain in the From-header of the message. Messages without either an aligned SPF or aligned DKIM pass are rejected during the SMTP DATA command with a permanent error code followed by the message in this field. The domain in the message 'From' header is matched in relaxed or strict mode according to the domain's DMARC policy if present, or relaxed mode (organizational instead of exact domain match) otherwise. Useful for autoresponders that don't want to accept messages they don't want to send an automated reply to."`
FullName string `sconf:"optional" sconf-doc:"Full name to use in message From header when composing messages coming from this address with webmail."`
Mailbox string `sconf:"optional" sconf-doc:"Mailbox to deliver to if none of Rulesets match. Default: Inbox."`
Rulesets []Ruleset `sconf:"optional" sconf-doc:"Delivery rules based on message and SMTP transaction. You may want to match each mailing list by SMTP MailFrom address, VerifiedDomain and/or List-ID header (typically <listname.example.org> if the list address is listname@example.org), delivering them to their own mailbox."`
FullName string `sconf:"optional" sconf-doc:"Full name to use in message From header when composing messages coming from this address with webmail."`

DMARCReports bool `sconf:"-" json:"-"`
HostTLSReports bool `sconf:"-" json:"-"`
DomainTLSReports bool `sconf:"-" json:"-"`
// Ready to use in SMTP responses.
SMTPErrorCode int `sconf:"-" json:"-"`
SMTPErrorSecode string `sconf:"-" json:"-"`
SMTPErrorMsg string `sconf:"-" json:"-"`
}
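
// A rough sketch of how an SMTPError line like "550 no such user" could be
// split into the SMTPErrorCode and SMTPErrorMsg fields above, following the
// documented rule that the line must start with error code 421 or 550 and that
// an appropriate message is added when only a code is given. Not mox's parser;
// the default messages are assumptions and the enhanced code (SMTPErrorSecode)
// is not handled here.
package sketch

import (
	"fmt"
	"strconv"
	"strings"
)

func parseSMTPError(s string) (code int, msg string, err error) {
	codeStr, rest, _ := strings.Cut(strings.TrimSpace(s), " ")
	code, err = strconv.Atoi(codeStr)
	if err != nil || (code != 421 && code != 550) {
		return 0, "", fmt.Errorf("smtp error must start with code 421 or 550: %q", s)
	}
	msg = strings.TrimSpace(rest)
	if msg == "" {
		if code == 421 {
			msg = "temporary local error" // Assumed default message.
		} else {
			msg = "user not found" // Assumed default message.
		}
	}
	return code, msg, nil
}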
|
||||
|
||||
// Equal returns whether d and o are equal, only looking at their user-changeable fields.
|
||||
@ -503,10 +406,9 @@ func (d Destination) Equal(o Destination) bool {
|
||||
|
||||
type Ruleset struct {
|
||||
SMTPMailFromRegexp string `sconf:"optional" sconf-doc:"Matches if this regular expression matches (a substring of) the SMTP MAIL FROM address (not the message From-header). E.g. '^user@example\\.org$'."`
|
||||
MsgFromRegexp string `sconf:"optional" sconf-doc:"Matches if this regular expression matches (a substring of) the single address in the message From header."`
|
||||
VerifiedDomain string `sconf:"optional" sconf-doc:"Matches if this domain matches an SPF- and/or DKIM-verified (sub)domain."`
|
||||
HeadersRegexp map[string]string `sconf:"optional" sconf-doc:"Matches if these header field/value regular expressions all match (substrings of) the message headers. Header fields and values are converted to lower case before matching. Whitespace is trimmed from the value before matching. A header field can occur multiple times in a message, only one instance has to match. For mailing lists, you could match on ^list-id$ with the value typically the mailing list address in angled brackets with @ replaced with a dot, e.g. <name\\.lists\\.example\\.org>."`
|
||||
// todo: add a SMTPRcptTo check
|
||||
// todo: add a SMTPRcptTo check, and MessageFrom that works on a properly parsed From header.
|
||||
|
||||
// todo: once we implement ARC, we can use dkim domains that we cannot verify but that the arc-verified forwarding mail server was able to verify.
|
||||
IsForward bool `sconf:"optional" sconf-doc:"Influences spam filtering only, this option does not change whether a message matches this ruleset. Can only be used together with SMTPMailFromRegexp and VerifiedDomain. SMTPMailFromRegexp must be set to the address used to deliver the forwarded message, e.g. '^user(|\\+.*)@forward\\.example$'. Changes to junk analysis: 1. Messages are not rejected for failing a DMARC policy, because a legitimate forwarded message without valid/intact/aligned DKIM signature would be rejected because any verified SPF domain will be 'unaligned', of the forwarding mail server. 2. The sending mail server IP address, and sending EHLO and MAIL FROM domains and matching DKIM domain aren't used in future reputation-based spam classifications (but other verified DKIM domains are) because the forwarding server is not a useful spam signal for future messages."`
|
||||
@ -514,10 +416,8 @@ type Ruleset struct {
|
||||
AcceptRejectsToMailbox string `sconf:"optional" sconf-doc:"Influences spam filtering only, this option does not change whether a message matches this ruleset. If a message is classified as spam, it isn't rejected during the SMTP transaction (the normal behaviour), but accepted during the SMTP transaction and delivered to the specified mailbox. The specified mailbox is not automatically cleaned up like the account global Rejects mailbox, unless set to that Rejects mailbox."`
|
||||
|
||||
Mailbox string `sconf-doc:"Mailbox to deliver to if this ruleset matches."`
|
||||
Comment string `sconf:"optional" sconf-doc:"Free-form comments."`
|
||||
|
||||
SMTPMailFromRegexpCompiled *regexp.Regexp `sconf:"-" json:"-"`
|
||||
MsgFromRegexpCompiled *regexp.Regexp `sconf:"-" json:"-"`
|
||||
VerifiedDNSDomain dns.Domain `sconf:"-"`
|
||||
HeadersRegexpCompiled [][2]*regexp.Regexp `sconf:"-" json:"-"`
|
||||
ListAllowDNSDomain dns.Domain `sconf:"-"`
|
||||
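// The HeadersRegexp matching described above (lowercased header fields and
// values, values trimmed, every configured pair must match at least one
// occurrence of that header) could look roughly like this standalone sketch;
// not mox's matcher, and the header representation is an assumption.
package sketch

import (
	"regexp"
	"strings"
)

// headerMatch reports whether every compiled [field, value] regexp pair
// matches some occurrence of a header field in the message.
func headerMatch(pairs [][2]*regexp.Regexp, headers map[string][]string) bool {
Pairs:
	for _, pair := range pairs {
		for field, values := range headers {
			if !pair[0].MatchString(strings.ToLower(field)) {
				continue
			}
			for _, value := range values {
				if pair[1].MatchString(strings.TrimSpace(strings.ToLower(value))) {
					continue Pairs
				}
			}
		}
		return false
	}
	return true
}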
@ -525,7 +425,7 @@ type Ruleset struct {
|
||||
|
||||
// Equal returns whether r and o are equal, only looking at their user-changeable fields.
|
||||
func (r Ruleset) Equal(o Ruleset) bool {
|
||||
if r.SMTPMailFromRegexp != o.SMTPMailFromRegexp || r.MsgFromRegexp != o.MsgFromRegexp || r.VerifiedDomain != o.VerifiedDomain || r.IsForward != o.IsForward || r.ListAllowDomain != o.ListAllowDomain || r.AcceptRejectsToMailbox != o.AcceptRejectsToMailbox || r.Mailbox != o.Mailbox || r.Comment != o.Comment {
|
||||
if r.SMTPMailFromRegexp != o.SMTPMailFromRegexp || r.VerifiedDomain != o.VerifiedDomain || r.IsForward != o.IsForward || r.ListAllowDomain != o.ListAllowDomain || r.AcceptRejectsToMailbox != o.AcceptRejectsToMailbox || r.Mailbox != o.Mailbox {
|
||||
return false
|
||||
}
|
||||
if !reflect.DeepEqual(r.HeadersRegexp, o.HeadersRegexp) {
|
||||
@ -544,27 +444,22 @@ type TLS struct {
|
||||
KeyCerts []KeyCert `sconf:"optional" sconf-doc:"Keys and certificates to use for this listener. The files are opened by the privileged root process and passed to the unprivileged mox process, so no special permissions are required on the files. If the private key will not be replaced when refreshing certificates, also consider adding the private key to HostPrivateKeyFiles and configuring DANE TLSA DNS records."`
|
||||
MinVersion string `sconf:"optional" sconf-doc:"Minimum TLS version. Default: TLSv1.2."`
|
||||
HostPrivateKeyFiles []string `sconf:"optional" sconf-doc:"Private keys used for ACME certificates. Specified explicitly so DANE TLSA DNS records can be generated, even before the certificates are requested. DANE is a mechanism to authenticate remote TLS certificates based on a public key or certificate specified in DNS, protected with DNSSEC. DANE is opportunistic and attempted when delivering SMTP with STARTTLS. The private key files must be in PEM format. PKCS8 is recommended, but PKCS1 and EC private keys are recognized as well. Only RSA 2048 bit and ECDSA P-256 keys are currently used. The first of each is used when requesting new certificates through ACME."`
|
||||
ClientAuthDisabled bool `sconf:"optional" sconf-doc:"Disable TLS client authentication with certificates/keys, preventing the TLS server from requesting a TLS certificate from clients. Useful for working around clients that don't handle TLS client authentication well."`
|
||||
|
||||
Config *tls.Config `sconf:"-" json:"-"` // TLS config for non-ACME-verification connections, i.e. SMTP and IMAP, and not port 443. Connections without SNI will use a certificate for the hostname of the listener, connections with an SNI hostname that isn't allowed will be rejected.
|
||||
ConfigFallback *tls.Config `sconf:"-" json:"-"` // Like Config, but uses the certificate for the listener hostname when the requested SNI hostname is not allowed, instead of causing the connection to fail.
|
||||
Config *tls.Config `sconf:"-" json:"-"` // TLS config for non-ACME-verification connections, i.e. SMTP and IMAP, and not port 443.
|
||||
ACMEConfig *tls.Config `sconf:"-" json:"-"` // TLS config that handles ACME verification, for serving on port 443.
|
||||
HostPrivateRSA2048Keys []crypto.Signer `sconf:"-" json:"-"` // Private keys for new TLS certificates for listener host name, for new certificates with ACME, and for DANE records.
|
||||
HostPrivateECDSAP256Keys []crypto.Signer `sconf:"-" json:"-"`
|
||||
}
|
||||
|
||||
// todo: we could implement matching WebHandler.Domain as IPs too
|
||||
|
||||
type WebHandler struct {
|
||||
LogName string `sconf:"optional" sconf-doc:"Name to use in logging and metrics."`
|
||||
Domain string `sconf-doc:"Both Domain and PathRegexp must match for this WebHandler to match a request. Exactly one of WebStatic, WebRedirect, WebForward, WebInternal must be set."`
|
||||
Domain string `sconf-doc:"Both Domain and PathRegexp must match for this WebHandler to match a request. Exactly one of WebStatic, WebRedirect, WebForward must be set."`
|
||||
PathRegexp string `sconf-doc:"Regular expression matched against request path, must always start with ^ to ensure matching from the start of the path. The matching prefix can optionally be stripped by WebForward. The regular expression does not have to end with $."`
|
||||
DontRedirectPlainHTTP bool `sconf:"optional" sconf-doc:"If set, plain HTTP requests are not automatically permanently redirected (308) to HTTPS. If you don't have a HTTPS webserver configured, set this to true."`
|
||||
Compress bool `sconf:"optional" sconf-doc:"Transparently compress responses (currently with gzip) if the client supports it, the status is 200 OK, no Content-Encoding is set on the response yet and the Content-Type of the response hints that the data is compressible (text/..., specific application/... and .../...+json and .../...+xml). For static files only, a cache with compressed files is kept."`
|
||||
WebStatic *WebStatic `sconf:"optional" sconf-doc:"Serve static files."`
|
||||
WebRedirect *WebRedirect `sconf:"optional" sconf-doc:"Redirect requests to configured URL."`
|
||||
WebForward *WebForward `sconf:"optional" sconf-doc:"Forward requests to another webserver, i.e. reverse proxy."`
|
||||
WebInternal *WebInternal `sconf:"optional" sconf-doc:"Pass request to internal service, like webmail, webapi, etc."`
|
||||
|
||||
Name string `sconf:"-"` // Either LogName, or numeric index if LogName was empty. Used instead of LogName in logging/metrics.
|
||||
DNSDomain dns.Domain `sconf:"-"`
|
||||
@ -580,7 +475,6 @@ func (wh WebHandler) Equal(o WebHandler) bool {
|
||||
x.WebStatic = nil
|
||||
x.WebRedirect = nil
|
||||
x.WebForward = nil
|
||||
x.WebInternal = nil
|
||||
return x
|
||||
}
|
||||
cwh := clean(wh)
|
||||
@ -588,7 +482,7 @@ func (wh WebHandler) Equal(o WebHandler) bool {
|
||||
if cwh != co {
|
||||
return false
|
||||
}
|
||||
if (wh.WebStatic == nil) != (o.WebStatic == nil) || (wh.WebRedirect == nil) != (o.WebRedirect == nil) || (wh.WebForward == nil) != (o.WebForward == nil) || (wh.WebInternal == nil) != (o.WebInternal == nil) {
|
||||
if (wh.WebStatic == nil) != (o.WebStatic == nil) || (wh.WebRedirect == nil) != (o.WebRedirect == nil) || (wh.WebForward == nil) != (o.WebForward == nil) {
|
||||
return false
|
||||
}
|
||||
if wh.WebStatic != nil {
|
||||
@ -600,9 +494,6 @@ func (wh WebHandler) Equal(o WebHandler) bool {
|
||||
if wh.WebForward != nil {
|
||||
return wh.WebForward.equal(*o.WebForward)
|
||||
}
|
||||
if wh.WebInternal != nil {
|
||||
return wh.WebInternal.equal(*o.WebInternal)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
@ -645,16 +536,3 @@ func (wf WebForward) equal(o WebForward) bool {
|
||||
o.TargetURL = nil
|
||||
return reflect.DeepEqual(wf, o)
|
||||
}
|
||||
|
||||
type WebInternal struct {
|
||||
BasePath string `sconf-doc:"Path to use as root of internal service, e.g. /webmail/."`
|
||||
Service string `sconf-doc:"Name of the service, values: admin, account, webmail, webapi."`
|
||||
|
||||
Handler http.Handler `sconf:"-" json:"-"`
|
||||
}
|
||||
|
||||
func (wi WebInternal) equal(o WebInternal) bool {
|
||||
wi.Handler = nil
|
||||
o.Handler = nil
|
||||
return reflect.DeepEqual(wi, o)
|
||||
}
|
||||
|
390
config/doc.go
@ -113,11 +113,8 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
|
||||
# TLS port for ACME validation, 443 by default. You should only override this if
|
||||
# you cannot listen on port 443 directly. ACME will make requests to port 443, so
|
||||
# you'll have to add an external mechanism to get the tls connection here, e.g. by
|
||||
# configuring firewall-level port forwarding. Validation over the https port uses
|
||||
# tls-alpn-01 with application-layer protocol negotiation, which essentially means
|
||||
# the original tls connection must make it here unmodified, an https reverse proxy
|
||||
# will not work. (optional)
|
||||
# you'll have to add an external mechanism to get the connection here, e.g. by
|
||||
# configuring port forwarding. (optional)
|
||||
Port: 0
|
||||
|
||||
# If set, used for suggested CAA DNS records, for restricting TLS certificate
|
||||
@ -156,11 +153,7 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
|
||||
# Use 0.0.0.0 to listen on all IPv4 and/or :: to listen on all IPv6 addresses, but
|
||||
# it is better to explicitly specify the IPs you want to use for email, as mox
|
||||
# will make sure outgoing connections will only be made from one of those IPs. If
|
||||
# both outgoing IPv4 and IPv6 connectivity is possible, and only one family has
|
||||
# explicitly configured addresses, both address families are still used for
|
||||
# outgoing connections. Use the "direct" transport to limit address families for
|
||||
# outgoing connections.
|
||||
# will make sure outgoing connections will only be made from one of those IPs.
|
||||
IPs:
|
||||
-
|
||||
|
||||
@ -175,10 +168,7 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
# NATed. Skips IP-related DNS self-checks. (optional)
|
||||
IPsNATed: false
|
||||
|
||||
# If empty, the config global Hostname is used. The internal services webadmin,
|
||||
# webaccount, webmail and webapi only match requests to IPs, this hostname,
|
||||
# "localhost". All except webadmin also match for any client settings domain.
|
||||
# (optional)
|
||||
# If empty, the config global Hostname is used. (optional)
|
||||
Hostname:
|
||||
|
||||
# For SMTP/IMAP STARTTLS, direct TLS and HTTPS connections. (optional)
|
||||
@ -217,11 +207,6 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
HostPrivateKeyFiles:
|
||||
-
|
||||
|
||||
# Disable TLS client authentication with certificates/keys, preventing the TLS
|
||||
# server from requesting a TLS certificate from clients. Useful for working around
|
||||
# clients that don't handle TLS client authentication well. (optional)
|
||||
ClientAuthDisabled: false
|
||||
|
||||
# Maximum size in bytes for incoming and outgoing messages. Default is 100MB.
|
||||
# (optional)
|
||||
SMTPMaxMessageSize: 0
|
||||
@ -267,10 +252,6 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
# account. Default: 15s. (optional)
|
||||
FirstTimeSenderDelay: 0s
|
||||
|
||||
# Override default setting for enabling TLS session tickets. Disabling session
|
||||
# tickets may work around TLS interoperability issues. (optional)
|
||||
TLSSessionTicketsDisabled: false
|
||||
|
||||
# SMTP for submitting email, e.g. by email applications. Starts out in plain text,
|
||||
# can be upgraded to TLS with the STARTTLS command. Prefer using Submissions which
|
||||
# is always a TLS connection. (optional)
|
||||
@ -292,14 +273,6 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
# Default 465. (optional)
|
||||
Port: 0
|
||||
|
||||
# Additionally enable submission on HTTPS port 443 via TLS ALPN. TLS Application
|
||||
# Layer Protocol Negotiation allows clients to request a specific protocol from
|
||||
# the server as part of the TLS connection setup. When this setting is enabled and
|
||||
# a client requests the 'smtp' protocol after TLS, it will be able to talk SMTP to
|
||||
# Mox on port 443. This is meant to be useful as a censorship circumvention
|
||||
# technique for Delta Chat. (optional)
|
||||
EnabledOnHTTPS: false
|
||||
|
||||
# IMAP for reading email, by email applications. Starts out in plain text, can be
|
||||
# upgraded to TLS with the STARTTLS command. Prefer using IMAPS instead which is
|
||||
# always a TLS connection. (optional)
|
||||
@ -321,25 +294,15 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
# Default 993. (optional)
|
||||
Port: 0
|
||||
|
||||
# Additionally enable IMAP on HTTPS port 443 via TLS ALPN. TLS Application Layer
|
||||
# Protocol Negotiation allows clients to request a specific protocol from the
|
||||
# server as part of the TLS connection setup. When this setting is enabled and a
|
||||
# client requests the 'imap' protocol after TLS, it will be able to talk IMAP to
|
||||
# Mox on port 443. This is meant to be useful as a censorship circumvention
|
||||
# technique for Delta Chat. (optional)
|
||||
EnabledOnHTTPS: false
|
||||
|
||||
# Account web interface, for email users wanting to change their accounts, e.g.
|
||||
# set new password, set new delivery rulesets. Default path is /. (optional)
|
||||
AccountHTTP:
|
||||
Enabled: false
|
||||
|
||||
# Default 80 for HTTP and 443 for HTTPS. See Hostname at Listener for hostname
|
||||
# matching behaviour. (optional)
|
||||
# Default 80 for HTTP and 443 for HTTPS. (optional)
|
||||
Port: 0
|
||||
|
||||
# Path to serve requests on. Should end with a slash, related to cookie paths.
|
||||
# (optional)
|
||||
# Path to serve requests on. (optional)
|
||||
Path:
|
||||
|
||||
# If set, X-Forwarded-* headers are used for the remote IP address for rate
|
||||
@ -351,12 +314,10 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
AccountHTTPS:
|
||||
Enabled: false
|
||||
|
||||
# Default 80 for HTTP and 443 for HTTPS. See Hostname at Listener for hostname
|
||||
# matching behaviour. (optional)
|
||||
# Default 80 for HTTP and 443 for HTTPS. (optional)
|
||||
Port: 0
|
||||
|
||||
# Path to serve requests on. Should end with a slash, related to cookie paths.
|
||||
# (optional)
|
||||
# Path to serve requests on. (optional)
|
||||
Path:
|
||||
|
||||
# If set, X-Forwarded-* headers are used for the remote IP address for rate
|
||||
@ -371,12 +332,10 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
AdminHTTP:
|
||||
Enabled: false
|
||||
|
||||
# Default 80 for HTTP and 443 for HTTPS. See Hostname at Listener for hostname
|
||||
# matching behaviour. (optional)
|
||||
# Default 80 for HTTP and 443 for HTTPS. (optional)
|
||||
Port: 0
|
||||
|
||||
# Path to serve requests on. Should end with a slash, related to cookie paths.
|
||||
# (optional)
|
||||
# Path to serve requests on. (optional)
|
||||
Path:
|
||||
|
||||
# If set, X-Forwarded-* headers are used for the remote IP address for rate
|
||||
@ -388,12 +347,10 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
AdminHTTPS:
|
||||
Enabled: false
|
||||
|
||||
# Default 80 for HTTP and 443 for HTTPS. See Hostname at Listener for hostname
|
||||
# matching behaviour. (optional)
|
||||
# Default 80 for HTTP and 443 for HTTPS. (optional)
|
||||
Port: 0
|
||||
|
||||
# Path to serve requests on. Should end with a slash, related to cookie paths.
|
||||
# (optional)
|
||||
# Path to serve requests on. (optional)
|
||||
Path:
|
||||
|
||||
# If set, X-Forwarded-* headers are used for the remote IP address for rate
|
||||
@ -404,12 +361,10 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
WebmailHTTP:
|
||||
Enabled: false
|
||||
|
||||
# Default 80 for HTTP and 443 for HTTPS. See Hostname at Listener for hostname
|
||||
# matching behaviour. (optional)
|
||||
# Default 80 for HTTP and 443 for HTTPS. (optional)
|
||||
Port: 0
|
||||
|
||||
# Path to serve requests on. Should end with a slash, related to cookie paths.
|
||||
# (optional)
|
||||
# Path to serve requests on. (optional)
|
||||
Path:
|
||||
|
||||
# If set, X-Forwarded-* headers are used for the remote IP address for rate
|
||||
@ -421,45 +376,10 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
WebmailHTTPS:
|
||||
Enabled: false
|
||||
|
||||
# Default 80 for HTTP and 443 for HTTPS. See Hostname at Listener for hostname
|
||||
# matching behaviour. (optional)
|
||||
# Default 80 for HTTP and 443 for HTTPS. (optional)
|
||||
Port: 0
|
||||
|
||||
# Path to serve requests on. Should end with a slash, related to cookie paths.
|
||||
# (optional)
|
||||
Path:
|
||||
|
||||
# If set, X-Forwarded-* headers are used for the remote IP address for rate
|
||||
# limiting and for the "secure" status of cookies. (optional)
|
||||
Forwarded: false
|
||||
|
||||
# Like WebAPIHTTP, but with plain HTTP, without TLS. (optional)
|
||||
WebAPIHTTP:
|
||||
Enabled: false
|
||||
|
||||
# Default 80 for HTTP and 443 for HTTPS. See Hostname at Listener for hostname
|
||||
# matching behaviour. (optional)
|
||||
Port: 0
|
||||
|
||||
# Path to serve requests on. Should end with a slash, related to cookie paths.
|
||||
# (optional)
|
||||
Path:
|
||||
|
||||
# If set, X-Forwarded-* headers are used for the remote IP address for rate
|
||||
# limiting and for the "secure" status of cookies. (optional)
|
||||
Forwarded: false
|
||||
|
||||
# WebAPI, a simple HTTP/JSON-based API for email, with HTTPS (requires a TLS
|
||||
# config). Default path is /webapi/. (optional)
|
||||
WebAPIHTTPS:
|
||||
Enabled: false
|
||||
|
||||
# Default 80 for HTTP and 443 for HTTPS. See Hostname at Listener for hostname
|
||||
# matching behaviour. (optional)
|
||||
Port: 0
|
||||
|
||||
# Path to serve requests on. Should end with a slash, related to cookie paths.
|
||||
# (optional)
|
||||
# Path to serve requests on. (optional)
|
||||
Path:
|
||||
|
||||
# If set, X-Forwarded-* headers are used for the remote IP address for rate
|
||||
@ -519,9 +439,6 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
# Port for plain HTTP (non-TLS) webserver. (optional)
|
||||
Port: 0
|
||||
|
||||
# Disable rate limiting for all requests to this port. (optional)
|
||||
RateLimitDisabled: false
|
||||
|
||||
# All configured WebHandlers will serve on an enabled listener. Either ACME must
|
||||
# be configured, or for each WebHandler domain a TLS certificate must be
|
||||
# configured. (optional)
|
||||
@ -531,9 +448,6 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
# Port for HTTPS webserver. (optional)
|
||||
Port: 0
|
||||
|
||||
# Disable rate limiting for all requests to this port. (optional)
|
||||
RateLimitDisabled: false
|
||||
|
||||
# Destination for emails delivered to postmaster addresses: a plain 'postmaster'
|
||||
# without domain, 'postmaster@<hostname>' (also for each listener with SMTP
|
||||
# enabled), and as fallback for each domain without explicitly configured
|
||||
@ -557,13 +471,13 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
# Mailbox to deliver TLS reports to. Recommended value: TLSRPT.
|
||||
Mailbox:
|
||||
|
||||
# Localpart at hostname to accept TLS reports at. Recommended value: tlsreports.
|
||||
# Localpart at hostname to accept TLS reports at. Recommended value: tls-reports.
|
||||
Localpart:
|
||||
|
||||
# Mailboxes to create for new accounts. Inbox is always created. Mailboxes can be
|
||||
# given a 'special-use' role, which are understood by most mail clients. If
|
||||
# absent/empty, the following additional mailboxes are created: Sent, Archive,
|
||||
# Trash, Drafts and Junk. (optional)
|
||||
# absent/empty, the following mailboxes are created: Sent, Archive, Trash, Drafts
|
||||
# and Junk. (optional)
|
||||
InitialMailboxes:
|
||||
|
||||
# Special-use roles to mailbox to create. (optional)
|
||||
@ -724,28 +638,6 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
# typically the hostname of the host in the Address field.
|
||||
RemoteHostname:
|
||||
|
||||
# Like regular direct delivery, but allows tweaking outgoing connections.
|
||||
# (optional)
|
||||
Direct:
|
||||
|
||||
# If set, outgoing SMTP connections will *NOT* use IPv4 addresses to connect to
|
||||
# remote SMTP servers. (optional)
|
||||
DisableIPv4: false
|
||||
|
||||
# If set, outgoing SMTP connections will *NOT* use IPv6 addresses to connect to
|
||||
# remote SMTP servers. (optional)
|
||||
DisableIPv6: false
|
||||
|
||||
# Immediately fails the delivery attempt. (optional)
|
||||
Fail:
|
||||
|
||||
# SMTP error code and optional enhanced error code to use for the failure. If
|
||||
# empty, 554 is used (transaction failed). (optional)
|
||||
SMTPCode: 0
|
||||
|
||||
# Message to include for the rejection. It will be shown in the DSN. (optional)
|
||||
SMTPMessage:
|
||||
|
||||
# Do not send DMARC reports (aggregate only). By default, aggregate reports on
|
||||
# DMARC evaluations are sent to domains if their DMARC policy requests them.
|
||||
# Reports are sent at whole hours, with a minimum of 1 hour and maximum of 24
|
||||
@ -789,19 +681,6 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
Domains:
|
||||
x:
|
||||
|
||||
# Disabled domains can be useful during/before migrations. Domains that are
|
||||
# disabled can still be configured like normal, including adding addresses using
|
||||
# the domain to accounts. However, disabled domains: 1. Do not try to fetch ACME
|
||||
# certificates. TLS connections to host names involving the email domain will
|
||||
# fail. A TLS certificate for the hostname (that will be used as MX) itself will be
|
||||
# requested. 2. Incoming deliveries over SMTP are rejected with a temporary error
|
||||
# '450 4.2.1 recipient domain temporarily disabled'. 3. Submissions over SMTP
|
||||
# using an (envelope) SMTP MAIL FROM address or message 'From' address of a
|
||||
# disabled domain will be rejected with a temporary error '451 4.3.0 sender domain
|
||||
# temporarily disabled'. Note that accounts with addresses at disabled domains can
|
||||
# still log in and read email (unless the account itself is disabled). (optional)
|
||||
Disabled: false
|
||||
|
||||
# Free-form description of domain. (optional)
|
||||
Description:
|
||||
|
||||
@ -818,14 +697,6 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
# delivered to you@example.com. (optional)
|
||||
LocalpartCatchallSeparator:
|
||||
|
||||
# Similar to LocalpartCatchallSeparator, but in case multiple are needed. For
|
||||
# example both "+" and "-". Only of one LocalpartCatchallSeparator or
|
||||
# LocalpartCatchallSeparators can be set. If set, the first separator is used to
|
||||
# make unique addresses for outgoing SMTP connections with FromIDLoginAddresses.
|
||||
# (optional)
|
||||
LocalpartCatchallSeparators:
|
||||
-
|
||||
|
||||
# If set, upper/lower case is relevant for email delivery. (optional)
|
||||
LocalpartCaseSensitive: false
|
||||
|
||||
@ -840,7 +711,7 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
Selectors:
|
||||
x:
|
||||
|
||||
# sha256 (default) or (older, not recommended) sha1. (optional)
|
||||
# sha256 (default) or (older, not recommended) sha1 (optional)
|
||||
Hash:
|
||||
|
||||
# (optional)
|
||||
@ -885,18 +756,11 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
DMARC:
|
||||
|
||||
# Address-part before the @ that accepts DMARC reports. Must be
|
||||
# non-internationalized. Recommended value: dmarcreports.
|
||||
# non-internationalized. Recommended value: dmarc-reports.
|
||||
Localpart:
|
||||
|
||||
# Alternative domain for reporting address, for incoming reports. Typically empty,
|
||||
# causing the domain wherein this config exists to be used. Can be used to receive
|
||||
# reports for domains that aren't fully hosted on this server. Configure such a
|
||||
# domain as a hosted domain without making all the DNS changes, and configure this
|
||||
# field with a domain that is fully hosted on this server, so the localpart and
|
||||
# the domain of this field form a reporting address. Then only update the DMARC
|
||||
# DNS record for the not fully hosted domain, ensuring the reporting address is
|
||||
# specified in its "rua" field as shown in the suggested DNS settings. Unicode
|
||||
# name. (optional)
|
||||
# Alternative domain for report recipient address. Can be used to receive reports
|
||||
# for other domains. Unicode name. (optional)
|
||||
Domain:
|
||||
|
||||
# Account to deliver to.
|
||||
@ -905,35 +769,17 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
# Mailbox to deliver to, e.g. DMARC.
|
||||
Mailbox:
|
||||
|
||||
# MTA-STS is a mechanism that allows publishing a policy with requirements for
|
||||
# WebPKI-verified SMTP STARTTLS connections for email delivered to a domain.
|
||||
# Existence of a policy is announced in a DNS TXT record (often
|
||||
# unprotected/unverified, MTA-STS's weak spot). If a policy exists, it is fetched
|
||||
# with a WebPKI-verified HTTPS request. The policy can indicate that
|
||||
# WebPKI-verified SMTP STARTTLS is required, and which MX hosts (optionally with a
|
||||
# wildcard pattern) are allowed. MX hosts to deliver to are still taken from DNS
|
||||
# (again, not necessarily protected/verified), but messages will only be delivered
|
||||
# to domains matching the MX hosts from the published policy. Mail servers look up
|
||||
# the MTA-STS policy when first delivering to a domain, then keep a cached copy,
|
||||
# periodically checking the DNS record if a new policy is available, and fetching
|
||||
# and caching it if so. To update a policy, first serve a new policy with an
|
||||
# updated policy ID, then update the DNS record (not the other way around). To
|
||||
# remove an enforced policy, publish an updated policy with mode "none" for a long
|
||||
# enough period so all cached policies have been refreshed (taking DNS TTL and
|
||||
# policy max age into account), then remove the policy from DNS, wait for TTL to
|
||||
# expire, and stop serving the policy. (optional)
|
||||
# With MTA-STS a domain publishes, in DNS, presence of a policy for
|
||||
# using/requiring TLS for SMTP connections. The policy is served over HTTPS.
|
||||
# (optional)
|
||||
MTASTS:
|
||||
|
||||
# Policies are versioned. The version must be specified in the DNS record. If you
|
||||
# change a policy, first change it here to update the served policy, then update
|
||||
# the DNS record with the updated policy ID.
|
||||
# change a policy, first change it in mox, then update the DNS record.
|
||||
PolicyID:
|
||||
|
||||
# If set to "enforce", a remote SMTP server will not deliver email to us if it
|
||||
# cannot make a WebPKI-verified SMTP STARTTLS connection. In mode "testing",
|
||||
# deliveries can be done without verified TLS, but errors will be reported through
|
||||
# TLS reporting. In mode "none", verified TLS is not required, used for phasing
|
||||
# out an MTA-STS policy.
|
||||
# testing, enforce or none. If set to enforce, a remote SMTP server will not
|
||||
# deliver email to us if it cannot make a TLS connection.
|
||||
Mode:
|
||||
|
||||
# How long a remote mail server is allowed to cache a policy. Typically 1 or
|
||||
@ -953,18 +799,11 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
TLSRPT:
|
||||
|
||||
# Address-part before the @ that accepts TLSRPT reports. Recommended value:
|
||||
# tlsreports.
|
||||
# tls-reports.
|
||||
Localpart:
|
||||
|
||||
# Alternative domain for reporting address, for incoming reports. Typically empty,
|
||||
# causing the domain wherein this config exists to be used. Can be used to receive
|
||||
# reports for domains that aren't fully hosted on this server. Configure such a
|
||||
# domain as a hosted domain without making all the DNS changes, and configure this
|
||||
# field with a domain that is fully hosted on this server, so the localpart and
|
||||
# the domain of this field form a reporting address. Then only update the TLSRPT
|
||||
# DNS record for the not fully hosted domain, ensuring the reporting address is
|
||||
# specified in its "rua" field as shown in the suggested DNS settings. Unicode
|
||||
# name. (optional)
|
||||
# Alternative domain for report recipient address. Can be used to receive reports
|
||||
# for other domains. Unicode name. (optional)
|
||||
Domain:
|
||||
|
||||
# Account to deliver to.
|
||||
@ -997,31 +836,6 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
MinimumAttempts: 0
|
||||
Transport:
|
||||
|
||||
# Aliases that cause messages to be delivered to one or more locally configured
|
||||
# addresses. Keys are localparts (encoded, as they appear in email addresses).
|
||||
# (optional)
|
||||
Aliases:
|
||||
x:
|
||||
|
||||
# Expanded addresses to deliver to. These must currently be of addresses of local
|
||||
# accounts. To prevent duplicate messages, a member address that is also an
|
||||
# explicit recipient in the SMTP transaction will only have the message delivered
|
||||
# once. If the address in the message From header is a member, that member also
|
||||
# won't receive the message.
|
||||
Addresses:
|
||||
-
|
||||
|
||||
# If true, anyone can send messages to the list. Otherwise only members, based on
|
||||
# message From address, which is assumed to be DMARC-like-verified. (optional)
|
||||
PostPublic: false
|
||||
|
||||
# If true, members can see addresses of members. (optional)
|
||||
ListMembers: false
|
||||
|
||||
# If true, members are allowed to send messages with this alias address in the
|
||||
# message From header. (optional)
|
||||
AllowMsgFrom: false
|
||||
|
||||
# Accounts represent mox users, each with a password and email address(es) to
|
||||
# which email can be delivered (possibly at different domains). Each account has
|
||||
# its own on-disk directory holding its messages and index database. An account
|
||||
@ -1029,61 +843,6 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
Accounts:
|
||||
x:
|
||||
|
||||
# Webhooks for events about outgoing deliveries. (optional)
OutgoingWebhook:

# URL to POST webhooks.
URL:

# If not empty, value of Authorization header to add to HTTP requests. (optional)
Authorization:

# Events to send outgoing delivery notifications for. If absent, all events are
# sent. Valid values: delivered, suppressed, delayed, failed, relayed, expanded,
# canceled, unrecognized. (optional)
Events:
-
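Filled in, such a webhook block might look like the hypothetical fragment below; the
URL and Authorization values are made up, and the indentation only indicates nesting
under the account:

```
OutgoingWebhook:
	URL: https://example.com/mox/hooks
	Authorization: Bearer 0123456789abcdef
	Events:
		- delivered
		- failed
```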
|
||||
|
||||
# Webhooks for events about incoming deliveries over SMTP. (optional)
|
||||
IncomingWebhook:
|
||||
|
||||
# URL to POST webhooks to for incoming deliveries over SMTP.
|
||||
URL:
|
||||
|
||||
# If not empty, value of Authorization header to add to HTTP requests. (optional)
|
||||
Authorization:
|
||||
|
||||
# Login addresses that cause outgoing email to be sent with SMTP MAIL FROM
# addresses with a unique id after the localpart catchall separator (which must be
# enabled when addresses are specified here). Any delivery status notifications
# (DSN, e.g. for bounces) can be related to the original message and recipient
# with unique ids. You can login to an account with any valid email address,
# including variants with the localpart catchall separator. You can use this
# mechanism to both send outgoing messages with and without unique fromid for a
# given email address. With the webapi and webmail, a unique id will be generated.
# For submission, the id from the SMTP MAIL FROM command is used if present, and a
# unique id is generated otherwise. (optional)
FromIDLoginAddresses:
-
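As an illustration, assuming "+" is the configured localpart catchall separator and the
addresses are hypothetical: with "newsletter@example.com" listed here, a submitted
message may leave with a MAIL FROM like the one below, so a later DSN addressed to it
can be matched back to the original message and recipient:

```
MAIL FROM:<newsletter+6zyyrff3kh3qyt0@example.com>
```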
|
||||
|
||||
# Period to keep messages retired from the queue (delivered or failed) around.
|
||||
# Keeping retired messages is useful for maintaining the suppression list for
|
||||
# transactional email, for matching incoming DSNs to sent messages, and for
|
||||
# debugging. The time at which to clean up (remove) is calculated at retire time.
|
||||
# E.g. 168h (1 week). (optional)
|
||||
KeepRetiredMessagePeriod: 0s
|
||||
|
||||
# Period to keep webhooks retired from the queue (delivered or failed) around.
|
||||
# Useful for debugging. The time at which to clean up (remove) is calculated at
|
||||
# retire time. E.g. 168h (1 week). (optional)
|
||||
KeepRetiredWebhookPeriod: 0s
|
||||
|
||||
# If non-empty, login attempts on all protocols (e.g. SMTP/IMAP, web interfaces)
|
||||
# are rejected with this error message. Useful during migrations. Incoming
|
||||
# deliveries for addresses of this account are still accepted as normal.
|
||||
# (optional)
|
||||
LoginDisabled:
|
||||
|
||||
# Default domain for account. Deprecated behaviour: If a destination is not a full
|
||||
# address but only a localpart, this domain is added to form a full address.
|
||||
Domain:
|
||||
@ -1119,10 +878,6 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
# address (not the message From-header). E.g. '^user@example\.org$'. (optional)
|
||||
SMTPMailFromRegexp:
|
||||
|
||||
# Matches if this regular expression matches (a substring of) the single address
|
||||
# in the message From header. (optional)
|
||||
MsgFromRegexp:
|
||||
|
||||
# Matches if this domain matches an SPF- and/or DKIM-verified (sub)domain.
|
||||
# (optional)
|
||||
VerifiedDomain:
|
||||
@ -1173,31 +928,6 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
# Mailbox to deliver to if this ruleset matches.
|
||||
Mailbox:
|
||||
|
||||
# Free-form comments. (optional)
|
||||
Comment:
|
||||
|
||||
# If non-empty, incoming delivery attempts to this destination will be rejected
# during SMTP RCPT TO with this error response line. Useful when a catchall
# address is configured for the domain and messages to some addresses should be
# rejected. The response line must start with an error code. Currently the
# following error response codes are allowed: 421 (temporary local error), 550
# (user not found). If the line consists of only an error code, an appropriate
# error message is added. Rejecting messages with a 4xx code invites later retries
# by the remote, while 5xx codes should prevent further delivery attempts.
# (optional)
SMTPError:
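For example (the message text is illustrative; a bare error code also works, in which
case a default message is added):

```
SMTPError: 550 no longer accepting mail for this address
```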
|
||||
|
||||
# If non-empty, an additional DMARC-like message authentication check is done for
|
||||
# incoming messages, validating the domain in the From-header of the message.
|
||||
# Messages without either an aligned SPF or aligned DKIM pass are rejected during
|
||||
# the SMTP DATA command with a permanent error code followed by the message in
|
||||
# this field. The domain in the message 'From' header is matched in relaxed or
|
||||
# strict mode according to the domain's DMARC policy if present, or relaxed mode
|
||||
# (organizational instead of exact domain match) otherwise. Useful for
|
||||
# autoresponders that don't want to accept messages they don't want to send an
|
||||
# automated reply to. (optional)
|
||||
MessageAuthRequiredSMTPError:
|
||||
|
||||
# Full name to use in message From header when composing messages coming from this
|
||||
# address with webmail. (optional)
|
||||
FullName:
|
||||
@ -1228,8 +958,7 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
RejectsMailbox:
|
||||
|
||||
# Don't automatically delete mail in the RejectsMailbox listed above. This can be
|
||||
# useful, e.g. for future spam training. It can also cause storage to fill up.
|
||||
# (optional)
|
||||
# useful, e.g. for future spam training. (optional)
|
||||
KeepRejects: false
|
||||
|
||||
# Automatically set $Junk and $NotJunk flags based on mailbox messages are
|
||||
@ -1238,11 +967,11 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
# all move messages to a different mailbox, so this helps them. (optional)
|
||||
AutomaticJunkFlags:
|
||||
|
||||
# If enabled, junk/nonjunk flags will be set automatically if they match some of
|
||||
# the regular expressions. When two of the three mailbox regular expressions are
|
||||
# set, the remaining one will match all unmatched messages. Messages are matched
|
||||
# in the order 'junk', 'neutral', 'not junk', and the search stops on the first
|
||||
# match. Mailboxes are lowercased before matching.
|
||||
# If enabled, flags will be set automatically if they match a regular expression
|
||||
# below. When two of the three mailbox regular expressions are set, the remaining
|
||||
# one will match all unmatched messages. Messages are matched in the order
|
||||
# specified and the search stops on the first match. Mailboxes are lowercased
|
||||
# before matching.
|
||||
Enabled: false
|
||||
|
||||
# Example: ^(junk|spam). (optional)
|
||||
@ -1303,17 +1032,6 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
# this mail server in case of account compromise. Default 200. (optional)
|
||||
MaxFirstTimeRecipientsPerDay: 0
|
||||
|
||||
# Do not apply a delay to SMTP connections before accepting an incoming message
|
||||
# from a first-time sender. Can be useful for accounts that sends automated
|
||||
# responses and want instant replies. (optional)
|
||||
NoFirstTimeSenderDelay: false
|
||||
|
||||
# If set, this account cannot set a password of their own choice, but can only set
|
||||
# a new randomly generated password, preventing password reuse across services and
|
||||
# use of weak passwords. Custom account passwords can be set by the admin.
|
||||
# (optional)
|
||||
NoCustomPassword: false
|
||||
|
||||
# Routes for delivering outgoing messages through the queue. Each delivery attempt
|
||||
# evaluates these account routes, domain routes and finally global routes. The
|
||||
# transport of the first matching route is used in the delivery attempt. If no
|
||||
@ -1343,15 +1061,12 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
WebDomainRedirects:
|
||||
x:
|
||||
|
||||
# Handle webserver requests by serving static files, redirecting, reverse-proxying
|
||||
# HTTP(s) or passing the request to an internal service. The first matching
|
||||
# WebHandler will handle the request. Built-in system handlers, e.g. for ACME
|
||||
# validation, autoconfig and mta-sts always run first. Built-in handlers for
|
||||
# admin, account, webmail and webapi are evaluated after all handlers, including
|
||||
# webhandlers (allowing for overrides of internal services for some domains). If
|
||||
# no handler matches, the response status code is file not found (404). If
|
||||
# webserver features are missing, forward the requests to an application that
|
||||
# provides the needed functionality itself. (optional)
|
||||
# Handle webserver requests by serving static files, redirecting or
|
||||
# reverse-proxying HTTP(s). The first matching WebHandler will handle the request.
|
||||
# Built-in handlers, e.g. for account, admin, autoconfig and mta-sts always run
|
||||
# first. If no handler matches, the response status code is file not found (404).
|
||||
# If functionality you need is missng, simply forward the requests to an
|
||||
# application that can provide the needed functionality. (optional)
|
||||
WebHandlers:
|
||||
-
|
||||
|
||||
@ -1359,7 +1074,7 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
LogName:
|
||||
|
||||
# Both Domain and PathRegexp must match for this WebHandler to match a request.
|
||||
# Exactly one of WebStatic, WebRedirect, WebForward, WebInternal must be set.
|
||||
# Exactly one of WebStatic, WebRedirect, WebForward must be set.
|
||||
Domain:
|
||||
|
||||
# Regular expression matched against request path, must always start with ^ to
|
||||
@ -1466,15 +1181,6 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
ResponseHeaders:
|
||||
x:
|
||||
|
||||
# Pass request to internal service, like webmail, webapi, etc. (optional)
|
||||
WebInternal:
|
||||
|
||||
# Path to use as root of internal service, e.g. /webmail/.
|
||||
BasePath:
|
||||
|
||||
# Name of the service, values: admin, account, webmail, webapi.
|
||||
Service:
|
||||
|
||||
# Routes for delivering outgoing messages through the queue. Each delivery attempt
|
||||
# evaluates account routes, domain routes and finally these global routes. The
|
||||
# transport of the first matching route is used in the delivery attempt. If no
|
||||
@ -1510,8 +1216,8 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
|
||||
# Examples
|
||||
|
||||
Mox includes configuration files to illustrate common setups. You can see these
|
||||
examples with "mox config example", and print a specific example with "mox
|
||||
config example <name>". Below are all examples included in mox.
|
||||
examples with "mox example", and print a specific example with "mox example
|
||||
<name>". Below are all examples included in mox.
|
||||
|
||||
# Example webhandlers
|
||||
|
||||
@ -1595,7 +1301,7 @@ config example <name>". Below are all examples included in mox.
|
||||
# Example transport
|
||||
|
||||
# Snippet for mox.conf, defining a transport called Example that connects on the
|
||||
# SMTP submission with TLS port 465 ("submissions"), authenticating with
|
||||
# SMTP submission with TLS port 465 ("submissions), authenticating with
|
||||
# SCRAM-SHA-256-PLUS (other providers may not support SCRAM-SHA-256-PLUS, but they
|
||||
# typically do support the older CRAM-MD5).:
|
||||
|
||||
|
466
ctl_test.go
@ -4,28 +4,18 @@ package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/ed25519"
|
||||
cryptorand "crypto/rand"
|
||||
"crypto/x509"
|
||||
"flag"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"math/big"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/mjl-/mox/config"
|
||||
"github.com/mjl-/mox/dmarcdb"
|
||||
"github.com/mjl-/mox/dns"
|
||||
"github.com/mjl-/mox/imapclient"
|
||||
"github.com/mjl-/mox/mlog"
|
||||
"github.com/mjl-/mox/mox-"
|
||||
"github.com/mjl-/mox/mtastsdb"
|
||||
"github.com/mjl-/mox/queue"
|
||||
"github.com/mjl-/mox/smtp"
|
||||
"github.com/mjl-/mox/store"
|
||||
"github.com/mjl-/mox/tlsrptdb"
|
||||
)
|
||||
@ -45,422 +35,154 @@ func tcheck(t *testing.T, err error, errmsg string) {
|
||||
// unhandled errors would cause a panic.
|
||||
func TestCtl(t *testing.T) {
|
||||
os.RemoveAll("testdata/ctl/data")
|
||||
mox.ConfigStaticPath = filepath.FromSlash("testdata/ctl/config/mox.conf")
|
||||
mox.ConfigDynamicPath = filepath.FromSlash("testdata/ctl/config/domains.conf")
|
||||
mox.ConfigStaticPath = filepath.FromSlash("testdata/ctl/mox.conf")
|
||||
mox.ConfigDynamicPath = filepath.FromSlash("testdata/ctl/domains.conf")
|
||||
if errs := mox.LoadConfig(ctxbg, pkglog, true, false); len(errs) > 0 {
|
||||
t.Fatalf("loading mox config: %v", errs)
|
||||
}
|
||||
err := store.Init(ctxbg)
|
||||
tcheck(t, err, "store init")
|
||||
defer store.Close()
|
||||
defer store.Switchboard()()
|
||||
|
||||
err = queue.Init()
|
||||
tcheck(t, err, "queue init")
|
||||
defer queue.Shutdown()
|
||||
|
||||
var cid int64
|
||||
|
||||
testctl := func(fn func(clientxctl *ctl)) {
|
||||
testctl := func(fn func(clientctl *ctl)) {
|
||||
t.Helper()
|
||||
|
||||
cconn, sconn := net.Pipe()
|
||||
clientxctl := ctl{conn: cconn, log: pkglog}
|
||||
serverxctl := ctl{conn: sconn, log: pkglog}
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
cid++
|
||||
servectlcmd(ctxbg, &serverxctl, cid, func() {})
|
||||
close(done)
|
||||
}()
|
||||
fn(&clientxctl)
|
||||
clientctl := ctl{conn: cconn, log: pkglog}
|
||||
serverctl := ctl{conn: sconn, log: pkglog}
|
||||
go servectlcmd(ctxbg, &serverctl, func() {})
|
||||
fn(&clientctl)
|
||||
cconn.Close()
|
||||
<-done
|
||||
sconn.Close()
|
||||
}
|
||||
|
||||
// "deliver"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdDeliver(xctl, "mjl@mox.example")
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdDeliver(ctl, "mjl@mox.example")
|
||||
})
|
||||
|
||||
// "setaccountpassword"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdSetaccountpassword(xctl, "mjl", "test4321")
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdSetaccountpassword(ctl, "mjl", "test4321")
|
||||
})
|
||||
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHoldrulesList(xctl)
|
||||
err := queue.Init()
|
||||
tcheck(t, err, "queue init")
|
||||
|
||||
// "queue"
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdQueueList(ctl)
|
||||
})
|
||||
|
||||
// All messages.
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHoldrulesAdd(xctl, "", "", "")
|
||||
})
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHoldrulesAdd(xctl, "mjl", "", "")
|
||||
})
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHoldrulesAdd(xctl, "", "☺.mox.example", "")
|
||||
})
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHoldrulesAdd(xctl, "mox", "☺.mox.example", "example.com")
|
||||
})
|
||||
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHoldrulesRemove(xctl, 1)
|
||||
})
|
||||
|
||||
// Queue a message to list/change/dump.
|
||||
msg := "Subject: subject\r\n\r\nbody\r\n"
|
||||
msgFile, err := store.CreateMessageTemp(pkglog, "queuedump-test")
|
||||
tcheck(t, err, "temp file")
|
||||
_, err = msgFile.Write([]byte(msg))
|
||||
tcheck(t, err, "write message")
|
||||
_, err = msgFile.Seek(0, 0)
|
||||
tcheck(t, err, "rewind message")
|
||||
defer os.Remove(msgFile.Name())
|
||||
defer msgFile.Close()
|
||||
addr, err := smtp.ParseAddress("mjl@mox.example")
|
||||
tcheck(t, err, "parse address")
|
||||
qml := []queue.Msg{queue.MakeMsg(addr.Path(), addr.Path(), false, false, int64(len(msg)), "<random@localhost>", nil, nil, time.Now(), "subject")}
|
||||
queue.Add(ctxbg, pkglog, "mjl", msgFile, qml...)
|
||||
qmid := qml[0].ID
|
||||
|
||||
// Has entries now.
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHoldrulesList(xctl)
|
||||
})
|
||||
|
||||
// "queuelist"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueList(xctl, queue.Filter{}, queue.Sort{})
|
||||
})
|
||||
|
||||
// "queueholdset"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHoldSet(xctl, queue.Filter{}, true)
|
||||
})
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHoldSet(xctl, queue.Filter{}, false)
|
||||
})
|
||||
|
||||
// "queueschedule"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueSchedule(xctl, queue.Filter{}, true, time.Minute)
|
||||
})
|
||||
|
||||
// "queuetransport"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueTransport(xctl, queue.Filter{}, "socks")
|
||||
})
|
||||
|
||||
// "queuerequiretls"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueRequireTLS(xctl, queue.Filter{}, nil)
|
||||
})
|
||||
|
||||
// "queuedump"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueDump(xctl, fmt.Sprintf("%d", qmid))
|
||||
})
|
||||
|
||||
// "queuefail"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueFail(xctl, queue.Filter{})
|
||||
// "queuekick"
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdQueueKick(ctl, 0, "", "", "")
|
||||
})
|
||||
|
||||
// "queuedrop"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueDrop(xctl, queue.Filter{})
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdQueueDrop(ctl, 0, "", "")
|
||||
})
|
||||
|
||||
// "queueholdruleslist"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHoldrulesList(xctl)
|
||||
})
|
||||
|
||||
// "queueholdrulesadd"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHoldrulesAdd(xctl, "mjl", "", "")
|
||||
})
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHoldrulesAdd(xctl, "mjl", "localhost", "")
|
||||
})
|
||||
|
||||
// "queueholdrulesremove"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHoldrulesRemove(xctl, 2)
|
||||
})
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHoldrulesList(xctl)
|
||||
})
|
||||
|
||||
// "queuesuppresslist"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueSuppressList(xctl, "mjl")
|
||||
})
|
||||
|
||||
// "queuesuppressadd"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueSuppressAdd(xctl, "mjl", "base@localhost")
|
||||
})
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueSuppressAdd(xctl, "mjl", "other@localhost")
|
||||
})
|
||||
|
||||
// "queuesuppresslookup"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueSuppressLookup(xctl, "mjl", "base@localhost")
|
||||
})
|
||||
|
||||
// "queuesuppressremove"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueSuppressRemove(xctl, "mjl", "base@localhost")
|
||||
})
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueSuppressList(xctl, "mjl")
|
||||
})
|
||||
|
||||
// "queueretiredlist"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueRetiredList(xctl, queue.RetiredFilter{}, queue.RetiredSort{})
|
||||
})
|
||||
|
||||
// "queueretiredprint"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueRetiredPrint(xctl, "1")
|
||||
})
|
||||
|
||||
// "queuehooklist"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHookList(xctl, queue.HookFilter{}, queue.HookSort{})
|
||||
})
|
||||
|
||||
// "queuehookschedule"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHookSchedule(xctl, queue.HookFilter{}, true, time.Minute)
|
||||
})
|
||||
|
||||
// "queuehookprint"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHookPrint(xctl, "1")
|
||||
})
|
||||
|
||||
// "queuehookcancel"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHookCancel(xctl, queue.HookFilter{})
|
||||
})
|
||||
|
||||
// "queuehookretiredlist"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHookRetiredList(xctl, queue.HookRetiredFilter{}, queue.HookRetiredSort{})
|
||||
})
|
||||
|
||||
// "queuehookretiredprint"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdQueueHookRetiredPrint(xctl, "1")
|
||||
})
|
||||
// no "queuedump", we don't have a message to dump, and the commands exits without a message.
|
||||
|
||||
// "importmbox"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdImport(xctl, true, "mjl", "inbox", "testdata/importtest.mbox")
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdImport(ctl, true, "mjl", "inbox", "testdata/importtest.mbox")
|
||||
})
|
||||
|
||||
// "importmaildir"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdImport(xctl, false, "mjl", "inbox", "testdata/importtest.maildir")
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdImport(ctl, false, "mjl", "inbox", "testdata/importtest.maildir")
|
||||
})
|
||||
|
||||
// "domainadd"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigDomainAdd(xctl, false, dns.Domain{ASCII: "mox2.example"}, "mjl", "")
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdConfigDomainAdd(ctl, dns.Domain{ASCII: "mox2.example"}, "mjl", "")
|
||||
})
|
||||
|
||||
// "accountadd"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigAccountAdd(xctl, "mjl2", "mjl2@mox2.example")
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdConfigAccountAdd(ctl, "mjl2", "mjl2@mox2.example")
|
||||
})
|
||||
|
||||
// "addressadd"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigAddressAdd(xctl, "mjl3@mox2.example", "mjl2")
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdConfigAddressAdd(ctl, "mjl3@mox2.example", "mjl2")
|
||||
})
|
||||
|
||||
// Add a message.
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdDeliver(xctl, "mjl3@mox2.example")
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdDeliver(ctl, "mjl3@mox2.example")
|
||||
})
|
||||
// "retrain", retrain junk filter.
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdRetrain(xctl, "mjl2")
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdRetrain(ctl, "mjl2")
|
||||
})
|
||||
|
||||
// "addressrm"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigAddressRemove(xctl, "mjl3@mox2.example")
|
||||
})
|
||||
|
||||
// "accountdisabled"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigAccountDisabled(xctl, "mjl2", "testing")
|
||||
})
|
||||
|
||||
// "accountlist"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigAccountList(xctl)
|
||||
})
|
||||
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigAccountDisabled(xctl, "mjl2", "")
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdConfigAddressRemove(ctl, "mjl3@mox2.example")
|
||||
})
|
||||
|
||||
// "accountrm"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigAccountRemove(xctl, "mjl2")
|
||||
})
|
||||
|
||||
// "domaindisabled"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigDomainDisabled(xctl, dns.Domain{ASCII: "mox2.example"}, true)
|
||||
})
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigDomainDisabled(xctl, dns.Domain{ASCII: "mox2.example"}, false)
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdConfigAccountRemove(ctl, "mjl2")
|
||||
})
|
||||
|
||||
// "domainrm"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigDomainRemove(xctl, dns.Domain{ASCII: "mox2.example"})
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdConfigDomainRemove(ctl, dns.Domain{ASCII: "mox2.example"})
|
||||
})
|
||||
|
||||
// "aliasadd"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigAliasAdd(xctl, "support@mox.example", config.Alias{Addresses: []string{"mjl@mox.example"}})
|
||||
})
|
||||
|
||||
// "aliaslist"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigAliasList(xctl, "mox.example")
|
||||
})
|
||||
|
||||
// "aliasprint"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigAliasPrint(xctl, "support@mox.example")
|
||||
})
|
||||
|
||||
// "aliasupdate"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigAliasUpdate(xctl, "support@mox.example", "true", "true", "true")
|
||||
})
|
||||
|
||||
// "aliasaddaddr"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigAliasAddaddr(xctl, "support@mox.example", []string{"mjl2@mox.example"})
|
||||
})
|
||||
|
||||
// "aliasrmaddr"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigAliasRmaddr(xctl, "support@mox.example", []string{"mjl2@mox.example"})
|
||||
})
|
||||
|
||||
// "aliasrm"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigAliasRemove(xctl, "support@mox.example")
|
||||
})
|
||||
|
||||
// accounttlspubkeyadd
|
||||
certDER := fakeCert(t)
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigTlspubkeyAdd(xctl, "mjl@mox.example", "testkey", false, certDER)
|
||||
})
|
||||
|
||||
// "accounttlspubkeylist"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigTlspubkeyList(xctl, "")
|
||||
})
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigTlspubkeyList(xctl, "mjl")
|
||||
})
|
||||
|
||||
tpkl, err := store.TLSPublicKeyList(ctxbg, "")
|
||||
tcheck(t, err, "list tls public keys")
|
||||
if len(tpkl) != 1 {
|
||||
t.Fatalf("got %d tls public keys, expected 1", len(tpkl))
|
||||
}
|
||||
fingerprint := tpkl[0].Fingerprint
|
||||
|
||||
// "accounttlspubkeyget"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigTlspubkeyGet(xctl, fingerprint)
|
||||
})
|
||||
|
||||
// "accounttlspubkeyrm"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdConfigTlspubkeyRemove(xctl, fingerprint)
|
||||
})
|
||||
|
||||
tpkl, err = store.TLSPublicKeyList(ctxbg, "")
|
||||
tcheck(t, err, "list tls public keys")
|
||||
if len(tpkl) != 0 {
|
||||
t.Fatalf("got %d tls public keys, expected 0", len(tpkl))
|
||||
}
|
||||
|
||||
// "loglevels"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdLoglevels(xctl)
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdLoglevels(ctl)
|
||||
})
|
||||
|
||||
// "setloglevels"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdSetLoglevels(xctl, "", "debug")
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdSetLoglevels(ctl, "", "debug")
|
||||
})
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdSetLoglevels(xctl, "smtpserver", "debug")
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdSetLoglevels(ctl, "smtpserver", "debug")
|
||||
})
|
||||
|
||||
// Export data, import it again
|
||||
xcmdExport(true, false, []string{filepath.FromSlash("testdata/ctl/data/tmp/export/mbox/"), filepath.FromSlash("testdata/ctl/data/accounts/mjl")}, &cmd{log: pkglog})
|
||||
xcmdExport(false, false, []string{filepath.FromSlash("testdata/ctl/data/tmp/export/maildir/"), filepath.FromSlash("testdata/ctl/data/accounts/mjl")}, &cmd{log: pkglog})
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdImport(xctl, true, "mjl", "inbox", filepath.FromSlash("testdata/ctl/data/tmp/export/mbox/Inbox.mbox"))
|
||||
xcmdExport(true, []string{filepath.FromSlash("testdata/ctl/data/tmp/export/mbox/"), filepath.FromSlash("testdata/ctl/data/accounts/mjl")}, &cmd{log: pkglog})
|
||||
xcmdExport(false, []string{filepath.FromSlash("testdata/ctl/data/tmp/export/maildir/"), filepath.FromSlash("testdata/ctl/data/accounts/mjl")}, &cmd{log: pkglog})
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdImport(ctl, true, "mjl", "inbox", filepath.FromSlash("testdata/ctl/data/tmp/export/mbox/Inbox.mbox"))
|
||||
})
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdImport(xctl, false, "mjl", "inbox", filepath.FromSlash("testdata/ctl/data/tmp/export/maildir/Inbox"))
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdImport(ctl, false, "mjl", "inbox", filepath.FromSlash("testdata/ctl/data/tmp/export/maildir/Inbox"))
|
||||
})
|
||||
|
||||
// "recalculatemailboxcounts"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdRecalculateMailboxCounts(xctl, "mjl")
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdRecalculateMailboxCounts(ctl, "mjl")
|
||||
})
|
||||
|
||||
// "fixmsgsize"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdFixmsgsize(xctl, "mjl")
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdFixmsgsize(ctl, "mjl")
|
||||
})
|
||||
testctl(func(xctl *ctl) {
|
||||
acc, err := store.OpenAccount(xctl.log, "mjl", false)
|
||||
testctl(func(ctl *ctl) {
|
||||
acc, err := store.OpenAccount(ctl.log, "mjl")
|
||||
tcheck(t, err, "open account")
|
||||
defer func() {
|
||||
acc.Close()
|
||||
acc.WaitClosed()
|
||||
}()
|
||||
defer acc.Close()
|
||||
|
||||
content := []byte("Subject: hi\r\n\r\nbody\r\n")
|
||||
|
||||
deliver := func(m *store.Message) {
|
||||
t.Helper()
|
||||
m.Size = int64(len(content))
|
||||
msgf, err := store.CreateMessageTemp(xctl.log, "ctltest")
|
||||
msgf, err := store.CreateMessageTemp(ctl.log, "ctltest")
|
||||
tcheck(t, err, "create temp file")
|
||||
defer os.Remove(msgf.Name())
|
||||
defer msgf.Close()
|
||||
_, err = msgf.Write(content)
|
||||
tcheck(t, err, "write message file")
|
||||
|
||||
acc.WithWLock(func() {
|
||||
err = acc.DeliverMailbox(xctl.log, "Inbox", m, msgf)
|
||||
tcheck(t, err, "deliver message")
|
||||
})
|
||||
err = acc.DeliverMailbox(ctl.log, "Inbox", m, msgf)
|
||||
tcheck(t, err, "deliver message")
|
||||
}
|
||||
|
||||
var msgBadSize store.Message
|
||||
@ -478,7 +200,7 @@ func TestCtl(t *testing.T) {
|
||||
tcheck(t, err, "update mailbox size")
|
||||
|
||||
// Fix up the size.
|
||||
ctlcmdFixmsgsize(xctl, "")
|
||||
ctlcmdFixmsgsize(ctl, "")
|
||||
|
||||
err = acc.DB.Get(ctxbg, &msgBadSize)
|
||||
tcheck(t, err, "get message")
|
||||
@ -488,71 +210,39 @@ func TestCtl(t *testing.T) {
|
||||
})
|
||||
|
||||
// "reparse"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdReparse(xctl, "mjl")
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdReparse(ctl, "mjl")
|
||||
})
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdReparse(xctl, "")
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdReparse(ctl, "")
|
||||
})
|
||||
|
||||
// "reassignthreads"
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdReassignthreads(xctl, "mjl")
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdReassignthreads(ctl, "mjl")
|
||||
})
|
||||
testctl(func(xctl *ctl) {
|
||||
ctlcmdReassignthreads(xctl, "")
|
||||
testctl(func(ctl *ctl) {
|
||||
ctlcmdReassignthreads(ctl, "")
|
||||
})
|
||||
|
||||
// "backup", backup account.
|
||||
err = dmarcdb.Init()
|
||||
tcheck(t, err, "dmarcdb init")
|
||||
defer dmarcdb.Close()
|
||||
err = mtastsdb.Init(false)
|
||||
tcheck(t, err, "mtastsdb init")
|
||||
defer mtastsdb.Close()
|
||||
err = tlsrptdb.Init()
|
||||
tcheck(t, err, "tlsrptdb init")
|
||||
defer tlsrptdb.Close()
|
||||
testctl(func(xctl *ctl) {
|
||||
os.RemoveAll("testdata/ctl/data/tmp/backup")
|
||||
testctl(func(ctl *ctl) {
|
||||
os.RemoveAll("testdata/ctl/data/tmp/backup-data")
|
||||
err := os.WriteFile("testdata/ctl/data/receivedid.key", make([]byte, 16), 0600)
|
||||
tcheck(t, err, "writing receivedid.key")
|
||||
ctlcmdBackup(xctl, filepath.FromSlash("testdata/ctl/data/tmp/backup"), false)
|
||||
ctlcmdBackup(ctl, filepath.FromSlash("testdata/ctl/data/tmp/backup-data"), false)
|
||||
})
|
||||
|
||||
// Verify the backup.
|
||||
xcmd := cmd{
|
||||
flag: flag.NewFlagSet("", flag.ExitOnError),
|
||||
flagArgs: []string{filepath.FromSlash("testdata/ctl/data/tmp/backup/data")},
|
||||
flagArgs: []string{filepath.FromSlash("testdata/ctl/data/tmp/backup-data")},
|
||||
}
|
||||
cmdVerifydata(&xcmd)
|
||||
|
||||
// IMAP connection.
|
||||
testctl(func(xctl *ctl) {
|
||||
a, b := net.Pipe()
|
||||
go func() {
|
||||
opts := imapclient.Opts{
|
||||
Logger: slog.Default().With("cid", mox.Cid()),
|
||||
Error: func(err error) { panic(err) },
|
||||
}
|
||||
client, err := imapclient.New(a, &opts)
|
||||
tcheck(t, err, "new imapclient")
|
||||
client.Select("inbox")
|
||||
client.Logout()
|
||||
defer a.Close()
|
||||
}()
|
||||
ctlcmdIMAPServe(xctl, "mjl@mox.example", b, b)
|
||||
})
|
||||
}
|
||||
|
||||
func fakeCert(t *testing.T) []byte {
|
||||
t.Helper()
|
||||
seed := make([]byte, ed25519.SeedSize)
|
||||
privKey := ed25519.NewKeyFromSeed(seed) // Fake key, don't use this for real!
|
||||
template := &x509.Certificate{
|
||||
SerialNumber: big.NewInt(1), // Required field...
|
||||
}
|
||||
localCertBuf, err := x509.CreateCertificate(cryptorand.Reader, template, template, privKey.Public(), privKey)
|
||||
tcheck(t, err, "making certificate")
|
||||
return localCertBuf
|
||||
}
|
||||
|
14
curves.go
@ -1,14 +0,0 @@
|
||||
//go:build !go1.24
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
)
|
||||
|
||||
var curvesList = []tls.CurveID{
|
||||
tls.CurveP256,
|
||||
tls.CurveP384,
|
||||
tls.CurveP521,
|
||||
tls.X25519,
|
||||
}
|
@ -1,15 +0,0 @@
|
||||
//go:build go1.24
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
)
|
||||
|
||||
var curvesList = []tls.CurveID{
|
||||
tls.CurveP256,
|
||||
tls.CurveP384,
|
||||
tls.CurveP521,
|
||||
tls.X25519,
|
||||
tls.X25519MLKEM768,
|
||||
}
|
20
dane/dane.go
@ -65,7 +65,6 @@ import (
|
||||
"github.com/mjl-/mox/dns"
|
||||
"github.com/mjl-/mox/mlog"
|
||||
"github.com/mjl-/mox/stub"
|
||||
"slices"
|
||||
)
|
||||
|
||||
var (
|
||||
@ -215,9 +214,12 @@ func Dial(ctx context.Context, elog *slog.Logger, resolver dns.Resolver, network
|
||||
if allowedUsages != nil {
|
||||
o := 0
|
||||
for _, r := range records {
|
||||
if slices.Contains(allowedUsages, r.Usage) {
|
||||
records[o] = r
|
||||
o++
|
||||
for _, usage := range allowedUsages {
|
||||
if r.Usage == usage {
|
||||
records[o] = r
|
||||
o++
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
records = records[:o]
|
||||
@ -261,8 +263,7 @@ func Dial(ctx context.Context, elog *slog.Logger, resolver dns.Resolver, network
|
||||
config := TLSClientConfig(log.Logger, records, baseDom, moreAllowedHosts, &verifiedRecord, pkixRoots)
|
||||
tlsConn := tls.Client(conn, &config)
|
||||
if err := tlsConn.HandshakeContext(ctx); err != nil {
|
||||
xerr := conn.Close()
|
||||
log.Check(xerr, "closing connection")
|
||||
conn.Close()
|
||||
return nil, adns.TLSA{}, err
|
||||
}
|
||||
return tlsConn, verifiedRecord, nil
|
||||
@ -447,8 +448,7 @@ func verifySingle(log mlog.Log, tlsa adns.TLSA, cs tls.ConnectionState, allowedH
|
||||
// We set roots, so the system defaults don't get used. Verify checks the host name
|
||||
// (set below) and checks for expiration.
|
||||
opts := x509.VerifyOptions{
|
||||
Intermediates: x509.NewCertPool(),
|
||||
Roots: x509.NewCertPool(),
|
||||
Roots: x509.NewCertPool(),
|
||||
}
|
||||
|
||||
// If the full certificate was included, we must add it to the valid roots, the TLS
|
||||
@ -465,13 +465,11 @@ func verifySingle(log mlog.Log, tlsa adns.TLSA, cs tls.ConnectionState, allowedH
|
||||
}
|
||||
}
|
||||
|
||||
for i, cert := range cs.PeerCertificates {
|
||||
for _, cert := range cs.PeerCertificates {
|
||||
if match(cert) {
|
||||
opts.Roots.AddCert(cert)
|
||||
found = true
|
||||
break
|
||||
} else if i > 0 {
|
||||
opts.Intermediates.AddCert(cert)
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
|
@ -12,6 +12,7 @@ import (
|
||||
"crypto/x509/pkix"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"math/big"
|
||||
"net"
|
||||
"reflect"
|
||||
@ -35,6 +36,7 @@ func tcheckf(t *testing.T, err error, format string, args ...any) {
|
||||
|
||||
// Test dialing and DANE TLS verification.
|
||||
func TestDial(t *testing.T) {
|
||||
mlog.SetConfig(map[string]slog.Level{"": mlog.LevelDebug})
|
||||
log := mlog.New("dane", nil)
|
||||
|
||||
// Create fake CA/trusted-anchor certificate.
|
||||
|
97
develop.txt
@ -1,34 +1,5 @@
|
||||
This file has notes useful for mox developers.
|
||||
|
||||
# Building & testing
|
||||
|
||||
For a full build, you'll need a recent Go compiler/toolchain and nodejs/npm for
|
||||
the frontend. Run "make build" to do a full build. Run "make test" to run the
|
||||
test suite. With docker installed, you can run "make test-integration" to start
|
||||
up a few mox instances, a dns server, a postfix instance, and send email
|
||||
between them.
|
||||
|
||||
The mox localserve command is a convenient way to test locally. Most of the
|
||||
code paths are reachable/testable with mox localserve, but some use cases will
|
||||
require a full setup.
|
||||
|
||||
Before committing, run at least "make fmt" and "make check" (which requires
|
||||
staticcheck and ineffassign, run "make install-staticcheck install-ineffassign"
|
||||
once). Also run "make check-shadow" and fix any shadowed variables other than
|
||||
"err" (which are filtered out, but causes the command to always exit with an
|
||||
error code; run "make install-shadow" once to install the shadow command). If
|
||||
you've updated RFC references, run "make" in rfc/, it verifies the referenced
|
||||
files exist.
|
||||
|
||||
When making changes to the public API of a package listed in
|
||||
apidiff/packages.txt, run "make genapidiff" to update the list of changes in
|
||||
the upcoming release (run "make install-apidiff" once to install the apidiff
|
||||
command).
|
||||
|
||||
New features may be worth mentioning on the website, see website/ and
|
||||
instructions below.
|
||||
|
||||
|
||||
# Code style, guidelines, notes
|
||||
|
||||
- Keep the same style as existing code.
|
||||
@ -47,18 +18,6 @@ instructions below.
|
||||
standard slog package for logging, not our mlog package. Packages not intended
|
||||
for reuse do use mlog as it is more convenient. Internally, we always use
|
||||
mlog.Log to do the logging, wrapping an slog.Logger.
|
||||
- The code uses panic for error handling in quite a few places, including
smtpserver, imapserver and web API calls. Functions/methods, variables, struct
fields and types that begin with an "x" indicate they can panic on errors. Both
for i/o errors that are fatal for a connection, and also often for user-induced
errors, for example bad IMAP commands or invalid web API requests. These panics
are caught again at the top of a command or top of the connection. Write code
that is panic-safe, using defer to clean up and release resources. See the
sketch after this list.
- Try to check all errors, at the minimum using mlog.Log.Check() to log an error
at the appropriate level. Also when just closing a file. Log messages sometimes
unexpectedly point out latent issues. Only when there is no point in logging,
for example when previous writes to stderr failed, can error logging be skipped.
Test code is less strict about checking errors.

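A minimal sketch of that panic-based pattern; the names xcheckf, xerror and command
here are illustrative, not the exact helpers used in the mox code:

```go
package main

import (
	"errors"
	"fmt"
	"log"
)

// xerror marks panics raised by x-prefixed helpers, so the recover at the top
// of a command only swallows our own errors, never unrelated bugs.
type xerror struct{ err error }

// xcheckf panics with an xerror if err is non-nil, keeping callers linear.
func xcheckf(err error, format string, args ...any) {
	if err != nil {
		panic(xerror{fmt.Errorf(format+": %w", append(args, err)...)})
	}
}

// command runs fn, catching xerror panics and turning them back into a normal
// error, like the top of a connection or command loop would.
func command(fn func()) (rerr error) {
	defer func() {
		x := recover()
		if x == nil {
			return
		}
		if xe, ok := x.(xerror); ok {
			rerr = xe.err
			return
		}
		panic(x) // Not ours, propagate.
	}()
	fn()
	return nil
}

func main() {
	err := command(func() {
		xcheckf(errors.New("bad syntax"), "parsing IMAP command")
	})
	log.Printf("command result: %v", err)
}
```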
# Reusable packages
|
||||
@ -85,8 +44,8 @@ https://github.com/mjl-/sherpats/.
|
||||
The JavaScript that is generated from the TypeScript is included in the
|
||||
repository. This makes it available for inclusion in the binary, which is
|
||||
practical for users, and desirable given Go's reproducible builds. When
|
||||
developing, run "make" to also build the frontend code. Run "make
|
||||
install-frontend" once to install the TypeScript compiler into ./node_modules/.
|
||||
developing, run "make" to also build the frontend code. Run "make jsinstall"
|
||||
once to install the TypeScript compiler into ./node_modules/.
|
||||
|
||||
There are no other external (runtime or devtime) frontend dependencies. A
|
||||
light-weight abstraction over the DOM is provided by ./lib.ts. A bit more
|
||||
@ -101,23 +60,22 @@ managable.
|
||||
# Website
|
||||
|
||||
The content of the public website at https://www.xmox.nl is in website/, as
|
||||
markdown files. The website HTML is generated with "make genwebsite", which
|
||||
writes to website/html/ (files not committed). The FAQ is taken from
|
||||
README.md, the protocol support table is generated from rfc/index.txt. The
|
||||
website is kept in this repository so a commit can change both the
|
||||
implementation and the documentation on the website. Some of the info in
|
||||
README.md is duplicated on the website, often more elaborate and possibly with
|
||||
a slightly less technical audience. The website should also mostly be readable
|
||||
through the markdown in the git repo.
|
||||
markdown files. The website HTML is generated by website/website.go. The FAQ
|
||||
is taken from README.md, the protocol support table is generated from
|
||||
rfc/index.txt. The website is kept in this repository so a commit can change
|
||||
both the implementation and the documentation on the website. Some of the info
|
||||
in README.md is duplicated on the website, often more elaborate and possibly
|
||||
with a slightly less technical audience. The website should also mostly be
|
||||
readable through the markdown in the git repo.
|
||||
|
||||
Large files (images/videos) are in https://github.com/mjl-/mox-website-files to
|
||||
keep the repository reasonably sized.
|
||||
|
||||
The public website may serve the content from the "website" branch. After a
|
||||
release, the main branch (with latest development code and corresponding
|
||||
changes to the website about new features) is merged into the website branch.
|
||||
Commits to the website branch (e.g. for a news item, or any other change
|
||||
unrelated to a new release) is merged back into the main branch.
|
||||
The public website serves the content from the "website" branch. After a
|
||||
release, the main branch (with latest development code and
|
||||
corresponding changes to the website about new features) is merged into the
|
||||
website branch. Commits to the website branch (e.g. for a news item, or any
|
||||
other change unrelated to a new release) is merged back into the main branch.
|
||||
|
||||
|
||||
# TLS certificates
|
||||
@ -298,7 +256,7 @@ for i in 0 12; do
|
||||
done
|
||||
```
|
||||
|
||||
With the following "tombox.sh" script:
|
||||
With the following "tobmox.sh" script:
|
||||
|
||||
```
|
||||
#!/bin/sh
|
||||
@ -316,13 +274,12 @@ done
|
||||
|
||||
- Gather feedback on recent changes.
|
||||
- Check if dependencies need updates.
|
||||
- Update to latest publicsuffix/ list.
|
||||
- Check code if there are deprecated features that can be removed.
|
||||
- Generate apidiff and check if breaking changes can be prevented. Update moxtools.
|
||||
- Update features & roadmap in README.md and website.
|
||||
- Write release notes, copy from previous.
|
||||
- Build and run tests with previous major Go release, run "make docker-release" to test building images.
|
||||
- Run tests, including with race detector, also with TZ= for UTC-behaviour, and with -count 2.
|
||||
- Update features & roadmap in README.md
|
||||
- Write release notes.
|
||||
- Build and run tests with previous major Go release.
|
||||
- Run tests, including with race detector.
|
||||
- Run integration and upgrade tests.
|
||||
- Run fuzzing tests for a while.
|
||||
- Deploy to test environment. Test the update instructions.
|
||||
@ -331,15 +288,9 @@ done
|
||||
- Send and receive email with imap4/smtp clients.
|
||||
- Check DNS check admin page.
|
||||
- Check with https://internet.nl.
|
||||
- Move apidiff/next.txt to apidiff/<version>.txt, and create empty next.txt.
|
||||
- Add release to the Latest release & News sections of website/index.md.
|
||||
- Create git tag (note: "#" is comment, not title/header), push code.
|
||||
- Build and publish new docker image.
|
||||
- Deploy update to website.
|
||||
- Create new release on the github page, so watchers get a notification.
|
||||
Copy/paste it manually from the tag text, and add link to download/compile
|
||||
instructions to prevent confusion about "assets" github links to.
|
||||
- Publish new cross-referenced code/rfc to www.xmox.nl/xr/.
|
||||
- Update moxtools with latest version.
|
||||
- Update implementations support matrix.
|
||||
- Add release to the News section of website/index.md.
|
||||
- Create git tag, push code.
|
||||
- Publish new docker image.
|
||||
- Publish signed release notes for updates.xmox.nl and update DNS record.
|
||||
- Publish new cross-referenced code/rfc to www.xmox.nl/xr/.
|
||||
- Create new release on the github page, so watchers get a notification.
|
||||
|
14
dkim/dkim.go
@ -31,12 +31,8 @@ import (
|
||||
"github.com/mjl-/mox/publicsuffix"
|
||||
"github.com/mjl-/mox/smtp"
|
||||
"github.com/mjl-/mox/stub"
|
||||
"slices"
|
||||
)
|
||||
|
||||
// If set, signatures for top-level domain "localhost" are accepted.
|
||||
var Localserve bool
|
||||
|
||||
var (
|
||||
MetricSign stub.CounterVec = stub.CounterVecIgnore{}
|
||||
MetricVerify stub.HistogramVec = stub.HistogramVecIgnore{}
|
||||
@ -174,7 +170,7 @@ func Sign(ctx context.Context, elog *slog.Logger, localpart smtp.Localpart, doma
|
||||
sig.Domain = domain
|
||||
sig.Selector = sel.Domain
|
||||
sig.Identity = &Identity{&localpart, domain}
|
||||
sig.SignedHeaders = slices.Clone(sel.Headers)
|
||||
sig.SignedHeaders = append([]string{}, sel.Headers...)
|
||||
if sel.SealHeaders {
|
||||
// ../rfc/6376:2156
|
||||
// Each time a header name is added to the signature, the next unused value is
|
||||
@ -446,7 +442,7 @@ func checkSignatureParams(ctx context.Context, log mlog.Log, sig *Sig) (hash cry
|
||||
if subdom.Unicode != "" {
|
||||
subdom.Unicode = "x." + subdom.Unicode
|
||||
}
|
||||
if orgDom := publicsuffix.Lookup(ctx, log.Logger, subdom); subdom.ASCII == orgDom.ASCII && !(Localserve && sig.Domain.ASCII == "localhost") {
|
||||
if orgDom := publicsuffix.Lookup(ctx, log.Logger, subdom); subdom.ASCII == orgDom.ASCII {
|
||||
return 0, false, false, fmt.Errorf("%w: %s", ErrTLD, sig.Domain)
|
||||
}
|
||||
|
||||
@ -549,7 +545,7 @@ func verifySignatureRecord(r *Record, sig *Sig, hash crypto.Hash, canonHeaderSim
|
||||
if r.PublicKey == nil {
|
||||
return StatusPermerror, ErrKeyRevoked
|
||||
} else if rsaKey, ok := r.PublicKey.(*rsa.PublicKey); ok && rsaKey.N.BitLen() < 1024 {
|
||||
// ../rfc/8301:157
|
||||
// todo: find a reference that supports this.
|
||||
return StatusPermerror, ErrWeakKey
|
||||
}
|
||||
|
||||
@ -840,8 +836,8 @@ func parseHeaders(br *bufio.Reader) ([]header, int, error) {
|
||||
return nil, 0, fmt.Errorf("empty header key")
|
||||
}
|
||||
lkey = strings.ToLower(key)
|
||||
value = slices.Clone(t[1])
|
||||
raw = slices.Clone(line)
|
||||
value = append([]byte{}, t[1]...)
|
||||
raw = append([]byte{}, line...)
|
||||
}
|
||||
if key != "" {
|
||||
l = append(l, header{key, lkey, value, raw})
|
||||
|
@ -117,7 +117,7 @@ func (s *Sig) Header() (string, error) {
|
||||
} else if i == len(s.SignedHeaders)-1 {
|
||||
v += ";"
|
||||
}
|
||||
w.Addf(sep, "%s", v)
|
||||
w.Addf(sep, v)
|
||||
}
|
||||
}
|
||||
if len(s.CopiedHeaders) > 0 {
|
||||
@ -139,7 +139,7 @@ func (s *Sig) Header() (string, error) {
|
||||
} else if i == len(s.CopiedHeaders)-1 {
|
||||
v += ";"
|
||||
}
|
||||
w.Addf(sep, "%s", v)
|
||||
w.Addf(sep, v)
|
||||
}
|
||||
}
|
||||
|
||||
@ -147,7 +147,7 @@ func (s *Sig) Header() (string, error) {
|
||||
|
||||
w.Addf(" ", "b=")
|
||||
if len(s.Signature) > 0 {
|
||||
w.AddWrap([]byte(base64.StdEncoding.EncodeToString(s.Signature)), false)
|
||||
w.AddWrap([]byte(base64.StdEncoding.EncodeToString(s.Signature)))
|
||||
}
|
||||
w.Add("\r\n")
|
||||
return w.String(), nil
|
||||
|
@ -32,7 +32,7 @@ func TestParseRecord(t *testing.T) {
|
||||
}
|
||||
if r != nil {
|
||||
pk := r.Pubkey
|
||||
for range 2 {
|
||||
for i := 0; i < 2; i++ {
|
||||
ntxt, err := r.Record()
|
||||
if err != nil {
|
||||
t.Fatalf("making record: %v", err)
|
||||
|
@ -15,7 +15,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
mathrand2 "math/rand/v2"
|
||||
mathrand "math/rand"
|
||||
"time"
|
||||
|
||||
"github.com/mjl-/mox/dkim"
|
||||
@ -257,7 +257,7 @@ func Verify(ctx context.Context, elog *slog.Logger, resolver dns.Resolver, msgFr
|
||||
|
||||
// Record can request sampling of messages to apply policy.
|
||||
// See ../rfc/7489:1432
|
||||
useResult = !applyRandomPercentage || record.Percentage == 100 || mathrand2.IntN(100) < record.Percentage
|
||||
useResult = !applyRandomPercentage || record.Percentage == 100 || mathrand.Intn(100) < record.Percentage
|
||||
|
||||
// We treat "quarantine" and "reject" the same. Thus, we also don't "downgrade"
|
||||
// from reject to quarantine if this message was sampled out.
|
||||
|
@ -37,7 +37,7 @@ func ExampleVerify() {
|
||||
|
||||
// Message to verify.
|
||||
msg := strings.NewReader("From: <sender@example.com>\r\nMore: headers\r\n\r\nBody\r\n")
|
||||
msgFrom, _, _, err := message.From(slog.Default(), true, msg, nil)
|
||||
msgFrom, _, _, err := message.From(slog.Default(), true, msg)
|
||||
if err != nil {
|
||||
log.Fatalf("parsing message for from header: %v", err)
|
||||
}
|
||||
|
@ -92,9 +92,9 @@ func parseRecord(s string, checkRequired bool) (record *Record, isdmarc bool, re
|
||||
// ../rfc/7489:1105
|
||||
p.xerrorf("p= (policy) must be first tag")
|
||||
}
|
||||
r.Policy = Policy(p.xtakelist("none", "quarantine", "reject"))
|
||||
r.Policy = DMARCPolicy(p.xtakelist("none", "quarantine", "reject"))
|
||||
case "sp":
|
||||
r.SubdomainPolicy = Policy(p.xkeyword())
|
||||
r.SubdomainPolicy = DMARCPolicy(p.xkeyword())
|
||||
// note: we check if the value is valid before returning.
|
||||
case "rua":
|
||||
r.AggregateReportAddresses = append(r.AggregateReportAddresses, p.xuri())
|
||||
|
34
dmarc/txt.go
@ -5,16 +5,18 @@ import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
// todo: DMARCPolicy should be named just Policy, but this is causing conflicting types in sherpadoc output. should somehow get the dmarc-prefix only in the sherpadoc.
|
||||
|
||||
// Policy as used in DMARC DNS record for "p=" or "sp=".
|
||||
type Policy string
|
||||
type DMARCPolicy string
|
||||
|
||||
// ../rfc/7489:1157
|
||||
|
||||
const (
|
||||
PolicyEmpty Policy = "" // Only for the optional Record.SubdomainPolicy.
|
||||
PolicyNone Policy = "none"
|
||||
PolicyQuarantine Policy = "quarantine"
|
||||
PolicyReject Policy = "reject"
|
||||
PolicyEmpty DMARCPolicy = "" // Only for the optional Record.SubdomainPolicy.
|
||||
PolicyNone DMARCPolicy = "none"
|
||||
PolicyQuarantine DMARCPolicy = "quarantine"
|
||||
PolicyReject DMARCPolicy = "reject"
|
||||
)
|
||||
|
||||
// URI is a destination address for reporting.
|
||||
@ -53,17 +55,17 @@ const (
|
||||
//
|
||||
// v=DMARC1; p=reject; rua=mailto:postmaster@mox.example
|
||||
type Record struct {
|
||||
Version string // "v=DMARC1", fixed.
|
||||
Policy Policy // Required, for "p=".
|
||||
SubdomainPolicy Policy // Like policy but for subdomains. Optional, for "sp=".
|
||||
AggregateReportAddresses []URI // Optional, for "rua=". Destination addresses for aggregate reports.
|
||||
FailureReportAddresses []URI // Optional, for "ruf=". Destination addresses for failure reports.
|
||||
ADKIM Align // Alignment: "r" (default) for relaxed or "s" for simple. For "adkim=".
|
||||
ASPF Align // Alignment: "r" (default) for relaxed or "s" for simple. For "aspf=".
|
||||
AggregateReportingInterval int // In seconds, default 86400. For "ri="
|
||||
FailureReportingOptions []string // "0" (default), "1", "d", "s". For "fo=".
|
||||
ReportingFormat []string // "afrf" (default). For "rf=".
|
||||
Percentage int // Between 0 and 100, default 100. For "pct=". Policy applies randomly to this percentage of messages.
|
||||
Version string // "v=DMARC1", fixed.
|
||||
Policy DMARCPolicy // Required, for "p=".
|
||||
SubdomainPolicy DMARCPolicy // Like policy but for subdomains. Optional, for "sp=".
|
||||
AggregateReportAddresses []URI // Optional, for "rua=". Destination addresses for aggregate reports.
|
||||
FailureReportAddresses []URI // Optional, for "ruf=". Destination addresses for failure reports.
|
||||
ADKIM Align // Alignment: "r" (default) for relaxed or "s" for simple. For "adkim=".
|
||||
ASPF Align // Alignment: "r" (default) for relaxed or "s" for simple. For "aspf=".
|
||||
AggregateReportingInterval int // In seconds, default 86400. For "ri="
|
||||
FailureReportingOptions []string // "0" (default), "1", "d", "s". For "fo=".
|
||||
ReportingFormat []string // "afrf" (default). For "rf=".
|
||||
Percentage int // Between 0 and 100, default 100. For "pct=". Policy applies randomly to this percentage of messages.
|
||||
}
|
||||
|
||||
// DefaultRecord holds the defaults for a DMARC record.
|
||||
|
@ -11,17 +11,7 @@
|
||||
package dmarcdb
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/mjl-/bstore"
|
||||
|
||||
"github.com/mjl-/mox/mlog"
|
||||
"github.com/mjl-/mox/mox-"
|
||||
"github.com/mjl-/mox/moxvar"
|
||||
)
|
||||
|
||||
// Init opens the databases.
|
||||
@ -29,49 +19,11 @@ import (
|
||||
// The incoming reports and evaluations for outgoing reports are in separate
|
||||
// databases for simpler file-based handling of the databases.
|
||||
func Init() error {
|
||||
if ReportsDB != nil || EvalDB != nil {
|
||||
return fmt.Errorf("already initialized")
|
||||
if _, err := reportsDB(mox.Shutdown); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log := mlog.New("dmarcdb", nil)
|
||||
var err error
|
||||
|
||||
ReportsDB, err = openReportsDB(mox.Shutdown, log)
|
||||
if err != nil {
|
||||
return fmt.Errorf("open reports db: %v", err)
|
||||
if _, err := evalDB(mox.Shutdown); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
EvalDB, err = openEvalDB(mox.Shutdown, log)
|
||||
if err != nil {
|
||||
return fmt.Errorf("open eval db: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func Close() error {
|
||||
if err := ReportsDB.Close(); err != nil {
|
||||
return fmt.Errorf("closing reports db: %w", err)
|
||||
}
|
||||
ReportsDB = nil
|
||||
|
||||
if err := EvalDB.Close(); err != nil {
|
||||
return fmt.Errorf("closing eval db: %w", err)
|
||||
}
|
||||
EvalDB = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func openReportsDB(ctx context.Context, log mlog.Log) (*bstore.DB, error) {
|
||||
p := mox.DataDirPath("dmarcrpt.db")
|
||||
os.MkdirAll(filepath.Dir(p), 0770)
|
||||
opts := bstore.Options{Timeout: 5 * time.Second, Perm: 0660, RegisterLogger: moxvar.RegisterLogger(p, log.Logger)}
|
||||
return bstore.Open(ctx, p, &opts, ReportsDBTypes...)
|
||||
}
|
||||
|
||||
func openEvalDB(ctx context.Context, log mlog.Log) (*bstore.DB, error) {
|
||||
p := mox.DataDirPath("dmarceval.db")
|
||||
os.MkdirAll(filepath.Dir(p), 0770)
|
||||
opts := bstore.Options{Timeout: 5 * time.Second, Perm: 0660, RegisterLogger: moxvar.RegisterLogger(p, log.Logger)}
|
||||
return bstore.Open(ctx, p, &opts, EvalDBTypes...)
|
||||
}
|
||||
|
154
dmarcdb/eval.go
@ -10,18 +10,21 @@ import (
"fmt"
"io"
"log/slog"
"maps"
"mime"
"mime/multipart"
"net/textproto"
"net/url"
"os"
"path/filepath"
"runtime/debug"
"slices"
"sort"
"strings"
"sync"
"time"

"golang.org/x/exp/maps"

"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"

@ -63,7 +66,8 @@ var (
// Exported for backups. For incoming deliveries the SMTP server adds evaluations
// to the database. Every hour, a goroutine wakes up that gathers evaluations from
// the last hour(s), sends a report, and removes the evaluations from the database.
EvalDB *bstore.DB
EvalDB *bstore.DB
evalMutex sync.Mutex
)

// Evaluation is the result of an evaluation of a DMARC policy, to be included

@ -158,6 +162,21 @@ func (e Evaluation) ReportRecord(count int) dmarcrpt.ReportRecord {
}
}

func evalDB(ctx context.Context) (rdb *bstore.DB, rerr error) {
evalMutex.Lock()
defer evalMutex.Unlock()
if EvalDB == nil {
p := mox.DataDirPath("dmarceval.db")
os.MkdirAll(filepath.Dir(p), 0770)
db, err := bstore.Open(ctx, p, &bstore.Options{Timeout: 5 * time.Second, Perm: 0660}, EvalDBTypes...)
if err != nil {
return nil, err
}
EvalDB = db
}
return EvalDB, nil
}

var intervalOpts = []int{24, 12, 8, 6, 4, 3, 2}

func intervalHours(seconds int) int {

@ -178,13 +197,23 @@ func intervalHours(seconds int) int {
func AddEvaluation(ctx context.Context, aggregateReportingIntervalSeconds int, e *Evaluation) error {
e.IntervalHours = intervalHours(aggregateReportingIntervalSeconds)

db, err := evalDB(ctx)
if err != nil {
return err
}

e.ID = 0
return EvalDB.Insert(ctx, e)
return db.Insert(ctx, e)
}

// Evaluations returns all evaluations in the database.
func Evaluations(ctx context.Context) ([]Evaluation, error) {
q := bstore.QueryDB[Evaluation](ctx, EvalDB)
db, err := evalDB(ctx)
if err != nil {
return nil, err
}

q := bstore.QueryDB[Evaluation](ctx, db)
q.SortAsc("Evaluated")
return q.List()
}
@ -200,9 +229,14 @@ type EvaluationStat struct {

// EvaluationStats returns evaluation counts and report-sending status per domain.
func EvaluationStats(ctx context.Context) (map[string]EvaluationStat, error) {
db, err := evalDB(ctx)
if err != nil {
return nil, err
}

r := map[string]EvaluationStat{}

err := bstore.QueryDB[Evaluation](ctx, EvalDB).ForEach(func(e Evaluation) error {
err = bstore.QueryDB[Evaluation](ctx, db).ForEach(func(e Evaluation) error {
if stat, ok := r[e.PolicyDomain]; ok {
if !slices.Contains(stat.Dispositions, string(e.Disposition)) {
stat.Dispositions = append(stat.Dispositions, string(e.Disposition))
@ -229,7 +263,12 @@ func EvaluationStats(ctx context.Context) (map[string]EvaluationStat, error) {

// EvaluationsDomain returns all evaluations for a domain.
func EvaluationsDomain(ctx context.Context, domain dns.Domain) ([]Evaluation, error) {
q := bstore.QueryDB[Evaluation](ctx, EvalDB)
db, err := evalDB(ctx)
if err != nil {
return nil, err
}

q := bstore.QueryDB[Evaluation](ctx, db)
q.FilterNonzero(Evaluation{PolicyDomain: domain.Name()})
q.SortAsc("Evaluated")
return q.List()
@ -238,9 +277,14 @@ func EvaluationsDomain(ctx context.Context, domain dns.Domain) ([]Evaluation, er
// RemoveEvaluationsDomain removes evaluations for domain so they won't be sent in
// an aggregate report.
func RemoveEvaluationsDomain(ctx context.Context, domain dns.Domain) error {
q := bstore.QueryDB[Evaluation](ctx, EvalDB)
db, err := evalDB(ctx)
if err != nil {
return err
}

q := bstore.QueryDB[Evaluation](ctx, db)
q.FilterNonzero(Evaluation{PolicyDomain: domain.Name()})
_, err := q.Delete()
_, err = q.Delete()
return err
}

@ -250,7 +294,7 @@ var jitterRand = mox.NewPseudoRand()
// Jitter so we don't cause load at exactly whole hours, other processes may
// already be doing that.
var jitteredTimeUntil = func(t time.Time) time.Duration {
return time.Until(t.Add(time.Duration(30+jitterRand.IntN(60)) * time.Second))
return time.Until(t.Add(time.Duration(30+jitterRand.Intn(60)) * time.Second))
}

// Start launches a goroutine that wakes up at each whole hour (plus jitter) and
@ -274,6 +318,12 @@ func Start(resolver dns.Resolver) {

ctx := mox.Shutdown

db, err := evalDB(ctx)
if err != nil {
log.Errorx("opening dmarc evaluations database for sending dmarc aggregate reports, not sending reports", err)
return
}

for {
now := time.Now()
nextEnd := nextWholeHour(now)
@ -305,12 +355,12 @@ func Start(resolver dns.Resolver) {
// 24 hour interval). They should have been processed by now. We may have kept them
// during temporary errors, but persistent temporary errors shouldn't fill up our
// database. This also cleans up evaluations that were all optional for a domain.
_, err := bstore.QueryDB[Evaluation](ctx, EvalDB).FilterLess("Evaluated", nextEnd.Add(-48*time.Hour)).Delete()
_, err := bstore.QueryDB[Evaluation](ctx, db).FilterLess("Evaluated", nextEnd.Add(-48*time.Hour)).Delete()
log.Check(err, "removing stale dmarc evaluations from database")

clog := log.WithCid(mox.Cid())
clog.Info("sending dmarc aggregate reports", slog.Time("end", nextEnd.UTC()), slog.Any("intervals", intervals))
if err := sendReports(ctx, clog, resolver, EvalDB, nextEnd, intervals); err != nil {
if err := sendReports(ctx, clog, resolver, db, nextEnd, intervals); err != nil {
clog.Errorx("sending dmarc aggregate reports", err)
metricReportError.Inc()
} else {
@ -687,7 +737,9 @@ func sendReportDomain(ctx context.Context, log mlog.Log, resolver dns.Resolver,
report.PolicyPublished = last.PolicyPublished

// Process records in-order for testable results.
for _, recstr := range slices.Sorted(maps.Keys(counts)) {
recstrs := maps.Keys(counts)
sort.Strings(recstrs)
for _, recstr := range recstrs {
rc := counts[recstr]
rc.ReportRecord.Row.Count = rc.count
report.Records = append(report.Records, rc.ReportRecord)
@ -727,7 +779,7 @@ func sendReportDomain(ctx context.Context, log mlog.Log, resolver dns.Resolver,
// DKIM keys, so we can DKIM-sign our reports. SPF should pass anyway.
// A single report can contain deliveries from a single policy domain
// to multiple of our configured domains.
from := smtp.NewAddress("postmaster", mox.Conf.Static.HostnameDomain)
from := smtp.Address{Localpart: "postmaster", Domain: mox.Conf.Static.HostnameDomain}

// Subject follows the form in RFC. ../rfc/7489:1871
subject := fmt.Sprintf("Report Domain: %s Submitter: %s Report-ID: <%s>", dom.ASCII, mox.Conf.Static.HostnameDomain.ASCII, report.ReportMetadata.ReportID)
@ -790,7 +842,7 @@ Period: %s - %s UTC
continue
}

qm := queue.MakeMsg(from.Path(), rcpt.address.Path(), has8bit, smtputf8, msgSize, messageID, []byte(msgPrefix), nil, time.Now(), subject)
qm := queue.MakeMsg(from.Path(), rcpt.address.Path(), has8bit, smtputf8, msgSize, messageID, []byte(msgPrefix), nil, time.Now())
// Don't try as long as regular deliveries, and stop before we would send the
// delayed DSN. Though we also won't send that due to IsDMARCReport.
qm.MaxAttempts = 5
@ -823,15 +875,7 @@ Period: %s - %s UTC
}

func composeAggregateReport(ctx context.Context, log mlog.Log, mf *os.File, fromAddr smtp.Address, recipients []message.NameAddress, subject, text, filename string, reportXMLGzipFile *os.File) (msgPrefix string, has8bit, smtputf8 bool, messageID string, rerr error) {
// We only use smtputf8 if we have to, with a utf-8 localpart. For IDNA, we use ASCII domains.
smtputf8 = fromAddr.Localpart.IsInternational()
for _, r := range recipients {
if smtputf8 {
smtputf8 = r.Address.Localpart.IsInternational()
break
}
}
xc := message.NewComposer(mf, 100*1024*1024, smtputf8)
xc := message.NewComposer(mf, 100*1024*1024)
defer func() {
x := recover()
if x == nil {
@ -844,6 +888,14 @@ func composeAggregateReport(ctx context.Context, log mlog.Log, mf *os.File, from
panic(x)
}()

// We only use smtputf8 if we have to, with a utf-8 localpart. For IDNA, we use ASCII domains.
for _, a := range recipients {
if a.Address.Localpart.IsInternational() {
xc.SMTPUTF8 = true
break
}
}

xc.HeaderAddrs("From", []message.NameAddress{{Address: fromAddr}})
xc.HeaderAddrs("To", recipients)
xc.Subject(subject)
@ -859,7 +911,7 @@ func composeAggregateReport(ctx context.Context, log mlog.Log, mf *os.File, from
xc.Line()

// Textual part, just mentioning this is a DMARC report.
textBody, ct, cte := xc.TextPart("plain", text)
textBody, ct, cte := xc.TextPart(text)
textHdr := textproto.MIMEHeader{}
textHdr.Set("Content-Type", ct)
textHdr.Set("Content-Transfer-Encoding", cte)
@ -945,7 +997,7 @@ Submitting-URI: %s
continue
}

qm := queue.MakeMsg(fromAddr.Path(), rcpt.Address.Path(), has8bit, smtputf8, msgSize, messageID, []byte(msgPrefix), nil, time.Now(), subject)
qm := queue.MakeMsg(fromAddr.Path(), rcpt.Address.Path(), has8bit, smtputf8, msgSize, messageID, []byte(msgPrefix), nil, time.Now())
// Don't try as long as regular deliveries, and stop before we would send the
// delayed DSN. Though we also won't send that due to IsDMARCReport.
qm.MaxAttempts = 5
@ -963,15 +1015,7 @@ Submitting-URI: %s
}

func composeErrorReport(ctx context.Context, log mlog.Log, mf *os.File, fromAddr smtp.Address, recipients []message.NameAddress, subject, text string) (msgPrefix string, has8bit, smtputf8 bool, messageID string, rerr error) {
// We only use smtputf8 if we have to, with a utf-8 localpart. For IDNA, we use ASCII domains.
smtputf8 = fromAddr.Localpart.IsInternational()
for _, r := range recipients {
if smtputf8 {
smtputf8 = r.Address.Localpart.IsInternational()
break
}
}
xc := message.NewComposer(mf, 100*1024*1024, smtputf8)
xc := message.NewComposer(mf, 100*1024*1024)
defer func() {
x := recover()
if x == nil {
@ -984,6 +1028,14 @@ func composeErrorReport(ctx context.Context, log mlog.Log, mf *os.File, fromAddr
panic(x)
}()

// We only use smtputf8 if we have to, with a utf-8 localpart. For IDNA, we use ASCII domains.
for _, a := range recipients {
if a.Address.Localpart.IsInternational() {
xc.SMTPUTF8 = true
break
}
}

xc.HeaderAddrs("From", []message.NameAddress{{Address: fromAddr}})
xc.HeaderAddrs("To", recipients)
xc.Header("Subject", subject)
@ -993,7 +1045,7 @@ func composeErrorReport(ctx context.Context, log mlog.Log, mf *os.File, fromAddr
xc.Header("User-Agent", "mox/"+moxvar.Version)
xc.Header("MIME-Version", "1.0")

textBody, ct, cte := xc.TextPart("plain", text)
textBody, ct, cte := xc.TextPart(text)
xc.Header("Content-Type", ct)
xc.Header("Content-Transfer-Encoding", cte)
xc.Line()
@ -1017,7 +1069,7 @@ func dkimSign(ctx context.Context, log mlog.Log, fromAddr smtp.Address, smtputf8
for fd != zerodom {
confDom, ok := mox.Conf.Domain(fd)
selectors := mox.DKIMSelectors(confDom.DKIM)
if len(selectors) > 0 && !confDom.Disabled {
if len(selectors) > 0 {
dkimHeaders, err := dkim.Sign(ctx, log.Logger, fromAddr.Localpart, fd, selectors, smtputf8, mf)
if err != nil {
log.Errorx("dkim-signing dmarc report, continuing without signature", err)
@ -1039,26 +1091,46 @@ func dkimSign(ctx context.Context, log mlog.Log, fromAddr smtp.Address, smtputf8

// SuppressAdd adds an address to the suppress list.
func SuppressAdd(ctx context.Context, ba *SuppressAddress) error {
return EvalDB.Insert(ctx, ba)
db, err := evalDB(ctx)
if err != nil {
return err
}

return db.Insert(ctx, ba)
}

// SuppressList returns all reporting addresses on the suppress list.
func SuppressList(ctx context.Context) ([]SuppressAddress, error) {
return bstore.QueryDB[SuppressAddress](ctx, EvalDB).SortDesc("ID").List()
db, err := evalDB(ctx)
if err != nil {
return nil, err
}

return bstore.QueryDB[SuppressAddress](ctx, db).SortDesc("ID").List()
}

// SuppressRemove removes a reporting address record from the suppress list.
func SuppressRemove(ctx context.Context, id int64) error {
return EvalDB.Delete(ctx, &SuppressAddress{ID: id})
db, err := evalDB(ctx)
if err != nil {
return err
}

return db.Delete(ctx, &SuppressAddress{ID: id})
}

// SuppressUpdate updates the until field of a reporting address record.
func SuppressUpdate(ctx context.Context, id int64, until time.Time) error {
db, err := evalDB(ctx)
if err != nil {
return err
}

ba := SuppressAddress{ID: id}
err := EvalDB.Get(ctx, &ba)
err = db.Get(ctx, &ba)
if err != nil {
return err
}
ba.Until = until
return EvalDB.Update(ctx, &ba)
return db.Update(ctx, &ba)
}
@ -6,6 +6,7 @@ import (
"encoding/xml"
"fmt"
"io"
"log/slog"
"os"
"path/filepath"
"reflect"
@ -19,7 +20,6 @@ import (
"github.com/mjl-/mox/mox-"
"github.com/mjl-/mox/moxio"
"github.com/mjl-/mox/queue"
"slices"
)

func tcheckf(t *testing.T, err error, format string, args ...any) {
@ -41,13 +41,13 @@ func TestEvaluations(t *testing.T) {
mox.Context = ctxbg
mox.ConfigStaticPath = filepath.FromSlash("../testdata/dmarcdb/mox.conf")
mox.MustLoadConfig(true, false)
EvalDB = nil

os.Remove(mox.DataDirPath("dmarceval.db"))
err := Init()
tcheckf(t, err, "init")
_, err := evalDB(ctxbg)
tcheckf(t, err, "database")
defer func() {
err := Close()
tcheckf(t, err, "close")
EvalDB.Close()
EvalDB = nil
}()

parseJSON := func(s string) (e Evaluation) {
@ -157,17 +157,19 @@ func TestEvaluations(t *testing.T) {
}

func TestSendReports(t *testing.T) {
mlog.SetConfig(map[string]slog.Level{"": slog.LevelDebug})

os.RemoveAll("../testdata/dmarcdb/data")
mox.Context = ctxbg
mox.ConfigStaticPath = filepath.FromSlash("../testdata/dmarcdb/mox.conf")
mox.MustLoadConfig(true, false)
EvalDB = nil

os.Remove(mox.DataDirPath("dmarceval.db"))
err := Init()
tcheckf(t, err, "init")
db, err := evalDB(ctxbg)
tcheckf(t, err, "database")
defer func() {
err := Close()
tcheckf(t, err, "close")
EvalDB.Close()
EvalDB = nil
}()

resolver := dns.MockResolver{
@ -286,7 +288,7 @@ func TestSendReports(t *testing.T) {
mox.Shutdown, mox.ShutdownCancel = context.WithCancel(ctxbg)

for _, e := range evals {
err := EvalDB.Insert(ctxbg, &e)
err := db.Insert(ctxbg, &e)
tcheckf(t, err, "inserting evaluation")
}

@ -302,7 +304,7 @@ func TestSendReports(t *testing.T) {
// Read message file. Also write copy to disk for inspection.
buf, err := io.ReadAll(&moxio.AtReader{R: msgFile})
tcheckf(t, err, "read report message")
err = os.WriteFile("../testdata/dmarcdb/data/report.eml", slices.Concat(qm.MsgPrefix, buf), 0600)
err = os.WriteFile("../testdata/dmarcdb/data/report.eml", append(append([]byte{}, qm.MsgPrefix...), buf...), 0600)
tcheckf(t, err, "write report message")

var feedback *dmarcrpt.Feedback
@ -357,13 +359,13 @@ func TestSendReports(t *testing.T) {

// Address is suppressed.
sa := SuppressAddress{ReportingAddress: "dmarcrpt@sender.example", Until: time.Now().Add(time.Minute)}
err = EvalDB.Insert(ctxbg, &sa)
err = db.Insert(ctxbg, &sa)
tcheckf(t, err, "insert suppress address")
test([]Evaluation{eval}, map[string]struct{}{}, map[string]struct{}{}, nil)

// Suppression has expired.
sa.Until = time.Now().Add(-time.Minute)
err = EvalDB.Update(ctxbg, &sa)
err = db.Update(ctxbg, &sa)
tcheckf(t, err, "update suppress address")
test([]Evaluation{eval}, map[string]struct{}{"dmarcrpt@sender.example": {}}, map[string]struct{}{}, expFeedback)

@ -1,17 +0,0 @@
package dmarcdb

import (
"fmt"
"os"
"testing"

"github.com/mjl-/mox/metrics"
)

func TestMain(m *testing.M) {
m.Run()
if metrics.Panics.Load() > 0 {
fmt.Println("unhandled panics encountered")
os.Exit(2)
}
}
@ -3,6 +3,9 @@ package dmarcdb
import (
"context"
"fmt"
"os"
"path/filepath"
"sync"
"time"

"github.com/prometheus/client_golang/prometheus"
@ -12,11 +15,13 @@ import (

"github.com/mjl-/mox/dmarcrpt"
"github.com/mjl-/mox/dns"
"github.com/mjl-/mox/mox-"
)

var (
ReportsDBTypes = []any{DomainFeedback{}} // Types stored in DB.
ReportsDB *bstore.DB // Exported for backups.
reportsMutex sync.Mutex
)

var (
@ -54,18 +59,38 @@ type DomainFeedback struct {
dmarcrpt.Feedback
}

func reportsDB(ctx context.Context) (rdb *bstore.DB, rerr error) {
reportsMutex.Lock()
defer reportsMutex.Unlock()
if ReportsDB == nil {
p := mox.DataDirPath("dmarcrpt.db")
os.MkdirAll(filepath.Dir(p), 0770)
db, err := bstore.Open(ctx, p, &bstore.Options{Timeout: 5 * time.Second, Perm: 0660}, ReportsDBTypes...)
if err != nil {
return nil, err
}
ReportsDB = db
}
return ReportsDB, nil
}

// AddReport adds a DMARC aggregate feedback report from an email to the database,
// and updates prometheus metrics.
//
// fromDomain is the domain in the report message From header.
func AddReport(ctx context.Context, f *dmarcrpt.Feedback, fromDomain dns.Domain) error {
db, err := reportsDB(ctx)
if err != nil {
return err
}

d, err := dns.ParseDomain(f.PolicyPublished.Domain)
if err != nil {
return fmt.Errorf("parsing domain in report: %v", err)
}

df := DomainFeedback{0, d.Name(), fromDomain.Name(), *f}
if err := ReportsDB.Insert(ctx, &df); err != nil {
if err := db.Insert(ctx, &df); err != nil {
return err
}

@ -104,23 +129,38 @@ func AddReport(ctx context.Context, f *dmarcrpt.Feedback, fromDomain dns.Domain)

// Records returns all reports in the database.
func Records(ctx context.Context) ([]DomainFeedback, error) {
return bstore.QueryDB[DomainFeedback](ctx, ReportsDB).List()
db, err := reportsDB(ctx)
if err != nil {
return nil, err
}

return bstore.QueryDB[DomainFeedback](ctx, db).List()
}

// RecordID returns the report for the ID.
func RecordID(ctx context.Context, id int64) (DomainFeedback, error) {
db, err := reportsDB(ctx)
if err != nil {
return DomainFeedback{}, err
}

e := DomainFeedback{ID: id}
err := ReportsDB.Get(ctx, &e)
err = db.Get(ctx, &e)
return e, err
}

// RecordsPeriodDomain returns the reports overlapping start and end, for the given
// domain. If domain is empty, all records match for domain.
func RecordsPeriodDomain(ctx context.Context, start, end time.Time, domain string) ([]DomainFeedback, error) {
db, err := reportsDB(ctx)
if err != nil {
return nil, err
}

s := start.Unix()
e := end.Unix()

q := bstore.QueryDB[DomainFeedback](ctx, ReportsDB)
q := bstore.QueryDB[DomainFeedback](ctx, db)
if domain != "" {
q.FilterNonzero(DomainFeedback{Domain: domain})
}
@ -20,12 +20,16 @@ func TestDMARCDB(t *testing.T) {
mox.ConfigStaticPath = filepath.FromSlash("../testdata/dmarcdb/mox.conf")
mox.MustLoadConfig(true, false)

os.Remove(mox.DataDirPath("dmarcrpt.db"))
err := Init()
tcheckf(t, err, "init")
dbpath := mox.DataDirPath("dmarcrpt.db")
os.MkdirAll(filepath.Dir(dbpath), 0770)

if err := Init(); err != nil {
t.Fatalf("init database: %s", err)
}
defer os.Remove(dbpath)
defer func() {
err := Close()
tcheckf(t, err, "close")
ReportsDB.Close()
ReportsDB = nil
}()

feedback := &dmarcrpt.Feedback{
@ -52,7 +52,7 @@ func parseMessageReport(log mlog.Log, p message.Part) (*Feedback, error) {
// content of the message.

if p.MediaType != "MULTIPART" {
return parseReport(log, p)
return parseReport(p)
}

for {
@ -72,7 +72,7 @@ func parseMessageReport(log mlog.Log, p message.Part) (*Feedback, error) {
}
}

func parseReport(log mlog.Log, p message.Part) (*Feedback, error) {
func parseReport(p message.Part) (*Feedback, error) {
ct := strings.ToLower(p.MediaType + "/" + p.MediaSubType)
r := p.Reader()

@ -93,7 +93,7 @@ func parseReport(log mlog.Log, p message.Part) (*Feedback, error) {
switch ct {
case "application/zip":
// Google sends messages with direct application/zip content-type.
return parseZip(log, r)
return parseZip(r)
case "application/gzip", "application/x-gzip":
gzr, err := gzip.NewReader(r)
if err != nil {
@ -106,7 +106,7 @@ func parseReport(log mlog.Log, p message.Part) (*Feedback, error) {
return nil, ErrNoReport
}

func parseZip(log mlog.Log, r io.Reader) (*Feedback, error) {
func parseZip(r io.Reader) (*Feedback, error) {
buf, err := io.ReadAll(r)
if err != nil {
return nil, fmt.Errorf("reading feedback: %s", err)
@ -122,9 +122,6 @@ func parseZip(log mlog.Log, r io.Reader) (*Feedback, error) {
if err != nil {
return nil, fmt.Errorf("opening file in zip: %s", err)
}
defer func() {
err := f.Close()
log.Check(err, "closing report file in zip file")
}()
defer f.Close()
return ParseReport(f)
}
17
dns/dns.go
@ -5,7 +5,6 @@ package dns
import (
"errors"
"fmt"
"net"
"strings"

"golang.org/x/net/idna"
@ -20,7 +19,6 @@ var (
errTrailingDot = errors.New("dns name has trailing dot")
errUnderscore = errors.New("domain name with underscore")
errIDNA = errors.New("idna")
errIPNotName = errors.New("ip address while name required")
)

// Domain is a domain name, with one or more labels, with at least an ASCII
@ -97,12 +95,6 @@ func ParseDomain(s string) (Domain, error) {
return Domain{}, errTrailingDot
}

// IPv4 addresses would be accepted by idna lookups. TLDs cannot be all numerical,
// so IP addresses are not valid DNS names.
if net.ParseIP(s) != nil {
return Domain{}, errIPNotName
}

ascii, err := idna.Lookup.ToASCII(s)
if err != nil {
return Domain{}, fmt.Errorf("%w: to ascii: %v", errIDNA, err)
@ -156,9 +148,7 @@ func ParseDomainLax(s string) (Domain, error) {
return Domain{ASCII: s}, nil
}

// IsNotFound returns whether an error is an adns.DNSError or net.DNSError with
// IsNotFound set.
//
// IsNotFound returns whether an error is an adns.DNSError with IsNotFound set.
// IsNotFound means the requested type does not exist for the given domain (a
// nodata or nxdomain response). It doesn't not necessarily mean no other types for
// that name exist.
@ -168,7 +158,6 @@ func ParseDomainLax(s string) (Domain, error) {
// The adns resolver (just like the Go resolver) returns an IsNotFound error for
// both cases, there is no need to explicitly check for zero entries.
func IsNotFound(err error) bool {
var adnsErr *adns.DNSError
var dnsErr *net.DNSError
return err != nil && (errors.As(err, &adnsErr) && adnsErr.IsNotFound || errors.As(err, &dnsErr) && dnsErr.IsNotFound)
var dnsErr *adns.DNSError
return err != nil && errors.As(err, &dnsErr) && dnsErr.IsNotFound
}
@ -10,7 +10,7 @@
// looked up with an DNS "A" lookup of a name similar to an IPv4 address, but with
// 4-bit hexadecimal dot-separated characters, in reverse.
//
// The health of a DNSBL "zone" can be checked through a lookup of 127.0.0.1
// The health of a DNSBL "zone" can be check through a lookup of 127.0.0.1
// (must not be present) and 127.0.0.2 (must be present).
package dnsbl

|
||||
version: '3.7'
|
||||
services:
|
||||
mox:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: Dockerfile.moximaptest
|
||||
volumes:
|
||||
- ./testdata/imaptest/config:/mox/config:z
|
||||
- ./testdata/imaptest/data:/mox/data:z
|
||||
- ./testdata/imaptest/imaptest.mbox:/mox/imaptest.mbox:z
|
||||
- ./testdata/imaptest/config:/mox/config
|
||||
- ./testdata/imaptest/data:/mox/data
|
||||
- ./testdata/imaptest/imaptest.mbox:/mox/imaptest.mbox
|
||||
working_dir: /mox
|
||||
tty: true # For job control with set -m.
|
||||
command: sh -c 'set -m; mox serve & sleep 1; echo testtest | mox setaccountpassword mjl; fg'
|
||||
@ -23,7 +24,7 @@ services:
|
||||
command: host=mox port=1143 'user=mjl@mox.example' pass=testtest mbox=/imaptest/imaptest.mbox
|
||||
working_dir: /imaptest
|
||||
volumes:
|
||||
- ./testdata/imaptest:/imaptest:z
|
||||
- ./testdata/imaptest:/imaptest
|
||||
depends_on:
|
||||
mox:
|
||||
condition: service_healthy
|
||||
|
@ -1,3 +1,4 @@
|
||||
version: '3.7'
|
||||
services:
|
||||
# We run integration_test.go from this container, it connects to the other mox instances.
|
||||
test:
|
||||
@ -8,11 +9,11 @@ services:
|
||||
# dials in integration_test.go succeed.
|
||||
command: ["sh", "-c", "set -ex; cat /integration/tmp-pebble-ca.pem /integration/tls/ca.pem >>/etc/ssl/certs/ca-certificates.crt; go test -tags integration"]
|
||||
volumes:
|
||||
- ./.go:/.go:z
|
||||
- ./testdata/integration/resolv.conf:/etc/resolv.conf:z
|
||||
- ./testdata/integration:/integration:z
|
||||
- ./testdata/integration/moxsubmit.conf:/etc/moxsubmit.conf:z
|
||||
- .:/mox:z
|
||||
- ./.go:/.go
|
||||
- ./testdata/integration/resolv.conf:/etc/resolv.conf
|
||||
- ./testdata/integration:/integration
|
||||
- ./testdata/integration/moxsubmit.conf:/etc/moxsubmit.conf
|
||||
- .:/mox
|
||||
environment:
|
||||
GOCACHE: /.go/.cache/go-build
|
||||
depends_on:
|
||||
@ -25,8 +26,6 @@ services:
|
||||
condition: service_healthy
|
||||
localserve:
|
||||
condition: service_healthy
|
||||
moxacmepebblealpn:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
mailnet1:
|
||||
ipv4_address: 172.28.1.50
|
||||
@ -40,8 +39,8 @@ services:
|
||||
MOX_UID: "${MOX_UID}"
|
||||
command: ["sh", "-c", "/integration/moxacmepebble.sh"]
|
||||
volumes:
|
||||
- ./testdata/integration/resolv.conf:/etc/resolv.conf:z
|
||||
- ./testdata/integration:/integration:z
|
||||
- ./testdata/integration/resolv.conf:/etc/resolv.conf
|
||||
- ./testdata/integration:/integration
|
||||
healthcheck:
|
||||
test: netstat -nlt | grep ':25 '
|
||||
interval: 1s
|
||||
@ -65,8 +64,8 @@ services:
|
||||
MOX_UID: "${MOX_UID}"
|
||||
command: ["sh", "-c", "/integration/moxmail2.sh"]
|
||||
volumes:
|
||||
- ./testdata/integration/resolv.conf:/etc/resolv.conf:z
|
||||
- ./testdata/integration:/integration:z
|
||||
- ./testdata/integration/resolv.conf:/etc/resolv.conf
|
||||
- ./testdata/integration:/integration
|
||||
healthcheck:
|
||||
test: netstat -nlt | grep ':25 '
|
||||
interval: 1s
|
||||
@ -84,40 +83,15 @@ services:
|
||||
mailnet1:
|
||||
ipv4_address: 172.28.1.20
|
||||
|
||||
# Third mox instance that uses ACME with pebble and has ALPN enabled.
|
||||
moxacmepebblealpn:
|
||||
hostname: moxacmepebblealpn.mox1.example
|
||||
domainname: mox1.example
|
||||
image: mox_integration_moxmail
|
||||
environment:
|
||||
MOX_UID: "${MOX_UID}"
|
||||
command: ["sh", "-c", "/integration/moxacmepebblealpn.sh"]
|
||||
volumes:
|
||||
- ./testdata/integration/resolv.conf:/etc/resolv.conf:z
|
||||
- ./testdata/integration:/integration:z
|
||||
healthcheck:
|
||||
test: netstat -nlt | grep ':25 '
|
||||
interval: 1s
|
||||
timeout: 1s
|
||||
retries: 10
|
||||
depends_on:
|
||||
dns:
|
||||
condition: service_healthy
|
||||
acmepebble:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
mailnet1:
|
||||
ipv4_address: 172.28.1.80
|
||||
|
||||
localserve:
|
||||
hostname: localserve.mox1.example
|
||||
domainname: mox1.example
|
||||
image: mox_integration_moxmail
|
||||
command: ["sh", "-c", "set -e; chmod o+r /etc/resolv.conf; mox -checkconsistency localserve -ip 172.28.1.60"]
|
||||
volumes:
|
||||
- ./.go:/.go:z
|
||||
- ./testdata/integration/resolv.conf:/etc/resolv.conf:z
|
||||
- .:/mox:z
|
||||
- ./.go:/.go
|
||||
- ./testdata/integration/resolv.conf:/etc/resolv.conf
|
||||
- .:/mox
|
||||
environment:
|
||||
GOCACHE: /.go/.cache/go-build
|
||||
healthcheck:
|
||||
@ -140,7 +114,7 @@ services:
|
||||
context: testdata/integration
|
||||
volumes:
|
||||
# todo: figure out how to mount files with a uid that the process in the container can read...
|
||||
- ./testdata/integration/resolv.conf:/etc/resolv.conf:z
|
||||
- ./testdata/integration/resolv.conf:/etc/resolv.conf
|
||||
command: ["sh", "-c", "set -e; chmod o+r /etc/resolv.conf; (echo 'maillog_file = /dev/stdout'; echo 'mydestination = $$myhostname, localhost.$$mydomain, localhost, $$mydomain'; echo 'smtp_tls_security_level = may') >>/etc/postfix/main.cf; echo 'root: postfix@mox1.example' >>/etc/postfix/aliases; newaliases; postfix start-fg"]
|
||||
healthcheck:
|
||||
test: netstat -nlt | grep ':25 '
|
||||
@ -161,8 +135,8 @@ services:
|
||||
# todo: figure out how to build from dockerfile with empty context without creating empty dirs in file system.
|
||||
context: testdata/integration
|
||||
volumes:
|
||||
- ./testdata/integration/resolv.conf:/etc/resolv.conf:z
|
||||
- ./testdata/integration:/integration:z
|
||||
- ./testdata/integration/resolv.conf:/etc/resolv.conf
|
||||
- ./testdata/integration:/integration
|
||||
# We start with a base example.zone, but moxacmepebble appends its records,
|
||||
# followed by moxmail2. They restart unbound after appending records.
|
||||
command: ["sh", "-c", "set -ex; ls -l /etc/resolv.conf; chmod o+r /etc/resolv.conf; install -m 640 -o unbound /integration/unbound.conf /etc/unbound/; chmod 755 /integration; chmod 644 /integration/*.zone; cp /integration/example.zone /integration/example-integration.zone; ls -ld /integration /integration/reverse.zone; unbound -d -p -v"]
|
||||
@ -182,8 +156,8 @@ services:
|
||||
hostname: acmepebble.example
|
||||
image: docker.io/letsencrypt/pebble:v2.3.1@sha256:fc5a537bf8fbc7cc63aa24ec3142283aa9b6ba54529f86eb8ff31fbde7c5b258
|
||||
volumes:
|
||||
- ./testdata/integration/resolv.conf:/etc/resolv.conf:z
|
||||
- ./testdata/integration:/integration:z
|
||||
- ./testdata/integration/resolv.conf:/etc/resolv.conf
|
||||
- ./testdata/integration:/integration
|
||||
command: ["sh", "-c", "set -ex; mount; ls -l /etc/resolv.conf; chmod o+r /etc/resolv.conf; pebble -config /integration/pebble-config.json"]
|
||||
ports:
|
||||
- 14000:14000 # ACME port
|
||||
|
@ -27,6 +27,7 @@
|
||||
# The -ip flag ensures connections to the published ports make it to mox, and it
|
||||
# prevents listening on ::1 (IPv6 is not enabled in docker by default).
|
||||
|
||||
version: '3.7'
|
||||
services:
|
||||
mox:
|
||||
# Replace "latest" with the version you want to run, see https://r.xmox.nl/r/mox/.
|
||||
@ -38,11 +39,11 @@ services:
|
||||
# machine, and the IPs of incoming connections for spam filtering.
|
||||
network_mode: 'host'
|
||||
volumes:
|
||||
- ./config:/mox/config:z
|
||||
- ./data:/mox/data:z
|
||||
- ./config:/mox/config
|
||||
- ./data:/mox/data
|
||||
# web is optional but recommended to bind in, useful for serving static files with
|
||||
# the webserver.
|
||||
- ./web:/mox/web:z
|
||||
- ./web:/mox/web
|
||||
working_dir: /mox
|
||||
restart: on-failure
|
||||
healthcheck:
|
||||
|
15
dsn/dsn.go
@ -114,9 +114,9 @@ type Recipient struct {
// deliveries.
RemoteMTA NameIP

// DiagnosticCodeSMTP are the full SMTP response lines, space separated. The marshaled
// form starts with "smtp; ", this value does not.
DiagnosticCodeSMTP string
// DiagnosticCode should either be empty, or start with "smtp; " followed by the
// literal full SMTP response lines, space separated.
DiagnosticCode string

LastAttemptDate time.Time
FinalLogID string
@ -286,9 +286,9 @@ func (m *Message) Compose(log mlog.Log, smtputf8 bool) ([]byte, error) {
status("Remote-MTA", s)
}
// Presence of Diagnostic-Code indicates the code is from Remote-MTA. ../rfc/3464:1053
if r.DiagnosticCodeSMTP != "" {
if r.DiagnosticCode != "" {
// ../rfc/3461:1342 ../rfc/6533:589
status("Diagnostic-Code", "smtp; "+r.DiagnosticCodeSMTP)
status("Diagnostic-Code", r.DiagnosticCode)
}
if !r.LastAttemptDate.IsZero() {
status("Last-Attempt-Date", r.LastAttemptDate.Format(message.RFC5322Z)) // ../rfc/3464:1076
@ -340,7 +340,10 @@ func (m *Message) Compose(log mlog.Log, smtputf8 bool) ([]byte, error) {
data := base64.StdEncoding.EncodeToString(headers)
for len(data) > 0 {
line := data
n := min(len(line), 76) // ../rfc/2045:1372
n := len(line)
if n > 78 {
n = 78
}
line, data = data[:n], data[n:]
if _, err := origp.Write([]byte(line + "\r\n")); err != nil {
return nil, err
@ -50,8 +50,8 @@ func tcheckType(t *testing.T, p *message.Part, mt, mst, cte string) {
if !strings.EqualFold(p.MediaSubType, mst) {
t.Fatalf("got mediasubtype %q, expected %q", p.MediaSubType, mst)
}
if !(cte == "" && p.ContentTransferEncoding == nil || cte != "" && p.ContentTransferEncoding != nil && strings.EqualFold(cte, *p.ContentTransferEncoding)) {
t.Fatalf("got content-transfer-encoding %v, expected %v", p.ContentTransferEncoding, cte)
if !strings.EqualFold(p.ContentTransferEncoding, cte) {
t.Fatalf("got content-transfer-encoding %q, expected %q", p.ContentTransferEncoding, cte)
}
}
15
dsn/parse.go
@ -14,7 +14,6 @@ import (
"github.com/mjl-/mox/message"
"github.com/mjl-/mox/mlog"
"github.com/mjl-/mox/smtp"
"slices"
)

// Parse reads a DSN message.
@ -218,9 +217,15 @@ func parseRecipientHeader(mr *textproto.Reader, utf8 bool) (Recipient, error) {
case "Action":
a := Action(strings.ToLower(v))
actions := []Action{Failed, Delayed, Delivered, Relayed, Expanded}
if slices.Contains(actions, a) {
r.Action = a
} else {
var ok bool
for _, x := range actions {
if a == x {
ok = true
r.Action = a
break
}
}
if !ok {
err = fmt.Errorf("unrecognized action %q", v)
}
case "Status":
@ -244,7 +249,7 @@ func parseRecipientHeader(mr *textproto.Reader, utf8 bool) (Recipient, error) {
} else if len(t) != 2 {
err = fmt.Errorf("missing semicolon to separate diagnostic-type from code")
} else {
r.DiagnosticCodeSMTP = strings.TrimSpace(t[1])
r.DiagnosticCode = strings.TrimSpace(t[1])
}
case "Last-Attempt-Date":
r.LastAttemptDate, err = parseDateTime(v)
132
examples.go
@ -1,21 +1,13 @@
package main

import (
"bytes"
"encoding/base64"
"encoding/json"
"fmt"
"log"
"reflect"
"strings"
"time"

"github.com/mjl-/sconf"

"github.com/mjl-/mox/config"
"github.com/mjl-/mox/mox-"
"github.com/mjl-/mox/smtp"
"github.com/mjl-/mox/webhook"
)

func cmdExample(c *cmd) {
@ -44,33 +36,7 @@ func cmdExample(c *cmd) {
fmt.Print(match())
}

func cmdConfigExample(c *cmd) {
c.params = "[name]"
c.help = `List available config examples, or print a specific example.`

args := c.Parse()
if len(args) > 1 {
c.Usage()
}

var match func() string
for _, ex := range configExamples {
if len(args) == 0 {
fmt.Println(ex.Name)
} else if args[0] == ex.Name {
match = ex.Get
}
}
if len(args) == 0 {
return
}
if match == nil {
log.Fatalln("not found")
}
fmt.Print(match())
}

var configExamples = []struct {
var examples = []struct {
Name string
Get func() string
}{
@ -169,7 +135,7 @@ WebHandlers:
"transport",
func() string {
const moxconf = `# Snippet for mox.conf, defining a transport called Example that connects on the
# SMTP submission with TLS port 465 ("submissions"), authenticating with
# SMTP submission with TLS port 465 ("submissions), authenticating with
# SCRAM-SHA-256-PLUS (other providers may not support SCRAM-SHA-256-PLUS, but they
# typically do support the older CRAM-MD5).:

@ -229,97 +195,3 @@ Routes:
},
},
}

var exampleTime = time.Date(2024, time.March, 27, 0, 0, 0, 0, time.UTC)

var examples = []struct {
Name string
Get func() string
}{
{
"webhook-outgoing-delivered",
func() string {
v := webhook.Outgoing{
Version: 0,
Event: webhook.EventDelivered,
QueueMsgID: 101,
FromID: base64.RawURLEncoding.EncodeToString([]byte("0123456789abcdef")),
MessageID: "<QnxzgulZK51utga6agH_rg@mox.example>",
Subject: "subject of original message",
WebhookQueued: exampleTime,
Extra: map[string]string{},
SMTPCode: smtp.C250Completed,
}
return "Example webhook HTTP POST JSON body for successful outgoing delivery:\n\n\t" + formatJSON(v)
},
},
{
"webhook-outgoing-dsn-failed",
func() string {
v := webhook.Outgoing{
Version: 0,
Event: webhook.EventFailed,
DSN: true,
Suppressing: true,
QueueMsgID: 102,
FromID: base64.RawURLEncoding.EncodeToString([]byte("0123456789abcdef")),
MessageID: "<QnxzgulZK51utga6agH_rg@mox.example>",
Subject: "subject of original message",
WebhookQueued: exampleTime,
Extra: map[string]string{"userid": "456"},
Error: "timeout connecting to host",
SMTPCode: smtp.C554TransactionFailed,
SMTPEnhancedCode: "5." + smtp.SeNet4Other0,
}
return `Example webhook HTTP POST JSON body for failed delivery based on incoming DSN
message, with custom extra data fields (from original submission), and adding address to the suppression list:

` + formatJSON(v)
},
},
{
"webhook-incoming-basic",
func() string {
v := webhook.Incoming{
Version: 0,
From: []webhook.NameAddress{{Address: "mox@localhost"}},
To: []webhook.NameAddress{{Address: "mjl@localhost"}},
Subject: "hi",
MessageID: "<QnxzgulZK51utga6agH_rg@mox.example>",
Date: &exampleTime,
Text: "hello world ☺\n",
Structure: webhook.Structure{
ContentType: "text/plain",
ContentTypeParams: map[string]string{"charset": "utf-8"},
DecodedSize: int64(len("hello world ☺\r\n")),
Parts: []webhook.Structure{},
},
Meta: webhook.IncomingMeta{
MsgID: 201,
MailFrom: "mox@localhost",
MailFromValidated: false,
MsgFromValidated: true,
RcptTo: "mjl@localhost",
DKIMVerifiedDomains: []string{"localhost"},
RemoteIP: "127.0.0.1",
Received: exampleTime.Add(3 * time.Second),
MailboxName: "Inbox",
Automated: false,
},
}
return "Example JSON body for webhooks for incoming delivery of basic message:\n\n\t" + formatJSON(v)
},
},
}

func formatJSON(v any) string {
nv, _ := mox.FillNil(reflect.ValueOf(v))
v = nv.Interface()
var b bytes.Buffer
enc := json.NewEncoder(&b)
enc.SetIndent("\t", "\t")
enc.SetEscapeHTML(false)
err := enc.Encode(v)
xcheckf(err, "encoding to json")
return b.String()
}
25
export.go
@ -12,22 +12,20 @@ import (
)

func cmdExportMaildir(c *cmd) {
c.params = "[-single] dst-dir account-path [mailbox]"
c.params = "dst-dir account-path [mailbox]"
c.help = `Export one or all mailboxes from an account in maildir format.

Export bypasses a running mox instance. It opens the account mailbox/message
database file directly. This may block if a running mox instance also has the
database open, e.g. for IMAP connections. To export from a running instance, use
the accounts web page or webmail.
the accounts web page.
`
var single bool
c.flag.BoolVar(&single, "single", false, "export single mailbox, without any children. disabled if mailbox isn't specified.")
args := c.Parse()
xcmdExport(false, single, args, c)
xcmdExport(false, args, c)
}

func cmdExportMbox(c *cmd) {
c.params = "[-single] dst-dir account-path [mailbox]"
c.params = "dst-dir account-path [mailbox]"
c.help = `Export messages from one or all mailboxes in an account in mbox format.

Using mbox is not recommended. Maildir is a better format.
@ -35,19 +33,17 @@ Using mbox is not recommended. Maildir is a better format.
Export bypasses a running mox instance. It opens the account mailbox/message
database file directly. This may block if a running mox instance also has the
database open, e.g. for IMAP connections. To export from a running instance, use
the accounts web page or webmail.
the accounts web page.

For mbox export, "mboxrd" is used where message lines starting with the magic
"From " string are escaped by prepending a >. All ">*From " are escaped,
otherwise reconstructing the original could lose a ">".
`
var single bool
c.flag.BoolVar(&single, "single", false, "export single mailbox, without any children. disabled if mailbox isn't specified.")
args := c.Parse()
xcmdExport(true, single, args, c)
xcmdExport(true, args, c)
}

func xcmdExport(mbox, single bool, args []string, c *cmd) {
func xcmdExport(mbox bool, args []string, c *cmd) {
if len(args) != 2 && len(args) != 3 {
c.Usage()
}
@ -57,13 +53,10 @@ func xcmdExport(mbox, single bool, args []string, c *cmd) {
var mailbox string
if len(args) == 3 {
mailbox = args[2]
} else {
single = false
}

dbpath := filepath.Join(accountDir, "index.db")
opts := bstore.Options{Timeout: 5 * time.Second, Perm: 0660, RegisterLogger: c.log.Logger}
db, err := bstore.Open(context.Background(), dbpath, &opts, store.DBTypes...)
db, err := bstore.Open(context.Background(), dbpath, &bstore.Options{Timeout: 5 * time.Second, Perm: 0660}, store.DBTypes...)
xcheckf(err, "open database %q", dbpath)
defer func() {
if err := db.Close(); err != nil {
@ -72,7 +65,7 @@ func xcmdExport(mbox, single bool, args []string, c *cmd) {
}()

a := store.DirArchiver{Dir: dst}
err = store.ExportMessages(context.Background(), c.log, db, accountDir, a, !mbox, mailbox, nil, !single)
err = store.ExportMessages(context.Background(), c.log, db, accountDir, a, !mbox, mailbox)
xcheckf(err, "exporting messages")
err = a.Close()
xcheckf(err, "closing archiver")
10
genapidoc.sh
@ -1,10 +0,0 @@
#!/bin/sh
set -eu

# we rewrite some dmarcprt and tlsrpt enums into untyped strings: real-world
# reports have invalid values, and our loose Go typed strings accept all values,
# but we don't want the typescript runtime checker to fail on those unrecognized
# values.
(cd webadmin && CGO_ENABLED=0 go run ../vendor/github.com/mjl-/sherpadoc/cmd/sherpadoc/*.go -adjust-function-names none -rename 'config Domain ConfigDomain,dmarc Policy DMARCPolicy,mtasts MX STSMX,tlsrptdb Record TLSReportRecord,tlsrptdb SuppressAddress TLSRPTSuppressAddress,dmarcrpt DKIMResult string,dmarcrpt SPFResult string,dmarcrpt SPFDomainScope string,dmarcrpt DMARCResult string,dmarcrpt PolicyOverride string,dmarcrpt Alignment string,dmarcrpt Disposition string,tlsrpt PolicyType string,tlsrpt ResultType string' Admin) >webadmin/api.json
(cd webaccount && CGO_ENABLED=0 go run ../vendor/github.com/mjl-/sherpadoc/cmd/sherpadoc/*.go -adjust-function-names none Account) >webaccount/api.json
(cd webmail && CGO_ENABLED=0 go run ../vendor/github.com/mjl-/sherpadoc/cmd/sherpadoc/*.go -adjust-function-names none Webmail) >webmail/api.json
20
gendoc.sh
@ -1,6 +1,5 @@
#!/usr/bin/env sh

# ./doc.go
(
cat <<EOF
/*
@ -26,7 +25,7 @@ any parameters. Followed by the help and usage information for each command.

EOF

./mox 2>&1 | sed -e 's/^usage: */ /' -e 's/^ */ /'
./mox 2>&1 | sed -e 's/^usage: */\t/' -e 's/^ */\t/'
echo
./mox helpall 2>&1

@ -39,7 +38,6 @@ EOF
)>doc.go
gofmt -w doc.go

# ./config/doc.go
(
cat <<EOF
/*
@ -80,29 +78,29 @@ See https://pkg.go.dev/github.com/mjl-/sconf for details.
# mox.conf

EOF
./mox config describe-static | sed 's/^/ /'
./mox config describe-static | sed 's/^/\t/'

cat <<EOF

# domains.conf

EOF
./mox config describe-domains | sed 's/^/ /'
./mox config describe-domains | sed 's/^/\t/'

cat <<EOF

# Examples

Mox includes configuration files to illustrate common setups. You can see these
examples with "mox config example", and print a specific example with "mox
config example <name>". Below are all examples included in mox.
examples with "mox example", and print a specific example with "mox example
<name>". Below are all examples included in mox.

EOF

for ex in $(./mox config example); do
for ex in $(./mox example); do
echo '# Example '$ex
echo
./mox config example $ex | sed 's/^/ /'
./mox example $ex | sed 's/^/\t/'
echo
done

@ -114,7 +112,3 @@ package config
EOF
)>config/doc.go
gofmt -w config/doc.go

# ./webapi/doc.go
./webapi/gendoc.sh >webapi/doc.go
gofmt -w webapi/doc.go
@ -1,7 +0,0 @@
#!/bin/sh
rm -r licenses
set -e
for p in $(cd vendor && find . -iname '*license*' -or -iname '*licence*' -or -iname '*notice*' -or -iname '*patent*'); do
(set +e; mkdir -p $(dirname licenses/$p))
cp vendor/$p licenses/$p
done
@ -30,7 +30,7 @@ import (

func cmdGentestdata(c *cmd) {
c.unlisted = true
c.params = "destdir"
c.params = "dest-dir"
c.help = `Generate a data directory populated, for testing upgrades.`
args := c.Parse()
if len(args) != 1 {
@ -187,12 +187,6 @@ Accounts:
err = os.WriteFile(filepath.Join(destDataDir, "moxversion"), []byte(moxvar.Version), 0660)
xcheckf(err, "writing moxversion")

// Populate auth.db
err = store.Init(ctxbg)
xcheckf(err, "store init")
err = store.TLSPublicKeyAdd(ctxbg, &store.TLSPublicKey{Name: "testkey", Fingerprint: "...", Type: "ecdsa-p256", CertDER: []byte("..."), Account: "test0", LoginAddress: "test0@mox.example"})
xcheckf(err, "adding tlspubkey")

// Populate dmarc.db.
err = dmarcdb.Init()
xcheckf(err, "dmarcdb init")
@ -207,7 +201,7 @@ Accounts:
mtastsPolicy := mtasts.Policy{
Version: "STSv1",
Mode: mtasts.ModeTesting,
MX: []mtasts.MX{
MX: []mtasts.STSMX{
{Domain: dns.Domain{ASCII: "mx1.example.com"}},
{Domain: dns.Domain{ASCII: "mx2.example.com"}},
{Domain: dns.Domain{ASCII: "backup-example.com"}, Wildcard: true},
@ -234,17 +228,18 @@ Accounts:
prefix := []byte{}
mf := tempfile()
xcheckf(err, "temp file for queue message")
defer store.CloseRemoveTempFile(c.log, mf, "test message")
defer os.Remove(mf.Name())
defer mf.Close()
const qmsg = "From: <test0@mox.example>\r\nTo: <other@remote.example>\r\nSubject: test\r\n\r\nthe message...\r\n"
_, err = fmt.Fprint(mf, qmsg)
xcheckf(err, "writing message")
qm := queue.MakeMsg(mailfrom, rcptto, false, false, int64(len(qmsg)), "<test@localhost>", prefix, nil, time.Now(), "test")
qm := queue.MakeMsg(mailfrom, rcptto, false, false, int64(len(qmsg)), "<test@localhost>", prefix, nil, time.Now())
err = queue.Add(ctxbg, c.log, "test0", mf, qm)
xcheckf(err, "enqueue message")

// Create three accounts.
// First account without messages.
accTest0, err := store.OpenAccount(c.log, "test0", false)
accTest0, err := store.OpenAccount(c.log, "test0")
xcheckf(err, "open account test0")
err = accTest0.ThreadingWait(c.log)
xcheckf(err, "wait for threading to finish")
@ -252,7 +247,7 @@ Accounts:
xcheckf(err, "close account")

// Second account with one message.
accTest1, err := store.OpenAccount(c.log, "test1", false)
accTest1, err := store.OpenAccount(c.log, "test1")
xcheckf(err, "open account test1")
err = accTest1.ThreadingWait(c.log)
xcheckf(err, "wait for threading to finish")
@ -263,6 +258,7 @@ Accounts:
m := store.Message{
MailboxID: inbox.ID,
MailboxOrigID: inbox.ID,
MailboxDestinedID: inbox.ID,
RemoteIP: "1.2.3.4",
RemoteIPMasked1: "1.2.3.4",
RemoteIPMasked2: "1.2.3.0",
@ -287,13 +283,20 @@ Accounts:
}
mf := tempfile()
xcheckf(err, "creating temp file for delivery")
defer store.CloseRemoveTempFile(c.log, mf, "test message")
_, err = fmt.Fprint(mf, msg)
xcheckf(err, "writing deliver message to file")
err = accTest1.DeliverMessage(c.log, tx, &m, mf, false, true, false, true)

err = accTest1.MessageAdd(c.log, tx, &inbox, &m, mf, store.AddOpts{})
xcheckf(err, "deliver message")
mfname := mf.Name()
xcheckf(err, "add message to account test1")
err = mf.Close()
xcheckf(err, "closing file")
err = os.Remove(mfname)
xcheckf(err, "removing temp message file")

err = tx.Get(&inbox)
xcheckf(err, "get inbox")
inbox.Add(m.MailboxCounts())
err = tx.Update(&inbox)
xcheckf(err, "update inbox")

@ -304,7 +307,7 @@ Accounts:
xcheckf(err, "close account")

// Third account with two messages and junkfilter.
accTest2, err := store.OpenAccount(c.log, "test2", false)
accTest2, err := store.OpenAccount(c.log, "test2")
xcheckf(err, "open account test2")
err = accTest2.ThreadingWait(c.log)
xcheckf(err, "wait for threading to finish")
@ -315,6 +318,7 @@ Accounts:
m0 := store.Message{
MailboxID: inbox.ID,
MailboxOrigID: inbox.ID,
MailboxDestinedID: inbox.ID,
RemoteIP: "::1",
RemoteIPMasked1: "::",
RemoteIPMasked2: "::",
@ -339,11 +343,20 @@ Accounts:
}
mf0 := tempfile()
xcheckf(err, "creating temp file for delivery")
defer store.CloseRemoveTempFile(c.log, mf0, "test message")
_, err = fmt.Fprint(mf0, msg0)
xcheckf(err, "writing deliver message to file")
err = accTest2.MessageAdd(c.log, tx, &inbox, &m0, mf0, store.AddOpts{})
err = accTest2.DeliverMessage(c.log, tx, &m0, mf0, false, false, false, true)
xcheckf(err, "add message to account test2")

mf0name := mf0.Name()
err = mf0.Close()
xcheckf(err, "closing file")
err = os.Remove(mf0name)
xcheckf(err, "removing temp message file")

err = tx.Get(&inbox)
xcheckf(err, "get inbox")
inbox.Add(m0.MailboxCounts())
err = tx.Update(&inbox)
xcheckf(err, "update inbox")

@ -352,19 +365,29 @@ Accounts:
const prefix1 = "Extra: test\r\n"
const msg1 = "From: <other@remote.example>\r\nTo: <☹@xn--74h.example>\r\nSubject: test\r\n\r\nthe message...\r\n"
m1 := store.Message{
MailboxID: sent.ID,
MailboxOrigID: sent.ID,
Flags: store.Flags{Seen: true, Junk: true},
Size: int64(len(prefix1) + len(msg1)),
MsgPrefix: []byte(prefix1),
MailboxID: sent.ID,
MailboxOrigID: sent.ID,
MailboxDestinedID: sent.ID,
Flags: store.Flags{Seen: true, Junk: true},
Size: int64(len(prefix1) + len(msg1)),
MsgPrefix: []byte(prefix1),
}
mf1 := tempfile()
xcheckf(err, "creating temp file for delivery")
defer store.CloseRemoveTempFile(c.log, mf1, "test message")
_, err = fmt.Fprint(mf1, msg1)
xcheckf(err, "writing deliver message to file")
err = accTest2.MessageAdd(c.log, tx, &sent, &m1, mf1, store.AddOpts{})
err = accTest2.DeliverMessage(c.log, tx, &m1, mf1, false, false, false, true)
xcheckf(err, "add message to account test2")

mf1name := mf1.Name()
err = mf1.Close()
xcheckf(err, "closing file")
err = os.Remove(mf1name)
xcheckf(err, "removing temp message file")

err = tx.Get(&sent)
xcheckf(err, "get sent")
sent.Add(m1.MailboxCounts())
err = tx.Update(&sent)
xcheckf(err, "update sent")

@ -24,7 +24,7 @@ mkdir html/features
(
cat features/index.md
echo
sed -n -e 's/^# Roadmap/## Roadmap/' -e '/# FAQ/q' -e '/# Roadmap/,/# FAQ/p' < ../README.md
sed -n -e '/# FAQ/q' -e '/## Roadmap/,/# FAQ/p' < ../README.md
echo
echo 'Also see the [Protocols](../protocols/) page for implementation status, and (non)-plans.'
) | go run website.go 'Features' >html/features/index.html
@ -54,64 +54,3 @@ mkdir html/commands

mkdir html/protocols
go run website.go -protocols 'Protocols' <../rfc/index.txt >html/protocols/index.html

mkdir html/b
cat <<'EOF' >html/b/index.html
<!doctype html>
<html>
<head>
<meta charset="utf-8" />
<title>mox build</title>
<meta name="viewport" content="width=device-width, initial-scale=1" />
<link rel="icon" href="noNeedlessFaviconRequestsPlease:" />
<style>
body { padding: 1em; }
* { font-size: 18px; font-family: ubuntu, lato, sans-serif; margin: 0; padding: 0; box-sizing: border-box; }
p { max-width: 50em; margin-bottom: 2ex; }
pre { font-family: 'ubuntu mono', monospace; }
pre, blockquote { padding: 1em; background-color: #eee; border-radius: .25em; display: inline-block; margin-bottom: 1em; }
h1 { margin: 1em 0 .5em 0; }
</style>
</head>
<body>
<script>
const elem = (name, ...s) => {
const e = document.createElement(name)
e.append(...s)
return e
}
const link = (url, anchor) => {
const e = document.createElement('a')
e.setAttribute('href', url)
e.setAttribute('rel', 'noopener')
e.append(anchor || url)
return e
}
let h = location.hash.substring(1)
const ok = /^[a-zA-Z0-9_\.]+$/.test(h)
if (!ok) {
h = '<tag-or-branch-or-commithash>'
}
const init = () => {
document.body.append(
elem('p', 'Compile or download any version of mox, by tag (release), branch or commit hash.'),
elem('h1', 'Compile'),
elem('p', 'Run:'),
elem('pre', 'CGO_ENABLED=0 GOBIN=$PWD go install github.com/mjl-/mox@'+h),
elem('p', 'Mox is tested with the Go toolchain versions that are still have support: The most recent version, and the version before.'),
elem('h1', 'Download'),
elem('p', 'Download a binary for your platform:'),
elem('blockquote', ok ?
link('https://beta.gobuilds.org/github.com/mjl-/mox@'+h) :
'https://beta.gobuilds.org/github.com/mjl-/mox@'+h
),
elem('p', 'Because mox is written in Go, builds are reproducible, also when cross-compiling. Gobuilds.org is a service that builds Go applications on-demand with the latest Go toolchain/runtime.'),
elem('h1', 'Localserve'),
elem('p', 'Changes to mox can often be most easily tested locally with ', link('../features/#hdr-localserve', '"mox localserve"'), ', without having to update your running mail server.'),
)
}
window.addEventListener('load', init)
</script>
</body>
</html>
EOF
29
go.mod
29
go.mod
@ -1,24 +1,23 @@
|
||||
module github.com/mjl-/mox
|
||||
|
||||
go 1.23.0
|
||||
go 1.21
|
||||
|
||||
require (
|
||||
github.com/mjl-/adns v0.0.0-20250321173553-ab04b05bdfea
|
||||
github.com/mjl-/autocert v0.0.0-20250321204043-abab2b936e31
|
||||
github.com/mjl-/bstore v0.0.9
|
||||
github.com/mjl-/flate v0.0.0-20250221133712-6372d09eb978
|
||||
github.com/mjl-/sconf v0.0.7
|
||||
github.com/mjl-/adns v0.0.0-20240309142737-2a1aacf346dc
|
||||
github.com/mjl-/autocert v0.0.0-20231214125928-31b7400acb05
|
||||
github.com/mjl-/bstore v0.0.4
|
||||
github.com/mjl-/sconf v0.0.5
|
||||
github.com/mjl-/sherpa v0.6.7
|
||||
github.com/mjl-/sherpadoc v0.0.16
|
||||
github.com/mjl-/sherpadoc v0.0.12
|
||||
github.com/mjl-/sherpaprom v0.0.2
|
||||
github.com/mjl-/sherpats v0.0.6
|
||||
github.com/prometheus/client_golang v1.18.0
|
||||
github.com/russross/blackfriday/v2 v2.1.0
|
||||
go.etcd.io/bbolt v1.3.11
|
||||
golang.org/x/crypto v0.37.0
|
||||
golang.org/x/net v0.39.0
|
||||
golang.org/x/sys v0.32.0
|
||||
golang.org/x/text v0.24.0
|
||||
go.etcd.io/bbolt v1.3.9
|
||||
golang.org/x/crypto v0.21.0
|
||||
golang.org/x/exp v0.0.0-20240222234643-814bf88cf225
|
||||
golang.org/x/net v0.22.0
|
||||
golang.org/x/text v0.14.0
|
||||
rsc.io/qr v0.2.0
|
||||
)
|
||||
|
||||
@ -30,8 +29,8 @@ require (
|
||||
github.com/prometheus/client_model v0.5.0 // indirect
|
||||
github.com/prometheus/common v0.45.0 // indirect
|
||||
github.com/prometheus/procfs v0.12.0 // indirect
|
||||
golang.org/x/mod v0.24.0 // indirect
|
||||
golang.org/x/sync v0.13.0 // indirect
|
||||
golang.org/x/tools v0.32.0 // indirect
|
||||
golang.org/x/mod v0.16.0 // indirect
|
||||
golang.org/x/sys v0.18.0 // indirect
|
||||
golang.org/x/tools v0.19.0 // indirect
|
||||
google.golang.org/protobuf v1.31.0 // indirect
|
||||
)
|
||||
|
60
go.sum
60
go.sum
@ -16,29 +16,27 @@ github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg=
|
||||
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k=
|
||||
github.com/mjl-/adns v0.0.0-20250321173553-ab04b05bdfea h1:8dftsVL1tHhRksXzFZRhSJ7gSlcy/t87Nvucs3JnTGE=
|
||||
github.com/mjl-/adns v0.0.0-20250321173553-ab04b05bdfea/go.mod h1:rWZMqGA2HoBm5b5q/A5J8u1sSVuEYh6zBz9tMoVs+RU=
|
||||
github.com/mjl-/autocert v0.0.0-20250321204043-abab2b936e31 h1:6MFGOLPGf6VzHWkKv8waSzJMMS98EFY2LVKPRHffCyo=
|
||||
github.com/mjl-/autocert v0.0.0-20250321204043-abab2b936e31/go.mod h1:taMFU86abMxKLPV4Bynhv8enbYmS67b8LG80qZv2Qus=
|
||||
github.com/mjl-/bstore v0.0.9 h1:j8HVXL10Arbk4ujeRGwns8gipH1N1TZn853inQ42FgY=
|
||||
github.com/mjl-/bstore v0.0.9/go.mod h1:xzIpSfcFosgPJ6h+vsdIt0pzCq4i8hhMuHPQJ0aHQhM=
|
||||
github.com/mjl-/flate v0.0.0-20250221133712-6372d09eb978 h1:Eg5DfI3/00URzGErujKus6a3O0kyXzF8vjoDZzH/gig=
|
||||
github.com/mjl-/flate v0.0.0-20250221133712-6372d09eb978/go.mod h1:QBkFtjai3AiQQuUu7pVh6PA06Vd3oa68E+vddf/UBOs=
|
||||
github.com/mjl-/sconf v0.0.7 h1:bdBcSFZCDFMm/UdBsgNCsjkYmKrSgYwp7rAOoufwHe4=
|
||||
github.com/mjl-/sconf v0.0.7/go.mod h1:uF8OdWtLT8La3i4ln176i1pB0ps9pXGCaABEU55ZkE0=
|
||||
github.com/mjl-/adns v0.0.0-20240309142737-2a1aacf346dc h1:ghTx3KsrO0hSJW0bCFCGwjSrYeXZ6Bj5hdv9FTTFV4M=
|
||||
github.com/mjl-/adns v0.0.0-20240309142737-2a1aacf346dc/go.mod h1:v47qUMJnipnmDTRGaHwpCwzE6oypa5K33mUvBfzZBn8=
|
||||
github.com/mjl-/autocert v0.0.0-20231214125928-31b7400acb05 h1:s6ay4bh4tmpPLdxjyeWG45mcwHfEluBMuGPkqxHWUJ4=
|
||||
github.com/mjl-/autocert v0.0.0-20231214125928-31b7400acb05/go.mod h1:taMFU86abMxKLPV4Bynhv8enbYmS67b8LG80qZv2Qus=
|
||||
github.com/mjl-/bstore v0.0.4 h1:q+R1oAr8+E9yf9q+zxkVjQ18VFqD/E9KmGVoe4FIOBA=
|
||||
github.com/mjl-/bstore v0.0.4/go.mod h1:/cD25FNBaDfvL/plFRxI3Ba3E+wcB0XVOS8nJDqndg0=
|
||||
github.com/mjl-/sconf v0.0.5 h1:4CMUTENpSnaeP2g6RKtrs8udTxnJgjX2MCCovxGId6s=
|
||||
github.com/mjl-/sconf v0.0.5/go.mod h1:uF8OdWtLT8La3i4ln176i1pB0ps9pXGCaABEU55ZkE0=
|
||||
github.com/mjl-/sherpa v0.6.7 h1:C5F8XQdV5nCuS4fvB+ye/ziUQrajEhOoj/t2w5T14BY=
|
||||
github.com/mjl-/sherpa v0.6.7/go.mod h1:dSpAOdgpwdqQZ72O4n3EHo/tR68eKyan8tYYraUMPNc=
|
||||
github.com/mjl-/sherpadoc v0.0.0-20190505200843-c0a7f43f5f1d/go.mod h1:5khTKxoKKNXcB8bkVUO6GlzC7PFtMmkHq578lPbmnok=
|
||||
github.com/mjl-/sherpadoc v0.0.16 h1:BdlFNXfnTaA7qO54kof4xpNFJxYBTY0cIObRk7QAP6M=
|
||||
github.com/mjl-/sherpadoc v0.0.16/go.mod h1:vh5zcsk3j/Tvm725EY+unTZb3EZcZcpiEQzrODSa6+I=
|
||||
github.com/mjl-/sherpadoc v0.0.12 h1:6hVe2Z0DnwjC0bfuOwfz8ov7JTCUU49cEaj7h22NiPk=
|
||||
github.com/mjl-/sherpadoc v0.0.12/go.mod h1:vh5zcsk3j/Tvm725EY+unTZb3EZcZcpiEQzrODSa6+I=
|
||||
github.com/mjl-/sherpaprom v0.0.2 h1:1dlbkScsNafM5jURI44uiWrZMSwfZtcOFEEq7vx2C1Y=
|
||||
github.com/mjl-/sherpaprom v0.0.2/go.mod h1:cl5nMNOvqhzMiQJ2FzccQ9ReivjHXe53JhOVkPfSvw4=
|
||||
github.com/mjl-/sherpats v0.0.6 h1:2lSoJbb+jkjLOdlvoMxItq0QQrrnkH+rnm3PMRfpbmA=
|
||||
@ -70,38 +68,40 @@ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0=
|
||||
go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I=
|
||||
go.etcd.io/bbolt v1.3.9 h1:8x7aARPEXiXbHmtUwAIv7eV2fQFHrLLavdiJ3uzJXoI=
|
||||
go.etcd.io/bbolt v1.3.9/go.mod h1:zaO32+Ti0PK1ivdPtgMESzuzL2VPoIG1PCQNvOdo/dE=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE=
|
||||
golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc=
|
||||
golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
|
||||
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
|
||||
golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 h1:LfspQV/FYTatPTr/3HzIcmiUFH7PGP+OQ6mgDYo3yuQ=
|
||||
golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc=
|
||||
golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
|
||||
golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU=
|
||||
golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
|
||||
golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic=
|
||||
golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY=
|
||||
golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E=
|
||||
golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc=
|
||||
golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610=
|
||||
golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
|
||||
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20=
|
||||
golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
|
||||
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
|
||||
golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
|
||||
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.32.0 h1:Q7N1vhpkQv7ybVzLFtTjvQya2ewbwNDZzUgfXGqtMWU=
|
||||
golang.org/x/tools v0.32.0/go.mod h1:ZxrU41P/wAbZD8EDa6dDCa6XfpkhJ7HFMjHJXfBDu8s=
|
||||
golang.org/x/tools v0.19.0 h1:tfGCXNR1OsFG+sVdLAitlpjAvD/I6dHDKnYrpEZUHkw=
|
||||
golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
|
167
http/autoconf.go
167
http/autoconf.go
@ -11,8 +11,7 @@ import (
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"rsc.io/qr"
|
||||
|
||||
"github.com/mjl-/mox/admin"
|
||||
"github.com/mjl-/mox/dns"
|
||||
"github.com/mjl-/mox/mox-"
|
||||
"github.com/mjl-/mox/smtp"
|
||||
)
|
||||
|
||||
@ -65,35 +64,19 @@ func autoconfHandle(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
email := r.FormValue("emailaddress")
|
||||
log.Debug("autoconfig request", slog.String("email", email))
|
||||
var domain dns.Domain
|
||||
if email == "" {
|
||||
email = "%EMAILADDRESS%"
|
||||
// Declare this here rather than using := to avoid shadowing domain from
|
||||
// the outer scope.
|
||||
var err error
|
||||
domain, err = dns.ParseDomain(r.Host)
|
||||
if err != nil {
|
||||
http.Error(w, fmt.Sprintf("400 - bad request - invalid domain: %s", r.Host), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
domain.ASCII = strings.TrimPrefix(domain.ASCII, "autoconfig.")
|
||||
domain.Unicode = strings.TrimPrefix(domain.Unicode, "autoconfig.")
|
||||
} else {
|
||||
addr, err := smtp.ParseAddress(email)
|
||||
if err != nil {
|
||||
http.Error(w, "400 - bad request - invalid parameter emailaddress", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
domain = addr.Domain
|
||||
addr, err := smtp.ParseAddress(email)
|
||||
if err != nil {
|
||||
http.Error(w, "400 - bad request - invalid parameter emailaddress", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
socketType := func(tlsMode admin.TLSMode) (string, error) {
|
||||
socketType := func(tlsMode mox.TLSMode) (string, error) {
|
||||
switch tlsMode {
|
||||
case admin.TLSModeImmediate:
|
||||
case mox.TLSModeImmediate:
|
||||
return "SSL", nil
|
||||
case admin.TLSModeSTARTTLS:
|
||||
case mox.TLSModeSTARTTLS:
|
||||
return "STARTTLS", nil
|
||||
case admin.TLSModeNone:
|
||||
case mox.TLSModeNone:
|
||||
return "plain", nil
|
||||
default:
|
||||
return "", fmt.Errorf("unknown tls mode %v", tlsMode)
|
||||
@ -101,7 +84,7 @@ func autoconfHandle(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
var imapTLS, submissionTLS string
|
||||
config, err := admin.ClientConfigDomain(domain)
|
||||
config, err := mox.ClientConfigDomain(addr.Domain)
|
||||
if err == nil {
|
||||
imapTLS, err = socketType(config.IMAP.TLSMode)
|
||||
}
|
||||
@ -116,67 +99,37 @@ func autoconfHandle(w http.ResponseWriter, r *http.Request) {
|
||||
// Thunderbird doesn't seem to allow U-labels, always return ASCII names.
|
||||
var resp autoconfigResponse
|
||||
resp.Version = "1.1"
|
||||
resp.EmailProvider.ID = domain.ASCII
|
||||
resp.EmailProvider.Domain = domain.ASCII
|
||||
resp.EmailProvider.ID = addr.Domain.ASCII
|
||||
resp.EmailProvider.Domain = addr.Domain.ASCII
|
||||
resp.EmailProvider.DisplayName = email
|
||||
resp.EmailProvider.DisplayShortName = domain.ASCII
|
||||
resp.EmailProvider.DisplayShortName = addr.Domain.ASCII
|
||||
|
||||
// todo: specify SCRAM-SHA-256 once thunderbird and autoconfig supports it. or perhaps that will fall under "password-encrypted" by then.
|
||||
// todo: let user configure whether they prefer or require tls client auth and specify "TLS-client-cert"
|
||||
|
||||
incoming := incomingServer{
|
||||
"imap",
|
||||
config.IMAP.Host.ASCII,
|
||||
config.IMAP.Port,
|
||||
imapTLS,
|
||||
email,
|
||||
"password-encrypted",
|
||||
}
|
||||
resp.EmailProvider.IncomingServers = append(resp.EmailProvider.IncomingServers, incoming)
|
||||
if config.IMAP.EnabledOnHTTPS {
|
||||
tlsMode, _ := socketType(admin.TLSModeImmediate)
|
||||
incomingALPN := incomingServer{
|
||||
"imap",
|
||||
config.IMAP.Host.ASCII,
|
||||
443,
|
||||
tlsMode,
|
||||
email,
|
||||
"password-encrypted",
|
||||
}
|
||||
resp.EmailProvider.IncomingServers = append(resp.EmailProvider.IncomingServers, incomingALPN)
|
||||
}
|
||||
resp.EmailProvider.IncomingServer.Type = "imap"
|
||||
resp.EmailProvider.IncomingServer.Hostname = config.IMAP.Host.ASCII
|
||||
resp.EmailProvider.IncomingServer.Port = config.IMAP.Port
|
||||
resp.EmailProvider.IncomingServer.SocketType = imapTLS
|
||||
resp.EmailProvider.IncomingServer.Username = email
|
||||
resp.EmailProvider.IncomingServer.Authentication = "password-encrypted"
|
||||
|
||||
outgoing := outgoingServer{
|
||||
"smtp",
|
||||
config.Submission.Host.ASCII,
|
||||
config.Submission.Port,
|
||||
submissionTLS,
|
||||
email,
|
||||
"password-encrypted",
|
||||
}
|
||||
resp.EmailProvider.OutgoingServers = append(resp.EmailProvider.OutgoingServers, outgoing)
|
||||
if config.Submission.EnabledOnHTTPS {
|
||||
tlsMode, _ := socketType(admin.TLSModeImmediate)
|
||||
outgoingALPN := outgoingServer{
|
||||
"smtp",
|
||||
config.Submission.Host.ASCII,
|
||||
443,
|
||||
tlsMode,
|
||||
email,
|
||||
"password-encrypted",
|
||||
}
|
||||
resp.EmailProvider.OutgoingServers = append(resp.EmailProvider.OutgoingServers, outgoingALPN)
|
||||
}
|
||||
resp.EmailProvider.OutgoingServer.Type = "smtp"
|
||||
resp.EmailProvider.OutgoingServer.Hostname = config.Submission.Host.ASCII
|
||||
resp.EmailProvider.OutgoingServer.Port = config.Submission.Port
|
||||
resp.EmailProvider.OutgoingServer.SocketType = submissionTLS
|
||||
resp.EmailProvider.OutgoingServer.Username = email
|
||||
resp.EmailProvider.OutgoingServer.Authentication = "password-encrypted"
|
||||
|
||||
// todo: should we put the email address in the URL?
|
||||
resp.ClientConfigUpdate.URL = fmt.Sprintf("https://autoconfig.%s/mail/config-v1.1.xml", domain.ASCII)
|
||||
resp.ClientConfigUpdate.URL = fmt.Sprintf("https://autoconfig.%s/mail/config-v1.1.xml", addr.Domain.ASCII)
|
||||
|
||||
w.Header().Set("Content-Type", "application/xml; charset=utf-8")
|
||||
enc := xml.NewEncoder(w)
|
||||
enc.Indent("", "\t")
|
||||
fmt.Fprint(w, xml.Header)
|
||||
err = enc.Encode(resp)
|
||||
log.Check(err, "write autoconfig xml response")
|
||||
if err := enc.Encode(resp); err != nil {
|
||||
log.Errorx("marshal autoconfig response", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Autodiscover from Microsoft, also used by Thunderbird.
|
||||
@ -217,13 +170,13 @@ func autodiscoverHandle(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
// tlsmode returns the "ssl" and "encryption" fields.
|
||||
tlsmode := func(tlsMode admin.TLSMode) (string, string, error) {
|
||||
tlsmode := func(tlsMode mox.TLSMode) (string, string, error) {
|
||||
switch tlsMode {
|
||||
case admin.TLSModeImmediate:
|
||||
case mox.TLSModeImmediate:
|
||||
return "on", "TLS", nil
|
||||
case admin.TLSModeSTARTTLS:
|
||||
case mox.TLSModeSTARTTLS:
|
||||
return "on", "", nil
|
||||
case admin.TLSModeNone:
|
||||
case mox.TLSModeNone:
|
||||
return "off", "", nil
|
||||
default:
|
||||
return "", "", fmt.Errorf("unknown tls mode %v", tlsMode)
|
||||
@ -232,7 +185,7 @@ func autodiscoverHandle(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
var imapSSL, imapEncryption string
|
||||
var submissionSSL, submissionEncryption string
|
||||
config, err := admin.ClientConfigDomain(addr.Domain)
|
||||
config, err := mox.ClientConfigDomain(addr.Domain)
|
||||
if err == nil {
|
||||
imapSSL, imapEncryption, err = tlsmode(config.IMAP.TLSMode)
|
||||
}
|
||||
@ -255,8 +208,6 @@ func autodiscoverHandle(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
w.Header().Set("Content-Type", "application/xml; charset=utf-8")
|
||||
|
||||
// todo: let user configure whether they prefer or require tls client auth and add "AuthPackage" with value "certificate" to Protocol? see https://learn.microsoft.com/en-us/openspecs/exchange_server_protocols/ms-oxdscli/21fd2dd5-c4ee-485b-94fb-e7db5da93726
|
||||
|
||||
resp := autodiscoverResponse{}
|
||||
resp.XMLName.Local = "Autodiscover"
|
||||
resp.XMLName.Space = "http://schemas.microsoft.com/exchange/autodiscover/responseschema/2006"
|
||||
@ -291,8 +242,9 @@ func autodiscoverHandle(w http.ResponseWriter, r *http.Request) {
|
||||
enc := xml.NewEncoder(w)
|
||||
enc.Indent("", "\t")
|
||||
fmt.Fprint(w, xml.Header)
|
||||
err = enc.Encode(resp)
|
||||
log.Check(err, "marshal autodiscover xml response")
|
||||
if err := enc.Encode(resp); err != nil {
|
||||
log.Errorx("marshal autodiscover response", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Thunderbird requests these URLs for autoconfig/autodiscover:
|
||||
@ -300,22 +252,6 @@ func autodiscoverHandle(w http.ResponseWriter, r *http.Request) {
|
||||
// https://autodiscover.example.org/autodiscover/autodiscover.xml
|
||||
// https://example.org/.well-known/autoconfig/mail/config-v1.1.xml?emailaddress=user%40example.org
|
||||
// https://example.org/autodiscover/autodiscover.xml
|
||||
type incomingServer struct {
|
||||
Type string `xml:"type,attr"`
|
||||
Hostname string `xml:"hostname"`
|
||||
Port int `xml:"port"`
|
||||
SocketType string `xml:"socketType"`
|
||||
Username string `xml:"username"`
|
||||
Authentication string `xml:"authentication"`
|
||||
}
|
||||
type outgoingServer struct {
|
||||
Type string `xml:"type,attr"`
|
||||
Hostname string `xml:"hostname"`
|
||||
Port int `xml:"port"`
|
||||
SocketType string `xml:"socketType"`
|
||||
Username string `xml:"username"`
|
||||
Authentication string `xml:"authentication"`
|
||||
}
|
||||
type autoconfigResponse struct {
|
||||
XMLName xml.Name `xml:"clientConfig"`
|
||||
Version string `xml:"version,attr"`
|
||||
@ -326,8 +262,23 @@ type autoconfigResponse struct {
|
||||
DisplayName string `xml:"displayName"`
|
||||
DisplayShortName string `xml:"displayShortName"`
|
||||
|
||||
IncomingServers []incomingServer `xml:"incomingServer"`
|
||||
OutgoingServers []outgoingServer `xml:"outgoingServer"`
|
||||
IncomingServer struct {
|
||||
Type string `xml:"type,attr"`
|
||||
Hostname string `xml:"hostname"`
|
||||
Port int `xml:"port"`
|
||||
SocketType string `xml:"socketType"`
|
||||
Username string `xml:"username"`
|
||||
Authentication string `xml:"authentication"`
|
||||
} `xml:"incomingServer"`
|
||||
|
||||
OutgoingServer struct {
|
||||
Type string `xml:"type,attr"`
|
||||
Hostname string `xml:"hostname"`
|
||||
Port int `xml:"port"`
|
||||
SocketType string `xml:"socketType"`
|
||||
Username string `xml:"username"`
|
||||
Authentication string `xml:"authentication"`
|
||||
} `xml:"outgoingServer"`
|
||||
} `xml:"emailProvider"`
|
||||
|
||||
ClientConfigUpdate struct {
|
||||
@ -373,8 +324,6 @@ type autodiscoverProtocol struct {
|
||||
// Serve a .mobileconfig file. This endpoint is not a standard place where Apple
|
||||
// devices look. We point to it from the account page.
|
||||
func mobileconfigHandle(w http.ResponseWriter, r *http.Request) {
|
||||
log := pkglog.WithContext(r.Context())
|
||||
|
||||
if r.Method != "GET" {
|
||||
http.Error(w, "405 - method not allowed - get required", http.StatusMethodNotAllowed)
|
||||
return
|
||||
@ -400,15 +349,12 @@ func mobileconfigHandle(w http.ResponseWriter, r *http.Request) {
|
||||
filename = strings.ReplaceAll(filename, "@", "-at-")
|
||||
filename = "email-account-" + filename + ".mobileconfig"
|
||||
h.Set("Content-Disposition", fmt.Sprintf(`attachment; filename="%s"`, filename))
|
||||
_, err = w.Write(buf)
|
||||
log.Check(err, "writing mobileconfig response")
|
||||
w.Write(buf)
|
||||
}
|
||||
|
||||
// Serve a png file with qrcode with the link to the .mobileconfig file, should be
|
||||
// helpful for mobile devices.
|
||||
func mobileconfigQRCodeHandle(w http.ResponseWriter, r *http.Request) {
|
||||
log := pkglog.WithContext(r.Context())
|
||||
|
||||
if r.Method != "GET" {
|
||||
http.Error(w, "405 - method not allowed - get required", http.StatusMethodNotAllowed)
|
||||
return
|
||||
@ -435,6 +381,5 @@ func mobileconfigQRCodeHandle(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
h := w.Header()
|
||||
h.Set("Content-Type", "image/png")
|
||||
_, err = w.Write(code.PNG())
|
||||
log.Check(err, "writing mobileconfig qr code")
|
||||
w.Write(code.PNG())
|
||||
}
|
||||
|
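A quick way to exercise the autoconfig endpoint changed above is a plain HTTP client; a minimal sketch, assuming a domain example.org whose autoconfig host is served by mox (the URL shape follows the handler registration, everything else here is illustrative):

package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	// Same URL Thunderbird requests; with the change above, emailaddress may be omitted.
	url := "https://autoconfig.example.org/mail/config-v1.1.xml?emailaddress=user%40example.org"
	resp, err := http.Get(url)
	if err != nil {
		log.Fatalf("fetching autoconfig: %v", err)
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatalf("reading autoconfig response: %v", err)
	}
	fmt.Printf("%s\n", body) // expect <clientConfig version="1.1">...</clientConfig>
}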
BIN
http/favicon.ico
BIN
http/favicon.ico
Binary file not shown.
Before Width: | Height: | Size: 823 B |
@ -1,17 +0,0 @@
package http

import (
"fmt"
"os"
"testing"

"github.com/mjl-/mox/metrics"
)

func TestMain(m *testing.M) {
m.Run()
if metrics.Panics.Load() > 0 {
fmt.Println("unhandled panics encountered")
os.Exit(2)
}
}

@ -6,11 +6,13 @@ import (
|
||||
"crypto/sha256"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"maps"
|
||||
"slices"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/mjl-/mox/admin"
|
||||
"golang.org/x/exp/maps"
|
||||
|
||||
"github.com/mjl-/mox/mox-"
|
||||
"github.com/mjl-/mox/smtp"
|
||||
)
|
||||
|
||||
@ -37,7 +39,8 @@ func (m dict) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
|
||||
if err := e.EncodeToken(xml.StartElement{Name: xml.Name{Local: "dict"}}); err != nil {
|
||||
return err
|
||||
}
|
||||
l := slices.Sorted(maps.Keys(m))
|
||||
l := maps.Keys(m)
|
||||
sort.Strings(l)
|
||||
for _, k := range l {
|
||||
tokens := []xml.Token{
|
||||
xml.StartElement{Name: xml.Name{Local: "key"}},
|
||||
@ -61,7 +64,7 @@ func (m dict) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
|
||||
case int:
|
||||
tokens = []xml.Token{
|
||||
xml.StartElement{Name: xml.Name{Local: "integer"}},
|
||||
xml.CharData(fmt.Appendf(nil, "%d", v)),
|
||||
xml.CharData([]byte(fmt.Sprintf("%d", v))),
|
||||
xml.EndElement{Name: xml.Name{Local: "integer"}},
|
||||
}
|
||||
case bool:
|
||||
@ -119,7 +122,7 @@ func MobileConfig(addresses []string, fullName string) ([]byte, error) {
|
||||
return nil, fmt.Errorf("parsing address: %v", err)
|
||||
}
|
||||
|
||||
config, err := admin.ClientConfigDomain(addr.Domain)
|
||||
config, err := mox.ClientConfigDomain(addr.Domain)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("getting config for domain: %v", err)
|
||||
}
|
||||
@ -172,12 +175,12 @@ func MobileConfig(addresses []string, fullName string) ([]byte, error) {
|
||||
"IncomingMailServerUsername": addresses[0],
|
||||
"IncomingMailServerHostName": config.IMAP.Host.ASCII,
|
||||
"IncomingMailServerPortNumber": config.IMAP.Port,
|
||||
"IncomingMailServerUseSSL": config.IMAP.TLSMode == admin.TLSModeImmediate,
|
||||
"IncomingMailServerUseSSL": config.IMAP.TLSMode == mox.TLSModeImmediate,
|
||||
"OutgoingMailServerAuthentication": "EmailAuthCRAMMD5", // SCRAM not an option at time of writing...
|
||||
"OutgoingMailServerHostName": config.Submission.Host.ASCII,
|
||||
"OutgoingMailServerPortNumber": config.Submission.Port,
|
||||
"OutgoingMailServerUsername": addresses[0],
|
||||
"OutgoingMailServerUseSSL": config.Submission.TLSMode == admin.TLSModeImmediate,
|
||||
"OutgoingMailServerUseSSL": config.Submission.TLSMode == mox.TLSModeImmediate,
|
||||
"OutgoingPasswordSameAsIncomingPassword": true,
|
||||
"PayloadIdentifier": reverseAddr + ".email.account",
|
||||
"PayloadType": "com.apple.mail.managed",
|
||||
|
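The mobileconfig hunk above swaps golang.org/x/exp/maps plus sort.Strings for the standard library maps.Keys iterator and slices.Sorted, which require Go 1.23 and match the go.mod bump earlier in this diff. A minimal sketch of the idiom, independent of mox:

package main

import (
	"fmt"
	"maps"
	"slices"
)

func main() {
	m := map[string]int{"b": 2, "a": 1, "c": 3}
	// slices.Sorted drains the maps.Keys iterator into a sorted slice,
	// giving deterministic iteration order for encoding.
	for _, k := range slices.Sorted(maps.Keys(m)) {
		fmt.Println(k, m[k])
	}
}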
@ -43,9 +43,9 @@ func mtastsPolicyHandle(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
var mxs []mtasts.MX
|
||||
var mxs []mtasts.STSMX
|
||||
for _, s := range sts.MX {
|
||||
var mx mtasts.MX
|
||||
var mx mtasts.STSMX
|
||||
if strings.HasPrefix(s, "*.") {
|
||||
mx.Wildcard = true
|
||||
s = s[2:]
|
||||
@ -60,7 +60,7 @@ func mtastsPolicyHandle(w http.ResponseWriter, r *http.Request) {
|
||||
mxs = append(mxs, mx)
|
||||
}
|
||||
if len(mxs) == 0 {
|
||||
mxs = []mtasts.MX{{Domain: mox.Conf.Static.HostnameDomain}}
|
||||
mxs = []mtasts.STSMX{{Domain: mox.Conf.Static.HostnameDomain}}
|
||||
}
|
||||
|
||||
policy := mtasts.Policy{
|
||||
|
725
http/web.go
725
http/web.go
@ -11,20 +11,17 @@ import (
|
||||
"io"
|
||||
golog "log"
|
||||
"log/slog"
|
||||
"maps"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"slices"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
_ "embed"
|
||||
_ "net/http/pprof"
|
||||
|
||||
"golang.org/x/net/http2"
|
||||
"golang.org/x/exp/maps"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
@ -33,14 +30,11 @@ import (
|
||||
"github.com/mjl-/mox/autotls"
|
||||
"github.com/mjl-/mox/config"
|
||||
"github.com/mjl-/mox/dns"
|
||||
"github.com/mjl-/mox/imapserver"
|
||||
"github.com/mjl-/mox/mlog"
|
||||
"github.com/mjl-/mox/mox-"
|
||||
"github.com/mjl-/mox/ratelimit"
|
||||
"github.com/mjl-/mox/smtpserver"
|
||||
"github.com/mjl-/mox/webaccount"
|
||||
"github.com/mjl-/mox/webadmin"
|
||||
"github.com/mjl-/mox/webapisrv"
|
||||
"github.com/mjl-/mox/webmail"
|
||||
)
|
||||
|
||||
@ -79,29 +73,6 @@ var (
|
||||
)
|
||||
)
|
||||
|
||||
// We serve a favicon when webaccount/webmail/webadmin/webapi for account-related
|
||||
// domains. They are configured as "service handler", which have a lower priority
|
||||
// than web handler. Admins can configure a custom /favicon.ico route to override
|
||||
// the builtin favicon. In the future, we may want to make it easier to customize
|
||||
// the favicon, possibly per client settings domain.
|
||||
//
|
||||
//go:embed favicon.ico
|
||||
var faviconIco string
|
||||
var faviconModTime = time.Now()
|
||||
|
||||
func init() {
|
||||
p, err := os.Executable()
|
||||
if err == nil {
|
||||
if st, err := os.Stat(p); err == nil {
|
||||
faviconModTime = st.ModTime()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func faviconHandle(w http.ResponseWriter, r *http.Request) {
|
||||
http.ServeContent(w, r, "favicon.ico", faviconModTime, strings.NewReader(faviconIco))
|
||||
}
|
||||
|
||||
type responseWriterFlusher interface {
|
||||
http.ResponseWriter
|
||||
http.Flusher
|
||||
@ -352,7 +323,7 @@ func (w *loggingWriter) Done() {
|
||||
slog.Any("remoteaddr", w.R.RemoteAddr),
|
||||
slog.String("tlsinfo", tlsinfo),
|
||||
slog.String("useragent", w.R.Header.Get("User-Agent")),
|
||||
slog.String("referer", w.R.Header.Get("Referer")),
|
||||
slog.String("referrr", w.R.Header.Get("Referrer")),
|
||||
}
|
||||
if w.WebsocketRequest {
|
||||
attrs = append(attrs,
|
||||
@ -379,45 +350,37 @@ func (w *loggingWriter) Done() {
|
||||
pkglog.WithContext(w.R.Context()).Debugx("http request", err, attrs...)
|
||||
}
|
||||
|
||||
// Set some http headers that should prevent potential abuse. Better safe than sorry.
|
||||
func safeHeaders(fn http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
h := w.Header()
|
||||
h.Set("X-Frame-Options", "deny")
|
||||
h.Set("X-Content-Type-Options", "nosniff")
|
||||
h.Set("Content-Security-Policy", "default-src 'self' 'unsafe-inline' data:")
|
||||
h.Set("Referrer-Policy", "same-origin")
|
||||
fn.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
|
||||
// Built-in handlers, e.g. mta-sts and autoconfig.
|
||||
type pathHandler struct {
|
||||
Name string // For logging/metrics.
|
||||
HostMatch func(host dns.IPDomain) bool // If not nil, called to see if domain of requests matches. Host can be zero value for invalid domain/ip.
|
||||
Path string // Path to register, like on http.ServeMux.
|
||||
Name string // For logging/metrics.
|
||||
HostMatch func(dom dns.Domain) bool // If not nil, called to see if domain of requests matches. Only called if requested host is a valid domain.
|
||||
Path string // Path to register, like on http.ServeMux.
|
||||
Handler http.Handler
|
||||
}
|
||||
|
||||
type serve struct {
|
||||
Kinds []string // Type of handler and protocol (e.g. acme-tls-alpn-01, account-http, admin-https, imap-https, smtp-https).
|
||||
TLSConfig *tls.Config
|
||||
NextProto tlsNextProtoMap // For HTTP server, when we do submission/imap with ALPN over the HTTPS port.
|
||||
Favicon bool
|
||||
Forwarded bool // Requests are coming from a reverse proxy, we'll use X-Forwarded-For for the IP address to ratelimit.
|
||||
RateLimitDisabled bool // Don't apply ratelimiting.
|
||||
|
||||
// SystemHandlers are for MTA-STS, autoconfig, ACME validation. They can't be
|
||||
// overridden by WebHandlers. WebHandlers are evaluated next, and the internal
|
||||
// service handlers from Listeners in mox.conf (for admin, account, webmail, webapi
|
||||
// interfaces) last. WebHandlers can also pass requests to the internal servers.
|
||||
// This order allows admins to serve other content on domains serving the mox.conf
|
||||
// internal services.
|
||||
SystemHandlers []pathHandler // Sorted, longest first.
|
||||
Webserver bool
|
||||
ServiceHandlers []pathHandler // Sorted, longest first.
|
||||
Kinds []string // Type of handler and protocol (e.g. acme-tls-alpn-01, account-http, admin-https).
|
||||
TLSConfig *tls.Config
|
||||
PathHandlers []pathHandler // Sorted, longest first.
|
||||
Webserver bool // Whether serving WebHandler. PathHandlers are always evaluated before WebHandlers.
|
||||
}
|
||||
|
||||
// SystemHandle registers a named system handler for a path and optional host. If
|
||||
// path ends with a slash, it is used as prefix match, otherwise a full path match
|
||||
// is required. If hostMatch is set, only requests to matching hosts are handled by this
|
||||
// handler.
|
||||
func (s *serve) SystemHandle(name string, hostMatch func(dns.IPDomain) bool, path string, fn http.Handler) {
|
||||
s.SystemHandlers = append(s.SystemHandlers, pathHandler{name, hostMatch, path, fn})
|
||||
}
|
||||
|
||||
// Like SystemHandle, but for internal services "admin", "account", "webmail",
|
||||
// "webapi" configured in the mox.conf Listener.
|
||||
func (s *serve) ServiceHandle(name string, hostMatch func(dns.IPDomain) bool, path string, fn http.Handler) {
|
||||
s.ServiceHandlers = append(s.ServiceHandlers, pathHandler{name, hostMatch, path, fn})
|
||||
// Handle registers a named handler for a path and optional host. If path ends with
|
||||
// a slash, it is used as prefix match, otherwise a full path match is required. If
|
||||
// hostOpt is set, only requests to those host are handled by this handler.
|
||||
func (s *serve) Handle(name string, hostMatch func(dns.Domain) bool, path string, fn http.Handler) {
|
||||
s.PathHandlers = append(s.PathHandlers, pathHandler{name, hostMatch, path, fn})
|
||||
}
|
||||
|
||||
var (
|
||||
@ -440,41 +403,23 @@ var (
|
||||
// metrics.
|
||||
func (s *serve) ServeHTTP(xw http.ResponseWriter, r *http.Request) {
|
||||
now := time.Now()
|
||||
// Rate limiting as early as possible.
|
||||
ipstr, _, err := net.SplitHostPort(r.RemoteAddr)
|
||||
if err != nil {
|
||||
pkglog.Debugx("split host:port client remoteaddr", err, slog.Any("remoteaddr", r.RemoteAddr))
|
||||
} else if ip := net.ParseIP(ipstr); ip == nil {
|
||||
pkglog.Debug("parsing ip for client remoteaddr", slog.Any("remoteaddr", r.RemoteAddr))
|
||||
} else if !limiterConnectionrate.Add(ip, now, 1) {
|
||||
method := metricHTTPMethod(r.Method)
|
||||
proto := "http"
|
||||
if r.TLS != nil {
|
||||
proto = "https"
|
||||
}
|
||||
metricRequest.WithLabelValues("(ratelimited)", proto, method, "429").Observe(0)
|
||||
// No logging, that's just noise.
|
||||
|
||||
// Rate limiting as early as possible, if enabled.
|
||||
if !s.RateLimitDisabled {
|
||||
// If requests are coming from a reverse proxy, use the IP from X-Forwarded-For.
|
||||
// Otherwise the remote IP for this connection.
|
||||
var ipstr string
|
||||
if s.Forwarded {
|
||||
s := r.Header.Get("X-Forwarded-For")
|
||||
ipstr = strings.TrimSpace(strings.Split(s, ",")[0])
|
||||
if ipstr == "" {
|
||||
pkglog.Debug("ratelimit: no ip address in X-Forwarded-For header")
|
||||
}
|
||||
} else {
|
||||
var err error
|
||||
ipstr, _, err = net.SplitHostPort(r.RemoteAddr)
|
||||
if err != nil {
|
||||
pkglog.Debugx("ratelimit: parsing remote address", err, slog.String("remoteaddr", r.RemoteAddr))
|
||||
}
|
||||
}
|
||||
ip := net.ParseIP(ipstr)
|
||||
if ip == nil && ipstr != "" {
|
||||
pkglog.Debug("ratelimit: invalid ip", slog.String("ip", ipstr))
|
||||
}
|
||||
if ip != nil && !limiterConnectionrate.Add(ip, now, 1) {
|
||||
method := metricHTTPMethod(r.Method)
|
||||
proto := "http"
|
||||
if r.TLS != nil {
|
||||
proto = "https"
|
||||
}
|
||||
metricRequest.WithLabelValues("(ratelimited)", proto, method, "429").Observe(0)
|
||||
// No logging, that's just noise.
|
||||
|
||||
http.Error(xw, "429 - too many auth attempts", http.StatusTooManyRequests)
|
||||
return
|
||||
}
|
||||
http.Error(xw, "429 - too many auth attempts", http.StatusTooManyRequests)
|
||||
return
|
||||
}
|
||||
|
||||
ctx := context.WithValue(r.Context(), mlog.CidKey, mox.Cid())
|
||||
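The ServeHTTP hunk above makes rate limiting skippable and, for listeners marked as forwarded, takes the client IP from the first X-Forwarded-For entry rather than the connection's remote address. A condensed sketch of that address selection, with an illustrative helper name that is not part of the diff:

package main

import (
	"fmt"
	"net"
	"net/http"
	"net/http/httptest"
	"strings"
)

// clientIP picks the address to rate limit on: the first X-Forwarded-For entry
// when requests come via a trusted reverse proxy, otherwise the TCP peer address.
func clientIP(r *http.Request, forwarded bool) net.IP {
	var ipstr string
	if forwarded {
		ipstr = strings.TrimSpace(strings.Split(r.Header.Get("X-Forwarded-For"), ",")[0])
	} else {
		ipstr, _, _ = net.SplitHostPort(r.RemoteAddr)
	}
	return net.ParseIP(ipstr) // nil if absent or malformed; caller logs and skips limiting
}

func main() {
	r := httptest.NewRequest("GET", "/", nil)
	r.RemoteAddr = "192.0.2.10:54321"
	r.Header.Set("X-Forwarded-For", "203.0.113.7, 10.0.0.1")
	fmt.Println(clientIP(r, false)) // 192.0.2.10
	fmt.Println(clientIP(r, true))  // 203.0.113.7
}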
@ -506,44 +451,28 @@ func (s *serve) ServeHTTP(xw http.ResponseWriter, r *http.Request) {
|
||||
r.URL.Path += "/"
|
||||
}
|
||||
|
||||
var dom dns.Domain
|
||||
host := r.Host
|
||||
nhost, _, err := net.SplitHostPort(host)
|
||||
if err == nil {
|
||||
host = nhost
|
||||
}
|
||||
ipdom := dns.IPDomain{IP: net.ParseIP(host)}
|
||||
if ipdom.IP == nil {
|
||||
dom, domErr := dns.ParseDomain(host)
|
||||
if domErr == nil {
|
||||
ipdom = dns.IPDomain{Domain: dom}
|
||||
}
|
||||
}
|
||||
// host could be an IP, some handles may match, not an error.
|
||||
dom, domErr := dns.ParseDomain(host)
|
||||
|
||||
handle := func(h pathHandler) bool {
|
||||
if h.HostMatch != nil && !h.HostMatch(ipdom) {
|
||||
return false
|
||||
for _, h := range s.PathHandlers {
|
||||
if h.HostMatch != nil && (domErr != nil || !h.HostMatch(dom)) {
|
||||
continue
|
||||
}
|
||||
if r.URL.Path == h.Path || strings.HasSuffix(h.Path, "/") && strings.HasPrefix(r.URL.Path, h.Path) {
|
||||
nw.Handler = h.Name
|
||||
nw.Compress = true
|
||||
h.Handler.ServeHTTP(nw, r)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
for _, h := range s.SystemHandlers {
|
||||
if handle(h) {
|
||||
return
|
||||
}
|
||||
}
|
||||
if s.Webserver {
|
||||
if WebHandle(nw, r, ipdom) {
|
||||
return
|
||||
}
|
||||
}
|
||||
for _, h := range s.ServiceHandlers {
|
||||
if handle(h) {
|
||||
if s.Webserver && domErr == nil {
|
||||
if WebHandle(nw, r, dom) {
|
||||
return
|
||||
}
|
||||
}
|
||||
@ -551,348 +480,218 @@ func (s *serve) ServeHTTP(xw http.ResponseWriter, r *http.Request) {
|
||||
http.NotFound(nw, r)
|
||||
}
|
||||
|
||||
func redirectToTrailingSlash(srv *serve, hostMatch func(dns.IPDomain) bool, name, path string) {
|
||||
// Helpfully redirect user to version with ending slash.
|
||||
if path != "/" && strings.HasSuffix(path, "/") {
|
||||
handler := mox.SafeHeaders(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
http.Redirect(w, r, path, http.StatusSeeOther)
|
||||
}))
|
||||
srv.ServiceHandle(name, hostMatch, strings.TrimRight(path, "/"), handler)
|
||||
}
|
||||
}
|
||||
|
||||
// Listen binds to sockets for HTTP listeners, including those required for ACME to
|
||||
// generate TLS certificates. It stores the listeners so Serve can start serving them.
|
||||
func Listen() {
|
||||
redirectToTrailingSlash := func(srv *serve, name, path string) {
|
||||
// Helpfully redirect user to version with ending slash.
|
||||
if path != "/" && strings.HasSuffix(path, "/") {
|
||||
handler := safeHeaders(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
http.Redirect(w, r, path, http.StatusSeeOther)
|
||||
}))
|
||||
srv.Handle(name, nil, path[:len(path)-1], handler)
|
||||
}
|
||||
}
|
||||
|
||||
// Initialize listeners in deterministic order for the same potential error
|
||||
// messages.
|
||||
names := slices.Sorted(maps.Keys(mox.Conf.Static.Listeners))
|
||||
names := maps.Keys(mox.Conf.Static.Listeners)
|
||||
sort.Strings(names)
|
||||
for _, name := range names {
|
||||
l := mox.Conf.Static.Listeners[name]
|
||||
portServe := portServes(name, l)
|
||||
|
||||
ports := slices.Sorted(maps.Keys(portServe))
|
||||
for _, port := range ports {
|
||||
srv := portServe[port]
|
||||
for _, ip := range l.IPs {
|
||||
listen1(ip, port, srv.TLSConfig, name, srv.Kinds, srv, srv.NextProto)
|
||||
portServe := map[int]*serve{}
|
||||
|
||||
var ensureServe func(https bool, port int, kind string) *serve
|
||||
ensureServe = func(https bool, port int, kind string) *serve {
|
||||
s := portServe[port]
|
||||
if s == nil {
|
||||
s = &serve{nil, nil, nil, false}
|
||||
portServe[port] = s
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func portServes(name string, l config.Listener) map[int]*serve {
|
||||
portServe := map[int]*serve{}
|
||||
|
||||
// For system/services, we serve on host localhost too, for ssh tunnel scenario's.
|
||||
localhost := dns.Domain{ASCII: "localhost"}
|
||||
|
||||
ldom := l.HostnameDomain
|
||||
if l.Hostname == "" {
|
||||
ldom = mox.Conf.Static.HostnameDomain
|
||||
}
|
||||
listenerHostMatch := func(host dns.IPDomain) bool {
|
||||
if host.IsIP() {
|
||||
return true
|
||||
}
|
||||
return host.Domain == ldom || host.Domain == localhost
|
||||
}
|
||||
accountHostMatch := func(host dns.IPDomain) bool {
|
||||
if listenerHostMatch(host) {
|
||||
return true
|
||||
}
|
||||
return mox.Conf.IsClientSettingsDomain(host.Domain)
|
||||
}
|
||||
|
||||
var ensureServe func(https, forwarded, noRateLimiting bool, port int, kind string, favicon bool) *serve
|
||||
ensureServe = func(https, forwarded, rateLimitDisabled bool, port int, kind string, favicon bool) *serve {
|
||||
s := portServe[port]
|
||||
if s == nil {
|
||||
s = &serve{nil, nil, tlsNextProtoMap{}, false, false, false, nil, false, nil}
|
||||
portServe[port] = s
|
||||
}
|
||||
s.Kinds = append(s.Kinds, kind)
|
||||
if favicon && !s.Favicon {
|
||||
s.ServiceHandle("favicon", accountHostMatch, "/favicon.ico", mox.SafeHeaders(http.HandlerFunc(faviconHandle)))
|
||||
s.Favicon = true
|
||||
}
|
||||
s.Forwarded = s.Forwarded || forwarded
|
||||
s.RateLimitDisabled = s.RateLimitDisabled || rateLimitDisabled
|
||||
|
||||
// We clone TLS configs because we may modify it later on for this server, for
|
||||
// ALPN. And we need copies because multiple listeners on http.Server where the
|
||||
// config is used will try to modify it concurrently.
|
||||
if https && l.TLS.ACME != "" {
|
||||
s.TLSConfig = l.TLS.ACMEConfig.Clone()
|
||||
|
||||
tlsport := config.Port(mox.Conf.Static.ACME[l.TLS.ACME].Port, 443)
|
||||
if portServe[tlsport] == nil || !slices.Contains(portServe[tlsport].Kinds, "acme-tls-alpn-01") {
|
||||
ensureServe(true, false, false, tlsport, "acme-tls-alpn-01", false)
|
||||
s.Kinds = append(s.Kinds, kind)
|
||||
if https && l.TLS.ACME != "" {
|
||||
s.TLSConfig = l.TLS.ACMEConfig
|
||||
} else if https {
|
||||
s.TLSConfig = l.TLS.Config
|
||||
if l.TLS.ACME != "" {
|
||||
tlsport := config.Port(mox.Conf.Static.ACME[l.TLS.ACME].Port, 443)
|
||||
ensureServe(true, tlsport, "acme-tls-alpn-01")
|
||||
}
|
||||
}
|
||||
} else if https {
|
||||
s.TLSConfig = l.TLS.Config.Clone()
|
||||
return s
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// If TLS with ACME is enabled on this plain HTTP port, and it hasn't been enabled
|
||||
// yet, add http-01 validation mechanism handler to server.
|
||||
ensureACMEHTTP01 := func(srv *serve) {
|
||||
if l.TLS != nil && l.TLS.ACME != "" && !slices.Contains(srv.Kinds, "acme-http-01") {
|
||||
m := mox.Conf.Static.ACME[l.TLS.ACME].Manager
|
||||
srv.Kinds = append(srv.Kinds, "acme-http-01")
|
||||
srv.SystemHandle("acme-http-01", nil, "/.well-known/acme-challenge/", m.Manager.HTTPHandler(nil))
|
||||
if l.TLS != nil && l.TLS.ACME != "" && (l.SMTP.Enabled && !l.SMTP.NoSTARTTLS || l.Submissions.Enabled || l.IMAPS.Enabled) {
|
||||
port := config.Port(mox.Conf.Static.ACME[l.TLS.ACME].Port, 443)
|
||||
ensureServe(true, port, "acme-tls-alpn-01")
|
||||
}
|
||||
}
|
||||
|
||||
if l.TLS != nil && l.TLS.ACME != "" && (l.SMTP.Enabled && !l.SMTP.NoSTARTTLS || l.Submissions.Enabled || l.IMAPS.Enabled) {
|
||||
port := config.Port(mox.Conf.Static.ACME[l.TLS.ACME].Port, 443)
|
||||
ensureServe(true, false, false, port, "acme-tls-alpn-01", false)
|
||||
}
|
||||
if l.Submissions.Enabled && l.Submissions.EnabledOnHTTPS {
|
||||
s := ensureServe(true, false, false, 443, "smtp-https", false)
|
||||
hostname := mox.Conf.Static.HostnameDomain
|
||||
if l.Hostname != "" {
|
||||
hostname = l.HostnameDomain
|
||||
if l.AccountHTTP.Enabled {
|
||||
port := config.Port(l.AccountHTTP.Port, 80)
|
||||
path := "/"
|
||||
if l.AccountHTTP.Path != "" {
|
||||
path = l.AccountHTTP.Path
|
||||
}
|
||||
srv := ensureServe(false, port, "account-http at "+path)
|
||||
handler := safeHeaders(http.StripPrefix(path[:len(path)-1], http.HandlerFunc(webaccount.Handler(path, l.AccountHTTP.Forwarded))))
|
||||
srv.Handle("account", nil, path, handler)
|
||||
redirectToTrailingSlash(srv, "account", path)
|
||||
}
|
||||
if l.AccountHTTPS.Enabled {
|
||||
port := config.Port(l.AccountHTTPS.Port, 443)
|
||||
path := "/"
|
||||
if l.AccountHTTPS.Path != "" {
|
||||
path = l.AccountHTTPS.Path
|
||||
}
|
||||
srv := ensureServe(true, port, "account-https at "+path)
|
||||
handler := safeHeaders(http.StripPrefix(path[:len(path)-1], http.HandlerFunc(webaccount.Handler(path, l.AccountHTTPS.Forwarded))))
|
||||
srv.Handle("account", nil, path, handler)
|
||||
redirectToTrailingSlash(srv, "account", path)
|
||||
}
|
||||
|
||||
if l.AdminHTTP.Enabled {
|
||||
port := config.Port(l.AdminHTTP.Port, 80)
|
||||
path := "/admin/"
|
||||
if l.AdminHTTP.Path != "" {
|
||||
path = l.AdminHTTP.Path
|
||||
}
|
||||
srv := ensureServe(false, port, "admin-http at "+path)
|
||||
handler := safeHeaders(http.StripPrefix(path[:len(path)-1], http.HandlerFunc(webadmin.Handler(path, l.AdminHTTP.Forwarded))))
|
||||
srv.Handle("admin", nil, path, handler)
|
||||
redirectToTrailingSlash(srv, "admin", path)
|
||||
}
|
||||
if l.AdminHTTPS.Enabled {
|
||||
port := config.Port(l.AdminHTTPS.Port, 443)
|
||||
path := "/admin/"
|
||||
if l.AdminHTTPS.Path != "" {
|
||||
path = l.AdminHTTPS.Path
|
||||
}
|
||||
srv := ensureServe(true, port, "admin-https at "+path)
|
||||
handler := safeHeaders(http.StripPrefix(path[:len(path)-1], http.HandlerFunc(webadmin.Handler(path, l.AdminHTTPS.Forwarded))))
|
||||
srv.Handle("admin", nil, path, handler)
|
||||
redirectToTrailingSlash(srv, "admin", path)
|
||||
}
|
||||
|
||||
maxMsgSize := l.SMTPMaxMessageSize
|
||||
if maxMsgSize == 0 {
|
||||
maxMsgSize = config.DefaultMaxMsgSize
|
||||
}
|
||||
requireTLS := !l.SMTP.NoRequireTLS
|
||||
|
||||
s.NextProto["smtp"] = func(_ *http.Server, conn *tls.Conn, _ http.Handler) {
|
||||
smtpserver.ServeTLSConn(name, hostname, conn, s.TLSConfig, true, true, maxMsgSize, requireTLS)
|
||||
}
|
||||
}
|
||||
if l.IMAPS.Enabled && l.IMAPS.EnabledOnHTTPS {
|
||||
s := ensureServe(true, false, false, 443, "imap-https", false)
|
||||
s.NextProto["imap"] = func(_ *http.Server, conn *tls.Conn, _ http.Handler) {
|
||||
imapserver.ServeTLSConn(name, conn, s.TLSConfig)
|
||||
}
|
||||
}
|
||||
if l.AccountHTTP.Enabled {
|
||||
port := config.Port(l.AccountHTTP.Port, 80)
|
||||
path := "/"
|
||||
if l.AccountHTTP.Path != "" {
|
||||
path = l.AccountHTTP.Path
|
||||
}
|
||||
srv := ensureServe(false, l.AccountHTTP.Forwarded, false, port, "account-http at "+path, true)
|
||||
handler := mox.SafeHeaders(http.StripPrefix(strings.TrimRight(path, "/"), http.HandlerFunc(webaccount.Handler(path, l.AccountHTTP.Forwarded))))
|
||||
srv.ServiceHandle("account", accountHostMatch, path, handler)
|
||||
redirectToTrailingSlash(srv, accountHostMatch, "account", path)
|
||||
ensureACMEHTTP01(srv)
|
||||
}
|
||||
if l.AccountHTTPS.Enabled {
|
||||
port := config.Port(l.AccountHTTPS.Port, 443)
|
||||
path := "/"
|
||||
if l.AccountHTTPS.Path != "" {
|
||||
path = l.AccountHTTPS.Path
|
||||
}
|
||||
srv := ensureServe(true, l.AccountHTTPS.Forwarded, false, port, "account-https at "+path, true)
|
||||
handler := mox.SafeHeaders(http.StripPrefix(strings.TrimRight(path, "/"), http.HandlerFunc(webaccount.Handler(path, l.AccountHTTPS.Forwarded))))
|
||||
srv.ServiceHandle("account", accountHostMatch, path, handler)
|
||||
redirectToTrailingSlash(srv, accountHostMatch, "account", path)
|
||||
}
|
||||
|
||||
if l.AdminHTTP.Enabled {
|
||||
port := config.Port(l.AdminHTTP.Port, 80)
|
||||
path := "/admin/"
|
||||
if l.AdminHTTP.Path != "" {
|
||||
path = l.AdminHTTP.Path
|
||||
}
|
||||
srv := ensureServe(false, l.AdminHTTP.Forwarded, false, port, "admin-http at "+path, true)
|
||||
handler := mox.SafeHeaders(http.StripPrefix(strings.TrimRight(path, "/"), http.HandlerFunc(webadmin.Handler(path, l.AdminHTTP.Forwarded))))
|
||||
srv.ServiceHandle("admin", listenerHostMatch, path, handler)
|
||||
redirectToTrailingSlash(srv, listenerHostMatch, "admin", path)
|
||||
ensureACMEHTTP01(srv)
|
||||
}
|
||||
if l.AdminHTTPS.Enabled {
|
||||
port := config.Port(l.AdminHTTPS.Port, 443)
|
||||
path := "/admin/"
|
||||
if l.AdminHTTPS.Path != "" {
|
||||
path = l.AdminHTTPS.Path
|
||||
}
|
||||
srv := ensureServe(true, l.AdminHTTPS.Forwarded, false, port, "admin-https at "+path, true)
|
||||
handler := mox.SafeHeaders(http.StripPrefix(strings.TrimRight(path, "/"), http.HandlerFunc(webadmin.Handler(path, l.AdminHTTPS.Forwarded))))
|
||||
srv.ServiceHandle("admin", listenerHostMatch, path, handler)
|
||||
redirectToTrailingSlash(srv, listenerHostMatch, "admin", path)
|
||||
}
|
||||
|
||||
maxMsgSize := l.SMTPMaxMessageSize
|
||||
if maxMsgSize == 0 {
|
||||
maxMsgSize = config.DefaultMaxMsgSize
|
||||
}
|
||||
|
||||
if l.WebAPIHTTP.Enabled {
|
||||
port := config.Port(l.WebAPIHTTP.Port, 80)
|
||||
path := "/webapi/"
|
||||
if l.WebAPIHTTP.Path != "" {
|
||||
path = l.WebAPIHTTP.Path
|
||||
}
|
||||
srv := ensureServe(false, l.WebAPIHTTP.Forwarded, false, port, "webapi-http at "+path, true)
|
||||
handler := mox.SafeHeaders(http.StripPrefix(strings.TrimRight(path, "/"), webapisrv.NewServer(maxMsgSize, path, l.WebAPIHTTP.Forwarded)))
|
||||
srv.ServiceHandle("webapi", accountHostMatch, path, handler)
|
||||
redirectToTrailingSlash(srv, accountHostMatch, "webapi", path)
|
||||
ensureACMEHTTP01(srv)
|
||||
}
|
||||
if l.WebAPIHTTPS.Enabled {
|
||||
port := config.Port(l.WebAPIHTTPS.Port, 443)
|
||||
path := "/webapi/"
|
||||
if l.WebAPIHTTPS.Path != "" {
|
||||
path = l.WebAPIHTTPS.Path
|
||||
}
|
||||
srv := ensureServe(true, l.WebAPIHTTPS.Forwarded, false, port, "webapi-https at "+path, true)
|
||||
handler := mox.SafeHeaders(http.StripPrefix(strings.TrimRight(path, "/"), webapisrv.NewServer(maxMsgSize, path, l.WebAPIHTTPS.Forwarded)))
|
||||
srv.ServiceHandle("webapi", accountHostMatch, path, handler)
|
||||
redirectToTrailingSlash(srv, accountHostMatch, "webapi", path)
|
||||
}
|
||||
|
||||
if l.WebmailHTTP.Enabled {
|
||||
port := config.Port(l.WebmailHTTP.Port, 80)
|
||||
path := "/webmail/"
|
||||
if l.WebmailHTTP.Path != "" {
|
||||
path = l.WebmailHTTP.Path
|
||||
}
|
||||
srv := ensureServe(false, l.WebmailHTTP.Forwarded, false, port, "webmail-http at "+path, true)
|
||||
var accountPath string
|
||||
if l.AccountHTTP.Enabled {
|
||||
accountPath = "/"
|
||||
if l.AccountHTTP.Path != "" {
|
||||
accountPath = l.AccountHTTP.Path
|
||||
if l.WebmailHTTP.Enabled {
|
||||
port := config.Port(l.WebmailHTTP.Port, 80)
|
||||
path := "/webmail/"
|
||||
if l.WebmailHTTP.Path != "" {
|
||||
path = l.WebmailHTTP.Path
|
||||
}
|
||||
srv := ensureServe(false, port, "webmail-http at "+path)
|
||||
handler := http.StripPrefix(path[:len(path)-1], http.HandlerFunc(webmail.Handler(maxMsgSize, path, l.WebmailHTTP.Forwarded)))
|
||||
srv.Handle("webmail", nil, path, handler)
|
||||
redirectToTrailingSlash(srv, "webmail", path)
|
||||
}
|
||||
handler := http.StripPrefix(strings.TrimRight(path, "/"), http.HandlerFunc(webmail.Handler(maxMsgSize, path, l.WebmailHTTP.Forwarded, accountPath)))
|
||||
srv.ServiceHandle("webmail", accountHostMatch, path, handler)
|
||||
redirectToTrailingSlash(srv, accountHostMatch, "webmail", path)
|
||||
ensureACMEHTTP01(srv)
|
||||
}
|
||||
if l.WebmailHTTPS.Enabled {
|
||||
port := config.Port(l.WebmailHTTPS.Port, 443)
|
||||
path := "/webmail/"
|
||||
if l.WebmailHTTPS.Path != "" {
|
||||
path = l.WebmailHTTPS.Path
|
||||
}
|
||||
srv := ensureServe(true, l.WebmailHTTPS.Forwarded, false, port, "webmail-https at "+path, true)
|
||||
var accountPath string
|
||||
if l.AccountHTTPS.Enabled {
|
||||
accountPath = "/"
|
||||
if l.AccountHTTPS.Path != "" {
|
||||
accountPath = l.AccountHTTPS.Path
|
||||
if l.WebmailHTTPS.Enabled {
|
||||
port := config.Port(l.WebmailHTTPS.Port, 443)
|
||||
path := "/webmail/"
|
||||
if l.WebmailHTTPS.Path != "" {
|
||||
path = l.WebmailHTTPS.Path
|
||||
}
|
||||
}
|
||||
handler := http.StripPrefix(strings.TrimRight(path, "/"), http.HandlerFunc(webmail.Handler(maxMsgSize, path, l.WebmailHTTPS.Forwarded, accountPath)))
|
||||
srv.ServiceHandle("webmail", accountHostMatch, path, handler)
|
||||
redirectToTrailingSlash(srv, accountHostMatch, "webmail", path)
|
||||
}
|
||||
|
||||
if l.MetricsHTTP.Enabled {
|
||||
port := config.Port(l.MetricsHTTP.Port, 8010)
|
||||
srv := ensureServe(false, false, false, port, "metrics-http", false)
|
||||
srv.SystemHandle("metrics", nil, "/metrics", mox.SafeHeaders(promhttp.Handler()))
|
||||
srv.SystemHandle("metrics", nil, "/", mox.SafeHeaders(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.URL.Path != "/" {
|
||||
http.NotFound(w, r)
|
||||
return
|
||||
} else if r.Method != "GET" {
|
||||
http.Error(w, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Content-Type", "text/html")
|
||||
fmt.Fprint(w, `<html><body>see <a href="metrics">metrics</a></body></html>`)
|
||||
})))
|
||||
}
|
||||
if l.AutoconfigHTTPS.Enabled {
|
||||
port := config.Port(l.AutoconfigHTTPS.Port, 443)
|
||||
srv := ensureServe(!l.AutoconfigHTTPS.NonTLS, false, false, port, "autoconfig-https", false)
|
||||
if l.AutoconfigHTTPS.NonTLS {
|
||||
ensureACMEHTTP01(srv)
|
||||
}
|
||||
autoconfigMatch := func(ipdom dns.IPDomain) bool {
|
||||
dom := ipdom.Domain
|
||||
if dom.IsZero() {
|
||||
return false
|
||||
}
|
||||
// Thunderbird requests an autodiscovery URL at the email address domain name, so
|
||||
// autoconfig prefix is optional.
|
||||
if strings.HasPrefix(dom.ASCII, "autoconfig.") {
|
||||
dom.ASCII = strings.TrimPrefix(dom.ASCII, "autoconfig.")
|
||||
dom.Unicode = strings.TrimPrefix(dom.Unicode, "autoconfig.")
|
||||
}
|
||||
// Autodiscovery uses a SRV record. It shouldn't point to a CNAME. So we directly
|
||||
// use the mail server's host name.
|
||||
if dom == mox.Conf.Static.HostnameDomain || dom == mox.Conf.Static.Listeners["public"].HostnameDomain {
|
||||
return true
|
||||
}
|
||||
dc, ok := mox.Conf.Domain(dom)
|
||||
return ok && !dc.ReportsOnly
|
||||
}
|
||||
srv.SystemHandle("autoconfig", autoconfigMatch, "/mail/config-v1.1.xml", mox.SafeHeaders(http.HandlerFunc(autoconfHandle)))
|
||||
srv.SystemHandle("autodiscover", autoconfigMatch, "/autodiscover/autodiscover.xml", mox.SafeHeaders(http.HandlerFunc(autodiscoverHandle)))
|
||||
srv.SystemHandle("mobileconfig", autoconfigMatch, "/profile.mobileconfig", mox.SafeHeaders(http.HandlerFunc(mobileconfigHandle)))
|
||||
srv.SystemHandle("mobileconfigqrcodepng", autoconfigMatch, "/profile.mobileconfig.qrcode.png", mox.SafeHeaders(http.HandlerFunc(mobileconfigQRCodeHandle)))
|
||||
}
|
||||
if l.MTASTSHTTPS.Enabled {
|
||||
port := config.Port(l.MTASTSHTTPS.Port, 443)
|
||||
srv := ensureServe(!l.MTASTSHTTPS.NonTLS, false, false, port, "mtasts-https", false)
|
||||
if l.MTASTSHTTPS.NonTLS {
|
||||
ensureACMEHTTP01(srv)
|
||||
}
|
||||
mtastsMatch := func(ipdom dns.IPDomain) bool {
|
||||
// todo: may want to check this against the configured domains, could in theory be just a webserver.
|
||||
dom := ipdom.Domain
|
||||
if dom.IsZero() {
|
||||
return false
|
||||
}
|
||||
return strings.HasPrefix(dom.ASCII, "mta-sts.")
|
||||
}
|
||||
srv.SystemHandle("mtasts", mtastsMatch, "/.well-known/mta-sts.txt", mox.SafeHeaders(http.HandlerFunc(mtastsPolicyHandle)))
|
||||
}
|
||||
if l.PprofHTTP.Enabled {
|
||||
// Importing net/http/pprof registers handlers on the default serve mux.
|
||||
port := config.Port(l.PprofHTTP.Port, 8011)
|
||||
if _, ok := portServe[port]; ok {
|
||||
pkglog.Fatal("cannot serve pprof on same endpoint as other http services")
|
||||
}
|
||||
srv := &serve{[]string{"pprof-http"}, nil, nil, false, false, false, nil, false, nil}
|
||||
portServe[port] = srv
|
||||
srv.SystemHandle("pprof", nil, "/", http.DefaultServeMux)
|
||||
}
|
||||
if l.WebserverHTTP.Enabled {
|
||||
port := config.Port(l.WebserverHTTP.Port, 80)
|
||||
srv := ensureServe(false, false, l.WebserverHTTP.RateLimitDisabled, port, "webserver-http", false)
|
||||
srv.Webserver = true
|
||||
ensureACMEHTTP01(srv)
|
||||
}
|
||||
if l.WebserverHTTPS.Enabled {
|
||||
port := config.Port(l.WebserverHTTPS.Port, 443)
|
||||
srv := ensureServe(true, false, l.WebserverHTTPS.RateLimitDisabled, port, "webserver-https", false)
|
||||
srv.Webserver = true
|
||||
}
|
||||
|
||||
if l.TLS != nil && l.TLS.ACME != "" {
|
||||
m := mox.Conf.Static.ACME[l.TLS.ACME].Manager
|
||||
if ensureManagerHosts[m] == nil {
|
||||
ensureManagerHosts[m] = map[dns.Domain]struct{}{}
|
||||
}
|
||||
hosts := ensureManagerHosts[m]
|
||||
hosts[mox.Conf.Static.HostnameDomain] = struct{}{}
|
||||
|
||||
if l.HostnameDomain.ASCII != "" {
|
||||
hosts[l.HostnameDomain] = struct{}{}
|
||||
srv := ensureServe(true, port, "webmail-https at "+path)
|
||||
handler := http.StripPrefix(path[:len(path)-1], http.HandlerFunc(webmail.Handler(maxMsgSize, path, l.WebmailHTTPS.Forwarded)))
|
||||
srv.Handle("webmail", nil, path, handler)
|
||||
redirectToTrailingSlash(srv, "webmail", path)
|
||||
}
|
||||
|
||||
// All domains are served on all listeners. Gather autoconfig hostnames to ensure
|
||||
// presence of TLS certificates. Fetching a certificate on-demand may be too slow
|
||||
// for the timeouts of clients doing autoconfig.
|
||||
if l.MetricsHTTP.Enabled {
|
||||
port := config.Port(l.MetricsHTTP.Port, 8010)
|
||||
srv := ensureServe(false, port, "metrics-http")
|
||||
srv.Handle("metrics", nil, "/metrics", safeHeaders(promhttp.Handler()))
|
||||
srv.Handle("metrics", nil, "/", safeHeaders(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.URL.Path != "/" {
|
||||
http.NotFound(w, r)
|
||||
return
|
||||
} else if r.Method != "GET" {
|
||||
http.Error(w, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Content-Type", "text/html")
|
||||
fmt.Fprint(w, `<html><body>see <a href="metrics">metrics</a></body></html>`)
|
||||
})))
|
||||
}
|
||||
if l.AutoconfigHTTPS.Enabled {
|
||||
port := config.Port(l.AutoconfigHTTPS.Port, 443)
|
||||
srv := ensureServe(!l.AutoconfigHTTPS.NonTLS, port, "autoconfig-https")
|
||||
autoconfigMatch := func(dom dns.Domain) bool {
|
||||
// Thunderbird requests an autodiscovery URL at the email address domain name, so
|
||||
// autoconfig prefix is optional.
|
||||
if strings.HasPrefix(dom.ASCII, "autoconfig.") {
|
||||
dom.ASCII = strings.TrimPrefix(dom.ASCII, "autoconfig.")
|
||||
dom.Unicode = strings.TrimPrefix(dom.Unicode, "autoconfig.")
|
||||
}
|
||||
// Autodiscovery uses a SRV record. It shouldn't point to a CNAME. So we directly
|
||||
// use the mail server's host name.
|
||||
if dom == mox.Conf.Static.HostnameDomain || dom == mox.Conf.Static.Listeners["public"].HostnameDomain {
|
||||
return true
|
||||
}
|
||||
dc, ok := mox.Conf.Domain(dom)
|
||||
return ok && !dc.ReportsOnly
|
||||
}
|
||||
srv.Handle("autoconfig", autoconfigMatch, "/mail/config-v1.1.xml", safeHeaders(http.HandlerFunc(autoconfHandle)))
|
||||
srv.Handle("autodiscover", autoconfigMatch, "/autodiscover/autodiscover.xml", safeHeaders(http.HandlerFunc(autodiscoverHandle)))
|
||||
srv.Handle("mobileconfig", autoconfigMatch, "/profile.mobileconfig", safeHeaders(http.HandlerFunc(mobileconfigHandle)))
|
||||
srv.Handle("mobileconfigqrcodepng", autoconfigMatch, "/profile.mobileconfig.qrcode.png", safeHeaders(http.HandlerFunc(mobileconfigQRCodeHandle)))
|
||||
}
|
||||
if l.MTASTSHTTPS.Enabled {
|
||||
port := config.Port(l.MTASTSHTTPS.Port, 443)
|
||||
srv := ensureServe(!l.MTASTSHTTPS.NonTLS, port, "mtasts-https")
|
||||
mtastsMatch := func(dom dns.Domain) bool {
|
||||
// todo: may want to check this against the configured domains, could in theory be just a webserver.
|
||||
return strings.HasPrefix(dom.ASCII, "mta-sts.")
|
||||
}
|
||||
srv.Handle("mtasts", mtastsMatch, "/.well-known/mta-sts.txt", safeHeaders(http.HandlerFunc(mtastsPolicyHandle)))
|
||||
}
|
||||
if l.PprofHTTP.Enabled {
|
||||
// Importing net/http/pprof registers handlers on the default serve mux.
|
||||
port := config.Port(l.PprofHTTP.Port, 8011)
|
||||
if _, ok := portServe[port]; ok {
|
||||
pkglog.Fatal("cannot serve pprof on same endpoint as other http services")
|
||||
}
|
||||
srv := &serve{[]string{"pprof-http"}, nil, nil, false}
|
||||
portServe[port] = srv
|
||||
srv.Handle("pprof", nil, "/", http.DefaultServeMux)
|
||||
}
|
||||
if l.WebserverHTTP.Enabled {
|
||||
port := config.Port(l.WebserverHTTP.Port, 80)
|
||||
srv := ensureServe(false, port, "webserver-http")
|
||||
srv.Webserver = true
|
||||
}
|
||||
if l.WebserverHTTPS.Enabled {
|
||||
port := config.Port(l.WebserverHTTPS.Port, 443)
|
||||
srv := ensureServe(true, port, "webserver-https")
|
||||
srv.Webserver = true
|
||||
}
|
||||
|
||||
if l.AutoconfigHTTPS.Enabled && !l.AutoconfigHTTPS.NonTLS {
|
||||
if l.TLS != nil && l.TLS.ACME != "" {
|
||||
m := mox.Conf.Static.ACME[l.TLS.ACME].Manager
|
||||
|
||||
// If we are listening on port 80 for plain http, also register acme http-01
|
||||
// validation handler.
|
||||
if srv, ok := portServe[80]; ok && srv.TLSConfig == nil {
|
||||
srv.Kinds = append(srv.Kinds, "acme-http-01")
|
||||
srv.Handle("acme-http-01", nil, "/.well-known/acme-challenge/", m.Manager.HTTPHandler(nil))
|
||||
}
|
||||
|
||||
hosts := map[dns.Domain]struct{}{
|
||||
mox.Conf.Static.HostnameDomain: {},
|
||||
}
|
||||
if l.HostnameDomain.ASCII != "" {
|
||||
hosts[l.HostnameDomain] = struct{}{}
|
||||
}
|
||||
// All domains are served on all listeners. Gather autoconfig hostnames to ensure
|
||||
// presence of TLS certificates for them.
|
||||
for _, name := range mox.Conf.Domains() {
|
||||
if dom, err := dns.ParseDomain(name); err != nil {
|
||||
pkglog.Errorx("parsing domain from config", err)
|
||||
} else if d, _ := mox.Conf.Domain(dom); d.ReportsOnly || d.Disabled {
|
||||
// Do not gather autoconfig name if we aren't accepting email for this domain or when it is disabled.
|
||||
} else if d, _ := mox.Conf.Domain(dom); d.ReportsOnly {
|
||||
// Do not gather autoconfig name if we aren't accepting email for this domain.
|
||||
continue
|
||||
}
|
||||
|
||||
@ -903,32 +702,29 @@ func portServes(name string, l config.Listener) map[int]*serve {
|
||||
hosts[autoconfdom] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
ensureManagerHosts[m] = hosts
|
||||
}
|
||||
|
||||
ports := maps.Keys(portServe)
|
||||
sort.Ints(ports)
|
||||
for _, port := range ports {
|
||||
srv := portServe[port]
|
||||
sort.Slice(srv.PathHandlers, func(i, j int) bool {
|
||||
a := srv.PathHandlers[i].Path
|
||||
b := srv.PathHandlers[j].Path
|
||||
if len(a) == len(b) {
|
||||
// For consistent order.
|
||||
return a < b
|
||||
}
|
||||
// Longest paths first.
|
||||
return len(a) > len(b)
|
||||
})
|
||||
for _, ip := range l.IPs {
|
||||
listen1(ip, port, srv.TLSConfig, name, srv.Kinds, srv)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if s := portServe[443]; s != nil && s.TLSConfig != nil && len(s.NextProto) > 0 {
|
||||
s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, slices.Collect(maps.Keys(s.NextProto))...)
|
||||
}
|
||||
|
||||
for _, srv := range portServe {
|
||||
sortPathHandlers(srv.SystemHandlers)
|
||||
sortPathHandlers(srv.ServiceHandlers)
|
||||
}
|
||||
|
||||
return portServe
|
||||
}
|
||||
|
||||
func sortPathHandlers(l []pathHandler) {
|
||||
sort.Slice(l, func(i, j int) bool {
|
||||
a := l[i].Path
|
||||
b := l[j].Path
|
||||
if len(a) == len(b) {
|
||||
// For consistent order.
|
||||
return a < b
|
||||
}
|
||||
// Longest paths first.
|
||||
return len(a) > len(b)
|
||||
})
|
||||
}
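
sortPathHandlers orders registered handlers longest-path-first, with equal-length paths ordered lexicographically for a stable result, so the most specific prefix is tried first when matching a request. A small standalone illustration of the same comparison on plain strings:

package main

import (
	"fmt"
	"sort"
)

func main() {
	// Same ordering rule as sortPathHandlers: longest paths first, ties broken lexicographically.
	paths := []string{"/", "/admin/", "/.well-known/mta-sts.txt", "/metrics"}
	sort.Slice(paths, func(i, j int) bool {
		a, b := paths[i], paths[j]
		if len(a) == len(b) {
			return a < b
		}
		return len(a) > len(b)
	})
	fmt.Println(paths) // [/.well-known/mta-sts.txt /metrics /admin/ /]
}
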
|
||||
|
||||
// functions to be launched in goroutine that will serve on a listener.
|
||||
@ -941,10 +737,8 @@ var servers []func()
|
||||
// the certificate to be given during the first https connection.
|
||||
var ensureManagerHosts = map[*autotls.Manager]map[dns.Domain]struct{}{}
|
||||
|
||||
type tlsNextProtoMap = map[string]func(*http.Server, *tls.Conn, http.Handler)
|
||||
|
||||
// listen prepares a listener, and adds it to "servers", to be launched (if not running as root) through Serve.
|
||||
func listen1(ip string, port int, tlsConfig *tls.Config, name string, kinds []string, handler http.Handler, nextProto tlsNextProtoMap) {
|
||||
func listen1(ip string, port int, tlsConfig *tls.Config, name string, kinds []string, handler http.Handler) {
|
||||
addr := net.JoinHostPort(ip, fmt.Sprintf("%d", port))
|
||||
|
||||
var protocol string
|
||||
@ -983,15 +777,6 @@ func listen1(ip string, port int, tlsConfig *tls.Config, name string, kinds []st
|
||||
ReadHeaderTimeout: 30 * time.Second,
|
||||
IdleTimeout: 65 * time.Second, // Chrome closes connections after 60 seconds, firefox after 115 seconds.
|
||||
ErrorLog: golog.New(mlog.LogWriter(pkglog.With(slog.String("pkg", "net/http")), slog.LevelInfo, protocol+" error"), "", 0),
|
||||
TLSNextProto: nextProto,
|
||||
}
|
||||
// By default, the Go 1.6 and above http.Server includes support for HTTP2.
|
||||
// However, HTTP2 is negotiated via ALPN. Because we are configuring
|
||||
// TLSNextProto above, we have to explicitly enable HTTP2 by importing http2
|
||||
// and calling ConfigureServer.
|
||||
err = http2.ConfigureServer(server, nil)
|
||||
if err != nil {
|
||||
pkglog.Fatalx("https: unable to configure http2", err)
|
||||
}
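
As the comment above notes, setting TLSNextProto suppresses Go's automatic ALPN-based HTTP/2 setup, so HTTP/2 has to be enabled explicitly via golang.org/x/net/http2. A minimal sketch of that pattern outside mox; the certificate paths and handler are placeholders:

package main

import (
	"crypto/tls"
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	mux := http.NewServeMux()
	srv := &http.Server{
		Addr:    ":443",
		Handler: mux,
		// Setting TLSNextProto (even to an empty map) disables Go's automatic
		// ALPN-based HTTP/2 configuration, as in the server construction above.
		TLSNextProto: map[string]func(*http.Server, *tls.Conn, http.Handler){},
	}
	// Re-enable HTTP/2 explicitly.
	if err := http2.ConfigureServer(srv, nil); err != nil {
		log.Fatalf("configuring http2: %v", err)
	}
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}
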
|
||||
serve := func() {
|
||||
err := server.Serve(ln)
|
||||
|
@ -6,8 +6,10 @@ import (
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/mjl-/mox/dns"
|
||||
"github.com/mjl-/mox/mox-"
|
||||
)
|
||||
|
||||
@ -17,8 +19,20 @@ func TestServeHTTP(t *testing.T) {
|
||||
mox.ConfigDynamicPath = filepath.Join(filepath.Dir(mox.ConfigStaticPath), "domains.conf")
|
||||
mox.MustLoadConfig(true, false)
|
||||
|
||||
portSrvs := portServes("local", mox.Conf.Static.Listeners["local"])
|
||||
srv := portSrvs[80]
|
||||
srv := &serve{
|
||||
PathHandlers: []pathHandler{
|
||||
{
|
||||
HostMatch: func(dom dns.Domain) bool {
|
||||
return strings.HasPrefix(dom.ASCII, "mta-sts.")
|
||||
},
|
||||
Path: "/.well-known/mta-sts.txt",
|
||||
Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Write([]byte("mta-sts!"))
|
||||
}),
|
||||
},
|
||||
},
|
||||
Webserver: true,
|
||||
}
|
||||
|
||||
test := func(method, target string, expCode int, expContent string, expHeaders map[string]string) {
|
||||
t.Helper()
|
||||
@ -29,22 +43,22 @@ func TestServeHTTP(t *testing.T) {
|
||||
srv.ServeHTTP(rw, req)
|
||||
resp := rw.Result()
|
||||
if resp.StatusCode != expCode {
|
||||
t.Errorf("got statuscode %d, expected %d", resp.StatusCode, expCode)
|
||||
t.Fatalf("got statuscode %d, expected %d", resp.StatusCode, expCode)
|
||||
}
|
||||
if expContent != "" {
|
||||
s := rw.Body.String()
|
||||
if s != expContent {
|
||||
t.Errorf("got response data %q, expected %q", s, expContent)
|
||||
t.Fatalf("got response data %q, expected %q", s, expContent)
|
||||
}
|
||||
}
|
||||
for k, v := range expHeaders {
|
||||
if xv := resp.Header.Get(k); xv != v {
|
||||
t.Errorf("got %q for header %q, expected %q", xv, k, v)
|
||||
t.Fatalf("got %q for header %q, expected %q", xv, k, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
test("GET", "http://mta-sts.mox.example/.well-known/mta-sts.txt", http.StatusOK, "version: STSv1\nmode: enforce\nmax_age: 86400\nmx: mox.example\n", nil)
|
||||
test("GET", "http://mta-sts.mox.example/.well-known/mta-sts.txt", http.StatusOK, "mta-sts!", nil)
|
||||
test("GET", "http://mox.example/.well-known/mta-sts.txt", http.StatusNotFound, "", nil) // mta-sts endpoint not in this domain.
|
||||
test("GET", "http://mta-sts.mox.example/static/", http.StatusNotFound, "", nil) // static not served on this domain.
|
||||
test("GET", "http://mta-sts.mox.example/other", http.StatusNotFound, "", nil)
|
||||
@ -52,24 +66,4 @@ func TestServeHTTP(t *testing.T) {
|
||||
test("GET", "http://mox.example/static/index.html", http.StatusOK, "html\n", map[string]string{"X-Test": "mox"})
|
||||
test("GET", "http://mox.example/static/dir/", http.StatusOK, "", map[string]string{"X-Test": "mox"}) // Dir listing.
|
||||
test("GET", "http://mox.example/other", http.StatusNotFound, "", nil)
|
||||
|
||||
// Webmail on IP, localhost, mail host, clientsettingsdomain, not others.
|
||||
test("GET", "http://127.0.0.1/webmail/", http.StatusOK, "", nil)
|
||||
test("GET", "http://localhost/webmail/", http.StatusOK, "", nil)
|
||||
test("GET", "http://mox.example/webmail/", http.StatusOK, "", nil)
|
||||
test("GET", "http://mail.mox.example/webmail/", http.StatusOK, "", nil)
|
||||
test("GET", "http://mail.other.example/webmail/", http.StatusNotFound, "", nil)
|
||||
test("GET", "http://remotehost/webmail/", http.StatusNotFound, "", nil)
|
||||
|
||||
// admin on IP, localhost, mail host, not clientsettingsdomain.
|
||||
test("GET", "http://127.0.0.1/admin/", http.StatusOK, "", nil)
|
||||
test("GET", "http://localhost/admin/", http.StatusOK, "", nil)
|
||||
test("GET", "http://mox.example/admin/", http.StatusPermanentRedirect, "", nil) // Override by WebHandler.
|
||||
test("GET", "http://mail.mox.example/admin/", http.StatusNotFound, "", nil)
|
||||
|
||||
// account is off.
|
||||
test("GET", "http://127.0.0.1/", http.StatusNotFound, "", nil)
|
||||
test("GET", "http://localhost/", http.StatusNotFound, "", nil)
|
||||
test("GET", "http://mox.example/", http.StatusNotFound, "", nil)
|
||||
test("GET", "http://mail.mox.example/", http.StatusNotFound, "", nil)
|
||||
}
|
||||
|
@ -30,6 +30,7 @@ import (
|
||||
"github.com/mjl-/mox/dns"
|
||||
"github.com/mjl-/mox/mlog"
|
||||
"github.com/mjl-/mox/mox-"
|
||||
"github.com/mjl-/mox/moxio"
|
||||
)
|
||||
|
||||
func recvid(r *http.Request) string {
|
||||
@ -45,13 +46,11 @@ func recvid(r *http.Request) string {
|
||||
// WebHandle runs after the built-in handlers for mta-sts, autoconfig, etc.
|
||||
// If no handler matched, false is returned.
|
||||
// WebHandle sets w.Name to that of the matching handler.
|
||||
func WebHandle(w *loggingWriter, r *http.Request, host dns.IPDomain) (handled bool) {
|
||||
conf := mox.Conf.DynamicConfig()
|
||||
redirects := conf.WebDNSDomainRedirects
|
||||
handlers := conf.WebHandlers
|
||||
func WebHandle(w *loggingWriter, r *http.Request, host dns.Domain) (handled bool) {
|
||||
redirects, handlers := mox.Conf.WebServer()
|
||||
|
||||
for from, to := range redirects {
|
||||
if host.Domain != from {
|
||||
if host != from {
|
||||
continue
|
||||
}
|
||||
u := r.URL
|
||||
@ -63,7 +62,7 @@ func WebHandle(w *loggingWriter, r *http.Request, host dns.IPDomain) (handled bo
|
||||
}
|
||||
|
||||
for _, h := range handlers {
|
||||
if host.Domain != h.DNSDomain {
|
||||
if host != h.DNSDomain {
|
||||
continue
|
||||
}
|
||||
loc := h.Path.FindStringIndex(r.URL.Path)
|
||||
@ -98,10 +97,6 @@ func WebHandle(w *loggingWriter, r *http.Request, host dns.IPDomain) (handled bo
|
||||
w.Handler = h.Name
|
||||
return true
|
||||
}
|
||||
if h.WebInternal != nil && HandleInternal(h.WebInternal, w, r) {
|
||||
w.Handler = h.Name
|
||||
return true
|
||||
}
|
||||
}
|
||||
w.Compress = false
|
||||
return false
|
||||
@ -214,41 +209,31 @@ func HandleStatic(h *config.WebStatic, compress bool, w http.ResponseWriter, r *
|
||||
}
|
||||
http.NotFound(w, r)
|
||||
return true
|
||||
} else if errors.Is(err, syscall.ENAMETOOLONG) {
|
||||
http.NotFound(w, r)
|
||||
return true
|
||||
} else if os.IsPermission(err) {
|
||||
// If we tried opening a directory, we may not have permission to read it, but
|
||||
// still access files inside it (execute bit), such as index.html. So try to serve it.
|
||||
index, err := os.Open(filepath.Join(fspath, "index.html"))
|
||||
if err != nil {
|
||||
http.Error(w, "403 - permission denied", http.StatusForbidden)
|
||||
if err == nil {
|
||||
defer index.Close()
|
||||
var ifi os.FileInfo
|
||||
ifi, err = index.Stat()
|
||||
if err != nil {
|
||||
log().Errorx("stat index.html in directory we cannot list", err, slog.Any("url", r.URL), slog.String("fspath", fspath))
|
||||
http.Error(w, "500 - internal server error"+recvid(r), http.StatusInternalServerError)
|
||||
return true
|
||||
}
|
||||
w.Header().Set("Content-Type", "text/html; charset=utf-8")
|
||||
serveFile("index.html", ifi, index)
|
||||
return true
|
||||
}
|
||||
defer func() {
|
||||
err := index.Close()
|
||||
log().Check(err, "closing index file for serving")
|
||||
}()
|
||||
var ifi os.FileInfo
|
||||
ifi, err = index.Stat()
|
||||
if err != nil {
|
||||
log().Errorx("stat index.html in directory we cannot list", err, slog.Any("url", r.URL), slog.String("fspath", fspath))
|
||||
http.Error(w, "500 - internal server error"+recvid(r), http.StatusInternalServerError)
|
||||
return true
|
||||
}
|
||||
w.Header().Set("Content-Type", "text/html; charset=utf-8")
|
||||
serveFile("index.html", ifi, index)
|
||||
http.Error(w, "403 - permission denied", http.StatusForbidden)
|
||||
return true
|
||||
}
|
||||
log().Errorx("open file for static file serving", err, slog.Any("url", r.URL), slog.String("fspath", fspath))
|
||||
http.Error(w, "500 - internal server error"+recvid(r), http.StatusInternalServerError)
|
||||
return true
|
||||
}
|
||||
defer func() {
|
||||
if err := f.Close(); err != nil {
|
||||
log().Check(err, "closing file for static file serving")
|
||||
}
|
||||
}()
|
||||
defer f.Close()
|
||||
|
||||
fi, err := f.Stat()
|
||||
if err != nil {
|
||||
@ -280,12 +265,7 @@ func HandleStatic(h *config.WebStatic, compress bool, w http.ResponseWriter, r *
|
||||
http.Error(w, "403 - permission denied", http.StatusForbidden)
|
||||
return true
|
||||
} else if err == nil {
|
||||
defer func() {
|
||||
if err := index.Close(); err != nil {
|
||||
log().Check(err, "closing index file for serving")
|
||||
}
|
||||
}()
|
||||
|
||||
defer index.Close()
|
||||
var ifi os.FileInfo
|
||||
ifi, err = index.Stat()
|
||||
if err == nil {
|
||||
@ -352,8 +332,8 @@ func HandleStatic(h *config.WebStatic, compress bool, w http.ResponseWriter, r *
|
||||
}
|
||||
}
|
||||
err = lsTemplate.Execute(w, map[string]any{"Files": files})
|
||||
if err != nil {
|
||||
log().Check(err, "executing directory listing template")
|
||||
if err != nil && !moxio.IsClosed(err) {
|
||||
log().Errorx("executing directory listing template", err)
|
||||
}
|
||||
return true
|
||||
}
|
||||
@ -414,12 +394,6 @@ func HandleRedirect(h *config.WebRedirect, w http.ResponseWriter, r *http.Reques
|
||||
return true
|
||||
}
|
||||
|
||||
// HandleInternal passes the request to an internal service.
|
||||
func HandleInternal(h *config.WebInternal, w http.ResponseWriter, r *http.Request) (handled bool) {
|
||||
h.Handler.ServeHTTP(w, r)
|
||||
return true
|
||||
}
|
||||
|
||||
// HandleForward handles a request by forwarding it to another webserver and
|
||||
// passing the response on. I.e. a reverse proxy. It handles websocket
|
||||
// connections by monitoring the websocket handshake and then just passing along the
|
||||
@ -606,9 +580,7 @@ func forwardWebsocket(h *config.WebForward, w http.ResponseWriter, r *http.Reque
|
||||
}
|
||||
defer func() {
|
||||
if beconn != nil {
|
||||
if err := beconn.Close(); err != nil {
|
||||
log().Check(err, "closing backend websocket connection")
|
||||
}
|
||||
beconn.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
@ -624,9 +596,7 @@ func forwardWebsocket(h *config.WebForward, w http.ResponseWriter, r *http.Reque
|
||||
}
|
||||
defer func() {
|
||||
if cconn != nil {
|
||||
if err := cconn.Close(); err != nil {
|
||||
log().Check(err, "closing client websocket connection")
|
||||
}
|
||||
cconn.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
@ -679,12 +649,8 @@ func forwardWebsocket(h *config.WebForward, w http.ResponseWriter, r *http.Reque
|
||||
// connection whose closing was already announced with a websocket frame.
|
||||
lw.error(<-errc)
|
||||
// Close connections so other goroutine stops as well.
|
||||
if err := cconn.Close(); err != nil {
|
||||
log().Check(err, "closing client websocket connection")
|
||||
}
|
||||
if err := beconn.Close(); err != nil {
|
||||
log().Check(err, "closing backend websocket connection")
|
||||
}
|
||||
cconn.Close()
|
||||
beconn.Close()
|
||||
// Wait for goroutine so it has updated the logWriter.Size*Client fields before we
|
||||
// continue with logging.
|
||||
<-errc
|
||||
@ -737,9 +703,7 @@ func websocketTransact(ctx context.Context, targetURL *url.URL, r *http.Request)
|
||||
}
|
||||
defer func() {
|
||||
if rerr != nil {
|
||||
if xerr := conn.Close(); xerr != nil {
|
||||
log().Check(xerr, "cleaning up websocket connection")
|
||||
}
|
||||
conn.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
@ -766,9 +730,7 @@ func websocketTransact(ctx context.Context, targetURL *url.URL, r *http.Request)
|
||||
}
|
||||
defer func() {
|
||||
if rerr != nil {
|
||||
if xerr := resp.Body.Close(); xerr != nil {
|
||||
log().Check(xerr, "closing response body after error")
|
||||
}
|
||||
resp.Body.Close()
|
||||
}
|
||||
}()
|
||||
if err := conn.SetDeadline(time.Time{}); err != nil {
|
||||
|
@ -134,10 +134,6 @@ func TestWebserver(t *testing.T) {
|
||||
|
||||
test("GET", "http://mox.example/bogus", nil, http.StatusNotFound, "", nil) // path not registered.
|
||||
test("GET", "http://bogus.mox.example/static/", nil, http.StatusNotFound, "", nil) // domain not registered.
|
||||
test("GET", "http://mox.example/xadmin/", nil, http.StatusOK, "", nil) // internal admin service
|
||||
test("GET", "http://mox.example/xaccount/", nil, http.StatusOK, "", nil) // internal account service
|
||||
test("GET", "http://mox.example/xwebmail/", nil, http.StatusOK, "", nil) // internal webmail service
|
||||
test("GET", "http://mox.example/xwebapi/v0/", nil, http.StatusOK, "", nil) // internal webapi service
|
||||
|
||||
npaths := len(staticgzcache.paths)
|
||||
if npaths != 1 {
|
||||
@ -339,4 +335,5 @@ func TestWebsocket(t *testing.T) {
|
||||
w.WriteHeader(http.StatusSwitchingProtocols)
|
||||
})
|
||||
test("GET", wsreqhdrs, http.StatusSwitchingProtocols, wsresphdrs)
|
||||
|
||||
}
|
||||
|
@ -1,102 +1,40 @@
|
||||
/*
|
||||
Package imapclient provides an IMAP4 client implementing IMAP4rev1 (RFC 3501),
|
||||
IMAP4rev2 (RFC 9051) and various extensions.
|
||||
Package imapclient provides an IMAP4 client, primarily for testing the IMAP4 server.
|
||||
|
||||
Warning: Currently primarily for testing the mox IMAP4 server. Behaviour that
|
||||
may not be required by the IMAP4 specification may be expected by this client.
|
||||
|
||||
See [Conn] for a high-level client for executing IMAP commands. Use its embedded
|
||||
[Proto] for lower-level writing of commands and reading of responses.
|
||||
Commands can be sent to the server free-form, but responses are parsed strictly.
|
||||
Behaviour that may not be required by the IMAP4 specification may be expected by
|
||||
this client.
|
||||
*/
|
||||
package imapclient
|
||||
|
||||
/*
|
||||
- Try to keep the parsing method names and the types similar to the ABNF names in the RFCs.
|
||||
|
||||
- todo: have mode for imap4rev1 vs imap4rev2, refusing what is not allowed. we are accepting too much now.
|
||||
- todo: stricter parsing. xnonspace() and xword() should be replaced by proper parsers.
|
||||
*/
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"io"
|
||||
"log/slog"
|
||||
"net"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/mjl-/mox/mlog"
|
||||
"github.com/mjl-/mox/moxio"
|
||||
)
|
||||
|
||||
// Conn is a connection to an IMAP server.
|
||||
//
|
||||
// Method names on Conn are the names of IMAP commands. CloseMailbox, which
|
||||
// executes the IMAP CLOSE command, is an exception. The Close method closes the
|
||||
// connection.
|
||||
//
|
||||
// The methods starting with MSN are the original (old) IMAP commands. The variants
|
||||
// starting with UID should almost always be used instead, if available.
|
||||
//
|
||||
// The methods on Conn typically return errors of type Error or Response. Error
|
||||
// represents protocol and i/o level errors, including io.ErrDeadlineExceeded and
|
||||
// various errors for closed connections. Response is returned as error if the IMAP
|
||||
// result is NO or BAD instead of OK. The responses returned by the IMAP command
|
||||
// methods can also be non-zero on errors. Callers may wish to process any untagged
|
||||
// responses.
|
||||
//
|
||||
// The IMAP command methods defined on Conn don't interpret the untagged responses
|
||||
// except for untagged CAPABILITY and untagged ENABLED responses, and the
|
||||
// CAPABILITY response code. Fields CapAvailable and CapEnabled are updated when
|
||||
// those untagged responses are received.
|
||||
//
|
||||
// Capabilities indicate which optional IMAP functionality is supported by a
|
||||
// server. Capabilities are typically implicitly enabled when the client sends a
|
||||
// command using syntax of an optional extension. Extensions without new syntax
|
||||
// from client to server, but with new behaviour or syntax from server to client,
|
||||
// the client needs to explicitly enable the capability with the ENABLE command,
|
||||
// see the Enable method.
|
||||
// Conn is an IMAP connection to a server.
|
||||
type Conn struct {
|
||||
// If true, server sent a PREAUTH tag and the connection is already authenticated,
|
||||
// e.g. based on TLS certificate authentication.
|
||||
Preauth bool
|
||||
|
||||
// Capabilities available at server, from CAPABILITY command or response code.
|
||||
CapAvailable []Capability
|
||||
// Capabilities marked as enabled by the server, typically after an ENABLE command.
|
||||
CapEnabled []Capability
|
||||
|
||||
// Proto provides lower-level functions for interacting with the IMAP connection,
|
||||
// such as reading and writing individual lines/commands/responses.
|
||||
Proto
|
||||
}
|
||||
|
||||
// Proto provides low-level operations for writing requests and reading responses
|
||||
// on an IMAP connection.
|
||||
//
|
||||
// To implement the IDLE command, write "IDLE" using [Proto.WriteCommandf], then
|
||||
// read a line with [Proto.Readline]. If it starts with "+ ", the connection is in
|
||||
// idle mode and untagged responses can be read using [Proto.ReadUntagged]. If the
|
||||
// line doesn't start with "+ ", use [ParseResult] to interpret it as a response to
|
||||
// IDLE, which should be a NO or BAD. To abort idle mode, write "DONE" using
|
||||
// [Proto.Writelinef] and wait until a result line has been read.
|
||||
type Proto struct {
|
||||
// Connection, may be original TCP or TLS connection. Reads go through c.br, and
|
||||
// writes through c.xbw. The "x" for the writes indicate that failed writes cause
|
||||
// an i/o panic, which is either turned into a returned error, or passed on (see
|
||||
// boolean panic). The reader and writer wrap a tracing reading/writer and may wrap
|
||||
// flate compression.
|
||||
conn net.Conn
|
||||
connBroken bool // If connection is broken, we won't flush (and write) again.
|
||||
br *bufio.Reader
|
||||
tr *moxio.TraceReader
|
||||
xbw *bufio.Writer
|
||||
compress bool // If compression is enabled, we must flush flateWriter and its target original bufio writer.
|
||||
xflateWriter *moxio.FlateWriter
|
||||
xflateBW *bufio.Writer
|
||||
xtw *moxio.TraceWriter
|
||||
|
||||
log mlog.Log
|
||||
errHandle func(err error) // If set, called for all errors. Can panic. Used for imapserver tests.
|
||||
conn net.Conn
|
||||
r *bufio.Reader
|
||||
panic bool
|
||||
tagGen int
|
||||
record bool // If true, bytes read are added to recordBuf. recorded() resets.
|
||||
recordBuf []byte
|
||||
|
||||
lastTag string
|
||||
LastTag string
|
||||
CapAvailable map[Capability]struct{} // Capabilities available at server, from CAPABILITY command or response code.
|
||||
CapEnabled map[Capability]struct{} // Capabilities enabled through ENABLE command.
|
||||
}
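
The Proto documentation above outlines how IDLE can be built from the low-level calls. A sketch of one IDLE cycle using only methods shown in this file; the function name is illustrative and error handling is abbreviated:

package example

import (
	"fmt"
	"strings"

	"github.com/mjl-/mox/imapclient"
)

// idleOnce starts IDLE on an authenticated, selected connection, waits for a
// single untagged update, then ends the command with DONE.
func idleOnce(c *imapclient.Conn) error {
	if err := c.WriteCommandf("", "idle"); err != nil {
		return err
	}
	line, err := c.Readline()
	if err != nil {
		return err
	}
	if !strings.HasPrefix(line, "+ ") {
		// Not a continuation: the line should be a NO/BAD result for IDLE.
		_, result, err := imapclient.ParseResult(line)
		if err != nil {
			return err
		}
		return fmt.Errorf("idle not accepted, status %v", result.Status)
	}
	// In idle mode: wait for one untagged response, e.g. EXISTS or EXPUNGE.
	if _, err := c.ReadUntagged(); err != nil {
		return err
	}
	// Abort idle and wait for the tagged completion result.
	if err := c.Writelinef("DONE"); err != nil {
		return err
	}
	_, err = c.ReadResponse()
	return err
}
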
|
||||
|
||||
// Error is a parse or other protocol error.
|
||||
@ -110,52 +48,22 @@ func (e Error) Unwrap() error {
|
||||
return e.err
|
||||
}
|
||||
|
||||
// Opts has optional fields that influence behaviour of a Conn.
|
||||
type Opts struct {
|
||||
Logger *slog.Logger
|
||||
|
||||
// Error is called for IMAP-level and connection-level errors during the IMAP
|
||||
// command methods on Conn, not for errors in calls on Proto. Error is allowed to
|
||||
// call panic.
|
||||
Error func(err error)
|
||||
}
|
||||
|
||||
// New initializes a new IMAP client on conn.
|
||||
// New creates a new client on conn.
|
||||
//
|
||||
// Conn should normally be a TLS connection, typically connected to port 993 of an
|
||||
// IMAP server. Alternatively, conn can be a plain TCP connection to port 143. TLS
|
||||
// should be enabled on plain TCP connections with the [Conn.StartTLS] method.
|
||||
// If xpanic is true, functions that would return an error instead panic. For parse
|
||||
// errors, the resulting stack traces typically show what was being parsed.
|
||||
//
|
||||
// The initial untagged greeting response is read and must be "OK" or
|
||||
// "PREAUTH". If preauth, the connection is already in authenticated state,
|
||||
// typically through TLS client certificate. This is indicated in Conn.Preauth.
|
||||
//
|
||||
// Logging is written to opts.Logger. In particular, IMAP protocol traces are
|
||||
// written with prefixes "CR: " and "CW: " (client read/write) as quoted strings at
|
||||
// levels Debug-4, with authentication messages at Debug-6 and (user) data at level
|
||||
// Debug-8.
|
||||
func New(conn net.Conn, opts *Opts) (client *Conn, rerr error) {
|
||||
// The initial untagged greeting response is read and must be "OK".
|
||||
func New(conn net.Conn, xpanic bool) (client *Conn, rerr error) {
|
||||
c := Conn{
|
||||
Proto: Proto{conn: conn},
|
||||
conn: conn,
|
||||
r: bufio.NewReader(conn),
|
||||
panic: xpanic,
|
||||
CapAvailable: map[Capability]struct{}{},
|
||||
CapEnabled: map[Capability]struct{}{},
|
||||
}
|
||||
|
||||
var clog *slog.Logger
|
||||
if opts != nil {
|
||||
c.errHandle = opts.Error
|
||||
clog = opts.Logger
|
||||
} else {
|
||||
clog = slog.Default()
|
||||
}
|
||||
c.log = mlog.New("imapclient", clog)
|
||||
|
||||
c.tr = moxio.NewTraceReader(c.log, "CR: ", &c)
|
||||
c.br = bufio.NewReader(c.tr)
|
||||
|
||||
// Writes are buffered and write to Conn, which may panic.
|
||||
c.xtw = moxio.NewTraceWriter(c.log, "CW: ", &c)
|
||||
c.xbw = bufio.NewWriter(c.xtw)
|
||||
|
||||
defer c.recoverErr(&rerr)
|
||||
defer c.recover(&rerr)
|
||||
tag := c.xnonspace()
|
||||
if tag != "*" {
|
||||
c.xerrorf("expected untagged *, got %q", tag)
|
||||
@ -167,15 +75,9 @@ func New(conn net.Conn, opts *Opts) (client *Conn, rerr error) {
|
||||
if x.Status != OK {
|
||||
c.xerrorf("greeting, got status %q, expected OK", x.Status)
|
||||
}
|
||||
if x.Code != nil {
|
||||
if caps, ok := x.Code.(CodeCapability); ok {
|
||||
c.CapAvailable = caps
|
||||
}
|
||||
}
|
||||
return &c, nil
|
||||
case UntaggedPreauth:
|
||||
c.Preauth = true
|
||||
return &c, nil
|
||||
c.xerrorf("greeting: unexpected preauth")
|
||||
case UntaggedBye:
|
||||
c.xerrorf("greeting: server sent bye")
|
||||
default:
|
||||
@ -184,16 +86,8 @@ func New(conn net.Conn, opts *Opts) (client *Conn, rerr error) {
|
||||
panic("not reached")
|
||||
}
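
A sketch of typical use of the newer New/Opts API shown in this diff; the server address and credentials are placeholders:

package main

import (
	"crypto/tls"
	"log"
	"log/slog"

	"github.com/mjl-/mox/imapclient"
)

func main() {
	// Hypothetical IMAPS endpoint and account, for illustration only.
	conn, err := tls.Dial("tcp", "imap.example.com:993", nil)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	c, err := imapclient.New(conn, &imapclient.Opts{Logger: slog.Default()})
	if err != nil {
		log.Fatalf("reading greeting: %v", err)
	}
	defer c.Close()

	if _, err := c.Login("user@example.com", "password"); err != nil {
		log.Fatalf("login: %v", err)
	}
	if _, err := c.Logout(); err != nil {
		log.Fatalf("logout: %v", err)
	}
}
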
|
||||
|
||||
func (c *Conn) recoverErr(rerr *error) {
|
||||
c.recover(rerr, nil)
|
||||
}
|
||||
|
||||
func (c *Conn) recover(rerr *error, resp *Response) {
|
||||
if *rerr != nil {
|
||||
if r, ok := (*rerr).(Response); ok && resp != nil {
|
||||
*resp = r
|
||||
}
|
||||
c.errHandle(*rerr)
|
||||
func (c *Conn) recover(rerr *error) {
|
||||
if c.panic {
|
||||
return
|
||||
}
|
||||
|
||||
@ -201,163 +95,30 @@ func (c *Conn) recover(rerr *error, resp *Response) {
|
||||
if x == nil {
|
||||
return
|
||||
}
|
||||
var err error
|
||||
switch e := x.(type) {
|
||||
case Error:
|
||||
err = e
|
||||
case Response:
|
||||
err = e
|
||||
if resp != nil {
|
||||
*resp = e
|
||||
}
|
||||
default:
|
||||
err, ok := x.(Error)
|
||||
if !ok {
|
||||
panic(x)
|
||||
}
|
||||
if c.errHandle != nil {
|
||||
c.errHandle(err)
|
||||
}
|
||||
*rerr = err
|
||||
}
|
||||
|
||||
func (p *Proto) recover(rerr *error) {
|
||||
if *rerr != nil {
|
||||
return
|
||||
}
|
||||
|
||||
x := recover()
|
||||
if x == nil {
|
||||
return
|
||||
}
|
||||
switch e := x.(type) {
|
||||
case Error:
|
||||
*rerr = e
|
||||
default:
|
||||
panic(x)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Proto) xerrorf(format string, args ...any) {
|
||||
func (c *Conn) xerrorf(format string, args ...any) {
|
||||
panic(Error{fmt.Errorf(format, args...)})
|
||||
}
|
||||
|
||||
func (p *Proto) xcheckf(err error, format string, args ...any) {
|
||||
func (c *Conn) xcheckf(err error, format string, args ...any) {
|
||||
if err != nil {
|
||||
p.xerrorf("%s: %w", fmt.Sprintf(format, args...), err)
|
||||
c.xerrorf("%s: %w", fmt.Sprintf(format, args...), err)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Proto) xcheck(err error) {
|
||||
func (c *Conn) xcheck(err error) {
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// xresponse sets resp if err is a Response and resp is not nil.
|
||||
func (p *Proto) xresponse(err error, resp *Response) {
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
if r, ok := err.(Response); ok && resp != nil {
|
||||
*resp = r
|
||||
}
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Write writes directly to underlying connection (TCP, TLS). For internal use
|
||||
// only, to implement io.Writer. Write errors do take the connection's panic mode
|
||||
// into account, i.e. Write can panic.
|
||||
func (p *Proto) Write(buf []byte) (n int, rerr error) {
|
||||
defer p.recover(&rerr)
|
||||
|
||||
n, rerr = p.conn.Write(buf)
|
||||
if rerr != nil {
|
||||
p.connBroken = true
|
||||
}
|
||||
p.xcheckf(rerr, "write")
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// Read reads directly from the underlying connection (TCP, TLS). For internal use
|
||||
// only, to implement io.Reader.
|
||||
func (p *Proto) Read(buf []byte) (n int, err error) {
|
||||
return p.conn.Read(buf)
|
||||
}
|
||||
|
||||
func (p *Proto) xflush() {
|
||||
// Not writing any more when connection is broken.
|
||||
if p.connBroken {
|
||||
return
|
||||
}
|
||||
|
||||
err := p.xbw.Flush()
|
||||
p.xcheckf(err, "flush")
|
||||
|
||||
// If compression is active, we need to flush the deflate stream.
|
||||
if p.compress {
|
||||
err := p.xflateWriter.Flush()
|
||||
p.xcheckf(err, "flush deflate")
|
||||
err = p.xflateBW.Flush()
|
||||
p.xcheckf(err, "flush deflate buffer")
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Proto) xtraceread(level slog.Level) func() {
|
||||
if p.tr == nil {
|
||||
// For ParseUntagged and other parse functions.
|
||||
return func() {}
|
||||
}
|
||||
p.tr.SetTrace(level)
|
||||
return func() {
|
||||
p.tr.SetTrace(mlog.LevelTrace)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Proto) xtracewrite(level slog.Level) func() {
|
||||
if p.xtw == nil {
|
||||
// For ParseUntagged and other parse functions.
|
||||
return func() {}
|
||||
}
|
||||
|
||||
p.xflush()
|
||||
p.xtw.SetTrace(level)
|
||||
return func() {
|
||||
p.xflush()
|
||||
p.xtw.SetTrace(mlog.LevelTrace)
|
||||
}
|
||||
}
|
||||
|
||||
// Close closes the connection, flushing and closing any compression and TLS layer.
|
||||
//
|
||||
// You may want to call Logout first. Closing a connection with a mailbox with
|
||||
// deleted messages not yet expunged will not expunge those messages.
|
||||
//
|
||||
// Closing a TLS connection that is logged out, or closing a TLS connection with
|
||||
// compression enabled (i.e. two layered streams), may cause spurious errors
|
||||
// because the server may immediately close the underlying connection when it sees
|
||||
// the connection is being closed.
|
||||
func (c *Conn) Close() (rerr error) {
|
||||
defer c.recoverErr(&rerr)
|
||||
|
||||
if c.conn == nil {
|
||||
return nil
|
||||
}
|
||||
if !c.connBroken && c.xflateWriter != nil {
|
||||
err := c.xflateWriter.Close()
|
||||
c.xcheckf(err, "close deflate writer")
|
||||
err = c.xflateBW.Flush()
|
||||
c.xcheckf(err, "flush deflate buffer")
|
||||
c.xflateWriter = nil
|
||||
c.xflateBW = nil
|
||||
}
|
||||
err := c.conn.Close()
|
||||
c.xcheckf(err, "close connection")
|
||||
c.conn = nil
|
||||
return
|
||||
}
|
||||
|
||||
// TLSConnectionState returns the TLS connection state if the connection uses TLS,
|
||||
// either because the conn passed to [New] was a TLS connection, or because
|
||||
// [Conn.StartTLS] was called.
|
||||
// TLSConnectionState returns the TLS connection state if the connection uses TLS.
|
||||
func (c *Conn) TLSConnectionState() *tls.ConnectionState {
|
||||
if conn, ok := c.conn.(*tls.Conn); ok {
|
||||
cs := conn.ConnectionState()
|
||||
@ -366,266 +127,177 @@ func (c *Conn) TLSConnectionState() *tls.ConnectionState {
|
||||
return nil
|
||||
}
|
||||
|
||||
// WriteCommandf writes a free-form IMAP command to the server. An ending \r\n is
|
||||
// written too.
|
||||
//
|
||||
// Commandf writes a free-form IMAP command to the server.
|
||||
// If tag is empty, a next unique tag is assigned.
|
||||
func (p *Proto) WriteCommandf(tag string, format string, args ...any) (rerr error) {
|
||||
defer p.recover(&rerr)
|
||||
func (c *Conn) Commandf(tag string, format string, args ...any) (rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
|
||||
if tag == "" {
|
||||
p.nextTag()
|
||||
} else {
|
||||
p.lastTag = tag
|
||||
tag = c.nextTag()
|
||||
}
|
||||
c.LastTag = tag
|
||||
|
||||
fmt.Fprintf(p.xbw, "%s %s\r\n", p.lastTag, fmt.Sprintf(format, args...))
|
||||
p.xflush()
|
||||
_, err := fmt.Fprintf(c.conn, "%s %s\r\n", tag, fmt.Sprintf(format, args...))
|
||||
c.xcheckf(err, "write command")
|
||||
return
|
||||
}
|
||||
|
||||
func (p *Proto) nextTag() string {
|
||||
p.tagGen++
|
||||
p.lastTag = fmt.Sprintf("x%03d", p.tagGen)
|
||||
return p.lastTag
|
||||
func (c *Conn) nextTag() string {
|
||||
c.tagGen++
|
||||
return fmt.Sprintf("x%03d", c.tagGen)
|
||||
}
|
||||
|
||||
// LastTag returns the tag last used for a command. For checking against a command
|
||||
// completion result.
|
||||
func (p *Proto) LastTag() string {
|
||||
return p.lastTag
|
||||
}
|
||||
|
||||
// LastTagSet sets a new last tag, as used for checking against a command completion result.
|
||||
func (p *Proto) LastTagSet(tag string) {
|
||||
p.lastTag = tag
|
||||
}
|
||||
|
||||
// ReadResponse reads from the IMAP server until a tagged response line is found.
|
||||
// Response reads from the IMAP server until a tagged response line is found.
|
||||
// The tag must be the same as the tag for the last written command.
|
||||
//
|
||||
// If an error is returned, resp can still be non-empty, and a caller may wish to
|
||||
// process resp.Untagged.
|
||||
//
|
||||
// Caller should check resp.Status for the result of the command too.
|
||||
//
|
||||
// Common types for the return error:
|
||||
// - Error, for protocol errors
|
||||
// - Various I/O errors from the underlying connection, including os.ErrDeadlineExceeded
|
||||
func (p *Proto) ReadResponse() (resp Response, rerr error) {
|
||||
defer p.recover(&rerr)
|
||||
// Result holds the status of the command. The caller must check that the status is OK.
|
||||
func (c *Conn) Response() (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
|
||||
for {
|
||||
tag := p.xnonspace()
|
||||
p.xspace()
|
||||
tag := c.xnonspace()
|
||||
c.xspace()
|
||||
if tag == "*" {
|
||||
resp.Untagged = append(resp.Untagged, p.xuntagged())
|
||||
untagged = append(untagged, c.xuntagged())
|
||||
continue
|
||||
}
|
||||
|
||||
if tag != p.lastTag {
|
||||
p.xerrorf("got tag %q, expected %q", tag, p.lastTag)
|
||||
if tag != c.LastTag {
|
||||
c.xerrorf("got tag %q, expected %q", tag, c.LastTag)
|
||||
}
|
||||
|
||||
status := p.xstatus()
|
||||
p.xspace()
|
||||
resp.Result = p.xresult(status)
|
||||
p.xcrlf()
|
||||
status := c.xstatus()
|
||||
c.xspace()
|
||||
result = c.xresult(status)
|
||||
c.xcrlf()
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// ParseCode parses a response code. The string must not have enclosing brackets.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// "APPENDUID 123 10"
|
||||
func ParseCode(s string) (code Code, rerr error) {
|
||||
p := Proto{br: bufio.NewReader(strings.NewReader(s + "]"))}
|
||||
defer p.recover(&rerr)
|
||||
code = p.xrespCode()
|
||||
p.xtake("]")
|
||||
buf, err := io.ReadAll(p.br)
|
||||
p.xcheckf(err, "read")
|
||||
if len(buf) != 0 {
|
||||
p.xerrorf("leftover data %q", buf)
|
||||
}
|
||||
return code, nil
|
||||
}
|
||||
|
||||
// ParseResult parses a line, including required crlf, as a command result line.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// "tag1 OK [APPENDUID 123 10] message added\r\n"
|
||||
func ParseResult(s string) (tag string, result Result, rerr error) {
|
||||
p := Proto{br: bufio.NewReader(strings.NewReader(s))}
|
||||
defer p.recover(&rerr)
|
||||
tag = p.xnonspace()
|
||||
p.xspace()
|
||||
status := p.xstatus()
|
||||
p.xspace()
|
||||
result = p.xresult(status)
|
||||
p.xcrlf()
|
||||
return
|
||||
}
|
||||
|
||||
// ReadUntagged reads a single untagged response line.
|
||||
func (p *Proto) ReadUntagged() (untagged Untagged, rerr error) {
|
||||
defer p.recover(&rerr)
|
||||
return p.readUntagged()
|
||||
}
|
||||
// Useful for reading lines from IDLE.
|
||||
func (c *Conn) ReadUntagged() (untagged Untagged, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
|
||||
// ParseUntagged parses a line, including required crlf, as untagged response.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// "* BYE shutting down connection\r\n"
|
||||
func ParseUntagged(s string) (untagged Untagged, rerr error) {
|
||||
p := Proto{br: bufio.NewReader(strings.NewReader(s))}
|
||||
defer p.recover(&rerr)
|
||||
untagged, rerr = p.readUntagged()
|
||||
return
|
||||
}
|
||||
|
||||
func (p *Proto) readUntagged() (untagged Untagged, rerr error) {
|
||||
defer p.recover(&rerr)
|
||||
tag := p.xnonspace()
|
||||
tag := c.xnonspace()
|
||||
if tag != "*" {
|
||||
p.xerrorf("got tag %q, expected untagged", tag)
|
||||
c.xerrorf("got tag %q, expected untagged", tag)
|
||||
}
|
||||
p.xspace()
|
||||
ut := p.xuntagged()
|
||||
c.xspace()
|
||||
ut := c.xuntagged()
|
||||
return ut, nil
|
||||
}
|
||||
|
||||
// Readline reads a line, including CRLF.
|
||||
// Used with IDLE and synchronous literals.
|
||||
func (p *Proto) Readline() (line string, rerr error) {
|
||||
defer p.recover(&rerr)
|
||||
func (c *Conn) Readline() (line string, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
|
||||
line, err := p.br.ReadString('\n')
|
||||
p.xcheckf(err, "read line")
|
||||
line, err := c.r.ReadString('\n')
|
||||
c.xcheckf(err, "read line")
|
||||
return line, nil
|
||||
}
|
||||
|
||||
func (c *Conn) readContinuation() (line string, rerr error) {
|
||||
defer c.recover(&rerr, nil)
|
||||
line, rerr = c.ReadContinuation()
|
||||
if rerr != nil {
|
||||
if resp, ok := rerr.(Response); ok {
|
||||
c.processUntagged(resp.Untagged)
|
||||
c.processResult(resp.Result)
|
||||
}
|
||||
// ReadContinuation reads a line. If it is a continuation, i.e. starts with a +, it
|
||||
// is returned without leading "+ " and without trailing crlf. Otherwise, a command
|
||||
// response is returned. A successfully read continuation can return an empty line.
|
||||
// Callers should check rerr and result.Status being empty to check if a
|
||||
// continuation was read.
|
||||
func (c *Conn) ReadContinuation() (line string, untagged []Untagged, result Result, rerr error) {
|
||||
if !c.peek('+') {
|
||||
untagged, result, rerr = c.Response()
|
||||
c.xcheckf(rerr, "reading non-continuation response")
|
||||
c.xerrorf("response status %q, expected OK", result.Status)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// ReadContinuation reads a line. If it is a continuation, i.e. starts with "+", it
|
||||
// is returned without leading "+ " and without trailing crlf. Otherwise, an error
|
||||
// is returned, which can be a Response with Untagged that a caller may wish to
|
||||
// process. A successfully read continuation can return an empty line.
|
||||
func (p *Proto) ReadContinuation() (line string, rerr error) {
|
||||
defer p.recover(&rerr)
|
||||
|
||||
if !p.peek('+') {
|
||||
var resp Response
|
||||
resp, rerr = p.ReadResponse()
|
||||
if rerr == nil {
|
||||
rerr = resp
|
||||
}
|
||||
return "", rerr
|
||||
}
|
||||
p.xtake("+ ")
|
||||
line, err := p.Readline()
|
||||
p.xcheckf(err, "read line")
|
||||
c.xtake("+ ")
|
||||
line, err := c.Readline()
|
||||
c.xcheckf(err, "read line")
|
||||
line = strings.TrimSuffix(line, "\r\n")
|
||||
return
|
||||
}
|
||||
|
||||
// Writelinef writes the formatted format and args as a single line, adding CRLF.
|
||||
// Used with IDLE and synchronous literals.
|
||||
func (p *Proto) Writelinef(format string, args ...any) (rerr error) {
|
||||
defer p.recover(&rerr)
|
||||
func (c *Conn) Writelinef(format string, args ...any) (rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
|
||||
s := fmt.Sprintf(format, args...)
|
||||
fmt.Fprintf(p.xbw, "%s\r\n", s)
|
||||
p.xflush()
|
||||
_, err := fmt.Fprintf(c.conn, "%s\r\n", s)
|
||||
c.xcheckf(err, "writeline")
|
||||
return nil
|
||||
}
|
||||
|
||||
// WriteSyncLiteral first writes the synchronous literal size, then reads the
|
||||
// continuation "+" and finally writes the data. If the literal is not accepted, an
|
||||
// error is returned, which may be a Response.
|
||||
func (p *Proto) WriteSyncLiteral(s string) (rerr error) {
|
||||
defer p.recover(&rerr)
|
||||
// Write writes directly to the connection. Write errors do take the connection's
|
||||
// panic mode into account, i.e. Write can panic.
|
||||
func (c *Conn) Write(buf []byte) (n int, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
|
||||
fmt.Fprintf(p.xbw, "{%d}\r\n", len(s))
|
||||
p.xflush()
|
||||
|
||||
plus, err := p.br.Peek(1)
|
||||
p.xcheckf(err, "read continuation")
|
||||
if plus[0] == '+' {
|
||||
_, err = p.Readline()
|
||||
p.xcheckf(err, "read continuation line")
|
||||
|
||||
defer p.xtracewrite(mlog.LevelTracedata)()
|
||||
_, err = p.xbw.Write([]byte(s))
|
||||
p.xcheckf(err, "write literal data")
|
||||
p.xtracewrite(mlog.LevelTrace)
|
||||
return nil
|
||||
}
|
||||
var resp Response
|
||||
resp, rerr = p.ReadResponse()
|
||||
if rerr == nil {
|
||||
rerr = resp
|
||||
}
|
||||
return
|
||||
n, rerr = c.conn.Write(buf)
|
||||
c.xcheckf(rerr, "write")
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func (c *Conn) processUntagged(l []Untagged) {
|
||||
for _, ut := range l {
|
||||
switch e := ut.(type) {
|
||||
case UntaggedCapability:
|
||||
c.CapAvailable = []Capability(e)
|
||||
case UntaggedEnabled:
|
||||
c.CapEnabled = append(c.CapEnabled, e...)
|
||||
}
|
||||
// WriteSyncLiteral first writes the synchronous literal size, then reads the
|
||||
// continuation "+" and finally writes the data.
|
||||
func (c *Conn) WriteSyncLiteral(s string) (rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
|
||||
_, err := fmt.Fprintf(c.conn, "{%d}\r\n", len(s))
|
||||
c.xcheckf(err, "write sync literal size")
|
||||
line, err := c.Readline()
|
||||
c.xcheckf(err, "read line")
|
||||
if !strings.HasPrefix(line, "+") {
|
||||
c.xerrorf("no continuation received for sync literal")
|
||||
}
|
||||
_, err = c.conn.Write([]byte(s))
|
||||
c.xcheckf(err, "write literal data")
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Conn) processResult(r Result) {
|
||||
if r.Code == nil {
|
||||
return
|
||||
}
|
||||
switch e := r.Code.(type) {
|
||||
case CodeCapability:
|
||||
c.CapAvailable = []Capability(e)
|
||||
}
|
||||
}
|
||||
|
||||
// transactf writes format and args as an IMAP command, using Commandf with an
|
||||
// Transactf writes format and args as an IMAP command, using Commandf with an
|
||||
// empty tag. I.e. format must not contain a tag. Transactf then reads a response
|
||||
// using ReadResponse and checks the result status is OK.
|
||||
func (c *Conn) transactf(format string, args ...any) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
func (c *Conn) Transactf(format string, args ...any) (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
|
||||
err := c.WriteCommandf("", format, args...)
|
||||
err := c.Commandf("", format, args...)
|
||||
if err != nil {
|
||||
return Response{}, err
|
||||
return nil, Result{}, err
|
||||
}
|
||||
|
||||
return c.responseOK()
|
||||
return c.ResponseOK()
|
||||
}
|
||||
|
||||
func (c *Conn) responseOK() (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
|
||||
resp, rerr = c.ReadResponse()
|
||||
c.processUntagged(resp.Untagged)
|
||||
c.processResult(resp.Result)
|
||||
if rerr == nil && resp.Status != OK {
|
||||
rerr = resp
|
||||
func (c *Conn) ResponseOK() (untagged []Untagged, result Result, rerr error) {
|
||||
untagged, result, rerr = c.Response()
|
||||
if rerr != nil {
|
||||
return nil, Result{}, rerr
|
||||
}
|
||||
return
|
||||
if result.Status != OK {
|
||||
c.xerrorf("response status %q, expected OK", result.Status)
|
||||
}
|
||||
return untagged, result, rerr
|
||||
}
|
||||
|
||||
func (c *Conn) xgetUntagged(l []Untagged, dst any) {
|
||||
if len(l) != 1 {
|
||||
c.xerrorf("got %d untagged, expected 1: %v", len(l), l)
|
||||
}
|
||||
got := l[0]
|
||||
gotv := reflect.ValueOf(got)
|
||||
dstv := reflect.ValueOf(dst)
|
||||
if gotv.Type() != dstv.Type().Elem() {
|
||||
c.xerrorf("got %v, expected %v", gotv.Type(), dstv.Type().Elem())
|
||||
}
|
||||
dstv.Elem().Set(gotv)
|
||||
}
|
||||
|
||||
// Close closes the connection without writing anything to the server.
|
||||
// You may want to call Logout. Closing a connection with a mailbox with deleted
|
||||
// messages not yet expunged will not expunge those messages.
|
||||
func (c *Conn) Close() error {
|
||||
var err error
|
||||
if c.conn != nil {
|
||||
err = c.conn.Close()
|
||||
c.conn = nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
@ -6,121 +6,73 @@ import (
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"hash"
|
||||
"io"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/mjl-/flate"
|
||||
|
||||
"github.com/mjl-/mox/mlog"
|
||||
"github.com/mjl-/mox/moxio"
|
||||
"github.com/mjl-/mox/scram"
|
||||
)
|
||||
|
||||
// Capability writes the IMAP4 "CAPABILITY" command, requesting a list of
|
||||
// capabilities from the server. They are returned in an UntaggedCapability
|
||||
// response. The server also sends capabilities in initial server greeting, in the
|
||||
// response code.
|
||||
func (c *Conn) Capability() (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
return c.transactf("capability")
|
||||
// Capability requests a list of capabilities from the server. They are returned in
|
||||
// an UntaggedCapability response. The server also sends capabilities in initial
|
||||
// server greeting, in the response code.
|
||||
func (c *Conn) Capability() (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
return c.Transactf("capability")
|
||||
}
|
||||
|
||||
// Noop writes the IMAP4 "NOOP" command, which does nothing on its own, but a
|
||||
// server will return any pending untagged responses for new message delivery and
|
||||
// changes to mailboxes.
|
||||
func (c *Conn) Noop() (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
return c.transactf("noop")
|
||||
// Noop does nothing on its own, but a server will return any pending untagged
|
||||
// responses for new message delivery and changes to mailboxes.
|
||||
func (c *Conn) Noop() (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
return c.Transactf("noop")
|
||||
}
|
||||
|
||||
// Logout ends the IMAP4 session by writing an IMAP "LOGOUT" command. [Conn.Close]
|
||||
// must still be called on this client to close the socket.
|
||||
func (c *Conn) Logout() (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
return c.transactf("logout")
|
||||
// Logout ends the IMAP session by writing a LOGOUT command. Close must still be
|
||||
// called on this client to close the socket.
|
||||
func (c *Conn) Logout() (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
return c.Transactf("logout")
|
||||
}
|
||||
|
||||
// StartTLS enables TLS on the connection with the IMAP4 "STARTTLS" command.
|
||||
func (c *Conn) StartTLS(config *tls.Config) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
resp, rerr = c.transactf("starttls")
|
||||
// Starttls enables TLS on the connection with the STARTTLS command.
|
||||
func (c *Conn) Starttls(config *tls.Config) (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
untagged, result, rerr = c.Transactf("starttls")
|
||||
c.xcheckf(rerr, "starttls command")
|
||||
|
||||
conn := c.xprefixConn()
|
||||
tlsConn := tls.Client(conn, config)
|
||||
err := tlsConn.Handshake()
|
||||
conn := tls.Client(c.conn, config)
|
||||
err := conn.Handshake()
|
||||
c.xcheckf(err, "tls handshake")
|
||||
c.conn = tlsConn
|
||||
c.conn = conn
|
||||
c.r = bufio.NewReader(conn)
|
||||
return untagged, result, nil
|
||||
}
|
||||
|
||||
// Login authenticates with username and password
|
||||
func (c *Conn) Login(username, password string) (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
return c.Transactf("login %s %s", astring(username), astring(password))
|
||||
}
|
||||
|
||||
// Authenticate with plaintext password using AUTHENTICATE PLAIN.
|
||||
func (c *Conn) AuthenticatePlain(username, password string) (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
|
||||
untagged, result, rerr = c.Transactf("authenticate plain %s", base64.StdEncoding.EncodeToString(fmt.Appendf(nil, "\u0000%s\u0000%s", username, password)))
|
||||
return
|
||||
}
|
||||
|
||||
// Login authenticates using the IMAP4 "LOGIN" command, sending the plain text
|
||||
// password to the server.
|
||||
//
|
||||
// Authentication is not allowed while the "LOGINDISABLED" capability is announced.
|
||||
// Call [Conn.StartTLS] first.
|
||||
//
|
||||
// See [Conn.AuthenticateSCRAM] for a better authentication mechanism.
|
||||
func (c *Conn) Login(username, password string) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
|
||||
fmt.Fprintf(c.xbw, "%s login %s ", c.nextTag(), astring(username))
|
||||
defer c.xtracewrite(mlog.LevelTraceauth)()
|
||||
fmt.Fprintf(c.xbw, "%s\r\n", astring(password))
|
||||
c.xtracewrite(mlog.LevelTrace) // Restore.
|
||||
return c.responseOK()
|
||||
}
|
||||
|
||||
// AuthenticatePlain executes the AUTHENTICATE command with SASL mechanism "PLAIN",
|
||||
// sending the password in plain text password to the server.
|
||||
//
|
||||
// Required capability: "AUTH=PLAIN"
|
||||
//
|
||||
// Authentication is not allowed while the "LOGINDISABLED" capability is announced.
|
||||
// Call [Conn.StartTLS] first.
|
||||
//
|
||||
// See [Conn.AuthenticateSCRAM] for a better authentication mechanism.
|
||||
func (c *Conn) AuthenticatePlain(username, password string) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
|
||||
err := c.WriteCommandf("", "authenticate plain")
|
||||
c.xcheckf(err, "writing authenticate command")
|
||||
_, rerr = c.readContinuation()
|
||||
c.xresponse(rerr, &resp)
|
||||
|
||||
defer c.xtracewrite(mlog.LevelTraceauth)()
|
||||
xw := base64.NewEncoder(base64.StdEncoding, c.xbw)
|
||||
fmt.Fprintf(xw, "\u0000%s\u0000%s", username, password)
|
||||
xw.Close()
|
||||
c.xtracewrite(mlog.LevelTrace) // Restore.
|
||||
fmt.Fprintf(c.xbw, "\r\n")
|
||||
c.xflush()
|
||||
return c.responseOK()
|
||||
}
|
||||
|
||||
// todo: implement cram-md5, write its credentials as traceauth.
|
||||
|
||||
// AuthenticateSCRAM executes the IMAP4 "AUTHENTICATE" command with one of the
|
||||
// following SASL mechanisms: SCRAM-SHA-256(-PLUS) or SCRAM-SHA-1(-PLUS).
|
||||
//
|
||||
// With SCRAM, the password is not sent to the server in plain text, but only
|
||||
// derived hashes are exchanged by both parties as proof of knowledge of password.
|
||||
//
|
||||
// Authentication is not allowed while the "LOGINDISABLED" capability is announced.
|
||||
// Call [Conn.StartTLS] first.
|
||||
//
|
||||
// Required capability: SCRAM-SHA-256-PLUS, SCRAM-SHA-256, SCRAM-SHA-1-PLUS,
|
||||
// SCRAM-SHA-1.
|
||||
// Authenticate with SCRAM-SHA-256(-PLUS) or SCRAM-SHA-1(-PLUS). With SCRAM, the
|
||||
// password is not exchanged in plaintext form, but only derived hashes are
|
||||
// exchanged by both parties as proof of knowledge of password.
|
||||
//
|
||||
// The PLUS variants bind the authentication exchange to the TLS connection,
|
||||
// detecting MitM attacks.
|
||||
func (c *Conn) AuthenticateSCRAM(mechanism string, h func() hash.Hash, username, password string) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
func (c *Conn) AuthenticateSCRAM(method string, h func() hash.Hash, username, password string) (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
|
||||
var cs *tls.ConnectionState
|
||||
lmech := strings.ToLower(mechanism)
|
||||
if strings.HasSuffix(lmech, "-plus") {
|
||||
lmethod := strings.ToLower(method)
|
||||
if strings.HasSuffix(lmethod, "-plus") {
|
||||
tlsConn, ok := c.conn.(*tls.Conn)
|
||||
if !ok {
|
||||
c.xerrorf("cannot use scram plus without tls")
|
||||
@ -131,14 +83,17 @@ func (c *Conn) AuthenticateSCRAM(mechanism string, h func() hash.Hash, username,
|
||||
sc := scram.NewClient(h, username, "", false, cs)
|
||||
clientFirst, err := sc.ClientFirst()
|
||||
c.xcheckf(err, "scram clientFirst")
|
||||
// todo: only send clientFirst if server has announced SASL-IR
|
||||
err = c.Writelinef("%s authenticate %s %s", c.nextTag(), mechanism, base64.StdEncoding.EncodeToString([]byte(clientFirst)))
|
||||
c.LastTag = c.nextTag()
|
||||
err = c.Writelinef("%s authenticate %s %s", c.LastTag, method, base64.StdEncoding.EncodeToString([]byte(clientFirst)))
|
||||
c.xcheckf(err, "writing command line")
|
||||
|
||||
xreadContinuation := func() []byte {
|
||||
var line string
|
||||
line, rerr = c.readContinuation()
|
||||
c.xresponse(rerr, &resp)
|
||||
line, untagged, result, rerr = c.ReadContinuation()
|
||||
c.xcheckf(err, "read continuation")
|
||||
if result.Status != "" {
|
||||
c.xerrorf("unexpected status %q", result.Status)
|
||||
}
|
||||
buf, err := base64.StdEncoding.DecodeString(line)
|
||||
c.xcheckf(err, "parsing base64 from remote")
|
||||
return buf
|
||||
@ -158,131 +113,83 @@ func (c *Conn) AuthenticateSCRAM(mechanism string, h func() hash.Hash, username,
|
||||
err = c.Writelinef("%s", base64.StdEncoding.EncodeToString(nil))
|
||||
c.xcheckf(err, "scram client end")
|
||||
|
||||
return c.responseOK()
|
||||
return c.ResponseOK()
|
||||
}
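// Usage sketch (assumption, not from this diff): SCRAM-SHA-256 authentication
// with the Response-returning variant of AuthenticateSCRAM, assuming
// "crypto/sha256" is imported by the caller. The hash constructor is passed so
// the client can derive the SCRAM proofs without sending the password itself.
func exampleSCRAMAuth(c *Conn, username, password string) error {
	_, err := c.AuthenticateSCRAM("SCRAM-SHA-256", sha256.New, username, password)
	return err
}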
|
||||
|
||||
// CompressDeflate enables compression with deflate on the connection by executing
|
||||
// the IMAP4 "COMPRESS=DEFLATE" command.
|
||||
//
|
||||
// Required capability: "COMPRESS=DEFLATE".
|
||||
//
|
||||
// State: Authenticated or selected.
|
||||
func (c *Conn) CompressDeflate() (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
// Enable enables capabilities for use with the connection, verifying the server has indeed enabled them.
|
||||
func (c *Conn) Enable(capabilities ...string) (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
|
||||
resp, rerr = c.transactf("compress deflate")
|
||||
untagged, result, rerr = c.Transactf("enable %s", strings.Join(capabilities, " "))
|
||||
c.xcheck(rerr)
|
||||
|
||||
c.xflateBW = bufio.NewWriter(c)
|
||||
fw0, err := flate.NewWriter(c.xflateBW, flate.DefaultCompression)
|
||||
c.xcheckf(err, "deflate") // Cannot happen.
|
||||
fw := moxio.NewFlateWriter(fw0)
|
||||
|
||||
c.compress = true
|
||||
c.xflateWriter = fw
|
||||
c.xtw = moxio.NewTraceWriter(mlog.New("imapclient", nil), "CW: ", fw)
|
||||
c.xbw = bufio.NewWriter(c.xtw)
|
||||
|
||||
rc := c.xprefixConn()
|
||||
fr := flate.NewReaderPartial(rc)
|
||||
c.tr = moxio.NewTraceReader(mlog.New("imapclient", nil), "CR: ", fr)
|
||||
c.br = bufio.NewReader(c.tr)
|
||||
|
||||
var enabled UntaggedEnabled
|
||||
c.xgetUntagged(untagged, &enabled)
|
||||
got := map[string]struct{}{}
|
||||
for _, cap := range enabled {
|
||||
got[cap] = struct{}{}
|
||||
}
|
||||
for _, cap := range capabilities {
|
||||
if _, ok := got[cap]; !ok {
|
||||
c.xerrorf("capability %q not enabled by server", cap)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
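// Usage sketch (assumption, not from this diff): enabling the QRESYNC
// extension with the variant of Enable that takes capability strings and
// verifies the server's untagged ENABLED response.
func exampleEnable(c *Conn) error {
	_, _, err := c.Enable("QRESYNC")
	return err
}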
|
||||
|
||||
// Enable enables capabilities for use with the connection by executing the IMAP4 "ENABLE" command.
|
||||
//
|
||||
// Required capability: "ENABLE" or "IMAP4rev2"
|
||||
func (c *Conn) Enable(capabilities ...Capability) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
|
||||
var caps strings.Builder
|
||||
for _, c := range capabilities {
|
||||
caps.WriteString(" " + string(c))
|
||||
}
|
||||
return c.transactf("enable%s", caps.String())
|
||||
// Select opens mailbox as active mailbox.
|
||||
func (c *Conn) Select(mailbox string) (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
return c.Transactf("select %s", astring(mailbox))
|
||||
}
|
||||
|
||||
// Select opens the mailbox with the IMAP4 "SELECT" command.
|
||||
//
|
||||
// If a mailbox is selected/active, it is automatically deselected before
|
||||
// selecting the mailbox, without permanently removing ("expunging") messages
|
||||
// marked \Deleted.
|
||||
//
|
||||
// If the mailbox cannot be opened, the connection is left in Authenticated state,
|
||||
// not Selected.
|
||||
func (c *Conn) Select(mailbox string) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
return c.transactf("select %s", astring(mailbox))
|
||||
// Examine opens mailbox as active mailbox read-only.
|
||||
func (c *Conn) Examine(mailbox string) (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
return c.Transactf("examine %s", astring(mailbox))
|
||||
}
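// Usage sketch (assumption, not from this diff): opening a mailbox read-write
// with Select, or read-only with Examine, using the variants that return
// untagged responses and a Result.
func exampleOpenMailbox(c *Conn, readOnly bool) error {
	if readOnly {
		_, _, err := c.Examine("Inbox")
		return err
	}
	_, _, err := c.Select("Inbox")
	return err
}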
|
||||
|
||||
// Examine opens the mailbox like [Conn.Select], but read-only, with the IMAP4
|
||||
// "EXAMINE" command.
|
||||
func (c *Conn) Examine(mailbox string) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
return c.transactf("examine %s", astring(mailbox))
|
||||
// Create makes a new mailbox on the server.
|
||||
func (c *Conn) Create(mailbox string) (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
return c.Transactf("create %s", astring(mailbox))
|
||||
}
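// Usage sketch (assumption, not from this diff): creating an Archive mailbox
// with the special-use variant of Create (requires the CREATE-SPECIAL-USE
// capability) and subscribing to it, using the Response-returning variants
// shown in this diff.
func exampleCreateArchive(c *Conn) error {
	if _, err := c.Create("Archive", []string{`\Archive`}); err != nil {
		return err
	}
	_, err := c.Subscribe("Archive")
	return err
}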
|
||||
|
||||
// Create makes a new mailbox on the server using the IMAP4 "CREATE" command.
|
||||
//
|
||||
// SpecialUse can only be used on servers that announced the "CREATE-SPECIAL-USE"
|
||||
// capability. Specify flags like \Archive, \Drafts, \Junk, \Sent, \Trash, \All.
|
||||
func (c *Conn) Create(mailbox string, specialUse []string) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
var useStr string
|
||||
if len(specialUse) > 0 {
|
||||
useStr = fmt.Sprintf(" USE (%s)", strings.Join(specialUse, " "))
|
||||
}
|
||||
return c.transactf("create %s%s", astring(mailbox), useStr)
|
||||
// Delete removes an entire mailbox and its messages.
|
||||
func (c *Conn) Delete(mailbox string) (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
return c.Transactf("delete %s", astring(mailbox))
|
||||
}
|
||||
|
||||
// Delete removes an entire mailbox and its messages using the IMAP4 "DELETE"
|
||||
// command.
|
||||
func (c *Conn) Delete(mailbox string) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
return c.transactf("delete %s", astring(mailbox))
|
||||
// Rename changes the name of a mailbox and all its child mailboxes.
|
||||
func (c *Conn) Rename(omailbox, nmailbox string) (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
return c.Transactf("rename %s %s", astring(omailbox), astring(nmailbox))
|
||||
}
|
||||
|
||||
// Rename changes the name of a mailbox and all its child mailboxes
|
||||
// using the IMAP4 "RENAME" command.
|
||||
func (c *Conn) Rename(omailbox, nmailbox string) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
return c.transactf("rename %s %s", astring(omailbox), astring(nmailbox))
|
||||
// Subscribe marks a mailbox as subscribed. The mailbox does not have to exist. It
|
||||
// is not an error if the mailbox is already subscribed.
|
||||
func (c *Conn) Subscribe(mailbox string) (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
return c.Transactf("subscribe %s", astring(mailbox))
|
||||
}
|
||||
|
||||
// Subscribe marks a mailbox as subscribed using the IMAP4 "SUBSCRIBE" command.
|
||||
//
|
||||
// The mailbox does not have to exist. It is not an error if the mailbox is already
|
||||
// subscribed.
|
||||
func (c *Conn) Subscribe(mailbox string) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
return c.transactf("subscribe %s", astring(mailbox))
|
||||
// Unsubscribe marks a mailbox as unsubscribed.
|
||||
func (c *Conn) Unsubscribe(mailbox string) (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
return c.Transactf("unsubscribe %s", astring(mailbox))
|
||||
}
|
||||
|
||||
// Unsubscribe marks a mailbox as unsubscribed using the IMAP4 "UNSUBSCRIBE"
|
||||
// command.
|
||||
func (c *Conn) Unsubscribe(mailbox string) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
return c.transactf("unsubscribe %s", astring(mailbox))
|
||||
}
|
||||
|
||||
// List lists mailboxes using the IMAP4 "LIST" command with the basic LIST syntax.
|
||||
// List lists mailboxes with the basic LIST syntax.
|
||||
// Pattern can contain * (match any) or % (match any except hierarchy delimiter).
|
||||
func (c *Conn) List(pattern string) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
return c.transactf(`list "" %s`, astring(pattern))
|
||||
func (c *Conn) List(pattern string) (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
return c.Transactf(`list "" %s`, astring(pattern))
|
||||
}
|
||||
|
||||
// ListFull lists mailboxes using the LIST command with the extended LIST
|
||||
// syntax requesting all supported data.
|
||||
//
|
||||
// Required capability: "LIST-EXTENDED". If "IMAP4rev2" is announced, the command
|
||||
// is also available but only with a single pattern.
|
||||
//
|
||||
// ListFull lists mailboxes with the extended LIST syntax requesting all supported data.
|
||||
// Pattern can contain * (match any) or % (match any except hierarchy delimiter).
|
||||
func (c *Conn) ListFull(subscribedOnly bool, patterns ...string) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
func (c *Conn) ListFull(subscribedOnly bool, patterns ...string) (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
var subscribedStr string
|
||||
if subscribedOnly {
|
||||
subscribedStr = "subscribed recursivematch"
|
||||
@ -290,313 +197,110 @@ func (c *Conn) ListFull(subscribedOnly bool, patterns ...string) (resp Response,
|
||||
for i, s := range patterns {
|
||||
patterns[i] = astring(s)
|
||||
}
|
||||
return c.transactf(`list (%s) "" (%s) return (subscribed children special-use status (messages uidnext uidvalidity unseen deleted size recent appendlimit))`, subscribedStr, strings.Join(patterns, " "))
|
||||
return c.Transactf(`list (%s) "" (%s) return (subscribed children special-use status (messages uidnext uidvalidity unseen deleted size recent appendlimit))`, subscribedStr, strings.Join(patterns, " "))
|
||||
}
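// Usage sketch (assumption, not from this diff): listing all mailboxes one
// hierarchy level deep with the basic LIST syntax, then all subscribed
// mailboxes with the extended syntax, using the variants that return untagged
// responses and a Result.
func exampleList(c *Conn) error {
	if _, _, err := c.List("%"); err != nil {
		return err
	}
	_, _, err := c.ListFull(true, "*")
	return err
}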
|
||||
|
||||
// Namespace requests the hierarchy separator using the IMAP4 "NAMESPACE" command.
|
||||
//
|
||||
// Required capability: "NAMESPACE" or "IMAP4rev2".
|
||||
//
|
||||
// Server will return an UntaggedNamespace response with personal/shared/other
|
||||
// namespaces if present.
|
||||
func (c *Conn) Namespace() (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
return c.transactf("namespace")
|
||||
// Namespace returns the hierarchy separator in an UntaggedNamespace response with personal/shared/other namespaces if present.
|
||||
func (c *Conn) Namespace() (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
return c.Transactf("namespace")
|
||||
}
|
||||
|
||||
// Status requests information about a mailbox using the IMAP4 "STATUS" command. For
|
||||
// example, number of messages, size, etc. At least one attribute is required.
|
||||
func (c *Conn) Status(mailbox string, attrs ...StatusAttr) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
l := make([]string, len(attrs))
|
||||
for i, a := range attrs {
|
||||
l[i] = string(a)
|
||||
// Status requests information about a mailbox, such as number of messages, size, etc.
|
||||
func (c *Conn) Status(mailbox string) (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
return c.Transactf("status %s", astring(mailbox))
|
||||
}
|
||||
|
||||
// Append adds message to mailbox with flags and optional receive time.
|
||||
func (c *Conn) Append(mailbox string, flags []string, received *time.Time, message []byte) (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
var date string
|
||||
if received != nil {
|
||||
date = ` "` + received.Format("_2-Jan-2006 15:04:05 -0700") + `"`
|
||||
}
|
||||
return c.transactf("status %s (%s)", astring(mailbox), strings.Join(l, " "))
|
||||
return c.Transactf("append %s (%s)%s {%d+}\r\n%s", astring(mailbox), strings.Join(flags, " "), date, len(message), message)
|
||||
}
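// Usage sketch (assumption, not from this diff): requesting STATUS attributes
// with the variant of Status that takes StatusAttr values (defined further
// below in types.go); the untagged STATUS response carries the counts.
func exampleStatus(c *Conn) error {
	_, err := c.Status("Inbox", StatusMessages, StatusUnseen)
	return err
}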
|
||||
|
||||
// Append represents a parameter to the IMAP4 "APPEND" or "REPLACE" commands, for
|
||||
// adding a message to mailbox, or replacing a message with a new version in a
|
||||
// mailbox.
|
||||
type Append struct {
|
||||
Flags []string // Optional, flags for the new message.
|
||||
Received *time.Time // Optional, the INTERNALDATE field, typically time at which a message was received.
|
||||
Size int64
|
||||
Data io.Reader // Required, must return Size bytes.
|
||||
// note: No idle command. Idle is better implemented by writing the request and reading and handling the responses as they come in.
|
||||
|
||||
// CloseMailbox closes the currently selected/active mailbox, permanently removing
|
||||
// any messages marked with \Deleted.
|
||||
func (c *Conn) CloseMailbox() (untagged []Untagged, result Result, rerr error) {
|
||||
return c.Transactf("close")
|
||||
}
|
||||
|
||||
// Append adds message to mailbox with flags and optional receive time using the
|
||||
// IMAP4 "APPEND" command.
|
||||
func (c *Conn) Append(mailbox string, message Append) (resp Response, rerr error) {
|
||||
return c.MultiAppend(mailbox, message)
|
||||
// Unselect closes the currently selected/active mailbox, but unlike CloseMailbox
|
||||
// does not permanently remove any messages marked with \Deleted.
|
||||
func (c *Conn) Unselect() (untagged []Untagged, result Result, rerr error) {
|
||||
return c.Transactf("unselect")
|
||||
}
|
||||
|
||||
// MultiAppend atomically adds multiple messages to the mailbox.
|
||||
//
|
||||
// Required capability: "MULTIAPPEND"
|
||||
func (c *Conn) MultiAppend(mailbox string, message Append, more ...Append) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
|
||||
fmt.Fprintf(c.xbw, "%s append %s", c.nextTag(), astring(mailbox))
|
||||
|
||||
msgs := append([]Append{message}, more...)
|
||||
for _, m := range msgs {
|
||||
var date string
|
||||
if m.Received != nil {
|
||||
date = ` "` + m.Received.Format("_2-Jan-2006 15:04:05 -0700") + `"`
|
||||
}
|
||||
|
||||
// todo: use literal8 if needed, with "UTF8()" if required.
|
||||
// todo: for larger messages, use a synchronizing literal.
|
||||
|
||||
fmt.Fprintf(c.xbw, " (%s)%s {%d+}\r\n", strings.Join(m.Flags, " "), date, m.Size)
|
||||
defer c.xtracewrite(mlog.LevelTracedata)()
|
||||
_, err := io.Copy(c.xbw, m.Data)
|
||||
c.xcheckf(err, "write message data")
|
||||
c.xtracewrite(mlog.LevelTrace) // Restore
|
||||
}
|
||||
|
||||
fmt.Fprintf(c.xbw, "\r\n")
|
||||
c.xflush()
|
||||
return c.responseOK()
|
||||
// Expunge removes messages marked as deleted for the selected mailbox.
|
||||
func (c *Conn) Expunge() (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
return c.Transactf("expunge")
|
||||
}
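// Usage sketch (assumption, not from this diff): appending a literal message
// using the Append struct variant; "strings" is assumed to be imported. Size
// must match the number of bytes Data yields.
func exampleAppend(c *Conn, mailbox, msg string) error {
	a := Append{
		Flags: []string{`\Seen`},
		Size:  int64(len(msg)),
		Data:  strings.NewReader(msg),
	}
	_, err := c.Append(mailbox, a)
	return err
}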
|
||||
|
||||
// note: No Idle or Notify command. Idle/Notify is better implemented by
|
||||
// writing the request and reading and handling the responses as they come in.
|
||||
|
||||
// CloseMailbox closes the selected/active mailbox using the IMAP4 "CLOSE" command,
|
||||
// permanently removing ("expunging") any messages marked with \Deleted.
|
||||
//
|
||||
// See [Conn.Unselect] for closing a mailbox without permanently removing messages.
|
||||
func (c *Conn) CloseMailbox() (resp Response, rerr error) {
|
||||
return c.transactf("close")
|
||||
}
|
||||
|
||||
// Unselect closes the selected/active mailbox using the IMAP4 "UNSELECT" command,
|
||||
// but unlike CloseMailbox does not permanently remove ("expunge") any messages
|
||||
// marked with \Deleted.
|
||||
//
|
||||
// Required capability: "UNSELECT" or "IMAP4rev2".
|
||||
//
|
||||
// If Unselect is not available, call [Conn.Select] with a non-existent mailbox for
|
||||
// the same effect: Deselecting a mailbox without permanently removing messages
|
||||
// marked \Deleted.
|
||||
func (c *Conn) Unselect() (resp Response, rerr error) {
|
||||
return c.transactf("unselect")
|
||||
}
|
||||
|
||||
// Expunge removes all messages marked as deleted for the selected mailbox using
|
||||
// the IMAP4 "EXPUNGE" command. If other sessions marked messages as deleted, even
|
||||
// if they aren't visible in the session, they are removed as well.
|
||||
//
|
||||
// UIDExpunge gives more control over which messages are removed.
|
||||
func (c *Conn) Expunge() (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
return c.transactf("expunge")
|
||||
}
|
||||
|
||||
// UIDExpunge is like expunge, but only removes messages matching UID set, using
|
||||
// the IMAP4 "UID EXPUNGE" command.
|
||||
//
|
||||
// Required capability: "UIDPLUS" or "IMAP4rev2".
|
||||
func (c *Conn) UIDExpunge(uidSet NumSet) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
return c.transactf("uid expunge %s", uidSet.String())
|
||||
// UIDExpunge is like expunge, but only removes messages matching uidSet.
|
||||
func (c *Conn) UIDExpunge(uidSet NumSet) (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
return c.Transactf("uid expunge %s", uidSet.String())
|
||||
}
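// Usage sketch (assumption, not from this diff): expunging specific UIDs with
// the NumSet-based UIDExpunge variant, parsing the set with ParseNumSet
// (defined further below in types.go).
func exampleUIDExpunge(c *Conn, uids string) error {
	ns, err := ParseNumSet(uids)
	if err != nil {
		return err
	}
	_, _, err = c.UIDExpunge(ns)
	return err
}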
|
||||
|
||||
// Note: No search, fetch command yet due to its large syntax.
|
||||
|
||||
// MSNStoreFlagsSet stores a new set of flags for messages matching message
|
||||
// sequence numbers (MSNs) from sequence set with the IMAP4 "STORE" command.
|
||||
//
|
||||
// If silent, no untagged responses with the updated flags will be sent by the
|
||||
// server.
|
||||
//
|
||||
// Method [Conn.UIDStoreFlagsSet], which operates on a uid set, should be
|
||||
// preferred.
|
||||
func (c *Conn) MSNStoreFlagsSet(seqset string, silent bool, flags ...string) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
// StoreFlagsSet stores a new set of flags for messages from seqset with the STORE command.
|
||||
// If silent, no untagged responses with the updated flags will be sent by the server.
|
||||
func (c *Conn) StoreFlagsSet(seqset string, silent bool, flags ...string) (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
item := "flags"
|
||||
if silent {
|
||||
item += ".silent"
|
||||
}
|
||||
return c.transactf("store %s %s (%s)", seqset, item, strings.Join(flags, " "))
|
||||
return c.Transactf("store %s %s (%s)", seqset, item, strings.Join(flags, " "))
|
||||
}
|
||||
|
||||
// MSNStoreFlagsAdd is like [Conn.MSNStoreFlagsSet], but only adds flags, leaving
|
||||
// current flags on the message intact.
|
||||
//
|
||||
// Method [Conn.UIDStoreFlagsAdd], which operates on a uid set, should be
|
||||
// preferred.
|
||||
func (c *Conn) MSNStoreFlagsAdd(seqset string, silent bool, flags ...string) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
// StoreFlagsAdd is like StoreFlagsSet, but only adds flags, leaving current flags on the message intact.
|
||||
func (c *Conn) StoreFlagsAdd(seqset string, silent bool, flags ...string) (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
item := "+flags"
|
||||
if silent {
|
||||
item += ".silent"
|
||||
}
|
||||
return c.transactf("store %s %s (%s)", seqset, item, strings.Join(flags, " "))
|
||||
return c.Transactf("store %s %s (%s)", seqset, item, strings.Join(flags, " "))
|
||||
}
|
||||
|
||||
// MSNStoreFlagsClear is like [Conn.MSNStoreFlagsSet], but only removes flags,
|
||||
// leaving other flags on the message intact.
|
||||
//
|
||||
// Method [Conn.UIDStoreFlagsClear], which operates on a uid set, should be
|
||||
// preferred.
|
||||
func (c *Conn) MSNStoreFlagsClear(seqset string, silent bool, flags ...string) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
// StoreFlagsClear is like StoreFlagsSet, but only removes flags, leaving other flags on the message intact.
|
||||
func (c *Conn) StoreFlagsClear(seqset string, silent bool, flags ...string) (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
item := "-flags"
|
||||
if silent {
|
||||
item += ".silent"
|
||||
}
|
||||
return c.transactf("store %s %s (%s)", seqset, item, strings.Join(flags, " "))
|
||||
return c.Transactf("store %s %s (%s)", seqset, item, strings.Join(flags, " "))
|
||||
}
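// Usage sketch (assumption, not from this diff): silently marking a UID range
// as seen with the Response-returning UIDStoreFlagsAdd variant; with silent
// set, the server sends no untagged responses for the flag change.
func exampleMarkSeen(c *Conn, uidSet string) error {
	_, err := c.UIDStoreFlagsAdd(uidSet, true, `\Seen`)
	return err
}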
|
||||
|
||||
// UIDStoreFlagsSet stores a new set of flags for messages matching UIDs from
|
||||
// uidSet with the IMAP4 "UID STORE" command.
|
||||
//
|
||||
// If silent, no untagged responses with the updated flags will be sent by the
|
||||
// server.
|
||||
//
|
||||
// Required capability: "UIDPLUS" or "IMAP4rev2".
|
||||
func (c *Conn) UIDStoreFlagsSet(uidSet string, silent bool, flags ...string) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
item := "flags"
|
||||
if silent {
|
||||
item += ".silent"
|
||||
}
|
||||
return c.transactf("uid store %s %s (%s)", uidSet, item, strings.Join(flags, " "))
|
||||
// Copy adds messages from the sequences in seqSet in the currently selected/active mailbox to dstMailbox.
|
||||
func (c *Conn) Copy(seqSet NumSet, dstMailbox string) (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
return c.Transactf("copy %s %s", seqSet.String(), astring(dstMailbox))
|
||||
}
|
||||
|
||||
// UIDStoreFlagsAdd is like UIDStoreFlagsSet, but only adds flags, leaving
|
||||
// current flags on the message intact.
|
||||
//
|
||||
// Required capability: "UIDPLUS" or "IMAP4rev2".
|
||||
func (c *Conn) UIDStoreFlagsAdd(uidSet string, silent bool, flags ...string) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
item := "+flags"
|
||||
if silent {
|
||||
item += ".silent"
|
||||
}
|
||||
return c.transactf("uid store %s %s (%s)", uidSet, item, strings.Join(flags, " "))
|
||||
// UIDCopy is like copy, but operates on UIDs.
|
||||
func (c *Conn) UIDCopy(uidSet NumSet, dstMailbox string) (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
return c.Transactf("uid copy %s %s", uidSet.String(), astring(dstMailbox))
|
||||
}
|
||||
|
||||
// UIDStoreFlagsClear is like UIDStoreFlagsSet, but only removes flags, leaving
|
||||
// other flags on the message intact.
|
||||
//
|
||||
// Required capability: "UIDPLUS" or "IMAP4rev2".
|
||||
func (c *Conn) UIDStoreFlagsClear(uidSet string, silent bool, flags ...string) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
item := "-flags"
|
||||
if silent {
|
||||
item += ".silent"
|
||||
}
|
||||
return c.transactf("uid store %s %s (%s)", uidSet, item, strings.Join(flags, " "))
|
||||
// Move moves messages from the sequences in seqSet in the currently selected/active mailbox to dstMailbox.
|
||||
func (c *Conn) Move(seqSet NumSet, dstMailbox string) (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
return c.Transactf("move %s %s", seqSet.String(), astring(dstMailbox))
|
||||
}
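// Usage sketch (assumption, not from this diff): copying messages by UID to
// another mailbox with the NumSet-based UIDCopy variant; UIDMove works the
// same way but removes the originals from the selected mailbox.
func exampleArchive(c *Conn, uids NumSet, archiveMailbox string) error {
	_, _, err := c.UIDCopy(uids, archiveMailbox)
	return err
}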
|
||||
|
||||
// MSNCopy adds messages from the sequences in the sequence set in the
|
||||
// selected/active mailbox to destMailbox using the IMAP4 "COPY" command.
|
||||
//
|
||||
// Method [Conn.UIDCopy], operating on UIDs instead of sequence numbers, should be
|
||||
// preferred.
|
||||
func (c *Conn) MSNCopy(seqSet string, destMailbox string) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
return c.transactf("copy %s %s", seqSet, astring(destMailbox))
|
||||
}
|
||||
|
||||
// UIDCopy is like copy, but operates on UIDs, using the IMAP4 "UID COPY" command.
|
||||
//
|
||||
// Required capability: "UIDPLUS" or "IMAP4rev2".
|
||||
func (c *Conn) UIDCopy(uidSet string, destMailbox string) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
return c.transactf("uid copy %s %s", uidSet, astring(destMailbox))
|
||||
}
|
||||
|
||||
// MSNSearch returns messages from the sequence set in the selected/active mailbox
|
||||
// that match the search criteria using the IMAP4 "SEARCH" command.
|
||||
//
|
||||
// Method [Conn.UIDSearch], operating on UIDs instead of sequence numbers, should be
|
||||
// preferred.
|
||||
func (c *Conn) MSNSearch(seqSet string, criteria string) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
return c.transactf("search %s %s", seqSet, criteria)
|
||||
}
|
||||
|
||||
// UIDSearch returns messages from the uid set in the selected/active mailbox that
|
||||
// match the search criteria using the IMAP4 "SEARCH" command.
|
||||
//
|
||||
// Criteria is a search program, see RFC 9051 and RFC 3501 for details.
|
||||
//
|
||||
// Required capability: "UIDPLUS" or "IMAP4rev2".
|
||||
func (c *Conn) UIDSearch(seqSet string, criteria string) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
return c.transactf("search %s %s", seqSet, criteria)
|
||||
}
|
||||
|
||||
// MSNMove moves messages from the sequence set in the selected/active mailbox to
|
||||
// destMailbox using the IMAP4 "MOVE" command.
|
||||
//
|
||||
// Required capability: "MOVE" or "IMAP4rev2".
|
||||
//
|
||||
// Method [Conn.UIDMove], operating on UIDs instead of sequence numbers, should be
|
||||
// preferred.
|
||||
func (c *Conn) MSNMove(seqSet string, destMailbox string) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
return c.transactf("move %s %s", seqSet, astring(destMailbox))
|
||||
}
|
||||
|
||||
// UIDMove is like move, but operates on UIDs, using the IMAP4 "UID MOVE" command.
|
||||
//
|
||||
// Required capability: "MOVE" or "IMAP4rev2".
|
||||
func (c *Conn) UIDMove(uidSet string, destMailbox string) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
return c.transactf("uid move %s %s", uidSet, astring(destMailbox))
|
||||
}
|
||||
|
||||
// MSNReplace is like the preferred [Conn.UIDReplace], but operates on a message
|
||||
// sequence number (MSN) instead of a UID.
|
||||
//
|
||||
// Required capability: "REPLACE".
|
||||
//
|
||||
// Method [Conn.UIDReplace], operating on UIDs instead of sequence numbers, should be
|
||||
// preferred.
|
||||
func (c *Conn) MSNReplace(msgseq string, mailbox string, msg Append) (resp Response, rerr error) {
|
||||
// todo: parse msgseq, must be nznumber, with a known msgseq. or "*" with at least one message.
|
||||
return c.replace("replace", msgseq, mailbox, msg)
|
||||
}
|
||||
|
||||
// UIDReplace uses the IMAP4 "UID REPLACE" command to replace a message from the
|
||||
// selected/active mailbox with a new/different version of the message in the named
|
||||
// mailbox, which may be the same or different than the selected mailbox.
|
||||
//
|
||||
// The replaced message is indicated by uid.
|
||||
//
|
||||
// Required capability: "REPLACE".
|
||||
func (c *Conn) UIDReplace(uid string, mailbox string, msg Append) (resp Response, rerr error) {
|
||||
// todo: parse uid, must be nznumber, with a known uid. or "*" with at least one message.
|
||||
return c.replace("uid replace", uid, mailbox, msg)
|
||||
}
|
||||
|
||||
func (c *Conn) replace(cmd string, num string, mailbox string, msg Append) (resp Response, rerr error) {
|
||||
defer c.recover(&rerr, &resp)
|
||||
|
||||
// todo: use synchronizing literal for larger messages.
|
||||
|
||||
var date string
|
||||
if msg.Received != nil {
|
||||
date = ` "` + msg.Received.Format("_2-Jan-2006 15:04:05 -0700") + `"`
|
||||
}
|
||||
// todo: only use literal8 if needed, possibly with "UTF8()"
|
||||
// todo: encode mailbox
|
||||
err := c.WriteCommandf("", "%s %s %s (%s)%s ~{%d+}", cmd, num, astring(mailbox), strings.Join(msg.Flags, " "), date, msg.Size)
|
||||
c.xcheckf(err, "writing replace command")
|
||||
|
||||
defer c.xtracewrite(mlog.LevelTracedata)()
|
||||
_, err = io.Copy(c.xbw, msg.Data)
|
||||
c.xcheckf(err, "write message data")
|
||||
c.xtracewrite(mlog.LevelTrace)
|
||||
|
||||
fmt.Fprintf(c.xbw, "\r\n")
|
||||
c.xflush()
|
||||
|
||||
return c.responseOK()
|
||||
// UIDMove is like move, but operates on UIDs.
|
||||
func (c *Conn) UIDMove(uidSet NumSet, dstMailbox string) (untagged []Untagged, result Result, rerr error) {
|
||||
defer c.recover(&rerr)
|
||||
return c.Transactf("uid move %s %s", uidSet.String(), astring(dstMailbox))
|
||||
}
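// Usage sketch (assumption, not from this diff): replacing an existing draft,
// identified by its UID, with a new version using UIDReplace and the Append
// struct; "strings" is assumed to be imported.
func exampleReplaceDraft(c *Conn, uid, mailbox, msg string) error {
	a := Append{
		Flags: []string{`\Draft`},
		Size:  int64(len(msg)),
		Data:  strings.NewReader(msg),
	}
	_, err := c.UIDReplace(uid, mailbox, a)
	return err
}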
|
||||
|
@ -1,38 +0,0 @@
|
||||
package imapclient
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func FuzzParser(f *testing.F) {
|
||||
/*
|
||||
Gathering all untagged responses and command completion results from the RFCs:
|
||||
|
||||
cd ../rfc
|
||||
(
|
||||
grep ' S: \* [A-Z]' * | sed 's/^.*S: //g'
|
||||
grep -E ' S: [^ *]+ (OK|NO|BAD) ' * | sed 's/^.*S: //g'
|
||||
) | grep -v '\.\.\/' | sort | uniq >../testdata/imapclient/fuzzseed.txt
|
||||
*/
|
||||
buf, err := os.ReadFile("../testdata/imapclient/fuzzseed.txt")
|
||||
if err != nil {
|
||||
f.Fatalf("reading seed: %v", err)
|
||||
}
|
||||
for _, s := range strings.Split(string(buf), "\n") {
|
||||
f.Add(s + "\r\n")
|
||||
}
|
||||
f.Add("1:3")
|
||||
f.Add("3:1")
|
||||
f.Add("3,1")
|
||||
f.Add("*")
|
||||
|
||||
f.Fuzz(func(t *testing.T, data string) {
|
||||
ParseUntagged(data)
|
||||
ParseCode(data)
|
||||
ParseResult(data)
|
||||
ParseNumSet(data)
|
||||
ParseUIDRange(data)
|
||||
})
|
||||
}
|
1707
imapclient/parse.go
1707
imapclient/parse.go
File diff suppressed because it is too large
@ -1,42 +0,0 @@
|
||||
package imapclient
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func tcheckf(t *testing.T, err error, format string, args ...any) {
|
||||
if err != nil {
|
||||
t.Fatalf("%s: %s", fmt.Sprintf(format, args...), err)
|
||||
}
|
||||
}
|
||||
|
||||
func tcompare(t *testing.T, a, b any) {
|
||||
if !reflect.DeepEqual(a, b) {
|
||||
t.Fatalf("got:\n%#v\nexpected:\n%#v", a, b)
|
||||
}
|
||||
}
|
||||
|
||||
func uint32ptr(v uint32) *uint32 { return &v }
|
||||
|
||||
func TestParse(t *testing.T) {
|
||||
code, err := ParseCode("COPYUID 1 1:3 2:4")
|
||||
tcheckf(t, err, "parsing code")
|
||||
tcompare(t, code,
|
||||
CodeCopyUID{
|
||||
DestUIDValidity: 1,
|
||||
From: []NumRange{{First: 1, Last: uint32ptr(3)}},
|
||||
To: []NumRange{{First: 2, Last: uint32ptr(4)}},
|
||||
},
|
||||
)
|
||||
|
||||
ut, err := ParseUntagged("* BYE done\r\n")
|
||||
tcheckf(t, err, "parsing untagged")
|
||||
tcompare(t, ut, UntaggedBye{Text: "done"})
|
||||
|
||||
tag, result, err := ParseResult("tag1 OK [ALERT] Hello\r\n")
|
||||
tcheckf(t, err, "parsing result")
|
||||
tcompare(t, tag, "tag1")
|
||||
tcompare(t, result, Result{Status: OK, Code: CodeWord("ALERT"), Text: "Hello"})
|
||||
}
|
@ -1,41 +0,0 @@
|
||||
package imapclient
|
||||
|
||||
import (
|
||||
"io"
|
||||
"net"
|
||||
)
|
||||
|
||||
// prefixConn is a net.Conn with a buffer from which the first reads are satisfied.
|
||||
// Used for STARTTLS, where we already did a buffered read of initial TLS data.
|
||||
type prefixConn struct {
|
||||
prefix []byte
|
||||
net.Conn
|
||||
}
|
||||
|
||||
func (c *prefixConn) Read(buf []byte) (int, error) {
|
||||
if len(c.prefix) > 0 {
|
||||
n := min(len(buf), len(c.prefix))
|
||||
copy(buf[:n], c.prefix[:n])
|
||||
c.prefix = c.prefix[n:]
|
||||
if len(c.prefix) == 0 {
|
||||
c.prefix = nil
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
return c.Conn.Read(buf)
|
||||
}
|
||||
|
||||
// xprefixConn checks if there are any buffered unconsumed reads. If not, it
|
||||
// returns c.conn. Otherwise, it returns a *prefixConn from which the buffered data
|
||||
// can be read followed by data from c.conn.
|
||||
func (c *Conn) xprefixConn() net.Conn {
|
||||
n := c.br.Buffered()
|
||||
if n == 0 {
|
||||
return c.conn
|
||||
}
|
||||
|
||||
buf := make([]byte, n)
|
||||
_, err := io.ReadFull(c.br, buf)
|
||||
c.xcheckf(err, "get buffered data")
|
||||
return &prefixConn{buf, c.conn}
|
||||
}
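// Illustration sketch (assumption, not from this diff): prefixConn first
// replays its buffered prefix, then falls through to the wrapped net.Conn.
// net.Pipe is used here only to obtain a concrete net.Conn.
func examplePrefixConn() {
	client, _ := net.Pipe()
	pc := &prefixConn{prefix: []byte("* OK"), Conn: client}
	buf := make([]byte, 4)
	n, _ := pc.Read(buf) // Reads "* OK" from the prefix, not from the pipe.
	_ = buf[:n]
}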
|
@ -2,57 +2,35 @@ package imapclient
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Capability is a known string for use with the ENABLE command and ENABLED response, and
|
||||
// CAPABILITY responses. Servers could send unknown values. Always in upper case.
|
||||
// Capability is a known string for use with the ENABLE and CAPABILITY commands.
|
||||
type Capability string
|
||||
|
||||
const (
|
||||
CapIMAP4rev1 Capability = "IMAP4REV1" // ../rfc/3501:1310
|
||||
CapIMAP4rev2 Capability = "IMAP4REV2" // ../rfc/9051:1219
|
||||
CapLoginDisabled Capability = "LOGINDISABLED" // ../rfc/3501:3792 ../rfc/9051:5436
|
||||
CapStartTLS Capability = "STARTTLS" // ../rfc/3501:1327 ../rfc/9051:1238
|
||||
CapAuthPlain Capability = "AUTH=PLAIN" // ../rfc/3501:1327 ../rfc/9051:1238
|
||||
CapAuthExternal Capability = "AUTH=EXTERNAL" // ../rfc/4422:1575
|
||||
CapAuthSCRAMSHA256Plus Capability = "AUTH=SCRAM-SHA-256-PLUS" // ../rfc/7677:80
|
||||
CapAuthSCRAMSHA256 Capability = "AUTH=SCRAM-SHA-256"
|
||||
CapAuthSCRAMSHA1Plus Capability = "AUTH=SCRAM-SHA-1-PLUS" // ../rfc/5802:465
|
||||
CapAuthSCRAMSHA1 Capability = "AUTH=SCRAM-SHA-1"
|
||||
CapAuthCRAMMD5 Capability = "AUTH=CRAM-MD5" // ../rfc/2195:80
|
||||
CapLiteralPlus Capability = "LITERAL+" // ../rfc/2088:45
|
||||
CapLiteralMinus Capability = "LITERAL-" // ../rfc/7888:26 ../rfc/9051:847 Default since IMAP4rev2
|
||||
CapIdle Capability = "IDLE" // ../rfc/2177:69 ../rfc/9051:3542 Default since IMAP4rev2
|
||||
CapNamespace Capability = "NAMESPACE" // ../rfc/2342:130 ../rfc/9051:135 Default since IMAP4rev2
|
||||
CapBinary Capability = "BINARY" // ../rfc/3516:100
|
||||
CapUnselect Capability = "UNSELECT" // ../rfc/3691:78 ../rfc/9051:3667 Default since IMAP4rev2
|
||||
CapUidplus Capability = "UIDPLUS" // ../rfc/4315:36 ../rfc/9051:8015 Default since IMAP4rev2
|
||||
CapEsearch Capability = "ESEARCH" // ../rfc/4731:69 ../rfc/9051:8016 Default since IMAP4rev2
|
||||
CapEnable Capability = "ENABLE" // ../rfc/5161:52 ../rfc/9051:8016 Default since IMAP4rev2
|
||||
CapListExtended Capability = "LIST-EXTENDED" // ../rfc/5258:150 ../rfc/9051:7987 Syntax except multiple mailboxes default since IMAP4rev2
|
||||
CapSpecialUse Capability = "SPECIAL-USE" // ../rfc/6154:156 ../rfc/9051:8021 Special-use attributes in LIST responses by default since IMAP4rev2
|
||||
CapMove Capability = "MOVE" // ../rfc/6851:87 ../rfc/9051:8018 Default since IMAP4rev2
|
||||
CapUTF8Only Capability = "UTF8=ONLY"
|
||||
CapUTF8Accept Capability = "UTF8=ACCEPT"
|
||||
CapCondstore Capability = "CONDSTORE" // ../rfc/7162:411
|
||||
CapQresync Capability = "QRESYNC" // ../rfc/7162:1376
|
||||
CapID Capability = "ID" // ../rfc/2971:80
|
||||
CapMetadata Capability = "METADATA" // ../rfc/5464:124
|
||||
CapMetadataServer Capability = "METADATA-SERVER" // ../rfc/5464:124
|
||||
CapSaveDate Capability = "SAVEDATE" // ../rfc/8514
|
||||
CapCreateSpecialUse Capability = "CREATE-SPECIAL-USE" // ../rfc/6154:296
|
||||
CapCompressDeflate Capability = "COMPRESS=DEFLATE" // ../rfc/4978:65
|
||||
CapListMetadata Capability = "LIST-METADATA" // ../rfc/9590:73
|
||||
CapMultiAppend Capability = "MULTIAPPEND" // ../rfc/3502:33
|
||||
CapReplace Capability = "REPLACE" // ../rfc/8508:155
|
||||
CapPreview Capability = "PREVIEW" // ../rfc/8970:114
|
||||
CapMultiSearch Capability = "MULTISEARCH" // ../rfc/7377:187
|
||||
CapNotify Capability = "NOTIFY" // ../rfc/5465:195
|
||||
CapUIDOnly Capability = "UIDONLY" // ../rfc/9586:129
|
||||
CapIMAP4rev1 Capability = "IMAP4rev1"
|
||||
CapIMAP4rev2 Capability = "IMAP4rev2"
|
||||
CapLoginDisabled Capability = "LOGINDISABLED"
|
||||
CapStarttls Capability = "STARTTLS"
|
||||
CapAuthPlain Capability = "AUTH=PLAIN"
|
||||
CapLiteralPlus Capability = "LITERAL+"
|
||||
CapLiteralMinus Capability = "LITERAL-"
|
||||
CapIdle Capability = "IDLE"
|
||||
CapNamespace Capability = "NAMESPACE"
|
||||
CapBinary Capability = "BINARY"
|
||||
CapUnselect Capability = "UNSELECT"
|
||||
CapUidplus Capability = "UIDPLUS"
|
||||
CapEsearch Capability = "ESEARCH"
|
||||
CapEnable Capability = "ENABLE"
|
||||
CapSave Capability = "SAVE"
|
||||
CapListExtended Capability = "LIST-EXTENDED"
|
||||
CapSpecialUse Capability = "SPECIAL-USE"
|
||||
CapMove Capability = "MOVE"
|
||||
CapUTF8Only Capability = "UTF8=ONLY"
|
||||
CapUTF8Accept Capability = "UTF8=ACCEPT"
|
||||
CapID Capability = "ID" // ../rfc/2971:80
|
||||
)
|
||||
|
||||
// Status is the tagged final result of a command.
|
||||
@ -64,144 +42,73 @@ const (
|
||||
OK Status = "OK" // Command succeeded.
|
||||
)
|
||||
|
||||
// Response is a response to an IMAP command including any preceding untagged
|
||||
// responses. Response implements the error interface through result.
|
||||
//
|
||||
// See [UntaggedResponseGet] and [UntaggedResponseList] to retrieve specific types
|
||||
// of untagged responses.
|
||||
type Response struct {
|
||||
Untagged []Untagged
|
||||
Result
|
||||
}
|
||||
|
||||
var (
|
||||
ErrMissing = errors.New("no response of type") // Returned by UntaggedResponseGet.
|
||||
ErrMultiple = errors.New("multiple responses of type") // Idem.
|
||||
)
|
||||
|
||||
// UntaggedResponseGet returns the single untagged response of type T. Only
|
||||
// [ErrMissing] or [ErrMultiple] can be returned as error.
|
||||
func UntaggedResponseGet[T Untagged](resp Response) (T, error) {
|
||||
var t T
|
||||
var have bool
|
||||
for _, e := range resp.Untagged {
|
||||
if tt, ok := e.(T); ok {
|
||||
if have {
|
||||
return t, ErrMultiple
|
||||
}
|
||||
t = tt
have = true
|
||||
}
|
||||
}
|
||||
if !have {
|
||||
return t, ErrMissing
|
||||
}
|
||||
return t, nil
|
||||
}
|
||||
|
||||
// UntaggedResponseList returns all untagged responses of type T.
|
||||
func UntaggedResponseList[T Untagged](resp Response) []T {
|
||||
var l []T
|
||||
for _, e := range resp.Untagged {
|
||||
if tt, ok := e.(T); ok {
|
||||
l = append(l, tt)
|
||||
}
|
||||
}
|
||||
return l
|
||||
}
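// Usage sketch (assumption, not from this diff): pulling typed untagged
// responses out of a Response, e.g. all LIST lines after a List call and the
// single CAPABILITY line, with ErrMissing/ErrMultiple distinguishing absence
// from duplicates.
func exampleUntagged(resp Response) {
	lists := UntaggedResponseList[UntaggedList](resp)
	_ = lists
	if caps, err := UntaggedResponseGet[UntaggedCapability](resp); err == nil {
		_ = caps
	}
}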
|
||||
|
||||
// Result is the final response for a command, indicating success or failure.
|
||||
type Result struct {
|
||||
Status Status
|
||||
Code Code // Set if response code is present.
|
||||
Text string // Any remaining text.
|
||||
RespText
|
||||
}
|
||||
|
||||
func (r Result) Error() string {
|
||||
s := fmt.Sprintf("IMAP result %s", r.Status)
|
||||
if r.Code != nil {
|
||||
s += "[" + r.Code.CodeString() + "]"
|
||||
}
|
||||
if r.Text != "" {
|
||||
s += " " + r.Text
|
||||
// CodeArg represents a response code with arguments, i.e. the data between [] in the response line.
|
||||
type CodeArg interface {
|
||||
CodeString() string
|
||||
}
|
||||
|
||||
// CodeOther is a valid but unrecognized response code.
|
||||
type CodeOther struct {
|
||||
Code string
|
||||
Args []string
|
||||
}
|
||||
|
||||
func (c CodeOther) CodeString() string {
|
||||
return c.Code + " " + strings.Join(c.Args, " ")
|
||||
}
|
||||
|
||||
// CodeWords is a code with space-separated string parameters. E.g. CAPABILITY.
|
||||
type CodeWords struct {
|
||||
Code string
|
||||
Args []string
|
||||
}
|
||||
|
||||
func (c CodeWords) CodeString() string {
|
||||
s := c.Code
|
||||
for _, w := range c.Args {
|
||||
s += " " + w
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Code represents a response code with optional arguments, i.e. the data between [] in the response line.
|
||||
type Code interface {
|
||||
CodeString() string
|
||||
// CodeList is a code with a list of space-separated strings as parameters. E.g. BADCHARSET, PERMANENTFLAGS.
|
||||
type CodeList struct {
|
||||
Code string
|
||||
Args []string // If nil, no list was present. List can also be empty.
|
||||
}
|
||||
|
||||
// CodeWord is a response code without parameters, always in upper case.
|
||||
type CodeWord string
|
||||
|
||||
func (c CodeWord) CodeString() string {
|
||||
return string(c)
|
||||
}
|
||||
|
||||
// CodeParams is an unrecognized response code with parameters.
|
||||
type CodeParams struct {
|
||||
Code string // Always in upper case.
|
||||
Args []string
|
||||
}
|
||||
|
||||
func (c CodeParams) CodeString() string {
|
||||
return c.Code + " " + strings.Join(c.Args, " ")
|
||||
}
|
||||
|
||||
// CodeCapability is a CAPABILITY response code with the capabilities supported by the server.
|
||||
type CodeCapability []Capability
|
||||
|
||||
func (c CodeCapability) CodeString() string {
|
||||
var s string
|
||||
for _, c := range c {
|
||||
s += " " + string(c)
|
||||
}
|
||||
return "CAPABILITY" + s
|
||||
}
|
||||
|
||||
type CodeBadCharset []string
|
||||
|
||||
func (c CodeBadCharset) CodeString() string {
|
||||
s := "BADCHARSET"
|
||||
if len(c) == 0 {
|
||||
func (c CodeList) CodeString() string {
|
||||
s := c.Code
|
||||
if c.Args == nil {
|
||||
return s
|
||||
}
|
||||
return s + " (" + strings.Join([]string(c), " ") + ")"
|
||||
return s + " (" + strings.Join(c.Args, " ") + ")"
|
||||
}
|
||||
|
||||
type CodePermanentFlags []string
|
||||
|
||||
func (c CodePermanentFlags) CodeString() string {
|
||||
return "PERMANENTFLAGS (" + strings.Join([]string(c), " ") + ")"
|
||||
// CodeUint is a code with a uint32 parameter, e.g. UIDNEXT and UIDVALIDITY.
|
||||
type CodeUint struct {
|
||||
Code string
|
||||
Num uint32
|
||||
}
|
||||
|
||||
type CodeUIDNext uint32
|
||||
|
||||
func (c CodeUIDNext) CodeString() string {
|
||||
return fmt.Sprintf("UIDNEXT %d", c)
|
||||
}
|
||||
|
||||
type CodeUIDValidity uint32
|
||||
|
||||
func (c CodeUIDValidity) CodeString() string {
|
||||
return fmt.Sprintf("UIDVALIDITY %d", c)
|
||||
}
|
||||
|
||||
type CodeUnseen uint32
|
||||
|
||||
func (c CodeUnseen) CodeString() string {
|
||||
return fmt.Sprintf("UNSEEN %d", c)
|
||||
func (c CodeUint) CodeString() string {
|
||||
return fmt.Sprintf("%s %d", c.Code, c.Num)
|
||||
}
|
||||
|
||||
// "APPENDUID" response code.
|
||||
type CodeAppendUID struct {
|
||||
UIDValidity uint32
|
||||
UIDs NumRange
|
||||
UID uint32
|
||||
}
|
||||
|
||||
func (c CodeAppendUID) CodeString() string {
|
||||
return fmt.Sprintf("APPENDUID %d %s", c.UIDValidity, c.UIDs.String())
|
||||
return fmt.Sprintf("APPENDUID %d %d", c.UIDValidity, c.UID)
|
||||
}
|
||||
|
||||
// "COPYUID" response code.
|
||||
@ -242,66 +149,11 @@ func (c CodeHighestModSeq) CodeString() string {
|
||||
return fmt.Sprintf("HIGHESTMODSEQ %d", c)
|
||||
}
|
||||
|
||||
// "INPROGRESS" response code.
|
||||
type CodeInProgress struct {
|
||||
Tag string // Nil is empty string.
|
||||
Current *uint32
|
||||
Goal *uint32
|
||||
}
|
||||
|
||||
func (c CodeInProgress) CodeString() string {
|
||||
// ABNF allows inprogress-tag/state with all nil values. Doesn't seem useful enough
|
||||
// to keep track of.
|
||||
if c.Tag == "" && c.Current == nil && c.Goal == nil {
|
||||
return "INPROGRESS"
|
||||
}
|
||||
|
||||
// todo: quote tag properly
|
||||
current := "nil"
|
||||
goal := "nil"
|
||||
if c.Current != nil {
|
||||
current = fmt.Sprintf("%d", *c.Current)
|
||||
}
|
||||
if c.Goal != nil {
|
||||
goal = fmt.Sprintf("%d", *c.Goal)
|
||||
}
|
||||
return fmt.Sprintf("INPROGRESS (%q %s %s)", c.Tag, current, goal)
|
||||
}
|
||||
|
||||
// "BADEVENT" response code, with the events that are supported, for the NOTIFY
|
||||
// extension.
|
||||
type CodeBadEvent []string
|
||||
|
||||
func (c CodeBadEvent) CodeString() string {
|
||||
return fmt.Sprintf("BADEVENT (%s)", strings.Join([]string(c), " "))
|
||||
}
|
||||
|
||||
// "METADATA LONGENTRIES number" response for GETMETADATA command.
|
||||
type CodeMetadataLongEntries uint32
|
||||
|
||||
func (c CodeMetadataLongEntries) CodeString() string {
|
||||
return fmt.Sprintf("METADATA LONGENTRIES %d", c)
|
||||
}
|
||||
|
||||
// "METADATA (MAXSIZE number)" response for SETMETADATA command.
|
||||
type CodeMetadataMaxSize uint32
|
||||
|
||||
func (c CodeMetadataMaxSize) CodeString() string {
|
||||
return fmt.Sprintf("METADATA (MAXSIZE %d)", c)
|
||||
}
|
||||
|
||||
// "METADATA (TOOMANY)" response for SETMETADATA command.
|
||||
type CodeMetadataTooMany struct{}
|
||||
|
||||
func (c CodeMetadataTooMany) CodeString() string {
|
||||
return "METADATA (TOOMANY)"
|
||||
}
|
||||
|
||||
// "METADATA (NOPRIVATE)" response for SETMETADATA command.
|
||||
type CodeMetadataNoPrivate struct{}
|
||||
|
||||
func (c CodeMetadataNoPrivate) CodeString() string {
|
||||
return "METADATA (NOPRIVATE)"
|
||||
// RespText represents a response line minus the leading tag.
|
||||
type RespText struct {
|
||||
Code string // The first word between [] after the status.
|
||||
CodeArg CodeArg // Set if code has a parameter.
|
||||
More string // Any remaining text.
|
||||
}
|
||||
|
||||
// atom or string.
|
||||
@ -342,30 +194,17 @@ func syncliteral(s string) string {
|
||||
// todo: make an interface that the untagged responses implement?
|
||||
type Untagged any
|
||||
|
||||
type UntaggedBye struct {
|
||||
Code Code // Set if response code is present.
|
||||
Text string // Any remaining text.
|
||||
}
|
||||
type UntaggedPreauth struct {
|
||||
Code Code // Set if response code is present.
|
||||
Text string // Any remaining text.
|
||||
}
|
||||
type UntaggedBye RespText
|
||||
type UntaggedPreauth RespText
|
||||
type UntaggedExpunge uint32
|
||||
type UntaggedExists uint32
|
||||
type UntaggedRecent uint32
|
||||
|
||||
// UntaggedCapability lists all capabilities the server implements.
|
||||
type UntaggedCapability []Capability
|
||||
|
||||
// UntaggedEnabled indicates the capabilities that were enabled on the connection
|
||||
// by the server, typically in response to an ENABLE command.
|
||||
type UntaggedEnabled []Capability
|
||||
|
||||
type UntaggedCapability []string
|
||||
type UntaggedEnabled []string
|
||||
type UntaggedResult Result
|
||||
type UntaggedFlags []string
|
||||
type UntaggedList struct {
|
||||
// ../rfc/9051:6690
|
||||
|
||||
Flags []string
|
||||
Separator byte // 0 for NIL
|
||||
Mailbox string
|
||||
@ -376,76 +215,22 @@ type UntaggedFetch struct {
|
||||
Seq uint32
|
||||
Attrs []FetchAttr
|
||||
}
|
||||
|
||||
// UntaggedUIDFetch is like UntaggedFetch, but with UIDs instead of message
|
||||
// sequence numbers, and returned instead of regular fetch responses when UIDONLY
|
||||
// is enabled.
|
||||
type UntaggedUIDFetch struct {
|
||||
UID uint32
|
||||
Attrs []FetchAttr
|
||||
}
|
||||
type UntaggedSearch []uint32
|
||||
|
||||
// ../rfc/7162:1101
|
||||
type UntaggedSearchModSeq struct {
|
||||
// ../rfc/7162:1101
|
||||
|
||||
Nums []uint32
|
||||
ModSeq int64
|
||||
}
|
||||
type UntaggedStatus struct {
|
||||
Mailbox string
|
||||
Attrs map[StatusAttr]int64 // Upper case status attributes.
|
||||
Attrs map[string]int64 // Upper case status attributes. ../rfc/9051:7059
|
||||
}
|
||||
|
||||
// Unsolicited response, indicating an annotation has changed.
|
||||
type UntaggedMetadataKeys struct {
|
||||
// ../rfc/5464:716
|
||||
|
||||
Mailbox string // Empty means not specific to mailbox.
|
||||
|
||||
// Keys that have changed. To get values (or determine absence), the server must be
|
||||
// queried.
|
||||
Keys []string
|
||||
}
|
||||
|
||||
// Annotation is a metadata server or mailbox annotation.
|
||||
type Annotation struct {
|
||||
Key string
|
||||
// Nil is represented by IsString false and a nil Value.
|
||||
IsString bool
|
||||
Value []byte
|
||||
}
|
||||
|
||||
type UntaggedMetadataAnnotations struct {
|
||||
// ../rfc/5464:683
|
||||
|
||||
Mailbox string // Empty means not specific to mailbox.
|
||||
Annotations []Annotation
|
||||
}
|
||||
|
||||
type StatusAttr string
|
||||
|
||||
// ../rfc/9051:7059 ../rfc/9208:712
|
||||
|
||||
const (
|
||||
StatusMessages StatusAttr = "MESSAGES"
|
||||
StatusUIDNext StatusAttr = "UIDNEXT"
|
||||
StatusUIDValidity StatusAttr = "UIDVALIDITY"
|
||||
StatusUnseen StatusAttr = "UNSEEN"
|
||||
StatusDeleted StatusAttr = "DELETED"
|
||||
StatusSize StatusAttr = "SIZE"
|
||||
StatusRecent StatusAttr = "RECENT"
|
||||
StatusAppendLimit StatusAttr = "APPENDLIMIT"
|
||||
StatusHighestModSeq StatusAttr = "HIGHESTMODSEQ"
|
||||
StatusDeletedStorage StatusAttr = "DELETED-STORAGE"
|
||||
)
|
||||
|
||||
type UntaggedNamespace struct {
|
||||
Personal, Other, Shared []NamespaceDescr
|
||||
}
|
||||
type UntaggedLsub struct {
|
||||
// ../rfc/3501:4833
|
||||
|
||||
Flags []string
|
||||
Separator byte
|
||||
Mailbox string
|
||||
@ -453,17 +238,15 @@ type UntaggedLsub struct {
|
||||
|
||||
// Fields are optional and zero if absent.
|
||||
type UntaggedEsearch struct {
|
||||
Tag string // ../rfc/9051:6546
|
||||
Mailbox string // For MULTISEARCH. ../rfc/7377:437
|
||||
UIDValidity uint32 // For MULTISEARCH, ../rfc/7377:438
|
||||
|
||||
UID bool
|
||||
Min uint32
|
||||
Max uint32
|
||||
All NumSet
|
||||
Count *uint32
|
||||
ModSeq int64
|
||||
Exts []EsearchDataExt
|
||||
// ../rfc/9051:6546
|
||||
Correlator string
|
||||
UID bool
|
||||
Min uint32
|
||||
Max uint32
|
||||
All NumSet
|
||||
Count *uint32
|
||||
ModSeq int64
|
||||
Exts []EsearchDataExt
|
||||
}
|
||||
|
||||
// UntaggedVanished is used in QRESYNC to send UIDs that have been removed.
|
||||
@ -472,37 +255,6 @@ type UntaggedVanished struct {
|
||||
UIDs NumSet
|
||||
}
|
||||
|
||||
// UntaggedQuotaroot lists the roots for which quota can be present.
|
||||
type UntaggedQuotaroot []string
|
||||
|
||||
// UntaggedQuota holds the quota for a quota root.
|
||||
type UntaggedQuota struct {
|
||||
Root string
|
||||
|
||||
// Always has at least one. Any QUOTA=RES-* capability not mentioned has no limit
|
||||
// or this quota root.
|
||||
Resources []QuotaResource
|
||||
}
|
||||
|
||||
// Resource types ../rfc/9208:533
|
||||
|
||||
// QuotaResourceName is the name of a resource type. More can be defined in the
|
||||
// future and encountered in the wild. Always in upper case.
|
||||
type QuotaResourceName string
|
||||
|
||||
const (
|
||||
QuotaResourceStorage = "STORAGE"
|
||||
QuotaResourceMesssage = "MESSAGE"
|
||||
QuotaResourceMailbox = "MAILBOX"
|
||||
QuotaResourceAnnotationStorage = "ANNOTATION-STORAGE"
|
||||
)
|
||||
|
||||
type QuotaResource struct {
|
||||
Name QuotaResourceName
|
||||
Usage int64 // Currently in use. Count or disk size in 1024 byte blocks.
|
||||
Limit int64 // Maximum allowed usage.
|
||||
}
|
||||
|
||||
// ../rfc/2971:184
|
||||
|
||||
type UntaggedID map[string]string
|
||||
@ -515,7 +267,6 @@ type EsearchDataExt struct {
|
||||
|
||||
type NamespaceDescr struct {
|
||||
// ../rfc/9051:6769
|
||||
|
||||
Prefix string
|
||||
Separator byte // If 0 then separator was absent.
|
||||
Exts []NamespaceExtension
|
||||
@ -523,14 +274,13 @@ type NamespaceDescr struct {
|
||||
|
||||
type NamespaceExtension struct {
|
||||
// ../rfc/9051:6773
|
||||
|
||||
Key string
|
||||
Values []string
|
||||
}
|
||||
|
||||
// FetchAttr represents a FETCH response attribute.
|
||||
type FetchAttr interface {
|
||||
Attr() string // Name of attribute in upper case, e.g. "UID".
|
||||
Attr() string // Name of attribute.
|
||||
}
|
||||
|
||||
type NumSet struct {
|
||||
@ -557,19 +307,12 @@ func (ns NumSet) String() string {
|
||||
}
|
||||
|
||||
func ParseNumSet(s string) (ns NumSet, rerr error) {
|
||||
c := Proto{br: bufio.NewReader(strings.NewReader(s))}
|
||||
c := Conn{r: bufio.NewReader(strings.NewReader(s))}
|
||||
defer c.recover(&rerr)
|
||||
ns = c.xsequenceSet()
|
||||
return
|
||||
}
|
||||
|
||||
func ParseUIDRange(s string) (nr NumRange, rerr error) {
|
||||
c := Proto{br: bufio.NewReader(strings.NewReader(s))}
|
||||
defer c.recover(&rerr)
|
||||
nr = c.xuidrange()
|
||||
return
|
||||
}
|
||||
|
||||
// NumRange is a single number or range.
|
||||
type NumRange struct {
|
||||
First uint32 // 0 for "*".
|
||||
@ -603,7 +346,6 @@ type TaggedExtComp struct {
|
||||
|
||||
type TaggedExtVal struct {
|
||||
// ../rfc/9051:7111
|
||||
|
||||
Number *int64
|
||||
SeqSet *NumSet
|
||||
Comp *TaggedExtComp // If SimpleNumber and SimpleSeqSet is nil, this is a Comp. But Comp is optional and can also be nil. Not great.
|
||||
@ -611,7 +353,6 @@ type TaggedExtVal struct {
|
||||
|
||||
type MboxListExtendedItem struct {
|
||||
// ../rfc/9051:6699
|
||||
|
||||
Tag string
|
||||
Val TaggedExtVal
|
||||
}
|
||||
@ -640,21 +381,9 @@ type Address struct {
|
||||
}
|
||||
|
||||
// "INTERNALDATE" fetch response.
|
||||
type FetchInternalDate struct {
|
||||
Date time.Time
|
||||
}
|
||||
|
||||
type FetchInternalDate string // todo: parsed time
|
||||
func (f FetchInternalDate) Attr() string { return "INTERNALDATE" }
|
||||
|
||||
// "SAVEDATE" fetch response.
|
||||
type FetchSaveDate struct {
|
||||
// ../rfc/8514:265
|
||||
|
||||
SaveDate *time.Time // nil means absent for message.
|
||||
}
|
||||
|
||||
func (f FetchSaveDate) Attr() string { return "SAVEDATE" }
|
||||
|
||||
// "RFC822.SIZE" fetch response.
|
||||
type FetchRFC822Size int64
|
||||
|
||||
@ -678,7 +407,6 @@ func (f FetchRFC822Text) Attr() string { return "RFC822.TEXT" }
|
||||
// "BODYSTRUCTURE" fetch response.
|
||||
type FetchBodystructure struct {
|
||||
// ../rfc/9051:6355
|
||||
|
||||
RespAttr string
|
||||
Body any // BodyType*
|
||||
}
|
||||
@ -688,7 +416,6 @@ func (f FetchBodystructure) Attr() string { return f.RespAttr }
|
||||
// "BODY" fetch response.
|
||||
type FetchBody struct {
|
||||
// ../rfc/9051:6756 ../rfc/9051:6985
|
||||
|
||||
RespAttr string
|
||||
Section string // todo: parse more ../rfc/9051:6985
|
||||
Offset int32
|
||||
@ -704,96 +431,36 @@ type BodyFields struct {
|
||||
Octets int32
|
||||
}
|
||||
|
||||
// BodyTypeMpart represents the body structure a multipart message, with
|
||||
// subparts and the multipart media subtype. Used in a FETCH response.
|
||||
// BodyTypeMpart represents the body structure a multipart message, with subparts and the multipart media subtype. Used in a FETCH response.
|
||||
type BodyTypeMpart struct {
|
||||
// ../rfc/9051:6411
|
||||
|
||||
Bodies []any // BodyTypeBasic, BodyTypeMsg, BodyTypeText
|
||||
MediaSubtype string
|
||||
Ext *BodyExtensionMpart
|
||||
}
|
||||
|
||||
// BodyTypeBasic represents basic information about a part, used in a FETCH
|
||||
// response.
|
||||
// BodyTypeBasic represents basic information about a part, used in a FETCH response.
|
||||
type BodyTypeBasic struct {
|
||||
// ../rfc/9051:6407
|
||||
|
||||
MediaType, MediaSubtype string
|
||||
BodyFields BodyFields
|
||||
Ext *BodyExtension1Part
|
||||
}
|
||||
|
||||
// BodyTypeMsg represents an email message as a body structure, used in a FETCH
|
||||
// response.
|
||||
// BodyTypeMsg represents an email message as a body structure, used in a FETCH response.
|
||||
type BodyTypeMsg struct {
|
||||
// ../rfc/9051:6415
|
||||
|
||||
MediaType, MediaSubtype string
|
||||
BodyFields BodyFields
|
||||
Envelope Envelope
|
||||
Bodystructure any // One of the BodyType*
|
||||
Lines int64
|
||||
Ext *BodyExtension1Part
|
||||
}
|
||||
|
||||
// BodyTypeText represents a text part as a body structure, used in a FETCH
|
||||
// response.
|
||||
// BodyTypeText represents a text part as a body structure, used in a FETCH response.
|
||||
type BodyTypeText struct {
|
||||
// ../rfc/9051:6418
|
||||
|
||||
MediaType, MediaSubtype string
|
||||
BodyFields BodyFields
|
||||
Lines int64
|
||||
Ext *BodyExtension1Part
|
||||
}
|
||||
|
||||
// BodyExtension1Part has the extensible form fields of a BODYSTRUCTURE for
|
||||
// multiparts.
|
||||
//
|
||||
// Fields in this struct are optional in IMAP4, and can be NIL or contain a value.
|
||||
// The first field is always present, otherwise the "parent" struct would have a
|
||||
// nil *BodyExtensionMpart. The second and later fields are nil when absent. For
|
||||
// non-reference types (e.g. strings), an IMAP4 NIL is represented as a pointer to
|
||||
// (*T)(nil). For reference types (e.g. slices), an IMAP4 NIL is represented by a
|
||||
// pointer to nil.
|
||||
type BodyExtensionMpart struct {
|
||||
// ../rfc/9051:5986 ../rfc/3501:4161 ../rfc/9051:6371 ../rfc/3501:4599
|
||||
|
||||
Params [][2]string
|
||||
Disposition **string
|
||||
DispositionParams *[][2]string
|
||||
Language *[]string
|
||||
Location **string
|
||||
More []BodyExtension // Nil if absent.
|
||||
}
|
||||
|
||||
// BodyExtension1Part has the extensible form fields of a BODYSTRUCTURE for
|
||||
// non-multiparts.
|
||||
//
|
||||
// Fields in this struct are optional in IMAP4, and can be NIL or contain a value.
|
||||
// The first field is always present, otherwise the "parent" struct would have a
|
||||
// nil *BodyExtension1Part. The second and later fields are nil when absent. For
|
||||
// non-reference types (e.g. strings), an IMAP4 NIL is represented as a pointer to
|
||||
// (*T)(nil). For reference types (e.g. slices), an IMAP4 NIL is represented by a
|
||||
// pointer to nil.
|
||||
type BodyExtension1Part struct {
|
||||
// ../rfc/9051:6023 ../rfc/3501:4191 ../rfc/9051:6366 ../rfc/3501:4584
|
||||
|
||||
MD5 *string
|
||||
Disposition **string
|
||||
DispositionParams *[][2]string
|
||||
Language *[]string
|
||||
Location **string
|
||||
More []BodyExtension // Nil means absent.
|
||||
}
|
||||
|
||||
// BodyExtension has the additional extension fields for future expansion of
|
||||
// extensions.
|
||||
type BodyExtension struct {
|
||||
String *string
|
||||
Number *int64
|
||||
More []BodyExtension
|
||||
}
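The pointer conventions described in the comments above are easier to see in a tiny example. The sketch below is illustrative only (the helper name is made up and not part of mox); it shows how a consumer of these structs could distinguish an absent field, an explicit IMAP4 NIL, and a present value for a Disposition (**string) field.

package main

import "fmt"

// disposition interprets a Disposition field: a nil **string means the field
// was absent, a pointer to a nil *string means an explicit IMAP4 NIL, and a
// pointer to a non-nil *string carries the value.
func disposition(d **string) string {
	switch {
	case d == nil:
		return "(absent)"
	case *d == nil:
		return "(NIL)"
	default:
		return **d
	}
}

func main() {
	var absent **string     // Field not present at all.
	var explicitNil *string // Field present, but NIL.
	value := "attachment"   // Field present with a value.
	present := &value

	fmt.Println(disposition(absent))       // (absent)
	fmt.Println(disposition(&explicitNil)) // (NIL)
	fmt.Println(disposition(&present))     // attachment
}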
|
||||
|
||||
// "BINARY" fetch response.
|
||||
@ -823,12 +490,3 @@ func (f FetchUID) Attr() string { return "UID" }
|
||||
type FetchModSeq int64
|
||||
|
||||
func (f FetchModSeq) Attr() string { return "MODSEQ" }
|
||||
|
||||
// "PREVIEW" fetch response.
|
||||
type FetchPreview struct {
|
||||
Preview *string
|
||||
}
|
||||
|
||||
// ../rfc/8970:146
|
||||
|
||||
func (f FetchPreview) Attr() string { return "PREVIEW" }
|
||||
|
@ -7,30 +7,22 @@ import (
|
||||
)
|
||||
|
||||
func TestAppend(t *testing.T) {
|
||||
testAppend(t, false)
|
||||
}
|
||||
|
||||
func TestAppendUIDOnly(t *testing.T) {
|
||||
testAppend(t, true)
|
||||
}
|
||||
|
||||
func testAppend(t *testing.T, uidonly bool) {
|
||||
defer mockUIDValidity()()
|
||||
|
||||
tc := start(t, uidonly) // note: with switchboard because this connection stays alive unlike tc2.
|
||||
tc := start(t) // note: with switchboard because this connection stays alive unlike tc2.
|
||||
defer tc.close()
|
||||
|
||||
tc2 := startNoSwitchboard(t, uidonly) // note: without switchboard because this connection will break during tests.
|
||||
defer tc2.closeNoWait()
|
||||
tc2 := startNoSwitchboard(t) // note: without switchboard because this connection will break during tests.
|
||||
defer tc2.close()
|
||||
|
||||
tc3 := startNoSwitchboard(t, uidonly)
|
||||
defer tc3.closeNoWait()
|
||||
tc3 := startNoSwitchboard(t)
|
||||
defer tc3.close()
|
||||
|
||||
tc2.login("mjl@mox.example", password0)
|
||||
tc2.client.Login("mjl@mox.example", password0)
|
||||
tc2.client.Select("inbox")
|
||||
tc.login("mjl@mox.example", password0)
|
||||
tc.client.Login("mjl@mox.example", password0)
|
||||
tc.client.Select("inbox")
|
||||
tc3.login("mjl@mox.example", password0)
|
||||
tc3.client.Login("mjl@mox.example", password0)
|
||||
|
||||
tc2.transactf("bad", "append") // Missing params.
|
||||
tc2.transactf("bad", `append inbox`) // Missing message.
|
||||
@ -38,44 +30,43 @@ func testAppend(t *testing.T, uidonly bool) {
|
||||
|
||||
// Syntax error for line ending in literal causes connection abort.
|
||||
tc2.transactf("bad", "append inbox (\\Badflag) {1+}\r\nx") // Unknown flag.
|
||||
tc2 = startNoSwitchboard(t, uidonly)
|
||||
defer tc2.closeNoWait()
|
||||
tc2.login("mjl@mox.example", password0)
|
||||
tc2 = startNoSwitchboard(t)
|
||||
defer tc2.close()
|
||||
tc2.client.Login("mjl@mox.example", password0)
|
||||
tc2.client.Select("inbox")
|
||||
|
||||
tc2.transactf("bad", "append inbox () \"bad time\" {1+}\r\nx") // Bad time.
|
||||
tc2 = startNoSwitchboard(t, uidonly)
|
||||
defer tc2.closeNoWait()
|
||||
tc2.login("mjl@mox.example", password0)
|
||||
tc2 = startNoSwitchboard(t)
|
||||
defer tc2.close()
|
||||
tc2.client.Login("mjl@mox.example", password0)
|
||||
tc2.client.Select("inbox")
|
||||
|
||||
tc2.transactf("no", "append nobox (\\Seen) \" 1-Jan-2022 10:10:00 +0100\" {1}")
|
||||
tc2.xcodeWord("TRYCREATE")
|
||||
|
||||
tc2.transactf("no", "append expungebox (\\Seen) {1}")
|
||||
tc2.xcodeWord("TRYCREATE")
|
||||
tc2.xcode("TRYCREATE")
|
||||
|
||||
tc2.transactf("ok", "append inbox (\\Seen Label1 $label2) \" 1-Jan-2022 10:10:00 +0100\" {1+}\r\nx")
|
||||
tc2.xuntagged(imapclient.UntaggedExists(1))
|
||||
tc2.xcode(imapclient.CodeAppendUID{UIDValidity: 1, UIDs: xparseUIDRange("1")})
|
||||
tc2.xcodeArg(imapclient.CodeAppendUID{UIDValidity: 1, UID: 1})
|
||||
|
||||
tc.transactf("ok", "noop")
|
||||
uid1 := imapclient.FetchUID(1)
|
||||
flags := imapclient.FetchFlags{`\Seen`, "$label2", "label1"}
|
||||
tc.xuntagged(imapclient.UntaggedExists(1), tc.untaggedFetch(1, 1, flags))
|
||||
tc.xuntagged(imapclient.UntaggedExists(1), imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, flags}})
|
||||
tc3.transactf("ok", "noop")
|
||||
tc3.xuntagged() // Inbox is not selected, nothing to report.
|
||||
|
||||
tc2.transactf("ok", "append inbox (\\Seen) \" 1-Jan-2022 10:10:00 +0100\" UTF8 (~{47+}\r\ncontent-type: just completely invalid;;\r\n\r\ntest)")
|
||||
tc2.transactf("ok", "append inbox (\\Seen) \" 1-Jan-2022 10:10:00 +0100\" UTF8 ({47+}\r\ncontent-type: just completely invalid;;\r\n\r\ntest)")
|
||||
tc2.xuntagged(imapclient.UntaggedExists(2))
|
||||
tc2.xcode(imapclient.CodeAppendUID{UIDValidity: 1, UIDs: xparseUIDRange("2")})
|
||||
tc2.xcodeArg(imapclient.CodeAppendUID{UIDValidity: 1, UID: 2})
|
||||
|
||||
tc2.transactf("ok", "append inbox (\\Seen) \" 1-Jan-2022 10:10:00 +0100\" UTF8 (~{31+}\r\ncontent-type: text/plain;\n\ntest)")
|
||||
tc2.transactf("ok", "append inbox (\\Seen) \" 1-Jan-2022 10:10:00 +0100\" UTF8 ({31+}\r\ncontent-type: text/plain;\n\ntest)")
|
||||
tc2.xuntagged(imapclient.UntaggedExists(3))
|
||||
tc2.xcode(imapclient.CodeAppendUID{UIDValidity: 1, UIDs: xparseUIDRange("3")})
|
||||
tc2.xcodeArg(imapclient.CodeAppendUID{UIDValidity: 1, UID: 3})
|
||||
|
||||
// Messages that we cannot parse are marked as application/octet-stream. Perhaps
|
||||
// the imap client knows how to deal with them.
|
||||
tc2.transactf("ok", "uid fetch 2 body")
|
||||
uid2 := imapclient.FetchUID(2)
|
||||
xbs := imapclient.FetchBodystructure{
|
||||
RespAttr: "BODY",
|
||||
Body: imapclient.BodyTypeBasic{
|
||||
@ -86,50 +77,16 @@ func testAppend(t *testing.T, uidonly bool) {
|
||||
},
|
||||
},
|
||||
}
|
||||
tc2.xuntagged(tc.untaggedFetch(2, 2, xbs))
|
||||
tc2.xuntagged(imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, xbs}})
|
||||
|
||||
// Multiappend with two messages.
|
||||
tc.transactf("ok", "noop") // Flush pending untagged responses.
|
||||
tc.transactf("ok", "append inbox {6+}\r\ntest\r\n ~{6+}\r\ntost\r\n")
|
||||
tc.xuntagged(imapclient.UntaggedExists(5))
|
||||
tc.xcode(imapclient.CodeAppendUID{UIDValidity: 1, UIDs: xparseUIDRange("4:5")})
|
||||
|
||||
// Cancelled with zero-length message.
|
||||
tc.transactf("no", "append inbox {6+}\r\ntest\r\n {0+}\r\n")
|
||||
|
||||
tclimit := startArgs(t, uidonly, false, false, true, true, "limit")
|
||||
tclimit := startArgs(t, false, false, true, true, "limit")
|
||||
defer tclimit.close()
|
||||
tclimit.login("limit@mox.example", password0)
|
||||
tclimit.client.Login("limit@mox.example", password0)
|
||||
tclimit.client.Select("inbox")
|
||||
// First message of 1 byte is within limits.
|
||||
tclimit.transactf("ok", "append inbox (\\Seen Label1 $label2) \" 1-Jan-2022 10:10:00 +0100\" {1+}\r\nx")
|
||||
tclimit.xuntagged(imapclient.UntaggedExists(1))
|
||||
// Second message would take account past limit.
|
||||
tclimit.transactf("no", "append inbox (\\Seen Label1 $label2) \" 1-Jan-2022 10:10:00 +0100\" {1+}\r\nx")
|
||||
tclimit.xcodeWord("OVERQUOTA")
|
||||
|
||||
// Empty mailbox.
|
||||
if uidonly {
|
||||
tclimit.transactf("ok", `uid store 1 flags (\deleted)`)
|
||||
} else {
|
||||
tclimit.transactf("ok", `store 1 flags (\deleted)`)
|
||||
}
|
||||
tclimit.transactf("ok", "expunge")
|
||||
|
||||
// Multiappend with first message within quota, and second message with sync
|
||||
// literal causing quota error. Request should get error response immediately.
|
||||
tclimit.transactf("no", "append inbox {1+}\r\nx {100000}")
|
||||
tclimit.xcodeWord("OVERQUOTA")
|
||||
|
||||
// Again, but second message now with non-sync literal, which is fully consumed by server.
|
||||
tclimit.client.WriteCommandf("", "append inbox {1+}\r\nx {4000+}")
|
||||
buf := make([]byte, 4000, 4002)
|
||||
for i := range buf {
|
||||
buf[i] = 'x'
|
||||
}
|
||||
buf = append(buf, "\r\n"...)
|
||||
_, err := tclimit.client.Write(buf)
|
||||
tclimit.check(err, "write append message")
|
||||
tclimit.response("no")
|
||||
tclimit.xcodeWord("OVERQUOTA")
|
||||
tclimit.xcode("OVERQUOTA")
|
||||
}
|
||||
|
@ -1,74 +1,65 @@
|
||||
package imapserver
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/hmac"
|
||||
"crypto/md5"
|
||||
"crypto/sha1"
|
||||
"crypto/sha256"
|
||||
"crypto/tls"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"golang.org/x/text/secure/precis"
|
||||
|
||||
"github.com/mjl-/mox/imapclient"
|
||||
"github.com/mjl-/mox/mox-"
|
||||
"github.com/mjl-/mox/scram"
|
||||
"github.com/mjl-/mox/store"
|
||||
)
|
||||
|
||||
func TestAuthenticateLogin(t *testing.T) {
|
||||
// NFD username and PRECIS-cleaned password.
|
||||
tc := start(t, false)
|
||||
tc := start(t)
|
||||
tc.client.Login("mo\u0301x@mox.example", password1)
|
||||
tc.close()
|
||||
}
|
||||
|
||||
func TestAuthenticatePlain(t *testing.T) {
|
||||
tc := start(t, false)
|
||||
tc := start(t)
|
||||
|
||||
tc.transactf("no", "authenticate bogus ")
|
||||
tc.transactf("bad", "authenticate plain not base64...")
|
||||
tc.transactf("no", "authenticate plain %s", base64.StdEncoding.EncodeToString([]byte("\u0000baduser\u0000badpass")))
|
||||
tc.xcodeWord("AUTHENTICATIONFAILED")
|
||||
tc.xcode("AUTHENTICATIONFAILED")
|
||||
tc.transactf("no", "authenticate plain %s", base64.StdEncoding.EncodeToString([]byte("\u0000mjl@mox.example\u0000badpass")))
|
||||
tc.xcodeWord("AUTHENTICATIONFAILED")
|
||||
tc.xcode("AUTHENTICATIONFAILED")
|
||||
tc.transactf("no", "authenticate plain %s", base64.StdEncoding.EncodeToString([]byte("\u0000mjl\u0000badpass"))) // Need email, not account.
|
||||
tc.xcodeWord("AUTHENTICATIONFAILED")
|
||||
tc.xcode("AUTHENTICATIONFAILED")
|
||||
tc.transactf("no", "authenticate plain %s", base64.StdEncoding.EncodeToString([]byte("\u0000mjl@mox.example\u0000test")))
|
||||
tc.xcodeWord("AUTHENTICATIONFAILED")
|
||||
tc.xcode("AUTHENTICATIONFAILED")
|
||||
tc.transactf("no", "authenticate plain %s", base64.StdEncoding.EncodeToString([]byte("\u0000mjl@mox.example\u0000test"+password0)))
|
||||
tc.xcodeWord("AUTHENTICATIONFAILED")
|
||||
tc.xcode("AUTHENTICATIONFAILED")
|
||||
tc.transactf("bad", "authenticate plain %s", base64.StdEncoding.EncodeToString([]byte("\u0000")))
|
||||
tc.xcode(nil)
|
||||
tc.xcode("")
|
||||
tc.transactf("no", "authenticate plain %s", base64.StdEncoding.EncodeToString([]byte("other\u0000mjl@mox.example\u0000"+password0)))
|
||||
tc.xcodeWord("AUTHORIZATIONFAILED")
|
||||
tc.xcode("AUTHORIZATIONFAILED")
|
||||
tc.transactf("ok", "authenticate plain %s", base64.StdEncoding.EncodeToString([]byte("\u0000mjl@mox.example\u0000"+password0)))
|
||||
tc.close()
|
||||
|
||||
tc = start(t, false)
|
||||
tc = start(t)
|
||||
tc.transactf("ok", "authenticate plain %s", base64.StdEncoding.EncodeToString([]byte("mjl@mox.example\u0000mjl@mox.example\u0000"+password0)))
|
||||
tc.close()
|
||||
|
||||
// NFD username and PRECIS-cleaned password.
|
||||
tc = start(t, false)
|
||||
tc = start(t)
|
||||
tc.transactf("ok", "authenticate plain %s", base64.StdEncoding.EncodeToString([]byte("mo\u0301x@mox.example\u0000mo\u0301x@mox.example\u0000"+password1)))
|
||||
tc.close()
|
||||
|
||||
tc = start(t, false)
|
||||
tc = start(t)
|
||||
tc.client.AuthenticatePlain("mjl@mox.example", password0)
|
||||
tc.close()
|
||||
|
||||
tc = start(t, false)
|
||||
tc = start(t)
|
||||
defer tc.close()
|
||||
|
||||
tc.cmdf("", "authenticate plain")
|
||||
@ -82,28 +73,6 @@ func TestAuthenticatePlain(t *testing.T) {
|
||||
tc.readstatus("ok")
|
||||
}
|
||||
|
||||
func TestLoginDisabled(t *testing.T) {
|
||||
tc := start(t, false)
|
||||
defer tc.close()
|
||||
|
||||
acc, err := store.OpenAccount(pkglog, "disabled", false)
|
||||
tcheck(t, err, "open account")
|
||||
err = acc.SetPassword(pkglog, "test1234")
|
||||
tcheck(t, err, "set password")
|
||||
err = acc.Close()
|
||||
tcheck(t, err, "close account")
|
||||
|
||||
tc.transactf("no", "authenticate plain %s", base64.StdEncoding.EncodeToString([]byte("\u0000disabled@mox.example\u0000test1234")))
|
||||
tc.xcode(nil)
|
||||
tc.transactf("no", "authenticate plain %s", base64.StdEncoding.EncodeToString([]byte("\u0000disabled@mox.example\u0000bogus")))
|
||||
tc.xcodeWord("AUTHENTICATIONFAILED")
|
||||
|
||||
tc.transactf("no", "login disabled@mox.example test1234")
|
||||
tc.xcode(nil)
|
||||
tc.transactf("no", "login disabled@mox.example bogus")
|
||||
tc.xcodeWord("AUTHENTICATIONFAILED")
|
||||
}
|
||||
|
||||
func TestAuthenticateSCRAMSHA1(t *testing.T) {
|
||||
testAuthenticateSCRAM(t, false, "SCRAM-SHA-1", sha1.New)
|
||||
}
|
||||
@ -121,7 +90,7 @@ func TestAuthenticateSCRAMSHA256PLUS(t *testing.T) {
|
||||
}
|
||||
|
||||
func testAuthenticateSCRAM(t *testing.T, tls bool, method string, h func() hash.Hash) {
|
||||
tc := startArgs(t, false, true, tls, true, true, "mjl")
|
||||
tc := startArgs(t, true, tls, true, true, "mjl")
|
||||
tc.client.AuthenticateSCRAM(method, h, "mjl@mox.example", password0)
|
||||
tc.close()
|
||||
|
||||
@ -132,11 +101,15 @@ func testAuthenticateSCRAM(t *testing.T, tls bool, method string, h func() hash.
|
||||
sc := scram.NewClient(h, username, "", noServerPlus, tc.client.TLSConnectionState())
|
||||
clientFirst, err := sc.ClientFirst()
|
||||
tc.check(err, "scram clientFirst")
|
||||
tc.client.WriteCommandf("", "authenticate %s %s", method, base64.StdEncoding.EncodeToString([]byte(clientFirst)))
|
||||
tc.client.LastTag = "x001"
|
||||
tc.writelinef("%s authenticate %s %s", tc.client.LastTag, method, base64.StdEncoding.EncodeToString([]byte(clientFirst)))
|
||||
|
||||
xreadContinuation := func() []byte {
|
||||
line, err := tc.client.ReadContinuation()
|
||||
tcheck(t, err, "read continuation")
|
||||
line, _, result, rerr := tc.client.ReadContinuation()
|
||||
tc.check(rerr, "read continuation")
|
||||
if result.Status != "" {
|
||||
tc.t.Fatalf("expected continuation")
|
||||
}
|
||||
buf, err := base64.StdEncoding.DecodeString(line)
|
||||
tc.check(err, "parsing base64 from remote")
|
||||
return buf
|
||||
@ -159,14 +132,14 @@ func testAuthenticateSCRAM(t *testing.T, tls bool, method string, h func() hash.
|
||||
} else {
|
||||
tc.writelinef("")
|
||||
}
|
||||
resp, err := tc.client.ReadResponse()
|
||||
_, result, err := tc.client.Response()
|
||||
tc.check(err, "read response")
|
||||
if string(resp.Status) != strings.ToUpper(status) {
|
||||
tc.t.Fatalf("got status %q, expected %q", resp.Status, strings.ToUpper(status))
|
||||
if string(result.Status) != strings.ToUpper(status) {
|
||||
tc.t.Fatalf("got status %q, expected %q", result.Status, strings.ToUpper(status))
|
||||
}
|
||||
}
|
||||
|
||||
tc = startArgs(t, false, true, tls, true, true, "mjl")
|
||||
tc = startArgs(t, true, tls, true, true, "mjl")
|
||||
auth("no", scram.ErrInvalidProof, "mjl@mox.example", "badpass")
|
||||
auth("no", scram.ErrInvalidProof, "mjl@mox.example", "")
|
||||
// todo: server aborts due to invalid username. we should probably make client continue with fake deterministically generated salt and result in error in the end.
|
||||
@ -174,7 +147,7 @@ func testAuthenticateSCRAM(t *testing.T, tls bool, method string, h func() hash.
|
||||
|
||||
tc.transactf("no", "authenticate bogus ")
|
||||
tc.transactf("bad", "authenticate %s not base64...", method)
|
||||
tc.transactf("no", "authenticate %s %s", method, base64.StdEncoding.EncodeToString([]byte("bad data")))
|
||||
tc.transactf("bad", "authenticate %s %s", method, base64.StdEncoding.EncodeToString([]byte("bad data")))
|
||||
|
||||
// NFD username, with PRECIS-cleaned password.
|
||||
auth("ok", nil, "mo\u0301x@mox.example", password1)
|
||||
@ -183,7 +156,7 @@ func testAuthenticateSCRAM(t *testing.T, tls bool, method string, h func() hash.
|
||||
}
|
||||
|
||||
func TestAuthenticateCRAMMD5(t *testing.T) {
|
||||
tc := start(t, false)
|
||||
tc := start(t)
|
||||
|
||||
tc.transactf("no", "authenticate bogus ")
|
||||
tc.transactf("bad", "authenticate CRAM-MD5 not base64...")
|
||||
@ -193,11 +166,15 @@ func TestAuthenticateCRAMMD5(t *testing.T) {
|
||||
auth := func(status string, username, password string) {
|
||||
t.Helper()
|
||||
|
||||
tc.client.WriteCommandf("", "authenticate CRAM-MD5")
|
||||
tc.client.LastTag = "x001"
|
||||
tc.writelinef("%s authenticate CRAM-MD5", tc.client.LastTag)
|
||||
|
||||
xreadContinuation := func() []byte {
|
||||
line, err := tc.client.ReadContinuation()
|
||||
tcheck(t, err, "read continuation")
|
||||
line, _, result, rerr := tc.client.ReadContinuation()
|
||||
tc.check(rerr, "read continuation")
|
||||
if result.Status != "" {
|
||||
tc.t.Fatalf("expected continuation")
|
||||
}
|
||||
buf, err := base64.StdEncoding.DecodeString(line)
|
||||
tc.check(err, "parsing base64 from remote")
|
||||
return buf
|
||||
@ -210,13 +187,13 @@ func TestAuthenticateCRAMMD5(t *testing.T) {
|
||||
}
|
||||
h := hmac.New(md5.New, []byte(password))
|
||||
h.Write([]byte(chal))
|
||||
data := fmt.Sprintf("%s %x", username, h.Sum(nil))
|
||||
tc.writelinef("%s", base64.StdEncoding.EncodeToString([]byte(data)))
|
||||
resp := fmt.Sprintf("%s %x", username, h.Sum(nil))
|
||||
tc.writelinef("%s", base64.StdEncoding.EncodeToString([]byte(resp)))
|
||||
|
||||
resp, err := tc.client.ReadResponse()
|
||||
_, result, err := tc.client.Response()
|
||||
tc.check(err, "read response")
|
||||
if string(resp.Status) != strings.ToUpper(status) {
|
||||
tc.t.Fatalf("got status %q, expected %q", resp.Status, strings.ToUpper(status))
|
||||
if string(result.Status) != strings.ToUpper(status) {
|
||||
tc.t.Fatalf("got status %q, expected %q", result.Status, strings.ToUpper(status))
|
||||
}
|
||||
}
|
||||
|
||||
@ -229,154 +206,7 @@ func TestAuthenticateCRAMMD5(t *testing.T) {
|
||||
tc.close()
|
||||
|
||||
// NFD username, with PRECIS-cleaned password.
|
||||
tc = start(t, false)
|
||||
tc = start(t)
|
||||
auth("ok", "mo\u0301x@mox.example", password1)
|
||||
tc.close()
|
||||
}
|
||||
|
||||
func TestAuthenticateTLSClientCert(t *testing.T) {
|
||||
tc := startArgsMore(t, false, true, true, nil, nil, true, true, "mjl", nil)
|
||||
tc.transactf("no", "authenticate external ") // No TLS auth.
|
||||
tc.close()
|
||||
|
||||
// Create a certificate, register its public key with account, and make a tls
|
||||
// client config that sends the certificate.
|
||||
clientCert0 := fakeCert(t, true)
|
||||
clientConfig := tls.Config{
|
||||
InsecureSkipVerify: true,
|
||||
Certificates: []tls.Certificate{clientCert0},
|
||||
}
|
||||
|
||||
tlspubkey, err := store.ParseTLSPublicKeyCert(clientCert0.Certificate[0])
|
||||
tcheck(t, err, "parse certificate")
|
||||
tlspubkey.Account = "mjl"
|
||||
tlspubkey.LoginAddress = "mjl@mox.example"
|
||||
tlspubkey.NoIMAPPreauth = true
|
||||
|
||||
addClientCert := func() error {
|
||||
return store.TLSPublicKeyAdd(ctxbg, &tlspubkey)
|
||||
}
|
||||
|
||||
// No preauth, explicit authenticate with TLS.
|
||||
tc = startArgsMore(t, false, true, true, nil, &clientConfig, false, true, "mjl", addClientCert)
|
||||
if tc.client.Preauth {
|
||||
t.Fatalf("preauthentication while not configured for tls public key")
|
||||
}
|
||||
tc.transactf("ok", "authenticate external ")
|
||||
tc.close()
|
||||
|
||||
// External with explicit username.
|
||||
tc = startArgsMore(t, false, true, true, nil, &clientConfig, false, true, "mjl", addClientCert)
|
||||
if tc.client.Preauth {
|
||||
t.Fatalf("preauthentication while not configured for tls public key")
|
||||
}
|
||||
tc.transactf("ok", "authenticate external %s", base64.StdEncoding.EncodeToString([]byte("mjl@mox.example")))
|
||||
tc.close()
|
||||
|
||||
// No preauth, also allow other mechanisms.
|
||||
tc = startArgsMore(t, false, true, true, nil, &clientConfig, false, true, "mjl", addClientCert)
|
||||
tc.transactf("ok", "authenticate plain %s", base64.StdEncoding.EncodeToString([]byte("\u0000mjl@mox.example\u0000"+password0)))
|
||||
tc.close()
|
||||
|
||||
// No preauth, also allow other username for same account.
|
||||
tc = startArgsMore(t, false, true, true, nil, &clientConfig, false, true, "mjl", addClientCert)
|
||||
tc.transactf("ok", "authenticate plain %s", base64.StdEncoding.EncodeToString([]byte("\u0000móx@mox.example\u0000"+password0)))
|
||||
tc.close()
|
||||
|
||||
// No preauth, other mechanism must be for same account.
|
||||
acc, err := store.OpenAccount(pkglog, "other", false)
|
||||
tcheck(t, err, "open account")
|
||||
err = acc.SetPassword(pkglog, "test1234")
|
||||
tcheck(t, err, "set password")
|
||||
err = acc.Close()
|
||||
tcheck(t, err, "close account")
|
||||
tc = startArgsMore(t, false, true, true, nil, &clientConfig, false, true, "mjl", addClientCert)
|
||||
tc.transactf("no", "authenticate plain %s", base64.StdEncoding.EncodeToString([]byte("\u0000other@mox.example\u0000test1234")))
|
||||
tc.close()
|
||||
|
||||
// Starttls and external auth.
|
||||
tc = startArgsMore(t, false, true, false, nil, &clientConfig, false, true, "mjl", addClientCert)
|
||||
tc.client.StartTLS(&clientConfig)
|
||||
tc.transactf("ok", "authenticate external =")
|
||||
tc.close()
|
||||
|
||||
tlspubkey.NoIMAPPreauth = false
|
||||
err = store.TLSPublicKeyUpdate(ctxbg, &tlspubkey)
|
||||
tcheck(t, err, "update tls public key")
|
||||
|
||||
// With preauth, no authenticate command needed/allowed.
|
||||
// Already set up tls session ticket cache, for next test.
|
||||
serverConfig := tls.Config{
|
||||
Certificates: []tls.Certificate{fakeCert(t, false)},
|
||||
}
|
||||
ctx, cancel := context.WithCancel(ctxbg)
|
||||
defer cancel()
|
||||
mox.StartTLSSessionTicketKeyRefresher(ctx, pkglog, &serverConfig)
|
||||
clientConfig.ClientSessionCache = tls.NewLRUClientSessionCache(10)
|
||||
tc = startArgsMore(t, false, true, true, &serverConfig, &clientConfig, false, true, "mjl", addClientCert)
|
||||
if !tc.client.Preauth {
|
||||
t.Fatalf("not preauthentication while configured for tls public key")
|
||||
}
|
||||
cs := tc.conn.(*tls.Conn).ConnectionState()
|
||||
if cs.DidResume {
|
||||
t.Fatalf("tls connection was resumed")
|
||||
}
|
||||
tc.transactf("no", "authenticate external ") // Not allowed, already in authenticated state.
|
||||
tc.close()
|
||||
|
||||
// Authentication works with TLS resumption.
|
||||
tc = startArgsMore(t, false, true, true, &serverConfig, &clientConfig, false, true, "mjl", addClientCert)
|
||||
if !tc.client.Preauth {
|
||||
t.Fatalf("not preauthentication while configured for tls public key")
|
||||
}
|
||||
cs = tc.conn.(*tls.Conn).ConnectionState()
|
||||
if !cs.DidResume {
|
||||
t.Fatalf("tls connection was not resumed")
|
||||
}
|
||||
// Check that operations that require an account work.
|
||||
tc.client.Enable(imapclient.CapIMAP4rev2)
|
||||
received, err := time.Parse(time.RFC3339, "2022-11-16T10:01:00+01:00")
|
||||
tc.check(err, "parse time")
|
||||
tc.client.Append("inbox", makeAppendTime(exampleMsg, received))
|
||||
tc.client.Select("inbox")
|
||||
tc.close()
|
||||
|
||||
// Authentication with unknown key should fail.
|
||||
// todo: less duplication, change startArgs so this can be merged into it.
|
||||
err = store.Close()
|
||||
tcheck(t, err, "store close")
|
||||
os.RemoveAll("../testdata/imap/data")
|
||||
err = store.Init(ctxbg)
|
||||
tcheck(t, err, "store init")
|
||||
mox.ConfigStaticPath = filepath.FromSlash("../testdata/imap/mox.conf")
|
||||
mox.MustLoadConfig(true, false)
|
||||
switchStop := store.Switchboard()
|
||||
defer switchStop()
|
||||
|
||||
serverConn, clientConn := net.Pipe()
|
||||
defer clientConn.Close()
|
||||
|
||||
done := make(chan struct{})
|
||||
defer func() { <-done }()
|
||||
connCounter++
|
||||
cid := connCounter
|
||||
go func() {
|
||||
defer serverConn.Close()
|
||||
serve("test", cid, &serverConfig, serverConn, true, false, false, false, "")
|
||||
close(done)
|
||||
}()
|
||||
|
||||
clientConfig.ClientSessionCache = nil
|
||||
clientConn = tls.Client(clientConn, &clientConfig)
|
||||
// note: It's not enough to do a handshake and check if that was successful. If the
|
||||
// client cert is not acceptable, we only learn after the handshake, when the first
|
||||
// data messages are exchanged.
|
||||
buf := make([]byte, 100)
|
||||
_, err = clientConn.Read(buf)
|
||||
if err == nil {
|
||||
t.Fatalf("tls handshake with unknown client certificate succeeded")
|
||||
}
|
||||
if alert, ok := mox.AsTLSAlert(err); !ok || alert != 42 {
|
||||
t.Fatalf("got err %#v, expected tls 'bad certificate' alert", err)
|
||||
}
|
||||
}
|
||||
|
@ -1,82 +0,0 @@
|
||||
package imapserver
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"encoding/base64"
|
||||
"io"
|
||||
mathrand "math/rand/v2"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestCompress(t *testing.T) {
|
||||
tc := start(t, false)
|
||||
defer tc.close()
|
||||
|
||||
tc.login("mjl@mox.example", password0)
|
||||
|
||||
tc.transactf("bad", "compress")
|
||||
tc.transactf("bad", "compress bogus ")
|
||||
tc.transactf("no", "compress bogus")
|
||||
|
||||
tc.client.CompressDeflate()
|
||||
tc.transactf("no", "compress deflate") // Cannot have multiple.
|
||||
tc.xcodeWord("COMPRESSIONACTIVE")
|
||||
|
||||
tc.client.Select("inbox")
|
||||
tc.transactf("ok", "append inbox (\\seen) {%d+}\r\n%s", len(exampleMsg), exampleMsg)
|
||||
tc.transactf("ok", "noop")
|
||||
tc.transactf("ok", "fetch 1 body.peek[1]")
|
||||
}
|
||||
|
||||
func TestCompressStartTLS(t *testing.T) {
|
||||
tc := start(t, false)
|
||||
defer tc.close()
|
||||
|
||||
tc.client.StartTLS(&tls.Config{InsecureSkipVerify: true})
|
||||
tc.login("mjl@mox.example", password0)
|
||||
tc.client.CompressDeflate()
|
||||
tc.client.Select("inbox")
|
||||
tc.transactf("ok", "append inbox (\\seen) {%d+}\r\n%s", len(exampleMsg), exampleMsg)
|
||||
tc.transactf("ok", "noop")
|
||||
tc.transactf("ok", "fetch 1 body.peek[1]")
|
||||
}
|
||||
|
||||
func TestCompressBreak(t *testing.T) {
|
||||
// Close the client connection when the server is writing. That causes writes in
|
||||
// the server to fail (panic), jumping out of the flate writer and leaving its
|
||||
// state inconsistent. We must not call into the flate writer again because due to
|
||||
// its broken internal state it may cause array out of bounds accesses.
|
||||
|
||||
tc := start(t, false)
|
||||
defer tc.close()
|
||||
|
||||
msg := exampleMsg
|
||||
// Add random data (so it is not compressible). Don't know why, but only
|
||||
// reproducible with large writes. As if setting socket buffers had no effect.
|
||||
buf := make([]byte, 64*1024)
|
||||
_, err := io.ReadFull(mathrand.NewChaCha8([32]byte{}), buf)
|
||||
tcheck(t, err, "read random")
|
||||
text := base64.StdEncoding.EncodeToString(buf)
|
||||
for len(text) > 0 {
|
||||
n := min(76, len(text))
|
||||
msg += text[:n] + "\r\n"
|
||||
text = text[n:]
|
||||
}
|
||||
|
||||
tc.login("mjl@mox.example", password0)
|
||||
tc.client.CompressDeflate()
|
||||
tc.client.Select("inbox")
|
||||
tc.transactf("ok", "append inbox (\\seen) {%d+}\r\n%s", len(msg), msg)
|
||||
tc.transactf("ok", "noop")
|
||||
|
||||
// Write request. Close connection instead of reading data. Write will panic,
|
||||
// coming through flate writer leaving its state inconsistent. Server must not try
|
||||
// to Flush/Write again on flate writer or it may panic.
|
||||
tc.client.Writelinef("x fetch 1 body.peek[1]")
|
||||
|
||||
// Close client connection and prevent cleanup from closing the client again.
|
||||
time.Sleep(time.Second / 10)
|
||||
tc.client = nil
|
||||
tc.conn.Close() // Simulate client disappearing.
|
||||
}
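The failure mode described in TestCompressBreak implies the server must remember that a write through the compressor failed and never touch the flate writer again. The sketch below is a hypothetical guard, not the actual mox implementation; since the test comment says failing writes panic in the server, real code would set such a flag before re-panicking rather than rely on an error return.

package compressguard

import (
	"compress/flate"
	"errors"
	"io"
)

// guardedFlateWriter refuses all further use once a write or flush has failed,
// because the flate writer's internal state may be inconsistent after that.
type guardedFlateWriter struct {
	fw     *flate.Writer
	broken bool
}

func newGuardedFlateWriter(w io.Writer) (*guardedFlateWriter, error) {
	fw, err := flate.NewWriter(w, flate.DefaultCompression)
	if err != nil {
		return nil, err
	}
	return &guardedFlateWriter{fw: fw}, nil
}

func (g *guardedFlateWriter) Write(p []byte) (int, error) {
	if g.broken {
		return 0, errors.New("compressor unusable after earlier write error")
	}
	n, err := g.fw.Write(p)
	if err != nil {
		g.broken = true
	}
	return n, err
}

func (g *guardedFlateWriter) Flush() error {
	if g.broken {
		return errors.New("compressor unusable after earlier write error")
	}
	if err := g.fw.Flush(); err != nil {
		g.broken = true
		return err
	}
	return nil
}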
|
File diff suppressed because it is too large
@ -7,25 +7,17 @@ import (
|
||||
)
|
||||
|
||||
func TestCopy(t *testing.T) {
|
||||
testCopy(t, false)
|
||||
}
|
||||
|
||||
func TestCopyUIDOnly(t *testing.T) {
|
||||
testCopy(t, true)
|
||||
}
|
||||
|
||||
func testCopy(t *testing.T, uidonly bool) {
|
||||
defer mockUIDValidity()()
|
||||
tc := start(t, uidonly)
|
||||
tc := start(t)
|
||||
defer tc.close()
|
||||
|
||||
tc2 := startNoSwitchboard(t, uidonly)
|
||||
defer tc2.closeNoWait()
|
||||
tc2 := startNoSwitchboard(t)
|
||||
defer tc2.close()
|
||||
|
||||
tc.login("mjl@mox.example", password0)
|
||||
tc.client.Login("mjl@mox.example", password0)
|
||||
tc.client.Select("inbox")
|
||||
|
||||
tc2.login("mjl@mox.example", password0)
|
||||
tc2.client.Login("mjl@mox.example", password0)
|
||||
tc2.client.Select("Trash")
|
||||
|
||||
tc.transactf("bad", "copy") // Missing params.
|
||||
@ -33,53 +25,48 @@ func testCopy(t *testing.T, uidonly bool) {
|
||||
tc.transactf("bad", "copy 1 inbox ") // Leftover.
|
||||
|
||||
// Seqs 1,2 and UIDs 3,4.
|
||||
tc.client.Append("inbox", makeAppend(exampleMsg))
|
||||
tc.client.Append("inbox", makeAppend(exampleMsg))
|
||||
tc.transactf("ok", `Uid Store 1:2 +Flags.Silent (\Deleted)`)
|
||||
tc.client.Append("inbox", nil, nil, []byte(exampleMsg))
|
||||
tc.client.Append("inbox", nil, nil, []byte(exampleMsg))
|
||||
tc.client.StoreFlagsSet("1:2", true, `\Deleted`)
|
||||
tc.client.Expunge()
|
||||
tc.client.Append("inbox", makeAppend(exampleMsg))
|
||||
tc.client.Append("inbox", makeAppend(exampleMsg))
|
||||
tc.client.Append("inbox", nil, nil, []byte(exampleMsg))
|
||||
tc.client.Append("inbox", nil, nil, []byte(exampleMsg))
|
||||
|
||||
if uidonly {
|
||||
tc.transactf("ok", "uid copy 3:* Trash")
|
||||
} else {
|
||||
tc.transactf("no", "copy 1 nonexistent")
|
||||
tc.xcodeWord("TRYCREATE")
|
||||
tc.transactf("no", "copy 1 expungebox")
|
||||
tc.xcodeWord("TRYCREATE")
|
||||
tc.transactf("no", "copy 1 nonexistent")
|
||||
tc.xcode("TRYCREATE")
|
||||
|
||||
tc.transactf("no", "copy 1 inbox") // Cannot copy to same mailbox.
|
||||
tc.transactf("no", "copy 1 inbox") // Cannot copy to same mailbox.
|
||||
|
||||
tc2.transactf("ok", "noop") // Drain.
|
||||
tc2.transactf("ok", "noop") // Drain.
|
||||
|
||||
tc.transactf("ok", "copy 1:* Trash")
|
||||
tc.xcode(mustParseCode("COPYUID 1 3:4 1:2"))
|
||||
}
|
||||
tc.transactf("ok", "copy 1:* Trash")
|
||||
ptr := func(v uint32) *uint32 { return &v }
|
||||
tc.xcodeArg(imapclient.CodeCopyUID{DestUIDValidity: 1, From: []imapclient.NumRange{{First: 3, Last: ptr(4)}}, To: []imapclient.NumRange{{First: 1, Last: ptr(2)}}})
|
||||
tc2.transactf("ok", "noop")
|
||||
tc2.xuntagged(
|
||||
imapclient.UntaggedExists(2),
|
||||
tc2.untaggedFetch(1, 1, imapclient.FetchFlags(nil)),
|
||||
tc2.untaggedFetch(2, 2, imapclient.FetchFlags(nil)),
|
||||
imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{imapclient.FetchUID(1), imapclient.FetchFlags(nil)}},
|
||||
imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{imapclient.FetchUID(2), imapclient.FetchFlags(nil)}},
|
||||
)
|
||||
|
||||
tc.transactf("no", "uid copy 1,2 Trash") // No match.
|
||||
tc.transactf("ok", "uid copy 4,3 Trash")
|
||||
tc.xcode(mustParseCode("COPYUID 1 3:4 3:4"))
|
||||
tc.xcodeArg(imapclient.CodeCopyUID{DestUIDValidity: 1, From: []imapclient.NumRange{{First: 3, Last: ptr(4)}}, To: []imapclient.NumRange{{First: 3, Last: ptr(4)}}})
|
||||
tc2.transactf("ok", "noop")
|
||||
tc2.xuntagged(
|
||||
imapclient.UntaggedExists(4),
|
||||
tc2.untaggedFetch(3, 3, imapclient.FetchFlags(nil)),
|
||||
tc2.untaggedFetch(4, 4, imapclient.FetchFlags(nil)),
|
||||
imapclient.UntaggedFetch{Seq: 3, Attrs: []imapclient.FetchAttr{imapclient.FetchUID(3), imapclient.FetchFlags(nil)}},
|
||||
imapclient.UntaggedFetch{Seq: 4, Attrs: []imapclient.FetchAttr{imapclient.FetchUID(4), imapclient.FetchFlags(nil)}},
|
||||
)
|
||||
|
||||
tclimit := startArgs(t, uidonly, false, false, true, true, "limit")
|
||||
tclimit := startArgs(t, false, false, true, true, "limit")
|
||||
defer tclimit.close()
|
||||
tclimit.login("limit@mox.example", password0)
|
||||
tclimit.client.Login("limit@mox.example", password0)
|
||||
tclimit.client.Select("inbox")
|
||||
// First message of 1 byte is within limits.
|
||||
tclimit.transactf("ok", "append inbox (\\Seen Label1 $label2) \" 1-Jan-2022 10:10:00 +0100\" {1+}\r\nx")
|
||||
tclimit.xuntagged(imapclient.UntaggedExists(1))
|
||||
// Second message would take account past limit.
|
||||
tclimit.transactf("no", "uid copy 1:* Trash")
|
||||
tclimit.xcodeWord("OVERQUOTA")
|
||||
tclimit.transactf("no", "copy 1:* Trash")
|
||||
tclimit.xcode("OVERQUOTA")
|
||||
}
|
||||
|
@ -7,42 +7,24 @@ import (
|
||||
)
|
||||
|
||||
func TestCreate(t *testing.T) {
|
||||
testCreate(t, false)
|
||||
}
|
||||
|
||||
func TestCreateUIDOnly(t *testing.T) {
|
||||
testCreate(t, true)
|
||||
}
|
||||
|
||||
func testCreate(t *testing.T, uidonly bool) {
|
||||
tc := start(t, uidonly)
|
||||
tc := start(t)
|
||||
defer tc.close()
|
||||
|
||||
tc2 := startNoSwitchboard(t, uidonly)
|
||||
defer tc2.closeNoWait()
|
||||
tc2 := startNoSwitchboard(t)
|
||||
defer tc2.close()
|
||||
|
||||
tc.login("mjl@mox.example", password0)
|
||||
tc2.login("mjl@mox.example", password0)
|
||||
tc.client.Login("mjl@mox.example", password0)
|
||||
tc2.client.Login("mjl@mox.example", password0)
|
||||
|
||||
tc.transactf("no", "create inbox") // Already exists and not allowed. ../rfc/9051:1913
|
||||
tc.transactf("no", "create Inbox") // Idem.
|
||||
|
||||
// Don't allow names that can cause trouble when exporting to directories.
|
||||
tc.transactf("no", "create .")
|
||||
tc.transactf("no", "create ..")
|
||||
tc.transactf("no", "create legit/..")
|
||||
tc.transactf("ok", "create ...") // No special meaning.
|
||||
|
||||
// ../rfc/9051:1937
|
||||
tc.transactf("ok", "create inbox/a/c")
|
||||
tc.xuntagged(imapclient.UntaggedList{Flags: []string{`\Subscribed`}, Separator: '/', Mailbox: "Inbox/a"}, imapclient.UntaggedList{Flags: []string{`\Subscribed`}, Separator: '/', Mailbox: "Inbox/a/c"})
|
||||
|
||||
tc2.transactf("ok", "noop")
|
||||
tc2.xuntagged(
|
||||
imapclient.UntaggedList{Flags: []string{`\Subscribed`}, Separator: '/', Mailbox: "..."},
|
||||
imapclient.UntaggedList{Flags: []string{`\Subscribed`}, Separator: '/', Mailbox: "Inbox/a"},
|
||||
imapclient.UntaggedList{Flags: []string{`\Subscribed`}, Separator: '/', Mailbox: "Inbox/a/c"},
|
||||
)
|
||||
tc2.xuntagged(imapclient.UntaggedList{Flags: []string{`\Subscribed`}, Separator: '/', Mailbox: "Inbox/a"}, imapclient.UntaggedList{Flags: []string{`\Subscribed`}, Separator: '/', Mailbox: "Inbox/a/c"})
|
||||
|
||||
tc.transactf("no", "create inbox/a/c") // Exists.
|
||||
|
||||
@ -57,7 +39,7 @@ func testCreate(t *testing.T, uidonly bool) {
|
||||
tc.xuntagged(imapclient.UntaggedList{Flags: []string{`\Subscribed`}, Separator: '/', Mailbox: "mailbox"})
|
||||
|
||||
// OldName is only set for IMAP4rev2 or NOTIFY.
|
||||
tc.client.Enable(imapclient.CapIMAP4rev2)
|
||||
tc.client.Enable("imap4rev2")
|
||||
tc.transactf("ok", "create mailbox2/")
|
||||
tc.xuntagged(imapclient.UntaggedList{Flags: []string{`\Subscribed`}, Separator: '/', Mailbox: "mailbox2", OldName: "mailbox2/"})
|
||||
|
||||
@ -90,19 +72,8 @@ func testCreate(t *testing.T, uidonly bool) {
|
||||
tc.transactf("no", `create "#"`) // Leading hash not allowed.
|
||||
tc.transactf("ok", `create "test#"`)
|
||||
|
||||
// Create with flags.
|
||||
tc.transactf("no", `create "newwithflags" (use (\unknown))`)
|
||||
tc.transactf("no", `create "newwithflags" (use (\all))`)
|
||||
tc.transactf("ok", `create "newwithflags" (use (\archive))`)
|
||||
tc.transactf("ok", "noop")
|
||||
tc.xuntagged()
|
||||
tc.transactf("ok", `create "newwithflags2" (use (\archive) use (\drafts \sent))`)
|
||||
|
||||
// UTF-7 checks are only for IMAP4 before rev2 and without UTF8=ACCEPT.
|
||||
tc.transactf("ok", `create "&"`) // Interpreted as UTF-8, no UTF-7.
|
||||
tc2.transactf("bad", `create "&"`) // Bad UTF-7.
|
||||
tc2.transactf("ok", `create "&Jjo-"`) // ☺, valid UTF-7.
|
||||
|
||||
tc.transactf("ok", "create expungebox") // Existed in past.
|
||||
tc.transactf("ok", "delete expungebox") // Gone again.
|
||||
}
|
||||
|
@ -7,45 +7,36 @@ import (
|
||||
)
|
||||
|
||||
func TestDelete(t *testing.T) {
|
||||
testDelete(t, false)
|
||||
}
|
||||
|
||||
func TestDeleteUIDOnly(t *testing.T) {
|
||||
testDelete(t, true)
|
||||
}
|
||||
|
||||
func testDelete(t *testing.T, uidonly bool) {
|
||||
tc := start(t, uidonly)
|
||||
tc := start(t)
|
||||
defer tc.close()
|
||||
|
||||
tc2 := startNoSwitchboard(t, uidonly)
|
||||
defer tc2.closeNoWait()
|
||||
tc2 := startNoSwitchboard(t)
|
||||
defer tc2.close()
|
||||
|
||||
tc3 := startNoSwitchboard(t, uidonly)
|
||||
defer tc3.closeNoWait()
|
||||
tc3 := startNoSwitchboard(t)
|
||||
defer tc3.close()
|
||||
|
||||
tc.login("mjl@mox.example", password0)
|
||||
tc2.login("mjl@mox.example", password0)
|
||||
tc3.login("mjl@mox.example", password0)
|
||||
tc.client.Login("mjl@mox.example", password0)
|
||||
tc2.client.Login("mjl@mox.example", password0)
|
||||
tc3.client.Login("mjl@mox.example", password0)
|
||||
|
||||
tc.transactf("bad", "delete") // Missing mailbox.
|
||||
tc.transactf("no", "delete inbox") // Cannot delete inbox.
|
||||
tc.transactf("no", "delete nonexistent") // Cannot delete mailbox that does not exist.
|
||||
tc.transactf("no", `delete "nonexistent"`) // Again, with quoted string syntax.
|
||||
tc.transactf("no", `delete "expungebox"`) // Already removed.
|
||||
|
||||
tc.client.Subscribe("x")
|
||||
tc.transactf("no", "delete x") // Subscription does not mean there is a mailbox that can be deleted.
|
||||
|
||||
tc.client.Create("a/b", nil)
|
||||
tc.client.Create("a/b")
|
||||
tc2.transactf("ok", "noop") // Drain changes.
|
||||
tc3.transactf("ok", "noop")
|
||||
|
||||
// ../rfc/9051:2000
|
||||
tc.transactf("no", "delete a") // Still has child.
|
||||
tc.xcodeWord("HASCHILDREN")
|
||||
tc.xcode("HASCHILDREN")
|
||||
|
||||
tc3.client.Enable(imapclient.CapIMAP4rev2) // For \NonExistent support.
|
||||
tc3.client.Enable("IMAP4rev2") // For \NonExistent support.
|
||||
tc.transactf("ok", "delete a/b")
|
||||
tc2.transactf("ok", "noop")
|
||||
tc2.xuntagged() // No IMAP4rev2, no \NonExistent.
|
||||
@ -62,12 +53,12 @@ func testDelete(t *testing.T, uidonly bool) {
|
||||
)
|
||||
|
||||
// Let's try again with a message present.
|
||||
tc.client.Create("msgs", nil)
|
||||
tc.client.Append("msgs", makeAppend(exampleMsg))
|
||||
tc.client.Create("msgs")
|
||||
tc.client.Append("msgs", nil, nil, []byte(exampleMsg))
|
||||
tc.transactf("ok", "delete msgs")
|
||||
|
||||
// Delete for inbox/* is allowed.
|
||||
tc.client.Create("inbox/a", nil)
|
||||
tc.client.Create("inbox/a")
|
||||
tc.transactf("ok", "delete inbox/a")
|
||||
|
||||
}
|
||||
|
@ -57,9 +57,3 @@ func xsyntaxErrorf(format string, args ...any) {
|
||||
err := errors.New(errmsg)
|
||||
panic(syntaxError{"", "", errmsg, err})
|
||||
}
|
||||
|
||||
func xsyntaxCodeErrorf(code, format string, args ...any) {
|
||||
errmsg := fmt.Sprintf(format, args...)
|
||||
err := errors.New(errmsg)
|
||||
panic(syntaxError{"", code, errmsg, err})
|
||||
}
|
||||
|
@ -7,25 +7,17 @@ import (
|
||||
)
|
||||
|
||||
func TestExpunge(t *testing.T) {
|
||||
testExpunge(t, false)
|
||||
}
|
||||
|
||||
func TestExpungeUIDOnly(t *testing.T) {
|
||||
testExpunge(t, true)
|
||||
}
|
||||
|
||||
func testExpunge(t *testing.T, uidonly bool) {
|
||||
defer mockUIDValidity()()
|
||||
tc := start(t, uidonly)
|
||||
tc := start(t)
|
||||
defer tc.close()
|
||||
|
||||
tc2 := startNoSwitchboard(t, uidonly)
|
||||
defer tc2.closeNoWait()
|
||||
tc2 := startNoSwitchboard(t)
|
||||
defer tc2.close()
|
||||
|
||||
tc.login("mjl@mox.example", password0)
|
||||
tc.client.Login("mjl@mox.example", password0)
|
||||
tc.client.Select("inbox")
|
||||
|
||||
tc2.login("mjl@mox.example", password0)
|
||||
tc2.client.Login("mjl@mox.example", password0)
|
||||
tc2.client.Select("inbox")
|
||||
|
||||
tc.transactf("bad", "expunge leftover") // Leftover data.
|
||||
@ -39,43 +31,35 @@ func testExpunge(t *testing.T, uidonly bool) {
|
||||
|
||||
tc.client.Unselect()
|
||||
tc.client.Select("inbox")
|
||||
tc.client.Append("inbox", makeAppend(exampleMsg))
|
||||
tc.client.Append("inbox", makeAppend(exampleMsg))
|
||||
tc.client.Append("inbox", makeAppend(exampleMsg))
|
||||
tc.client.Append("inbox", nil, nil, []byte(exampleMsg))
|
||||
tc.client.Append("inbox", nil, nil, []byte(exampleMsg))
|
||||
tc.client.Append("inbox", nil, nil, []byte(exampleMsg))
|
||||
tc.transactf("ok", "expunge") // Still nothing to remove.
|
||||
tc.xuntagged()
|
||||
|
||||
tc.transactf("ok", `uid store 1,3 +flags.silent \Deleted`)
|
||||
tc.client.StoreFlagsAdd("1,3", true, `\Deleted`)
|
||||
|
||||
tc2.transactf("ok", "noop") // Drain.
|
||||
|
||||
tc.transactf("ok", "expunge")
|
||||
if uidonly {
|
||||
tc.xuntagged(imapclient.UntaggedVanished{UIDs: xparseNumSet("1,3")})
|
||||
} else {
|
||||
tc.xuntagged(imapclient.UntaggedExpunge(1), imapclient.UntaggedExpunge(2))
|
||||
}
|
||||
tc.xuntagged(imapclient.UntaggedExpunge(1), imapclient.UntaggedExpunge(2))
|
||||
|
||||
tc2.transactf("ok", "noop") // Drain.
|
||||
if uidonly {
|
||||
tc2.xuntagged(imapclient.UntaggedVanished{UIDs: xparseNumSet("1,3")})
|
||||
} else {
|
||||
tc2.xuntagged(imapclient.UntaggedExpunge(1), imapclient.UntaggedExpunge(2))
|
||||
}
|
||||
tc2.xuntagged(imapclient.UntaggedExpunge(1), imapclient.UntaggedExpunge(2))
|
||||
|
||||
tc.transactf("ok", "expunge") // Nothing to remove anymore.
|
||||
tc.xuntagged()
|
||||
|
||||
// Only UID 2 is still left. We'll add 3 more. Getting us to UIDs 2,4,5,6.
|
||||
tc.client.Append("inbox", makeAppend(exampleMsg))
|
||||
tc.client.Append("inbox", makeAppend(exampleMsg))
|
||||
tc.client.Append("inbox", makeAppend(exampleMsg))
|
||||
tc.client.Append("inbox", nil, nil, []byte(exampleMsg))
|
||||
tc.client.Append("inbox", nil, nil, []byte(exampleMsg))
|
||||
tc.client.Append("inbox", nil, nil, []byte(exampleMsg))
|
||||
|
||||
tc.transactf("bad", "uid expunge") // Missing uid set.
|
||||
tc.transactf("bad", "uid expunge 1 leftover") // Leftover data.
|
||||
tc.transactf("bad", "uid expunge 1 leftover") // Leftover data.
|
||||
|
||||
tc.transactf("ok", `uid store 2,4,6 +flags.silent \Deleted`)
|
||||
tc.client.StoreFlagsAdd("1,2,4", true, `\Deleted`) // Marks UID 2,4,6 as deleted.
|
||||
|
||||
tc.transactf("ok", "uid expunge 1")
|
||||
tc.xuntagged() // No match.
|
||||
@ -83,16 +67,8 @@ func testExpunge(t *testing.T, uidonly bool) {
|
||||
tc2.transactf("ok", "noop") // Drain.
|
||||
|
||||
tc.transactf("ok", "uid expunge 4:6") // Removes UID 4,6 at seqs 2,4.
|
||||
if uidonly {
|
||||
tc.xuntagged(imapclient.UntaggedVanished{UIDs: xparseNumSet("4,6")})
|
||||
} else {
|
||||
tc.xuntagged(imapclient.UntaggedExpunge(2), imapclient.UntaggedExpunge(3))
|
||||
}
|
||||
tc.xuntagged(imapclient.UntaggedExpunge(2), imapclient.UntaggedExpunge(3))
|
||||
|
||||
tc2.transactf("ok", "noop")
|
||||
if uidonly {
|
||||
tc2.xuntagged(imapclient.UntaggedVanished{UIDs: xparseNumSet("4,6")})
|
||||
} else {
|
||||
tc2.xuntagged(imapclient.UntaggedExpunge(2), imapclient.UntaggedExpunge(3))
|
||||
}
|
||||
tc.xuntagged(imapclient.UntaggedExpunge(2), imapclient.UntaggedExpunge(3))
|
||||
}
|
||||
|
@ -4,20 +4,19 @@ package imapserver
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log/slog"
|
||||
"maps"
|
||||
"mime"
|
||||
"net/textproto"
|
||||
"slices"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/exp/maps"
|
||||
|
||||
"github.com/mjl-/bstore"
|
||||
|
||||
"github.com/mjl-/mox/message"
|
||||
"github.com/mjl-/mox/mlog"
|
||||
"github.com/mjl-/mox/mox-"
|
||||
"github.com/mjl-/mox/moxio"
|
||||
"github.com/mjl-/mox/store"
|
||||
@ -26,20 +25,18 @@ import (
|
||||
// functions to handle fetch attribute requests are defined on fetchCmd.
|
||||
type fetchCmd struct {
|
||||
conn *conn
|
||||
isUID bool // If this is a UID FETCH command.
|
||||
rtx *bstore.Tx // Read-only transaction, kept open while processing all messages.
|
||||
updateSeen []store.UID // To mark as seen after processing all messages. UID instead of message ID since moved messages keep their ID and insert a new ID in the original mailbox.
|
||||
hasChangedSince bool // Whether CHANGEDSINCE was set. Enables MODSEQ in response.
|
||||
expungeIssued bool // Set if any message has been expunged. Can happen for expunged messages.
|
||||
|
||||
// For message currently processing.
|
||||
mailboxID int64
|
||||
uid store.UID
|
||||
|
||||
markSeen bool
|
||||
needFlags bool
|
||||
needModseq bool // Whether untagged responses need modseq.
|
||||
newPreviews map[store.UID]string // Save with messages when done.
|
||||
mailboxID int64
|
||||
uid store.UID
|
||||
tx *bstore.Tx // Writable tx, for storing message when first parsed as mime parts.
|
||||
changes []store.Change // For updated Seen flag.
|
||||
markSeen bool
|
||||
needFlags bool
|
||||
needModseq bool // Whether untagged responses need modseq.
|
||||
expungeIssued bool // Set if a message cannot be read. Can happen for expunged messages.
|
||||
modseq store.ModSeq // Initialized on first change, for marking messages as seen.
|
||||
isUID bool // If this is a UID FETCH command.
|
||||
hasChangedSince bool // Whether CHANGEDSINCE was set. Enables MODSEQ in response.
|
||||
deltaCounts store.MailboxCounts // By marking \Seen, the number of unread/unseen messages will go down. We update counts at the end.
|
||||
|
||||
// Loaded when first needed, closed when message was processed.
|
||||
m *store.Message // Message currently being processed.
|
||||
@ -79,7 +76,7 @@ func (c *conn) cmdxFetch(isUID bool, tag, cmdstr string, p *parser) {
|
||||
p.xspace()
|
||||
nums := p.xnumSet()
|
||||
p.xspace()
|
||||
atts := p.xfetchAtts()
|
||||
atts := p.xfetchAtts(isUID)
|
||||
var changedSince int64
|
||||
var haveChangedSince bool
|
||||
var vanished bool
|
||||
@ -128,66 +125,42 @@ func (c *conn) cmdxFetch(isUID bool, tag, cmdstr string, p *parser) {
|
||||
}
|
||||
p.xempty()
|
||||
|
||||
// We only keep a wlock, only for initial checks and listing the uids. Then we
|
||||
// unlock and work without a lock. So changes to the store can happen, and we need
|
||||
// to deal with that. If we need to mark messages as seen, we do so after
|
||||
// processing the fetch for all messages, in a single write transaction. We don't
|
||||
// send untagged changes for those \seen flag changes before finishing this
|
||||
// command, because we have to sequence all changes properly, and since we don't
|
||||
// (want to) hold a wlock while processing messages (can be many!), other changes
|
||||
// may have happened to the store. So instead, we'll silently mark messages as seen
|
||||
// (the client should know this is happening anyway!), then broadcast the changes
|
||||
// to everyone, including ourselves. A noop/idle command that may come next will
|
||||
// return the \seen flag changes, in the correct order, with the correct modseq. We
|
||||
// also cannot just apply pending changes while processing. It is not allowed at
|
||||
// all for non-uid-fetch. It would also make life more complicated, e.g. we would
|
||||
// perhaps have to check if newly added messages also match uid fetch set that was
|
||||
// requested.
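//
// In short: (1) a read-only transaction checks the mailbox and collects the
// UIDs to process; (2) without holding locks, the FETCH/UIDFETCH responses are
// streamed to the client; (3) a write transaction marks the collected messages
// \Seen and saves new previews; (4) those changes are broadcast to all
// sessions, including our own, so a later noop/idle reports them in the
// correct order with the correct modseq.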
|
||||
|
||||
var uids []store.UID
|
||||
var vanishedUIDs []store.UID
|
||||
|
||||
cmd := &fetchCmd{conn: c, isUID: isUID, hasChangedSince: haveChangedSince, mailboxID: c.mailboxID, newPreviews: map[store.UID]string{}}
|
||||
|
||||
// We don't use c.account.WithRLock because we write to the client while reading messages.
|
||||
// We get the rlock, then we check the mailbox, release the lock and read the messages.
|
||||
// The db transaction still locks out any changes to the database...
|
||||
c.account.RLock()
|
||||
runlock := c.account.RUnlock
|
||||
// Note: we call runlock in a closure because we replace it below.
|
||||
defer func() {
|
||||
if cmd.rtx == nil {
|
||||
return
|
||||
}
|
||||
err := cmd.rtx.Rollback()
|
||||
c.log.Check(err, "rollback rtx")
|
||||
cmd.rtx = nil
|
||||
runlock()
|
||||
}()
|
||||
|
||||
c.account.WithRLock(func() {
|
||||
var err error
|
||||
cmd.rtx, err = c.account.DB.Begin(context.TODO(), false)
|
||||
cmd.xcheckf(err, "begin transaction")
|
||||
var vanishedUIDs []store.UID
|
||||
cmd := &fetchCmd{conn: c, mailboxID: c.mailboxID, isUID: isUID, hasChangedSince: haveChangedSince}
|
||||
c.xdbwrite(func(tx *bstore.Tx) {
|
||||
cmd.tx = tx
|
||||
|
||||
// Ensure the mailbox still exists.
|
||||
c.xmailboxID(cmd.rtx, c.mailboxID)
|
||||
mb := c.xmailboxID(tx, c.mailboxID)
|
||||
|
||||
var uids []store.UID
|
||||
|
||||
// With changedSince, the client is likely asking for a small set of changes. Use a
|
||||
// database query to trim down the uids we need to look at. We need to go through
|
||||
// the database for "VANISHED (EARLIER)" anyway, to see UIDs that aren't in the
|
||||
// session anymore. Vanished must be used with changedSince. ../rfc/7162:871
|
||||
// database query to trim down the uids we need to look at.
|
||||
// ../rfc/7162:871
|
||||
if changedSince > 0 {
|
||||
q := bstore.QueryTx[store.Message](cmd.rtx)
|
||||
q := bstore.QueryTx[store.Message](tx)
|
||||
q.FilterNonzero(store.Message{MailboxID: c.mailboxID})
|
||||
q.FilterGreater("ModSeq", store.ModSeqFromClient(changedSince))
|
||||
if !vanished {
|
||||
q.FilterEqual("Expunged", false)
|
||||
}
|
||||
err := q.ForEach(func(m store.Message) error {
|
||||
if m.UID >= c.uidnext {
|
||||
return nil
|
||||
}
|
||||
if isUID {
|
||||
if nums.xcontainsKnownUID(m.UID, c.searchResult, func() store.UID { return c.uidnext - 1 }) {
|
||||
if m.Expunged {
|
||||
vanishedUIDs = append(vanishedUIDs, m.UID)
|
||||
} else {
|
||||
uids = append(uids, m.UID)
|
||||
}
|
||||
if m.Expunged {
|
||||
vanishedUIDs = append(vanishedUIDs, m.UID)
|
||||
} else if isUID {
|
||||
if nums.containsUID(m.UID, c.uids, c.searchResult) {
|
||||
uids = append(uids, m.UID)
|
||||
}
|
||||
} else {
|
||||
seq := c.sequence(m.UID)
|
||||
@ -198,196 +171,115 @@ func (c *conn) cmdxFetch(isUID bool, tag, cmdstr string, p *parser) {
|
||||
return nil
|
||||
})
|
||||
xcheckf(err, "looking up messages with changedsince")
|
||||
} else {
|
||||
uids = c.xnumSetUIDs(isUID, nums)
|
||||
}
|
||||
|
||||
// In case of vanished where we don't have the full history, we must send VANISHED
|
||||
// for all uids matching nums. ../rfc/7162:1718
|
||||
delModSeq, err := c.account.HighestDeletedModSeq(cmd.rtx)
|
||||
// Send vanished for all missing requested UIDs. ../rfc/7162:1718
|
||||
if vanished {
|
||||
delModSeq, err := c.account.HighestDeletedModSeq(tx)
|
||||
xcheckf(err, "looking up highest deleted modseq")
|
||||
if !vanished || changedSince >= delModSeq.Client() {
|
||||
return
|
||||
if changedSince < delModSeq.Client() {
|
||||
// First sort the uids we already found, for fast lookup.
|
||||
sort.Slice(vanishedUIDs, func(i, j int) bool {
|
||||
return vanishedUIDs[i] < vanishedUIDs[j]
|
||||
})
|
||||
|
||||
// We'll be gathering any more vanished uids in more.
|
||||
more := map[store.UID]struct{}{}
|
||||
checkVanished := func(uid store.UID) {
|
||||
if uidSearch(c.uids, uid) <= 0 && uidSearch(vanishedUIDs, uid) <= 0 {
|
||||
more[uid] = struct{}{}
|
||||
}
|
||||
}
|
||||
// Now look through the requested uids. We may have a searchResult, handle it
|
||||
// separately from a numset with potential stars, over which we can more easily
|
||||
// iterate.
|
||||
if nums.searchResult {
|
||||
for _, uid := range c.searchResult {
|
||||
checkVanished(uid)
|
||||
}
|
||||
} else {
|
||||
iter := nums.interpretStar(c.uids).newIter()
|
||||
for {
|
||||
num, ok := iter.Next()
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
checkVanished(store.UID(num))
|
||||
}
|
||||
}
|
||||
vanishedUIDs = append(vanishedUIDs, maps.Keys(more)...)
|
||||
}
|
||||
|
||||
// We'll iterate through all UIDs in the numset, and add anything that isn't
|
||||
// already in uids and vanishedUIDs. First sort the uids we already found, for fast
|
||||
// lookup. We'll gather new UIDs in more, so we don't break the binary search.
|
||||
slices.Sort(vanishedUIDs)
|
||||
slices.Sort(uids)
|
||||
|
||||
more := map[store.UID]struct{}{} // We'll add them at the end.
|
||||
checkVanished := func(uid store.UID) {
|
||||
if uid < c.uidnext && uidSearch(uids, uid) <= 0 && uidSearch(vanishedUIDs, uid) <= 0 {
|
||||
more[uid] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
// Now look through the requested uids. We may have a searchResult, handle it
|
||||
// separately from a numset with potential stars, over which we can more easily
|
||||
// iterate.
|
||||
if nums.searchResult {
|
||||
for _, uid := range c.searchResult {
|
||||
checkVanished(uid)
|
||||
}
|
||||
} else {
|
||||
xlastUID := c.newCachedLastUID(cmd.rtx, c.mailboxID, func(xerr error) { xuserErrorf("%s", xerr) })
|
||||
iter := nums.xinterpretStar(xlastUID).newIter()
|
||||
for {
|
||||
num, ok := iter.Next()
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
checkVanished(store.UID(num))
|
||||
}
|
||||
}
|
||||
vanishedUIDs = slices.AppendSeq(vanishedUIDs, maps.Keys(more))
|
||||
slices.Sort(vanishedUIDs)
|
||||
} else {
|
||||
uids = c.xnumSetEval(cmd.rtx, isUID, nums)
|
||||
}
|
||||
|
||||
})
|
||||
// We are continuing without a lock, working off our snapshot of uids to process.
|
||||
// Release the account lock.
|
||||
runlock()
|
||||
runlock = func() {} // Prevent defer from unlocking again.
|
||||
|
||||
// First report all vanished UIDs. ../rfc/7162:1714
|
||||
if len(vanishedUIDs) > 0 {
|
||||
// Mention all vanished UIDs in compact numset form.
|
||||
// ../rfc/7162:1985
|
||||
// No hard limit on response sizes, but clients are recommended to not send more
|
||||
// than 8k. We send a more conservative max 4k.
|
||||
for _, s := range compactUIDSet(vanishedUIDs).Strings(4*1024 - 32) {
|
||||
c.xbwritelinef("* VANISHED (EARLIER) %s", s)
|
||||
}
|
||||
}
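The loop above leans on compactUIDSet(...).Strings(...) to render the vanished UIDs as compact ranges, split into strings that stay under the 4k budget mentioned in the comment. A rough self-contained sketch of that idea follows; the helper names and exact behavior are assumptions for illustration, not mox's implementation.

package main

import (
	"fmt"
	"strings"
)

// compactRanges turns sorted, deduplicated uids into "a:b" or "a" tokens.
func compactRanges(uids []uint32) []string {
	var tokens []string
	for i := 0; i < len(uids); {
		j := i
		for j+1 < len(uids) && uids[j+1] == uids[j]+1 {
			j++
		}
		if i == j {
			tokens = append(tokens, fmt.Sprintf("%d", uids[i]))
		} else {
			tokens = append(tokens, fmt.Sprintf("%d:%d", uids[i], uids[j]))
		}
		i = j + 1
	}
	return tokens
}

// chunk joins tokens with commas, starting a new string whenever adding the
// next token would exceed maxLen bytes.
func chunk(tokens []string, maxLen int) []string {
	var out []string
	var cur strings.Builder
	for _, tok := range tokens {
		if cur.Len() > 0 && cur.Len()+1+len(tok) > maxLen {
			out = append(out, cur.String())
			cur.Reset()
		}
		if cur.Len() > 0 {
			cur.WriteString(",")
		}
		cur.WriteString(tok)
	}
	if cur.Len() > 0 {
		out = append(out, cur.String())
	}
	return out
}

func main() {
	uids := []uint32{1, 2, 3, 5, 7, 8, 9, 12}
	for _, s := range chunk(compactRanges(uids), 8) {
		fmt.Println("* VANISHED (EARLIER)", s)
	}
}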
|
||||
|
||||
defer cmd.msgclose() // In case of panic.
|
||||
|
||||
for _, cmd.uid = range uids {
|
||||
cmd.conn.log.Debug("processing uid", slog.Any("uid", cmd.uid))
|
||||
data, err := cmd.process(atts)
|
||||
if err != nil {
|
||||
cmd.conn.log.Infox("processing fetch attribute", err, slog.Any("uid", cmd.uid))
|
||||
xuserErrorf("processing fetch attribute: %v", err)
|
||||
}
|
||||
|
||||
// UIDFETCH in case of uidonly. ../rfc/9586:181
|
||||
if c.uidonly {
|
||||
fmt.Fprintf(cmd.conn.xbw, "* %d UIDFETCH ", cmd.uid)
|
||||
} else {
|
||||
fmt.Fprintf(cmd.conn.xbw, "* %d FETCH ", cmd.conn.xsequence(cmd.uid))
|
||||
}
|
||||
data.xwriteTo(cmd.conn, cmd.conn.xbw)
|
||||
cmd.conn.xbw.Write([]byte("\r\n"))
|
||||
|
||||
cmd.msgclose()
|
||||
}
|
||||
|
||||
// We've returned all data. Now we mark messages as seen in one go, in a new write
|
||||
// transaction. We don't send untagged messages for the changes, since there may be
|
||||
// unprocessed pending changes. Instead, we broadcast them to ourselves too, so a
|
||||
// next noop/idle will return the flags to the client.
|
||||
|
||||
err := cmd.rtx.Rollback()
|
||||
c.log.Check(err, "fetch read tx rollback")
|
||||
cmd.rtx = nil
|
||||
|
||||
// ../rfc/9051:4432 We mark all messages that need it as seen at the end of the
|
||||
// command, in a single transaction.
|
||||
if len(cmd.updateSeen) > 0 || len(cmd.newPreviews) > 0 {
|
||||
c.account.WithWLock(func() {
|
||||
changes := make([]store.Change, 0, len(cmd.updateSeen)+1)
|
||||
|
||||
c.xdbwrite(func(wtx *bstore.Tx) {
|
||||
mb, err := store.MailboxID(wtx, c.mailboxID)
|
||||
if err == store.ErrMailboxExpunged {
|
||||
xusercodeErrorf("NONEXISTENT", "mailbox has been expunged")
|
||||
}
|
||||
xcheckf(err, "get mailbox for updating counts after marking as seen")
|
||||
|
||||
var modseq store.ModSeq
|
||||
|
||||
for _, uid := range cmd.updateSeen {
|
||||
m, err := bstore.QueryTx[store.Message](wtx).FilterNonzero(store.Message{MailboxID: c.mailboxID, UID: uid}).Get()
|
||||
xcheckf(err, "get message")
|
||||
if m.Expunged {
|
||||
// Message has been deleted in the mean time.
|
||||
cmd.expungeIssued = true
|
||||
continue
|
||||
}
|
||||
if m.Seen {
|
||||
// Message already marked as seen by another process.
|
||||
continue
|
||||
}
|
||||
|
||||
if modseq == 0 {
|
||||
modseq, err = c.account.NextModSeq(wtx)
|
||||
xcheckf(err, "get next mod seq")
|
||||
}
|
||||
|
||||
oldFlags := m.Flags
|
||||
mb.Sub(m.MailboxCounts())
|
||||
m.Seen = true
|
||||
mb.Add(m.MailboxCounts())
|
||||
changes = append(changes, m.ChangeFlags(oldFlags, mb))
|
||||
|
||||
m.ModSeq = modseq
|
||||
err = wtx.Update(&m)
|
||||
xcheckf(err, "mark message as seen")
|
||||
}
|
||||
|
||||
changes = append(changes, mb.ChangeCounts())
|
||||
|
||||
for uid, s := range cmd.newPreviews {
|
||||
m, err := bstore.QueryTx[store.Message](wtx).FilterNonzero(store.Message{MailboxID: c.mailboxID, UID: uid}).Get()
|
||||
xcheckf(err, "get message")
|
||||
if m.Expunged {
|
||||
// Message has been deleted in the meantime.
|
||||
cmd.expungeIssued = true
|
||||
continue
|
||||
}
|
||||
|
||||
// note: we are not updating modseq.
|
||||
|
||||
m.Preview = &s
|
||||
err = wtx.Update(&m)
|
||||
xcheckf(err, "saving preview with message")
|
||||
}
|
||||
|
||||
if modseq > 0 {
|
||||
mb.ModSeq = modseq
|
||||
err = wtx.Update(&mb)
|
||||
xcheckf(err, "update mailbox with counts and modseq")
|
||||
}
|
||||
// First report all vanished UIDs. ../rfc/7162:1714
|
||||
if len(vanishedUIDs) > 0 {
|
||||
// Mention all vanished UIDs in compact numset form.
|
||||
// ../rfc/7162:1985
|
||||
sort.Slice(vanishedUIDs, func(i, j int) bool {
|
||||
return vanishedUIDs[i] < vanishedUIDs[j]
|
||||
})
|
||||
// No hard limit on response sizes, but clients are recommended to not send more
|
||||
// than 8k. We send a more conservative max 4k.
|
||||
for _, s := range compactUIDSet(vanishedUIDs).Strings(4*1024 - 32) {
|
||||
c.bwritelinef("* VANISHED (EARLIER) %s", s)
|
||||
}
|
||||
}
|
||||
|
||||
// Broadcast these changes also to ourselves, so we'll send the updated flags, but
|
||||
// in the correct order, after other changes.
|
||||
store.BroadcastChanges(c.account, changes)
|
||||
})
|
||||
for _, uid := range uids {
|
||||
cmd.uid = uid
|
||||
cmd.conn.log.Debug("processing uid", slog.Any("uid", uid))
|
||||
cmd.process(atts)
|
||||
}
|
||||
|
||||
var zeromc store.MailboxCounts
|
||||
if cmd.deltaCounts != zeromc {
|
||||
mb.Add(cmd.deltaCounts) // Unseen/Unread will be <= 0.
|
||||
err := tx.Update(&mb)
|
||||
xcheckf(err, "updating mailbox counts")
|
||||
cmd.changes = append(cmd.changes, mb.ChangeCounts())
|
||||
// No need to update account total message size.
|
||||
}
|
||||
})
|
||||
|
||||
if len(cmd.changes) > 0 {
|
||||
// Broadcast seen updates to other connections.
|
||||
c.broadcast(cmd.changes)
|
||||
}
|
||||
|
||||
if cmd.expungeIssued {
|
||||
// ../rfc/2180:343
|
||||
// ../rfc/9051:5102
|
||||
c.xwriteresultf("%s OK [EXPUNGEISSUED] at least one message was expunged", tag)
|
||||
c.writeresultf("%s NO [EXPUNGEISSUED] at least one message was expunged", tag)
|
||||
} else {
|
||||
c.ok(tag, cmdstr)
|
||||
}
|
||||
}
|
||||
|
||||
func (cmd *fetchCmd) xmodseq() store.ModSeq {
	if cmd.modseq == 0 {
		var err error
		cmd.modseq, err = cmd.conn.account.NextModSeq(cmd.tx)
		cmd.xcheckf(err, "assigning next modseq")
	}
	return cmd.modseq
}
|
||||
|
||||
func (cmd *fetchCmd) xensureMessage() *store.Message {
|
||||
if cmd.m != nil {
|
||||
return cmd.m
|
||||
}
|
||||
|
||||
// We do not filter by Expunged, the message may have been deleted in other
|
||||
// sessions, but not in ours.
|
||||
q := bstore.QueryTx[store.Message](cmd.rtx)
|
||||
q := bstore.QueryTx[store.Message](cmd.tx)
|
||||
q.FilterNonzero(store.Message{MailboxID: cmd.mailboxID, UID: cmd.uid})
|
||||
q.FilterEqual("Expunged", false)
|
||||
m, err := q.Get()
|
||||
cmd.xcheckf(err, "get message for uid %d", cmd.uid)
|
||||
cmd.m = &m
|
||||
if m.Expunged {
|
||||
cmd.expungeIssued = true
|
||||
}
|
||||
return cmd.m
|
||||
}
|
||||
|
||||
@ -413,20 +305,16 @@ func (cmd *fetchCmd) xensureParsed() (*store.MsgReader, *message.Part) {
|
||||
return cmd.msgr, cmd.part
|
||||
}
|
||||
|
||||
// msgclose must be called after processing a message (after having written/used
// its data), even in the case of a panic.
func (cmd *fetchCmd) msgclose() {
	cmd.m = nil
	cmd.part = nil
	if cmd.msgr != nil {
		err := cmd.msgr.Close()
		cmd.conn.xsanity(err, "closing messagereader")
		cmd.msgr = nil
	}
}
|
||||
|
||||
func (cmd *fetchCmd) process(atts []fetchAtt) (rdata listspace, rerr error) {
|
||||
func (cmd *fetchCmd) process(atts []fetchAtt) {
|
||||
defer func() {
|
||||
cmd.m = nil
|
||||
cmd.part = nil
|
||||
if cmd.msgr != nil {
|
||||
err := cmd.msgr.Close()
|
||||
cmd.conn.xsanity(err, "closing messagereader")
|
||||
cmd.msgr = nil
|
||||
}
|
||||
|
||||
x := recover()
|
||||
if x == nil {
|
||||
return
|
||||
@ -434,15 +322,16 @@ func (cmd *fetchCmd) process(atts []fetchAtt) (rdata listspace, rerr error) {
|
||||
err, ok := x.(attrError)
|
||||
if !ok {
|
||||
panic(x)
|
||||
} else if rerr == nil {
|
||||
rerr = err
|
||||
}
|
||||
if errors.Is(err, bstore.ErrAbsent) {
|
||||
cmd.expungeIssued = true
|
||||
return
|
||||
}
|
||||
cmd.conn.log.Infox("processing fetch attribute", err, slog.Any("uid", cmd.uid))
|
||||
xuserErrorf("processing fetch attribute: %v", err)
|
||||
}()
|
||||
|
||||
var data listspace
|
||||
if !cmd.conn.uidonly {
|
||||
data = append(data, bare("UID"), number(cmd.uid))
|
||||
}
|
||||
data := listspace{bare("UID"), number(cmd.uid)}
|
||||
|
||||
cmd.markSeen = false
|
||||
cmd.needFlags = false
|
||||
@ -453,7 +342,17 @@ func (cmd *fetchCmd) process(atts []fetchAtt) (rdata listspace, rerr error) {
|
||||
}
|
||||
|
||||
if cmd.markSeen {
|
||||
cmd.updateSeen = append(cmd.updateSeen, cmd.uid)
|
||||
m := cmd.xensureMessage()
|
||||
cmd.deltaCounts.Sub(m.MailboxCounts())
|
||||
origFlags := m.Flags
|
||||
m.Seen = true
|
||||
cmd.deltaCounts.Add(m.MailboxCounts())
|
||||
m.ModSeq = cmd.xmodseq()
|
||||
err := cmd.tx.Update(m)
|
||||
xcheckf(err, "marking message as seen")
|
||||
// No need to update account total message size.
|
||||
|
||||
cmd.changes = append(cmd.changes, m.ChangeFlags(origFlags))
|
||||
}
|
||||
|
||||
if cmd.needFlags {
|
||||
@ -476,12 +375,15 @@ func (cmd *fetchCmd) process(atts []fetchAtt) (rdata listspace, rerr error) {
|
||||
// other mentioning of cases elsewhere in the RFC would be too superfluous.
|
||||
//
|
||||
// ../rfc/7162:877 ../rfc/7162:388 ../rfc/7162:909 ../rfc/7162:1426
|
||||
if cmd.needModseq || cmd.hasChangedSince || cmd.conn.enabled[capQresync] && cmd.isUID {
|
||||
if cmd.needModseq || cmd.hasChangedSince || cmd.conn.enabled[capQresync] && (cmd.isUID || cmd.markSeen) {
|
||||
m := cmd.xensureMessage()
|
||||
data = append(data, bare("MODSEQ"), listspace{bare(fmt.Sprintf("%d", m.ModSeq.Client()))})
|
||||
}
|
||||
|
||||
return data, nil
|
||||
// Write errors are turned into panics because we write through c.
|
||||
fmt.Fprintf(cmd.conn.bw, "* %d FETCH ", cmd.conn.xsequence(cmd.uid))
|
||||
data.writeTo(cmd.conn, cmd.conn.bw)
|
||||
cmd.conn.bw.Write([]byte("\r\n"))
|
||||
}
|
||||
|
||||
// result for one attribute. if processing fails, e.g. because data was requested
|
||||
@ -490,12 +392,8 @@ func (cmd *fetchCmd) process(atts []fetchAtt) (rdata listspace, rerr error) {
|
||||
func (cmd *fetchCmd) xprocessAtt(a fetchAtt) []token {
|
||||
switch a.field {
|
||||
case "UID":
|
||||
// Present by default without uidonly. For uidonly, we only add it when explicitly
|
||||
// requested. ../rfc/9586:184
|
||||
if cmd.conn.uidonly {
|
||||
return []token{bare("UID"), number(cmd.uid)}
|
||||
}
|
||||
|
||||
// Always present.
|
||||
return nil
|
||||
case "ENVELOPE":
|
||||
_, part := cmd.xensureParsed()
|
||||
envelope := xenvelope(part)
|
||||
@ -506,20 +404,9 @@ func (cmd *fetchCmd) xprocessAtt(a fetchAtt) []token {
|
||||
m := cmd.xensureMessage()
|
||||
return []token{bare("INTERNALDATE"), dquote(m.Received.Format("_2-Jan-2006 15:04:05 -0700"))}
|
||||
|
||||
case "SAVEDATE":
|
||||
m := cmd.xensureMessage()
|
||||
// For messages in storage from before we implemented this extension, we don't have
|
||||
// a savedate, and we return nil. This is normally meant to be per mailbox, but
|
||||
// returning it per message should be fine. ../rfc/8514:191
|
||||
var savedate token = nilt
|
||||
if m.SaveDate != nil {
|
||||
savedate = dquote(m.SaveDate.Format("_2-Jan-2006 15:04:05 -0700"))
|
||||
}
|
||||
return []token{bare("SAVEDATE"), savedate}
|
||||
|
||||
case "BODYSTRUCTURE":
|
||||
_, part := cmd.xensureParsed()
|
||||
bs := xbodystructure(cmd.conn.log, part, true)
|
||||
bs := xbodystructure(part)
|
||||
return []token{bare("BODYSTRUCTURE"), bs}
|
||||
|
||||
case "BODY":
|
||||
@ -600,37 +487,6 @@ func (cmd *fetchCmd) xprocessAtt(a fetchAtt) []token {
|
||||
case "MODSEQ":
|
||||
cmd.needModseq = true
|
||||
|
||||
case "PREVIEW":
|
||||
m := cmd.xensureMessage()
|
||||
preview := m.Preview
|
||||
// We ignore "lazy", generating the preview is fast enough.
|
||||
if preview == nil {
|
||||
// Get the preview. We'll save all generated previews in a single transaction at
|
||||
// the end.
|
||||
_, p := cmd.xensureParsed()
|
||||
s, err := p.Preview(cmd.conn.log)
|
||||
cmd.xcheckf(err, "generating preview")
|
||||
preview = &s
|
||||
cmd.newPreviews[m.UID] = s
|
||||
}
|
||||
var t token = nilt
|
||||
if preview != nil {
|
||||
s := *preview
|
||||
|
||||
// Limit to 200 characters (not bytes). ../rfc/8970:206
|
||||
var n, o int
|
||||
for o = range s {
|
||||
n++
|
||||
if n > 200 {
|
||||
s = s[:o]
|
||||
break
|
||||
}
|
||||
}
|
||||
s = strings.TrimSpace(s)
|
||||
t = string0(s)
|
||||
}
|
||||
return []token{bare(a.field), t}
|
||||
|
||||
default:
|
||||
xserverErrorf("field %q not yet implemented", a.field)
|
||||
}
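// The PREVIEW case above limits the preview to 200 characters (Unicode code
// points), not bytes: ranging over a string yields the byte offset of each
// rune, so slicing at the offset of rune 200 keeps exactly the first 200. A
// standalone sketch of the same truncation (hypothetical helper):
truncateChars := func(s string, max int) string {
	n := 0
	for o := range s { // o is the byte offset of rune number n
		if n == max {
			return strings.TrimSpace(s[:o])
		}
		n++
	}
	return strings.TrimSpace(s)
}
// truncateChars("héllo wörld", 3) == "hél", regardless of how many bytes that is.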
|
||||
@ -776,15 +632,11 @@ func (cmd *fetchCmd) xbinary(a fetchAtt) (string, token) {
|
||||
cmd.xerrorf("binary only allowed on leaf parts, not multipart/* or message/rfc822 or message/global")
|
||||
}
|
||||
|
||||
var cte string
|
||||
if p.ContentTransferEncoding != nil {
|
||||
cte = *p.ContentTransferEncoding
|
||||
}
|
||||
switch cte {
|
||||
switch p.ContentTransferEncoding {
|
||||
case "", "7BIT", "8BIT", "BINARY", "BASE64", "QUOTED-PRINTABLE":
|
||||
default:
|
||||
// ../rfc/9051:5913
|
||||
xusercodeErrorf("UNKNOWN-CTE", "unknown Content-Transfer-Encoding %q", cte)
|
||||
xusercodeErrorf("UNKNOWN-CTE", "unknown Content-Transfer-Encoding %q", p.ContentTransferEncoding)
|
||||
}
|
||||
|
||||
r := p.Reader()
|
||||
@ -808,7 +660,7 @@ func (cmd *fetchCmd) xbody(a fetchAtt) (string, token) {
|
||||
|
||||
if a.section == nil {
|
||||
// Non-extensible form of BODYSTRUCTURE.
|
||||
return a.field, xbodystructure(cmd.conn.log, part, false)
|
||||
return a.field, xbodystructure(part)
|
||||
}
|
||||
|
||||
cmd.peekOrSeen(a.peek)
|
||||
@ -820,13 +672,16 @@ func (cmd *fetchCmd) xbody(a fetchAtt) (string, token) {
|
||||
var offset int64
|
||||
count := m.Size
|
||||
if a.partial != nil {
|
||||
offset = min(int64(a.partial.offset), m.Size)
|
||||
offset = int64(a.partial.offset)
|
||||
if offset > m.Size {
|
||||
offset = m.Size
|
||||
}
|
||||
count = int64(a.partial.count)
|
||||
if offset+count > m.Size {
|
||||
count = m.Size - offset
|
||||
}
|
||||
}
|
||||
return respField, readerSizeSyncliteral{&moxio.AtReader{R: msgr, Offset: offset}, count, false}
|
||||
return respField, readerSizeSyncliteral{&moxio.AtReader{R: msgr, Offset: offset}, count}
|
||||
}
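// The partial <offset.count> handling above clamps the requested range to the
// message: the start offset is capped at the message size and the count is
// shrunk so offset+count never exceeds it, so an out-of-range request yields
// an empty literal rather than an error. Sketch with hypothetical names; for
// a 57-byte message, <100000.100000> becomes offset 57, count 0:
clampPartial := func(offset, count, size int64) (int64, int64) {
	if offset > size {
		offset = size
	}
	if offset+count > size {
		count = size - offset
	}
	return offset, count
}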
|
||||
|
||||
sr := cmd.xsection(a.section, part)
|
||||
@ -865,40 +720,35 @@ func (cmd *fetchCmd) xpartnumsDeref(nums []uint32, p *message.Part) *message.Par
|
||||
}
|
||||
|
||||
func (cmd *fetchCmd) xsection(section *sectionSpec, p *message.Part) io.Reader {
|
||||
// msgtext is not nil, i.e. HEADER* or TEXT (not MIME), for the top-level part (a message).
|
||||
if section.part == nil {
|
||||
return cmd.xsectionMsgtext(section.msgtext, p)
|
||||
}
|
||||
|
||||
p = cmd.xpartnumsDeref(section.part.part, p)
|
||||
|
||||
// If there is no sectionMsgText, then this isn't for HEADER*, TEXT or MIME, i.e. a
|
||||
// part body, e.g. "BODY[1]".
|
||||
if section.part.text == nil {
|
||||
return p.RawReader()
|
||||
}
|
||||
|
||||
// MIME is defined for all parts. Otherwise it's HEADER* or TEXT, which is only
|
||||
// defined for parts that are messages. ../rfc/9051:4500 ../rfc/9051:4517
|
||||
if !section.part.text.mime {
|
||||
if p.Message == nil {
|
||||
cmd.xerrorf("part is not a message, cannot request header* or text")
|
||||
}
|
||||
|
||||
// ../rfc/9051:4535
|
||||
if p.Message != nil {
|
||||
err := p.SetMessageReaderAt()
|
||||
cmd.xcheckf(err, "preparing submessage")
|
||||
p = p.Message
|
||||
}
|
||||
|
||||
if !section.part.text.mime {
|
||||
return cmd.xsectionMsgtext(section.part.text.msgtext, p)
|
||||
}
|
||||
|
||||
// MIME header, see ../rfc/9051:4514 ../rfc/2045:1652
|
||||
// MIME header, see ../rfc/9051:4534 ../rfc/2045:1645
|
||||
h, err := io.ReadAll(p.HeaderReader())
|
||||
cmd.xcheckf(err, "reading header")
|
||||
|
||||
matchesFields := func(line []byte) bool {
|
||||
k := textproto.CanonicalMIMEHeaderKey(string(bytes.TrimRight(bytes.SplitN(line, []byte(":"), 2)[0], " \t")))
|
||||
return strings.HasPrefix(k, "Content-")
|
||||
// Only add MIME-Version and additional CRLF for messages, not other parts. ../rfc/2045:1645 ../rfc/2045:1652
|
||||
return (p.Envelope != nil && k == "Mime-Version") || strings.HasPrefix(k, "Content-")
|
||||
}
|
||||
|
||||
var match bool
|
||||
@ -912,7 +762,7 @@ func (cmd *fetchCmd) xsection(section *sectionSpec, p *message.Part) io.Reader {
|
||||
h = h[len(line):]
|
||||
|
||||
match = matchesFields(line) || match && (bytes.HasPrefix(line, []byte(" ")) || bytes.HasPrefix(line, []byte("\t")))
|
||||
if match {
|
||||
if match || len(line) == 2 {
|
||||
hb.Write(line)
|
||||
}
|
||||
}
|
||||
@ -920,10 +770,11 @@ func (cmd *fetchCmd) xsection(section *sectionSpec, p *message.Part) io.Reader {
|
||||
}
|
||||
|
||||
func (cmd *fetchCmd) xsectionMsgtext(smt *sectionMsgtext, p *message.Part) io.Reader {
|
||||
switch smt.s {
|
||||
case "HEADER":
|
||||
if smt.s == "HEADER" {
|
||||
return p.HeaderReader()
|
||||
}
|
||||
|
||||
switch smt.s {
|
||||
case "HEADER.FIELDS":
|
||||
return cmd.xmodifiedHeader(p, smt.headers, false)
|
||||
|
||||
@ -931,8 +782,8 @@ func (cmd *fetchCmd) xsectionMsgtext(smt *sectionMsgtext, p *message.Part) io.Re
|
||||
return cmd.xmodifiedHeader(p, smt.headers, true)
|
||||
|
||||
case "TEXT":
|
||||
// TEXT the body (excluding headers) of a message, either the top-level message, or
|
||||
// a nested as message/rfc822 or message/global. ../rfc/9051:4517
|
||||
// It appears imap clients expect to get the body of the message, not a "text body"
|
||||
// which sounds like it means a text/* part of a message. ../rfc/9051:4517
|
||||
return p.RawReader()
|
||||
}
|
||||
panic(serverError{fmt.Errorf("missing case")})
|
||||
@ -983,24 +834,27 @@ func (cmd *fetchCmd) sectionMsgtextName(smt *sectionMsgtext) string {
|
||||
return s
|
||||
}
|
||||
|
||||
func bodyFldParams(p *message.Part) token {
|
||||
if len(p.ContentTypeParams) == 0 {
|
||||
func bodyFldParams(params map[string]string) token {
|
||||
if len(params) == 0 {
|
||||
return nilt
|
||||
}
|
||||
params := make(listspace, 0, 2*len(p.ContentTypeParams))
|
||||
// Ensure same ordering, easier for testing.
|
||||
for _, k := range slices.Sorted(maps.Keys(p.ContentTypeParams)) {
|
||||
v := p.ContentTypeParams[k]
|
||||
params = append(params, string0(strings.ToUpper(k)), string0(v))
|
||||
var keys []string
|
||||
for k := range params {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
return params
|
||||
sort.Strings(keys)
|
||||
l := make(listspace, 2*len(keys))
|
||||
i := 0
|
||||
for _, k := range keys {
|
||||
l[i] = string0(strings.ToUpper(k))
|
||||
l[i+1] = string0(params[k])
|
||||
i += 2
|
||||
}
|
||||
return l
|
||||
}
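// Sketch of the ordering above: Content-Type parameters become a parenthesized
// list of uppercased keys and their values, with the keys sorted so output is
// deterministic across runs. Standalone illustration (hypothetical helper, not
// mox's token types):
func paramsList(params map[string]string) string {
	if len(params) == 0 {
		return "NIL"
	}
	parts := make([]string, 0, len(params))
	for _, k := range slices.Sorted(maps.Keys(params)) {
		parts = append(parts, fmt.Sprintf("%q %q", strings.ToUpper(k), params[k]))
	}
	return "(" + strings.Join(parts, " ") + ")"
}
// paramsList(map[string]string{"charset": "us-ascii"}) yields ("CHARSET" "us-ascii"),
// the body-fld-param form used in BODY and BODYSTRUCTURE, as in RFC 3501's example
// ("TEXT" "PLAIN" ("CHARSET" "US-ASCII") NIL NIL "7BIT" 3028 92).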
|
||||
|
||||
func bodyFldEnc(cte *string) token {
|
||||
var s string
|
||||
if cte != nil {
|
||||
s = *cte
|
||||
}
|
||||
func bodyFldEnc(s string) token {
|
||||
up := strings.ToUpper(s)
|
||||
switch up {
|
||||
case "7BIT", "8BIT", "BINARY", "BASE64", "QUOTED-PRINTABLE":
|
||||
@ -1009,92 +863,25 @@ func bodyFldEnc(cte *string) token {
|
||||
return string0(s)
|
||||
}
|
||||
|
||||
func bodyFldMd5(p *message.Part) token {
|
||||
if p.ContentMD5 == nil {
|
||||
return nilt
|
||||
}
|
||||
return string0(*p.ContentMD5)
|
||||
}
|
||||
|
||||
func bodyFldDisp(log mlog.Log, p *message.Part) token {
|
||||
if p.ContentDisposition == nil {
|
||||
return nilt
|
||||
}
|
||||
|
||||
// ../rfc/9051:5989
|
||||
// mime.ParseMediaType recombines parameter value continuations like "title*0" and
|
||||
// "title*1" into "title". ../rfc/2231:147
|
||||
// And decodes character sets and removes language tags, like
|
||||
// "title*0*=us-ascii'en'hello%20world. ../rfc/2231:210
|
||||
|
||||
disp, params, err := mime.ParseMediaType(*p.ContentDisposition)
|
||||
if err != nil {
|
||||
log.Debugx("parsing content-disposition, ignoring", err, slog.String("header", *p.ContentDisposition))
|
||||
return nilt
|
||||
} else if len(params) == 0 {
|
||||
log.Debug("content-disposition has no parameters, ignoring", slog.String("header", *p.ContentDisposition))
|
||||
return nilt
|
||||
}
|
||||
var fields listspace
|
||||
for _, k := range slices.Sorted(maps.Keys(params)) {
|
||||
fields = append(fields, string0(k), string0(params[k]))
|
||||
}
|
||||
return listspace{string0(disp), fields}
|
||||
}
|
||||
|
||||
func bodyFldLang(p *message.Part) token {
|
||||
// todo: ../rfc/3282:86 ../rfc/5646:218 we currently just split on comma and trim space, should properly parse header.
|
||||
if p.ContentLanguage == nil {
|
||||
return nilt
|
||||
}
|
||||
var l listspace
|
||||
for _, s := range strings.Split(*p.ContentLanguage, ",") {
|
||||
s = strings.TrimSpace(s)
|
||||
if s == "" {
|
||||
return string0(*p.ContentLanguage)
|
||||
}
|
||||
l = append(l, string0(s))
|
||||
}
|
||||
return l
|
||||
}
|
||||
|
||||
func bodyFldLoc(p *message.Part) token {
|
||||
if p.ContentLocation == nil {
|
||||
return nilt
|
||||
}
|
||||
return string0(*p.ContentLocation)
|
||||
}
|
||||
|
||||
// xbodystructure returns a "body".
|
||||
// calls itself for multipart messages and message/{rfc822,global}.
|
||||
func xbodystructure(log mlog.Log, p *message.Part, extensible bool) token {
|
||||
func xbodystructure(p *message.Part) token {
|
||||
if p.MediaType == "MULTIPART" {
|
||||
// Multipart, ../rfc/9051:6355 ../rfc/9051:6411
|
||||
var bodies concat
|
||||
for i := range p.Parts {
|
||||
bodies = append(bodies, xbodystructure(log, &p.Parts[i], extensible))
|
||||
bodies = append(bodies, xbodystructure(&p.Parts[i]))
|
||||
}
|
||||
r := listspace{bodies, string0(p.MediaSubType)}
|
||||
// ../rfc/9051:6371
|
||||
if extensible {
|
||||
r = append(r,
|
||||
bodyFldParams(p),
|
||||
bodyFldDisp(log, p),
|
||||
bodyFldLang(p),
|
||||
bodyFldLoc(p),
|
||||
)
|
||||
}
|
||||
return r
|
||||
return listspace{bodies, string0(p.MediaSubType)}
|
||||
}
|
||||
|
||||
// ../rfc/9051:6355
|
||||
var r listspace
|
||||
if p.MediaType == "TEXT" {
|
||||
// ../rfc/9051:6404 ../rfc/9051:6418
|
||||
r = listspace{
|
||||
return listspace{
|
||||
dquote("TEXT"), string0(p.MediaSubType), // ../rfc/9051:6739
|
||||
// ../rfc/9051:6376
|
||||
bodyFldParams(p), // ../rfc/9051:6401
|
||||
bodyFldParams(p.ContentTypeParams), // ../rfc/9051:6401
|
||||
nilOrString(p.ContentID),
|
||||
nilOrString(p.ContentDescription),
|
||||
bodyFldEnc(p.ContentTransferEncoding),
|
||||
@ -1104,45 +891,34 @@ func xbodystructure(log mlog.Log, p *message.Part, extensible bool) token {
|
||||
} else if p.MediaType == "MESSAGE" && (p.MediaSubType == "RFC822" || p.MediaSubType == "GLOBAL") {
|
||||
// ../rfc/9051:6415
|
||||
// note: we don't have to prepare p.Message for reading, because we aren't going to read from it.
|
||||
r = listspace{
|
||||
return listspace{
|
||||
dquote("MESSAGE"), dquote(p.MediaSubType), // ../rfc/9051:6732
|
||||
// ../rfc/9051:6376
|
||||
bodyFldParams(p), // ../rfc/9051:6401
|
||||
bodyFldParams(p.ContentTypeParams), // ../rfc/9051:6401
|
||||
nilOrString(p.ContentID),
|
||||
nilOrString(p.ContentDescription),
|
||||
bodyFldEnc(p.ContentTransferEncoding),
|
||||
number(p.EndOffset - p.BodyOffset),
|
||||
xenvelope(p.Message),
|
||||
xbodystructure(log, p.Message, extensible),
|
||||
xbodystructure(p.Message),
|
||||
number(p.RawLineCount), // todo: or mp.RawLineCount?
|
||||
}
|
||||
} else {
|
||||
var media token
|
||||
switch p.MediaType {
|
||||
case "APPLICATION", "AUDIO", "IMAGE", "FONT", "MESSAGE", "MODEL", "VIDEO":
|
||||
media = dquote(p.MediaType)
|
||||
default:
|
||||
media = string0(p.MediaType)
|
||||
}
|
||||
// ../rfc/9051:6404 ../rfc/9051:6407
|
||||
r = listspace{
|
||||
media, string0(p.MediaSubType), // ../rfc/9051:6723
|
||||
// ../rfc/9051:6376
|
||||
bodyFldParams(p), // ../rfc/9051:6401
|
||||
nilOrString(p.ContentID),
|
||||
nilOrString(p.ContentDescription),
|
||||
bodyFldEnc(p.ContentTransferEncoding),
|
||||
number(p.EndOffset - p.BodyOffset),
|
||||
}
|
||||
}
|
||||
if extensible {
|
||||
// ../rfc/9051:6366
|
||||
r = append(r,
|
||||
bodyFldMd5(p),
|
||||
bodyFldDisp(log, p),
|
||||
bodyFldLang(p),
|
||||
bodyFldLoc(p),
|
||||
)
|
||||
var media token
|
||||
switch p.MediaType {
|
||||
case "APPLICATION", "AUDIO", "IMAGE", "FONT", "MESSAGE", "MODEL", "VIDEO":
|
||||
media = dquote(p.MediaType)
|
||||
default:
|
||||
media = string0(p.MediaType)
|
||||
}
|
||||
// ../rfc/9051:6404 ../rfc/9051:6407
|
||||
return listspace{
|
||||
media, string0(p.MediaSubType), // ../rfc/9051:6723
|
||||
// ../rfc/9051:6376
|
||||
bodyFldParams(p.ContentTypeParams), // ../rfc/9051:6401
|
||||
nilOrString(p.ContentID),
|
||||
nilOrString(p.ContentDescription),
|
||||
bodyFldEnc(p.ContentTransferEncoding),
|
||||
number(p.EndOffset - p.BodyOffset),
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
@ -5,33 +5,22 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/mjl-/bstore"
|
||||
|
||||
"github.com/mjl-/mox/imapclient"
|
||||
"github.com/mjl-/mox/store"
|
||||
)
|
||||
|
||||
func TestFetch(t *testing.T) {
|
||||
testFetch(t, false)
|
||||
}
|
||||
|
||||
func TestFetchUIDOnly(t *testing.T) {
|
||||
testFetch(t, true)
|
||||
}
|
||||
|
||||
func testFetch(t *testing.T, uidonly bool) {
|
||||
tc := start(t, uidonly)
|
||||
tc := start(t)
|
||||
defer tc.close()
|
||||
|
||||
tc.login("mjl@mox.example", password0)
|
||||
tc.client.Enable(imapclient.CapIMAP4rev2)
|
||||
tc.client.Login("mjl@mox.example", password0)
|
||||
tc.client.Enable("imap4rev2")
|
||||
received, err := time.Parse(time.RFC3339, "2022-11-16T10:01:00+01:00")
|
||||
tc.check(err, "parse time")
|
||||
tc.client.Append("inbox", makeAppendTime(exampleMsg, received))
|
||||
tc.client.Append("inbox", nil, &received, []byte(exampleMsg))
|
||||
tc.client.Select("inbox")
|
||||
|
||||
uid1 := imapclient.FetchUID(1)
|
||||
date1 := imapclient.FetchInternalDate{Date: received}
|
||||
date1 := imapclient.FetchInternalDate("16-Nov-2022 10:01:00 +0100")
|
||||
rfcsize1 := imapclient.FetchRFC822Size(len(exampleMsg))
|
||||
env1 := imapclient.FetchEnvelope{
|
||||
Date: "Mon, 7 Feb 1994 21:52:25 -0800",
|
||||
@ -43,29 +32,20 @@ func testFetch(t *testing.T, uidonly bool) {
|
||||
MessageID: "<B27397-0100000@Blurdybloop.example>",
|
||||
}
|
||||
noflags := imapclient.FetchFlags(nil)
|
||||
bodystructbody1 := imapclient.BodyTypeText{
|
||||
MediaType: "TEXT",
|
||||
MediaSubtype: "PLAIN",
|
||||
BodyFields: imapclient.BodyFields{
|
||||
Params: [][2]string{[...]string{"CHARSET", "US-ASCII"}},
|
||||
Octets: 57,
|
||||
},
|
||||
Lines: 2,
|
||||
}
|
||||
bodyxstructure1 := imapclient.FetchBodystructure{
|
||||
RespAttr: "BODY",
|
||||
Body: bodystructbody1,
|
||||
Body: imapclient.BodyTypeText{
|
||||
MediaType: "TEXT",
|
||||
MediaSubtype: "PLAIN",
|
||||
BodyFields: imapclient.BodyFields{
|
||||
Params: [][2]string{[...]string{"CHARSET", "US-ASCII"}},
|
||||
Octets: 57,
|
||||
},
|
||||
Lines: 2,
|
||||
},
|
||||
}
|
||||
bodystructure1 := bodyxstructure1
|
||||
bodystructure1.RespAttr = "BODYSTRUCTURE"
|
||||
bodyext1 := imapclient.BodyExtension1Part{
|
||||
Disposition: ptr((*string)(nil)),
|
||||
DispositionParams: ptr([][2]string(nil)),
|
||||
Language: ptr([]string(nil)),
|
||||
Location: ptr((*string)(nil)),
|
||||
}
|
||||
bodystructbody1.Ext = &bodyext1
|
||||
bodystructure1.Body = bodystructbody1
|
||||
|
||||
split := strings.SplitN(exampleMsg, "\r\n\r\n", 2)
|
||||
exampleMsgHeader := split[0] + "\r\n\r\n"
|
||||
@ -92,188 +72,136 @@ func testFetch(t *testing.T, uidonly bool) {
|
||||
headerSplit := strings.SplitN(exampleMsgHeader, "\r\n", 2)
|
||||
dateheader1 := imapclient.FetchBody{RespAttr: "BODY[HEADER.FIELDS (Date)]", Section: "HEADER.FIELDS (Date)", Body: headerSplit[0] + "\r\n\r\n"}
|
||||
nodateheader1 := imapclient.FetchBody{RespAttr: "BODY[HEADER.FIELDS.NOT (Date)]", Section: "HEADER.FIELDS.NOT (Date)", Body: headerSplit[1]}
|
||||
mime1 := imapclient.FetchBody{RespAttr: "BODY[1.MIME]", Section: "1.MIME", Body: "Content-Type: TEXT/PLAIN; CHARSET=US-ASCII\r\n"}
|
||||
date1header1 := imapclient.FetchBody{RespAttr: "BODY[1.HEADER.FIELDS (Date)]", Section: "1.HEADER.FIELDS (Date)", Body: headerSplit[0] + "\r\n\r\n"}
|
||||
nodate1header1 := imapclient.FetchBody{RespAttr: "BODY[1.HEADER.FIELDS.NOT (Date)]", Section: "1.HEADER.FIELDS.NOT (Date)", Body: headerSplit[1]}
|
||||
mime1 := imapclient.FetchBody{RespAttr: "BODY[1.MIME]", Section: "1.MIME", Body: "MIME-Version: 1.0\r\nContent-Type: TEXT/PLAIN; CHARSET=US-ASCII\r\n\r\n"}
|
||||
|
||||
flagsSeen := imapclient.FetchFlags{`\Seen`}
|
||||
|
||||
if !uidonly {
|
||||
tc.transactf("ok", "fetch 1 all")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, date1, rfcsize1, env1, noflags))
|
||||
tc.transactf("ok", "fetch 1 all")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, date1, rfcsize1, env1, noflags}})
|
||||
|
||||
tc.transactf("ok", "fetch 1 fast")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, date1, rfcsize1, noflags))
|
||||
tc.transactf("ok", "fetch 1 fast")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, date1, rfcsize1, noflags}})
|
||||
|
||||
tc.transactf("ok", "fetch 1 full")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, date1, rfcsize1, env1, bodyxstructure1, noflags))
|
||||
tc.transactf("ok", "fetch 1 full")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, date1, rfcsize1, env1, bodyxstructure1, noflags}})
|
||||
|
||||
tc.transactf("ok", "fetch 1 flags")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, noflags))
|
||||
tc.transactf("ok", "fetch 1 flags")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, noflags}})
|
||||
|
||||
tc.transactf("ok", "fetch 1 bodystructure")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, bodystructure1))
|
||||
tc.transactf("ok", "fetch 1 bodystructure")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, bodystructure1}})
|
||||
|
||||
// Should be returned unmodified, because there is no content-transfer-encoding.
|
||||
tc.transactf("ok", "fetch 1 binary[]")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, binary1, noflags))
|
||||
tc.transactf("ok", "noop")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, flagsSeen))
|
||||
// Should be returned unmodified, because there is no content-transfer-encoding.
|
||||
tc.transactf("ok", "fetch 1 binary[]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, binary1, flagsSeen}})
|
||||
|
||||
tc.transactf("ok", "fetch 1 binary[1]")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, binarypart1)) // Seen flag not changed.
|
||||
tc.transactf("ok", "fetch 1 binary[1]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, binarypart1}}) // Seen flag not changed.
|
||||
|
||||
tc.client.MSNStoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "uid fetch 1 binary[]<1.1>")
|
||||
tc.xuntagged(
|
||||
tc.untaggedFetch(1, 1, binarypartial1, noflags),
|
||||
tc.untaggedFetch(1, 1, flagsSeen), // For UID FETCH, we get the flags during the command.
|
||||
)
|
||||
tc.client.StoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "fetch 1 binary[]<1.1>")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, binarypartial1, flagsSeen}})
|
||||
|
||||
tc.client.MSNStoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "fetch 1 binary[1]<1.1>")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, binarypartpartial1, noflags))
|
||||
tc.transactf("ok", "noop")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, flagsSeen))
|
||||
tc.client.StoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "fetch 1 binary[1]<1.1>")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, binarypartpartial1, flagsSeen}})
|
||||
|
||||
tc.client.MSNStoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "fetch 1 binary[]<10000.10001>")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, binaryend1, noflags))
|
||||
tc.transactf("ok", "noop")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, flagsSeen))
|
||||
tc.client.StoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "fetch 1 binary[]<10000.10001>")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, binaryend1, flagsSeen}})
|
||||
|
||||
tc.client.MSNStoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "fetch 1 binary[1]<10000.10001>")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, binarypartend1, noflags))
|
||||
tc.transactf("ok", "noop")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, flagsSeen))
|
||||
tc.client.StoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "fetch 1 binary[1]<10000.10001>")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, binarypartend1, flagsSeen}})
|
||||
|
||||
tc.transactf("ok", "fetch 1 binary.size[]")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, binarysize1))
|
||||
tc.client.StoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "fetch 1 binary.size[]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, binarysize1}})
|
||||
|
||||
tc.transactf("ok", "fetch 1 binary.size[1]")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, binarysizepart1))
|
||||
tc.transactf("ok", "fetch 1 binary.size[1]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, binarysizepart1}})
|
||||
|
||||
tc.client.MSNStoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "fetch 1 body[]")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, body1, noflags))
|
||||
tc.transactf("ok", "noop")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, flagsSeen))
|
||||
tc.transactf("ok", "fetch 1 body[]<1.2>")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, bodyoff1)) // Already seen.
|
||||
tc.transactf("ok", "noop")
|
||||
tc.xuntagged() // Already seen.
|
||||
tc.client.StoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "fetch 1 body[]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, body1, flagsSeen}})
|
||||
tc.transactf("ok", "fetch 1 body[]<1.2>")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, bodyoff1}}) // Already seen.
|
||||
|
||||
tc.client.MSNStoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "fetch 1 body[1]")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, bodypart1, noflags))
|
||||
tc.transactf("ok", "noop")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, flagsSeen))
|
||||
tc.client.StoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "fetch 1 body[1]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, bodypart1, flagsSeen}})
|
||||
|
||||
tc.client.MSNStoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "fetch 1 body[1]<1.2>")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, body1off1, noflags))
|
||||
tc.transactf("ok", "noop")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, flagsSeen))
|
||||
tc.client.StoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "fetch 1 body[1]<1.2>")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, body1off1, flagsSeen}})
|
||||
|
||||
tc.client.MSNStoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "fetch 1 body[1]<100000.100000>")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, bodyend1, noflags))
|
||||
tc.transactf("ok", "noop")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, flagsSeen))
|
||||
tc.client.StoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "fetch 1 body[1]<100000.100000>")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, bodyend1, flagsSeen}})
|
||||
|
||||
tc.client.MSNStoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "fetch 1 body[header]")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, bodyheader1, noflags))
|
||||
tc.transactf("ok", "noop")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, flagsSeen))
|
||||
tc.client.StoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "fetch 1 body[header]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, bodyheader1, flagsSeen}})
|
||||
|
||||
tc.client.MSNStoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "fetch 1 body[text]")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, bodytext1, noflags))
|
||||
tc.transactf("ok", "noop")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, flagsSeen))
|
||||
tc.client.StoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "fetch 1 body[text]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, bodytext1, flagsSeen}})
|
||||
|
||||
// equivalent to body.peek[header], ../rfc/3501:3183
|
||||
tc.transactf("ok", "fetch 1 rfc822.header")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, rfcheader1))
|
||||
// equivalent to body.peek[header], ../rfc/3501:3183
|
||||
tc.client.StoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "fetch 1 rfc822.header")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, rfcheader1}})
|
||||
|
||||
// equivalent to body[text], ../rfc/3501:3199
|
||||
tc.client.MSNStoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "fetch 1 rfc822.text")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, rfctext1, noflags))
|
||||
tc.transactf("ok", "noop")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, flagsSeen))
|
||||
// equivalent to body[text], ../rfc/3501:3199
|
||||
tc.client.StoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "fetch 1 rfc822.text")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, rfctext1, flagsSeen}})
|
||||
|
||||
// equivalent to body[], ../rfc/3501:3179
|
||||
tc.client.MSNStoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "fetch 1 rfc822")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, rfc1, noflags))
|
||||
tc.transactf("ok", "noop")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, flagsSeen))
|
||||
// equivalent to body[], ../rfc/3501:3179
|
||||
tc.client.StoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "fetch 1 rfc822")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, rfc1, flagsSeen}})
|
||||
|
||||
// With PEEK, we should not get the \Seen flag.
|
||||
tc.client.MSNStoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "fetch 1 body.peek[]")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, body1))
|
||||
// With PEEK, we should not get the \Seen flag.
|
||||
tc.client.StoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "fetch 1 body.peek[]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, body1}})
|
||||
|
||||
tc.transactf("ok", "fetch 1 binary.peek[]")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, binary1))
|
||||
tc.transactf("ok", "fetch 1 binary.peek[]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, binary1}})
|
||||
|
||||
// HEADER.FIELDS and .NOT
|
||||
tc.transactf("ok", "fetch 1 body.peek[header.fields (date)]")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, dateheader1))
|
||||
tc.transactf("ok", "fetch 1 body.peek[header.fields.not (date)]")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, nodateheader1))
|
||||
// For non-multipart messages, 1 means the whole message, but since it's not of
|
||||
// type message/{rfc822,global} (a message), you can't get the message headers.
|
||||
// ../rfc/9051:4481
|
||||
tc.transactf("no", "fetch 1 body.peek[1.header]")
|
||||
// HEADER.FIELDS and .NOT
|
||||
tc.transactf("ok", "fetch 1 body.peek[header.fields (date)]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, dateheader1}})
|
||||
tc.transactf("ok", "fetch 1 body.peek[header.fields.not (date)]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, nodateheader1}})
|
||||
// For non-multipart messages, 1 means the whole message. ../rfc/9051:4481
|
||||
tc.transactf("ok", "fetch 1 body.peek[1.header.fields (date)]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, date1header1}})
|
||||
tc.transactf("ok", "fetch 1 body.peek[1.header.fields.not (date)]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, nodate1header1}})
|
||||
|
||||
// MIME, part 1 for non-multipart messages is the message itself. ../rfc/9051:4481
|
||||
tc.transactf("ok", "fetch 1 body.peek[1.mime]")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, mime1))
|
||||
// MIME, part 1 for non-multipart messages is the message itself. ../rfc/9051:4481
|
||||
tc.transactf("ok", "fetch 1 body.peek[1.mime]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, mime1}})
|
||||
|
||||
// Missing sequence number. ../rfc/9051:7018
|
||||
tc.transactf("bad", "fetch 2 body[]")
|
||||
// Missing sequence number. ../rfc/9051:7018
|
||||
tc.transactf("bad", "fetch 2 body[]")
|
||||
|
||||
tc.client.MSNStoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "fetch 1:1 body[]")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, body1, noflags))
|
||||
tc.transactf("ok", "noop")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, flagsSeen))
|
||||
} else {
|
||||
tc.client.UIDStoreFlagsAdd("1", true, `\Seen`)
|
||||
tc.transactf("ok", "noop")
|
||||
}
|
||||
tc.transactf("ok", "fetch 1:1 body[]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, body1, flagsSeen}})
|
||||
|
||||
// UID fetch
|
||||
tc.transactf("ok", "uid fetch 1 body[]")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, body1))
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, body1}})
|
||||
|
||||
// UID fetch
|
||||
tc.transactf("ok", "uid fetch 2 body[]")
|
||||
tc.xuntagged()
|
||||
|
||||
// SAVEDATE
|
||||
tc.transactf("ok", "uid fetch 1 savedate")
|
||||
// Fetch exact SaveDate we'll be expecting from server.
|
||||
var saveDate time.Time
|
||||
err = tc.account.DB.Read(ctxbg, func(tx *bstore.Tx) error {
|
||||
inbox, err := tc.account.MailboxFind(tx, "Inbox")
|
||||
tc.check(err, "get inbox")
|
||||
if inbox == nil {
|
||||
t.Fatalf("missing inbox")
|
||||
}
|
||||
m, err := bstore.QueryTx[store.Message](tx).FilterNonzero(store.Message{MailboxID: inbox.ID, UID: store.UID(uid1)}).Get()
|
||||
tc.check(err, "get message")
|
||||
if m.SaveDate == nil {
|
||||
t.Fatalf("zero savedate for message")
|
||||
}
|
||||
saveDate = m.SaveDate.Truncate(time.Second)
|
||||
return nil
|
||||
})
|
||||
tc.check(err, "get savedate")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, imapclient.FetchSaveDate{SaveDate: &saveDate}))
|
||||
|
||||
// Test some invalid syntax. Also invalid for uidonly.
|
||||
// Test some invalid syntax.
|
||||
tc.transactf("bad", "fetch")
|
||||
tc.transactf("bad", "fetch ")
|
||||
tc.transactf("bad", "fetch ")
|
||||
@ -296,38 +224,25 @@ func testFetch(t *testing.T, uidonly bool) {
|
||||
tc.transactf("bad", "fetch 1 body[header.fields.not ()]") // List must be non-empty.
|
||||
tc.transactf("bad", "fetch 1 body[mime]") // MIME must be prefixed with a number. ../rfc/9051:4497
|
||||
|
||||
if !uidonly {
|
||||
tc.transactf("no", "fetch 1 body[2]") // No such part.
|
||||
}
|
||||
tc.transactf("no", "fetch 1 body[2]") // No such part.
|
||||
|
||||
// Add more complex message.
|
||||
|
||||
uid2 := imapclient.FetchUID(2)
|
||||
bodystructure2 := imapclient.FetchBodystructure{
|
||||
RespAttr: "BODYSTRUCTURE",
|
||||
Body: imapclient.BodyTypeMpart{
|
||||
Bodies: []any{
|
||||
imapclient.BodyTypeBasic{BodyFields: imapclient.BodyFields{Octets: 275}, Ext: &bodyext1},
|
||||
imapclient.BodyTypeText{MediaType: "TEXT", MediaSubtype: "PLAIN", BodyFields: imapclient.BodyFields{Params: [][2]string{{"CHARSET", "US-ASCII"}}, Octets: 114}, Lines: 3, Ext: &bodyext1},
|
||||
imapclient.BodyTypeBasic{BodyFields: imapclient.BodyFields{Octets: 275}},
|
||||
imapclient.BodyTypeText{MediaType: "TEXT", MediaSubtype: "PLAIN", BodyFields: imapclient.BodyFields{Params: [][2]string{{"CHARSET", "US-ASCII"}}, Octets: 114}, Lines: 3},
|
||||
imapclient.BodyTypeMpart{
|
||||
Bodies: []any{
|
||||
imapclient.BodyTypeBasic{MediaType: "AUDIO", MediaSubtype: "BASIC", BodyFields: imapclient.BodyFields{CTE: "BASE64", Octets: 22}, Ext: &bodyext1},
|
||||
imapclient.BodyTypeBasic{MediaType: "IMAGE", MediaSubtype: "JPEG", BodyFields: imapclient.BodyFields{CTE: "BASE64"}, Ext: &imapclient.BodyExtension1Part{
|
||||
Disposition: ptr(ptr("inline")),
|
||||
DispositionParams: ptr([][2]string{{"filename", "image.jpg"}}),
|
||||
Language: ptr([]string(nil)),
|
||||
Location: ptr((*string)(nil)),
|
||||
}},
|
||||
imapclient.BodyTypeBasic{MediaType: "AUDIO", MediaSubtype: "BASIC", BodyFields: imapclient.BodyFields{CTE: "BASE64", Octets: 22}},
|
||||
imapclient.BodyTypeBasic{MediaType: "IMAGE", MediaSubtype: "JPEG", BodyFields: imapclient.BodyFields{CTE: "BASE64"}},
|
||||
},
|
||||
MediaSubtype: "PARALLEL",
|
||||
Ext: &imapclient.BodyExtensionMpart{
|
||||
Params: [][2]string{{"BOUNDARY", "unique-boundary-2"}},
|
||||
Disposition: ptr((*string)(nil)), // Present but nil.
|
||||
DispositionParams: ptr([][2]string(nil)),
|
||||
Language: ptr([]string(nil)),
|
||||
Location: ptr((*string)(nil)),
|
||||
},
|
||||
},
|
||||
imapclient.BodyTypeText{MediaType: "TEXT", MediaSubtype: "ENRICHED", BodyFields: imapclient.BodyFields{Octets: 145}, Lines: 5, Ext: &bodyext1},
|
||||
imapclient.BodyTypeText{MediaType: "TEXT", MediaSubtype: "ENRICHED", BodyFields: imapclient.BodyFields{Octets: 145}, Lines: 5},
|
||||
imapclient.BodyTypeMsg{
|
||||
MediaType: "MESSAGE",
|
||||
MediaSubtype: "RFC822",
|
||||
@ -340,64 +255,49 @@ func testFetch(t *testing.T, uidonly bool) {
|
||||
To: []imapclient.Address{{Name: "mox", Adl: "", Mailbox: "info", Host: "mox.example"}},
|
||||
},
|
||||
Bodystructure: imapclient.BodyTypeText{
|
||||
MediaType: "TEXT", MediaSubtype: "PLAIN", BodyFields: imapclient.BodyFields{Params: [][2]string{{"CHARSET", "ISO-8859-1"}}, CTE: "QUOTED-PRINTABLE", Octets: 51}, Lines: 1, Ext: &bodyext1},
|
||||
MediaType: "TEXT", MediaSubtype: "PLAIN", BodyFields: imapclient.BodyFields{Params: [][2]string{{"CHARSET", "ISO-8859-1"}}, CTE: "QUOTED-PRINTABLE", Octets: 51}, Lines: 1},
|
||||
Lines: 7,
|
||||
Ext: &imapclient.BodyExtension1Part{
|
||||
MD5: ptr("MDEyMzQ1Njc4OWFiY2RlZjAxMjM0NTY3ODlhYmNkZWY="),
|
||||
Disposition: ptr((*string)(nil)),
|
||||
DispositionParams: ptr([][2]string(nil)),
|
||||
Language: ptr([]string{"en", "de"}),
|
||||
Location: ptr(ptr("http://localhost")),
|
||||
},
|
||||
},
|
||||
},
|
||||
MediaSubtype: "MIXED",
|
||||
Ext: &imapclient.BodyExtensionMpart{
|
||||
Params: [][2]string{{"BOUNDARY", "unique-boundary-1"}},
|
||||
Disposition: ptr((*string)(nil)), // Present but nil.
|
||||
DispositionParams: ptr([][2]string(nil)),
|
||||
Language: ptr([]string(nil)),
|
||||
Location: ptr((*string)(nil)),
|
||||
},
|
||||
},
|
||||
}
|
||||
tc.client.Append("inbox", makeAppendTime(nestedMessage, received))
|
||||
tc.transactf("ok", "uid fetch 2 bodystructure")
|
||||
tc.xuntagged(tc.untaggedFetch(2, 2, bodystructure2))
|
||||
tc.client.Append("inbox", nil, &received, []byte(nestedMessage))
|
||||
tc.transactf("ok", "fetch 2 bodystructure")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, bodystructure2}})
|
||||
|
||||
// Multiple responses.
|
||||
if !uidonly {
|
||||
tc.transactf("ok", "fetch 1:2 bodystructure")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, bodystructure1), tc.untaggedFetch(2, 2, bodystructure2))
|
||||
tc.transactf("ok", "fetch 1,2 bodystructure")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, bodystructure1), tc.untaggedFetch(2, 2, bodystructure2))
|
||||
tc.transactf("ok", "fetch 2:1 bodystructure")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, bodystructure1), tc.untaggedFetch(2, 2, bodystructure2))
|
||||
tc.transactf("ok", "fetch 1:* bodystructure")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, bodystructure1), tc.untaggedFetch(2, 2, bodystructure2))
|
||||
tc.transactf("ok", "fetch *:1 bodystructure")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, bodystructure1), tc.untaggedFetch(2, 2, bodystructure2))
|
||||
tc.transactf("ok", "fetch *:2 bodystructure")
|
||||
tc.xuntagged(tc.untaggedFetch(2, 2, bodystructure2))
|
||||
tc.transactf("ok", "fetch * bodystructure") // Highest msgseq.
|
||||
tc.xuntagged(tc.untaggedFetch(2, 2, bodystructure2))
|
||||
}
|
||||
tc.transactf("ok", "fetch 1:2 bodystructure")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, bodystructure1}}, imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, bodystructure2}})
|
||||
tc.transactf("ok", "fetch 1,2 bodystructure")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, bodystructure1}}, imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, bodystructure2}})
|
||||
tc.transactf("ok", "fetch 2:1 bodystructure")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, bodystructure1}}, imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, bodystructure2}})
|
||||
tc.transactf("ok", "fetch 1:* bodystructure")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, bodystructure1}}, imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, bodystructure2}})
|
||||
tc.transactf("ok", "fetch *:1 bodystructure")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, bodystructure1}}, imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, bodystructure2}})
|
||||
tc.transactf("ok", "fetch *:2 bodystructure")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, bodystructure2}})
|
||||
|
||||
tc.transactf("ok", "fetch * bodystructure") // Highest msgseq.
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, bodystructure2}})
|
||||
|
||||
tc.transactf("ok", "uid fetch 1:* bodystructure")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, bodystructure1), tc.untaggedFetch(2, 2, bodystructure2))
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, bodystructure1}}, imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, bodystructure2}})
|
||||
|
||||
tc.transactf("ok", "uid fetch 1:2 bodystructure")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, bodystructure1), tc.untaggedFetch(2, 2, bodystructure2))
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, bodystructure1}}, imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, bodystructure2}})
|
||||
|
||||
tc.transactf("ok", "uid fetch 1,2 bodystructure")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, bodystructure1), tc.untaggedFetch(2, 2, bodystructure2))
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, bodystructure1}}, imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, bodystructure2}})
|
||||
|
||||
tc.transactf("ok", "uid fetch 2:2 bodystructure")
|
||||
tc.xuntagged(tc.untaggedFetch(2, 2, bodystructure2))
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, bodystructure2}})
|
||||
|
||||
// todo: read the bodies/headers of the parts, and of the nested message.
|
||||
tc.transactf("ok", "uid fetch 2 body.peek[]")
|
||||
tc.xuntagged(tc.untaggedFetch(2, 2, imapclient.FetchBody{RespAttr: "BODY[]", Body: nestedMessage}))
|
||||
tc.transactf("ok", "fetch 2 body.peek[]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, imapclient.FetchBody{RespAttr: "BODY[]", Body: nestedMessage}}})
|
||||
|
||||
part1 := tocrlf(` ... Some text appears here ...
|
||||
|
||||
@ -407,22 +307,22 @@ func testFetch(t *testing.T, uidonly bool) {
|
||||
It could have been done with explicit typing as in the
|
||||
next part.]
|
||||
`)
|
||||
tc.transactf("ok", "uid fetch 2 body.peek[1]")
|
||||
tc.xuntagged(tc.untaggedFetch(2, 2, imapclient.FetchBody{RespAttr: "BODY[1]", Section: "1", Body: part1}))
|
||||
tc.transactf("ok", "fetch 2 body.peek[1]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, imapclient.FetchBody{RespAttr: "BODY[1]", Section: "1", Body: part1}}})
|
||||
|
||||
tc.transactf("no", "uid fetch 2 binary.peek[3]") // Only allowed on leaf parts, not multiparts.
|
||||
tc.transactf("no", "uid fetch 2 binary.peek[5]") // Only allowed on leaf parts, not messages.
|
||||
tc.transactf("no", "fetch 2 binary.peek[3]") // Only allowed on leaf parts, not multiparts.
|
||||
tc.transactf("no", "fetch 2 binary.peek[5]") // Only allowed on leaf parts, not messages.
|
||||
|
||||
part31 := "aGVsbG8NCndvcmxkDQo=\r\n"
|
||||
part31dec := "hello\r\nworld\r\n"
|
||||
tc.transactf("ok", "uid fetch 2 binary.size[3.1]")
|
||||
tc.xuntagged(tc.untaggedFetch(2, 2, imapclient.FetchBinarySize{RespAttr: "BINARY.SIZE[3.1]", Parts: []uint32{3, 1}, Size: int64(len(part31dec))}))
|
||||
tc.transactf("ok", "fetch 2 binary.size[3.1]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, imapclient.FetchBinarySize{RespAttr: "BINARY.SIZE[3.1]", Parts: []uint32{3, 1}, Size: int64(len(part31dec))}}})
|
||||
|
||||
tc.transactf("ok", "uid fetch 2 body.peek[3.1]")
|
||||
tc.xuntagged(tc.untaggedFetch(2, 2, imapclient.FetchBody{RespAttr: "BODY[3.1]", Section: "3.1", Body: part31}))
|
||||
tc.transactf("ok", "fetch 2 body.peek[3.1]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, imapclient.FetchBody{RespAttr: "BODY[3.1]", Section: "3.1", Body: part31}}})
|
||||
|
||||
tc.transactf("ok", "uid fetch 2 binary.peek[3.1]")
|
||||
tc.xuntagged(tc.untaggedFetch(2, 2, imapclient.FetchBinary{RespAttr: "BINARY[3.1]", Parts: []uint32{3, 1}, Data: part31dec}))
|
||||
tc.transactf("ok", "fetch 2 binary.peek[3.1]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, imapclient.FetchBinary{RespAttr: "BINARY[3.1]", Parts: []uint32{3, 1}, Data: part31dec}}})
|
||||
|
||||
part3 := tocrlf(`--unique-boundary-2
|
||||
Content-Type: audio/basic
|
||||
@ -433,18 +333,19 @@ aGVsbG8NCndvcmxkDQo=
|
||||
--unique-boundary-2
|
||||
Content-Type: image/jpeg
|
||||
Content-Transfer-Encoding: base64
|
||||
Content-Disposition: inline; filename=image.jpg
|
||||
|
||||
|
||||
--unique-boundary-2--
|
||||
|
||||
`)
|
||||
tc.transactf("ok", "uid fetch 2 body.peek[3]")
|
||||
tc.xuntagged(tc.untaggedFetch(2, 2, imapclient.FetchBody{RespAttr: "BODY[3]", Section: "3", Body: part3}))
|
||||
tc.transactf("ok", "fetch 2 body.peek[3]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, imapclient.FetchBody{RespAttr: "BODY[3]", Section: "3", Body: part3}}})
|
||||
|
||||
part2mime := "Content-type: text/plain; charset=US-ASCII\r\n"
|
||||
tc.transactf("ok", "uid fetch 2 body.peek[2.mime]")
|
||||
tc.xuntagged(tc.untaggedFetch(2, 2, imapclient.FetchBody{RespAttr: "BODY[2.MIME]", Section: "2.MIME", Body: part2mime}))
|
||||
part2mime := tocrlf(`Content-type: text/plain; charset=US-ASCII
|
||||
|
||||
`)
|
||||
tc.transactf("ok", "fetch 2 body.peek[2.mime]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, imapclient.FetchBody{RespAttr: "BODY[2.MIME]", Section: "2.MIME", Body: part2mime}}})
|
||||
|
||||
part5 := tocrlf(`From: info@mox.example
|
||||
To: mox <info@mox.example>
|
||||
@ -454,8 +355,8 @@ Content-Transfer-Encoding: Quoted-printable
|
||||
|
||||
... Additional text in ISO-8859-1 goes here ...
|
||||
`)
|
||||
tc.transactf("ok", "uid fetch 2 body.peek[5]")
|
||||
tc.xuntagged(tc.untaggedFetch(2, 2, imapclient.FetchBody{RespAttr: "BODY[5]", Section: "5", Body: part5}))
|
||||
tc.transactf("ok", "fetch 2 body.peek[5]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, imapclient.FetchBody{RespAttr: "BODY[5]", Section: "5", Body: part5}}})
|
||||
|
||||
part5header := tocrlf(`From: info@mox.example
|
||||
To: mox <info@mox.example>
|
||||
@ -464,101 +365,39 @@ Content-Type: Text/plain; charset=ISO-8859-1
|
||||
Content-Transfer-Encoding: Quoted-printable
|
||||
|
||||
`)
|
||||
tc.transactf("ok", "uid fetch 2 body.peek[5.header]")
|
||||
tc.xuntagged(tc.untaggedFetch(2, 2, imapclient.FetchBody{RespAttr: "BODY[5.HEADER]", Section: "5.HEADER", Body: part5header}))
|
||||
tc.transactf("ok", "fetch 2 body.peek[5.header]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, imapclient.FetchBody{RespAttr: "BODY[5.HEADER]", Section: "5.HEADER", Body: part5header}}})
|
||||
|
||||
part5mime := tocrlf(`Content-Type: Text/plain; charset=ISO-8859-1
|
||||
Content-Transfer-Encoding: Quoted-printable
|
||||
|
||||
part5mime := tocrlf(`Content-Type: message/rfc822
|
||||
Content-MD5: MDEyMzQ1Njc4OWFiY2RlZjAxMjM0NTY3ODlhYmNkZWY=
|
||||
Content-Language: en,de
|
||||
Content-Location: http://localhost
|
||||
`)
|
||||
tc.transactf("ok", "uid fetch 2 body.peek[5.mime]")
|
||||
tc.xuntagged(tc.untaggedFetch(2, 2, imapclient.FetchBody{RespAttr: "BODY[5.MIME]", Section: "5.MIME", Body: part5mime}))
|
||||
tc.transactf("ok", "fetch 2 body.peek[5.mime]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, imapclient.FetchBody{RespAttr: "BODY[5.MIME]", Section: "5.MIME", Body: part5mime}}})
|
||||
|
||||
part5text := " ... Additional text in ISO-8859-1 goes here ...\r\n"
|
||||
tc.transactf("ok", "fetch 2 body.peek[5.text]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, imapclient.FetchBody{RespAttr: "BODY[5.TEXT]", Section: "5.TEXT", Body: part5text}}})
|
||||
|
||||
tc.transactf("ok", "uid fetch 2 body.peek[5.text]")
|
||||
tc.xuntagged(tc.untaggedFetch(2, 2, imapclient.FetchBody{RespAttr: "BODY[5.TEXT]", Section: "5.TEXT", Body: part5text}))
|
||||
|
||||
part5body := " ... Additional text in ISO-8859-1 goes here ...\r\n"
|
||||
tc.transactf("ok", "uid fetch 2 body.peek[5.1]")
|
||||
tc.xuntagged(tc.untaggedFetch(2, 2, imapclient.FetchBody{RespAttr: "BODY[5.1]", Section: "5.1", Body: part5body}))
|
||||
|
||||
// 5.1 is the part that is the sub message, but addressed as a plain part rather than as message/rfc822,
|
||||
// so we cannot request a header.
|
||||
tc.transactf("no", "uid fetch 2 body.peek[5.1.header]")
|
||||
tc.transactf("ok", "fetch 2 body.peek[5.1]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{uid2, imapclient.FetchBody{RespAttr: "BODY[5.1]", Section: "5.1", Body: part5text}}})
|
||||
|
||||
// In case of EXAMINE instead of SELECT, we should not be seeing any changed \Seen flags for non-peek commands.
|
||||
tc.client.UIDStoreFlagsClear("1", true, `\Seen`)
|
||||
tc.client.StoreFlagsClear("1", true, `\Seen`)
|
||||
tc.client.Unselect()
|
||||
tc.client.Examine("inbox")
|
||||
|
||||
// Preview
|
||||
preview := "Hello Joe, do you think we can meet at 3:30 tomorrow?"
|
||||
tc.transactf("ok", "uid fetch 1 preview")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, imapclient.FetchPreview{Preview: &preview}))
|
||||
tc.transactf("ok", "fetch 1 binary[]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, binary1}})
|
||||
|
||||
tc.transactf("ok", "uid fetch 1 preview (lazy)")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, imapclient.FetchPreview{Preview: &preview}))
|
||||
tc.transactf("ok", "fetch 1 body[]")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, body1}})
|
||||
|
||||
// On-demand preview and saving on first request.
|
||||
err = tc.account.DB.Write(ctxbg, func(tx *bstore.Tx) error {
|
||||
m := store.Message{ID: 1}
|
||||
err := tx.Get(&m)
|
||||
tcheck(t, err, "get message")
|
||||
if m.UID != 1 {
|
||||
t.Fatalf("uid %d instead of 1", m.UID)
|
||||
}
|
||||
m.Preview = nil
|
||||
err = tx.Update(&m)
|
||||
tcheck(t, err, "remove preview from message")
|
||||
return nil
|
||||
})
|
||||
tcheck(t, err, "remove preview from database")
|
||||
tc.transactf("ok", "fetch 1 rfc822.text")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, rfctext1}})
|
||||
|
||||
tc.transactf("ok", "uid fetch 1 preview")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, imapclient.FetchPreview{Preview: &preview}))
|
||||
m := store.Message{ID: 1}
|
||||
err = tc.account.DB.Get(ctxbg, &m)
|
||||
tcheck(t, err, "get message")
|
||||
if m.Preview == nil {
|
||||
t.Fatalf("preview missing")
|
||||
} else if *m.Preview != preview+"\n" {
|
||||
t.Fatalf("got preview %q, expected %q", *m.Preview, preview+"\n")
|
||||
}
|
||||
|
||||
tc.transactf("bad", "uid fetch 1 preview (bogus)")
|
||||
|
||||
// Start a second session. Use it to remove the message. First session should still
|
||||
// be able to access the messages.
|
||||
tc2 := startNoSwitchboard(t, uidonly)
|
||||
defer tc2.closeNoWait()
|
||||
tc2.login("mjl@mox.example", password0)
|
||||
tc2.client.Select("inbox")
|
||||
tc2.client.UIDStoreFlagsSet("1", true, `\Deleted`)
|
||||
tc2.client.Expunge()
|
||||
tc2.client.Logout()
|
||||
|
||||
if uidonly {
|
||||
tc.transactf("ok", "uid fetch 1 binary[]")
|
||||
tc.xuntagged(
|
||||
tc.untaggedFetch(1, 1, imapclient.FetchFlags{`\Deleted`}),
|
||||
imapclient.UntaggedVanished{UIDs: xparseNumSet("1")},
|
||||
)
|
||||
// Message no longer available in session.
|
||||
} else {
|
||||
tc.transactf("ok", "fetch 1 binary[]")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, binary1))
|
||||
|
||||
tc.transactf("ok", "fetch 1 body[]")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, body1))
|
||||
|
||||
tc.transactf("ok", "fetch 1 rfc822.text")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, rfctext1))
|
||||
|
||||
tc.transactf("ok", "fetch 1 rfc822")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, rfc1))
|
||||
}
|
||||
tc.transactf("ok", "fetch 1 rfc822")
|
||||
tc.xuntagged(imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{uid1, rfc1}})
|
||||
|
||||
tc.client.Logout()
|
||||
}
|
||||
|
@ -5,7 +5,6 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log/slog"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
@ -60,11 +59,30 @@ func FuzzServer(f *testing.F) {
|
||||
f.Add(tag + cmd)
|
||||
}
|
||||
|
||||
log := mlog.New("imapserver", nil)
|
||||
mox.Context = ctxbg
|
||||
mox.ConfigStaticPath = filepath.FromSlash("../testdata/imapserverfuzz/mox.conf")
|
||||
mox.MustLoadConfig(true, false)
|
||||
dataDir := mox.ConfigDirPath(mox.Conf.Static.DataDir)
|
||||
os.RemoveAll(dataDir)
|
||||
acc, err := store.OpenAccount(log, "mjl")
|
||||
if err != nil {
|
||||
f.Fatalf("open account: %v", err)
|
||||
}
|
||||
defer acc.Close()
|
||||
err = acc.SetPassword(log, password0)
|
||||
if err != nil {
|
||||
f.Fatalf("set password: %v", err)
|
||||
}
|
||||
defer store.Switchboard()()
|
||||
|
||||
comm := store.RegisterComm(acc)
|
||||
defer comm.Unregister()
|
||||
|
||||
var cid int64 = 1
|
||||
|
||||
var fl *os.File
|
||||
if false {
|
||||
var err error
|
||||
fl, err = os.OpenFile("fuzz.log", os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
|
||||
if err != nil {
|
||||
f.Fatalf("fuzz log")
|
||||
@ -78,34 +96,6 @@ func FuzzServer(f *testing.F) {
|
||||
}
|
||||
|
||||
f.Fuzz(func(t *testing.T, s string) {
|
||||
log := mlog.New("imapserver", nil)
|
||||
mox.ConfigStaticPath = filepath.FromSlash("../testdata/imapserverfuzz/mox.conf")
|
||||
mox.MustLoadConfig(true, false)
|
||||
store.Close() // May not be open, we ignore error.
|
||||
dataDir := mox.ConfigDirPath(mox.Conf.Static.DataDir)
|
||||
os.RemoveAll(dataDir)
|
||||
err := store.Init(ctxbg)
|
||||
if err != nil {
|
||||
t.Fatalf("store init: %v", err)
|
||||
}
|
||||
defer store.Switchboard()()
|
||||
|
||||
acc, err := store.OpenAccount(log, "mjl", false)
|
||||
if err != nil {
|
||||
t.Fatalf("open account: %v", err)
|
||||
}
|
||||
defer func() {
|
||||
acc.Close()
|
||||
acc.WaitClosed()
|
||||
}()
|
||||
err = acc.SetPassword(log, password0)
|
||||
if err != nil {
|
||||
t.Fatalf("set password: %v", err)
|
||||
}
|
||||
|
||||
comm := store.RegisterComm(acc)
|
||||
defer comm.Unregister()
|
||||
|
||||
run := func(cmds []string) {
|
||||
limitersInit() // Reset rate limiters.
|
||||
serverConn, clientConn := net.Pipe()
|
||||
@ -128,32 +118,28 @@ func FuzzServer(f *testing.F) {
|
||||
|
||||
err := clientConn.SetDeadline(time.Now().Add(time.Second))
|
||||
flog(err, "set client deadline")
|
||||
opts := imapclient.Opts{
|
||||
Logger: slog.Default().With("cid", mox.Cid()),
|
||||
Error: func(err error) { panic(err) },
|
||||
}
|
||||
client, _ := imapclient.New(clientConn, &opts)
|
||||
client, _ := imapclient.New(clientConn, true)
|
||||
|
||||
for _, cmd := range cmds {
|
||||
client.WriteCommandf("", "%s", cmd)
|
||||
client.ReadResponse()
|
||||
client.Commandf("", "%s", cmd)
|
||||
client.Response()
|
||||
}
|
||||
client.WriteCommandf("", "%s", s)
|
||||
client.ReadResponse()
|
||||
client.Commandf("", "%s", s)
|
||||
client.Response()
|
||||
}()
|
||||
|
||||
err = serverConn.SetDeadline(time.Now().Add(time.Second))
|
||||
flog(err, "set server deadline")
|
||||
serve("test", cid, nil, serverConn, false, false, true, false, "")
|
||||
serve("test", cid, nil, serverConn, false, true)
|
||||
cid++
|
||||
}
|
||||
|
||||
// Each command brings the connection state one step further. We try the fuzzing
|
||||
// input for each state.
|
||||
run([]string{})
|
||||
run([]string{`login mjl@mox.example "` + password0 + `"`})
|
||||
run([]string{`login mjl@mox.example "` + password0 + `"`, "select inbox"})
|
||||
run([]string{"login mjl@mox.example testtest"})
|
||||
run([]string{"login mjl@mox.example testtest", "select inbox"})
|
||||
xappend := fmt.Sprintf("append inbox () {%d+}\r\n%s", len(exampleMsg), exampleMsg)
|
||||
run([]string{`login mjl@mox.example "` + password0 + `"`, "select inbox", xappend})
|
||||
run([]string{"login mjl@mox.example testtest", "select inbox", xappend})
|
||||
})
|
||||
}
|
||||
|
@ -9,14 +9,13 @@ import (
|
||||
)
|
||||
|
||||
func TestIdle(t *testing.T) {
|
||||
tc1 := start(t, false)
|
||||
tc1 := start(t)
|
||||
defer tc1.close()
|
||||
tc1.client.Login("mjl@mox.example", password0)
|
||||
|
||||
tc2 := startNoSwitchboard(t, false)
|
||||
defer tc2.closeNoWait()
|
||||
|
||||
tc1.login("mjl@mox.example", password0)
|
||||
tc2.login("mjl@mox.example", password0)
|
||||
tc2 := startNoSwitchboard(t)
|
||||
defer tc2.close()
|
||||
tc2.client.Login("mjl@mox.example", password0)
|
||||
|
||||
tc1.transactf("ok", "select inbox")
|
||||
tc2.transactf("ok", "select inbox")
|
||||
|
@ -1,14 +1,12 @@
|
||||
package imapserver
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"path"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/mjl-/bstore"
|
||||
|
||||
"github.com/mjl-/mox/mox-"
|
||||
"github.com/mjl-/mox/store"
|
||||
)
|
||||
|
||||
@ -62,7 +60,6 @@ func (c *conn) cmdList(tag, cmd string, p *parser) {
|
||||
isExtended = isExtended || isList
|
||||
var retSubscribed, retChildren bool
|
||||
var retStatusAttrs []string
|
||||
var retMetadata []string
|
||||
if p.take(" RETURN (") {
|
||||
isExtended = true
|
||||
// ../rfc/9051:6613 ../rfc/9051:6915 ../rfc/9051:7072 ../rfc/9051:6821 ../rfc/5819:95
|
||||
@ -93,18 +90,6 @@ func (c *conn) cmdList(tag, cmd string, p *parser) {
|
||||
retStatusAttrs = append(retStatusAttrs, p.xstatusAtt())
|
||||
}
|
||||
p.xtake(")")
|
||||
case "METADATA":
|
||||
// ../rfc/9590:167
|
||||
p.xspace()
|
||||
p.xtake("(")
|
||||
for {
|
||||
s := p.xmetadataKey()
|
||||
retMetadata = append(retMetadata, s)
|
||||
if !p.space() {
|
||||
break
|
||||
}
|
||||
}
|
||||
p.xtake(")")
|
||||
default:
|
||||
// ../rfc/9051:2398
|
||||
xsyntaxErrorf("bad list return option %q", w)
|
||||
@ -115,7 +100,7 @@ func (c *conn) cmdList(tag, cmd string, p *parser) {
|
||||
|
||||
if !isExtended && reference == "" && patterns[0] == "" {
|
||||
// ../rfc/9051:2277 ../rfc/3501:2221
|
||||
c.xbwritelinef(`* LIST () "/" ""`)
|
||||
c.bwritelinef(`* LIST () "/" ""`)
|
||||
c.ok(tag, cmd)
|
||||
return
|
||||
}
|
||||
@ -132,7 +117,6 @@ func (c *conn) cmdList(tag, cmd string, p *parser) {
|
||||
}
|
||||
re := xmailboxPatternMatcher(reference, patterns)
|
||||
var responseLines []string
|
||||
var respMetadata []concatspace
|
||||
|
||||
c.account.WithRLock(func() {
|
||||
c.xdbread(func(tx *bstore.Tx) {
|
||||
@ -146,11 +130,10 @@ func (c *conn) cmdList(tag, cmd string, p *parser) {
|
||||
var nameList []string
|
||||
|
||||
q := bstore.QueryTx[store.Mailbox](tx)
|
||||
q.FilterEqual("Expunged", false)
|
||||
err := q.ForEach(func(mb store.Mailbox) error {
|
||||
names[mb.Name] = info{mailbox: &mb}
|
||||
nameList = append(nameList, mb.Name)
|
||||
for p := mox.ParentMailboxName(mb.Name); p != ""; p = mox.ParentMailboxName(p) {
|
||||
for p := path.Dir(mb.Name); p != "."; p = path.Dir(p) {
|
||||
hasChild[p] = true
|
||||
}
|
||||
return nil
|
||||
@ -165,7 +148,7 @@ func (c *conn) cmdList(tag, cmd string, p *parser) {
|
||||
if !ok {
|
||||
nameList = append(nameList, sub.Name)
|
||||
}
|
||||
for p := mox.ParentMailboxName(sub.Name); p != ""; p = mox.ParentMailboxName(p) {
|
||||
for p := path.Dir(sub.Name); p != "."; p = path.Dir(p) {
|
||||
hasSubscribedChild[p] = true
|
||||
}
|
||||
return nil
|
||||
@ -208,64 +191,39 @@ func (c *conn) cmdList(tag, cmd string, p *parser) {
|
||||
flags = append(flags, bare(`\Subscribed`))
|
||||
}
|
||||
if info.mailbox != nil {
|
||||
add := func(b bool, v string) {
|
||||
if b {
|
||||
flags = append(flags, bare(v))
|
||||
}
|
||||
if info.mailbox.Archive {
|
||||
flags = append(flags, bare(`\Archive`))
|
||||
}
|
||||
if info.mailbox.Draft {
|
||||
flags = append(flags, bare(`\Drafts`))
|
||||
}
|
||||
if info.mailbox.Junk {
|
||||
flags = append(flags, bare(`\Junk`))
|
||||
}
|
||||
if info.mailbox.Sent {
|
||||
flags = append(flags, bare(`\Sent`))
|
||||
}
|
||||
if info.mailbox.Trash {
|
||||
flags = append(flags, bare(`\Trash`))
|
||||
}
|
||||
mb := info.mailbox
|
||||
add(mb.Archive, `\Archive`)
|
||||
add(mb.Draft, `\Drafts`)
|
||||
add(mb.Junk, `\Junk`)
|
||||
add(mb.Sent, `\Sent`)
|
||||
add(mb.Trash, `\Trash`)
|
||||
}
|
||||
|
||||
var extStr string
|
||||
if extended != nil {
|
||||
extStr = " " + extended.pack(c)
|
||||
}
|
||||
line := fmt.Sprintf(`* LIST %s "/" %s%s`, flags.pack(c), mailboxt(name).pack(c), extStr)
|
||||
line := fmt.Sprintf(`* LIST %s "/" %s%s`, flags.pack(c), astring(c.encodeMailbox(name)).pack(c), extStr)
|
||||
responseLines = append(responseLines, line)
|
||||
|
||||
if retStatusAttrs != nil && info.mailbox != nil {
|
||||
responseLines = append(responseLines, c.xstatusLine(tx, *info.mailbox, retStatusAttrs))
|
||||
}
|
||||
|
||||
// ../rfc/9590:101
|
||||
if info.mailbox != nil && len(retMetadata) > 0 {
|
||||
var meta listspace
|
||||
for _, k := range retMetadata {
|
||||
q := bstore.QueryTx[store.Annotation](tx)
|
||||
q.FilterNonzero(store.Annotation{MailboxID: info.mailbox.ID, Key: k})
|
||||
q.FilterEqual("Expunged", false)
|
||||
a, err := q.Get()
|
||||
var v token
|
||||
if err == bstore.ErrAbsent {
|
||||
v = nilt
|
||||
} else {
|
||||
xcheckf(err, "get annotation")
|
||||
if a.IsString {
|
||||
v = string0(string(a.Value))
|
||||
} else {
|
||||
v = readerSizeSyncliteral{bytes.NewReader(a.Value), int64(len(a.Value)), true}
|
||||
}
|
||||
}
|
||||
meta = append(meta, astring(k), v)
|
||||
}
|
||||
line := concatspace{bare("*"), bare("METADATA"), mailboxt(info.mailbox.Name), meta}
|
||||
respMetadata = append(respMetadata, line)
|
||||
}
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
for _, line := range responseLines {
|
||||
c.xbwritelinef("%s", line)
|
||||
}
|
||||
for _, meta := range respMetadata {
|
||||
meta.xwriteTo(c, c.xbw)
|
||||
c.xbwritelinef("")
|
||||
c.bwritelinef("%s", line)
|
||||
}
|
||||
c.ok(tag, cmd)
|
||||
}
|
||||
|
@ -8,18 +8,10 @@ import (
|
||||
)
|
||||
|
||||
func TestListBasic(t *testing.T) {
|
||||
testListBasic(t, false)
|
||||
}
|
||||
|
||||
func TestListBasicUIDOnly(t *testing.T) {
|
||||
testListBasic(t, true)
|
||||
}
|
||||
|
||||
func testListBasic(t *testing.T, uidonly bool) {
|
||||
tc := start(t, uidonly)
|
||||
tc := start(t)
|
||||
defer tc.close()
|
||||
|
||||
tc.login("mjl@mox.example", password0)
|
||||
tc.client.Login("mjl@mox.example", password0)
|
||||
|
||||
ulist := func(name string, flags ...string) imapclient.UntaggedList {
|
||||
if len(flags) == 0 {
|
||||
@ -34,9 +26,6 @@ func testListBasic(t *testing.T, uidonly bool) {
|
||||
tc.last(tc.client.List("Inbox"))
|
||||
tc.xuntagged(ulist("Inbox"))
|
||||
|
||||
tc.last(tc.client.List("expungebox"))
|
||||
tc.xuntagged()
|
||||
|
||||
tc.last(tc.client.List("%"))
|
||||
tc.xuntagged(ulist("Archive", `\Archive`), ulist("Drafts", `\Drafts`), ulist("Inbox"), ulist("Junk", `\Junk`), ulist("Sent", `\Sent`), ulist("Trash", `\Trash`))
|
||||
|
||||
@ -46,7 +35,7 @@ func testListBasic(t *testing.T, uidonly bool) {
|
||||
tc.last(tc.client.List("A*"))
|
||||
tc.xuntagged(ulist("Archive", `\Archive`))
|
||||
|
||||
tc.client.Create("Inbox/todo", nil)
|
||||
tc.client.Create("Inbox/todo")
|
||||
|
||||
tc.last(tc.client.List("Inbox*"))
|
||||
tc.xuntagged(ulist("Inbox"), ulist("Inbox/todo"))
|
||||
@ -67,20 +56,12 @@ func testListBasic(t *testing.T, uidonly bool) {
|
||||
}
|
||||
|
||||
func TestListExtended(t *testing.T) {
|
||||
testListExtended(t, false)
|
||||
}
|
||||
|
||||
func TestListExtendedUIDOnly(t *testing.T) {
|
||||
testListExtended(t, true)
|
||||
}
|
||||
|
||||
func testListExtended(t *testing.T, uidonly bool) {
|
||||
defer mockUIDValidity()()
|
||||
|
||||
tc := start(t, uidonly)
|
||||
tc := start(t)
|
||||
defer tc.close()
|
||||
|
||||
tc.login("mjl@mox.example", password0)
|
||||
tc.client.Login("mjl@mox.example", password0)
|
||||
|
||||
ulist := func(name string, flags ...string) imapclient.UntaggedList {
|
||||
if len(flags) == 0 {
|
||||
@ -97,7 +78,7 @@ func testListExtended(t *testing.T, uidonly bool) {
|
||||
for _, name := range store.DefaultInitialMailboxes.Regular {
|
||||
uidvals[name] = 1
|
||||
}
|
||||
var uidvalnext uint32 = 3
|
||||
var uidvalnext uint32 = 2
|
||||
uidval := func(name string) uint32 {
|
||||
v, ok := uidvals[name]
|
||||
if !ok {
|
||||
@ -109,15 +90,15 @@ func testListExtended(t *testing.T, uidonly bool) {
|
||||
}
|
||||
|
||||
ustatus := func(name string) imapclient.UntaggedStatus {
|
||||
attrs := map[imapclient.StatusAttr]int64{
|
||||
imapclient.StatusMessages: 0,
|
||||
imapclient.StatusUIDNext: 1,
|
||||
imapclient.StatusUIDValidity: int64(uidval(name)),
|
||||
imapclient.StatusUnseen: 0,
|
||||
imapclient.StatusDeleted: 0,
|
||||
imapclient.StatusSize: 0,
|
||||
imapclient.StatusRecent: 0,
|
||||
imapclient.StatusAppendLimit: 0,
|
||||
attrs := map[string]int64{
|
||||
"MESSAGES": 0,
|
||||
"UIDNEXT": 1,
|
||||
"UIDVALIDITY": int64(uidval(name)),
|
||||
"UNSEEN": 0,
|
||||
"DELETED": 0,
|
||||
"SIZE": 0,
|
||||
"RECENT": 0,
|
||||
"APPENDLIMIT": 0,
|
||||
}
|
||||
return imapclient.UntaggedStatus{Mailbox: name, Attrs: attrs}
|
||||
}
|
||||
@ -165,7 +146,7 @@ func testListExtended(t *testing.T, uidonly bool) {
|
||||
tc.last(tc.client.ListFull(false, "A*", "Junk"))
|
||||
tc.xuntagged(xlist("Archive", Farchive), ustatus("Archive"), xlist("Junk", Fjunk), ustatus("Junk"))
|
||||
|
||||
tc.client.Create("Inbox/todo", nil)
|
||||
tc.client.Create("Inbox/todo")
|
||||
|
||||
tc.last(tc.client.ListFull(false, "Inbox*"))
|
||||
tc.xuntagged(ulist("Inbox", Fhaschildren, Fsubscribed), ustatus("Inbox"), xlist("Inbox/todo"), ustatus("Inbox/todo"))
|
||||
@ -223,7 +204,7 @@ func testListExtended(t *testing.T, uidonly bool) {
|
||||
tc.transactf("ok", `list (remote) "inbox" "a"`)
|
||||
tc.xuntagged()
|
||||
|
||||
tc.client.Create("inbox/a", nil)
|
||||
tc.client.Create("inbox/a")
|
||||
tc.transactf("ok", `list (remote) "inbox" "a"`)
|
||||
tc.xuntagged(ulist("Inbox/a"))
|
||||
|
||||
@ -235,21 +216,4 @@ func testListExtended(t *testing.T, uidonly bool) {
|
||||
tc.transactf("bad", `list (recursivematch remote) "" "*"`) // "remote" is not a base selection option.
|
||||
tc.transactf("bad", `list (unknown) "" "*"`) // Unknown selection options must result in BAD.
|
||||
tc.transactf("bad", `list () "" "*" return (unknown)`) // Unknown return options must result in BAD.
|
||||
|
||||
// Return metadata.
|
||||
tc.transactf("ok", `setmetadata inbox (/private/comment "y")`)
|
||||
tc.transactf("ok", `list () "" ("inbox") return (metadata (/private/comment /shared/comment))`)
|
||||
tc.xuntagged(
|
||||
ulist("Inbox"),
|
||||
imapclient.UntaggedMetadataAnnotations{
|
||||
Mailbox: "Inbox",
|
||||
Annotations: []imapclient.Annotation{
|
||||
{Key: "/private/comment", IsString: true, Value: []byte("y")},
|
||||
{Key: "/shared/comment"},
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
tc.transactf("bad", `list () "" ("inbox") return (metadata ())`) // Metadata list must be non-empty.
|
||||
tc.transactf("bad", `list () "" ("inbox") return (metadata (/shared/comment "/private/comment" ))`) // Extra space.
|
||||
}
|
||||
@ -7,18 +7,10 @@ import (
)

func TestLsub(t *testing.T) {
testLsub(t, false)
}

func TestLsubUIDOnly(t *testing.T) {
testLsub(t, true)
}

func testLsub(t *testing.T, uidonly bool) {
tc := start(t, uidonly)
tc := start(t)
defer tc.close()

tc.login("mjl@mox.example", password0)
tc.client.Login("mjl@mox.example", password0)

tc.transactf("bad", "lsub") // Missing params.
tc.transactf("bad", `lsub ""`) // Missing param.
@ -27,9 +19,6 @@ func testLsub(t *testing.T, uidonly bool) {
tc.transactf("ok", `lsub "" x*`)
tc.xuntagged()

tc.transactf("ok", `lsub "" expungebox`)
tc.xuntagged(imapclient.UntaggedLsub{Separator: '/', Mailbox: "expungebox"})

tc.transactf("ok", "create a/b/c")
tc.transactf("ok", `lsub "" a/*`)
tc.xuntagged(imapclient.UntaggedLsub{Separator: '/', Mailbox: "a/b"}, imapclient.UntaggedLsub{Separator: '/', Mailbox: "a/b/c"})
@ -1,17 +0,0 @@
package imapserver

import (
"fmt"
"os"
"testing"

"github.com/mjl-/mox/metrics"
)

func TestMain(m *testing.M) {
m.Run()
if metrics.Panics.Load() > 0 {
fmt.Println("unhandled panics encountered")
os.Exit(2)
}
}
@ -1,317 +0,0 @@
|
||||
package imapserver
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/mjl-/bstore"
|
||||
|
||||
"github.com/mjl-/mox/store"
|
||||
)
|
||||
|
||||
// Changed during tests.
|
||||
var metadataMaxKeys = 1000
|
||||
var metadataMaxSize = 1000 * 1000
|
||||
|
||||
// Metadata errata:
|
||||
// ../rfc/5464:183 ../rfc/5464-eid1691
|
||||
// ../rfc/5464:564 ../rfc/5464-eid1692
|
||||
// ../rfc/5464:494 ../rfc/5464-eid2785 ../rfc/5464-eid2786
|
||||
// ../rfc/5464:698 ../rfc/5464-eid3868
|
||||
|
||||
// Note: We do not tie the special-use mailbox flags to a (synthetic) private
|
||||
// per-mailbox annotation. ../rfc/6154:303
|
||||
|
||||
// For registration of names, see https://www.iana.org/assignments/imap-metadata/imap-metadata.xhtml
|
||||
|
||||
// Get metadata annotations, per mailbox or globally.
|
||||
//
|
||||
// State: Authenticated and selected.
|
||||
func (c *conn) cmdGetmetadata(tag, cmd string, p *parser) {
|
||||
// Command: ../rfc/5464:412
|
||||
|
||||
// Request syntax: ../rfc/5464:792
|
||||
|
||||
p.xspace()
|
||||
var optMaxSize int64 = -1
|
||||
var optDepth string
|
||||
if p.take("(") {
|
||||
for {
|
||||
if p.take("MAXSIZE") {
|
||||
// ../rfc/5464:804
|
||||
p.xspace()
|
||||
v := p.xnumber()
|
||||
if optMaxSize >= 0 {
|
||||
p.xerrorf("only a single maxsize option accepted")
|
||||
}
|
||||
optMaxSize = int64(v)
|
||||
} else if p.take("DEPTH") {
|
||||
// ../rfc/5464:823
|
||||
p.xspace()
|
||||
s := p.xtakelist("0", "1", "INFINITY")
|
||||
if optDepth != "" {
|
||||
p.xerrorf("only single depth option accepted")
|
||||
}
|
||||
optDepth = s
|
||||
} else {
|
||||
// ../rfc/5464:800 We are not doing anything further parsing for future extensions.
|
||||
p.xerrorf("unknown option for getmetadata, expected maxsize or depth")
|
||||
}
|
||||
|
||||
if p.take(")") {
|
||||
break
|
||||
}
|
||||
p.xspace()
|
||||
}
|
||||
p.xspace()
|
||||
}
|
||||
mailboxName := p.xmailbox()
|
||||
if mailboxName != "" {
|
||||
mailboxName = xcheckmailboxname(mailboxName, true)
|
||||
}
|
||||
p.xspace()
|
||||
// Entries ../rfc/5464:768
|
||||
entryNames := map[string]struct{}{}
|
||||
if p.take("(") {
|
||||
for {
|
||||
s := p.xmetadataKey()
|
||||
entryNames[s] = struct{}{}
|
||||
if p.take(")") {
|
||||
break
|
||||
}
|
||||
p.xtake(" ")
|
||||
}
|
||||
} else {
|
||||
s := p.xmetadataKey()
|
||||
entryNames[s] = struct{}{}
|
||||
}
|
||||
p.xempty()
|
||||
|
||||
var annotations []store.Annotation
|
||||
longentries := -1 // Size of largest value skipped due to optMaxSize. ../rfc/5464:482
|
||||
|
||||
c.account.WithRLock(func() {
|
||||
c.xdbread(func(tx *bstore.Tx) {
|
||||
q := bstore.QueryTx[store.Annotation](tx)
|
||||
if mailboxName == "" {
|
||||
q.FilterEqual("MailboxID", 0)
|
||||
} else {
|
||||
mb := c.xmailbox(tx, mailboxName, "TRYCREATE")
|
||||
q.FilterNonzero(store.Annotation{MailboxID: mb.ID})
|
||||
}
|
||||
q.FilterEqual("Expunged", false)
|
||||
q.SortAsc("MailboxID", "Key") // For tests.
|
||||
err := q.ForEach(func(a store.Annotation) error {
|
||||
// ../rfc/5464:516
|
||||
switch optDepth {
|
||||
case "", "0":
|
||||
if _, ok := entryNames[a.Key]; !ok {
|
||||
return nil
|
||||
}
|
||||
case "1", "INFINITY":
|
||||
// Go through all keys, matching depth.
|
||||
if _, ok := entryNames[a.Key]; ok {
|
||||
break
|
||||
}
|
||||
var match bool
|
||||
for s := range entryNames {
|
||||
prefix := s
|
||||
if s != "/" {
|
||||
prefix += "/"
|
||||
}
|
||||
if !strings.HasPrefix(a.Key, prefix) {
|
||||
continue
|
||||
}
|
||||
if optDepth == "INFINITY" {
|
||||
match = true
|
||||
break
|
||||
}
|
||||
suffix := a.Key[len(prefix):]
|
||||
t := strings.SplitN(suffix, "/", 2)
|
||||
if len(t) == 1 {
|
||||
match = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !match {
|
||||
return nil
|
||||
}
|
||||
default:
|
||||
xcheckf(fmt.Errorf("%q", optDepth), "missing case for depth")
|
||||
}
|
||||
|
||||
if optMaxSize >= 0 && int64(len(a.Value)) > optMaxSize {
|
||||
longentries = max(longentries, len(a.Value))
|
||||
} else {
|
||||
annotations = append(annotations, a)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
xcheckf(err, "looking up annotations")
|
||||
})
|
||||
})
|
||||
|
||||
// Response syntax: ../rfc/5464:807 ../rfc/5464:778
|
||||
// We can only send untagged responses when we have any matches.
|
||||
if len(annotations) > 0 {
|
||||
fmt.Fprintf(c.xbw, "* METADATA %s (", mailboxt(mailboxName).pack(c))
|
||||
for i, a := range annotations {
|
||||
if i > 0 {
|
||||
fmt.Fprint(c.xbw, " ")
|
||||
}
|
||||
astring(a.Key).xwriteTo(c, c.xbw)
|
||||
fmt.Fprint(c.xbw, " ")
|
||||
if a.IsString {
|
||||
string0(string(a.Value)).xwriteTo(c, c.xbw)
|
||||
} else {
|
||||
v := readerSizeSyncliteral{bytes.NewReader(a.Value), int64(len(a.Value)), true}
|
||||
v.xwriteTo(c, c.xbw)
|
||||
}
|
||||
}
|
||||
c.xbwritelinef(")")
|
||||
}
|
||||
|
||||
if longentries >= 0 {
|
||||
c.xbwritelinef("%s OK [METADATA LONGENTRIES %d] getmetadata done", tag, longentries)
|
||||
} else {
|
||||
c.ok(tag, cmd)
|
||||
}
|
||||
}
|
||||
|
||||
// Set metadata annotation, per mailbox or globally.
|
||||
//
|
||||
// We allow both /private/* and /shared/*, we store them in the same way since we
|
||||
// don't have ACL extension support yet or another mechanism for access control.
|
||||
//
|
||||
// State: Authenticated and selected.
|
||||
func (c *conn) cmdSetmetadata(tag, cmd string, p *parser) {
|
||||
// Command: ../rfc/5464:547
|
||||
|
||||
// Request syntax: ../rfc/5464:826
|
||||
|
||||
p.xspace()
|
||||
mailboxName := p.xmailbox()
|
||||
// Empty name means a global (per-account) annotation, not for a mailbox.
|
||||
if mailboxName != "" {
|
||||
mailboxName = xcheckmailboxname(mailboxName, true)
|
||||
}
|
||||
p.xspace()
|
||||
p.xtake("(")
|
||||
var l []store.Annotation
|
||||
for {
|
||||
key, isString, value := p.xmetadataKeyValue()
|
||||
l = append(l, store.Annotation{Key: key, IsString: isString, Value: value})
|
||||
if p.take(")") {
|
||||
break
|
||||
}
|
||||
p.xspace()
|
||||
}
|
||||
p.xempty()
|
||||
|
||||
// Additional checks on entry names.
|
||||
for _, a := range l {
|
||||
// ../rfc/5464:217
|
||||
if !strings.HasPrefix(a.Key, "/private/") && !strings.HasPrefix(a.Key, "/shared/") {
|
||||
// ../rfc/5464:346
|
||||
xuserErrorf("only /private/* and /shared/* entry names allowed")
|
||||
}
|
||||
|
||||
// We also enforce that /private/vendor/ is followed by at least 2 elements.
|
||||
// ../rfc/5464:234
|
||||
switch {
|
||||
case a.Key == "/private/vendor",
|
||||
strings.HasPrefix(a.Key, "/private/vendor/"),
|
||||
a.Key == "/shared/vendor", strings.HasPrefix(a.Key, "/shared/vendor/"):
|
||||
|
||||
t := strings.SplitN(a.Key[1:], "/", 4)
|
||||
if len(t) < 4 {
|
||||
xuserErrorf("entry names starting with /private/vendor or /shared/vendor must have at least 4 components")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Store the annotations, possibly removing/inserting/updating them.
|
||||
c.account.WithWLock(func() {
|
||||
var changes []store.Change
|
||||
var modseq store.ModSeq
|
||||
|
||||
c.xdbwrite(func(tx *bstore.Tx) {
|
||||
var mb store.Mailbox // mb.ID as 0 is used in query below.
|
||||
if mailboxName != "" {
|
||||
mb = c.xmailbox(tx, mailboxName, "TRYCREATE")
|
||||
}
|
||||
|
||||
for _, a := range l {
|
||||
q := bstore.QueryTx[store.Annotation](tx)
|
||||
q.FilterNonzero(store.Annotation{Key: a.Key})
|
||||
q.FilterEqual("MailboxID", mb.ID) // Can be zero.
|
||||
q.FilterEqual("Expunged", false)
|
||||
oa, err := q.Get()
|
||||
// Nil means remove. ../rfc/5464:579
|
||||
if err == bstore.ErrAbsent && a.Value == nil {
|
||||
continue
|
||||
}
|
||||
if modseq == 0 {
|
||||
var err error
|
||||
modseq, err = c.account.NextModSeq(tx)
|
||||
xcheckf(err, "get next modseq")
|
||||
}
|
||||
if err == bstore.ErrAbsent {
|
||||
a.MailboxID = mb.ID
|
||||
a.CreateSeq = modseq
|
||||
a.ModSeq = modseq
|
||||
err = tx.Insert(&a)
|
||||
xcheckf(err, "inserting annotation")
|
||||
changes = append(changes, a.Change(mailboxName))
|
||||
} else {
|
||||
xcheckf(err, "get metadata")
|
||||
oa.ModSeq = modseq
|
||||
if a.Value == nil {
|
||||
oa.Expunged = true
|
||||
}
|
||||
oa.IsString = a.IsString
|
||||
oa.Value = a.Value
|
||||
err = tx.Update(&oa)
|
||||
xcheckf(err, "updating metdata")
|
||||
changes = append(changes, oa.Change(mailboxName))
|
||||
}
|
||||
}
|
||||
|
||||
c.xcheckMetadataSize(tx)
|
||||
|
||||
// ../rfc/7162:1335
|
||||
if mb.ID != 0 && modseq != 0 {
|
||||
mb.ModSeq = modseq
|
||||
err := tx.Update(&mb)
|
||||
xcheckf(err, "updating mailbox with modseq")
|
||||
}
|
||||
})
|
||||
|
||||
c.broadcast(changes)
|
||||
})
|
||||
|
||||
c.ok(tag, cmd)
|
||||
}
|
||||
|
||||
func (c *conn) xcheckMetadataSize(tx *bstore.Tx) {
|
||||
// Check for total size. We allow a total of 1000 entries, with total capacity of 1MB.
|
||||
// ../rfc/5464:383
|
||||
var n int
|
||||
var size int
|
||||
err := bstore.QueryTx[store.Annotation](tx).FilterEqual("Expunged", false).ForEach(func(a store.Annotation) error {
|
||||
n++
|
||||
if n > metadataMaxKeys {
|
||||
// ../rfc/5464:590
|
||||
xusercodeErrorf("METADATA (TOOMANY)", "too many metadata entries, 1000 allowed in total")
|
||||
}
|
||||
size += len(a.Key) + len(a.Value)
|
||||
if size > metadataMaxSize {
|
||||
// ../rfc/5464:585 We only have a max total size limit, not per entry. We'll
|
||||
// mention the max total size.
|
||||
xusercodeErrorf(fmt.Sprintf("METADATA (MAXSIZE %d)", metadataMaxSize), "metadata entry values too large, total maximum size is 1MB")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
xcheckf(err, "checking metadata annotation size")
|
||||
}
|
@ -1,296 +0,0 @@
|
||||
package imapserver
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/mjl-/mox/imapclient"
|
||||
)
|
||||
|
||||
func TestMetadata(t *testing.T) {
|
||||
testMetadata(t, false)
|
||||
}
|
||||
|
||||
func TestMetadataUIDOnly(t *testing.T) {
|
||||
testMetadata(t, true)
|
||||
}
|
||||
|
||||
func testMetadata(t *testing.T, uidonly bool) {
|
||||
tc := start(t, uidonly)
|
||||
defer tc.close()
|
||||
|
||||
tc.login("mjl@mox.example", password0)
|
||||
|
||||
tc.transactf("ok", `getmetadata "" /private/comment`)
|
||||
tc.xuntagged()
|
||||
|
||||
tc.transactf("ok", `getmetadata inbox (/private/comment)`)
|
||||
tc.xuntagged()
|
||||
|
||||
tc.transactf("ok", `setmetadata "" (/PRIVATE/COMMENT "global value")`)
|
||||
tc.transactf("ok", `setmetadata inbox (/private/comment "mailbox value")`)
|
||||
|
||||
tc.transactf("ok", `create metabox`)
|
||||
tc.transactf("ok", `setmetadata metabox (/private/comment "mailbox value")`)
|
||||
tc.transactf("ok", `setmetadata metabox (/shared/comment "mailbox value")`)
|
||||
tc.transactf("ok", `setmetadata metabox (/shared/comment nil)`) // Remove.
|
||||
tc.transactf("ok", `delete metabox`) // Delete mailbox with live and expunged metadata.
|
||||
|
||||
tc.transactf("no", `setmetadata expungebox (/private/comment "mailbox value")`)
|
||||
tc.xcodeWord("TRYCREATE")
|
||||
|
||||
tc.transactf("ok", `getmetadata "" ("/private/comment")`)
|
||||
tc.xuntagged(imapclient.UntaggedMetadataAnnotations{
|
||||
Mailbox: "",
|
||||
Annotations: []imapclient.Annotation{
|
||||
{Key: "/private/comment", IsString: true, Value: []byte("global value")},
|
||||
},
|
||||
})
|
||||
|
||||
tc.transactf("ok", `setmetadata Inbox (/shared/comment "share")`)
|
||||
|
||||
tc.transactf("ok", `getmetadata inbox (/private/comment /private/unknown /shared/comment)`)
|
||||
tc.xuntagged(imapclient.UntaggedMetadataAnnotations{
|
||||
Mailbox: "Inbox",
|
||||
Annotations: []imapclient.Annotation{
|
||||
{Key: "/private/comment", IsString: true, Value: []byte("mailbox value")},
|
||||
{Key: "/shared/comment", IsString: true, Value: []byte("share")},
|
||||
},
|
||||
})
|
||||
|
||||
tc.transactf("no", `setmetadata doesnotexist (/private/comment "test")`) // Bad mailbox.
|
||||
tc.transactf("no", `setmetadata Inbox (/badprefix/comment "")`)
|
||||
tc.transactf("no", `setmetadata Inbox (/private/vendor "")`) // /*/vendor must have more components.
|
||||
tc.transactf("no", `setmetadata Inbox (/private/vendor/stillbad "")`) // /*/vendor must have more components.
|
||||
tc.transactf("ok", `setmetadata Inbox (/private/vendor/a/b "")`)
|
||||
tc.transactf("bad", `setmetadata Inbox (/private/no* "")`)
|
||||
tc.transactf("bad", `setmetadata Inbox (/private/no%% "")`)
|
||||
tc.transactf("bad", `setmetadata Inbox (/private/notrailingslash/ "")`)
|
||||
tc.transactf("bad", `setmetadata Inbox (/private//nodupslash "")`)
|
||||
tc.transactf("bad", "setmetadata Inbox (/private/\001 \"\")")
|
||||
tc.transactf("bad", "setmetadata Inbox (/private/\u007f \"\")")
|
||||
tc.transactf("bad", `getmetadata (depth 0 depth 0) inbox (/private/a)`) // Duplicate option.
|
||||
tc.transactf("bad", `getmetadata (depth badvalue) inbox (/private/a)`)
|
||||
tc.transactf("bad", `getmetadata (maxsize invalid) inbox (/private/a)`)
|
||||
tc.transactf("bad", `getmetadata (badoption) inbox (/private/a)`)
|
||||
|
||||
// Update existing annotation by key.
|
||||
tc.transactf("ok", `setmetadata "" (/PRIVATE/COMMENT "global updated")`)
|
||||
tc.transactf("ok", `setmetadata inbox (/private/comment "mailbox updated")`)
|
||||
tc.transactf("ok", `getmetadata "" (/private/comment)`)
|
||||
tc.xuntagged(imapclient.UntaggedMetadataAnnotations{
|
||||
Mailbox: "",
|
||||
Annotations: []imapclient.Annotation{
|
||||
{Key: "/private/comment", IsString: true, Value: []byte("global updated")},
|
||||
},
|
||||
})
|
||||
tc.transactf("ok", `getmetadata inbox (/private/comment)`)
|
||||
tc.xuntagged(imapclient.UntaggedMetadataAnnotations{
|
||||
Mailbox: "Inbox",
|
||||
Annotations: []imapclient.Annotation{
|
||||
{Key: "/private/comment", IsString: true, Value: []byte("mailbox updated")},
|
||||
},
|
||||
})
|
||||
|
||||
// Delete annotation with nil value.
|
||||
tc.transactf("ok", `setmetadata "" (/private/comment nil)`)
|
||||
tc.transactf("ok", `setmetadata inbox (/private/comment nil)`)
|
||||
tc.transactf("ok", `getmetadata "" (/private/comment)`)
|
||||
tc.xuntagged()
|
||||
tc.transactf("ok", `getmetadata inbox (/private/comment)`)
|
||||
tc.xuntagged()
|
||||
|
||||
// Create a literal8 value, not a string.
|
||||
tc.transactf("ok", "setmetadata inbox (/private/comment ~{4+}\r\ntest)")
|
||||
tc.transactf("ok", `getmetadata inbox (/private/comment)`)
|
||||
tc.xuntagged(imapclient.UntaggedMetadataAnnotations{
|
||||
Mailbox: "Inbox",
|
||||
Annotations: []imapclient.Annotation{
|
||||
{Key: "/private/comment", IsString: false, Value: []byte("test")},
|
||||
},
|
||||
})
|
||||
|
||||
// Request with a maximum size, we don't get anything larger.
|
||||
tc.transactf("ok", `setmetadata inbox (/private/another "longer")`)
|
||||
tc.transactf("ok", `getmetadata (maxsize 4) inbox (/private/comment /private/another)`)
|
||||
tc.xcode(imapclient.CodeMetadataLongEntries(6))
|
||||
tc.xuntagged(imapclient.UntaggedMetadataAnnotations{
|
||||
Mailbox: "Inbox",
|
||||
Annotations: []imapclient.Annotation{
|
||||
{Key: "/private/comment", IsString: false, Value: []byte("test")},
|
||||
},
|
||||
})
|
||||
|
||||
// Request with various depth values.
|
||||
tc.transactf("ok", `setmetadata inbox (/private/a "x" /private/a/b "x" /private/a/b/c "x" /private/a/b/c/d "x")`)
|
||||
tc.transactf("ok", `getmetadata (depth 0) inbox (/private/a)`)
|
||||
tc.xuntagged(imapclient.UntaggedMetadataAnnotations{
|
||||
Mailbox: "Inbox",
|
||||
Annotations: []imapclient.Annotation{
|
||||
{Key: "/private/a", IsString: true, Value: []byte("x")},
|
||||
},
|
||||
})
|
||||
tc.transactf("ok", `getmetadata (depth 1) inbox (/private/a)`)
|
||||
tc.xuntagged(imapclient.UntaggedMetadataAnnotations{
|
||||
Mailbox: "Inbox",
|
||||
Annotations: []imapclient.Annotation{
|
||||
{Key: "/private/a", IsString: true, Value: []byte("x")},
|
||||
{Key: "/private/a/b", IsString: true, Value: []byte("x")},
|
||||
},
|
||||
})
|
||||
tc.transactf("ok", `getmetadata (depth infinity) inbox (/private/a)`)
|
||||
tc.xuntagged(imapclient.UntaggedMetadataAnnotations{
|
||||
Mailbox: "Inbox",
|
||||
Annotations: []imapclient.Annotation{
|
||||
{Key: "/private/a", IsString: true, Value: []byte("x")},
|
||||
{Key: "/private/a/b", IsString: true, Value: []byte("x")},
|
||||
{Key: "/private/a/b/c", IsString: true, Value: []byte("x")},
|
||||
{Key: "/private/a/b/c/d", IsString: true, Value: []byte("x")},
|
||||
},
|
||||
})
|
||||
// Same as previous, but ask for everything below /.
|
||||
tc.transactf("ok", `getmetadata (depth infinity) inbox ("")`)
|
||||
tc.xuntagged(imapclient.UntaggedMetadataAnnotations{
|
||||
Mailbox: "Inbox",
|
||||
Annotations: []imapclient.Annotation{
|
||||
{Key: "/private/a", IsString: true, Value: []byte("x")},
|
||||
{Key: "/private/a/b", IsString: true, Value: []byte("x")},
|
||||
{Key: "/private/a/b/c", IsString: true, Value: []byte("x")},
|
||||
{Key: "/private/a/b/c/d", IsString: true, Value: []byte("x")},
|
||||
{Key: "/private/another", IsString: true, Value: []byte("longer")},
|
||||
{Key: "/private/comment", IsString: false, Value: []byte("test")},
|
||||
{Key: "/private/vendor/a/b", IsString: true, Value: []byte("")},
|
||||
{Key: "/shared/comment", IsString: true, Value: []byte("share")},
|
||||
},
|
||||
})
|
||||
|
||||
// Deleting a mailbox with an annotation should work and annotations should not
|
||||
// come back when recreating mailbox.
|
||||
tc.transactf("ok", "create testbox")
|
||||
tc.transactf("ok", `setmetadata testbox (/private/a "x")`)
|
||||
tc.transactf("ok", "delete testbox")
|
||||
tc.transactf("ok", "create testbox")
|
||||
tc.transactf("ok", `getmetadata testbox (/private/a)`)
|
||||
tc.xuntagged()
|
||||
|
||||
// When renaming mailbox, annotations must be copied to destination mailbox.
|
||||
tc.transactf("ok", "rename inbox newbox")
|
||||
tc.transactf("ok", `getmetadata newbox (/private/a)`)
|
||||
tc.xuntagged(imapclient.UntaggedMetadataAnnotations{
|
||||
Mailbox: "newbox",
|
||||
Annotations: []imapclient.Annotation{
|
||||
{Key: "/private/a", IsString: true, Value: []byte("x")},
|
||||
},
|
||||
})
|
||||
tc.transactf("ok", `getmetadata inbox (/private/a)`)
|
||||
tc.xuntagged(imapclient.UntaggedMetadataAnnotations{
|
||||
Mailbox: "Inbox",
|
||||
Annotations: []imapclient.Annotation{
|
||||
{Key: "/private/a", IsString: true, Value: []byte("x")},
|
||||
},
|
||||
})
|
||||
|
||||
// Broadcast should not happen when metadata capability is not enabled.
|
||||
tc2 := startNoSwitchboard(t, uidonly)
|
||||
defer tc2.closeNoWait()
|
||||
tc2.login("mjl@mox.example", password0)
|
||||
tc2.client.Select("inbox")
|
||||
|
||||
tc2.cmdf("", "idle")
|
||||
tc2.readprefixline("+ ")
|
||||
done := make(chan error)
|
||||
go func() {
|
||||
defer func() {
|
||||
x := recover()
|
||||
if x != nil {
|
||||
done <- fmt.Errorf("%v", x)
|
||||
}
|
||||
}()
|
||||
untagged, _ := tc2.client.ReadUntagged()
|
||||
var exists imapclient.UntaggedExists
|
||||
tuntagged(tc2.t, untagged, &exists)
|
||||
tc2.writelinef("done")
|
||||
tc2.response("ok")
|
||||
done <- nil
|
||||
}()
|
||||
|
||||
// Should not cause idle to return.
|
||||
tc.transactf("ok", `setmetadata inbox (/private/a "y")`)
|
||||
// Cause to return.
|
||||
tc.transactf("ok", "append inbox {4+}\r\ntest")
|
||||
|
||||
timer := time.NewTimer(time.Second)
|
||||
defer timer.Stop()
|
||||
select {
|
||||
case err := <-done:
|
||||
tc.check(err, "idle")
|
||||
case <-timer.C:
|
||||
t.Fatalf("idle did not finish")
|
||||
}
|
||||
|
||||
// Broadcast should happen when metadata capability is enabled.
|
||||
tc2.client.Enable(imapclient.CapMetadata)
|
||||
tc2.cmdf("", "idle")
|
||||
tc2.readprefixline("+ ")
|
||||
done = make(chan error)
|
||||
go func() {
|
||||
defer func() {
|
||||
x := recover()
|
||||
if x != nil {
|
||||
done <- fmt.Errorf("%v", x)
|
||||
}
|
||||
}()
|
||||
untagged, _ := tc2.client.ReadUntagged()
|
||||
var metadataKeys imapclient.UntaggedMetadataKeys
|
||||
tuntagged(tc2.t, untagged, &metadataKeys)
|
||||
tc2.writelinef("done")
|
||||
tc2.response("ok")
|
||||
done <- nil
|
||||
}()
|
||||
|
||||
// Should cause idle to return.
|
||||
tc.transactf("ok", `setmetadata inbox (/private/a "z")`)
|
||||
|
||||
timer = time.NewTimer(time.Second)
|
||||
defer timer.Stop()
|
||||
select {
|
||||
case err := <-done:
|
||||
tc.check(err, "idle")
|
||||
case <-timer.C:
|
||||
t.Fatalf("idle did not finish")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMetadataLimit(t *testing.T) {
tc := start(t, false)
defer tc.close()

tc.login("mjl@mox.example", password0)

maxKeys, maxSize := metadataMaxKeys, metadataMaxSize
defer func() {
metadataMaxKeys = maxKeys
metadataMaxSize = maxSize
}()
metadataMaxKeys = 10
metadataMaxSize = 1000

// Reach max total size limit.
buf := make([]byte, metadataMaxSize+1)
for i := range buf {
buf[i] = 'x'
}
tc.cmdf("", "setmetadata inbox (/private/large ~{%d+}", len(buf))
tc.client.Write(buf)
tc.client.Writelinef(")")
tc.response("no")
tc.xcode(imapclient.CodeMetadataMaxSize(metadataMaxSize))

// Reach limit for max number.
for i := 1; i <= metadataMaxKeys; i++ {
tc.transactf("ok", `setmetadata inbox (/private/key%d "test")`, i)
}
tc.transactf("no", `setmetadata inbox (/private/toomany "test")`)
tc.xcode(imapclient.CodeMetadataTooMany{})
}
@ -7,31 +7,23 @@ import (
|
||||
)
|
||||
|
||||
func TestMove(t *testing.T) {
|
||||
testMove(t, false)
|
||||
}
|
||||
|
||||
func TestMoveUIDOnly(t *testing.T) {
|
||||
testMove(t, true)
|
||||
}
|
||||
|
||||
func testMove(t *testing.T, uidonly bool) {
|
||||
defer mockUIDValidity()()
|
||||
tc := start(t, uidonly)
|
||||
tc := start(t)
|
||||
defer tc.close()
|
||||
|
||||
tc2 := startNoSwitchboard(t, uidonly)
|
||||
defer tc2.closeNoWait()
|
||||
tc2 := startNoSwitchboard(t)
|
||||
defer tc2.close()
|
||||
|
||||
tc3 := startNoSwitchboard(t, uidonly)
|
||||
defer tc3.closeNoWait()
|
||||
tc3 := startNoSwitchboard(t)
|
||||
defer tc3.close()
|
||||
|
||||
tc.login("mjl@mox.example", password0)
|
||||
tc.client.Login("mjl@mox.example", password0)
|
||||
tc.client.Select("inbox")
|
||||
|
||||
tc2.login("mjl@mox.example", password0)
|
||||
tc2.client.Login("mjl@mox.example", password0)
|
||||
tc2.client.Select("Trash")
|
||||
|
||||
tc3.login("mjl@mox.example", password0)
|
||||
tc3.client.Login("mjl@mox.example", password0)
|
||||
tc3.client.Select("inbox")
|
||||
|
||||
tc.transactf("bad", "move") // Missing params.
|
||||
@ -39,79 +31,62 @@ func testMove(t *testing.T, uidonly bool) {
|
||||
tc.transactf("bad", "move 1 inbox ") // Leftover.
|
||||
|
||||
// Seqs 1,2 and UIDs 3,4.
|
||||
tc.client.Append("inbox", makeAppend(exampleMsg))
|
||||
tc.client.Append("inbox", makeAppend(exampleMsg))
|
||||
tc.client.UIDStoreFlagsSet("1:2", true, `\Deleted`)
|
||||
tc.client.Append("inbox", nil, nil, []byte(exampleMsg))
|
||||
tc.client.Append("inbox", nil, nil, []byte(exampleMsg))
|
||||
tc.client.StoreFlagsSet("1:2", true, `\Deleted`)
|
||||
tc.client.Expunge()
|
||||
tc.client.Append("inbox", makeAppend(exampleMsg))
|
||||
tc.client.Append("inbox", makeAppend(exampleMsg))
|
||||
tc.client.Append("inbox", nil, nil, []byte(exampleMsg))
|
||||
tc.client.Append("inbox", nil, nil, []byte(exampleMsg))
|
||||
|
||||
if uidonly {
|
||||
tc.transactf("ok", "uid move 1:* Trash")
|
||||
} else {
|
||||
tc.client.Unselect()
|
||||
tc.client.Examine("inbox")
|
||||
tc.transactf("no", "move 1 Trash") // Opened readonly.
|
||||
tc.client.Unselect()
|
||||
tc.client.Select("inbox")
|
||||
tc.client.Unselect()
|
||||
tc.client.Examine("inbox")
|
||||
tc.transactf("no", "move 1 Trash") // Opened readonly.
|
||||
tc.client.Unselect()
|
||||
tc.client.Select("inbox")
|
||||
|
||||
tc.transactf("no", "move 1 nonexistent")
|
||||
tc.xcodeWord("TRYCREATE")
|
||||
tc.transactf("no", "move 1 nonexistent")
|
||||
tc.xcode("TRYCREATE")
|
||||
|
||||
tc.transactf("no", "move 1 expungebox")
|
||||
tc.xcodeWord("TRYCREATE")
|
||||
tc.transactf("no", "move 1 inbox") // Cannot move to same mailbox.
|
||||
|
||||
tc.transactf("no", "move 1 inbox") // Cannot move to same mailbox.
|
||||
tc2.transactf("ok", "noop") // Drain.
|
||||
tc3.transactf("ok", "noop") // Drain.
|
||||
|
||||
tc2.transactf("ok", "noop") // Drain.
|
||||
tc3.transactf("ok", "noop") // Drain.
|
||||
|
||||
tc.transactf("ok", "move 1:* Trash")
|
||||
tc.xuntagged(
|
||||
imapclient.UntaggedResult{Status: "OK", Code: imapclient.CodeCopyUID{DestUIDValidity: 1, From: []imapclient.NumRange{{First: 3, Last: uint32ptr(4)}}, To: []imapclient.NumRange{{First: 1, Last: uint32ptr(2)}}}, Text: "moved"},
|
||||
imapclient.UntaggedExpunge(1),
|
||||
imapclient.UntaggedExpunge(1),
|
||||
)
|
||||
tc2.transactf("ok", "noop")
|
||||
tc2.xuntagged(
|
||||
imapclient.UntaggedExists(2),
|
||||
tc.untaggedFetch(1, 1, imapclient.FetchFlags(nil)),
|
||||
tc.untaggedFetch(2, 2, imapclient.FetchFlags(nil)),
|
||||
)
|
||||
tc3.transactf("ok", "noop")
|
||||
tc3.xuntagged(imapclient.UntaggedExpunge(1), imapclient.UntaggedExpunge(1))
|
||||
}
|
||||
tc.transactf("ok", "move 1:* Trash")
|
||||
ptr := func(v uint32) *uint32 { return &v }
|
||||
tc.xuntagged(
|
||||
imapclient.UntaggedResult{Status: "OK", RespText: imapclient.RespText{Code: "COPYUID", CodeArg: imapclient.CodeCopyUID{DestUIDValidity: 1, From: []imapclient.NumRange{{First: 3, Last: ptr(4)}}, To: []imapclient.NumRange{{First: 1, Last: ptr(2)}}}, More: "moved"}},
|
||||
imapclient.UntaggedExpunge(1),
|
||||
imapclient.UntaggedExpunge(1),
|
||||
)
|
||||
tc2.transactf("ok", "noop")
|
||||
tc2.xuntagged(
|
||||
imapclient.UntaggedExists(2),
|
||||
imapclient.UntaggedFetch{Seq: 1, Attrs: []imapclient.FetchAttr{imapclient.FetchUID(1), imapclient.FetchFlags(nil)}},
|
||||
imapclient.UntaggedFetch{Seq: 2, Attrs: []imapclient.FetchAttr{imapclient.FetchUID(2), imapclient.FetchFlags(nil)}},
|
||||
)
|
||||
tc3.transactf("ok", "noop")
|
||||
tc3.xuntagged(imapclient.UntaggedExpunge(1), imapclient.UntaggedExpunge(1))
|
||||
|
||||
// UIDs 5,6
|
||||
tc.client.Append("inbox", makeAppend(exampleMsg))
|
||||
tc.client.Append("inbox", makeAppend(exampleMsg))
|
||||
tc.client.Append("inbox", nil, nil, []byte(exampleMsg))
|
||||
tc.client.Append("inbox", nil, nil, []byte(exampleMsg))
|
||||
tc2.transactf("ok", "noop") // Drain.
|
||||
tc3.transactf("ok", "noop") // Drain.
|
||||
|
||||
tc.transactf("no", "uid move 1:4 Trash") // No match.
|
||||
tc.transactf("ok", "uid move 6:5 Trash")
|
||||
if uidonly {
|
||||
tc.xuntagged(
|
||||
imapclient.UntaggedResult{Status: "OK", Code: imapclient.CodeCopyUID{DestUIDValidity: 1, From: []imapclient.NumRange{{First: 5, Last: uint32ptr(6)}}, To: []imapclient.NumRange{{First: 3, Last: uint32ptr(4)}}}, Text: "moved"},
|
||||
imapclient.UntaggedVanished{UIDs: xparseNumSet("5:6")},
|
||||
)
|
||||
} else {
|
||||
tc.xuntagged(
|
||||
imapclient.UntaggedResult{Status: "OK", Code: imapclient.CodeCopyUID{DestUIDValidity: 1, From: []imapclient.NumRange{{First: 5, Last: uint32ptr(6)}}, To: []imapclient.NumRange{{First: 3, Last: uint32ptr(4)}}}, Text: "moved"},
|
||||
imapclient.UntaggedExpunge(1),
|
||||
imapclient.UntaggedExpunge(1),
|
||||
)
|
||||
}
|
||||
tc.xuntagged(
|
||||
imapclient.UntaggedResult{Status: "OK", RespText: imapclient.RespText{Code: "COPYUID", CodeArg: imapclient.CodeCopyUID{DestUIDValidity: 1, From: []imapclient.NumRange{{First: 5, Last: ptr(6)}}, To: []imapclient.NumRange{{First: 3, Last: ptr(4)}}}, More: "moved"}},
|
||||
imapclient.UntaggedExpunge(1),
|
||||
imapclient.UntaggedExpunge(1),
|
||||
)
|
||||
tc2.transactf("ok", "noop")
|
||||
tc2.xuntagged(
|
||||
imapclient.UntaggedExists(4),
|
||||
tc2.untaggedFetch(3, 3, imapclient.FetchFlags(nil)),
|
||||
tc2.untaggedFetch(4, 4, imapclient.FetchFlags(nil)),
|
||||
imapclient.UntaggedFetch{Seq: 3, Attrs: []imapclient.FetchAttr{imapclient.FetchUID(3), imapclient.FetchFlags(nil)}},
|
||||
imapclient.UntaggedFetch{Seq: 4, Attrs: []imapclient.FetchAttr{imapclient.FetchUID(4), imapclient.FetchFlags(nil)}},
|
||||
)
|
||||
tc3.transactf("ok", "noop")
|
||||
if uidonly {
|
||||
tc3.xuntagged(imapclient.UntaggedVanished{UIDs: xparseNumSet("5:6")})
|
||||
} else {
|
||||
tc3.xuntagged(imapclient.UntaggedExpunge(1), imapclient.UntaggedExpunge(1))
|
||||
}
|
||||
tc3.xuntagged(imapclient.UntaggedExpunge(1), imapclient.UntaggedExpunge(1))
|
||||
}
|
||||
|
@ -1,329 +0,0 @@
|
||||
package imapserver
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"github.com/mjl-/bstore"
|
||||
|
||||
"github.com/mjl-/mox/store"
|
||||
)
|
||||
|
||||
// Max number of pending changes for selected-delayed mailbox before we write a
|
||||
// NOTIFICATIONOVERFLOW message, flush changes and stop gathering more changes.
|
||||
// Changed during tests.
|
||||
var selectedDelayedChangesMax = 1000
|
||||
|
||||
// notify represents a configuration as passed to the notify command.
|
||||
type notify struct {
|
||||
// "NOTIFY NONE" results in an empty list, matching no events.
|
||||
EventGroups []eventGroup
|
||||
|
||||
// Changes for the selected mailbox in case of SELECTED-DELAYED, when we don't send
|
||||
// events asynchrously. These must still be processed later on for their
|
||||
// ChangeRemoveUIDs, to erase expunged message files. At the end of a command (e.g.
|
||||
// NOOP) or immediately upon IDLE we will send untagged responses for these
|
||||
// changes. If the connection breaks, we still process the ChangeRemoveUIDs.
|
||||
Delayed []store.Change
|
||||
}
|
||||
|
||||
// match checks if an event for a mailbox id/name (optional depending on type)
|
||||
// should be turned into a notification to the client.
|
||||
func (n notify) match(c *conn, xtxfn func() *bstore.Tx, mailboxID int64, mailbox string, kind eventKind) (mailboxSpecifier, notifyEvent, bool) {
|
||||
// We look through the event groups, and won't stop looking until we've found a
|
||||
// confirmation the event should be notified. ../rfc/5465:756
|
||||
|
||||
// Non-message-related events are only matched by non-"selected" mailbox
|
||||
// specifiers. ../rfc/5465:268
|
||||
// If you read the mailboxes matching paragraph in isolation, you would think only
|
||||
// "SELECTED" and "SELECTED-DELAYED" can match events for the selected mailbox. But
|
||||
// a few other places hint that that only applies to message events, not to mailbox
|
||||
// events, such as subscriptions and mailbox metadata changes. With a strict
|
||||
// interpretation, clients couldn't request notifications for such events for the
|
||||
// selection mailbox. ../rfc/5465:752
|
||||
|
||||
for _, eg := range n.EventGroups {
|
||||
switch eg.MailboxSpecifier.Kind {
|
||||
case mbspecSelected, mbspecSelectedDelayed: // ../rfc/5465:800
|
||||
if mailboxID != c.mailboxID || !slices.Contains(messageEventKinds, kind) {
|
||||
continue
|
||||
}
|
||||
for _, ev := range eg.Events {
|
||||
if eventKind(ev.Kind) == kind {
|
||||
return eg.MailboxSpecifier, ev, true
|
||||
}
|
||||
}
|
||||
// We can only have a single selected for notify, so no point in continuing the search.
|
||||
return mailboxSpecifier{}, notifyEvent{}, false
|
||||
|
||||
default:
|
||||
// The selected mailbox can only match for non-message events for specifiers other
|
||||
// than "selected"/"selected-delayed".
|
||||
if c.mailboxID == mailboxID && slices.Contains(messageEventKinds, kind) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
var match bool
|
||||
Match:
|
||||
switch eg.MailboxSpecifier.Kind {
|
||||
case mbspecPersonal: // ../rfc/5465:817
|
||||
match = true
|
||||
|
||||
case mbspecInboxes: // ../rfc/5465:822
|
||||
if mailbox == "Inbox" || strings.HasPrefix(mailbox, "Inbox/") {
|
||||
match = true
|
||||
break Match
|
||||
}
|
||||
|
||||
if mailbox == "" {
|
||||
break Match
|
||||
}
|
||||
|
||||
// Include mailboxes we may deliver to based on destinations, or based on rulesets,
|
||||
// not including deliveries for mailing lists.
|
||||
conf, _ := c.account.Conf()
|
||||
for _, dest := range conf.Destinations {
|
||||
if dest.Mailbox == mailbox {
|
||||
match = true
|
||||
break Match
|
||||
}
|
||||
|
||||
for _, rs := range dest.Rulesets {
|
||||
if rs.ListAllowDomain == "" && rs.Mailbox == mailbox {
|
||||
match = true
|
||||
break Match
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
case mbspecSubscribed: // ../rfc/5465:831
|
||||
sub := store.Subscription{Name: mailbox}
|
||||
err := xtxfn().Get(&sub)
|
||||
if err != bstore.ErrAbsent {
|
||||
xcheckf(err, "lookup subscription")
|
||||
}
|
||||
match = err == nil
|
||||
|
||||
case mbspecSubtree: // ../rfc/5465:847
|
||||
for _, name := range eg.MailboxSpecifier.Mailboxes {
|
||||
if mailbox == name || strings.HasPrefix(mailbox, name+"/") {
|
||||
match = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
case mbspecSubtreeOne: // ../rfc/7377:274
|
||||
ntoken := len(strings.Split(mailbox, "/"))
|
||||
for _, name := range eg.MailboxSpecifier.Mailboxes {
|
||||
if mailbox == name || (strings.HasPrefix(mailbox, name+"/") && len(strings.Split(name, "/"))+1 == ntoken) {
|
||||
match = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
case mbspecMailboxes: // ../rfc/5465:853
|
||||
match = slices.Contains(eg.MailboxSpecifier.Mailboxes, mailbox)
|
||||
|
||||
default:
|
||||
panic("missing case for " + string(eg.MailboxSpecifier.Kind))
|
||||
}
|
||||
|
||||
if !match {
|
||||
continue
|
||||
}
|
||||
|
||||
// NONE is the signal we shouldn't return events for this mailbox. ../rfc/5465:455
|
||||
if len(eg.Events) == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
// If event kind matches, we will be notifying about this change. If not, we'll
|
||||
// look again at next mailbox specifiers.
|
||||
for _, ev := range eg.Events {
|
||||
if eventKind(ev.Kind) == kind {
|
||||
return eg.MailboxSpecifier, ev, true
|
||||
}
|
||||
}
|
||||
}
|
||||
return mailboxSpecifier{}, notifyEvent{}, false
|
||||
}
|
||||
|
||||
// Notify enables continuous notifications from the server to the client, without
|
||||
// the client issuing an IDLE command. The mailboxes and events to notify about are
|
||||
// specified in the account. When notify is enabled, instead of being blocked
|
||||
// waiting for a command from the client, we also wait for events from the account,
|
||||
// and send events about it.
|
||||
//
|
||||
// State: Authenticated and selected.
|
||||
func (c *conn) cmdNotify(tag, cmd string, p *parser) {
|
||||
// Command: ../rfc/5465:203
|
||||
// Request syntax: ../rfc/5465:923
|
||||
|
||||
p.xspace()
|
||||
|
||||
// NONE indicates client doesn't want any events, also not the "normal" events
|
||||
// without notify. ../rfc/5465:234
|
||||
// ../rfc/5465:930
|
||||
if p.take("NONE") {
|
||||
p.xempty()
|
||||
|
||||
// If we have delayed changes for the selected mailbox, we are no longer going to
|
||||
// notify about them. The client can't know anymore whether messages still exist,
|
||||
// and trying to read them can cause errors if the messages have been expunged and
|
||||
// erased.
|
||||
var changes []store.Change
|
||||
if c.notify != nil {
|
||||
changes = c.notify.Delayed
|
||||
}
|
||||
c.notify = ¬ify{}
|
||||
c.flushChanges(changes)
|
||||
|
||||
c.ok(tag, cmd)
|
||||
return
|
||||
}
|
||||
|
||||
var n notify
|
||||
var status bool
|
||||
|
||||
// ../rfc/5465:926
|
||||
p.xtake("SET")
|
||||
p.xspace()
|
||||
if p.take("STATUS") {
|
||||
status = true
|
||||
p.xspace()
|
||||
}
|
||||
for {
|
||||
eg := p.xeventGroup()
|
||||
n.EventGroups = append(n.EventGroups, eg)
|
||||
if !p.space() {
|
||||
break
|
||||
}
|
||||
}
|
||||
p.xempty()
|
||||
|
||||
for _, eg := range n.EventGroups {
|
||||
var hasNew, hasExpunge, hasFlag, hasAnnotation bool
|
||||
for _, ev := range eg.Events {
|
||||
switch eventKind(ev.Kind) {
|
||||
case eventMessageNew:
|
||||
hasNew = true
|
||||
case eventMessageExpunge:
|
||||
hasExpunge = true
|
||||
case eventFlagChange:
|
||||
hasFlag = true
|
||||
case eventMailboxName, eventSubscriptionChange, eventMailboxMetadataChange, eventServerMetadataChange:
|
||||
// Nothing special.
|
||||
default: // Including eventAnnotationChange.
|
||||
hasAnnotation = true // Ineffective, we don't implement message annotations yet.
|
||||
// Result must be NO instead of BAD, and we must include BADEVENT and the events we
|
||||
// support. ../rfc/5465:343
|
||||
// ../rfc/5465:1033
|
||||
xusercodeErrorf("BADEVENT (MessageNew MessageExpunge FlagChange MailboxName SubscriptionChange MailboxMetadataChange ServerMetadataChange)", "unimplemented event %s", ev.Kind)
|
||||
}
|
||||
}
|
||||
if hasNew != hasExpunge {
|
||||
// ../rfc/5465:443 ../rfc/5465:987
|
||||
xsyntaxErrorf("MessageNew and MessageExpunge must be specified together")
|
||||
}
|
||||
if (hasFlag || hasAnnotation) && !hasNew {
|
||||
// ../rfc/5465:439
|
||||
xsyntaxErrorf("FlagChange and/or AnnotationChange requires MessageNew and MessageExpunge")
|
||||
}
|
||||
}
|
||||
|
||||
for _, eg := range n.EventGroups {
|
||||
for i, name := range eg.MailboxSpecifier.Mailboxes {
|
||||
eg.MailboxSpecifier.Mailboxes[i] = xcheckmailboxname(name, true)
|
||||
}
|
||||
}
|
||||
|
||||
// Only one selected/selected-delay mailbox filter is allowed. ../rfc/5465:779
|
||||
// Only message events are allowed for selected/selected-delayed. ../rfc/5465:796
|
||||
var haveSelected bool
|
||||
for _, eg := range n.EventGroups {
|
||||
switch eg.MailboxSpecifier.Kind {
|
||||
case mbspecSelected, mbspecSelectedDelayed:
|
||||
if haveSelected {
|
||||
xsyntaxErrorf("cannot have multiple selected/selected-delayed mailbox filters")
|
||||
}
|
||||
haveSelected = true
|
||||
|
||||
// Only events from message-event are allowed with selected mailbox specifiers.
|
||||
// ../rfc/5465:977
|
||||
for _, ev := range eg.Events {
|
||||
if !slices.Contains(messageEventKinds, eventKind(ev.Kind)) {
|
||||
xsyntaxErrorf("selected/selected-delayed is only allowed with message events, not %s", ev.Kind)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// We must apply any changes for delayed select. ../rfc/5465:248
|
||||
if c.notify != nil {
|
||||
delayed := c.notify.Delayed
|
||||
c.notify.Delayed = nil
|
||||
c.xapplyChangesNotify(delayed, true)
|
||||
}
|
||||
|
||||
if status {
|
||||
var statuses []string
|
||||
|
||||
// Flush new pending changes before we read the current state from the database.
|
||||
// Don't allow any concurrent changes for a consistent snapshot.
|
||||
c.account.WithRLock(func() {
|
||||
select {
|
||||
case <-c.comm.Pending:
|
||||
overflow, changes := c.comm.Get()
|
||||
c.xapplyChanges(overflow, changes, true)
|
||||
default:
|
||||
}
|
||||
|
||||
c.xdbread(func(tx *bstore.Tx) {
|
||||
// Send STATUS responses for all matching mailboxes. ../rfc/5465:271
|
||||
q := bstore.QueryTx[store.Mailbox](tx)
|
||||
q.FilterEqual("Expunged", false)
|
||||
q.SortAsc("Name")
|
||||
for mb, err := range q.All() {
|
||||
xcheckf(err, "list mailboxes for status")
|
||||
|
||||
if mb.ID == c.mailboxID {
|
||||
continue
|
||||
}
|
||||
_, _, ok := n.match(c, func() *bstore.Tx { return tx }, mb.ID, mb.Name, eventMessageNew)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
list := listspace{
|
||||
bare("MESSAGES"), number(mb.MessageCountIMAP()),
|
||||
bare("UIDNEXT"), number(mb.UIDNext),
|
||||
bare("UIDVALIDITY"), number(mb.UIDValidity),
|
||||
// Unseen is not mentioned for STATUS, but clients are able to parse it due to
|
||||
// FlagChange, and it will be useful to have.
|
||||
bare("UNSEEN"), number(mb.MailboxCounts.Unseen),
|
||||
}
|
||||
if c.enabled[capCondstore] || c.enabled[capQresync] {
|
||||
list = append(list, bare("HIGHESTMODSEQ"), number(mb.ModSeq))
|
||||
}
|
||||
|
||||
status := fmt.Sprintf("* STATUS %s %s", mailboxt(mb.Name).pack(c), list.pack(c))
|
||||
statuses = append(statuses, status)
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
// Write outside of db transaction and lock.
|
||||
for _, s := range statuses {
|
||||
c.xbwritelinef("%s", s)
|
||||
}
|
||||
}
|
||||
|
||||
// We replace the previous notify config. ../rfc/5465:245
|
||||
c.notify = &n
|
||||
|
||||
// Writing OK will flush any other pending changes for the account according to the
|
||||
// new filters.
|
||||
c.ok(tag, cmd)
|
||||
}
|
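// Example (not part of the diff): a minimal client-side sketch of the NOTIFY
// flow that cmdNotify above handles, using only the Go standard library. The
// hostname, credentials and command tags are placeholder assumptions, not
// values from mox or its test suite.
package main

import (
	"bufio"
	"crypto/tls"
	"fmt"
	"log"
	"strings"
)

func main() {
	conn, err := tls.Dial("tcp", "imap.example.com:993", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	r := bufio.NewReader(conn)

	send := func(s string) { fmt.Fprintf(conn, "%s\r\n", s) }
	readLine := func() string {
		line, err := r.ReadString('\n')
		if err != nil {
			log.Fatal(err)
		}
		fmt.Print("S: ", line)
		return line
	}

	readLine() // Greeting.
	send("a1 LOGIN user password")
	for !strings.HasPrefix(readLine(), "a1 ") {
	}
	send("a2 SELECT INBOX")
	for !strings.HasPrefix(readLine(), "a2 ") {
	}

	// Request STATUS for matching mailboxes up front, message events for the
	// selected mailbox, and mailbox-name changes in the personal namespace.
	send("a3 NOTIFY SET STATUS (SELECTED (MessageNew MessageExpunge FlagChange)) (PERSONAL (MailboxName))")
	for {
		readLine() // Unsolicited EXISTS/FETCH/EXPUNGE/STATUS/LIST responses arrive here.
	}
}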
@ -1,516 +0,0 @@
|
||||
package imapserver
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/mjl-/mox/imapclient"
|
||||
"github.com/mjl-/mox/store"
|
||||
)
|
||||
|
||||
func TestNotify(t *testing.T) {
|
||||
testNotify(t, false)
|
||||
}
|
||||
|
||||
func TestNotifyUIDOnly(t *testing.T) {
|
||||
testNotify(t, true)
|
||||
}
|
||||
|
||||
func testNotify(t *testing.T, uidonly bool) {
|
||||
defer mockUIDValidity()()
|
||||
tc := start(t, uidonly)
|
||||
defer tc.close()
|
||||
tc.login("mjl@mox.example", password0)
|
||||
tc.client.Select("inbox")
|
||||
|
||||
// Check for some invalid syntax.
|
||||
tc.transactf("bad", "Notify")
|
||||
tc.transactf("bad", "Notify bogus")
|
||||
tc.transactf("bad", "Notify None ") // Trailing space.
|
||||
tc.transactf("bad", "Notify Set")
|
||||
tc.transactf("bad", "Notify Set ")
|
||||
tc.transactf("bad", "Notify Set Status")
|
||||
tc.transactf("bad", "Notify Set Status ()") // Empty list.
|
||||
tc.transactf("bad", "Notify Set Status (UnknownSpecifier (messageNew))")
|
||||
tc.transactf("bad", "Notify Set Status (Personal messageNew)") // Missing list around events.
|
||||
tc.transactf("bad", "Notify Set Status (Personal (messageNew) )") // Trailing space.
|
||||
tc.transactf("bad", "Notify Set Status (Personal (messageNew)) ") // Trailing space.
|
||||
|
||||
tc.transactf("bad", "Notify Set Status (Selected (mailboxName))") // MailboxName not allowed on Selected.
|
||||
tc.transactf("bad", "Notify Set Status (Selected (messageNew))") // MessageNew must come with MessageExpunge.
|
||||
tc.transactf("bad", "Notify Set Status (Selected (flagChange))") // flagChange must come with MessageNew and MessageExpunge.
|
||||
tc.transactf("bad", "Notify Set Status (Selected (mailboxName)) (Selected-Delayed (mailboxName))") // Duplicate selected.
|
||||
tc.transactf("no", "Notify Set Status (Selected (annotationChange))") // We don't implement annotation change.
|
||||
tc.xcode(imapclient.CodeBadEvent{"MessageNew", "MessageExpunge", "FlagChange", "MailboxName", "SubscriptionChange", "MailboxMetadataChange", "ServerMetadataChange"})
|
||||
tc.transactf("no", "Notify Set Status (Personal (unknownEvent))")
|
||||
tc.xcode(imapclient.CodeBadEvent{"MessageNew", "MessageExpunge", "FlagChange", "MailboxName", "SubscriptionChange", "MailboxMetadataChange", "ServerMetadataChange"})
|
||||
|
||||
tc2 := startNoSwitchboard(t, uidonly)
|
||||
defer tc2.closeNoWait()
|
||||
tc2.login("mjl@mox.example", password0)
|
||||
tc2.client.Select("inbox")
|
||||
|
||||
var modseq uint32 = 4
|
||||
|
||||
// Check that we don't get pending changes when we set "notify none". We first make
|
||||
// changes that we drain with noop. Then add new pending changes and execute
|
||||
// "notify none". Server should still process changes to the message sequence
|
||||
// numbers of the selected mailbox.
|
||||
tc2.client.Append("inbox", makeAppend(searchMsg)) // Results in exists and fetch.
|
||||
modseq++
|
||||
tc2.client.Append("Junk", makeAppend(searchMsg)) // Not selected, not mentioned.
|
||||
modseq++
|
||||
tc.transactf("ok", "noop")
|
||||
tc.xuntagged(
|
||||
imapclient.UntaggedExists(1),
|
||||
tc.untaggedFetch(1, 1, imapclient.FetchFlags(nil)),
|
||||
)
|
||||
tc2.client.UIDStoreFlagsAdd("1:*", true, `\Deleted`)
|
||||
modseq++
|
||||
tc2.client.Expunge()
|
||||
modseq++
|
||||
tc.transactf("ok", "Notify None")
|
||||
tc.xuntagged() // No untagged responses for delete/expunge.
|
||||
|
||||
// Enable notify, will first result in the pending changes, then status.
|
||||
tc.transactf("ok", "Notify Set Status (Selected (messageNew (Uid Modseq Bodystructure Preview) messageExpunge flagChange)) (personal (messageNew messageExpunge flagChange mailboxName subscriptionChange mailboxMetadataChange serverMetadataChange))")
|
||||
tc.xuntagged(
|
||||
imapclient.UntaggedResult{Status: imapclient.OK, Code: imapclient.CodeHighestModSeq(modseq), Text: "after condstore-enabling command"},
|
||||
// note: no status for Inbox since it is selected.
|
||||
imapclient.UntaggedStatus{Mailbox: "Drafts", Attrs: map[imapclient.StatusAttr]int64{imapclient.StatusMessages: 0, imapclient.StatusUIDNext: 1, imapclient.StatusUIDValidity: 1, imapclient.StatusUnseen: 0, imapclient.StatusHighestModSeq: 2}},
|
||||
imapclient.UntaggedStatus{Mailbox: "Sent", Attrs: map[imapclient.StatusAttr]int64{imapclient.StatusMessages: 0, imapclient.StatusUIDNext: 1, imapclient.StatusUIDValidity: 1, imapclient.StatusUnseen: 0, imapclient.StatusHighestModSeq: 2}},
|
||||
imapclient.UntaggedStatus{Mailbox: "Archive", Attrs: map[imapclient.StatusAttr]int64{imapclient.StatusMessages: 0, imapclient.StatusUIDNext: 1, imapclient.StatusUIDValidity: 1, imapclient.StatusUnseen: 0, imapclient.StatusHighestModSeq: 2}},
|
||||
imapclient.UntaggedStatus{Mailbox: "Trash", Attrs: map[imapclient.StatusAttr]int64{imapclient.StatusMessages: 0, imapclient.StatusUIDNext: 1, imapclient.StatusUIDValidity: 1, imapclient.StatusUnseen: 0, imapclient.StatusHighestModSeq: 2}},
|
||||
imapclient.UntaggedStatus{Mailbox: "Junk", Attrs: map[imapclient.StatusAttr]int64{imapclient.StatusMessages: 1, imapclient.StatusUIDNext: 2, imapclient.StatusUIDValidity: 1, imapclient.StatusUnseen: 1, imapclient.StatusHighestModSeq: int64(modseq - 2)}},
|
||||
)
|
||||
|
||||
// Selecting the mailbox again results in a refresh of the message sequence
|
||||
// numbers, with the deleted message gone (it wasn't acknowledged yet due to
|
||||
// "notify none").
|
||||
tc.client.Select("inbox")
|
||||
|
||||
// Add message, should result in EXISTS and FETCH with the configured attributes.
|
||||
tc2.client.Append("inbox", makeAppend(searchMsg))
|
||||
modseq++
|
||||
tc.readuntagged(
|
||||
imapclient.UntaggedExists(1),
|
||||
tc.untaggedFetchUID(1, 2,
|
||||
imapclient.FetchBodystructure{
|
||||
RespAttr: "BODYSTRUCTURE",
|
||||
Body: imapclient.BodyTypeMpart{
|
||||
Bodies: []any{
|
||||
imapclient.BodyTypeText{
|
||||
MediaType: "TEXT",
|
||||
MediaSubtype: "PLAIN",
|
||||
BodyFields: imapclient.BodyFields{
|
||||
Params: [][2]string{[...]string{"CHARSET", "utf-8"}},
|
||||
Octets: 21,
|
||||
},
|
||||
Lines: 1,
|
||||
Ext: &imapclient.BodyExtension1Part{
|
||||
Disposition: ptr((*string)(nil)),
|
||||
DispositionParams: ptr([][2]string(nil)),
|
||||
Language: ptr([]string(nil)),
|
||||
Location: ptr((*string)(nil)),
|
||||
},
|
||||
},
|
||||
imapclient.BodyTypeText{
|
||||
MediaType: "TEXT",
|
||||
MediaSubtype: "HTML",
|
||||
BodyFields: imapclient.BodyFields{
|
||||
Params: [][2]string{[...]string{"CHARSET", "utf-8"}},
|
||||
Octets: 15,
|
||||
},
|
||||
Lines: 1,
|
||||
Ext: &imapclient.BodyExtension1Part{
|
||||
Disposition: ptr((*string)(nil)),
|
||||
DispositionParams: ptr([][2]string(nil)),
|
||||
Language: ptr([]string(nil)),
|
||||
Location: ptr((*string)(nil)),
|
||||
},
|
||||
},
|
||||
},
|
||||
MediaSubtype: "ALTERNATIVE",
|
||||
Ext: &imapclient.BodyExtensionMpart{
|
||||
Params: [][2]string{{"BOUNDARY", "x"}},
|
||||
Disposition: ptr((*string)(nil)), // Present but nil.
|
||||
DispositionParams: ptr([][2]string(nil)),
|
||||
Language: ptr([]string(nil)),
|
||||
Location: ptr((*string)(nil)),
|
||||
},
|
||||
},
|
||||
},
|
||||
imapclient.FetchPreview{Preview: ptr("this is plain text.")},
|
||||
imapclient.FetchModSeq(modseq),
|
||||
),
|
||||
)
|
||||
|
||||
// Change flags.
|
||||
tc2.client.UIDStoreFlagsAdd("1:*", true, `\Deleted`)
|
||||
modseq++
|
||||
tc.readuntagged(tc.untaggedFetch(1, 2, imapclient.FetchFlags{`\Deleted`}, imapclient.FetchModSeq(modseq)))
|
||||
|
||||
// Remove message.
|
||||
tc2.client.Expunge()
|
||||
modseq++
|
||||
if uidonly {
|
||||
tc.readuntagged(imapclient.UntaggedVanished{UIDs: xparseNumSet("2")})
|
||||
} else {
|
||||
tc.readuntagged(imapclient.UntaggedExpunge(1))
|
||||
}
|
||||
|
||||
// MailboxMetadataChange for mailbox annotation.
|
||||
tc2.transactf("ok", `setmetadata Archive (/private/comment "test")`)
|
||||
modseq++
|
||||
tc.readuntagged(
|
||||
imapclient.UntaggedMetadataKeys{Mailbox: "Archive", Keys: []string{"/private/comment"}},
|
||||
)
|
||||
|
||||
// MailboxMetadataChange also for the selected Inbox.
|
||||
tc2.transactf("ok", `setmetadata Inbox (/private/comment "test")`)
|
||||
modseq++
|
||||
tc.readuntagged(
|
||||
imapclient.UntaggedMetadataKeys{Mailbox: "Inbox", Keys: []string{"/private/comment"}},
|
||||
)
|
||||
|
||||
// ServerMetadataChange for server annotation.
|
||||
tc2.transactf("ok", `setmetadata "" (/private/vendor/other/x "test")`)
|
||||
modseq++
|
||||
tc.readuntagged(
|
||||
imapclient.UntaggedMetadataKeys{Mailbox: "", Keys: []string{"/private/vendor/other/x"}},
|
||||
)
|
||||
|
||||
// SubscriptionChange for new subscription.
|
||||
tc2.client.Subscribe("doesnotexist")
|
||||
tc.readuntagged(
|
||||
imapclient.UntaggedList{Mailbox: "doesnotexist", Separator: '/', Flags: []string{`\Subscribed`, `\NonExistent`}},
|
||||
)
|
||||
|
||||
// SubscriptionChange for removed subscription.
|
||||
tc2.client.Unsubscribe("doesnotexist")
|
||||
tc.readuntagged(
|
||||
imapclient.UntaggedList{Mailbox: "doesnotexist", Separator: '/', Flags: []string{`\NonExistent`}},
|
||||
)
|
||||
|
||||
// SubscriptionChange for selected mailbox.
|
||||
tc2.client.Unsubscribe("Inbox")
|
||||
tc2.client.Subscribe("Inbox")
|
||||
tc.readuntagged(
|
||||
imapclient.UntaggedList{Mailbox: "Inbox", Separator: '/'},
|
||||
imapclient.UntaggedList{Mailbox: "Inbox", Separator: '/', Flags: []string{`\Subscribed`}},
|
||||
)
|
||||
|
||||
// MailboxName for creating mailbox.
|
||||
tc2.client.Create("newbox", nil)
|
||||
modseq++
|
||||
tc.readuntagged(
|
||||
imapclient.UntaggedList{Mailbox: "newbox", Separator: '/', Flags: []string{`\Subscribed`}},
|
||||
)
|
||||
|
||||
// MailboxName for renaming mailbox.
|
||||
tc2.client.Rename("newbox", "oldbox")
|
||||
modseq++
|
||||
tc.readuntagged(
|
||||
imapclient.UntaggedList{Mailbox: "oldbox", Separator: '/', OldName: "newbox"},
|
||||
)
|
||||
|
||||
// MailboxName for deleting mailbox.
|
||||
tc2.client.Delete("oldbox")
|
||||
modseq++
|
||||
tc.readuntagged(
|
||||
imapclient.UntaggedList{Mailbox: "oldbox", Separator: '/', Flags: []string{`\NonExistent`}},
|
||||
)
|
||||
|
||||
// Add message again to check for modseq. First set notify again with fewer fetch
|
||||
// attributes for simpler checking.
|
||||
tc.transactf("ok", "Notify Set (personal (messageNew messageExpunge flagChange mailboxName subscriptionChange mailboxMetadataChange serverMetadataChange)) (Selected (messageNew (Uid Modseq) messageExpunge flagChange))")
|
||||
tc2.client.Append("inbox", makeAppend(searchMsg))
|
||||
modseq++
|
||||
tc.readuntagged(
|
||||
imapclient.UntaggedExists(1),
|
||||
tc.untaggedFetchUID(1, 3, imapclient.FetchModSeq(modseq)),
|
||||
)
|
||||
|
||||
// Next round of events must be ignored. We shouldn't get anything until we add a
|
||||
// message to "testbox".
|
||||
tc.transactf("ok", "Notify Set (Selected None) (mailboxes testbox (messageNew messageExpunge)) (personal None)")
|
||||
tc2.client.Append("inbox", makeAppend(searchMsg)) // MessageNew
|
||||
modseq++
|
||||
tc2.client.UIDStoreFlagsAdd("1:*", true, `\Deleted`) // FlagChange
|
||||
modseq++
|
||||
tc2.client.Expunge() // MessageExpunge
|
||||
modseq++
|
||||
tc2.transactf("ok", `setmetadata Archive (/private/comment "test2")`) // MailboxMetadataChange
|
||||
modseq++
|
||||
tc2.transactf("ok", `setmetadata "" (/private/vendor/other/x "test2")`) // ServerMetadataChange
|
||||
modseq++
|
||||
tc2.client.Subscribe("doesnotexist2") // SubscriptionChange
|
||||
tc2.client.Unsubscribe("doesnotexist2") // SubscriptionChange
|
||||
tc2.client.Create("newbox2", nil) // MailboxName
|
||||
modseq++
|
||||
tc2.client.Rename("newbox2", "oldbox2") // MailboxName
|
||||
modseq++
|
||||
tc2.client.Delete("oldbox2") // MailboxName
|
||||
modseq++
|
||||
// Now trigger receiving a notification.
|
||||
tc2.client.Create("testbox", nil) // MailboxName
|
||||
modseq++
|
||||
tc2.client.Append("testbox", makeAppend(searchMsg)) // MessageNew
|
||||
modseq++
|
||||
tc.readuntagged(
|
||||
imapclient.UntaggedStatus{Mailbox: "testbox", Attrs: map[imapclient.StatusAttr]int64{imapclient.StatusMessages: 1, imapclient.StatusUIDNext: 2, imapclient.StatusUnseen: 1, imapclient.StatusHighestModSeq: int64(modseq)}},
|
||||
)
|
||||
|
||||
// Test filtering per mailbox specifier. We create two mailboxes.
|
||||
tc.client.Create("inbox/a/b", nil)
|
||||
modseq++
|
||||
tc.client.Create("other/a/b", nil)
|
||||
modseq++
|
||||
tc.client.Unsubscribe("other/a/b")
|
||||
|
||||
// Inboxes
|
||||
tc3 := startNoSwitchboard(t, uidonly)
|
||||
defer tc3.closeNoWait()
|
||||
tc3.login("mjl@mox.example", password0)
|
||||
tc3.transactf("ok", "Notify Set (Inboxes (messageNew messageExpunge))")
|
||||
|
||||
// Subscribed
|
||||
tc4 := startNoSwitchboard(t, uidonly)
|
||||
defer tc4.closeNoWait()
|
||||
tc4.login("mjl@mox.example", password0)
|
||||
tc4.transactf("ok", "Notify Set (Subscribed (messageNew messageExpunge))")
|
||||
|
||||
// Subtree
|
||||
tc5 := startNoSwitchboard(t, uidonly)
|
||||
defer tc5.closeNoWait()
|
||||
tc5.login("mjl@mox.example", password0)
|
||||
tc5.transactf("ok", "Notify Set (Subtree (Nonexistent inbox) (messageNew messageExpunge))")
|
||||
|
||||
// Subtree-One
|
||||
tc6 := startNoSwitchboard(t, uidonly)
|
||||
defer tc6.closeNoWait()
|
||||
tc6.login("mjl@mox.example", password0)
|
||||
tc6.transactf("ok", "Notify Set (Subtree-One (Nonexistent Inbox/a other) (messageNew messageExpunge))")
|
||||
|
||||
// We append to other/a/b first. It would normally come first in the notifications,
|
||||
// but we check we only get the second event.
|
||||
tc2.client.Append("other/a/b", makeAppend(searchMsg))
|
||||
modseq++
|
||||
tc2.client.Append("inbox/a/b", makeAppend(searchMsg))
|
||||
modseq++
|
||||
|
||||
// No highestmodseq, these connections don't have CONDSTORE enabled.
|
||||
tc3.readuntagged(
|
||||
imapclient.UntaggedStatus{Mailbox: "Inbox/a/b", Attrs: map[imapclient.StatusAttr]int64{imapclient.StatusMessages: 1, imapclient.StatusUIDNext: 2, imapclient.StatusUnseen: 1}},
|
||||
)
|
||||
tc4.readuntagged(
|
||||
imapclient.UntaggedStatus{Mailbox: "Inbox/a/b", Attrs: map[imapclient.StatusAttr]int64{imapclient.StatusMessages: 1, imapclient.StatusUIDNext: 2, imapclient.StatusUnseen: 1}},
|
||||
)
|
||||
tc5.readuntagged(
|
||||
imapclient.UntaggedStatus{Mailbox: "Inbox/a/b", Attrs: map[imapclient.StatusAttr]int64{imapclient.StatusMessages: 1, imapclient.StatusUIDNext: 2, imapclient.StatusUnseen: 1}},
|
||||
)
|
||||
tc6.readuntagged(
|
||||
imapclient.UntaggedStatus{Mailbox: "Inbox/a/b", Attrs: map[imapclient.StatusAttr]int64{imapclient.StatusMessages: 1, imapclient.StatusUIDNext: 2, imapclient.StatusUnseen: 1}},
|
||||
)
|
||||
|
||||
// Test for STATUS events on non-selected mailbox for message events.
|
||||
tc.transactf("ok", "notify set (personal (messageNew messageExpunge flagChange))")
|
||||
tc.client.Unselect()
|
||||
tc2.client.Create("statusbox", nil)
|
||||
modseq++
|
||||
tc2.client.Append("statusbox", makeAppend(searchMsg))
|
||||
modseq++
|
||||
tc.readuntagged(
|
||||
imapclient.UntaggedStatus{Mailbox: "statusbox", Attrs: map[imapclient.StatusAttr]int64{imapclient.StatusMessages: 1, imapclient.StatusUIDNext: 2, imapclient.StatusUnseen: 1, imapclient.StatusHighestModSeq: int64(modseq)}},
|
||||
)
|
||||
|
||||
// With Selected-Delayed, we only get the events for the selected mailbox for
|
||||
// explicit commands. We still get other events.
|
||||
tc.transactf("ok", "notify set (selected-delayed (messageNew messageExpunge flagChange)) (personal (messageNew messageExpunge flagChange))")
|
||||
tc.client.Select("statusbox")
|
||||
tc2.client.Append("inbox", makeAppend(searchMsg))
|
||||
modseq++
|
||||
tc2.client.UIDStoreFlagsSet("*", true, `\Seen`)
|
||||
modseq++
|
||||
tc2.client.Append("statusbox", imapclient.Append{Flags: []string{"newflag"}, Size: int64(len(searchMsg)), Data: strings.NewReader(searchMsg)})
|
||||
modseq++
|
||||
tc2.client.Select("statusbox")
|
||||
|
||||
tc.readuntagged(
|
||||
imapclient.UntaggedStatus{Mailbox: "Inbox", Attrs: map[imapclient.StatusAttr]int64{imapclient.StatusMessages: 1, imapclient.StatusUIDNext: 6, imapclient.StatusUnseen: 1, imapclient.StatusHighestModSeq: int64(modseq - 2)}},
|
||||
imapclient.UntaggedStatus{Mailbox: "Inbox", Attrs: map[imapclient.StatusAttr]int64{imapclient.StatusUIDValidity: 1, imapclient.StatusUnseen: 0, imapclient.StatusHighestModSeq: int64(modseq - 1)}},
|
||||
)
|
||||
|
||||
tc.transactf("ok", "noop")
|
||||
tc.xuntagged(
|
||||
imapclient.UntaggedExists(2),
|
||||
tc.untaggedFetch(2, 2, imapclient.FetchFlags{"newflag"}, imapclient.FetchModSeq(modseq)),
|
||||
imapclient.UntaggedFlags{`\Seen`, `\Answered`, `\Flagged`, `\Deleted`, `\Draft`, `$Forwarded`, `$Junk`, `$NotJunk`, `$Phishing`, `$MDNSent`, `newflag`},
|
||||
)
|
||||
|
||||
tc2.client.UIDStoreFlagsSet("2", true, `\Deleted`)
|
||||
modseq++
|
||||
tc2.client.Expunge()
|
||||
modseq++
|
||||
tc.transactf("ok", "noop")
|
||||
if uidonly {
|
||||
tc.xuntagged(
|
||||
tc.untaggedFetch(2, 2, imapclient.FetchFlags{`\Deleted`}, imapclient.FetchModSeq(modseq-1)),
|
||||
imapclient.UntaggedVanished{UIDs: xparseNumSet("2")},
|
||||
)
|
||||
} else {
|
||||
tc.xuntagged(
|
||||
tc.untaggedFetch(2, 2, imapclient.FetchFlags{`\Deleted`}, imapclient.FetchModSeq(modseq-1)),
|
||||
imapclient.UntaggedExpunge(2),
|
||||
)
|
||||
}
|
||||
|
||||
// With Selected-Delayed, we should get events for selected mailboxes immediately when using IDLE.
|
||||
tc2.client.UIDStoreFlagsSet("*", true, `\Answered`)
|
||||
modseq++
|
||||
tc2.client.Select("inbox")
|
||||
tc2.client.UIDStoreFlagsClear("*", true, `\Seen`)
|
||||
modseq++
|
||||
tc2.client.Select("statusbox")
|
||||
|
||||
tc.readuntagged(
|
||||
imapclient.UntaggedStatus{Mailbox: "Inbox", Attrs: map[imapclient.StatusAttr]int64{imapclient.StatusUIDValidity: 1, imapclient.StatusUnseen: 1, imapclient.StatusHighestModSeq: int64(modseq)}},
|
||||
)
|
||||
|
||||
tc.conn.SetReadDeadline(time.Now().Add(3 * time.Second))
|
||||
tc.cmdf("", "idle")
|
||||
tc.readprefixline("+ ")
|
||||
tc.readuntagged(tc.untaggedFetch(1, 1, imapclient.FetchFlags{`\Answered`}, imapclient.FetchModSeq(modseq-1)))
|
||||
tc.writelinef("done")
|
||||
tc.response("ok")
|
||||
tc.conn.SetReadDeadline(time.Now().Add(30 * time.Second))
|
||||
|
||||
// If any event matches, we normally return it. But NONE prevents looking further.
|
||||
tc.client.Unselect()
|
||||
tc.transactf("ok", "notify set (mailboxes statusbox NONE) (personal (mailboxName))")
|
||||
tc2.client.UIDStoreFlagsSet("*", true, `\Answered`) // Matches NONE, ignored.
|
||||
//modseq++
|
||||
tc2.client.Create("eventbox", nil)
|
||||
//modseq++
|
||||
tc.readuntagged(
|
||||
imapclient.UntaggedList{Mailbox: "eventbox", Separator: '/', Flags: []string{`\Subscribed`}},
|
||||
)
|
||||
|
||||
// Check we can return message contents.
|
||||
tc.transactf("ok", "notify set (selected (messageNew (body[header] body[text]) messageExpunge))")
|
||||
tc.client.Select("statusbox")
|
||||
tc2.client.Append("statusbox", makeAppend(searchMsg))
|
||||
// modseq++
|
||||
offset := strings.Index(searchMsg, "\r\n\r\n")
|
||||
tc.readuntagged(
|
||||
imapclient.UntaggedExists(2),
|
||||
tc.untaggedFetch(2, 3,
|
||||
imapclient.FetchBody{
|
||||
RespAttr: "BODY[HEADER]",
|
||||
Section: "HEADER",
|
||||
Body: searchMsg[:offset+4],
|
||||
},
|
||||
imapclient.FetchBody{
|
||||
RespAttr: "BODY[TEXT]",
|
||||
Section: "TEXT",
|
||||
Body: searchMsg[offset+4:],
|
||||
},
|
||||
imapclient.FetchFlags(nil),
|
||||
),
|
||||
)
|
||||
|
||||
// If we encounter an error during fetch, an untagged NO is returned.
|
||||
// We ask for the 2nd part of a message, and we add a message with just 1 part.
|
||||
tc.transactf("ok", "notify set (selected (messageNew (body[2]) messageExpunge))")
|
||||
tc2.client.Append("statusbox", makeAppend(exampleMsg))
|
||||
// modseq++
|
||||
tc.readuntagged(
|
||||
imapclient.UntaggedExists(3),
|
||||
imapclient.UntaggedResult{Status: "NO", Text: "generating notify fetch response: requested part does not exist"},
|
||||
tc.untaggedFetchUID(3, 4),
|
||||
)
|
||||
|
||||
// When adding new tests, uncomment modseq++ lines above.
|
||||
}
|
||||
|
||||
func TestNotifyOverflow(t *testing.T) {
|
||||
testNotifyOverflow(t, false)
|
||||
}
|
||||
|
||||
func TestNotifyOverflowUIDOnly(t *testing.T) {
|
||||
testNotifyOverflow(t, true)
|
||||
}
|
||||
|
||||
func testNotifyOverflow(t *testing.T, uidonly bool) {
|
||||
orig := store.CommPendingChangesMax
|
||||
store.CommPendingChangesMax = 3
|
||||
defer func() {
|
||||
store.CommPendingChangesMax = orig
|
||||
}()
|
||||
|
||||
defer mockUIDValidity()()
|
||||
tc := start(t, uidonly)
|
||||
defer tc.close()
|
||||
tc.login("mjl@mox.example", password0)
|
||||
tc.client.Select("inbox")
|
||||
tc.transactf("ok", "noop")
|
||||
|
||||
tc2 := startNoSwitchboard(t, uidonly)
|
||||
defer tc2.closeNoWait()
|
||||
tc2.login("mjl@mox.example", password0)
|
||||
tc2.client.Select("inbox")
|
||||
|
||||
// Generates 4 changes, crossing max 3.
|
||||
tc2.client.Append("inbox", makeAppend(searchMsg))
|
||||
tc2.client.Append("inbox", makeAppend(searchMsg))
|
||||
|
||||
tc.transactf("ok", "noop")
|
||||
tc.xuntagged(imapclient.UntaggedResult{Status: "OK", Code: imapclient.CodeWord("NOTIFICATIONOVERFLOW"), Text: "out of sync after too many pending changes"})
|
||||
|
||||
// Won't be getting any more notifications until we enable them again with NOTIFY.
|
||||
tc2.client.Append("inbox", makeAppend(searchMsg))
|
||||
tc.transactf("ok", "noop")
|
||||
tc.xuntagged()
|
||||
|
||||
// Enable notify again. Without uidonly, we won't get a notification because the
|
||||
// message isn't known in the session.
|
||||
tc.transactf("ok", "notify set (selected (messageNew messageExpunge flagChange))")
|
||||
tc2.client.UIDStoreFlagsAdd("1", true, `\Seen`)
|
||||
if uidonly {
|
||||
tc.readuntagged(tc.untaggedFetch(1, 1, imapclient.FetchFlags{`\Seen`}))
|
||||
} else {
|
||||
tc.transactf("ok", "noop")
|
||||
tc.xuntagged()
|
||||
}
|
||||
|
||||
// Reselect to get the message visible in the session.
|
||||
tc.client.Select("inbox")
|
||||
tc2.client.UIDStoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "noop")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, imapclient.FetchFlags(nil)))
|
||||
|
||||
// Trigger overflow for changes for "selected-delayed".
|
||||
store.CommPendingChangesMax = 10
|
||||
delayedMax := selectedDelayedChangesMax
|
||||
selectedDelayedChangesMax = 1
|
||||
defer func() {
|
||||
selectedDelayedChangesMax = delayedMax
|
||||
}()
|
||||
tc.transactf("ok", "notify set (selected-delayed (messageNew messageExpunge flagChange))")
|
||||
tc2.client.UIDStoreFlagsAdd("1", true, `\Seen`)
|
||||
tc2.client.UIDStoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "noop")
|
||||
tc.xuntagged(imapclient.UntaggedResult{Status: "OK", Code: imapclient.CodeWord("NOTIFICATIONOVERFLOW"), Text: "out of sync after too many pending changes for selected mailbox"})
|
||||
|
||||
// Again, no new notifications until we select and enable again.
|
||||
tc2.client.UIDStoreFlagsAdd("1", true, `\Seen`)
|
||||
tc.transactf("ok", "noop")
|
||||
tc.xuntagged()
|
||||
|
||||
tc.client.Select("inbox")
|
||||
tc.transactf("ok", "notify set (selected-delayed (messageNew messageExpunge flagChange))")
|
||||
tc2.client.UIDStoreFlagsClear("1", true, `\Seen`)
|
||||
tc.transactf("ok", "noop")
|
||||
tc.xuntagged(tc.untaggedFetch(1, 1, imapclient.FetchFlags(nil)))
|
||||
}
|
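// Example (not part of the diff): how a client might react to the
// NOTIFICATIONOVERFLOW response code exercised in testNotifyOverflow above.
// The two helpers only print the commands a real client would send; they are
// illustrative stand-ins, not mox APIs.
package main

import (
	"fmt"
	"strings"
)

func selectMailbox(name string) { fmt.Println("C: t1 SELECT", name) }
func notifySet(spec string)     { fmt.Println("C: t2 NOTIFY SET", spec) }

// handleUntagged reacts to an untagged response line from the server. After
// an overflow the server has dropped the notify config, so the client must
// re-select to get back in sync and then opt back in to events.
func handleUntagged(line string) {
	if strings.Contains(line, "[NOTIFICATIONOVERFLOW]") {
		selectMailbox("INBOX")
		notifySet("(SELECTED (MessageNew MessageExpunge FlagChange))")
	}
}

func main() {
	handleUntagged("* OK [NOTIFICATIONOVERFLOW] out of sync after too many pending changes")
}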
@ -9,7 +9,7 @@ import (
|
||||
|
||||
type token interface {
|
||||
pack(c *conn) string
|
||||
xwriteTo(c *conn, xw io.Writer) // Writes to xw panic on error.
|
||||
writeTo(c *conn, w io.Writer)
|
||||
}
|
||||
|
||||
type bare string
|
||||
@ -18,8 +18,8 @@ func (t bare) pack(c *conn) string {
|
||||
return string(t)
|
||||
}
|
||||
|
||||
func (t bare) xwriteTo(c *conn, xw io.Writer) {
|
||||
xw.Write([]byte(t.pack(c)))
|
||||
func (t bare) writeTo(c *conn, w io.Writer) {
|
||||
w.Write([]byte(t.pack(c)))
|
||||
}
|
||||
|
||||
type niltoken struct{}
|
||||
@ -30,15 +30,15 @@ func (t niltoken) pack(c *conn) string {
|
||||
return "NIL"
|
||||
}
|
||||
|
||||
func (t niltoken) xwriteTo(c *conn, xw io.Writer) {
|
||||
xw.Write([]byte(t.pack(c)))
|
||||
func (t niltoken) writeTo(c *conn, w io.Writer) {
|
||||
w.Write([]byte(t.pack(c)))
|
||||
}
|
||||
|
||||
func nilOrString(s *string) token {
|
||||
if s == nil {
|
||||
func nilOrString(s string) token {
|
||||
if s == "" {
|
||||
return nilt
|
||||
}
|
||||
return string0(*s)
|
||||
return string0(s)
|
||||
}
|
||||
|
||||
type string0 string
|
||||
@ -60,8 +60,8 @@ func (t string0) pack(c *conn) string {
|
||||
return r
|
||||
}
|
||||
|
||||
func (t string0) xwriteTo(c *conn, xw io.Writer) {
|
||||
xw.Write([]byte(t.pack(c)))
|
||||
func (t string0) writeTo(c *conn, w io.Writer) {
|
||||
w.Write([]byte(t.pack(c)))
|
||||
}
|
||||
|
||||
type dquote string
|
||||
@ -78,8 +78,8 @@ func (t dquote) pack(c *conn) string {
|
||||
return r
|
||||
}
|
||||
|
||||
func (t dquote) xwriteTo(c *conn, xw io.Writer) {
|
||||
xw.Write([]byte(t.pack(c)))
|
||||
func (t dquote) writeTo(c *conn, w io.Writer) {
|
||||
w.Write([]byte(t.pack(c)))
|
||||
}
|
||||
|
||||
type syncliteral string
|
||||
@ -88,16 +88,15 @@ func (t syncliteral) pack(c *conn) string {
|
||||
return fmt.Sprintf("{%d}\r\n", len(t)) + string(t)
|
||||
}
|
||||
|
||||
func (t syncliteral) xwriteTo(c *conn, xw io.Writer) {
|
||||
fmt.Fprintf(xw, "{%d}\r\n", len(t))
|
||||
xw.Write([]byte(t))
|
||||
func (t syncliteral) writeTo(c *conn, w io.Writer) {
|
||||
fmt.Fprintf(w, "{%d}\r\n", len(t))
|
||||
w.Write([]byte(t))
|
||||
}
|
||||
|
||||
// data from reader with known size.
|
||||
type readerSizeSyncliteral struct {
|
||||
r io.Reader
|
||||
size int64
|
||||
lit8 bool
|
||||
}
|
||||
|
||||
func (t readerSizeSyncliteral) pack(c *conn) string {
|
||||
@ -105,21 +104,13 @@ func (t readerSizeSyncliteral) pack(c *conn) string {
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
var lit string
|
||||
if t.lit8 {
|
||||
lit = "~"
|
||||
}
|
||||
return fmt.Sprintf("%s{%d}\r\n", lit, t.size) + string(buf)
|
||||
return fmt.Sprintf("{%d}\r\n", t.size) + string(buf)
|
||||
}
|
||||
|
||||
func (t readerSizeSyncliteral) xwriteTo(c *conn, xw io.Writer) {
|
||||
var lit string
|
||||
if t.lit8 {
|
||||
lit = "~"
|
||||
}
|
||||
fmt.Fprintf(xw, "%s{%d}\r\n", lit, t.size)
|
||||
defer c.xtracewrite(mlog.LevelTracedata)()
|
||||
if _, err := io.Copy(xw, io.LimitReader(t.r, t.size)); err != nil {
|
||||
func (t readerSizeSyncliteral) writeTo(c *conn, w io.Writer) {
|
||||
fmt.Fprintf(w, "{%d}\r\n", t.size)
|
||||
defer c.xtrace(mlog.LevelTracedata)()
|
||||
if _, err := io.Copy(w, io.LimitReader(t.r, t.size)); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
@ -137,14 +128,17 @@ func (t readerSyncliteral) pack(c *conn) string {
|
||||
return fmt.Sprintf("{%d}\r\n", len(buf)) + string(buf)
|
||||
}
|
||||
|
||||
func (t readerSyncliteral) xwriteTo(c *conn, xw io.Writer) {
|
||||
func (t readerSyncliteral) writeTo(c *conn, w io.Writer) {
|
||||
buf, err := io.ReadAll(t.r)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
fmt.Fprintf(xw, "{%d}\r\n", len(buf))
|
||||
defer c.xtracewrite(mlog.LevelTracedata)()
|
||||
xw.Write(buf)
|
||||
fmt.Fprintf(w, "{%d}\r\n", len(buf))
|
||||
defer c.xtrace(mlog.LevelTracedata)()
|
||||
_, err = w.Write(buf)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// list with tokens space-separated
|
||||
@ -162,38 +156,15 @@ func (t listspace) pack(c *conn) string {
|
||||
return s
|
||||
}
|
||||
|
||||
func (t listspace) xwriteTo(c *conn, xw io.Writer) {
|
||||
fmt.Fprint(xw, "(")
|
||||
func (t listspace) writeTo(c *conn, w io.Writer) {
|
||||
fmt.Fprint(w, "(")
|
||||
for i, e := range t {
|
||||
if i > 0 {
|
||||
fmt.Fprint(xw, " ")
|
||||
fmt.Fprint(w, " ")
|
||||
}
|
||||
e.xwriteTo(c, xw)
|
||||
}
|
||||
fmt.Fprint(xw, ")")
|
||||
}
|
||||
|
||||
// concatenate tokens space-separated
|
||||
type concatspace []token
|
||||
|
||||
func (t concatspace) pack(c *conn) string {
|
||||
var s string
|
||||
for i, e := range t {
|
||||
if i > 0 {
|
||||
s += " "
|
||||
}
|
||||
s += e.pack(c)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (t concatspace) xwriteTo(c *conn, xw io.Writer) {
|
||||
for i, e := range t {
|
||||
if i > 0 {
|
||||
fmt.Fprint(xw, " ")
|
||||
}
|
||||
e.xwriteTo(c, xw)
|
||||
e.writeTo(c, w)
|
||||
}
|
||||
fmt.Fprint(w, ")")
|
||||
}
|
||||
|
||||
// Concatenated tokens, no spaces or list syntax.
|
||||
@ -207,9 +178,9 @@ func (t concat) pack(c *conn) string {
|
||||
return s
|
||||
}
|
||||
|
||||
func (t concat) xwriteTo(c *conn, xw io.Writer) {
|
||||
func (t concat) writeTo(c *conn, w io.Writer) {
|
||||
for _, e := range t {
|
||||
e.xwriteTo(c, xw)
|
||||
e.writeTo(c, w)
|
||||
}
|
||||
}
|
||||
|
||||
@ -231,23 +202,8 @@ next:
|
||||
return string(t)
|
||||
}
|
||||
|
||||
func (t astring) xwriteTo(c *conn, xw io.Writer) {
|
||||
xw.Write([]byte(t.pack(c)))
|
||||
}
|
||||
|
||||
// mailbox with utf7 encoding if connection requires it, or utf8 otherwise.
|
||||
type mailboxt string
|
||||
|
||||
func (t mailboxt) pack(c *conn) string {
|
||||
s := string(t)
|
||||
if !c.utf8strings() {
|
||||
s = utf7encode(s)
|
||||
}
|
||||
return astring(s).pack(c)
|
||||
}
|
||||
|
||||
func (t mailboxt) xwriteTo(c *conn, xw io.Writer) {
|
||||
xw.Write([]byte(t.pack(c)))
|
||||
func (t astring) writeTo(c *conn, w io.Writer) {
|
||||
w.Write([]byte(t.pack(c)))
|
||||
}
|
||||
|
||||
type number uint32
|
||||
@ -256,6 +212,6 @@ func (t number) pack(c *conn) string {
|
||||
return fmt.Sprintf("%d", t)
|
||||
}
|
||||
|
||||
func (t number) xwriteTo(c *conn, xw io.Writer) {
|
||||
xw.Write([]byte(t.pack(c)))
|
||||
func (t number) writeTo(c *conn, w io.Writer) {
|
||||
w.Write([]byte(t.pack(c)))
|
||||
}
|
||||
|
@ -48,13 +48,11 @@ type parser struct {
|
||||
// Orig is the line in original casing, and upper in upper casing. We often match
|
||||
// against upper for easy case insensitive handling as IMAP requires, but sometimes
|
||||
// return from orig to keep the original case.
|
||||
orig string
|
||||
upper string
|
||||
o int // Current offset in parsing.
|
||||
contexts []string // What we're parsing, for error messages.
|
||||
literals int // Literals in command, for limit.
|
||||
literalSize int64 // Total size of literals in command, for limit.
|
||||
conn *conn
|
||||
orig string
|
||||
upper string
|
||||
o int // Current offset in parsing.
|
||||
contexts []string // What we're parsing, for error messages.
|
||||
conn *conn
|
||||
}
|
||||
|
||||
// toUpper upper cases bytes that are a-z. strings.ToUpper does too much. and
|
||||
@ -72,7 +70,7 @@ func toUpper(s string) string {
|
||||
}
|
||||
|
||||
func newParser(s string, conn *conn) *parser {
|
||||
return &parser{s, toUpper(s), 0, nil, 0, 0, conn}
|
||||
return &parser{s, toUpper(s), 0, nil, conn}
|
||||
}
|
||||
|
||||
func (p *parser) xerrorf(format string, args ...any) {
|
||||
@ -304,11 +302,11 @@ func (p *parser) xstring() (r string) {
|
||||
}
|
||||
p.xerrorf("missing closing dquote in string")
|
||||
}
|
||||
size, sync := p.xliteralSize(false, true)
|
||||
buf := p.conn.xreadliteral(size, sync)
|
||||
line := p.conn.xreadline(false)
|
||||
size, sync := p.xliteralSize(100*1024, false)
|
||||
s := p.conn.xreadliteral(size, sync)
|
||||
line := p.conn.readline(false)
|
||||
p.orig, p.upper, p.o = line, toUpper(line), 0
|
||||
return string(buf)
|
||||
return s
|
||||
}
|
||||
|
||||
func (p *parser) xnil() {
|
||||
@ -438,9 +436,9 @@ func (p *parser) xmboxOrPat() ([]string, bool) {
|
||||
return l, true
|
||||
}
|
||||
|
||||
// ../rfc/9051:7056, RECENT ../rfc/3501:5047, APPENDLIMIT ../rfc/7889:252, HIGHESTMODSEQ ../rfc/7162:2452, DELETED-STORAGE ../rfc/9208:696
|
||||
// ../rfc/9051:7056, RECENT ../rfc/3501:5047, APPENDLIMIT ../rfc/7889:252, HIGHESTMODSEQ ../rfc/7162:2452
|
||||
func (p *parser) xstatusAtt() string {
|
||||
w := p.xtakelist("MESSAGES", "UIDNEXT", "UIDVALIDITY", "UNSEEN", "DELETED-STORAGE", "DELETED", "SIZE", "RECENT", "APPENDLIMIT", "HIGHESTMODSEQ")
|
||||
w := p.xtakelist("MESSAGES", "UIDNEXT", "UIDVALIDITY", "UNSEEN", "DELETED", "SIZE", "RECENT", "APPENDLIMIT", "HIGHESTMODSEQ")
|
||||
if w == "HIGHESTMODSEQ" {
|
||||
// HIGHESTMODSEQ is a CONDSTORE-enabling parameter. ../rfc/7162:375
|
||||
p.conn.enabled[capCondstore] = true
|
||||
@ -575,13 +573,11 @@ func (p *parser) xsectionBinary() (r []uint32) {
|
||||
var fetchAttWords = []string{
|
||||
"ENVELOPE", "FLAGS", "INTERNALDATE", "RFC822.SIZE", "BODYSTRUCTURE", "UID", "BODY.PEEK", "BODY", "BINARY.PEEK", "BINARY.SIZE", "BINARY",
|
||||
"RFC822.HEADER", "RFC822.TEXT", "RFC822", // older IMAP
|
||||
"MODSEQ", // CONDSTORE extension.
|
||||
"SAVEDATE", // SAVEDATE extension, ../rfc/8514:186
|
||||
"PREVIEW", // ../rfc/8970:345
|
||||
"MODSEQ", // CONDSTORE extension.
|
||||
}
|
||||
|
||||
// ../rfc/9051:6557 ../rfc/3501:4751 ../rfc/7162:2483
|
||||
func (p *parser) xfetchAtt() (r fetchAtt) {
|
||||
func (p *parser) xfetchAtt(isUID bool) (r fetchAtt) {
|
||||
defer p.context("fetchAtt")()
|
||||
f := p.xtakelist(fetchAttWords...)
|
||||
r.peek = strings.HasSuffix(f, ".PEEK")
|
||||
@ -609,14 +605,12 @@ func (p *parser) xfetchAtt() (r fetchAtt) {
|
||||
// The wording about when to respond with a MODSEQ attribute could be more clear. ../rfc/7162:923 ../rfc/7162:388
|
||||
// MODSEQ attribute is a CONDSTORE-enabling parameter. ../rfc/7162:377
|
||||
p.conn.xensureCondstore(nil)
|
||||
case "PREVIEW":
|
||||
r.previewLazy = p.take(" (LAZY)")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// ../rfc/9051:6553 ../rfc/3501:4748
|
||||
func (p *parser) xfetchAtts() []fetchAtt {
|
||||
func (p *parser) xfetchAtts(isUID bool) []fetchAtt {
|
||||
defer p.context("fetchAtts")()
|
||||
|
||||
fields := func(l ...string) []fetchAtt {
|
||||
@ -640,13 +634,13 @@ func (p *parser) xfetchAtts() []fetchAtt {
|
||||
}
|
||||
|
||||
if !p.hasPrefix("(") {
|
||||
return []fetchAtt{p.xfetchAtt()}
|
||||
return []fetchAtt{p.xfetchAtt(isUID)}
|
||||
}
|
||||
|
||||
l := []fetchAtt{}
|
||||
p.xtake("(")
|
||||
for {
|
||||
l = append(l, p.xfetchAtt())
|
||||
l = append(l, p.xfetchAtt(isUID))
|
||||
if !p.take(" ") {
|
||||
break
|
||||
}
|
||||
@ -747,47 +741,23 @@ func (p *parser) xdateTime() time.Time {
|
||||
}
|
||||
|
||||
// ../rfc/9051:6655 ../rfc/7888:330 ../rfc/3501:4801
|
||||
func (p *parser) xliteralSize(lit8 bool, checkSize bool) (size int64, sync bool) {
|
||||
func (p *parser) xliteralSize(maxSize int64, lit8 bool) (size int64, sync bool) {
|
||||
// todo: enforce that we get non-binary when ~ isn't present?
|
||||
if lit8 {
|
||||
p.take("~")
|
||||
}
|
||||
p.xtake("{")
|
||||
size = p.xnumber64()
|
||||
if maxSize > 0 && size > maxSize {
|
||||
// ../rfc/7888:249
|
||||
line := fmt.Sprintf("* BYE [ALERT] Max literal size %d is larger than allowed %d in this context", size, maxSize)
|
||||
err := errors.New("literal too big")
|
||||
panic(syntaxError{line, "TOOBIG", err.Error(), err})
|
||||
}
|
||||
|
||||
sync = !p.take("+")
|
||||
p.xtake("}")
|
||||
p.xempty()
|
||||
|
||||
if checkSize {
|
||||
// ../rfc/7888:249
|
||||
var errmsg string
|
||||
const (
|
||||
litSizeMax = 100 * 1024
|
||||
totalLitSizeMax = 10 * litSizeMax
|
||||
litMax = 1000
|
||||
)
|
||||
p.literalSize += size
|
||||
p.literals++
|
||||
if size > litSizeMax {
|
||||
errmsg = fmt.Sprintf("max literal size %d is larger than allowed %d", size, litSizeMax)
|
||||
} else if p.literalSize > totalLitSizeMax {
|
||||
errmsg = fmt.Sprintf("max total literal size for command %d is larger than allowed %d", p.literalSize, totalLitSizeMax)
|
||||
} else if p.literals > litMax {
|
||||
errmsg = fmt.Sprintf("max literals for command %d is larger than allowed %d", p.literals, litMax)
|
||||
}
|
||||
if errmsg != "" {
|
||||
// ../rfc/9051:357 ../rfc/3501:347
|
||||
err := errors.New("literal too big: " + errmsg)
|
||||
if sync {
|
||||
errmsg = ""
|
||||
} else {
|
||||
errmsg = "* BYE [ALERT] " + errmsg
|
||||
}
|
||||
panic(syntaxError{errmsg, "TOOBIG", err.Error(), err})
|
||||
}
|
||||
}
|
||||
|
||||
return size, sync
|
||||
}
|
||||
|
||||
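// Example (not part of the diff): stand-alone bookkeeping equivalent to the
// literal limits xliteralSize now enforces above: at most 100 KiB per
// literal, ten times that per command, and at most 1000 literals per command.
// Type and function names here are illustrative, not mox identifiers.
package main

import "fmt"

const (
	litSizeMax      = 100 * 1024
	totalLitSizeMax = 10 * litSizeMax
	litMax          = 1000
)

type litCounter struct {
	literals    int
	literalSize int64
}

func (c *litCounter) add(size int64) error {
	c.literalSize += size
	c.literals++
	switch {
	case size > litSizeMax:
		return fmt.Errorf("literal of %d bytes larger than allowed %d", size, litSizeMax)
	case c.literalSize > totalLitSizeMax:
		return fmt.Errorf("total literal size %d for command larger than allowed %d", c.literalSize, totalLitSizeMax)
	case c.literals > litMax:
		return fmt.Errorf("%d literals for command, more than allowed %d", c.literals, litMax)
	}
	return nil
}

func main() {
	var c litCounter
	fmt.Println(c.add(50 * 1024))  // <nil>
	fmt.Println(c.add(200 * 1024)) // per-literal limit exceeded
}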
@ -796,7 +766,6 @@ var searchKeyWords = []string{
|
||||
"BEFORE", "BODY",
|
||||
"CC", "DELETED", "FLAGGED",
|
||||
"FROM", "KEYWORD",
|
||||
"OLDER", "YOUNGER", // WITHIN extension, ../rfc/5032:72
|
||||
"NEW", "OLD", "ON", "RECENT", "SEEN",
|
||||
"SINCE", "SUBJECT",
|
||||
"TEXT", "TO",
|
||||
@ -808,8 +777,7 @@ var searchKeyWords = []string{
|
||||
"SENTBEFORE", "SENTON",
|
||||
"SENTSINCE", "SMALLER",
|
||||
"UID", "UNDRAFT",
|
||||
"MODSEQ", // CONDSTORE extension.
|
||||
"SAVEDBEFORE", "SAVEDON", "SAVEDSINCE", "SAVEDATESUPPORTED", // SAVEDATE extension, ../rfc/8514:203
|
||||
"MODSEQ", // CONDSTORE extension.
|
||||
}
|
||||
|
||||
// ../rfc/9051:6923 ../rfc/3501:4957, MODSEQ ../rfc/7162:2492
|
||||
@ -933,19 +901,31 @@ func (p *parser) xsearchKey() *searchKey {
|
||||
sk.clientModseq = &v
|
||||
// MODSEQ is a CONDSTORE-enabling parameter. ../rfc/7162:377
|
||||
p.conn.enabled[capCondstore] = true
|
||||
case "SAVEDBEFORE", "SAVEDON", "SAVEDSINCE":
|
||||
p.xspace()
|
||||
sk.date = p.xdate() // ../rfc/8514:267
|
||||
case "SAVEDATESUPPORTED":
|
||||
case "OLDER", "YOUNGER":
|
||||
p.xspace()
|
||||
sk.number = int64(p.xnznumber())
|
||||
default:
|
||||
p.xerrorf("missing case for op %q", sk.op)
|
||||
}
|
||||
return sk
|
||||
}
|
||||
|
||||
// hasModseq returns whether there is a modseq filter anywhere in the searchkey.
|
||||
func (sk searchKey) hasModseq() bool {
|
||||
if sk.clientModseq != nil {
|
||||
return true
|
||||
}
|
||||
for _, e := range sk.searchKeys {
|
||||
if e.hasModseq() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
if sk.searchKey != nil && sk.searchKey.hasModseq() {
|
||||
return true
|
||||
}
|
||||
if sk.searchKey2 != nil && sk.searchKey2.hasModseq() {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// ../rfc/9051:6489 ../rfc/3501:4692
|
||||
func (p *parser) xdateDay() int {
|
||||
d := p.xdigit()
|
||||
@ -968,195 +948,3 @@ func (p *parser) xdate() time.Time {
|
||||
}
|
||||
return time.Date(year, mon, day, 0, 0, 0, 0, time.UTC)
|
||||
}
|
||||
|
||||
// Parse and validate a metadata key (entry name), returned as lower-case.
|
||||
//
|
||||
// ../rfc/5464:190
|
||||
func (p *parser) xmetadataKey() string {
|
||||
// ../rfc/5464:772
|
||||
s := p.xastring()
|
||||
|
||||
// ../rfc/5464:192
|
||||
if strings.Contains(s, "//") {
|
||||
p.xerrorf("entry name must not contain two slashes")
|
||||
}
|
||||
// We allow a single slash, so it can be used with option "(depth infinity)" to get
|
||||
// all annotations.
|
||||
if s != "/" && strings.HasSuffix(s, "/") {
|
||||
p.xerrorf("entry name must not end with slash")
|
||||
}
|
||||
// ../rfc/5464:202
|
||||
if strings.Contains(s, "*") || strings.Contains(s, "%") {
|
||||
p.xerrorf("entry name must not contain * or %%")
|
||||
}
|
||||
for _, c := range s {
|
||||
if c < ' ' || c >= 0x7f {
|
||||
p.xerrorf("entry name must only contain non-control ascii characters")
|
||||
}
|
||||
}
|
||||
return strings.ToLower(s)
|
||||
}
|
||||
|
||||
// ../rfc/5464:776
|
||||
func (p *parser) xmetadataKeyValue() (key string, isString bool, value []byte) {
|
||||
key = p.xmetadataKey()
|
||||
p.xspace()
|
||||
|
||||
if p.hasPrefix("~{") {
|
||||
size, sync := p.xliteralSize(true, true)
|
||||
value = p.conn.xreadliteral(size, sync)
|
||||
line := p.conn.xreadline(false)
|
||||
p.orig, p.upper, p.o = line, toUpper(line), 0
|
||||
} else if p.hasPrefix(`"`) {
|
||||
value = []byte(p.xstring())
|
||||
isString = true
|
||||
} else if p.take("NIL") {
|
||||
value = nil
|
||||
} else {
|
||||
p.xerrorf("expected metadata value")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
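// Example (not part of the diff): stand-alone version of the entry-name rules
// xmetadataKey above enforces for METADATA (RFC 5464): no "//", no trailing
// "/" except a bare "/", no "*" or "%", printable ASCII only, lower-cased
// result. This is an illustrative sketch, not the mox function.
package main

import (
	"fmt"
	"strings"
)

func checkMetadataKey(s string) (string, error) {
	switch {
	case strings.Contains(s, "//"):
		return "", fmt.Errorf("entry name must not contain two slashes")
	case s != "/" && strings.HasSuffix(s, "/"):
		return "", fmt.Errorf("entry name must not end with slash")
	case strings.ContainsAny(s, "*%"):
		return "", fmt.Errorf("entry name must not contain * or %%")
	}
	for _, c := range s {
		if c < ' ' || c >= 0x7f {
			return "", fmt.Errorf("entry name must only contain non-control ascii characters")
		}
	}
	return strings.ToLower(s), nil
}

func main() {
	fmt.Println(checkMetadataKey("/private/Comment"))   // /private/comment <nil>
	fmt.Println(checkMetadataKey("/private//comment"))  // error: two slashes
	fmt.Println(checkMetadataKey("/private/vendor/x/")) // error: trailing slash
}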
type eventGroup struct {
|
||||
MailboxSpecifier mailboxSpecifier
|
||||
Events []notifyEvent // NONE is represented by an empty list.
|
||||
}
|
||||
|
||||
type mbspecKind string
|
||||
|
||||
const (
|
||||
mbspecSelected mbspecKind = "SELECTED"
|
||||
mbspecSelectedDelayed mbspecKind = "SELECTED-DELAYED" // Only for NOTIFY.
|
||||
mbspecInboxes mbspecKind = "INBOXES"
|
||||
mbspecPersonal mbspecKind = "PERSONAL"
|
||||
mbspecSubscribed mbspecKind = "SUBSCRIBED"
|
||||
mbspecSubtreeOne mbspecKind = "SUBTREE-ONE" // For ESEARCH, we allow it for NOTIFY too.
|
||||
mbspecSubtree mbspecKind = "SUBTREE"
|
||||
mbspecMailboxes mbspecKind = "MAILBOXES"
|
||||
)
|
||||
|
||||
// Used by both the ESEARCH and NOTIFY commands.
|
||||
type mailboxSpecifier struct {
|
||||
Kind mbspecKind
|
||||
Mailboxes []string
|
||||
}
|
||||
|
||||
type notifyEvent struct {
|
||||
// Kind is always upper case. Should be one of eventKind, anything else must result
|
||||
// in a BADEVENT response code.
|
||||
Kind string
|
||||
|
||||
FetchAtt []fetchAtt // Only for MessageNew
|
||||
}
|
||||
|
||||
// ../rfc/5465:943
|
||||
func (p *parser) xeventGroup() (eg eventGroup) {
|
||||
p.xtake("(")
|
||||
eg.MailboxSpecifier = p.xfilterMailbox(mbspecsNotify)
|
||||
p.xspace()
|
||||
if p.take("NONE") {
|
||||
p.xtake(")")
|
||||
return eg
|
||||
}
|
||||
p.xtake("(")
|
||||
for {
|
||||
e := p.xnotifyEvent()
|
||||
eg.Events = append(eg.Events, e)
|
||||
if !p.space() {
|
||||
break
|
||||
}
|
||||
}
|
||||
p.xtake(")")
|
||||
p.xtake(")")
|
||||
return eg
|
||||
}
|
||||
|
||||
var mbspecsEsearch = []mbspecKind{
|
||||
mbspecSelected, // selected-delayed is only for NOTIFY.
|
||||
mbspecInboxes,
|
||||
mbspecPersonal,
|
||||
mbspecSubscribed,
|
||||
mbspecSubtreeOne, // Must come before Subtree due to eager parsing.
|
||||
mbspecSubtree,
|
||||
mbspecMailboxes,
|
||||
}
|
||||
|
||||
var mbspecsNotify = []mbspecKind{
|
||||
mbspecSelectedDelayed, // Must come before mbspecSelected: parsing is eager and mbspecSelected would otherwise match the prefix.
|
||||
mbspecSelected,
|
||||
mbspecInboxes,
|
||||
mbspecPersonal,
|
||||
mbspecSubscribed,
|
||||
mbspecSubtreeOne, // From ESEARCH, we also allow it in NOTIFY.
|
||||
mbspecSubtree,
|
||||
mbspecMailboxes,
|
||||
}
|
||||
|
||||
// If not esearch with "subtree-one", then for notify with "selected-delayed".
|
||||
func (p *parser) xfilterMailbox(allowed []mbspecKind) (ms mailboxSpecifier) {
|
||||
var kind mbspecKind
|
||||
for _, s := range allowed {
|
||||
if p.take(string(s)) {
|
||||
kind = s
|
||||
break
|
||||
}
|
||||
}
|
||||
if kind == mbspecKind("") {
|
||||
xsyntaxErrorf("expected mailbox specifier")
|
||||
}
|
||||
|
||||
ms.Kind = kind
|
||||
switch kind {
|
||||
case "SUBTREE", "SUBTREE-ONE", "MAILBOXES":
|
||||
p.xtake(" ")
|
||||
// One or more mailboxes. Multiple start with a list. ../rfc/5465:937
|
||||
if p.take("(") {
|
||||
for {
|
||||
ms.Mailboxes = append(ms.Mailboxes, p.xmailbox())
|
||||
if !p.take(" ") {
|
||||
break
|
||||
}
|
||||
}
|
||||
p.xtake(")")
|
||||
} else {
|
||||
ms.Mailboxes = []string{p.xmailbox()}
|
||||
}
|
||||
}
|
||||
return ms
|
||||
}
|
||||
|
||||
type eventKind string
|
||||
|
||||
const (
|
||||
eventMessageNew eventKind = "MESSAGENEW"
|
||||
eventMessageExpunge eventKind = "MESSAGEEXPUNGE"
|
||||
eventFlagChange eventKind = "FLAGCHANGE"
|
||||
eventAnnotationChange eventKind = "ANNOTATIONCHANGE"
|
||||
eventMailboxName eventKind = "MAILBOXNAME"
|
||||
eventSubscriptionChange eventKind = "SUBSCRIPTIONCHANGE"
|
||||
eventMailboxMetadataChange eventKind = "MAILBOXMETADATACHANGE"
|
||||
eventServerMetadataChange eventKind = "SERVERMETADATACHANGE"
|
||||
)
|
||||
|
||||
var messageEventKinds = []eventKind{eventMessageNew, eventMessageExpunge, eventFlagChange, eventAnnotationChange}
|
||||
|
||||
// ../rfc/5465:974
|
||||
func (p *parser) xnotifyEvent() notifyEvent {
|
||||
s := strings.ToUpper(p.xatom())
|
||||
e := notifyEvent{Kind: s}
|
||||
if eventKind(e.Kind) == eventMessageNew {
|
||||
if p.take(" (") {
|
||||
for {
|
||||
a := p.xfetchAtt()
|
||||
e.FetchAtt = append(e.FetchAtt, a)
|
||||
if !p.take(" ") {
|
||||
break
|
||||
}
|
||||
}
|
||||
p.xtake(")")
|
||||
}
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
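// Example (not part of the diff): how xeventGroup and xfilterMailbox above
// decompose one NOTIFY SET argument list. The types here are simplified local
// copies for illustration, not the mox structs.
//
//   NOTIFY SET (SELECTED-DELAYED (MessageNew MessageExpunge)) (MAILBOXES (Archive Sent) NONE)
package main

import "fmt"

type notifyEvent struct{ Kind string }

type mailboxSpecifier struct {
	Kind      string
	Mailboxes []string
}

type eventGroup struct {
	MailboxSpecifier mailboxSpecifier
	Events           []notifyEvent // NONE is represented by an empty list.
}

func main() {
	groups := []eventGroup{
		{
			MailboxSpecifier: mailboxSpecifier{Kind: "SELECTED-DELAYED"},
			Events:           []notifyEvent{{Kind: "MESSAGENEW"}, {Kind: "MESSAGEEXPUNGE"}},
		},
		{
			MailboxSpecifier: mailboxSpecifier{Kind: "MAILBOXES", Mailboxes: []string{"Archive", "Sent"}},
			Events:           nil, // NONE: no events, not even the defaults.
		},
	}
	fmt.Printf("%+v\n", groups)
}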
@ -1,8 +1,6 @@
|
||||
package imapserver
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"io"
|
||||
"net"
|
||||
)
|
||||
|
||||
@ -15,7 +13,10 @@ type prefixConn struct {
|
||||
|
||||
func (c *prefixConn) Read(buf []byte) (int, error) {
|
||||
if len(c.prefix) > 0 {
|
||||
n := min(len(buf), len(c.prefix))
|
||||
n := len(buf)
|
||||
if n > len(c.prefix) {
|
||||
n = len(c.prefix)
|
||||
}
|
||||
copy(buf[:n], c.prefix[:n])
|
||||
c.prefix = c.prefix[n:]
|
||||
if len(c.prefix) == 0 {
|
||||
@ -25,18 +26,3 @@ func (c *prefixConn) Read(buf []byte) (int, error) {
|
||||
}
|
||||
return c.Conn.Read(buf)
|
||||
}
|
||||
|
||||
// xprefixConn returns either the original net.Conn passed as parameter, or returns
|
||||
// a *prefixConn returning the buffered data available in br followed by data from the
|
||||
// net.Conn passed in.
|
||||
func xprefixConn(c net.Conn, br *bufio.Reader) net.Conn {
|
||||
n := br.Buffered()
|
||||
if n == 0 {
|
||||
return c
|
||||
}
|
||||
|
||||
buf := make([]byte, n)
|
||||
_, err := io.ReadFull(c, buf)
|
||||
xcheckf(err, "get buffered data")
|
||||
return &prefixConn{buf, c}
|
||||
}
|
||||
|
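// Example (not part of the diff): generic demonstration of the prefixConn
// pattern above. After sniffing bytes through a bufio.Reader, later readers
// get a net.Conn that first replays what the bufio.Reader had buffered and
// then continues on the raw connection. Uses net.Pipe; nothing mox-specific.
package main

import (
	"bufio"
	"fmt"
	"net"
	"strings"
)

type prefixConn struct {
	prefix []byte
	net.Conn
}

func (c *prefixConn) Read(buf []byte) (int, error) {
	if len(c.prefix) > 0 {
		n := min(len(buf), len(c.prefix))
		copy(buf[:n], c.prefix[:n])
		c.prefix = c.prefix[n:]
		return n, nil
	}
	return c.Conn.Read(buf)
}

func main() {
	client, server := net.Pipe()
	go func() {
		client.Write([]byte("PREFIXED remainder\n"))
		client.Close()
	}()

	br := bufio.NewReader(server)
	peek, _ := br.Peek(8) // Sniff without consuming, filling bufio's buffer.
	fmt.Printf("sniffed: %q\n", peek)

	// Re-wrap: replay what bufio buffered, then read from the raw conn.
	buffered, _ := br.Peek(br.Buffered())
	conn := &prefixConn{prefix: append([]byte(nil), buffered...), Conn: server}

	var all strings.Builder
	buf := make([]byte, 64)
	for {
		n, err := conn.Read(buf)
		all.Write(buf[:n])
		if err != nil {
			break
		}
	}
	fmt.Printf("replayed: %q\n", all.String())
}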
@ -32,26 +32,17 @@ func (ss numSet) containsSeq(seq msgseq, uids []store.UID, searchResult []store.
|
||||
uid := uids[int(seq)-1]
|
||||
return uidSearch(searchResult, uid) > 0 && uidSearch(uids, uid) > 0
|
||||
}
|
||||
return ss.containsSeqCount(seq, uint32(len(uids)))
|
||||
}
|
||||
|
||||
// containsSeqCount returns whether seq is contained in ss, which must not be a
|
||||
// searchResult, assuming the message count.
|
||||
func (ss numSet) containsSeqCount(seq msgseq, msgCount uint32) bool {
|
||||
if msgCount == 0 {
|
||||
return false
|
||||
}
|
||||
for _, r := range ss.ranges {
|
||||
first := r.first.number
|
||||
if r.first.star || first > msgCount {
|
||||
first = msgCount
|
||||
if r.first.star || first > uint32(len(uids)) {
|
||||
first = uint32(len(uids))
|
||||
}
|
||||
|
||||
last := first
|
||||
if r.last != nil {
|
||||
last = r.last.number
|
||||
if r.last.star || last > msgCount {
|
||||
last = msgCount
|
||||
if r.last.star || last > uint32(len(uids)) {
|
||||
last = uint32(len(uids))
|
||||
}
|
||||
}
|
||||
if first > last {
|
||||
@ -65,77 +56,35 @@ func (ss numSet) containsSeqCount(seq msgseq, msgCount uint32) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// containsKnownUID returns whether uid, which is known to exist, matches the numSet.
|
||||
// highestUID must return the highest/last UID in the mailbox, or an error. A last UID must
|
||||
// exist, otherwise this method wouldn't have been called with a known uid.
|
||||
// highestUID is needed for interpreting UID sets like "<num>:*" where num is
|
||||
// higher than the uid to check.
|
||||
func (ss numSet) xcontainsKnownUID(uid store.UID, searchResult []store.UID, xhighestUID func() store.UID) bool {
|
||||
func (ss numSet) containsUID(uid store.UID, uids []store.UID, searchResult []store.UID) bool {
|
||||
if len(uids) == 0 {
|
||||
return false
|
||||
}
|
||||
if ss.searchResult {
|
||||
return uidSearch(searchResult, uid) > 0
|
||||
return uidSearch(searchResult, uid) > 0 && uidSearch(uids, uid) > 0
|
||||
}
|
||||
|
||||
for _, r := range ss.ranges {
|
||||
a := store.UID(r.first.number)
|
||||
// Num in <num>:* can be larger than last, but it still matches the last...
|
||||
// Similar for *:<num>. ../rfc/9051:4814
|
||||
if r.first.star {
|
||||
if r.last != nil && uid >= store.UID(r.last.number) {
|
||||
return true
|
||||
}
|
||||
a = xhighestUID()
|
||||
}
|
||||
b := a
|
||||
if r.last != nil {
|
||||
b = store.UID(r.last.number)
|
||||
if r.last.star {
|
||||
if uid >= a {
|
||||
return true
|
||||
}
|
||||
b = xhighestUID()
|
||||
}
|
||||
}
|
||||
if a > b {
|
||||
a, b = b, a
|
||||
}
|
||||
if uid >= a && uid <= b {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// xinterpretStar returns a numset that interprets stars in a uid set using
|
||||
// xlastUID, returning a new uid set without stars, with increasing first/last, and
|
||||
// without unneeded ranges (first.number != last.number).
|
||||
// If there are no messages in the mailbox, xlastUID must return zero and the
|
||||
// returned numSet will include 0.
|
||||
func (s numSet) xinterpretStar(xlastUID func() store.UID) numSet {
|
||||
var ns numSet
|
||||
|
||||
for _, r := range s.ranges {
|
||||
first := r.first.number
|
||||
if r.first.star {
|
||||
first = uint32(xlastUID())
|
||||
first := store.UID(r.first.number)
|
||||
if r.first.star || first > uids[len(uids)-1] {
|
||||
first = uids[len(uids)-1]
|
||||
}
|
||||
last := first
|
||||
// Num in <num>:* can be larger than last, but it still matches the last...
|
||||
// Similar for *:<num>. ../rfc/9051:4814
|
||||
if r.last != nil {
|
||||
if r.last.star {
|
||||
last = uint32(xlastUID())
|
||||
} else {
|
||||
last = r.last.number
|
||||
last = store.UID(r.last.number)
|
||||
if r.last.star || last > uids[len(uids)-1] {
|
||||
last = uids[len(uids)-1]
|
||||
}
|
||||
}
|
||||
if first > last {
|
||||
first, last = last, first
|
||||
}
|
||||
nr := numRange{first: setNumber{number: first}}
|
||||
if first != last {
|
||||
nr.last = &setNumber{number: last}
|
||||
if uid >= first && uid <= last && uidSearch(uids, uid) > 0 {
|
||||
return true
|
||||
}
|
||||
ns.ranges = append(ns.ranges, nr)
|
||||
}
|
||||
return ns
|
||||
return false
|
||||
}
|
||||
|
||||
// contains returns whether the numset contains the number.
|
||||
@ -209,6 +158,38 @@ func (ss numSet) String() string {
|
||||
return l[0]
|
||||
}
|
||||
|
||||
// interpretStar returns a numset that interprets stars in a numset, returning a new
|
||||
// numset without stars with increasing first/last.
|
||||
func (s numSet) interpretStar(uids []store.UID) numSet {
|
||||
var ns numSet
|
||||
if len(uids) == 0 {
|
||||
return ns
|
||||
}
|
||||
|
||||
for _, r := range s.ranges {
|
||||
first := r.first.number
|
||||
if r.first.star || first > uint32(uids[len(uids)-1]) {
|
||||
first = uint32(uids[len(uids)-1])
|
||||
}
|
||||
last := first
|
||||
if r.last != nil {
|
||||
last = r.last.number
|
||||
if r.last.star || last > uint32(uids[len(uids)-1]) {
|
||||
last = uint32(uids[len(uids)-1])
|
||||
}
|
||||
}
|
||||
if first > last {
|
||||
first, last = last, first
|
||||
}
|
||||
nr := numRange{first: setNumber{number: first}}
|
||||
if first != last {
|
||||
nr.last = &setNumber{number: last}
|
||||
}
|
||||
ns.ranges = append(ns.ranges, nr)
|
||||
}
|
||||
return ns
|
||||
}
|
||||
|
||||
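// Example (not part of the diff): toy version of the star interpretation done
// by xinterpretStar/xcontainsKnownUID above. "*" stands for the highest UID in
// the mailbox and a range is normalized so first <= last, which is why a set
// like 30:* still matches the last message (../rfc/9051:4814). Simplified
// stand-alone sketch, not the mox code.
package main

import "fmt"

func resolveRange(first, last uint32, firstStar, lastStar bool, highest uint32) (uint32, uint32) {
	if firstStar {
		first = highest
	}
	if lastStar {
		last = highest
	}
	if first > last {
		first, last = last, first
	}
	return first, last
}

func main() {
	// With highest UID 20 in the mailbox:
	fmt.Println(resolveRange(12, 0, false, true, 20)) // 12:*  -> 12 20
	fmt.Println(resolveRange(0, 4, true, false, 20))  // *:4   -> 4 20
	fmt.Println(resolveRange(30, 0, false, true, 20)) // 30:*  -> 20 30
}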
// whether numSet only has numbers (no star/search), and is strictly increasing.
|
||||
func (s *numSet) isBasicIncreasing() bool {
|
||||
if s.searchResult {
|
||||
@ -326,15 +307,13 @@ type fetchAtt struct {
|
||||
section *sectionSpec
|
||||
sectionBinary []uint32
|
||||
partial *partial
|
||||
previewLazy bool // Not regular "PREVIEW", but "PREVIEW (LAZY)".
|
||||
}
|
||||
|
||||
type searchKey struct {
|
||||
// Only one of searchKeys, seqSet and op can be non-nil/non-empty.
|
||||
searchKeys []searchKey // In case of nested/multiple keys. Also for the top-level command.
|
||||
seqSet *numSet // In case of bare sequence set. For op UID, field uidSet contains the parameter.
|
||||
op string // Determines which of the fields below are set.
|
||||
|
||||
searchKeys []searchKey // In case of nested/multiple keys. Also for the top-level command.
|
||||
seqSet *numSet // In case of bare sequence set. For op UID, field uidSet contains the parameter.
|
||||
op string // Determines which of the fields below are set.
|
||||
headerField string
|
||||
astring string
|
||||
date time.Time
|
||||
@ -346,40 +325,6 @@ type searchKey struct {
|
||||
clientModseq *int64
|
||||
}
|
||||
|
||||
// Whether we need message sequence numbers to evaluate. Sequence numbers are not
|
||||
// allowed with UIDONLY. And if we need sequence numbers we cannot optimize
|
||||
// searching for MAX with a query in reverse order.
|
||||
func (sk *searchKey) hasSequenceNumbers() bool {
|
||||
for _, k := range sk.searchKeys {
|
||||
if k.hasSequenceNumbers() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
if sk.searchKey != nil && sk.searchKey.hasSequenceNumbers() || sk.searchKey2 != nil && sk.searchKey2.hasSequenceNumbers() {
|
||||
return true
|
||||
}
|
||||
return sk.seqSet != nil && !sk.seqSet.searchResult
|
||||
}
|
||||
|
||||
// hasModseq returns whether there is a modseq filter anywhere in the searchkey.
|
||||
func (sk *searchKey) hasModseq() bool {
|
||||
if sk.clientModseq != nil {
|
||||
return true
|
||||
}
|
||||
for _, e := range sk.searchKeys {
|
||||
if e.hasModseq() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
if sk.searchKey != nil && sk.searchKey.hasModseq() {
|
||||
return true
|
||||
}
|
||||
if sk.searchKey2 != nil && sk.searchKey2.hasModseq() {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func compactUIDSet(l []store.UID) (r numSet) {
|
||||
for len(l) > 0 {
|
||||
e := 1
|
||||
|
Some files were not shown because too many files have changed in this diff